From a4327474cbcf1512d82436757c19871583ab22c0 Mon Sep 17 00:00:00 2001
From: Vincent Landgraf
Date: Fri, 3 Sep 2021 15:07:57 +0200
Subject: [PATCH 1/2] add grpc client and server middleware setup code

---
 Makefile | 17 +-
 README.md | 4 +
 docker-compose.yml | 1 +
 go.mod | 28 +-
 go.sum | 1155 +-
 grpc/README.md | 6 +
 grpc/client.go | 56 +
 grpc/server.go | 161 +
 http/security/helper.go | 11 +
 maintenance/log/handler.go | 7 +-
 tools.go | 4 +
 tools/testserver/main.go | 73 +
 tools/testserver/math/math.pb.go | 222 +
 tools/testserver/math/math.proto | 19 +
 tools/testserver/math/math_grpc.pb.go | 137 +
 vendor/4d63.com/gochecknoglobals/LICENSE | 21 +
 .../checknoglobals/check_no_globals.go | 154 +
 vendor/github.com/Antonboom/errname/LICENSE | 21 +
 .../errname/pkg/analyzer/analyzer.go | 133 +
 .../Antonboom/errname/pkg/analyzer/facts.go | 237 +
 vendor/github.com/BurntSushi/toml/.gitignore | 2 +
 vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 +
 vendor/github.com/BurntSushi/toml/COPYING | 21 +
 vendor/github.com/BurntSushi/toml/README.md | 220 +
 vendor/github.com/BurntSushi/toml/decode.go | 511 +
 .../BurntSushi/toml/decode_go116.go | 18 +
 .../github.com/BurntSushi/toml/decode_meta.go | 123 +
 .../github.com/BurntSushi/toml/deprecated.go | 33 +
 vendor/github.com/BurntSushi/toml/doc.go | 13 +
 vendor/github.com/BurntSushi/toml/encode.go | 650 +
 vendor/github.com/BurntSushi/toml/go.mod | 3 +
 vendor/github.com/BurntSushi/toml/go.sum | 0
 .../github.com/BurntSushi/toml/internal/tz.go | 36 +
 vendor/github.com/BurntSushi/toml/lex.go | 1225 +
 vendor/github.com/BurntSushi/toml/parse.go | 739 +
 .../github.com/BurntSushi/toml/type_check.go | 70 +
 .../github.com/BurntSushi/toml/type_fields.go | 242 +
 .../github.com/Djarvur/go-err113/.gitignore | 15 +
 .../Djarvur/go-err113/.golangci.yml | 150 +
 .../github.com/Djarvur/go-err113/.travis.yml | 24 +
 vendor/github.com/Djarvur/go-err113/LICENSE | 21 +
 .../github.com/Djarvur/go-err113/README.adoc | 75 +
 .../Djarvur/go-err113/comparison.go | 123 +
 .../Djarvur/go-err113/definition.go | 74 +
 vendor/github.com/Djarvur/go-err113/err113.go | 90 +
 vendor/github.com/Djarvur/go-err113/go.mod | 5 +
 vendor/github.com/Djarvur/go-err113/go.sum | 20 +
 .../github.com/Masterminds/semver/.travis.yml | 29 +
 .../Masterminds/semver/CHANGELOG.md | 109 +
 .../github.com/Masterminds/semver/LICENSE.txt | 19 +
 vendor/github.com/Masterminds/semver/Makefile | 36 +
 .../github.com/Masterminds/semver/README.md | 194 +
 .../Masterminds/semver/appveyor.yml | 44 +
 .../Masterminds/semver/collection.go | 24 +
 .../Masterminds/semver/constraints.go | 423 +
 vendor/github.com/Masterminds/semver/doc.go | 115 +
 .../github.com/Masterminds/semver/version.go | 425 +
 .../Masterminds/semver/version_fuzz.go | 10 +
 .../OpenPeeDeeP/depguard/.gitignore | 14 +
 .../github.com/OpenPeeDeeP/depguard/LICENSE | 674 +
 .../github.com/OpenPeeDeeP/depguard/README.md | 77 +
 .../OpenPeeDeeP/depguard/depguard.go | 241 +
 vendor/github.com/OpenPeeDeeP/depguard/go.mod | 9 +
 vendor/github.com/OpenPeeDeeP/depguard/go.sum | 6 +
 vendor/github.com/alexkohler/prealloc/LICENSE | 21 +
 .../alexkohler/prealloc/pkg/prealloc.go | 267 +
 .../github.com/ashanbrown/forbidigo/LICENSE | 13 +
 .../forbidigo/forbidigo/config_options.go | 45 +
 .../forbidigo/forbidigo/forbidigo.go | 193 +
 vendor/github.com/ashanbrown/makezero/LICENSE | 13 +
 .../ashanbrown/makezero/makezero/makezero.go | 200 +
 vendor/github.com/bkielbasa/cyclop/LICENSE | 21 +
 .../bkielbasa/cyclop/pkg/analyzer/analyzer.go | 104 +
 vendor/github.com/bombsimon/wsl/v3/.gitignore | 70 +
.../github.com/bombsimon/wsl/v3/.travis.yml | 25 + vendor/github.com/bombsimon/wsl/v3/LICENSE | 21 + vendor/github.com/bombsimon/wsl/v3/README.md | 126 + vendor/github.com/bombsimon/wsl/v3/go.mod | 12 + vendor/github.com/bombsimon/wsl/v3/go.sum | 25 + vendor/github.com/bombsimon/wsl/v3/wsl.go | 1247 + .../github.com/cespare/xxhash/v2/.travis.yml | 8 + .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 67 + vendor/github.com/cespare/xxhash/v2/go.mod | 3 + vendor/github.com/cespare/xxhash/v2/go.sum | 0 vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + .../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 46 + .../charithe/durationcheck/.gitignore | 1 + .../github.com/charithe/durationcheck/LICENSE | 201 + .../charithe/durationcheck/Makefile | 5 + .../charithe/durationcheck/README.md | 48 + .../charithe/durationcheck/durationcheck.go | 188 + .../github.com/charithe/durationcheck/go.mod | 5 + .../github.com/charithe/durationcheck/go.sum | 26 + vendor/github.com/chavacava/garif/.gitignore | 3 + vendor/github.com/chavacava/garif/LICENSE | 21 + vendor/github.com/chavacava/garif/README.md | 52 + .../chavacava/garif/constructors.go | 338 + .../github.com/chavacava/garif/decorators.go | 94 + vendor/github.com/chavacava/garif/doc.go | 11 + vendor/github.com/chavacava/garif/go.mod | 5 + vendor/github.com/chavacava/garif/go.sum | 11 + vendor/github.com/chavacava/garif/io.go | 26 + vendor/github.com/chavacava/garif/models.go | 1486 + vendor/github.com/daixiang0/gci/LICENSE | 29 + .../github.com/daixiang0/gci/pkg/gci/gci.go | 383 + .../github.com/daixiang0/gci/pkg/gci/std.go | 161 + .../denis-tingajkin/go-header/.gitignore | 1 + .../denis-tingajkin/go-header/.go-header.yml | 19 + .../denis-tingajkin/go-header/LICENSE | 674 + .../denis-tingajkin/go-header/README.md | 81 + .../denis-tingajkin/go-header/analyzer.go | 146 + .../denis-tingajkin/go-header/config.go | 99 + .../denis-tingajkin/go-header/go.mod | 10 + .../denis-tingajkin/go-header/go.sum | 31 + .../denis-tingajkin/go-header/issue.go | 48 + .../denis-tingajkin/go-header/location.go | 35 + .../denis-tingajkin/go-header/option.go | 44 + .../denis-tingajkin/go-header/reader.go | 116 + .../denis-tingajkin/go-header/value.go | 128 + vendor/github.com/esimonov/ifshort/LICENSE | 21 + .../esimonov/ifshort/pkg/analyzer/analyzer.go | 247 + .../ifshort/pkg/analyzer/occurrences.go | 259 + vendor/github.com/ettle/strcase/.gitignore | 18 + vendor/github.com/ettle/strcase/.golangci.yml | 88 + vendor/github.com/ettle/strcase/.readme.tmpl | 80 + vendor/github.com/ettle/strcase/LICENSE | 21 + vendor/github.com/ettle/strcase/Makefile | 16 + vendor/github.com/ettle/strcase/README.md | 542 + vendor/github.com/ettle/strcase/caser.go | 87 + vendor/github.com/ettle/strcase/convert.go | 297 + vendor/github.com/ettle/strcase/doc.go | 155 + vendor/github.com/ettle/strcase/go.mod | 5 + vendor/github.com/ettle/strcase/go.sum | 11 + vendor/github.com/ettle/strcase/initialism.go | 43 + vendor/github.com/ettle/strcase/split.go | 164 + vendor/github.com/ettle/strcase/strcase.go | 81 + vendor/github.com/ettle/strcase/unicode.go | 48 + vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 178 + vendor/github.com/fatih/color/color.go | 618 + vendor/github.com/fatih/color/doc.go | 135 + vendor/github.com/fatih/color/go.mod | 8 + 
vendor/github.com/fatih/color/go.sum | 7 + vendor/github.com/fatih/structtag/LICENSE | 60 + vendor/github.com/fatih/structtag/README.md | 73 + vendor/github.com/fatih/structtag/go.mod | 3 + vendor/github.com/fatih/structtag/tags.go | 315 + .../fsnotify/fsnotify/.editorconfig | 12 + .../fsnotify/fsnotify/.gitattributes | 1 + .../github.com/fsnotify/fsnotify/.gitignore | 6 + .../github.com/fsnotify/fsnotify/.travis.yml | 36 + vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 317 + .../fsnotify/fsnotify/CONTRIBUTING.md | 77 + vendor/github.com/fsnotify/fsnotify/LICENSE | 28 + vendor/github.com/fsnotify/fsnotify/README.md | 130 + vendor/github.com/fsnotify/fsnotify/fen.go | 37 + .../github.com/fsnotify/fsnotify/fsnotify.go | 68 + vendor/github.com/fsnotify/fsnotify/go.mod | 5 + vendor/github.com/fsnotify/fsnotify/go.sum | 2 + .../github.com/fsnotify/fsnotify/inotify.go | 337 + .../fsnotify/fsnotify/inotify_poller.go | 187 + vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 + .../fsnotify/fsnotify/open_mode_bsd.go | 11 + .../fsnotify/fsnotify/open_mode_darwin.go | 12 + .../github.com/fsnotify/fsnotify/windows.go | 561 + vendor/github.com/fzipp/gocyclo/CHANGELOG.md | 38 + vendor/github.com/fzipp/gocyclo/CONTRIBUTORS | 7 + vendor/github.com/fzipp/gocyclo/LICENSE | 27 + vendor/github.com/fzipp/gocyclo/README.md | 107 + vendor/github.com/fzipp/gocyclo/analyze.go | 151 + vendor/github.com/fzipp/gocyclo/complexity.go | 48 + vendor/github.com/fzipp/gocyclo/directives.go | 39 + vendor/github.com/fzipp/gocyclo/go.mod | 3 + vendor/github.com/fzipp/gocyclo/stats.go | 73 + vendor/github.com/go-critic/go-critic/LICENSE | 22 + .../checkers/appendAssign_checker.go | 102 + .../checkers/appendCombine_checker.go | 102 + .../go-critic/checkers/argOrder_checker.go | 97 + .../go-critic/checkers/assignOp_checker.go | 102 + .../go-critic/checkers/badCall_checker.go | 63 + .../go-critic/checkers/badCond_checker.go | 147 + .../go-critic/checkers/badLock_checker.go | 116 + .../go-critic/checkers/badRegexp_checker.go | 445 + .../checkers/boolExprSimplify_checker.go | 346 + .../checkers/builtinShadowDecl_checker.go | 63 + .../checkers/builtinShadow_checker.go | 36 + .../go-critic/checkers/captLocal_checker.go | 49 + .../go-critic/checkers/caseOrder_checker.go | 88 + .../go-critic/go-critic/checkers/checkers.go | 19 + .../checkers/codegenComment_checker.go | 61 + .../checkers/commentFormatting_checker.go | 79 + .../checkers/commentedOutCode_checker.go | 157 + .../checkers/commentedOutImport_checker.go | 76 + .../checkers/defaultCaseOrder_checker.go | 65 + .../checkers/deferUnlambda_checker.go | 94 + .../checkers/deprecatedComment_checker.go | 149 + .../go-critic/checkers/docStub_checker.go | 95 + .../go-critic/checkers/dupArg_checker.go | 133 + .../checkers/dupBranchBody_checker.go | 58 + .../go-critic/checkers/dupCase_checker.go | 57 + .../go-critic/checkers/dupImports_checker.go | 63 + .../go-critic/checkers/dupSubExpr_checker.go | 102 + .../go-critic/checkers/elseif_checker.go | 71 + .../checkers/emptyFallthrough_checker.go | 70 + .../checkers/emptyStringTest_checker.go | 58 + .../go-critic/checkers/equalFold_checker.go | 87 + .../go-critic/checkers/evalOrder_checker.go | 87 + .../checkers/exitAfterDefer_checker.go | 84 + .../checkers/filepathJoin_checker.go | 50 + .../go-critic/checkers/flagDeref_checker.go | 65 + .../go-critic/checkers/flagName_checker.go | 88 + .../go-critic/checkers/hexLiteral_checker.go | 60 + .../go-critic/checkers/hugeParam_checker.go | 63 + 
.../go-critic/checkers/ifElseChain_checker.go | 99 + .../checkers/importShadow_checker.go | 47 + .../go-critic/checkers/indexAlloc_checker.go | 50 + .../go-critic/checkers/initClause_checker.go | 56 + .../internal/astwalk/comment_walker.go | 41 + .../internal/astwalk/doc_comment_walker.go | 48 + .../checkers/internal/astwalk/expr_walker.go | 31 + .../internal/astwalk/func_decl_walker.go | 23 + .../internal/astwalk/local_comment_walker.go | 32 + .../internal/astwalk/local_def_visitor.go | 51 + .../internal/astwalk/local_def_walker.go | 118 + .../internal/astwalk/local_expr_walker.go | 29 + .../internal/astwalk/stmt_list_walker.go | 33 + .../checkers/internal/astwalk/stmt_walker.go | 29 + .../internal/astwalk/type_expr_walker.go | 114 + .../checkers/internal/astwalk/visitor.go | 80 + .../checkers/internal/astwalk/walk_handler.go | 34 + .../checkers/internal/astwalk/walker.go | 57 + .../checkers/internal/lintutil/astfind.go | 27 + .../checkers/internal/lintutil/astflow.go | 86 + .../checkers/internal/lintutil/astset.go | 44 + .../checkers/internal/lintutil/zero_value.go | 94 + .../go-critic/checkers/mapKey_checker.go | 124 + .../checkers/methodExprCall_checker.go | 57 + .../checkers/nestingReduce_checker.go | 73 + .../go-critic/checkers/newDeref_checker.go | 45 + .../checkers/nilValReturn_checker.go | 71 + .../checkers/octalLiteral_checker.go | 82 + .../go-critic/checkers/offBy1_checker.go | 66 + .../checkers/paramTypeCombine_checker.go | 93 + .../checkers/ptrToRefParam_checker.go | 70 + .../checkers/rangeExprCopy_checker.go | 80 + .../checkers/rangeValCopy_checker.go | 75 + .../go-critic/checkers/regexpMust_checker.go | 47 + .../checkers/regexpPattern_checker.go | 68 + .../checkers/regexpSimplify_checker.go | 511 + .../go-critic/checkers/ruleguard_checker.go | 157 + .../checkers/singleCaseSwitch_checker.go | 84 + .../go-critic/checkers/sloppyLen_checker.go | 72 + .../checkers/sloppyReassign_checker.go | 80 + .../checkers/sloppyTypeAssert_checker.go | 75 + .../go-critic/checkers/sortSlice_checker.go | 135 + .../go-critic/checkers/sqlQuery_checker.go | 167 + .../checkers/stringXbytes_checker.go | 47 + .../go-critic/checkers/switchTrue_checker.go | 49 + .../checkers/tooManyResults_checker.go | 54 + .../go-critic/checkers/truncateCmp_checker.go | 117 + .../checkers/typeAssertChain_checker.go | 132 + .../checkers/typeDefFirst_checker.go | 88 + .../checkers/typeSwitchVar_checker.go | 97 + .../go-critic/checkers/typeUnparen_checker.go | 86 + .../go-critic/checkers/underef_checker.go | 127 + .../go-critic/checkers/unlabelStmt_checker.go | 170 + .../go-critic/checkers/unlambda_checker.go | 100 + .../checkers/unnamedResult_checker.go | 103 + .../checkers/unnecessaryBlock_checker.go | 69 + .../checkers/unnecessaryDefer_checker.go | 111 + .../go-critic/checkers/unslice_checker.go | 59 + .../go-critic/go-critic/checkers/utils.go | 309 + .../go-critic/checkers/valSwap_checker.go | 64 + .../go-critic/checkers/weakCond_checker.go | 77 + .../go-critic/checkers/whyNoLint_checker.go | 52 + .../go-critic/checkers/wrapperFunc_checker.go | 229 + .../checkers/yodaStyleExpr_checker.go | 66 + .../go-critic/framework/linter/checkers_db.go | 136 + .../go-critic/framework/linter/context.go | 35 + .../go-critic/framework/linter/lintpack.go | 269 + .../go-toolsmith/astcast/.travis.yml | 9 + .../github.com/go-toolsmith/astcast/LICENSE | 21 + .../github.com/go-toolsmith/astcast/README.md | 86 + .../go-toolsmith/astcast/astcast.go | 590 + vendor/github.com/go-toolsmith/astcast/go.mod | 6 + 
vendor/github.com/go-toolsmith/astcast/go.sum | 4 + .../go-toolsmith/astcopy/.travis.yml | 9 + .../github.com/go-toolsmith/astcopy/LICENSE | 21 + .../github.com/go-toolsmith/astcopy/README.md | 41 + .../go-toolsmith/astcopy/astcopy.go | 955 + vendor/github.com/go-toolsmith/astcopy/go.mod | 6 + vendor/github.com/go-toolsmith/astcopy/go.sum | 4 + .../go-toolsmith/astequal/.gitignore | 5 + .../go-toolsmith/astequal/.travis.yml | 9 + .../github.com/go-toolsmith/astequal/LICENSE | 21 + .../go-toolsmith/astequal/README.md | 67 + .../go-toolsmith/astequal/astequal.go | 734 + .../github.com/go-toolsmith/astequal/go.mod | 1 + .../go-toolsmith/astfmt/.travis.yml | 9 + vendor/github.com/go-toolsmith/astfmt/LICENSE | 21 + .../github.com/go-toolsmith/astfmt/README.md | 39 + .../github.com/go-toolsmith/astfmt/astfmt.go | 111 + vendor/github.com/go-toolsmith/astfmt/go.mod | 6 + vendor/github.com/go-toolsmith/astfmt/go.sum | 4 + .../github.com/go-toolsmith/astp/.gitignore | 4 + .../github.com/go-toolsmith/astp/.travis.yml | 9 + vendor/github.com/go-toolsmith/astp/LICENSE | 21 + vendor/github.com/go-toolsmith/astp/README.md | 39 + vendor/github.com/go-toolsmith/astp/decl.go | 39 + vendor/github.com/go-toolsmith/astp/expr.go | 141 + vendor/github.com/go-toolsmith/astp/go.mod | 6 + vendor/github.com/go-toolsmith/astp/go.sum | 4 + vendor/github.com/go-toolsmith/astp/stmt.go | 135 + .../go-toolsmith/strparse/.travis.yml | 9 + .../github.com/go-toolsmith/strparse/LICENSE | 21 + .../go-toolsmith/strparse/README.md | 34 + .../github.com/go-toolsmith/strparse/go.mod | 1 + .../go-toolsmith/strparse/strparse.go | 59 + .../github.com/go-toolsmith/typep/.travis.yml | 9 + vendor/github.com/go-toolsmith/typep/LICENSE | 21 + .../github.com/go-toolsmith/typep/README.md | 37 + vendor/github.com/go-toolsmith/typep/doc.go | 2 + vendor/github.com/go-toolsmith/typep/go.mod | 1 + .../go-toolsmith/typep/predicates.go | 36 + .../github.com/go-toolsmith/typep/safeExpr.go | 73 + .../go-toolsmith/typep/simplePredicates.go | 359 + vendor/github.com/go-xmlfmt/xmlfmt/LICENSE | 21 + vendor/github.com/go-xmlfmt/xmlfmt/README.md | 178 + vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go | 56 + vendor/github.com/gobwas/glob/.gitignore | 8 + vendor/github.com/gobwas/glob/.travis.yml | 9 + vendor/github.com/gobwas/glob/LICENSE | 21 + vendor/github.com/gobwas/glob/bench.sh | 26 + .../gobwas/glob/compiler/compiler.go | 525 + vendor/github.com/gobwas/glob/glob.go | 80 + vendor/github.com/gobwas/glob/match/any.go | 45 + vendor/github.com/gobwas/glob/match/any_of.go | 82 + vendor/github.com/gobwas/glob/match/btree.go | 146 + .../github.com/gobwas/glob/match/contains.go | 58 + .../github.com/gobwas/glob/match/every_of.go | 99 + vendor/github.com/gobwas/glob/match/list.go | 49 + vendor/github.com/gobwas/glob/match/match.go | 81 + vendor/github.com/gobwas/glob/match/max.go | 49 + vendor/github.com/gobwas/glob/match/min.go | 57 + .../github.com/gobwas/glob/match/nothing.go | 27 + vendor/github.com/gobwas/glob/match/prefix.go | 50 + .../gobwas/glob/match/prefix_any.go | 55 + .../gobwas/glob/match/prefix_suffix.go | 62 + vendor/github.com/gobwas/glob/match/range.go | 48 + vendor/github.com/gobwas/glob/match/row.go | 77 + .../github.com/gobwas/glob/match/segments.go | 91 + vendor/github.com/gobwas/glob/match/single.go | 43 + vendor/github.com/gobwas/glob/match/suffix.go | 35 + .../gobwas/glob/match/suffix_any.go | 43 + vendor/github.com/gobwas/glob/match/super.go | 33 + vendor/github.com/gobwas/glob/match/text.go | 45 + vendor/github.com/gobwas/glob/readme.md 
| 148 + .../github.com/gobwas/glob/syntax/ast/ast.go | 122 + .../gobwas/glob/syntax/ast/parser.go | 157 + .../gobwas/glob/syntax/lexer/lexer.go | 273 + .../gobwas/glob/syntax/lexer/token.go | 88 + .../github.com/gobwas/glob/syntax/syntax.go | 14 + .../gobwas/glob/util/runes/runes.go | 154 + .../gobwas/glob/util/strings/strings.go | 39 + vendor/github.com/gofrs/flock/.gitignore | 24 + vendor/github.com/gofrs/flock/.travis.yml | 10 + vendor/github.com/gofrs/flock/LICENSE | 27 + vendor/github.com/gofrs/flock/README.md | 41 + vendor/github.com/gofrs/flock/appveyor.yml | 25 + vendor/github.com/gofrs/flock/flock.go | 144 + vendor/github.com/gofrs/flock/flock_aix.go | 281 + vendor/github.com/gofrs/flock/flock_unix.go | 197 + vendor/github.com/gofrs/flock/flock_winapi.go | 76 + .../github.com/gofrs/flock/flock_windows.go | 142 + .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 771 +- .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 317 + .../golang/protobuf/proto/table_marshal.go | 2776 -- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../github.com/golang/protobuf/ptypes/any.go | 179 + .../golang/protobuf/ptypes/any/any.pb.go | 62 + .../github.com/golang/protobuf/ptypes/doc.go | 10 + .../golang/protobuf/ptypes/duration.go | 76 + .../protobuf/ptypes/duration/duration.pb.go | 63 + .../golang/protobuf/ptypes/timestamp.go | 112 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + vendor/github.com/golangci/check/LICENSE | 674 + .../check/cmd/structcheck/structcheck.go | 193 + .../golangci/check/cmd/varcheck/varcheck.go | 163 + vendor/github.com/golangci/dupl/.travis.yml | 5 + vendor/github.com/golangci/dupl/LICENSE | 21 + vendor/github.com/golangci/dupl/README.md | 63 + .../github.com/golangci/dupl/job/buildtree.go | 22 + vendor/github.com/golangci/dupl/job/parse.go | 36 + vendor/github.com/golangci/dupl/main.go | 148 + .../github.com/golangci/dupl/printer/html.go | 120 + .../golangci/dupl/printer/plumbing.go | 50 + .../golangci/dupl/printer/printer.go | 11 + .../github.com/golangci/dupl/printer/text.go | 100 + .../golangci/dupl/suffixtree/dupl.go | 98 + .../golangci/dupl/suffixtree/suffixtree.go | 216 + .../golangci/dupl/syntax/golang/golang.go | 392 + .../github.com/golangci/dupl/syntax/syntax.go | 175 + vendor/github.com/golangci/go-misc/LICENSE | 27 + .../golangci/go-misc/deadcode/README.md | 18 + .../golangci/go-misc/deadcode/deadcode.go | 135 + .../context => golangci/gofmt/gofmt}/LICENSE | 8 +- vendor/github.com/golangci/gofmt/gofmt/doc.go | 104 + 
.../github.com/golangci/gofmt/gofmt/gofmt.go | 327 + .../golangci/gofmt/gofmt/golangci.go | 46 + .../golangci/gofmt/gofmt/internal.go | 176 + .../golangci/gofmt/gofmt/rewrite.go | 303 + .../golangci/gofmt/gofmt/simplify.go | 165 + .../golangci/gofmt/goimports/LICENSE | 27 + .../golangci/gofmt/goimports/goimports.go | 358 + .../golangci/gofmt/goimports/golangci.go | 33 + .../github.com/golangci/golangci-lint/LICENSE | 674 + .../golangci-lint/cmd/golangci-lint/main.go | 25 + .../cmd/golangci-lint/mod_version.go | 17 + .../golangci-lint/internal/cache/cache.go | 527 + .../golangci-lint/internal/cache/default.go | 87 + .../golangci-lint/internal/cache/hash.go | 186 + .../internal/errorutil/errors.go | 23 + .../internal/pkgcache/pkgcache.go | 229 + .../internal/renameio/renameio.go | 93 + .../internal/robustio/robustio.go | 53 + .../internal/robustio/robustio_darwin.go | 29 + .../internal/robustio/robustio_flaky.go | 93 + .../internal/robustio/robustio_other.go | 28 + .../internal/robustio/robustio_windows.go | 33 + .../golangci-lint/pkg/commands/cache.go | 84 + .../golangci-lint/pkg/commands/config.go | 66 + .../golangci-lint/pkg/commands/executor.go | 250 + .../golangci-lint/pkg/commands/help.go | 92 + .../golangci-lint/pkg/commands/linters.go | 51 + .../golangci-lint/pkg/commands/root.go | 165 + .../golangci-lint/pkg/commands/run.go | 566 + .../golangci-lint/pkg/commands/version.go | 60 + .../golangci-lint/pkg/config/config.go | 26 + .../golangci-lint/pkg/config/issues.go | 204 + .../golangci-lint/pkg/config/linters.go | 11 + .../pkg/config/linters_settings.go | 486 + .../pkg/config/linters_settings_gocritic.go | 365 + .../golangci-lint/pkg/config/output.go | 36 + .../golangci-lint/pkg/config/reader.go | 221 + .../golangci/golangci-lint/pkg/config/run.go | 37 + .../golangci-lint/pkg/config/severity.go | 18 + .../golangci-lint/pkg/exitcodes/exitcodes.go | 34 + .../golangci-lint/pkg/fsutils/filecache.go | 67 + .../golangci-lint/pkg/fsutils/fsutils.go | 100 + .../golangci-lint/pkg/fsutils/linecache.go | 70 + .../golangci-lint/pkg/golinters/asciicheck.go | 19 + .../golangci-lint/pkg/golinters/bodyclose.go | 21 + .../golangci-lint/pkg/golinters/cyclop.go | 39 + .../golangci-lint/pkg/golinters/deadcode.go | 52 + .../golangci-lint/pkg/golinters/depguard.go | 112 + .../golangci-lint/pkg/golinters/dogsled.go | 97 + .../golangci-lint/pkg/golinters/dupl.go | 83 + .../pkg/golinters/durationcheck.go | 15 + .../golangci-lint/pkg/golinters/errcheck.go | 251 + .../golangci-lint/pkg/golinters/errname.go | 21 + .../golangci-lint/pkg/golinters/errorlint.go | 31 + .../golangci-lint/pkg/golinters/exhaustive.go | 27 + .../pkg/golinters/exhaustivestruct.go | 31 + .../pkg/golinters/exportloopref.go | 19 + .../golangci-lint/pkg/golinters/forbidigo.go | 70 + .../pkg/golinters/forcetypeassert.go | 19 + .../golangci-lint/pkg/golinters/funlen.go | 64 + .../golangci-lint/pkg/golinters/gci.go | 98 + .../pkg/golinters/goanalysis/adapters.go | 36 + .../pkg/golinters/goanalysis/errors.go | 72 + .../pkg/golinters/goanalysis/issue.go | 31 + .../pkg/golinters/goanalysis/linter.go | 213 + .../pkg/golinters/goanalysis/load/guard.go | 30 + .../pkg/golinters/goanalysis/metalinter.go | 90 + .../pkg/golinters/goanalysis/runner.go | 342 + .../pkg/golinters/goanalysis/runner_action.go | 381 + .../pkg/golinters/goanalysis/runner_facts.go | 125 + .../goanalysis/runner_loadingpackage.go | 497 + .../pkg/golinters/goanalysis/runners.go | 269 + .../pkg/golinters/gochecknoglobals.go | 29 + .../pkg/golinters/gochecknoinits.go | 73 + 
.../golangci-lint/pkg/golinters/gocognit.go | 68 + .../golangci-lint/pkg/golinters/goconst.go | 92 + .../golangci-lint/pkg/golinters/gocritic.go | 167 + .../golangci-lint/pkg/golinters/gocyclo.go | 61 + .../golangci-lint/pkg/golinters/godot.go | 84 + .../golangci-lint/pkg/golinters/godox.go | 63 + .../golangci-lint/pkg/golinters/goerr113.go | 19 + .../golangci-lint/pkg/golinters/gofmt.go | 72 + .../pkg/golinters/gofmt_common.go | 286 + .../golangci-lint/pkg/golinters/gofumpt.go | 108 + .../golangci-lint/pkg/golinters/goheader.go | 85 + .../golangci-lint/pkg/golinters/goimports.go | 73 + .../golangci-lint/pkg/golinters/golint.go | 78 + .../golangci-lint/pkg/golinters/gomnd.go | 27 + .../pkg/golinters/gomoddirectives.go | 64 + .../golangci-lint/pkg/golinters/gomodguard.go | 99 + .../pkg/golinters/goprintffuncname.go | 17 + .../golangci-lint/pkg/golinters/gosec.go | 128 + .../golangci-lint/pkg/golinters/gosimple.go | 21 + .../golangci-lint/pkg/golinters/govet.go | 185 + .../golangci-lint/pkg/golinters/ifshort.go | 28 + .../golangci-lint/pkg/golinters/importas.go | 48 + .../pkg/golinters/ineffassign.go | 17 + .../golangci-lint/pkg/golinters/interfacer.go | 67 + .../golangci-lint/pkg/golinters/lll.go | 124 + .../golangci-lint/pkg/golinters/makezero.go | 60 + .../golangci-lint/pkg/golinters/maligned.go | 58 + .../golangci-lint/pkg/golinters/misspell.go | 132 + .../golangci-lint/pkg/golinters/nakedret.go | 123 + .../golangci-lint/pkg/golinters/nestif.go | 65 + .../golangci-lint/pkg/golinters/nilerr.go | 18 + .../golangci-lint/pkg/golinters/nlreturn.go | 19 + .../golangci-lint/pkg/golinters/noctx.go | 21 + .../golangci-lint/pkg/golinters/nolintlint.go | 93 + .../pkg/golinters/nolintlint/README.md | 31 + .../pkg/golinters/nolintlint/nolintlint.go | 305 + .../pkg/golinters/paralleltest.go | 21 + .../golangci-lint/pkg/golinters/prealloc.go | 57 + .../pkg/golinters/predeclared.go | 26 + .../golangci-lint/pkg/golinters/promlinter.go | 63 + .../golangci-lint/pkg/golinters/revive.go | 295 + .../pkg/golinters/rowerrcheck.go | 23 + .../golangci-lint/pkg/golinters/scopelint.go | 177 + .../pkg/golinters/sqlclosecheck.go | 21 + .../pkg/golinters/staticcheck.go | 21 + .../pkg/golinters/staticcheck_common.go | 168 + .../pkg/golinters/structcheck.go | 55 + .../golangci-lint/pkg/golinters/stylecheck.go | 30 + .../pkg/golinters/tagliatelle.go | 30 + .../pkg/golinters/testpackage.go | 23 + .../golangci-lint/pkg/golinters/thelper.go | 61 + .../golangci-lint/pkg/golinters/tparallel.go | 21 + .../golangci-lint/pkg/golinters/typecheck.go | 28 + .../golangci-lint/pkg/golinters/unconvert.go | 53 + .../golangci-lint/pkg/golinters/unparam.go | 76 + .../golangci-lint/pkg/golinters/unused.go | 69 + .../golangci-lint/pkg/golinters/util.go | 24 + .../golangci-lint/pkg/golinters/varcheck.go | 55 + .../pkg/golinters/wastedassign.go | 21 + .../golangci-lint/pkg/golinters/whitespace.go | 85 + .../golangci-lint/pkg/golinters/wrapcheck.go | 32 + .../golangci-lint/pkg/golinters/wsl.go | 83 + .../golangci/golangci-lint/pkg/goutil/env.go | 61 + .../golangci-lint/pkg/lint/linter/config.go | 127 + .../golangci-lint/pkg/lint/linter/context.go | 49 + .../golangci-lint/pkg/lint/linter/linter.go | 13 + .../pkg/lint/lintersdb/enabled_set.go | 207 + .../pkg/lint/lintersdb/manager.go | 618 + .../pkg/lint/lintersdb/validator.go | 107 + .../golangci/golangci-lint/pkg/lint/load.go | 315 + .../golangci/golangci-lint/pkg/lint/runner.go | 329 + .../golangci-lint/pkg/logutils/log.go | 31 + .../golangci-lint/pkg/logutils/logutils.go | 49 + 
.../golangci-lint/pkg/logutils/mock.go | 47 + .../golangci-lint/pkg/logutils/out.go | 9 + .../golangci-lint/pkg/logutils/stderr_log.go | 121 + .../golangci-lint/pkg/packages/errors.go | 39 + .../golangci-lint/pkg/packages/skip.go | 25 + .../golangci-lint/pkg/packages/util.go | 102 + .../golangci-lint/pkg/printers/checkstyle.go | 87 + .../golangci-lint/pkg/printers/codeclimate.go | 57 + .../golangci-lint/pkg/printers/github.go | 46 + .../golangci-lint/pkg/printers/html.go | 155 + .../golangci-lint/pkg/printers/json.go | 41 + .../golangci-lint/pkg/printers/junitxml.go | 79 + .../golangci-lint/pkg/printers/printer.go | 11 + .../golangci-lint/pkg/printers/tab.go | 58 + .../golangci-lint/pkg/printers/text.go | 91 + .../golangci/golangci-lint/pkg/report/data.go | 26 + .../golangci/golangci-lint/pkg/report/log.go | 64 + .../golangci-lint/pkg/result/issue.go | 98 + .../processors/autogenerated_exclude.go | 134 + .../pkg/result/processors/base_rule.go | 69 + .../pkg/result/processors/cgo.go | 58 + .../pkg/result/processors/diff.go | 74 + .../pkg/result/processors/exclude.go | 59 + .../pkg/result/processors/exclude_rules.go | 90 + .../result/processors/filename_unadjuster.go | 131 + .../pkg/result/processors/fixer.go | 248 + .../result/processors/identifier_marker.go | 125 + .../pkg/result/processors/max_from_linter.go | 54 + .../processors/max_per_file_from_linter.go | 59 + .../pkg/result/processors/max_same_issues.go | 81 + .../pkg/result/processors/nolint.go | 309 + .../pkg/result/processors/path_prefixer.go | 37 + .../pkg/result/processors/path_prettifier.go | 48 + .../pkg/result/processors/path_shortener.go | 40 + .../pkg/result/processors/processor.go | 11 + .../pkg/result/processors/severity_rules.go | 103 + .../pkg/result/processors/skip_dirs.go | 143 + .../pkg/result/processors/skip_files.go | 52 + .../pkg/result/processors/sort_results.go | 173 + .../pkg/result/processors/source_code.go | 47 + .../pkg/result/processors/uniq_by_line.go | 58 + .../pkg/result/processors/utils.go | 62 + .../golangci-lint/pkg/sliceutil/sliceutil.go | 17 + .../golangci-lint/pkg/timeutils/stopwatch.go | 116 + vendor/github.com/golangci/lint-1/.travis.yml | 19 + .../golangci/lint-1/CONTRIBUTING.md | 15 + vendor/github.com/golangci/lint-1/LICENSE | 27 + vendor/github.com/golangci/lint-1/README.md | 88 + vendor/github.com/golangci/lint-1/go.mod | 3 + vendor/github.com/golangci/lint-1/go.sum | 6 + vendor/github.com/golangci/lint-1/lint.go | 1655 + vendor/github.com/golangci/maligned/LICENSE | 27 + vendor/github.com/golangci/maligned/README | 7 + .../github.com/golangci/maligned/maligned.go | 253 + .../github.com/golangci/misspell/.gitignore | 34 + .../github.com/golangci/misspell/.travis.yml | 20 + .../github.com/golangci/misspell/Dockerfile | 37 + .../github.com/golangci/misspell/Gopkg.lock | 24 + .../github.com/golangci/misspell/Gopkg.toml | 34 + vendor/github.com/golangci/misspell/LICENSE | 22 + vendor/github.com/golangci/misspell/Makefile | 74 + vendor/github.com/golangci/misspell/README.md | 424 + .../golangci/misspell/RELEASE-HOWTO.md | 38 + vendor/github.com/golangci/misspell/ascii.go | 62 + vendor/github.com/golangci/misspell/case.go | 59 + .../golangci/misspell/goreleaser.yml | 38 + .../golangci/misspell/install-misspell.sh | 362 + vendor/github.com/golangci/misspell/legal.go | 48 + vendor/github.com/golangci/misspell/mime.go | 210 + .../github.com/golangci/misspell/notwords.go | 85 + .../github.com/golangci/misspell/replace.go | 246 + .../golangci/misspell/stringreplacer.go | 336 + 
.../golangci/misspell/stringreplacer_test.gox | 421 + vendor/github.com/golangci/misspell/url.go | 17 + vendor/github.com/golangci/misspell/words.go | 31194 ++++++++++++++++ vendor/github.com/golangci/revgrep/.gitignore | 1 + .../github.com/golangci/revgrep/.travis.yml | 7 + vendor/github.com/golangci/revgrep/LICENSE | 201 + vendor/github.com/golangci/revgrep/README.md | 58 + vendor/github.com/golangci/revgrep/go.mod | 3 + vendor/github.com/golangci/revgrep/go.sum | 0 vendor/github.com/golangci/revgrep/revgrep.go | 410 + vendor/github.com/golangci/unconvert/LICENSE | 27 + vendor/github.com/golangci/unconvert/README | 36 + .../golangci/unconvert/unconvert.go | 665 + vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 682 + .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 35 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 + .../google/go-cmp/cmp/internal/diff/diff.go | 398 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 99 + .../google/go-cmp/cmp/internal/value/name.go | 157 + .../cmp/internal/value/pointer_purego.go | 33 + .../cmp/internal/value/pointer_unsafe.go | 36 + .../google/go-cmp/cmp/internal/value/sort.go | 106 + .../google/go-cmp/cmp/internal/value/zero.go | 48 + .../github.com/google/go-cmp/cmp/options.go | 552 + vendor/github.com/google/go-cmp/cmp/path.go | 378 + vendor/github.com/google/go-cmp/cmp/report.go | 54 + .../google/go-cmp/cmp/report_compare.go | 432 + .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 402 + .../google/go-cmp/cmp/report_slices.go | 465 + .../google/go-cmp/cmp/report_text.go | 431 + .../google/go-cmp/cmp/report_value.go | 121 + vendor/github.com/google/uuid/README.md | 2 +- vendor/github.com/google/uuid/hash.go | 4 +- vendor/github.com/google/uuid/marshal.go | 7 +- vendor/github.com/google/uuid/sql.go | 2 +- vendor/github.com/google/uuid/uuid.go | 10 +- vendor/github.com/google/uuid/version1.go | 12 +- vendor/github.com/google/uuid/version4.go | 15 +- .../gordonklaus/ineffassign/LICENSE | 21 + .../pkg/ineffassign/ineffassign.go | 591 + vendor/github.com/gorilla/context/.travis.yml | 19 - vendor/github.com/gorilla/context/README.md | 10 - vendor/github.com/gorilla/context/context.go | 143 - vendor/github.com/gorilla/context/doc.go | 88 - vendor/github.com/gorilla/mux/AUTHORS | 8 + .../github.com/gorilla/mux/ISSUE_TEMPLATE.md | 11 - vendor/github.com/gorilla/mux/LICENSE | 2 +- vendor/github.com/gorilla/mux/README.md | 176 +- .../github.com/gorilla/mux/context_gorilla.go | 26 - .../github.com/gorilla/mux/context_native.go | 24 - vendor/github.com/gorilla/mux/doc.go | 2 +- vendor/github.com/gorilla/mux/go.mod | 3 + vendor/github.com/gorilla/mux/middleware.go | 56 +- vendor/github.com/gorilla/mux/mux.go | 150 +- vendor/github.com/gorilla/mux/regexp.go | 116 +- vendor/github.com/gorilla/mux/route.go | 179 +- vendor/github.com/gorilla/mux/test_helpers.go | 2 +- .../gostaticanalysis/analysisutil/LICENSE | 21 + .../gostaticanalysis/analysisutil/README.md | 5 + .../gostaticanalysis/analysisutil/call.go | 405 + .../analysisutil/diagnostic.go | 45 + .../gostaticanalysis/analysisutil/file.go | 18 + .../gostaticanalysis/analysisutil/go.mod | 8 + .../gostaticanalysis/analysisutil/go.sum | 37 + .../gostaticanalysis/analysisutil/pkg.go | 49 + 
.../gostaticanalysis/analysisutil/ssa.go | 146 + .../analysisutil/ssainspect.go | 37 + .../gostaticanalysis/analysisutil/types.go | 208 + .../gostaticanalysis/comment/LICENSE | 21 + .../gostaticanalysis/comment/README.md | 10 + .../gostaticanalysis/comment/comment.go | 147 + .../gostaticanalysis/comment/go.mod | 8 + .../gostaticanalysis/comment/go.sum | 24 + .../comment/passes/commentmap/commentmap.go | 20 + .../forcetypeassert/.reviewdog.yml | 8 + .../gostaticanalysis/forcetypeassert/LICENSE | 21 + .../forcetypeassert/README.md | 17 + .../forcetypeassert/forcetypeassert.go | 67 + .../gostaticanalysis/forcetypeassert/go.mod | 5 + .../gostaticanalysis/forcetypeassert/go.sum | 6 + .../gostaticanalysis/nilerr/LICENSE | 21 + .../gostaticanalysis/nilerr/README.md | 41 + .../github.com/gostaticanalysis/nilerr/go.mod | 8 + .../github.com/gostaticanalysis/nilerr/go.sum | 34 + .../gostaticanalysis/nilerr/nilerr.go | 291 + .../go-grpc-middleware/.gitignore | 204 + .../go-grpc-middleware/.travis.yml | 16 + .../go-grpc-middleware/CHANGELOG.md | 51 + .../go-grpc-middleware/CONTRIBUTING.md | 20 + .../grpc-ecosystem/go-grpc-middleware/LICENSE | 201 + .../go-grpc-middleware/README.md | 86 + .../go-grpc-middleware/auth/auth.go | 68 + .../go-grpc-middleware/auth/doc.go | 20 + .../go-grpc-middleware/auth/metadata.go | 37 + .../go-grpc-middleware/chain.go | 120 + .../grpc-ecosystem/go-grpc-middleware/doc.go | 69 + .../grpc-ecosystem/go-grpc-middleware/go.mod | 22 + .../grpc-ecosystem/go-grpc-middleware/go.sum | 122 + .../go-grpc-middleware/makefile | 17 + .../go-grpc-middleware/retry/backoff.go | 44 + .../go-grpc-middleware/retry/doc.go | 25 + .../go-grpc-middleware/retry/options.go | 142 + .../go-grpc-middleware/retry/retry.go | 329 + .../go-grpc-middleware/slack.png | Bin 0 -> 5088 bytes .../go-grpc-middleware/tags/context.go | 78 + .../go-grpc-middleware/tags/doc.go | 22 + .../go-grpc-middleware/tags/fieldextractor.go | 85 + .../go-grpc-middleware/tags/interceptors.go | 85 + .../go-grpc-middleware/tags/options.go | 44 + .../opentracing/client_interceptors.go | 143 + .../tracing/opentracing/doc.go | 22 + .../tracing/opentracing/id_extract.go | 82 + .../tracing/opentracing/metadata.go | 50 + .../tracing/opentracing/options.go | 89 + .../opentracing/server_interceptors.go | 98 + .../util/backoffutils/backoff.go | 28 + .../go-grpc-middleware/util/metautils/doc.go | 19 + .../util/metautils/nicemd.go | 126 + .../go-grpc-middleware/wrappers.go | 30 + .../go-grpc-prometheus/.gitignore | 201 + .../go-grpc-prometheus/.travis.yml | 25 + .../go-grpc-prometheus/CHANGELOG.md | 24 + .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 + .../go-grpc-prometheus/README.md | 247 + .../go-grpc-prometheus/client.go | 39 + .../go-grpc-prometheus/client_metrics.go | 170 + .../go-grpc-prometheus/client_reporter.go | 46 + .../go-grpc-prometheus/makefile | 16 + .../go-grpc-prometheus/metric_options.go | 41 + .../go-grpc-prometheus/server.go | 48 + .../go-grpc-prometheus/server_metrics.go | 185 + .../go-grpc-prometheus/server_reporter.go | 46 + .../grpc-ecosystem/go-grpc-prometheus/util.go | 50 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + vendor/github.com/hashicorp/errwrap/go.mod | 1 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 150 + .../hashicorp/go-multierror/append.go | 43 + .../hashicorp/go-multierror/flatten.go | 26 + 
.../hashicorp/go-multierror/format.go | 27 + .../github.com/hashicorp/go-multierror/go.mod | 5 + .../github.com/hashicorp/go-multierror/go.sum | 2 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 121 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + vendor/github.com/hashicorp/hcl/.gitignore | 9 + vendor/github.com/hashicorp/hcl/.travis.yml | 13 + vendor/github.com/hashicorp/hcl/LICENSE | 354 + vendor/github.com/hashicorp/hcl/Makefile | 18 + vendor/github.com/hashicorp/hcl/README.md | 125 + vendor/github.com/hashicorp/hcl/appveyor.yml | 19 + vendor/github.com/hashicorp/hcl/decoder.go | 729 + vendor/github.com/hashicorp/hcl/go.mod | 3 + vendor/github.com/hashicorp/hcl/go.sum | 2 + vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 532 + .../hashicorp/hcl/hcl/printer/nodes.go | 789 + .../hashicorp/hcl/hcl/printer/printer.go | 66 + .../hashicorp/hcl/hcl/scanner/scanner.go | 652 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + vendor/github.com/hashicorp/hcl/lex.go | 38 + vendor/github.com/hashicorp/hcl/parse.go | 39 + vendor/github.com/jgautheron/goconst/LICENSE | 21 + .../github.com/jgautheron/goconst/README.md | 50 + vendor/github.com/jgautheron/goconst/api.go | 74 + vendor/github.com/jgautheron/goconst/go.mod | 3 + .../github.com/jgautheron/goconst/parser.go | 176 + .../github.com/jgautheron/goconst/visitor.go | 160 + .../github.com/jingyugao/rowserrcheck/LICENSE | 21 + .../rowserrcheck/passes/rowserr/rowserr.go | 331 + .../jirfag/go-printf-func-name/LICENSE | 21 + .../pkg/analyzer/analyzer.go | 74 + vendor/github.com/json-iterator/go/go.sum | 1 + .../github.com/json-iterator/go/iter_float.go | 3 + .../github.com/json-iterator/go/iter_int.go | 3 +- vendor/github.com/json-iterator/go/reflect.go | 2 +- .../go/reflect_json_raw_message.go | 24 +- .../go/reflect_struct_decoder.go | 5 + vendor/github.com/julz/importas/.gitignore | 2 + vendor/github.com/julz/importas/LICENSE | 201 + vendor/github.com/julz/importas/README.md | 47 + vendor/github.com/julz/importas/analyzer.go | 116 + vendor/github.com/julz/importas/config.go | 69 + vendor/github.com/julz/importas/flags.go | 31 + vendor/github.com/julz/importas/go.mod | 5 + vendor/github.com/julz/importas/go.sum | 26 + vendor/github.com/kisielk/errcheck/LICENSE | 22 + .../errcheck/errcheck/embedded_walker.go | 144 + .../kisielk/errcheck/errcheck/errcheck.go | 676 + .../kisielk/errcheck/errcheck/tags.go | 12 + .../kisielk/errcheck/errcheck/tags_compat.go | 13 + .../mux => kisielk/gotool}/.travis.yml | 28 +- vendor/github.com/kisielk/gotool/LEGAL | 32 + vendor/github.com/kisielk/gotool/LICENSE | 20 + vendor/github.com/kisielk/gotool/README.md | 6 + vendor/github.com/kisielk/gotool/go.mod | 1 + vendor/github.com/kisielk/gotool/go13.go | 15 + vendor/github.com/kisielk/gotool/go14-15.go | 15 + vendor/github.com/kisielk/gotool/go16-18.go | 15 + .../kisielk/gotool/internal/load/path.go | 27 + .../kisielk/gotool/internal/load/pkg.go | 25 + 
.../kisielk/gotool/internal/load/search.go | 354 + vendor/github.com/kisielk/gotool/match.go | 56 + vendor/github.com/kisielk/gotool/match18.go | 317 + vendor/github.com/kisielk/gotool/tool.go | 48 + vendor/github.com/kulti/thelper/LICENSE | 21 + .../kulti/thelper/pkg/analyzer/analyzer.go | 416 + .../kulti/thelper/pkg/analyzer/report.go | 56 + .../kunwardeep/paralleltest/LICENSE | 21 + .../pkg/paralleltest/paralleltest.go | 256 + .../kyoh86/exportloopref/.golangci.yml | 4 + .../kyoh86/exportloopref/.goreleaser.yml | 43 + .../github.com/kyoh86/exportloopref/LICENSE | 21 + .../github.com/kyoh86/exportloopref/Makefile | 16 + .../github.com/kyoh86/exportloopref/README.md | 221 + .../kyoh86/exportloopref/exportloopref.go | 305 + vendor/github.com/kyoh86/exportloopref/go.mod | 5 + vendor/github.com/kyoh86/exportloopref/go.sum | 20 + .../ldez/gomoddirectives/.gitignore | 2 + .../ldez/gomoddirectives/.golangci.yml | 87 + .../github.com/ldez/gomoddirectives/LICENSE | 190 + .../github.com/ldez/gomoddirectives/Makefile | 15 + vendor/github.com/ldez/gomoddirectives/go.mod | 8 + vendor/github.com/ldez/gomoddirectives/go.sum | 25 + .../ldez/gomoddirectives/gomoddirectives.go | 125 + .../github.com/ldez/gomoddirectives/module.go | 47 + .../github.com/ldez/gomoddirectives/readme.md | 16 + vendor/github.com/ldez/tagliatelle/.gitignore | 3 + .../github.com/ldez/tagliatelle/.golangci.yml | 77 + vendor/github.com/ldez/tagliatelle/LICENSE | 190 + vendor/github.com/ldez/tagliatelle/Makefile | 15 + vendor/github.com/ldez/tagliatelle/go.mod | 8 + vendor/github.com/ldez/tagliatelle/go.sum | 38 + vendor/github.com/ldez/tagliatelle/readme.md | 31 + .../ldez/tagliatelle/tagliatelle.go | 192 + .../magiconair/properties/.gitignore | 6 + .../magiconair/properties/.travis.yml | 17 + .../magiconair/properties/CHANGELOG.md | 160 + .../magiconair/properties/LICENSE.md | 24 + .../magiconair/properties/README.md | 128 + .../magiconair/properties/decode.go | 289 + .../github.com/magiconair/properties/doc.go | 156 + .../github.com/magiconair/properties/go.mod | 3 + .../magiconair/properties/integrate.go | 34 + .../github.com/magiconair/properties/lex.go | 407 + .../github.com/magiconair/properties/load.go | 293 + .../magiconair/properties/parser.go | 95 + .../magiconair/properties/properties.go | 854 + .../magiconair/properties/rangecheck.go | 31 + .../github.com/maratori/testpackage/LICENSE | 21 + .../pkg/testpackage/testpackage.go | 53 + vendor/github.com/matoous/godox/.gitignore | 19 + vendor/github.com/matoous/godox/.golangci.yml | 71 + vendor/github.com/matoous/godox/.revive.toml | 135 + vendor/github.com/matoous/godox/LICENSE | 21 + vendor/github.com/matoous/godox/README.md | 23 + vendor/github.com/matoous/godox/go.mod | 5 + vendor/github.com/matoous/godox/go.sum | 8 + vendor/github.com/matoous/godox/godox.go | 84 + .../github.com/mattn/go-colorable/.travis.yml | 15 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 37 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1043 + vendor/github.com/mattn/go-colorable/go.mod | 8 + vendor/github.com/mattn/go-colorable/go.sum | 5 + .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 55 + vendor/github.com/mattn/go-isatty/.travis.yml | 15 +- vendor/github.com/mattn/go-isatty/README.md | 2 +- vendor/github.com/mattn/go-isatty/go.mod | 2 +- vendor/github.com/mattn/go-isatty/go.sum | 4 +- 
vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../mattn/go-isatty/isatty_android.go | 23 - .../github.com/mattn/go-isatty/isatty_bsd.go | 12 +- .../mattn/go-isatty/isatty_tcgets.go | 1 - .../github.com/mattn/go-isatty/renovate.json | 8 + .../github.com/mattn/go-runewidth/.travis.yml | 16 + vendor/github.com/mattn/go-runewidth/LICENSE | 21 + .../github.com/mattn/go-runewidth/README.md | 27 + vendor/github.com/mattn/go-runewidth/go.mod | 3 + .../github.com/mattn/go-runewidth/go.test.sh | 12 + .../mattn/go-runewidth/runewidth.go | 257 + .../mattn/go-runewidth/runewidth_appengine.go | 8 + .../mattn/go-runewidth/runewidth_js.go | 9 + .../mattn/go-runewidth/runewidth_posix.go | 82 + .../mattn/go-runewidth/runewidth_table.go | 437 + .../mattn/go-runewidth/runewidth_windows.go | 28 + .../mbilski/exhaustivestruct/LICENSE | 21 + .../exhaustivestruct/pkg/analyzer/analyzer.go | 187 + vendor/github.com/mgechev/dots/.travis.yml | 2 + vendor/github.com/mgechev/dots/LICENSE | 21 + vendor/github.com/mgechev/dots/README.md | 100 + vendor/github.com/mgechev/dots/resolve.go | 456 + vendor/github.com/mgechev/revive/LICENSE | 21 + .../mgechev/revive/config/config.go | 226 + .../mgechev/revive/formatter/checkstyle.go | 76 + .../mgechev/revive/formatter/default.go | 26 + .../mgechev/revive/formatter/friendly.go | 149 + .../mgechev/revive/formatter/json.go | 40 + .../mgechev/revive/formatter/ndjson.go | 34 + .../mgechev/revive/formatter/plain.go | 26 + .../mgechev/revive/formatter/sarif.go | 107 + .../mgechev/revive/formatter/severity.go | 13 + .../mgechev/revive/formatter/stylish.go | 89 + .../mgechev/revive/formatter/unix.go | 27 + .../github.com/mgechev/revive/lint/config.go | 35 + .../github.com/mgechev/revive/lint/failure.go | 39 + vendor/github.com/mgechev/revive/lint/file.go | 278 + .../mgechev/revive/lint/formatter.go | 14 + .../github.com/mgechev/revive/lint/linter.go | 99 + .../github.com/mgechev/revive/lint/package.go | 178 + vendor/github.com/mgechev/revive/lint/rule.go | 31 + .../github.com/mgechev/revive/lint/utils.go | 128 + .../mgechev/revive/rule/add-constant.go | 152 + .../mgechev/revive/rule/argument-limit.go | 67 + .../github.com/mgechev/revive/rule/atomic.go | 94 + .../mgechev/revive/rule/bare-return.go | 84 + .../mgechev/revive/rule/blank-imports.go | 75 + .../revive/rule/bool-literal-in-expr.go | 73 + .../mgechev/revive/rule/call-to-gc.go | 70 + .../revive/rule/cognitive-complexity.go | 195 + .../mgechev/revive/rule/confusing-naming.go | 190 + .../mgechev/revive/rule/confusing-results.go | 67 + .../revive/rule/constant-logical-expr.go | 88 + .../revive/rule/context-as-argument.go | 63 + .../mgechev/revive/rule/context-keys-type.go | 81 + .../mgechev/revive/rule/cyclomatic.go | 118 + .../mgechev/revive/rule/deep-exit.go | 94 + .../github.com/mgechev/revive/rule/defer.go | 137 + .../mgechev/revive/rule/dot-imports.go | 54 + .../mgechev/revive/rule/duplicated-imports.go | 39 + .../mgechev/revive/rule/early-return.go | 78 + .../mgechev/revive/rule/empty-block.go | 65 + .../mgechev/revive/rule/empty-lines.go | 113 + .../mgechev/revive/rule/error-naming.go | 79 + .../mgechev/revive/rule/error-return.go | 67 + .../mgechev/revive/rule/error-strings.go | 98 + .../github.com/mgechev/revive/rule/errorf.go | 93 + .../mgechev/revive/rule/exported.go | 272 + .../mgechev/revive/rule/file-header.go | 69 + .../mgechev/revive/rule/flag-param.go | 104 + .../mgechev/revive/rule/function-length.go | 153 + .../revive/rule/function-result-limit.go | 68 + .../mgechev/revive/rule/get-return.go | 70 + 
.../mgechev/revive/rule/identical-branches.go | 82 + .../mgechev/revive/rule/if-return.go | 115 + .../mgechev/revive/rule/import-shadowing.go | 108 + .../mgechev/revive/rule/imports-blacklist.go | 52 + .../revive/rule/increment-decrement.go | 74 + .../mgechev/revive/rule/indent-error-flow.go | 78 + .../mgechev/revive/rule/line-length-limit.go | 84 + .../mgechev/revive/rule/max-public-structs.go | 70 + .../mgechev/revive/rule/modifies-param.go | 80 + .../revive/rule/modifies-value-receiver.go | 134 + .../mgechev/revive/rule/nested-structs.go | 61 + .../mgechev/revive/rule/package-comments.go | 121 + .../mgechev/revive/rule/range-val-address.go | 126 + .../revive/rule/range-val-in-closure.go | 111 + .../github.com/mgechev/revive/rule/range.go | 82 + .../mgechev/revive/rule/receiver-naming.go | 81 + .../revive/rule/redefines-builtin-id.go | 145 + .../mgechev/revive/rule/string-format.go | 282 + .../mgechev/revive/rule/string-of-int.go | 95 + .../mgechev/revive/rule/struct-tag.go | 236 + .../mgechev/revive/rule/superfluous-else.go | 114 + .../mgechev/revive/rule/time-naming.go | 93 + .../revive/rule/unconditional-recursion.go | 183 + .../mgechev/revive/rule/unexported-naming.go | 115 + .../mgechev/revive/rule/unexported-return.go | 106 + .../mgechev/revive/rule/unhandled-error.go | 120 + .../mgechev/revive/rule/unnecessary-stmt.go | 107 + .../mgechev/revive/rule/unreachable-code.go | 114 + .../mgechev/revive/rule/unused-param.go | 102 + .../mgechev/revive/rule/unused-receiver.go | 77 + .../mgechev/revive/rule/useless-break.go | 77 + .../github.com/mgechev/revive/rule/utils.go | 192 + .../mgechev/revive/rule/var-declarations.go | 120 + .../mgechev/revive/rule/var-naming.go | 230 + .../mgechev/revive/rule/waitgroup-by-value.go | 66 + .../mitchellh/mapstructure/CHANGELOG.md | 73 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 256 + .../mitchellh/mapstructure/error.go | 50 + .../github.com/mitchellh/mapstructure/go.mod | 3 + .../mitchellh/mapstructure/mapstructure.go | 1462 + .../github.com/moricho/tparallel/.gitignore | 3 + .../moricho/tparallel/.goreleaser.yml | 38 + vendor/github.com/moricho/tparallel/LICENSE | 21 + vendor/github.com/moricho/tparallel/Makefile | 13 + vendor/github.com/moricho/tparallel/README.md | 100 + vendor/github.com/moricho/tparallel/go.mod | 8 + vendor/github.com/moricho/tparallel/go.sum | 34 + .../moricho/tparallel/pkg/ssafunc/ssafunc.go | 34 + .../tparallel/pkg/ssainstr/ssainstr.go | 63 + .../github.com/moricho/tparallel/testmap.go | 63 + .../github.com/moricho/tparallel/tparallel.go | 72 + vendor/github.com/nakabonne/nestif/.gitignore | 16 + vendor/github.com/nakabonne/nestif/LICENSE | 25 + vendor/github.com/nakabonne/nestif/README.md | 122 + vendor/github.com/nakabonne/nestif/go.mod | 8 + vendor/github.com/nakabonne/nestif/go.sum | 12 + vendor/github.com/nakabonne/nestif/nestif.go | 148 + .../github.com/nbutton23/zxcvbn-go/.gitignore | 2 + .../nbutton23/zxcvbn-go/LICENSE.txt | 20 + .../github.com/nbutton23/zxcvbn-go/Makefile | 15 + .../github.com/nbutton23/zxcvbn-go/README.md | 78 + .../zxcvbn-go/adjacency/adjcmartix.go | 108 + .../nbutton23/zxcvbn-go/data/bindata.go | 444 + .../zxcvbn-go/entropy/entropyCalculator.go | 216 + .../zxcvbn-go/frequency/frequency.go | 50 + vendor/github.com/nbutton23/zxcvbn-go/go.mod | 9 + vendor/github.com/nbutton23/zxcvbn-go/go.sum | 5 + .../nbutton23/zxcvbn-go/match/match.go | 44 + .../zxcvbn-go/matching/dateMatchers.go | 209 + 
.../zxcvbn-go/matching/dictionaryMatch.go | 57 + .../nbutton23/zxcvbn-go/matching/leet.go | 234 + .../nbutton23/zxcvbn-go/matching/matching.go | 82 + .../zxcvbn-go/matching/repeatMatch.go | 67 + .../zxcvbn-go/matching/sequenceMatch.go | 76 + .../zxcvbn-go/matching/spatialMatch.go | 88 + .../nbutton23/zxcvbn-go/scoring/scoring.go | 177 + .../zxcvbn-go/utils/math/mathutils.go | 40 + .../github.com/nbutton23/zxcvbn-go/zxcvbn.go | 22 + .../nishanths/exhaustive/.gitignore | 7 + .../nishanths/exhaustive/.travis.yml | 12 + .../github.com/nishanths/exhaustive/LICENSE | 25 + .../github.com/nishanths/exhaustive/README.md | 70 + .../github.com/nishanths/exhaustive/enum.go | 146 + .../nishanths/exhaustive/exhaustive.go | 207 + .../nishanths/exhaustive/generated.go | 34 + vendor/github.com/nishanths/exhaustive/go.mod | 8 + vendor/github.com/nishanths/exhaustive/go.sum | 28 + .../nishanths/exhaustive/regexp_flag.go | 35 + .../github.com/nishanths/exhaustive/switch.go | 444 + .../github.com/nishanths/predeclared/LICENSE | 29 + .../predeclared/passes/predeclared/go18.go | 9 + .../passes/predeclared/pre_go18.go | 53 + .../passes/predeclared/predeclared.go | 202 + .../olekukonko/tablewriter/.gitignore | 15 + .../olekukonko/tablewriter/.travis.yml | 22 + .../olekukonko/tablewriter/LICENSE.md | 19 + .../olekukonko/tablewriter/README.md | 431 + .../github.com/olekukonko/tablewriter/csv.go | 52 + .../github.com/olekukonko/tablewriter/go.mod | 5 + .../github.com/olekukonko/tablewriter/go.sum | 2 + .../olekukonko/tablewriter/table.go | 967 + .../tablewriter/table_with_color.go | 136 + .../github.com/olekukonko/tablewriter/util.go | 93 + .../github.com/olekukonko/tablewriter/wrap.go | 99 + .../opentracing/opentracing-go/.gitignore | 14 +- .../opentracing/opentracing-go/.travis.yml | 22 +- .../opentracing/opentracing-go/CHANGELOG.md | 38 +- .../opentracing/opentracing-go/LICENSE | 222 +- .../opentracing/opentracing-go/Makefile | 20 +- .../opentracing/opentracing-go/README.md | 32 +- .../opentracing/opentracing-go/ext/tags.go | 16 +- .../opentracing-go/globaltracer.go | 18 +- .../opentracing/opentracing-go/gocontext.go | 19 +- .../opentracing/opentracing-go/log/field.go | 24 + .../opentracing/opentracing-go/propagation.go | 14 +- .../opentracing/opentracing-go/span.go | 4 + .../opentracing/opentracing-go/tracer.go | 5 +- .../pelletier/go-toml/.dockerignore | 2 + .../github.com/pelletier/go-toml/.gitignore | 5 + .../pelletier/go-toml/CONTRIBUTING.md | 132 + .../github.com/pelletier/go-toml/Dockerfile | 11 + vendor/github.com/pelletier/go-toml/LICENSE | 247 + vendor/github.com/pelletier/go-toml/Makefile | 29 + .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 + vendor/github.com/pelletier/go-toml/README.md | 176 + .../pelletier/go-toml/azure-pipelines.yml | 188 + .../github.com/pelletier/go-toml/benchmark.sh | 35 + vendor/github.com/pelletier/go-toml/doc.go | 23 + .../pelletier/go-toml/example-crlf.toml | 30 + .../github.com/pelletier/go-toml/example.toml | 30 + vendor/github.com/pelletier/go-toml/fuzz.go | 31 + vendor/github.com/pelletier/go-toml/fuzz.sh | 15 + vendor/github.com/pelletier/go-toml/go.mod | 3 + .../pelletier/go-toml/keysparsing.go | 112 + vendor/github.com/pelletier/go-toml/lexer.go | 1031 + .../github.com/pelletier/go-toml/localtime.go | 287 + .../github.com/pelletier/go-toml/marshal.go | 1308 + .../go-toml/marshal_OrderPreserve_test.toml | 39 + .../pelletier/go-toml/marshal_test.toml | 39 + vendor/github.com/pelletier/go-toml/parser.go | 508 + .../github.com/pelletier/go-toml/position.go | 29 + 
vendor/github.com/pelletier/go-toml/token.go | 136 + vendor/github.com/pelletier/go-toml/toml.go | 533 + .../github.com/pelletier/go-toml/tomlpub.go | 71 + .../pelletier/go-toml/tomltree_create.go | 155 + .../pelletier/go-toml/tomltree_write.go | 552 + .../pelletier/go-toml/tomltree_writepub.go | 6 + .../phayes/checkstyle/.scrutinizer.yml | 15 + vendor/github.com/phayes/checkstyle/LICENSE | 29 + vendor/github.com/phayes/checkstyle/README.md | 44 + .../phayes/checkstyle/checkstyle.go | 112 + vendor/github.com/phayes/checkstyle/godoc.go | 36 + .../github.com/polyfloyd/go-errorlint/LICENSE | 21 + .../go-errorlint/errorlint/allowed.go | 137 + .../go-errorlint/errorlint/analysis.go | 52 + .../polyfloyd/go-errorlint/errorlint/lint.go | 249 + .../prometheus/client_golang/AUTHORS.md | 18 - .../client_golang/prometheus/build_info.go | 29 + .../prometheus/build_info_pre_1.12.go | 22 + .../client_golang/prometheus/collector.go | 73 +- .../client_golang/prometheus/counter.go | 235 +- .../client_golang/prometheus/desc.go | 73 +- .../client_golang/prometheus/doc.go | 116 +- .../client_golang/prometheus/fnv.go | 13 + .../client_golang/prometheus/gauge.go | 213 +- .../client_golang/prometheus/go_collector.go | 189 +- .../client_golang/prometheus/histogram.go | 351 +- .../client_golang/prometheus/http.go | 490 - .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang/prometheus/metric.go | 94 +- .../client_golang/prometheus/observer.go | 64 + .../prometheus/process_collector.go | 203 +- .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 116 + .../prometheus/promhttp/delegator.go | 370 + .../client_golang/prometheus/promhttp/http.go | 356 +- .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 447 + .../client_golang/prometheus/registry.go | 760 +- .../client_golang/prometheus/summary.go | 323 +- .../prometheus/testutil/promlint/promlint.go | 386 + .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 102 +- .../client_golang/prometheus/value.go | 145 +- .../client_golang/prometheus/vec.go | 502 +- .../client_golang/prometheus/wrap.go | 212 + .../prometheus/client_model/go/metrics.pb.go | 268 +- .../prometheus/common/expfmt/encode.go | 124 +- .../prometheus/common/expfmt/expfmt.go | 11 +- .../common/expfmt/openmetrics_create.go | 527 + .../prometheus/common/expfmt/text_create.go | 354 +- .../prometheus/common/expfmt/text_parse.go | 13 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 6 +- .../prometheus/common/model/metric.go | 1 - .../prometheus/common/model/time.go | 14 +- .../prometheus/procfs/.golangci.yml | 4 + .../github.com/prometheus/procfs/.travis.yml | 12 - .../prometheus/procfs/CODE_OF_CONDUCT.md | 3 + .../prometheus/procfs/CONTRIBUTING.md | 109 +- .../prometheus/procfs/MAINTAINERS.md | 3 +- vendor/github.com/prometheus/procfs/Makefile | 66 +- .../prometheus/procfs/Makefile.common | 302 + vendor/github.com/prometheus/procfs/README.md | 54 +- .../github.com/prometheus/procfs/SECURITY.md | 6 + vendor/github.com/prometheus/procfs/arp.go | 85 + .../github.com/prometheus/procfs/buddyinfo.go | 18 +- .../github.com/prometheus/procfs/cpuinfo.go | 481 + .../prometheus/procfs/cpuinfo_armx.go | 19 + .../prometheus/procfs/cpuinfo_mipsx.go | 19 + .../prometheus/procfs/cpuinfo_others.go | 19 + .../prometheus/procfs/cpuinfo_ppcx.go | 19 + .../prometheus/procfs/cpuinfo_riscvx.go | 19 + .../prometheus/procfs/cpuinfo_s390x.go | 18 + 
.../prometheus/procfs/cpuinfo_x86.go | 19 + vendor/github.com/prometheus/procfs/crypto.go | 153 + .../prometheus/procfs/fixtures.ttar | 6333 +++- vendor/github.com/prometheus/procfs/fs.go | 71 +- .../github.com/prometheus/procfs/fscache.go | 422 + vendor/github.com/prometheus/procfs/go.mod | 9 + vendor/github.com/prometheus/procfs/go.sum | 8 + .../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 53 +- .../procfs/internal/util/readfile.go | 38 + .../procfs/internal/util/sysreadfile.go | 48 + .../internal/util/sysreadfile_compat.go | 26 + .../procfs/internal/util/valueparser.go | 91 + vendor/github.com/prometheus/procfs/ipvs.go | 42 +- .../prometheus/procfs/kernel_random.go | 62 + .../github.com/prometheus/procfs/loadavg.go | 62 + vendor/github.com/prometheus/procfs/mdstat.go | 168 +- .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 78 +- .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 39 +- .../prometheus/procfs/net_ip_socket.go | 220 + .../prometheus/procfs/net_protocols.go | 180 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 102 + .../github.com/prometheus/procfs/net_tcp.go | 64 + .../github.com/prometheus/procfs/net_udp.go | 64 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 - .../github.com/prometheus/procfs/nfs/parse.go | 317 - .../prometheus/procfs/nfs/parse_nfs.go | 67 - .../prometheus/procfs/nfs/parse_nfsd.go | 89 - vendor/github.com/prometheus/procfs/proc.go | 125 +- .../prometheus/procfs/proc_cgroup.go | 98 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 133 + .../github.com/prometheus/procfs/proc_io.go | 16 +- .../prometheus/procfs/proc_limits.go | 94 +- .../github.com/prometheus/procfs/proc_maps.go | 209 + .../github.com/prometheus/procfs/proc_ns.go | 10 +- .../github.com/prometheus/procfs/proc_psi.go | 100 + .../prometheus/procfs/proc_smaps.go | 165 + .../github.com/prometheus/procfs/proc_stat.go | 33 +- .../prometheus/procfs/proc_status.go | 170 + .../github.com/prometheus/procfs/schedstat.go | 121 + vendor/github.com/prometheus/procfs/slab.go | 151 + vendor/github.com/prometheus/procfs/stat.go | 68 +- vendor/github.com/prometheus/procfs/swaps.go | 89 + vendor/github.com/prometheus/procfs/ttar | 42 +- vendor/github.com/prometheus/procfs/vm.go | 210 + vendor/github.com/prometheus/procfs/xfrm.go | 5 +- .../github.com/prometheus/procfs/xfs/parse.go | 330 - .../github.com/prometheus/procfs/xfs/xfs.go | 163 - .../github.com/prometheus/procfs/zoneinfo.go | 196 + .../github.com/quasilyte/go-ruleguard/LICENSE | 29 + .../go-ruleguard/internal/gogrep/compile.go | 976 + .../internal/gogrep/gen_operations.go | 311 + .../go-ruleguard/internal/gogrep/gogrep.go | 66 + .../internal/gogrep/instructions.go | 107 + .../go-ruleguard/internal/gogrep/match.go | 731 + .../internal/gogrep/operation_string.go | 129 + .../internal/gogrep/operations.gen.go | 1249 + .../go-ruleguard/internal/gogrep/parse.go | 360 + .../go-ruleguard/internal/gogrep/slices.go | 51 + .../go-ruleguard/internal/golist/golist.go | 30 + .../go-ruleguard/internal/xtypes/xtypes.go | 256 + .../quasilyte/go-ruleguard/nodetag/nodetag.go | 277 + .../go-ruleguard/ruleguard/bundle.go | 19 + .../go-ruleguard/ruleguard/engine.go | 171 + .../go-ruleguard/ruleguard/filters.go | 267 + .../go-ruleguard/ruleguard/gorule.go | 146 
+ .../go-ruleguard/ruleguard/goutil/goutil.go | 21 + .../go-ruleguard/ruleguard/goutil/resolve.go | 33 + .../go-ruleguard/ruleguard/importer.go | 116 + .../go-ruleguard/ruleguard/libdsl.go | 276 + .../go-ruleguard/ruleguard/match_data.go | 46 + .../go-ruleguard/ruleguard/parser.go | 988 + .../go-ruleguard/ruleguard/quasigo/compile.go | 707 + .../ruleguard/quasigo/debug_info.go | 16 + .../go-ruleguard/ruleguard/quasigo/disasm.go | 74 + .../go-ruleguard/ruleguard/quasigo/env.go | 42 + .../go-ruleguard/ruleguard/quasigo/eval.go | 239 + .../ruleguard/quasigo/gen_opcodes.go | 184 + .../ruleguard/quasigo/opcode_string.go | 63 + .../ruleguard/quasigo/opcodes.gen.go | 219 + .../go-ruleguard/ruleguard/quasigo/quasigo.go | 165 + .../go-ruleguard/ruleguard/quasigo/utils.go | 60 + .../go-ruleguard/ruleguard/ruleguard.go | 87 + .../go-ruleguard/ruleguard/runner.go | 349 + .../ruleguard/typematch/patternop_string.go | 34 + .../ruleguard/typematch/typematch.go | 536 + .../quasilyte/go-ruleguard/ruleguard/utils.go | 251 + .../github.com/quasilyte/regex/syntax/LICENSE | 21 + .../quasilyte/regex/syntax/README.md | 26 + .../github.com/quasilyte/regex/syntax/ast.go | 147 + .../quasilyte/regex/syntax/errors.go | 27 + .../github.com/quasilyte/regex/syntax/go.mod | 3 + .../quasilyte/regex/syntax/lexer.go | 455 + .../quasilyte/regex/syntax/operation.go | 189 + .../regex/syntax/operation_string.go | 59 + .../quasilyte/regex/syntax/parser.go | 471 + .../github.com/quasilyte/regex/syntax/pos.go | 10 + .../regex/syntax/tokenkind_string.go | 59 + .../quasilyte/regex/syntax/utils.go | 30 + .../ryancurrah/gomodguard/.dockerignore | 1 + .../ryancurrah/gomodguard/.gitignore | 25 + .../ryancurrah/gomodguard/.golangci.yml | 132 + .../ryancurrah/gomodguard/.goreleaser.yml | 29 + .../ryancurrah/gomodguard/Dockerfile | 17 + .../gomodguard/Dockerfile.goreleaser | 10 + .../github.com/ryancurrah/gomodguard/LICENSE | 21 + .../github.com/ryancurrah/gomodguard/Makefile | 42 + .../ryancurrah/gomodguard/README.md | 131 + .../github.com/ryancurrah/gomodguard/cmd.go | 247 + .../github.com/ryancurrah/gomodguard/go.mod | 12 + .../github.com/ryancurrah/gomodguard/go.sum | 26 + .../ryancurrah/gomodguard/gomodguard.go | 486 + .../ryanrolds/sqlclosecheck/LICENSE | 19 + .../sqlclosecheck/pkg/analyzer/analyzer.go | 311 + .../sanposhiho/wastedassign/v2/LICENSE | 21 + .../sanposhiho/wastedassign/v2/README.md | 66 + .../sanposhiho/wastedassign/v2/go.mod | 5 + .../sanposhiho/wastedassign/v2/go.sum | 26 + .../wastedassign/v2/wastedassign.go | 272 + .../github.com/securego/gosec/v2/.gitignore | 35 + .../securego/gosec/v2/.golangci.yml | 23 + .../securego/gosec/v2/.goreleaser.yml | 21 + .../github.com/securego/gosec/v2/Dockerfile | 15 + .../github.com/securego/gosec/v2/LICENSE.txt | 154 + vendor/github.com/securego/gosec/v2/Makefile | 71 + vendor/github.com/securego/gosec/v2/README.md | 401 + vendor/github.com/securego/gosec/v2/USERS.md | 28 + .../github.com/securego/gosec/v2/action.yml | 19 + .../github.com/securego/gosec/v2/analyzer.go | 375 + .../github.com/securego/gosec/v2/call_list.go | 109 + vendor/github.com/securego/gosec/v2/config.go | 125 + .../github.com/securego/gosec/v2/cwe/data.go | 143 + .../github.com/securego/gosec/v2/cwe/types.go | 34 + .../securego/gosec/v2/entrypoint.sh | 7 + vendor/github.com/securego/gosec/v2/errors.go | 33 + vendor/github.com/securego/gosec/v2/go.mod | 18 + vendor/github.com/securego/gosec/v2/go.sum | 702 + .../github.com/securego/gosec/v2/helpers.go | 451 + .../securego/gosec/v2/import_tracker.go | 75 + 
.../github.com/securego/gosec/v2/install.sh | 375 + vendor/github.com/securego/gosec/v2/issue.go | 201 + .../securego/gosec/v2/renovate.json | 24 + vendor/github.com/securego/gosec/v2/report.go | 24 + .../github.com/securego/gosec/v2/resolve.go | 95 + vendor/github.com/securego/gosec/v2/rule.go | 59 + .../securego/gosec/v2/rules/archive.go | 65 + .../securego/gosec/v2/rules/bad_defer.go | 68 + .../securego/gosec/v2/rules/bind.go | 83 + .../securego/gosec/v2/rules/blocklist.go | 94 + .../gosec/v2/rules/decompression-bomb.go | 110 + .../securego/gosec/v2/rules/errors.go | 120 + .../securego/gosec/v2/rules/fileperms.go | 113 + .../gosec/v2/rules/hardcoded_credentials.go | 173 + .../gosec/v2/rules/implicit_aliasing.go | 119 + .../gosec/v2/rules/integer_overflow.go | 89 + .../securego/gosec/v2/rules/pprof.go | 42 + .../securego/gosec/v2/rules/rand.go | 58 + .../securego/gosec/v2/rules/readfile.go | 128 + .../github.com/securego/gosec/v2/rules/rsa.go | 58 + .../securego/gosec/v2/rules/rulelist.go | 116 + .../github.com/securego/gosec/v2/rules/sql.go | 303 + .../github.com/securego/gosec/v2/rules/ssh.go | 38 + .../securego/gosec/v2/rules/ssrf.go | 66 + .../securego/gosec/v2/rules/subproc.go | 85 + .../securego/gosec/v2/rules/tempfiles.go | 58 + .../securego/gosec/v2/rules/templates.go | 60 + .../github.com/securego/gosec/v2/rules/tls.go | 171 + .../securego/gosec/v2/rules/tls_config.go | 92 + .../securego/gosec/v2/rules/unsafe.go | 53 + .../securego/gosec/v2/rules/weakcrypto.go | 58 + vendor/github.com/shazow/go-diff/LICENSE | 22 + .../shazow/go-diff/difflib/differ.go | 39 + vendor/github.com/sirupsen/logrus/.gitignore | 4 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 259 + vendor/github.com/sirupsen/logrus/LICENSE | 21 + vendor/github.com/sirupsen/logrus/README.md | 513 + vendor/github.com/sirupsen/logrus/alt_exit.go | 76 + .../github.com/sirupsen/logrus/appveyor.yml | 14 + .../github.com/sirupsen/logrus/buffer_pool.go | 52 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 431 + vendor/github.com/sirupsen/logrus/exported.go | 270 + .../github.com/sirupsen/logrus/formatter.go | 78 + vendor/github.com/sirupsen/logrus/go.mod | 10 + vendor/github.com/sirupsen/logrus/go.sum | 8 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 128 + vendor/github.com/sirupsen/logrus/logger.go | 404 + vendor/github.com/sirupsen/logrus/logrus.go | 186 + .../logrus/terminal_check_appengine.go | 11 + .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 17 + .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/text_formatter.go | 339 + vendor/github.com/sirupsen/logrus/writer.go | 70 + vendor/github.com/sonatard/noctx/.gitignore | 1 + .../github.com/sonatard/noctx/.golangci.yml | 20 + vendor/github.com/sonatard/noctx/LICENSE | 21 + vendor/github.com/sonatard/noctx/Makefile | 16 + vendor/github.com/sonatard/noctx/README.md | 95 + vendor/github.com/sonatard/noctx/go.mod | 8 + vendor/github.com/sonatard/noctx/go.sum | 16 + .../github.com/sonatard/noctx/ngfunc/main.go | 57 + .../sonatard/noctx/ngfunc/report.go | 29 + .../github.com/sonatard/noctx/ngfunc/types.go | 65 + 
vendor/github.com/sonatard/noctx/noctx.go | 31 + .../sonatard/noctx/reqwithoutctx/main.go | 14 + .../sonatard/noctx/reqwithoutctx/report.go | 26 + .../sonatard/noctx/reqwithoutctx/ssa.go | 180 + vendor/github.com/sourcegraph/go-diff/LICENSE | 35 + .../sourcegraph/go-diff/diff/diff.go | 132 + .../sourcegraph/go-diff/diff/doc.go | 2 + .../sourcegraph/go-diff/diff/parse.go | 725 + .../sourcegraph/go-diff/diff/print.go | 141 + .../sourcegraph/go-diff/diff/reader_util.go | 37 + vendor/github.com/spf13/afero/.gitignore | 2 + vendor/github.com/spf13/afero/.travis.yml | 26 + vendor/github.com/spf13/afero/LICENSE.txt | 174 + vendor/github.com/spf13/afero/README.md | 430 + vendor/github.com/spf13/afero/afero.go | 111 + vendor/github.com/spf13/afero/appveyor.yml | 15 + vendor/github.com/spf13/afero/basepath.go | 211 + .../github.com/spf13/afero/cacheOnReadFs.go | 311 + vendor/github.com/spf13/afero/const_bsds.go | 22 + .../github.com/spf13/afero/const_win_unix.go | 26 + .../github.com/spf13/afero/copyOnWriteFs.go | 326 + vendor/github.com/spf13/afero/go.mod | 9 + vendor/github.com/spf13/afero/go.sum | 29 + vendor/github.com/spf13/afero/httpFs.go | 114 + vendor/github.com/spf13/afero/iofs.go | 288 + vendor/github.com/spf13/afero/ioutil.go | 240 + vendor/github.com/spf13/afero/lstater.go | 27 + vendor/github.com/spf13/afero/match.go | 110 + vendor/github.com/spf13/afero/mem/dir.go | 37 + vendor/github.com/spf13/afero/mem/dirmap.go | 43 + vendor/github.com/spf13/afero/mem/file.go | 338 + vendor/github.com/spf13/afero/memmap.go | 404 + vendor/github.com/spf13/afero/os.go | 113 + vendor/github.com/spf13/afero/path.go | 106 + vendor/github.com/spf13/afero/readonlyfs.go | 96 + vendor/github.com/spf13/afero/regexpfs.go | 224 + vendor/github.com/spf13/afero/symlink.go | 55 + vendor/github.com/spf13/afero/unionFile.go | 317 + vendor/github.com/spf13/afero/util.go | 330 + vendor/github.com/spf13/cast/.gitignore | 25 + vendor/github.com/spf13/cast/.travis.yml | 16 + vendor/github.com/spf13/cast/LICENSE | 21 + vendor/github.com/spf13/cast/Makefile | 40 + vendor/github.com/spf13/cast/README.md | 75 + vendor/github.com/spf13/cast/cast.go | 171 + vendor/github.com/spf13/cast/caste.go | 1249 + vendor/github.com/spf13/cast/go.mod | 7 + vendor/github.com/spf13/cast/go.sum | 6 + vendor/github.com/spf13/cobra/.gitignore | 5 +- vendor/github.com/spf13/cobra/.golangci.yml | 48 + vendor/github.com/spf13/cobra/.travis.yml | 21 - vendor/github.com/spf13/cobra/CHANGELOG.md | 51 + vendor/github.com/spf13/cobra/CONDUCT.md | 37 + vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 + vendor/github.com/spf13/cobra/Makefile | 40 + vendor/github.com/spf13/cobra/README.md | 663 +- vendor/github.com/spf13/cobra/args.go | 22 +- .../spf13/cobra/bash_completions.go | 345 +- .../spf13/cobra/bash_completions.md | 150 +- .../spf13/cobra/bash_completionsV2.go | 302 + vendor/github.com/spf13/cobra/cobra.go | 24 +- vendor/github.com/spf13/cobra/command.go | 327 +- vendor/github.com/spf13/cobra/command_win.go | 8 +- vendor/github.com/spf13/cobra/completions.go | 781 + .../spf13/cobra/fish_completions.go | 219 + .../spf13/cobra/fish_completions.md | 4 + vendor/github.com/spf13/cobra/go.mod | 11 + vendor/github.com/spf13/cobra/go.sum | 592 + .../spf13/cobra/powershell_completions.go | 285 + .../spf13/cobra/powershell_completions.md | 3 + .../spf13/cobra/projects_using_cobra.md | 38 + .../spf13/cobra/shell_completions.go | 84 + .../spf13/cobra/shell_completions.md | 546 + vendor/github.com/spf13/cobra/user_guide.md | 637 + 
.../github.com/spf13/cobra/zsh_completions.go | 310 +- .../github.com/spf13/cobra/zsh_completions.md | 48 + .../spf13/jwalterweatherman/.gitignore | 24 + .../spf13/jwalterweatherman/LICENSE | 21 + .../spf13/jwalterweatherman/README.md | 148 + .../jwalterweatherman/default_notepad.go | 111 + .../github.com/spf13/jwalterweatherman/go.mod | 7 + .../spf13/jwalterweatherman/log_counter.go | 46 + .../spf13/jwalterweatherman/notepad.go | 225 + vendor/github.com/spf13/pflag/.travis.yml | 7 +- vendor/github.com/spf13/pflag/README.md | 4 +- vendor/github.com/spf13/pflag/bool_slice.go | 38 + vendor/github.com/spf13/pflag/count.go | 4 +- .../github.com/spf13/pflag/duration_slice.go | 38 + vendor/github.com/spf13/pflag/flag.go | 23 +- .../github.com/spf13/pflag/float32_slice.go | 174 + .../github.com/spf13/pflag/float64_slice.go | 166 + vendor/github.com/spf13/pflag/go.mod | 3 + vendor/github.com/spf13/pflag/go.sum | 0 vendor/github.com/spf13/pflag/int32_slice.go | 174 + vendor/github.com/spf13/pflag/int64_slice.go | 166 + vendor/github.com/spf13/pflag/int_slice.go | 30 + vendor/github.com/spf13/pflag/ip_slice.go | 40 +- vendor/github.com/spf13/pflag/string_array.go | 26 + vendor/github.com/spf13/pflag/string_slice.go | 22 +- .../github.com/spf13/pflag/string_to_int.go | 149 + .../github.com/spf13/pflag/string_to_int64.go | 149 + .../spf13/pflag/string_to_string.go | 160 + vendor/github.com/spf13/pflag/uint_slice.go | 42 + vendor/github.com/spf13/viper/.editorconfig | 15 + vendor/github.com/spf13/viper/.gitignore | 5 + vendor/github.com/spf13/viper/.golangci.yml | 93 + vendor/github.com/spf13/viper/LICENSE | 21 + vendor/github.com/spf13/viper/Makefile | 76 + vendor/github.com/spf13/viper/README.md | 865 + .../github.com/spf13/viper/TROUBLESHOOTING.md | 23 + vendor/github.com/spf13/viper/flags.go | 57 + vendor/github.com/spf13/viper/go.mod | 21 + vendor/github.com/spf13/viper/go.sum | 632 + vendor/github.com/spf13/viper/util.go | 230 + vendor/github.com/spf13/viper/viper.go | 2169 ++ vendor/github.com/spf13/viper/watch.go | 11 + vendor/github.com/spf13/viper/watch_wasm.go | 30 + vendor/github.com/ssgreg/nlreturn/v2/LICENSE | 21 + .../nlreturn/v2/pkg/nlreturn/nlreturn.go | 86 + .../github.com/stretchr/objx/.codeclimate.yml | 13 + vendor/github.com/stretchr/objx/.gitignore | 11 + vendor/github.com/stretchr/objx/.travis.yml | 25 + vendor/github.com/stretchr/objx/Gopkg.lock | 30 + vendor/github.com/stretchr/objx/Gopkg.toml | 8 + vendor/github.com/stretchr/objx/LICENSE | 22 + vendor/github.com/stretchr/objx/README.md | 80 + vendor/github.com/stretchr/objx/Taskfile.yml | 32 + vendor/github.com/stretchr/objx/accessors.go | 148 + vendor/github.com/stretchr/objx/constants.go | 13 + .../github.com/stretchr/objx/conversions.go | 108 + vendor/github.com/stretchr/objx/doc.go | 66 + vendor/github.com/stretchr/objx/map.go | 190 + vendor/github.com/stretchr/objx/mutations.go | 77 + vendor/github.com/stretchr/objx/security.go | 12 + vendor/github.com/stretchr/objx/tests.go | 17 + .../stretchr/objx/type_specific_codegen.go | 2501 ++ vendor/github.com/stretchr/objx/value.go | 53 + .../testify/assert/assertion_compare.go | 172 +- .../testify/assert/assertion_format.go | 97 + .../testify/assert/assertion_forward.go | 194 + .../testify/assert/assertion_order.go | 81 + .../stretchr/testify/assert/assertions.go | 83 +- .../github.com/stretchr/testify/mock/doc.go | 44 + .../github.com/stretchr/testify/mock/mock.go | 1008 + .../stretchr/testify/require/require.go | 248 + .../testify/require/require_forward.go | 194 + 
vendor/github.com/subosito/gotenv/.env | 1 + .../github.com/subosito/gotenv/.env.invalid | 1 + vendor/github.com/subosito/gotenv/.gitignore | 3 + vendor/github.com/subosito/gotenv/.travis.yml | 10 + .../github.com/subosito/gotenv/CHANGELOG.md | 47 + vendor/github.com/subosito/gotenv/LICENSE | 21 + vendor/github.com/subosito/gotenv/README.md | 131 + .../github.com/subosito/gotenv/appveyor.yml | 9 + vendor/github.com/subosito/gotenv/gotenv.go | 265 + .../github.com/tdakkota/asciicheck/.gitignore | 33 + vendor/github.com/tdakkota/asciicheck/LICENSE | 21 + .../github.com/tdakkota/asciicheck/README.md | 72 + .../github.com/tdakkota/asciicheck/ascii.go | 18 + .../tdakkota/asciicheck/asciicheck.go | 49 + vendor/github.com/tdakkota/asciicheck/go.mod | 5 + vendor/github.com/tetafro/godot/.gitignore | 4 + vendor/github.com/tetafro/godot/.godot.yaml | 16 + vendor/github.com/tetafro/godot/.golangci.yml | 67 + .../github.com/tetafro/godot/.goreleaser.yml | 11 + vendor/github.com/tetafro/godot/LICENSE | 21 + vendor/github.com/tetafro/godot/Makefile | 25 + vendor/github.com/tetafro/godot/README.md | 84 + vendor/github.com/tetafro/godot/checks.go | 269 + vendor/github.com/tetafro/godot/getters.go | 283 + vendor/github.com/tetafro/godot/go.mod | 5 + vendor/github.com/tetafro/godot/go.sum | 4 + vendor/github.com/tetafro/godot/godot.go | 135 + vendor/github.com/tetafro/godot/settings.go | 29 + vendor/github.com/timakin/bodyclose/LICENSE | 21 + .../bodyclose/passes/bodyclose/bodyclose.go | 368 + .../github.com/tomarrell/wrapcheck/v2/LICENSE | 21 + .../wrapcheck/v2/wrapcheck/wrapcheck.go | 337 + .../tommy-muehle/go-mnd/v2/.editorconfig | 21 + .../tommy-muehle/go-mnd/v2/.gitattributes | 9 + .../tommy-muehle/go-mnd/v2/.gitignore | 3 + .../tommy-muehle/go-mnd/v2/.goreleaser.yml | 29 + .../tommy-muehle/go-mnd/v2/Dockerfile | 17 + .../github.com/tommy-muehle/go-mnd/v2/LICENSE | 21 + .../tommy-muehle/go-mnd/v2/Makefile | 32 + .../tommy-muehle/go-mnd/v2/README.md | 230 + .../tommy-muehle/go-mnd/v2/action.yml | 19 + .../tommy-muehle/go-mnd/v2/analyzer.go | 118 + .../tommy-muehle/go-mnd/v2/checks/argument.go | 121 + .../tommy-muehle/go-mnd/v2/checks/assign.go | 86 + .../tommy-muehle/go-mnd/v2/checks/case.go | 68 + .../tommy-muehle/go-mnd/v2/checks/checks.go | 3 + .../go-mnd/v2/checks/condition.go | 55 + .../go-mnd/v2/checks/operation.go | 77 + .../tommy-muehle/go-mnd/v2/checks/return.go | 68 + .../tommy-muehle/go-mnd/v2/config/config.go | 118 + .../tommy-muehle/go-mnd/v2/entrypoint.sh | 7 + .../github.com/tommy-muehle/go-mnd/v2/go.mod | 9 + .../github.com/tommy-muehle/go-mnd/v2/go.sum | 28 + vendor/github.com/ultraware/funlen/LICENSE | 7 + vendor/github.com/ultraware/funlen/README.md | 9 + vendor/github.com/ultraware/funlen/main.go | 104 + .../github.com/ultraware/whitespace/LICENSE | 7 + .../github.com/ultraware/whitespace/README.md | 7 + .../github.com/ultraware/whitespace/main.go | 158 + vendor/github.com/uudashr/gocognit/LICENSE | 21 + vendor/github.com/uudashr/gocognit/README.md | 185 + vendor/github.com/uudashr/gocognit/doc.go | 2 + vendor/github.com/uudashr/gocognit/go.mod | 5 + vendor/github.com/uudashr/gocognit/go.sum | 27 + .../github.com/uudashr/gocognit/gocognit.go | 385 + .../github.com/yeya24/promlinter/.gitignore | 20 + vendor/github.com/yeya24/promlinter/LICENSE | 201 + vendor/github.com/yeya24/promlinter/Makefile | 39 + vendor/github.com/yeya24/promlinter/README.md | 74 + vendor/github.com/yeya24/promlinter/go.mod | 9 + vendor/github.com/yeya24/promlinter/go.sum | 114 + 
.../yeya24/promlinter/promlinter.go | 664 + .../x/crypto/argon2/blamka_amd64.go | 3 +- .../golang.org/x/crypto/argon2/blamka_amd64.s | 3 +- .../golang.org/x/crypto/argon2/blamka_ref.go | 3 +- .../x/crypto/blake2b/blake2bAVX2_amd64.go | 3 +- .../x/crypto/blake2b/blake2bAVX2_amd64.s | 97 +- .../x/crypto/blake2b/blake2b_amd64.go | 3 +- .../x/crypto/blake2b/blake2b_amd64.s | 58 +- .../x/crypto/blake2b/blake2b_ref.go | 3 +- .../golang.org/x/crypto/blake2b/register.go | 1 + .../golang.org/x/net/http/httpguts/httplex.go | 10 +- vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/Dockerfile | 51 + vendor/golang.org/x/net/http2/Makefile | 3 + vendor/golang.org/x/net/http2/README | 20 + vendor/golang.org/x/net/http2/ciphers.go | 641 + .../x/net/http2/client_conn_pool.go | 278 + vendor/golang.org/x/net/http2/databuffer.go | 146 + vendor/golang.org/x/net/http2/errors.go | 133 + vendor/golang.org/x/net/http2/flow.go | 52 + vendor/golang.org/x/net/http2/frame.go | 1614 + vendor/golang.org/x/net/http2/go111.go | 30 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/headermap.go | 88 + vendor/golang.org/x/net/http2/hpack/encode.go | 240 + vendor/golang.org/x/net/http2/hpack/hpack.go | 504 + .../golang.org/x/net/http2/hpack/huffman.go | 229 + vendor/golang.org/x/net/http2/hpack/tables.go | 479 + vendor/golang.org/x/net/http2/http2.go | 385 + vendor/golang.org/x/net/http2/not_go111.go | 21 + vendor/golang.org/x/net/http2/pipe.go | 168 + vendor/golang.org/x/net/http2/server.go | 2984 ++ vendor/golang.org/x/net/http2/transport.go | 2760 ++ vendor/golang.org/x/net/http2/write.go | 365 + vendor/golang.org/x/net/http2/writesched.go | 248 + .../x/net/http2/writesched_priority.go | 452 + .../x/net/http2/writesched_random.go | 77 + .../x/net/internal/timeseries/timeseries.go | 525 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 + vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_aix.go | 1 + vendor/golang.org/x/sys/unix/README.md | 6 +- vendor/golang.org/x/sys/unix/asm_bsd_386.s | 4 +- vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 9 + .../golang.org/x/sys/unix/syscall_darwin.go | 33 + vendor/golang.org/x/sys/unix/syscall_linux.go | 71 + .../x/sys/unix/syscall_linux_386.go | 4 + .../x/sys/unix/syscall_linux_amd64.go | 4 + .../x/sys/unix/syscall_linux_arm.go | 4 + .../x/sys/unix/syscall_linux_arm64.go | 4 + .../x/sys/unix/syscall_linux_mips64x.go | 4 + .../x/sys/unix/syscall_linux_mipsx.go | 4 + .../x/sys/unix/syscall_linux_ppc.go | 4 + .../x/sys/unix/syscall_linux_ppc64x.go | 4 + .../x/sys/unix/syscall_linux_riscv64.go | 4 + .../x/sys/unix/syscall_linux_s390x.go | 4 + .../x/sys/unix/syscall_linux_sparc64.go | 4 + .../x/sys/unix/zerrors_darwin_amd64.go | 5 + .../x/sys/unix/zerrors_darwin_arm64.go | 5 + .../x/sys/unix/zerrors_freebsd_386.go | 5 + .../x/sys/unix/zerrors_freebsd_amd64.go | 5 + .../x/sys/unix/zerrors_freebsd_arm.go | 5 + .../x/sys/unix/zerrors_freebsd_arm64.go | 5 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 96 + .../x/sys/unix/zerrors_linux_386.go | 19 + .../x/sys/unix/zerrors_linux_amd64.go | 19 + .../x/sys/unix/zerrors_linux_arm.go | 19 + .../x/sys/unix/zerrors_linux_arm64.go | 19 + .../x/sys/unix/zerrors_linux_mips.go | 19 + .../x/sys/unix/zerrors_linux_mips64.go | 19 + .../x/sys/unix/zerrors_linux_mips64le.go | 19 + .../x/sys/unix/zerrors_linux_mipsle.go | 19 + 
.../x/sys/unix/zerrors_linux_ppc.go | 19 + .../x/sys/unix/zerrors_linux_ppc64.go | 19 + .../x/sys/unix/zerrors_linux_ppc64le.go | 19 + .../x/sys/unix/zerrors_linux_riscv64.go | 19 + .../x/sys/unix/zerrors_linux_s390x.go | 19 + .../x/sys/unix/zerrors_linux_sparc64.go | 19 + .../x/sys/unix/ztypes_darwin_amd64.go | 104 + .../x/sys/unix/ztypes_darwin_arm64.go | 104 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 3 + .../x/sys/unix/ztypes_freebsd_386.go | 5 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 5 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 165 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +- .../x/sys/unix/ztypes_linux_amd64.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 18 +- .../x/sys/unix/ztypes_linux_arm64.go | 18 +- .../x/sys/unix/ztypes_linux_mips.go | 18 +- .../x/sys/unix/ztypes_linux_mips64.go | 18 +- .../x/sys/unix/ztypes_linux_mips64le.go | 18 +- .../x/sys/unix/ztypes_linux_mipsle.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 18 +- .../x/sys/unix/ztypes_linux_riscv64.go | 18 +- .../x/sys/unix/ztypes_linux_s390x.go | 18 +- .../x/sys/unix/ztypes_linux_sparc64.go | 18 +- .../x/sys/unix/ztypes_netbsd_386.go | 4 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_386.go | 4 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 4 +- vendor/golang.org/x/sys/windows/aliases.go | 13 + .../golang.org/x/sys/windows/dll_windows.go | 416 + vendor/golang.org/x/sys/windows/empty.s | 9 + .../golang.org/x/sys/windows/env_windows.go | 54 + vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 195 + .../x/sys/windows/memory_windows.go | 37 + vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 9 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/security_windows.go | 1443 + vendor/golang.org/x/sys/windows/service.go | 237 + .../x/sys/windows/setupapierrors_windows.go | 100 + vendor/golang.org/x/sys/windows/str.go | 22 + vendor/golang.org/x/sys/windows/syscall.go | 112 + .../x/sys/windows/syscall_windows.go | 1672 + .../golang.org/x/sys/windows/types_windows.go | 2775 ++ .../x/sys/windows/types_windows_386.go | 35 + .../x/sys/windows/types_windows_amd64.go | 34 + .../x/sys/windows/types_windows_arm.go | 35 + .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 9468 +++++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 3652 ++ .../x/text/secure/bidirule/bidirule10.0.0.go | 1 + .../x/text/secure/bidirule/bidirule9.0.0.go | 1 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 221 +- vendor/golang.org/x/text/unicode/bidi/core.go | 63 +- .../x/text/unicode/bidi/tables10.0.0.go | 1 + .../x/text/unicode/bidi/tables11.0.0.go | 1 + .../x/text/unicode/bidi/tables12.0.0.go | 3 +- .../x/text/unicode/bidi/tables13.0.0.go | 1956 + .../x/text/unicode/bidi/tables9.0.0.go | 1 + .../x/text/unicode/norm/tables10.0.0.go | 1 + .../x/text/unicode/norm/tables11.0.0.go | 1 + 
.../x/text/unicode/norm/tables12.0.0.go | 3 +- .../x/text/unicode/norm/tables13.0.0.go | 7761 ++++ .../x/text/unicode/norm/tables9.0.0.go | 1 + vendor/golang.org/x/text/width/kind_string.go | 28 + .../golang.org/x/text/width/tables10.0.0.go | 1319 + .../golang.org/x/text/width/tables11.0.0.go | 1331 + .../golang.org/x/text/width/tables12.0.0.go | 1351 + .../golang.org/x/text/width/tables13.0.0.go | 1352 + vendor/golang.org/x/text/width/tables9.0.0.go | 1287 + vendor/golang.org/x/text/width/transform.go | 239 + vendor/golang.org/x/text/width/trieval.go | 30 + vendor/golang.org/x/text/width/width.go | 206 + .../x/tools/go/analysis/analysis.go | 242 + .../x/tools/go/analysis/diagnostic.go | 65 + vendor/golang.org/x/tools/go/analysis/doc.go | 321 + .../go/analysis/passes/asmdecl/asmdecl.go | 802 + .../tools/go/analysis/passes/assign/assign.go | 76 + .../tools/go/analysis/passes/atomic/atomic.go | 96 + .../passes/atomicalign/atomicalign.go | 117 + .../x/tools/go/analysis/passes/bools/bools.go | 221 + .../go/analysis/passes/buildssa/buildssa.go | 117 + .../go/analysis/passes/buildtag/buildtag.go | 367 + .../analysis/passes/buildtag/buildtag_old.go | 174 + .../go/analysis/passes/cgocall/cgocall.go | 376 + .../go/analysis/passes/composite/composite.go | 117 + .../go/analysis/passes/composite/whitelist.go | 34 + .../go/analysis/passes/copylock/copylock.go | 300 + .../go/analysis/passes/ctrlflow/ctrlflow.go | 226 + .../passes/deepequalerrors/deepequalerrors.go | 115 + .../go/analysis/passes/errorsas/errorsas.go | 75 + .../passes/fieldalignment/fieldalignment.go | 368 + .../go/analysis/passes/findcall/findcall.go | 98 + .../passes/framepointer/framepointer.go | 91 + .../passes/httpresponse/httpresponse.go | 169 + .../passes/ifaceassert/ifaceassert.go | 105 + .../go/analysis/passes/inspect/inspect.go | 49 + .../passes/internal/analysisutil/util.go | 120 + .../passes/loopclosure/loopclosure.go | 165 + .../analysis/passes/lostcancel/lostcancel.go | 330 + .../go/analysis/passes/nilfunc/nilfunc.go | 74 + .../go/analysis/passes/nilness/nilness.go | 354 + .../go/analysis/passes/pkgfact/pkgfact.go | 127 + .../tools/go/analysis/passes/printf/printf.go | 1122 + .../tools/go/analysis/passes/printf/types.go | 246 + .../reflectvaluecompare.go | 99 + .../tools/go/analysis/passes/shadow/shadow.go | 290 + .../x/tools/go/analysis/passes/shift/dead.go | 101 + .../x/tools/go/analysis/passes/shift/shift.go | 101 + .../passes/sigchanyzer/sigchanyzer.go | 154 + .../go/analysis/passes/sortslice/analyzer.go | 123 + .../analysis/passes/stdmethods/stdmethods.go | 204 + .../analysis/passes/stringintconv/string.go | 126 + .../go/analysis/passes/structtag/structtag.go | 313 + .../testinggoroutine/testinggoroutine.go | 154 + .../x/tools/go/analysis/passes/tests/tests.go | 188 + .../go/analysis/passes/unmarshal/unmarshal.go | 100 + .../passes/unreachable/unreachable.go | 325 + .../go/analysis/passes/unsafeptr/unsafeptr.go | 168 + .../passes/unusedresult/unusedresult.go | 131 + .../passes/unusedwrite/unusedwrite.go | 184 + .../x/tools/go/analysis/validate.go | 130 + .../x/tools/go/ast/astutil/enclosing.go | 627 + .../x/tools/go/ast/astutil/imports.go | 482 + .../x/tools/go/ast/astutil/rewrite.go | 483 + .../golang.org/x/tools/go/ast/astutil/util.go | 18 + .../x/tools/go/ast/inspector/inspector.go | 186 + .../x/tools/go/ast/inspector/typeof.go | 220 + vendor/golang.org/x/tools/go/cfg/builder.go | 510 + vendor/golang.org/x/tools/go/cfg/cfg.go | 150 + .../x/tools/go/gcexportdata/gcexportdata.go | 133 + 
.../x/tools/go/gcexportdata/importer.go | 73 + .../golang.org/x/tools/go/internal/cgo/cgo.go | 222 + .../x/tools/go/internal/cgo/cgo_pkgconfig.go | 39 + .../x/tools/go/internal/gcimporter/bexport.go | 852 + .../x/tools/go/internal/gcimporter/bimport.go | 1039 + .../go/internal/gcimporter/exportdata.go | 93 + .../go/internal/gcimporter/gcimporter.go | 1078 + .../x/tools/go/internal/gcimporter/iexport.go | 781 + .../x/tools/go/internal/gcimporter/iimport.go | 676 + .../go/internal/gcimporter/newInterface10.go | 22 + .../go/internal/gcimporter/newInterface11.go | 14 + .../tools/go/internal/packagesdriver/sizes.go | 49 + vendor/golang.org/x/tools/go/loader/doc.go | 204 + vendor/golang.org/x/tools/go/loader/loader.go | 1078 + vendor/golang.org/x/tools/go/loader/util.go | 124 + vendor/golang.org/x/tools/go/packages/doc.go | 221 + .../x/tools/go/packages/external.go | 101 + .../golang.org/x/tools/go/packages/golist.go | 1099 + .../x/tools/go/packages/golist_overlay.go | 575 + .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 1239 + .../golang.org/x/tools/go/packages/visit.go | 59 + vendor/golang.org/x/tools/go/ssa/blockopt.go | 187 + vendor/golang.org/x/tools/go/ssa/builder.go | 2386 ++ vendor/golang.org/x/tools/go/ssa/const.go | 169 + vendor/golang.org/x/tools/go/ssa/create.go | 270 + vendor/golang.org/x/tools/go/ssa/doc.go | 125 + vendor/golang.org/x/tools/go/ssa/dom.go | 341 + vendor/golang.org/x/tools/go/ssa/emit.go | 478 + vendor/golang.org/x/tools/go/ssa/func.go | 691 + vendor/golang.org/x/tools/go/ssa/identical.go | 12 + .../golang.org/x/tools/go/ssa/identical_17.go | 12 + vendor/golang.org/x/tools/go/ssa/lift.go | 653 + vendor/golang.org/x/tools/go/ssa/lvalue.go | 120 + vendor/golang.org/x/tools/go/ssa/methods.go | 239 + vendor/golang.org/x/tools/go/ssa/mode.go | 105 + vendor/golang.org/x/tools/go/ssa/print.go | 431 + vendor/golang.org/x/tools/go/ssa/sanity.go | 539 + vendor/golang.org/x/tools/go/ssa/source.go | 293 + vendor/golang.org/x/tools/go/ssa/ssa.go | 1696 + .../golang.org/x/tools/go/ssa/ssautil/load.go | 175 + .../x/tools/go/ssa/ssautil/switch.go | 234 + .../x/tools/go/ssa/ssautil/visit.go | 79 + vendor/golang.org/x/tools/go/ssa/testmain.go | 274 + vendor/golang.org/x/tools/go/ssa/util.go | 89 + vendor/golang.org/x/tools/go/ssa/wrappers.go | 290 + .../x/tools/go/types/objectpath/objectpath.go | 524 + .../x/tools/go/types/typeutil/callee.go | 46 + .../x/tools/go/types/typeutil/imports.go | 31 + .../x/tools/go/types/typeutil/map.go | 313 + .../tools/go/types/typeutil/methodsetcache.go | 72 + .../x/tools/go/types/typeutil/ui.go | 52 + vendor/golang.org/x/tools/imports/forward.go | 77 + .../internal/analysisinternal/analysis.go | 425 + .../x/tools/internal/event/core/event.go | 85 + .../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go | 22 + .../x/tools/internal/event/label/label.go | 215 + .../x/tools/internal/fastwalk/fastwalk.go | 196 + .../fastwalk/fastwalk_dirent_fileno.go | 14 + .../internal/fastwalk/fastwalk_dirent_ino.go | 15 + .../fastwalk/fastwalk_dirent_namlen_bsd.go | 14 + .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 + .../internal/fastwalk/fastwalk_portable.go | 38 + .../tools/internal/fastwalk/fastwalk_unix.go | 153 + .../x/tools/internal/gocommand/invoke.go | 273 + 
.../x/tools/internal/gocommand/vendor.go | 107 + .../x/tools/internal/gocommand/version.go | 51 + .../x/tools/internal/gopathwalk/walk.go | 264 + .../x/tools/internal/imports/fix.go | 1730 + .../x/tools/internal/imports/imports.go | 346 + .../x/tools/internal/imports/mod.go | 695 + .../x/tools/internal/imports/mod_cache.go | 236 + .../x/tools/internal/imports/sortimports.go | 280 + .../x/tools/internal/imports/zstdlib.go | 10733 ++++++ .../x/tools/internal/lsp/fuzzy/input.go | 168 + .../x/tools/internal/lsp/fuzzy/matcher.go | 398 + .../internal/packagesinternal/packages.go | 28 + .../x/tools/internal/typeparams/doc.go | 11 + .../tools/internal/typeparams/notypeparams.go | 90 + .../x/tools/internal/typeparams/typeparams.go | 105 + .../tools/internal/typesinternal/errorcode.go | 1368 + .../typesinternal/errorcode_string.go | 153 + .../x/tools/internal/typesinternal/types.go | 45 + vendor/google.golang.org/genproto/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 201 + vendor/google.golang.org/grpc/AUTHORS | 1 + .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 61 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 + vendor/google.golang.org/grpc/Makefile | 48 + vendor/google.golang.org/grpc/README.md | 141 + vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 79 + vendor/google.golang.org/grpc/backoff.go | 61 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../grpc/balancer/balancer.go | 388 + .../grpc/balancer/base/balancer.go | 270 + .../grpc/balancer/base/base.go | 71 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 83 + .../grpc/balancer_conn_wrappers.go | 267 + .../grpc_binarylog_v1/binarylog.pb.go | 1187 + vendor/google.golang.org/grpc/call.go | 74 + vendor/google.golang.org/grpc/clientconn.go | 1601 + .../grpc/cmd/protoc-gen-go-grpc/LICENSE | 202 + .../grpc/cmd/protoc-gen-go-grpc/README.md | 21 + .../grpc/cmd/protoc-gen-go-grpc/go.mod | 5 + .../grpc/cmd/protoc-gen-go-grpc/go.sum | 18 + .../grpc/cmd/protoc-gen-go-grpc/grpc.go | 430 + .../grpc/cmd/protoc-gen-go-grpc/main.go | 68 + vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 62 + vendor/google.golang.org/grpc/codes/codes.go | 244 + .../grpc/connectivity/connectivity.go | 63 + .../grpc/credentials/credentials.go | 272 + .../grpc/credentials/go12.go | 30 + .../google.golang.org/grpc/credentials/tls.go | 233 + vendor/google.golang.org/grpc/dialoptions.go | 622 + vendor/google.golang.org/grpc/doc.go | 26 + .../grpc/encoding/encoding.go | 130 + .../grpc/encoding/proto/proto.go | 58 + vendor/google.golang.org/grpc/go.mod | 17 + vendor/google.golang.org/grpc/go.sum | 122 + .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 132 + .../google.golang.org/grpc/grpclog/logger.go | 87 + .../grpc/grpclog/loggerv2.go | 221 + vendor/google.golang.org/grpc/install_gae.sh | 6 + vendor/google.golang.org/grpc/interceptor.go | 101 + .../grpc/internal/backoff/backoff.go | 73 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 170 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 208 + .../grpc/internal/binarylog/method_logger.go | 422 + .../grpc/internal/binarylog/sink.go | 159 + .../grpc/internal/buffer/unbounded.go | 85 + 
.../grpc/internal/channelz/funcs.go | 737 + .../grpc/internal/channelz/logging.go | 102 + .../grpc/internal/channelz/types.go | 701 + .../grpc/internal/channelz/types_linux.go | 53 + .../grpc/internal/channelz/types_nonlinux.go | 42 + .../grpc/internal/channelz/util_linux.go | 39 + .../grpc/internal/channelz/util_nonlinux.go | 26 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 77 + .../internal/credentials/spiffe_appengine.go | 31 + .../grpc/internal/credentials/syscallconn.go | 60 + .../credentials/syscallconn_appengine.go | 30 + .../grpc/internal/credentials/util.go | 50 + .../grpc/internal/envconfig/envconfig.go | 38 + .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 81 + .../grpc/internal/grpcrand/grpcrand.go | 60 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 84 + .../grpc/internal/grpcutil/target.go | 89 + .../grpc/internal/internal.go | 88 + .../grpc/internal/metadata/metadata.go | 50 + .../grpc/internal/resolver/config_selector.go | 164 + .../internal/resolver/dns/dns_resolver.go | 463 + .../grpc/internal/resolver/dns/go113.go | 33 + .../resolver/passthrough/passthrough.go | 57 + .../grpc/internal/resolver/unix/unix.go | 63 + .../internal/serviceconfig/serviceconfig.go | 178 + .../grpc/internal/status/status.go | 162 + .../grpc/internal/syscall/syscall_linux.go | 114 + .../grpc/internal/syscall/syscall_nonlinux.go | 76 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 980 + .../grpc/internal/transport/defaults.go | 49 + .../grpc/internal/transport/flowcontrol.go | 217 + .../grpc/internal/transport/handler_server.go | 462 + .../grpc/internal/transport/http2_client.go | 1648 + .../grpc/internal/transport/http2_server.go | 1347 + .../grpc/internal/transport/http_util.go | 444 + .../transport/networktype/networktype.go | 46 + .../grpc/internal/transport/proxy.go | 142 + .../grpc/internal/transport/transport.go | 804 + .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 240 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 177 + vendor/google.golang.org/grpc/pickfirst.go | 136 + vendor/google.golang.org/grpc/preloader.go | 67 + vendor/google.golang.org/grpc/regenerate.sh | 119 + .../grpc/resolver/resolver.go | 260 + .../grpc/resolver_conn_wrapper.go | 187 + vendor/google.golang.org/grpc/rpc_util.go | 914 + vendor/google.golang.org/grpc/server.go | 1868 + .../google.golang.org/grpc/service_config.go | 404 + .../grpc/serviceconfig/serviceconfig.go | 44 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 312 + .../google.golang.org/grpc/status/status.go | 129 + vendor/google.golang.org/grpc/stream.go | 1600 + vendor/google.golang.org/grpc/tap/tap.go | 56 + vendor/google.golang.org/grpc/trace.go | 123 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 215 + vendor/google.golang.org/protobuf/AUTHORS | 3 + .../google.golang.org/protobuf/CONTRIBUTORS | 3 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../cmd/protoc-gen-go/internal_gengo/init.go | 168 + .../cmd/protoc-gen-go/internal_gengo/main.go | 884 + .../protoc-gen-go/internal_gengo/reflect.go | 351 + 
.../internal_gengo/well_known_types.go | 1080 + .../protobuf/cmd/protoc-gen-go/main.go | 56 + .../protobuf/compiler/protogen/protogen.go | 1261 + .../protobuf/encoding/prototext/decode.go | 770 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 371 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 318 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 69 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 241 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 270 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + .../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/filedesc/build.go | 158 + .../protobuf/internal/filedesc/desc.go | 631 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 450 + .../internal/filedesc/desc_list_gen.go | 356 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 9 + .../internal/flags/proto_legacy_enable.go | 9 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 830 + .../protobuf/internal/impl/codec_gen.go | 5637 +++ .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../protobuf/internal/impl/codec_map_go112.go | 11 + .../protobuf/internal/impl/codec_message.go | 217 + .../internal/impl/codec_messageset.go | 123 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 496 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 276 + .../protobuf/internal/impl/encode.go | 201 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 176 + .../protobuf/internal/impl/legacy_file.go | 81 + 
.../protobuf/internal/impl/legacy_message.go | 565 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 276 + .../protobuf/internal/impl/message_reflect.go | 465 + .../internal/impl/message_reflect_field.go | 543 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 178 + .../protobuf/internal/impl/pointer_unsafe.go | 174 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 278 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 319 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 167 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 93 + .../google.golang.org/protobuf/proto/proto.go | 43 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 504 + .../protobuf/reflect/protoreflect/source.go | 128 + .../reflect/protoreflect/source_gen.go | 461 + .../protobuf/reflect/protoreflect/type.go | 665 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + .../reflect/protoreflect/value_union.go | 411 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 880 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../types/descriptorpb/descriptor.pb.go | 3957 ++ .../protobuf/types/known/anypb/any.pb.go | 498 + .../types/known/durationpb/duration.pb.go | 379 + .../types/known/timestamppb/timestamp.pb.go | 390 + .../protobuf/types/pluginpb/plugin.pb.go | 653 + vendor/gopkg.in/ini.v1/file.go | 18 +- vendor/gopkg.in/ini.v1/ini.go | 10 +- vendor/gopkg.in/ini.v1/parser.go | 4 +- vendor/gopkg.in/ini.v1/section.go | 10 +- vendor/gopkg.in/ini.v1/struct.go | 62 +- vendor/gopkg.in/yaml.v2/.travis.yml | 1 + vendor/gopkg.in/yaml.v2/apic.go | 5 + vendor/gopkg.in/yaml.v2/go.mod | 8 +- vendor/gopkg.in/yaml.v2/yaml.go | 14 +- vendor/gopkg.in/yaml.v3/.travis.yml | 16 - vendor/gopkg.in/yaml.v3/apic.go | 1 + vendor/gopkg.in/yaml.v3/decode.go | 65 +- vendor/gopkg.in/yaml.v3/emitterc.go | 58 +- vendor/gopkg.in/yaml.v3/encode.go | 30 +- vendor/gopkg.in/yaml.v3/parserc.go | 48 +- vendor/gopkg.in/yaml.v3/scannerc.go | 49 +- vendor/gopkg.in/yaml.v3/yaml.go | 40 +- 
vendor/gopkg.in/yaml.v3/yamlh.go | 2 + vendor/honnef.co/go/tools/LICENSE | 20 + vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY | 121 + .../honnef.co/go/tools/analysis/code/code.go | 294 + .../honnef.co/go/tools/analysis/code/visit.go | 51 + .../honnef.co/go/tools/analysis/edit/edit.go | 74 + .../go/tools/analysis/facts/deprecated.go | 145 + .../go/tools/analysis/facts/directives.go | 20 + .../go/tools/analysis/facts/generated.go | 97 + .../tools/analysis/facts/nilness/nilness.go | 242 + .../go/tools/analysis/facts/purity.go | 178 + .../go/tools/analysis/facts/token.go | 24 + .../analysis/facts/typedness/typedness.go | 242 + .../honnef.co/go/tools/analysis/lint/lint.go | 198 + .../go/tools/analysis/report/report.go | 247 + vendor/honnef.co/go/tools/config/config.go | 245 + vendor/honnef.co/go/tools/config/example.conf | 10 + .../go/tools/go/ast/astutil/upstream.go | 20 + .../honnef.co/go/tools/go/ast/astutil/util.go | 299 + vendor/honnef.co/go/tools/go/ir/LICENSE | 28 + vendor/honnef.co/go/tools/go/ir/UPSTREAM | 9 + vendor/honnef.co/go/tools/go/ir/blockopt.go | 209 + vendor/honnef.co/go/tools/go/ir/builder.go | 2479 ++ vendor/honnef.co/go/tools/go/ir/const.go | 153 + vendor/honnef.co/go/tools/go/ir/create.go | 288 + vendor/honnef.co/go/tools/go/ir/doc.go | 129 + vendor/honnef.co/go/tools/go/ir/dom.go | 469 + vendor/honnef.co/go/tools/go/ir/emit.go | 461 + vendor/honnef.co/go/tools/go/ir/exits.go | 317 + vendor/honnef.co/go/tools/go/ir/func.go | 983 + vendor/honnef.co/go/tools/go/ir/html.go | 1124 + vendor/honnef.co/go/tools/go/ir/identical.go | 7 + .../honnef.co/go/tools/go/ir/identical_17.go | 7 + .../honnef.co/go/tools/go/ir/irutil/load.go | 184 + .../honnef.co/go/tools/go/ir/irutil/loops.go | 54 + .../honnef.co/go/tools/go/ir/irutil/stub.go | 32 + .../honnef.co/go/tools/go/ir/irutil/switch.go | 264 + .../go/tools/go/ir/irutil/terminates.go | 70 + .../honnef.co/go/tools/go/ir/irutil/util.go | 165 + .../honnef.co/go/tools/go/ir/irutil/visit.go | 79 + vendor/honnef.co/go/tools/go/ir/lift.go | 1075 + vendor/honnef.co/go/tools/go/ir/lvalue.go | 116 + vendor/honnef.co/go/tools/go/ir/methods.go | 239 + vendor/honnef.co/go/tools/go/ir/mode.go | 98 + vendor/honnef.co/go/tools/go/ir/print.go | 472 + vendor/honnef.co/go/tools/go/ir/sanity.go | 561 + vendor/honnef.co/go/tools/go/ir/source.go | 270 + vendor/honnef.co/go/tools/go/ir/ssa.go | 1898 + .../honnef.co/go/tools/go/ir/staticcheck.conf | 3 + vendor/honnef.co/go/tools/go/ir/util.go | 89 + vendor/honnef.co/go/tools/go/ir/wrappers.go | 290 + vendor/honnef.co/go/tools/go/ir/write.go | 5 + .../go/tools/go/types/typeutil/upstream.go | 25 + .../go/tools/go/types/typeutil/util.go | 131 + .../tools/internal/passes/buildir/buildir.go | 107 + .../go/tools/internal/sharedcheck/lint.go | 206 + vendor/honnef.co/go/tools/knowledge/arg.go | 64 + .../go/tools/knowledge/deprecated.go | 217 + vendor/honnef.co/go/tools/pattern/convert.go | 242 + vendor/honnef.co/go/tools/pattern/doc.go | 273 + vendor/honnef.co/go/tools/pattern/fuzz.go | 50 + vendor/honnef.co/go/tools/pattern/lexer.go | 221 + vendor/honnef.co/go/tools/pattern/match.go | 547 + vendor/honnef.co/go/tools/pattern/parser.go | 463 + vendor/honnef.co/go/tools/pattern/pattern.go | 496 + vendor/honnef.co/go/tools/printf/fuzz.go | 11 + vendor/honnef.co/go/tools/printf/printf.go | 197 + vendor/honnef.co/go/tools/simple/analysis.go | 152 + vendor/honnef.co/go/tools/simple/doc.go | 499 + vendor/honnef.co/go/tools/simple/lint.go | 1938 + .../go/tools/staticcheck/analysis.go | 302 + 
.../go/tools/staticcheck/buildtag.go | 21 + vendor/honnef.co/go/tools/staticcheck/doc.go | 1126 + vendor/honnef.co/go/tools/staticcheck/lint.go | 4793 +++ .../honnef.co/go/tools/staticcheck/rules.go | 291 + .../go/tools/staticcheck/structtag.go | 58 + .../honnef.co/go/tools/stylecheck/analysis.go | 83 + vendor/honnef.co/go/tools/stylecheck/doc.go | 237 + vendor/honnef.co/go/tools/stylecheck/lint.go | 936 + vendor/honnef.co/go/tools/stylecheck/names.go | 281 + vendor/honnef.co/go/tools/unused/edge.go | 55 + .../go/tools/unused/edgekind_string.go | 109 + .../honnef.co/go/tools/unused/implements.go | 82 + .../go/tools/unused/typemap/identical.go | 149 + .../honnef.co/go/tools/unused/typemap/map.go | 318 + vendor/honnef.co/go/tools/unused/unused.go | 1716 + vendor/modules.txt | 569 +- vendor/mvdan.cc/gofumpt/LICENSE | 27 + vendor/mvdan.cc/gofumpt/LICENSE.google | 27 + vendor/mvdan.cc/gofumpt/format/format.go | 703 + vendor/mvdan.cc/interfacer/LICENSE | 27 + vendor/mvdan.cc/interfacer/check/cache.go | 50 + vendor/mvdan.cc/interfacer/check/check.go | 462 + vendor/mvdan.cc/interfacer/check/types.go | 170 + vendor/mvdan.cc/lint/.travis.yml | 7 + vendor/mvdan.cc/lint/LICENSE | 27 + vendor/mvdan.cc/lint/README.md | 27 + vendor/mvdan.cc/lint/lint.go | 28 + vendor/mvdan.cc/unparam/LICENSE | 27 + vendor/mvdan.cc/unparam/check/check.go | 979 + 2356 files changed, 438324 insertions(+), 17608 deletions(-) create mode 100644 grpc/README.md create mode 100644 grpc/client.go create mode 100644 grpc/server.go create mode 100644 tools/testserver/math/math.pb.go create mode 100644 tools/testserver/math/math.proto create mode 100644 tools/testserver/math/math_grpc.pb.go create mode 100644 vendor/4d63.com/gochecknoglobals/LICENSE create mode 100644 vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go create mode 100644 vendor/github.com/Antonboom/errname/LICENSE create mode 100644 vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/go.mod create mode 100644 vendor/github.com/BurntSushi/toml/go.sum create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/Djarvur/go-err113/.gitignore create mode 100644 vendor/github.com/Djarvur/go-err113/.golangci.yml create mode 100644 vendor/github.com/Djarvur/go-err113/.travis.yml create mode 100644 vendor/github.com/Djarvur/go-err113/LICENSE create mode 100644 vendor/github.com/Djarvur/go-err113/README.adoc create mode 100644 vendor/github.com/Djarvur/go-err113/comparison.go create mode 100644 
vendor/github.com/Djarvur/go-err113/definition.go create mode 100644 vendor/github.com/Djarvur/go-err113/err113.go create mode 100644 vendor/github.com/Djarvur/go-err113/go.mod create mode 100644 vendor/github.com/Djarvur/go-err113/go.sum create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/Makefile create mode 100644 vendor/github.com/Masterminds/semver/README.md create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml create mode 100644 vendor/github.com/Masterminds/semver/collection.go create mode 100644 vendor/github.com/Masterminds/semver/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/doc.go create mode 100644 vendor/github.com/Masterminds/semver/version.go create mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/.gitignore create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/LICENSE create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/README.md create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/depguard.go create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/go.mod create mode 100644 vendor/github.com/OpenPeeDeeP/depguard/go.sum create mode 100644 vendor/github.com/alexkohler/prealloc/LICENSE create mode 100644 vendor/github.com/alexkohler/prealloc/pkg/prealloc.go create mode 100644 vendor/github.com/ashanbrown/forbidigo/LICENSE create mode 100644 vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go create mode 100644 vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go create mode 100644 vendor/github.com/ashanbrown/makezero/LICENSE create mode 100644 vendor/github.com/ashanbrown/makezero/makezero/makezero.go create mode 100644 vendor/github.com/bkielbasa/cyclop/LICENSE create mode 100644 vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/bombsimon/wsl/v3/.gitignore create mode 100644 vendor/github.com/bombsimon/wsl/v3/.travis.yml create mode 100644 vendor/github.com/bombsimon/wsl/v3/LICENSE create mode 100644 vendor/github.com/bombsimon/wsl/v3/README.md create mode 100644 vendor/github.com/bombsimon/wsl/v3/go.mod create mode 100644 vendor/github.com/bombsimon/wsl/v3/go.sum create mode 100644 vendor/github.com/bombsimon/wsl/v3/wsl.go create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod create mode 100644 vendor/github.com/cespare/xxhash/v2/go.sum create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/charithe/durationcheck/.gitignore create mode 100644 vendor/github.com/charithe/durationcheck/LICENSE create mode 100644 vendor/github.com/charithe/durationcheck/Makefile create mode 100644 vendor/github.com/charithe/durationcheck/README.md create mode 100644 
vendor/github.com/charithe/durationcheck/durationcheck.go create mode 100644 vendor/github.com/charithe/durationcheck/go.mod create mode 100644 vendor/github.com/charithe/durationcheck/go.sum create mode 100644 vendor/github.com/chavacava/garif/.gitignore create mode 100644 vendor/github.com/chavacava/garif/LICENSE create mode 100644 vendor/github.com/chavacava/garif/README.md create mode 100644 vendor/github.com/chavacava/garif/constructors.go create mode 100644 vendor/github.com/chavacava/garif/decorators.go create mode 100644 vendor/github.com/chavacava/garif/doc.go create mode 100644 vendor/github.com/chavacava/garif/go.mod create mode 100644 vendor/github.com/chavacava/garif/go.sum create mode 100644 vendor/github.com/chavacava/garif/io.go create mode 100644 vendor/github.com/chavacava/garif/models.go create mode 100644 vendor/github.com/daixiang0/gci/LICENSE create mode 100644 vendor/github.com/daixiang0/gci/pkg/gci/gci.go create mode 100644 vendor/github.com/daixiang0/gci/pkg/gci/std.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/.gitignore create mode 100644 vendor/github.com/denis-tingajkin/go-header/.go-header.yml create mode 100644 vendor/github.com/denis-tingajkin/go-header/LICENSE create mode 100644 vendor/github.com/denis-tingajkin/go-header/README.md create mode 100644 vendor/github.com/denis-tingajkin/go-header/analyzer.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/config.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/go.mod create mode 100644 vendor/github.com/denis-tingajkin/go-header/go.sum create mode 100644 vendor/github.com/denis-tingajkin/go-header/issue.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/location.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/option.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/reader.go create mode 100644 vendor/github.com/denis-tingajkin/go-header/value.go create mode 100644 vendor/github.com/esimonov/ifshort/LICENSE create mode 100644 vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go create mode 100644 vendor/github.com/ettle/strcase/.gitignore create mode 100644 vendor/github.com/ettle/strcase/.golangci.yml create mode 100644 vendor/github.com/ettle/strcase/.readme.tmpl create mode 100644 vendor/github.com/ettle/strcase/LICENSE create mode 100644 vendor/github.com/ettle/strcase/Makefile create mode 100644 vendor/github.com/ettle/strcase/README.md create mode 100644 vendor/github.com/ettle/strcase/caser.go create mode 100644 vendor/github.com/ettle/strcase/convert.go create mode 100644 vendor/github.com/ettle/strcase/doc.go create mode 100644 vendor/github.com/ettle/strcase/go.mod create mode 100644 vendor/github.com/ettle/strcase/go.sum create mode 100644 vendor/github.com/ettle/strcase/initialism.go create mode 100644 vendor/github.com/ettle/strcase/split.go create mode 100644 vendor/github.com/ettle/strcase/strcase.go create mode 100644 vendor/github.com/ettle/strcase/unicode.go create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/fatih/color/go.mod create mode 100644 vendor/github.com/fatih/color/go.sum create mode 100644 vendor/github.com/fatih/structtag/LICENSE create mode 100644 
vendor/github.com/fatih/structtag/README.md create mode 100644 vendor/github.com/fatih/structtag/go.mod create mode 100644 vendor/github.com/fatih/structtag/tags.go create mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/.gitignore create mode 100644 vendor/github.com/fsnotify/fsnotify/.travis.yml create mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md create mode 100644 vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md create mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 vendor/github.com/fsnotify/fsnotify/README.md create mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go create mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/go.mod create mode 100644 vendor/github.com/fsnotify/fsnotify/go.sum create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go create mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 vendor/github.com/fzipp/gocyclo/CHANGELOG.md create mode 100644 vendor/github.com/fzipp/gocyclo/CONTRIBUTORS create mode 100644 vendor/github.com/fzipp/gocyclo/LICENSE create mode 100644 vendor/github.com/fzipp/gocyclo/README.md create mode 100644 vendor/github.com/fzipp/gocyclo/analyze.go create mode 100644 vendor/github.com/fzipp/gocyclo/complexity.go create mode 100644 vendor/github.com/fzipp/gocyclo/directives.go create mode 100644 vendor/github.com/fzipp/gocyclo/go.mod create mode 100644 vendor/github.com/fzipp/gocyclo/stats.go create mode 100644 vendor/github.com/go-critic/go-critic/LICENSE create mode 100644 vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/badLock_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/checkers.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go create mode 100644 
vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go create mode 100644 
vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astfind.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astset.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/zero_value.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/underef_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go create mode 100644 
vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/utils.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go create mode 100644 vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go create mode 100644 vendor/github.com/go-critic/go-critic/framework/linter/context.go create mode 100644 vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go create mode 100644 vendor/github.com/go-toolsmith/astcast/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/astcast/LICENSE create mode 100644 vendor/github.com/go-toolsmith/astcast/README.md create mode 100644 vendor/github.com/go-toolsmith/astcast/astcast.go create mode 100644 vendor/github.com/go-toolsmith/astcast/go.mod create mode 100644 vendor/github.com/go-toolsmith/astcast/go.sum create mode 100644 vendor/github.com/go-toolsmith/astcopy/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/astcopy/LICENSE create mode 100644 vendor/github.com/go-toolsmith/astcopy/README.md create mode 100644 vendor/github.com/go-toolsmith/astcopy/astcopy.go create mode 100644 vendor/github.com/go-toolsmith/astcopy/go.mod create mode 100644 vendor/github.com/go-toolsmith/astcopy/go.sum create mode 100644 vendor/github.com/go-toolsmith/astequal/.gitignore create mode 100644 vendor/github.com/go-toolsmith/astequal/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/astequal/LICENSE create mode 100644 vendor/github.com/go-toolsmith/astequal/README.md create mode 100644 vendor/github.com/go-toolsmith/astequal/astequal.go create mode 100644 vendor/github.com/go-toolsmith/astequal/go.mod create mode 100644 vendor/github.com/go-toolsmith/astfmt/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/astfmt/LICENSE create mode 100644 vendor/github.com/go-toolsmith/astfmt/README.md create mode 100644 vendor/github.com/go-toolsmith/astfmt/astfmt.go create mode 100644 vendor/github.com/go-toolsmith/astfmt/go.mod create mode 100644 vendor/github.com/go-toolsmith/astfmt/go.sum create mode 100644 vendor/github.com/go-toolsmith/astp/.gitignore create mode 100644 vendor/github.com/go-toolsmith/astp/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/astp/LICENSE create mode 100644 vendor/github.com/go-toolsmith/astp/README.md create mode 100644 vendor/github.com/go-toolsmith/astp/decl.go create mode 100644 vendor/github.com/go-toolsmith/astp/expr.go create mode 100644 vendor/github.com/go-toolsmith/astp/go.mod create mode 100644 vendor/github.com/go-toolsmith/astp/go.sum create mode 100644 vendor/github.com/go-toolsmith/astp/stmt.go create mode 100644 vendor/github.com/go-toolsmith/strparse/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/strparse/LICENSE create mode 100644 vendor/github.com/go-toolsmith/strparse/README.md create mode 100644 vendor/github.com/go-toolsmith/strparse/go.mod create mode 100644 
vendor/github.com/go-toolsmith/strparse/strparse.go create mode 100644 vendor/github.com/go-toolsmith/typep/.travis.yml create mode 100644 vendor/github.com/go-toolsmith/typep/LICENSE create mode 100644 vendor/github.com/go-toolsmith/typep/README.md create mode 100644 vendor/github.com/go-toolsmith/typep/doc.go create mode 100644 vendor/github.com/go-toolsmith/typep/go.mod create mode 100644 vendor/github.com/go-toolsmith/typep/predicates.go create mode 100644 vendor/github.com/go-toolsmith/typep/safeExpr.go create mode 100644 vendor/github.com/go-toolsmith/typep/simplePredicates.go create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/LICENSE create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/README.md create mode 100644 vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go create mode 100644 vendor/github.com/gobwas/glob/.gitignore create mode 100644 vendor/github.com/gobwas/glob/.travis.yml create mode 100644 vendor/github.com/gobwas/glob/LICENSE create mode 100644 vendor/github.com/gobwas/glob/bench.sh create mode 100644 vendor/github.com/gobwas/glob/compiler/compiler.go create mode 100644 vendor/github.com/gobwas/glob/glob.go create mode 100644 vendor/github.com/gobwas/glob/match/any.go create mode 100644 vendor/github.com/gobwas/glob/match/any_of.go create mode 100644 vendor/github.com/gobwas/glob/match/btree.go create mode 100644 vendor/github.com/gobwas/glob/match/contains.go create mode 100644 vendor/github.com/gobwas/glob/match/every_of.go create mode 100644 vendor/github.com/gobwas/glob/match/list.go create mode 100644 vendor/github.com/gobwas/glob/match/match.go create mode 100644 vendor/github.com/gobwas/glob/match/max.go create mode 100644 vendor/github.com/gobwas/glob/match/min.go create mode 100644 vendor/github.com/gobwas/glob/match/nothing.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/prefix_suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/range.go create mode 100644 vendor/github.com/gobwas/glob/match/row.go create mode 100644 vendor/github.com/gobwas/glob/match/segments.go create mode 100644 vendor/github.com/gobwas/glob/match/single.go create mode 100644 vendor/github.com/gobwas/glob/match/suffix.go create mode 100644 vendor/github.com/gobwas/glob/match/suffix_any.go create mode 100644 vendor/github.com/gobwas/glob/match/super.go create mode 100644 vendor/github.com/gobwas/glob/match/text.go create mode 100644 vendor/github.com/gobwas/glob/readme.md create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/ast.go create mode 100644 vendor/github.com/gobwas/glob/syntax/ast/parser.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/lexer.go create mode 100644 vendor/github.com/gobwas/glob/syntax/lexer/token.go create mode 100644 vendor/github.com/gobwas/glob/syntax/syntax.go create mode 100644 vendor/github.com/gobwas/glob/util/runes/runes.go create mode 100644 vendor/github.com/gobwas/glob/util/strings/strings.go create mode 100644 vendor/github.com/gofrs/flock/.gitignore create mode 100644 vendor/github.com/gofrs/flock/.travis.yml create mode 100644 vendor/github.com/gofrs/flock/LICENSE create mode 100644 vendor/github.com/gofrs/flock/README.md create mode 100644 vendor/github.com/gofrs/flock/appveyor.yml create mode 100644 vendor/github.com/gofrs/flock/flock.go create mode 100644 vendor/github.com/gofrs/flock/flock_aix.go create mode 100644 vendor/github.com/gofrs/flock/flock_unix.go create mode 
100644 vendor/github.com/gofrs/flock/flock_winapi.go create mode 100644 vendor/github.com/gofrs/flock/flock_windows.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golangci/check/LICENSE create mode 100644 vendor/github.com/golangci/check/cmd/structcheck/structcheck.go create mode 100644 vendor/github.com/golangci/check/cmd/varcheck/varcheck.go create mode 100644 vendor/github.com/golangci/dupl/.travis.yml create mode 100644 vendor/github.com/golangci/dupl/LICENSE create mode 100644 vendor/github.com/golangci/dupl/README.md create mode 100644 vendor/github.com/golangci/dupl/job/buildtree.go create mode 100644 vendor/github.com/golangci/dupl/job/parse.go create mode 100644 vendor/github.com/golangci/dupl/main.go create mode 100644 vendor/github.com/golangci/dupl/printer/html.go create mode 100644 vendor/github.com/golangci/dupl/printer/plumbing.go create mode 100644 vendor/github.com/golangci/dupl/printer/printer.go create mode 100644 vendor/github.com/golangci/dupl/printer/text.go create mode 100644 vendor/github.com/golangci/dupl/suffixtree/dupl.go create mode 100644 vendor/github.com/golangci/dupl/suffixtree/suffixtree.go create mode 100644 vendor/github.com/golangci/dupl/syntax/golang/golang.go create mode 100644 vendor/github.com/golangci/dupl/syntax/syntax.go create mode 100644 vendor/github.com/golangci/go-misc/LICENSE create mode 100644 vendor/github.com/golangci/go-misc/deadcode/README.md create mode 100644 vendor/github.com/golangci/go-misc/deadcode/deadcode.go rename vendor/github.com/{gorilla/context 
=> golangci/gofmt/gofmt}/LICENSE (83%) create mode 100644 vendor/github.com/golangci/gofmt/gofmt/doc.go create mode 100644 vendor/github.com/golangci/gofmt/gofmt/gofmt.go create mode 100644 vendor/github.com/golangci/gofmt/gofmt/golangci.go create mode 100644 vendor/github.com/golangci/gofmt/gofmt/internal.go create mode 100644 vendor/github.com/golangci/gofmt/gofmt/rewrite.go create mode 100644 vendor/github.com/golangci/gofmt/gofmt/simplify.go create mode 100644 vendor/github.com/golangci/gofmt/goimports/LICENSE create mode 100644 vendor/github.com/golangci/gofmt/goimports/goimports.go create mode 100644 vendor/github.com/golangci/gofmt/goimports/golangci.go create mode 100644 vendor/github.com/golangci/golangci-lint/LICENSE create mode 100644 vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go create mode 100644 vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/cache/cache.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/cache/default.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/cache/hash.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/config.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/help.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/root.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/run.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/version.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/config.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/issues.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/linters.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/output.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/reader.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/run.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/severity.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go create mode 100644 
vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load/guard.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_facts.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go create mode 100644 
vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go create mode 100644 
vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/load.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/packages/util.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/github.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/html.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/json.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/printers/text.go create mode 
100644 vendor/github.com/golangci/golangci-lint/pkg/report/data.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/report/log.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/issue.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/processor.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go create mode 100644 vendor/github.com/golangci/lint-1/.travis.yml create mode 100644 vendor/github.com/golangci/lint-1/CONTRIBUTING.md create mode 100644 vendor/github.com/golangci/lint-1/LICENSE create mode 100644 vendor/github.com/golangci/lint-1/README.md create mode 100644 vendor/github.com/golangci/lint-1/go.mod create mode 100644 vendor/github.com/golangci/lint-1/go.sum create mode 100644 vendor/github.com/golangci/lint-1/lint.go create mode 100644 vendor/github.com/golangci/maligned/LICENSE create mode 100644 vendor/github.com/golangci/maligned/README create mode 100644 vendor/github.com/golangci/maligned/maligned.go create mode 100644 vendor/github.com/golangci/misspell/.gitignore create mode 100644 vendor/github.com/golangci/misspell/.travis.yml create mode 100644 
vendor/github.com/golangci/misspell/Dockerfile create mode 100644 vendor/github.com/golangci/misspell/Gopkg.lock create mode 100644 vendor/github.com/golangci/misspell/Gopkg.toml create mode 100644 vendor/github.com/golangci/misspell/LICENSE create mode 100644 vendor/github.com/golangci/misspell/Makefile create mode 100644 vendor/github.com/golangci/misspell/README.md create mode 100644 vendor/github.com/golangci/misspell/RELEASE-HOWTO.md create mode 100644 vendor/github.com/golangci/misspell/ascii.go create mode 100644 vendor/github.com/golangci/misspell/case.go create mode 100644 vendor/github.com/golangci/misspell/goreleaser.yml create mode 100644 vendor/github.com/golangci/misspell/install-misspell.sh create mode 100644 vendor/github.com/golangci/misspell/legal.go create mode 100644 vendor/github.com/golangci/misspell/mime.go create mode 100644 vendor/github.com/golangci/misspell/notwords.go create mode 100644 vendor/github.com/golangci/misspell/replace.go create mode 100644 vendor/github.com/golangci/misspell/stringreplacer.go create mode 100644 vendor/github.com/golangci/misspell/stringreplacer_test.gox create mode 100644 vendor/github.com/golangci/misspell/url.go create mode 100644 vendor/github.com/golangci/misspell/words.go create mode 100644 vendor/github.com/golangci/revgrep/.gitignore create mode 100644 vendor/github.com/golangci/revgrep/.travis.yml create mode 100644 vendor/github.com/golangci/revgrep/LICENSE create mode 100644 vendor/github.com/golangci/revgrep/README.md create mode 100644 vendor/github.com/golangci/revgrep/go.mod create mode 100644 vendor/github.com/golangci/revgrep/go.sum create mode 100644 vendor/github.com/golangci/revgrep/revgrep.go create mode 100644 vendor/github.com/golangci/unconvert/LICENSE create mode 100644 vendor/github.com/golangci/unconvert/README create mode 100644 vendor/github.com/golangci/unconvert/unconvert.go create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 
vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/gordonklaus/ineffassign/LICENSE create mode 100644 vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go delete mode 100644 vendor/github.com/gorilla/context/.travis.yml delete mode 100644 vendor/github.com/gorilla/context/README.md delete mode 100644 vendor/github.com/gorilla/context/context.go delete mode 100644 vendor/github.com/gorilla/context/doc.go create mode 100644 vendor/github.com/gorilla/mux/AUTHORS delete mode 100644 vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md delete mode 100644 vendor/github.com/gorilla/mux/context_gorilla.go delete mode 100644 vendor/github.com/gorilla/mux/context_native.go create mode 100644 vendor/github.com/gorilla/mux/go.mod create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/LICENSE create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/README.md create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/call.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/file.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/go.mod create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/go.sum create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/pkg.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/ssa.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go create mode 100644 vendor/github.com/gostaticanalysis/analysisutil/types.go create mode 100644 vendor/github.com/gostaticanalysis/comment/LICENSE create mode 100644 vendor/github.com/gostaticanalysis/comment/README.md create mode 100644 vendor/github.com/gostaticanalysis/comment/comment.go create mode 100644 vendor/github.com/gostaticanalysis/comment/go.mod create mode 100644 vendor/github.com/gostaticanalysis/comment/go.sum create mode 100644 vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/.reviewdog.yml create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/LICENSE create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/README.md create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/go.mod create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/go.sum create mode 100644 vendor/github.com/gostaticanalysis/nilerr/LICENSE create mode 100644 vendor/github.com/gostaticanalysis/nilerr/README.md create mode 100644 vendor/github.com/gostaticanalysis/nilerr/go.mod create mode 100644 vendor/github.com/gostaticanalysis/nilerr/go.sum create mode 100644 vendor/github.com/gostaticanalysis/nilerr/nilerr.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE create mode 100644 
vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/auth.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/metadata.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go create mode 100644 
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml create mode 100644 vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl/Makefile create mode 100644 vendor/github.com/hashicorp/hcl/README.md create mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml create mode 100644 vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 vendor/github.com/hashicorp/hcl/go.mod create mode 100644 vendor/github.com/hashicorp/hcl/go.sum create mode 100644 vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/lex.go create mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/jgautheron/goconst/LICENSE create mode 100644 vendor/github.com/jgautheron/goconst/README.md create mode 100644 vendor/github.com/jgautheron/goconst/api.go create mode 100644 vendor/github.com/jgautheron/goconst/go.mod create mode 100644 vendor/github.com/jgautheron/goconst/parser.go create mode 100644 vendor/github.com/jgautheron/goconst/visitor.go create mode 100644 
vendor/github.com/jingyugao/rowserrcheck/LICENSE create mode 100644 vendor/github.com/jingyugao/rowserrcheck/passes/rowserr/rowserr.go create mode 100644 vendor/github.com/jirfag/go-printf-func-name/LICENSE create mode 100644 vendor/github.com/jirfag/go-printf-func-name/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/julz/importas/.gitignore create mode 100644 vendor/github.com/julz/importas/LICENSE create mode 100644 vendor/github.com/julz/importas/README.md create mode 100644 vendor/github.com/julz/importas/analyzer.go create mode 100644 vendor/github.com/julz/importas/config.go create mode 100644 vendor/github.com/julz/importas/flags.go create mode 100644 vendor/github.com/julz/importas/go.mod create mode 100644 vendor/github.com/julz/importas/go.sum create mode 100644 vendor/github.com/kisielk/errcheck/LICENSE create mode 100644 vendor/github.com/kisielk/errcheck/errcheck/embedded_walker.go create mode 100644 vendor/github.com/kisielk/errcheck/errcheck/errcheck.go create mode 100644 vendor/github.com/kisielk/errcheck/errcheck/tags.go create mode 100644 vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go rename vendor/github.com/{gorilla/mux => kisielk/gotool}/.travis.yml (53%) create mode 100644 vendor/github.com/kisielk/gotool/LEGAL create mode 100644 vendor/github.com/kisielk/gotool/LICENSE create mode 100644 vendor/github.com/kisielk/gotool/README.md create mode 100644 vendor/github.com/kisielk/gotool/go.mod create mode 100644 vendor/github.com/kisielk/gotool/go13.go create mode 100644 vendor/github.com/kisielk/gotool/go14-15.go create mode 100644 vendor/github.com/kisielk/gotool/go16-18.go create mode 100644 vendor/github.com/kisielk/gotool/internal/load/path.go create mode 100644 vendor/github.com/kisielk/gotool/internal/load/pkg.go create mode 100644 vendor/github.com/kisielk/gotool/internal/load/search.go create mode 100644 vendor/github.com/kisielk/gotool/match.go create mode 100644 vendor/github.com/kisielk/gotool/match18.go create mode 100644 vendor/github.com/kisielk/gotool/tool.go create mode 100644 vendor/github.com/kulti/thelper/LICENSE create mode 100644 vendor/github.com/kulti/thelper/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/kulti/thelper/pkg/analyzer/report.go create mode 100644 vendor/github.com/kunwardeep/paralleltest/LICENSE create mode 100644 vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go create mode 100644 vendor/github.com/kyoh86/exportloopref/.golangci.yml create mode 100644 vendor/github.com/kyoh86/exportloopref/.goreleaser.yml create mode 100644 vendor/github.com/kyoh86/exportloopref/LICENSE create mode 100644 vendor/github.com/kyoh86/exportloopref/Makefile create mode 100644 vendor/github.com/kyoh86/exportloopref/README.md create mode 100644 vendor/github.com/kyoh86/exportloopref/exportloopref.go create mode 100644 vendor/github.com/kyoh86/exportloopref/go.mod create mode 100644 vendor/github.com/kyoh86/exportloopref/go.sum create mode 100644 vendor/github.com/ldez/gomoddirectives/.gitignore create mode 100644 vendor/github.com/ldez/gomoddirectives/.golangci.yml create mode 100644 vendor/github.com/ldez/gomoddirectives/LICENSE create mode 100644 vendor/github.com/ldez/gomoddirectives/Makefile create mode 100644 vendor/github.com/ldez/gomoddirectives/go.mod create mode 100644 vendor/github.com/ldez/gomoddirectives/go.sum create mode 100644 vendor/github.com/ldez/gomoddirectives/gomoddirectives.go create mode 100644 vendor/github.com/ldez/gomoddirectives/module.go create mode 100644 
vendor/github.com/ldez/gomoddirectives/readme.md create mode 100644 vendor/github.com/ldez/tagliatelle/.gitignore create mode 100644 vendor/github.com/ldez/tagliatelle/.golangci.yml create mode 100644 vendor/github.com/ldez/tagliatelle/LICENSE create mode 100644 vendor/github.com/ldez/tagliatelle/Makefile create mode 100644 vendor/github.com/ldez/tagliatelle/go.mod create mode 100644 vendor/github.com/ldez/tagliatelle/go.sum create mode 100644 vendor/github.com/ldez/tagliatelle/readme.md create mode 100644 vendor/github.com/ldez/tagliatelle/tagliatelle.go create mode 100644 vendor/github.com/magiconair/properties/.gitignore create mode 100644 vendor/github.com/magiconair/properties/.travis.yml create mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md create mode 100644 vendor/github.com/magiconair/properties/LICENSE.md create mode 100644 vendor/github.com/magiconair/properties/README.md create mode 100644 vendor/github.com/magiconair/properties/decode.go create mode 100644 vendor/github.com/magiconair/properties/doc.go create mode 100644 vendor/github.com/magiconair/properties/go.mod create mode 100644 vendor/github.com/magiconair/properties/integrate.go create mode 100644 vendor/github.com/magiconair/properties/lex.go create mode 100644 vendor/github.com/magiconair/properties/load.go create mode 100644 vendor/github.com/magiconair/properties/parser.go create mode 100644 vendor/github.com/magiconair/properties/properties.go create mode 100644 vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 vendor/github.com/maratori/testpackage/LICENSE create mode 100644 vendor/github.com/maratori/testpackage/pkg/testpackage/testpackage.go create mode 100644 vendor/github.com/matoous/godox/.gitignore create mode 100644 vendor/github.com/matoous/godox/.golangci.yml create mode 100644 vendor/github.com/matoous/godox/.revive.toml create mode 100644 vendor/github.com/matoous/godox/LICENSE create mode 100644 vendor/github.com/matoous/godox/README.md create mode 100644 vendor/github.com/matoous/godox/go.mod create mode 100644 vendor/github.com/matoous/godox/go.sum create mode 100644 vendor/github.com/matoous/godox/godox.go create mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.mod create mode 100644 vendor/github.com/mattn/go-colorable/go.sum create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 vendor/github.com/mattn/go-isatty/renovate.json create mode 100644 vendor/github.com/mattn/go-runewidth/.travis.yml create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE create mode 100644 vendor/github.com/mattn/go-runewidth/README.md create mode 100644 vendor/github.com/mattn/go-runewidth/go.mod create mode 100644 vendor/github.com/mattn/go-runewidth/go.test.sh create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go 
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go create mode 100644 vendor/github.com/mbilski/exhaustivestruct/LICENSE create mode 100644 vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/mgechev/dots/.travis.yml create mode 100644 vendor/github.com/mgechev/dots/LICENSE create mode 100644 vendor/github.com/mgechev/dots/README.md create mode 100644 vendor/github.com/mgechev/dots/resolve.go create mode 100644 vendor/github.com/mgechev/revive/LICENSE create mode 100644 vendor/github.com/mgechev/revive/config/config.go create mode 100644 vendor/github.com/mgechev/revive/formatter/checkstyle.go create mode 100644 vendor/github.com/mgechev/revive/formatter/default.go create mode 100644 vendor/github.com/mgechev/revive/formatter/friendly.go create mode 100644 vendor/github.com/mgechev/revive/formatter/json.go create mode 100644 vendor/github.com/mgechev/revive/formatter/ndjson.go create mode 100644 vendor/github.com/mgechev/revive/formatter/plain.go create mode 100644 vendor/github.com/mgechev/revive/formatter/sarif.go create mode 100644 vendor/github.com/mgechev/revive/formatter/severity.go create mode 100644 vendor/github.com/mgechev/revive/formatter/stylish.go create mode 100644 vendor/github.com/mgechev/revive/formatter/unix.go create mode 100644 vendor/github.com/mgechev/revive/lint/config.go create mode 100644 vendor/github.com/mgechev/revive/lint/failure.go create mode 100644 vendor/github.com/mgechev/revive/lint/file.go create mode 100644 vendor/github.com/mgechev/revive/lint/formatter.go create mode 100644 vendor/github.com/mgechev/revive/lint/linter.go create mode 100644 vendor/github.com/mgechev/revive/lint/package.go create mode 100644 vendor/github.com/mgechev/revive/lint/rule.go create mode 100644 vendor/github.com/mgechev/revive/lint/utils.go create mode 100644 vendor/github.com/mgechev/revive/rule/add-constant.go create mode 100644 vendor/github.com/mgechev/revive/rule/argument-limit.go create mode 100644 vendor/github.com/mgechev/revive/rule/atomic.go create mode 100644 vendor/github.com/mgechev/revive/rule/bare-return.go create mode 100644 vendor/github.com/mgechev/revive/rule/blank-imports.go create mode 100644 vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go create mode 100644 vendor/github.com/mgechev/revive/rule/call-to-gc.go create mode 100644 vendor/github.com/mgechev/revive/rule/cognitive-complexity.go create mode 100644 vendor/github.com/mgechev/revive/rule/confusing-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/confusing-results.go create mode 100644 vendor/github.com/mgechev/revive/rule/constant-logical-expr.go create mode 100644 vendor/github.com/mgechev/revive/rule/context-as-argument.go create mode 100644 vendor/github.com/mgechev/revive/rule/context-keys-type.go create mode 100644 vendor/github.com/mgechev/revive/rule/cyclomatic.go create mode 100644 vendor/github.com/mgechev/revive/rule/deep-exit.go create mode 100644 vendor/github.com/mgechev/revive/rule/defer.go create mode 100644 vendor/github.com/mgechev/revive/rule/dot-imports.go create mode 100644 vendor/github.com/mgechev/revive/rule/duplicated-imports.go create mode 100644 vendor/github.com/mgechev/revive/rule/early-return.go create mode 100644 
vendor/github.com/mgechev/revive/rule/empty-block.go create mode 100644 vendor/github.com/mgechev/revive/rule/empty-lines.go create mode 100644 vendor/github.com/mgechev/revive/rule/error-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/error-return.go create mode 100644 vendor/github.com/mgechev/revive/rule/error-strings.go create mode 100644 vendor/github.com/mgechev/revive/rule/errorf.go create mode 100644 vendor/github.com/mgechev/revive/rule/exported.go create mode 100644 vendor/github.com/mgechev/revive/rule/file-header.go create mode 100644 vendor/github.com/mgechev/revive/rule/flag-param.go create mode 100644 vendor/github.com/mgechev/revive/rule/function-length.go create mode 100644 vendor/github.com/mgechev/revive/rule/function-result-limit.go create mode 100644 vendor/github.com/mgechev/revive/rule/get-return.go create mode 100644 vendor/github.com/mgechev/revive/rule/identical-branches.go create mode 100644 vendor/github.com/mgechev/revive/rule/if-return.go create mode 100644 vendor/github.com/mgechev/revive/rule/import-shadowing.go create mode 100644 vendor/github.com/mgechev/revive/rule/imports-blacklist.go create mode 100644 vendor/github.com/mgechev/revive/rule/increment-decrement.go create mode 100644 vendor/github.com/mgechev/revive/rule/indent-error-flow.go create mode 100644 vendor/github.com/mgechev/revive/rule/line-length-limit.go create mode 100644 vendor/github.com/mgechev/revive/rule/max-public-structs.go create mode 100644 vendor/github.com/mgechev/revive/rule/modifies-param.go create mode 100644 vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go create mode 100644 vendor/github.com/mgechev/revive/rule/nested-structs.go create mode 100644 vendor/github.com/mgechev/revive/rule/package-comments.go create mode 100644 vendor/github.com/mgechev/revive/rule/range-val-address.go create mode 100644 vendor/github.com/mgechev/revive/rule/range-val-in-closure.go create mode 100644 vendor/github.com/mgechev/revive/rule/range.go create mode 100644 vendor/github.com/mgechev/revive/rule/receiver-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go create mode 100644 vendor/github.com/mgechev/revive/rule/string-format.go create mode 100644 vendor/github.com/mgechev/revive/rule/string-of-int.go create mode 100644 vendor/github.com/mgechev/revive/rule/struct-tag.go create mode 100644 vendor/github.com/mgechev/revive/rule/superfluous-else.go create mode 100644 vendor/github.com/mgechev/revive/rule/time-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/unconditional-recursion.go create mode 100644 vendor/github.com/mgechev/revive/rule/unexported-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/unexported-return.go create mode 100644 vendor/github.com/mgechev/revive/rule/unhandled-error.go create mode 100644 vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go create mode 100644 vendor/github.com/mgechev/revive/rule/unreachable-code.go create mode 100644 vendor/github.com/mgechev/revive/rule/unused-param.go create mode 100644 vendor/github.com/mgechev/revive/rule/unused-receiver.go create mode 100644 vendor/github.com/mgechev/revive/rule/useless-break.go create mode 100644 vendor/github.com/mgechev/revive/rule/utils.go create mode 100644 vendor/github.com/mgechev/revive/rule/var-declarations.go create mode 100644 vendor/github.com/mgechev/revive/rule/var-naming.go create mode 100644 vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go create mode 100644 
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/go.mod create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/moricho/tparallel/.gitignore create mode 100644 vendor/github.com/moricho/tparallel/.goreleaser.yml create mode 100644 vendor/github.com/moricho/tparallel/LICENSE create mode 100644 vendor/github.com/moricho/tparallel/Makefile create mode 100644 vendor/github.com/moricho/tparallel/README.md create mode 100644 vendor/github.com/moricho/tparallel/go.mod create mode 100644 vendor/github.com/moricho/tparallel/go.sum create mode 100644 vendor/github.com/moricho/tparallel/pkg/ssafunc/ssafunc.go create mode 100644 vendor/github.com/moricho/tparallel/pkg/ssainstr/ssainstr.go create mode 100644 vendor/github.com/moricho/tparallel/testmap.go create mode 100644 vendor/github.com/moricho/tparallel/tparallel.go create mode 100644 vendor/github.com/nakabonne/nestif/.gitignore create mode 100644 vendor/github.com/nakabonne/nestif/LICENSE create mode 100644 vendor/github.com/nakabonne/nestif/README.md create mode 100644 vendor/github.com/nakabonne/nestif/go.mod create mode 100644 vendor/github.com/nakabonne/nestif/go.sum create mode 100644 vendor/github.com/nakabonne/nestif/nestif.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/.gitignore create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/Makefile create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/README.md create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/go.mod create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/go.sum create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/match/match.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go create mode 100644 vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go create mode 100644 vendor/github.com/nishanths/exhaustive/.gitignore create mode 100644 vendor/github.com/nishanths/exhaustive/.travis.yml create mode 100644 vendor/github.com/nishanths/exhaustive/LICENSE create mode 100644 vendor/github.com/nishanths/exhaustive/README.md create mode 100644 vendor/github.com/nishanths/exhaustive/enum.go create mode 100644 
vendor/github.com/nishanths/exhaustive/exhaustive.go create mode 100644 vendor/github.com/nishanths/exhaustive/generated.go create mode 100644 vendor/github.com/nishanths/exhaustive/go.mod create mode 100644 vendor/github.com/nishanths/exhaustive/go.sum create mode 100644 vendor/github.com/nishanths/exhaustive/regexp_flag.go create mode 100644 vendor/github.com/nishanths/exhaustive/switch.go create mode 100644 vendor/github.com/nishanths/predeclared/LICENSE create mode 100644 vendor/github.com/nishanths/predeclared/passes/predeclared/go18.go create mode 100644 vendor/github.com/nishanths/predeclared/passes/predeclared/pre_go18.go create mode 100644 vendor/github.com/nishanths/predeclared/passes/predeclared/predeclared.go create mode 100644 vendor/github.com/olekukonko/tablewriter/.gitignore create mode 100644 vendor/github.com/olekukonko/tablewriter/.travis.yml create mode 100644 vendor/github.com/olekukonko/tablewriter/LICENSE.md create mode 100644 vendor/github.com/olekukonko/tablewriter/README.md create mode 100644 vendor/github.com/olekukonko/tablewriter/csv.go create mode 100644 vendor/github.com/olekukonko/tablewriter/go.mod create mode 100644 vendor/github.com/olekukonko/tablewriter/go.sum create mode 100644 vendor/github.com/olekukonko/tablewriter/table.go create mode 100644 vendor/github.com/olekukonko/tablewriter/table_with_color.go create mode 100644 vendor/github.com/olekukonko/tablewriter/util.go create mode 100644 vendor/github.com/olekukonko/tablewriter/wrap.go create mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore create mode 100644 vendor/github.com/pelletier/go-toml/.gitignore create mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md create mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 vendor/github.com/pelletier/go-toml/Makefile create mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/pelletier/go-toml/README.md create mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.sh create mode 100644 vendor/github.com/pelletier/go-toml/doc.go create mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml create mode 100644 vendor/github.com/pelletier/go-toml/example.toml create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh create mode 100644 vendor/github.com/pelletier/go-toml/go.mod create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 vendor/github.com/pelletier/go-toml/localtime.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml create mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml create mode 100644 vendor/github.com/pelletier/go-toml/parser.go create mode 100644 vendor/github.com/pelletier/go-toml/position.go create mode 100644 vendor/github.com/pelletier/go-toml/token.go create mode 100644 vendor/github.com/pelletier/go-toml/toml.go create mode 100644 vendor/github.com/pelletier/go-toml/tomlpub.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_writepub.go 
create mode 100644 vendor/github.com/phayes/checkstyle/.scrutinizer.yml create mode 100644 vendor/github.com/phayes/checkstyle/LICENSE create mode 100644 vendor/github.com/phayes/checkstyle/README.md create mode 100644 vendor/github.com/phayes/checkstyle/checkstyle.go create mode 100644 vendor/github.com/phayes/checkstyle/godoc.go create mode 100644 vendor/github.com/polyfloyd/go-errorlint/LICENSE create mode 100644 vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go create mode 100644 vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go create mode 100644 vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go delete mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml delete mode 100644 vendor/github.com/prometheus/procfs/.travis.yml create mode 100644 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/SECURITY.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_armx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_others.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_x86.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go create mode 100644 vendor/github.com/prometheus/procfs/fscache.go create mode 100644 vendor/github.com/prometheus/procfs/go.mod create mode 100644 vendor/github.com/prometheus/procfs/go.sum create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go create mode 100644 vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_ip_socket.go create mode 100644 vendor/github.com/prometheus/procfs/net_protocols.go create mode 100644 vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_tcp.go create mode 100644 vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/slab.go create mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/prometheus/procfs/vm.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/LICENSE create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/compile.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gen_operations.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gogrep.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/instructions.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/match.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operation_string.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operations.gen.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/parse.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/slices.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/golist/golist.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/internal/xtypes/xtypes.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/nodetag/nodetag.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/bundle.go create mode 100644 
vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/goutil.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/resolve.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/importer.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/libdsl.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/compile.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/debug_info.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/disasm.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/env.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/eval.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcode_string.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcodes.gen.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/quasigo.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/utils.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/patternop_string.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go create mode 100644 vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/LICENSE create mode 100644 vendor/github.com/quasilyte/regex/syntax/README.md create mode 100644 vendor/github.com/quasilyte/regex/syntax/ast.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/errors.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/go.mod create mode 100644 vendor/github.com/quasilyte/regex/syntax/lexer.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/operation.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/operation_string.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/parser.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/pos.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go create mode 100644 vendor/github.com/quasilyte/regex/syntax/utils.go create mode 100644 vendor/github.com/ryancurrah/gomodguard/.dockerignore create mode 100644 vendor/github.com/ryancurrah/gomodguard/.gitignore create mode 100644 vendor/github.com/ryancurrah/gomodguard/.golangci.yml create mode 100644 vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml create mode 100644 vendor/github.com/ryancurrah/gomodguard/Dockerfile create mode 100644 vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser create mode 100644 vendor/github.com/ryancurrah/gomodguard/LICENSE create mode 100644 vendor/github.com/ryancurrah/gomodguard/Makefile create mode 100644 vendor/github.com/ryancurrah/gomodguard/README.md 
create mode 100644 vendor/github.com/ryancurrah/gomodguard/cmd.go create mode 100644 vendor/github.com/ryancurrah/gomodguard/go.mod create mode 100644 vendor/github.com/ryancurrah/gomodguard/go.sum create mode 100644 vendor/github.com/ryancurrah/gomodguard/gomodguard.go create mode 100644 vendor/github.com/ryanrolds/sqlclosecheck/LICENSE create mode 100644 vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go create mode 100644 vendor/github.com/sanposhiho/wastedassign/v2/LICENSE create mode 100644 vendor/github.com/sanposhiho/wastedassign/v2/README.md create mode 100644 vendor/github.com/sanposhiho/wastedassign/v2/go.mod create mode 100644 vendor/github.com/sanposhiho/wastedassign/v2/go.sum create mode 100644 vendor/github.com/sanposhiho/wastedassign/v2/wastedassign.go create mode 100644 vendor/github.com/securego/gosec/v2/.gitignore create mode 100644 vendor/github.com/securego/gosec/v2/.golangci.yml create mode 100644 vendor/github.com/securego/gosec/v2/.goreleaser.yml create mode 100644 vendor/github.com/securego/gosec/v2/Dockerfile create mode 100644 vendor/github.com/securego/gosec/v2/LICENSE.txt create mode 100644 vendor/github.com/securego/gosec/v2/Makefile create mode 100644 vendor/github.com/securego/gosec/v2/README.md create mode 100644 vendor/github.com/securego/gosec/v2/USERS.md create mode 100644 vendor/github.com/securego/gosec/v2/action.yml create mode 100644 vendor/github.com/securego/gosec/v2/analyzer.go create mode 100644 vendor/github.com/securego/gosec/v2/call_list.go create mode 100644 vendor/github.com/securego/gosec/v2/config.go create mode 100644 vendor/github.com/securego/gosec/v2/cwe/data.go create mode 100644 vendor/github.com/securego/gosec/v2/cwe/types.go create mode 100644 vendor/github.com/securego/gosec/v2/entrypoint.sh create mode 100644 vendor/github.com/securego/gosec/v2/errors.go create mode 100644 vendor/github.com/securego/gosec/v2/go.mod create mode 100644 vendor/github.com/securego/gosec/v2/go.sum create mode 100644 vendor/github.com/securego/gosec/v2/helpers.go create mode 100644 vendor/github.com/securego/gosec/v2/import_tracker.go create mode 100644 vendor/github.com/securego/gosec/v2/install.sh create mode 100644 vendor/github.com/securego/gosec/v2/issue.go create mode 100644 vendor/github.com/securego/gosec/v2/renovate.json create mode 100644 vendor/github.com/securego/gosec/v2/report.go create mode 100644 vendor/github.com/securego/gosec/v2/resolve.go create mode 100644 vendor/github.com/securego/gosec/v2/rule.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/archive.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/bad_defer.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/bind.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/blocklist.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/errors.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/fileperms.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/integer_overflow.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/pprof.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/rand.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/readfile.go create mode 100644 
vendor/github.com/securego/gosec/v2/rules/rsa.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/rulelist.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/sql.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/ssh.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/ssrf.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/subproc.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/tempfiles.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/templates.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/tls.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/tls_config.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/unsafe.go create mode 100644 vendor/github.com/securego/gosec/v2/rules/weakcrypto.go create mode 100644 vendor/github.com/shazow/go-diff/LICENSE create mode 100644 vendor/github.com/shazow/go-diff/difflib/differ.go create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/go.mod create mode 100644 vendor/github.com/sirupsen/logrus/go.sum create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go create mode 100644 vendor/github.com/sonatard/noctx/.gitignore create mode 100644 vendor/github.com/sonatard/noctx/.golangci.yml create mode 100644 vendor/github.com/sonatard/noctx/LICENSE create mode 100644 vendor/github.com/sonatard/noctx/Makefile create mode 100644 vendor/github.com/sonatard/noctx/README.md create mode 100644 vendor/github.com/sonatard/noctx/go.mod create mode 100644 vendor/github.com/sonatard/noctx/go.sum create mode 100644 vendor/github.com/sonatard/noctx/ngfunc/main.go create mode 100644 vendor/github.com/sonatard/noctx/ngfunc/report.go create mode 100644 
vendor/github.com/sonatard/noctx/ngfunc/types.go create mode 100644 vendor/github.com/sonatard/noctx/noctx.go create mode 100644 vendor/github.com/sonatard/noctx/reqwithoutctx/main.go create mode 100644 vendor/github.com/sonatard/noctx/reqwithoutctx/report.go create mode 100644 vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go create mode 100644 vendor/github.com/sourcegraph/go-diff/LICENSE create mode 100644 vendor/github.com/sourcegraph/go-diff/diff/diff.go create mode 100644 vendor/github.com/sourcegraph/go-diff/diff/doc.go create mode 100644 vendor/github.com/sourcegraph/go-diff/diff/parse.go create mode 100644 vendor/github.com/sourcegraph/go-diff/diff/print.go create mode 100644 vendor/github.com/sourcegraph/go-diff/diff/reader_util.go create mode 100644 vendor/github.com/spf13/afero/.gitignore create mode 100644 vendor/github.com/spf13/afero/.travis.yml create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 vendor/github.com/spf13/afero/README.md create mode 100644 vendor/github.com/spf13/afero/afero.go create mode 100644 vendor/github.com/spf13/afero/appveyor.yml create mode 100644 vendor/github.com/spf13/afero/basepath.go create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 vendor/github.com/spf13/afero/const_bsds.go create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 vendor/github.com/spf13/afero/go.mod create mode 100644 vendor/github.com/spf13/afero/go.sum create mode 100644 vendor/github.com/spf13/afero/httpFs.go create mode 100644 vendor/github.com/spf13/afero/iofs.go create mode 100644 vendor/github.com/spf13/afero/ioutil.go create mode 100644 vendor/github.com/spf13/afero/lstater.go create mode 100644 vendor/github.com/spf13/afero/match.go create mode 100644 vendor/github.com/spf13/afero/mem/dir.go create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 vendor/github.com/spf13/afero/mem/file.go create mode 100644 vendor/github.com/spf13/afero/memmap.go create mode 100644 vendor/github.com/spf13/afero/os.go create mode 100644 vendor/github.com/spf13/afero/path.go create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 vendor/github.com/spf13/afero/regexpfs.go create mode 100644 vendor/github.com/spf13/afero/symlink.go create mode 100644 vendor/github.com/spf13/afero/unionFile.go create mode 100644 vendor/github.com/spf13/afero/util.go create mode 100644 vendor/github.com/spf13/cast/.gitignore create mode 100644 vendor/github.com/spf13/cast/.travis.yml create mode 100644 vendor/github.com/spf13/cast/LICENSE create mode 100644 vendor/github.com/spf13/cast/Makefile create mode 100644 vendor/github.com/spf13/cast/README.md create mode 100644 vendor/github.com/spf13/cast/cast.go create mode 100644 vendor/github.com/spf13/cast/caste.go create mode 100644 vendor/github.com/spf13/cast/go.mod create mode 100644 vendor/github.com/spf13/cast/go.sum create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml delete mode 100644 vendor/github.com/spf13/cobra/.travis.yml create mode 100644 vendor/github.com/spf13/cobra/CHANGELOG.md create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md create mode 100644 vendor/github.com/spf13/cobra/Makefile create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go create mode 100644 vendor/github.com/spf13/cobra/completions.go create mode 100644 
vendor/github.com/spf13/cobra/fish_completions.go create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md create mode 100644 vendor/github.com/spf13/cobra/go.mod create mode 100644 vendor/github.com/spf13/cobra/go.sum create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md create mode 100644 vendor/github.com/spf13/cobra/user_guide.md create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md create mode 100644 vendor/github.com/spf13/jwalterweatherman/.gitignore create mode 100644 vendor/github.com/spf13/jwalterweatherman/LICENSE create mode 100644 vendor/github.com/spf13/jwalterweatherman/README.md create mode 100644 vendor/github.com/spf13/jwalterweatherman/default_notepad.go create mode 100644 vendor/github.com/spf13/jwalterweatherman/go.mod create mode 100644 vendor/github.com/spf13/jwalterweatherman/log_counter.go create mode 100644 vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 vendor/github.com/spf13/pflag/float32_slice.go create mode 100644 vendor/github.com/spf13/pflag/float64_slice.go create mode 100644 vendor/github.com/spf13/pflag/go.mod create mode 100644 vendor/github.com/spf13/pflag/go.sum create mode 100644 vendor/github.com/spf13/pflag/int32_slice.go create mode 100644 vendor/github.com/spf13/pflag/int64_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go create mode 100644 vendor/github.com/spf13/pflag/string_to_int64.go create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go create mode 100644 vendor/github.com/spf13/viper/.editorconfig create mode 100644 vendor/github.com/spf13/viper/.gitignore create mode 100644 vendor/github.com/spf13/viper/.golangci.yml create mode 100644 vendor/github.com/spf13/viper/LICENSE create mode 100644 vendor/github.com/spf13/viper/Makefile create mode 100644 vendor/github.com/spf13/viper/README.md create mode 100644 vendor/github.com/spf13/viper/TROUBLESHOOTING.md create mode 100644 vendor/github.com/spf13/viper/flags.go create mode 100644 vendor/github.com/spf13/viper/go.mod create mode 100644 vendor/github.com/spf13/viper/go.sum create mode 100644 vendor/github.com/spf13/viper/util.go create mode 100644 vendor/github.com/spf13/viper/viper.go create mode 100644 vendor/github.com/spf13/viper/watch.go create mode 100644 vendor/github.com/spf13/viper/watch_wasm.go create mode 100644 vendor/github.com/ssgreg/nlreturn/v2/LICENSE create mode 100644 vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go create mode 100644 vendor/github.com/stretchr/objx/.codeclimate.yml create mode 100644 vendor/github.com/stretchr/objx/.gitignore create mode 100644 vendor/github.com/stretchr/objx/.travis.yml create mode 100644 vendor/github.com/stretchr/objx/Gopkg.lock create mode 100644 vendor/github.com/stretchr/objx/Gopkg.toml create mode 100644 vendor/github.com/stretchr/objx/LICENSE create mode 100644 vendor/github.com/stretchr/objx/README.md create mode 100644 vendor/github.com/stretchr/objx/Taskfile.yml create mode 100644 vendor/github.com/stretchr/objx/accessors.go create mode 100644 vendor/github.com/stretchr/objx/constants.go create mode 100644 vendor/github.com/stretchr/objx/conversions.go create mode 100644 vendor/github.com/stretchr/objx/doc.go create mode 
100644 vendor/github.com/stretchr/objx/map.go create mode 100644 vendor/github.com/stretchr/objx/mutations.go create mode 100644 vendor/github.com/stretchr/objx/security.go create mode 100644 vendor/github.com/stretchr/objx/tests.go create mode 100644 vendor/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 vendor/github.com/stretchr/objx/value.go create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_order.go create mode 100644 vendor/github.com/stretchr/testify/mock/doc.go create mode 100644 vendor/github.com/stretchr/testify/mock/mock.go create mode 100644 vendor/github.com/subosito/gotenv/.env create mode 100644 vendor/github.com/subosito/gotenv/.env.invalid create mode 100644 vendor/github.com/subosito/gotenv/.gitignore create mode 100644 vendor/github.com/subosito/gotenv/.travis.yml create mode 100644 vendor/github.com/subosito/gotenv/CHANGELOG.md create mode 100644 vendor/github.com/subosito/gotenv/LICENSE create mode 100644 vendor/github.com/subosito/gotenv/README.md create mode 100644 vendor/github.com/subosito/gotenv/appveyor.yml create mode 100644 vendor/github.com/subosito/gotenv/gotenv.go create mode 100644 vendor/github.com/tdakkota/asciicheck/.gitignore create mode 100644 vendor/github.com/tdakkota/asciicheck/LICENSE create mode 100644 vendor/github.com/tdakkota/asciicheck/README.md create mode 100644 vendor/github.com/tdakkota/asciicheck/ascii.go create mode 100644 vendor/github.com/tdakkota/asciicheck/asciicheck.go create mode 100644 vendor/github.com/tdakkota/asciicheck/go.mod create mode 100644 vendor/github.com/tetafro/godot/.gitignore create mode 100644 vendor/github.com/tetafro/godot/.godot.yaml create mode 100644 vendor/github.com/tetafro/godot/.golangci.yml create mode 100644 vendor/github.com/tetafro/godot/.goreleaser.yml create mode 100644 vendor/github.com/tetafro/godot/LICENSE create mode 100644 vendor/github.com/tetafro/godot/Makefile create mode 100644 vendor/github.com/tetafro/godot/README.md create mode 100644 vendor/github.com/tetafro/godot/checks.go create mode 100644 vendor/github.com/tetafro/godot/getters.go create mode 100644 vendor/github.com/tetafro/godot/go.mod create mode 100644 vendor/github.com/tetafro/godot/go.sum create mode 100644 vendor/github.com/tetafro/godot/godot.go create mode 100644 vendor/github.com/tetafro/godot/settings.go create mode 100644 vendor/github.com/timakin/bodyclose/LICENSE create mode 100644 vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go create mode 100644 vendor/github.com/tomarrell/wrapcheck/v2/LICENSE create mode 100644 vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/.editorconfig create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/.gitattributes create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/.gitignore create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/.goreleaser.yml create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/Dockerfile create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/LICENSE create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/Makefile create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/README.md create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/action.yml create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/analyzer.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/argument.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/assign.go create mode 100644 
vendor/github.com/tommy-muehle/go-mnd/v2/checks/case.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/checks.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/condition.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/operation.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/checks/return.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/config/config.go create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/entrypoint.sh create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/go.mod create mode 100644 vendor/github.com/tommy-muehle/go-mnd/v2/go.sum create mode 100644 vendor/github.com/ultraware/funlen/LICENSE create mode 100644 vendor/github.com/ultraware/funlen/README.md create mode 100644 vendor/github.com/ultraware/funlen/main.go create mode 100644 vendor/github.com/ultraware/whitespace/LICENSE create mode 100644 vendor/github.com/ultraware/whitespace/README.md create mode 100644 vendor/github.com/ultraware/whitespace/main.go create mode 100644 vendor/github.com/uudashr/gocognit/LICENSE create mode 100644 vendor/github.com/uudashr/gocognit/README.md create mode 100644 vendor/github.com/uudashr/gocognit/doc.go create mode 100644 vendor/github.com/uudashr/gocognit/go.mod create mode 100644 vendor/github.com/uudashr/gocognit/go.sum create mode 100644 vendor/github.com/uudashr/gocognit/gocognit.go create mode 100644 vendor/github.com/yeya24/promlinter/.gitignore create mode 100644 vendor/github.com/yeya24/promlinter/LICENSE create mode 100644 vendor/github.com/yeya24/promlinter/Makefile create mode 100644 vendor/github.com/yeya24/promlinter/README.md create mode 100644 vendor/github.com/yeya24/promlinter/go.mod create mode 100644 vendor/github.com/yeya24/promlinter/go.sum create mode 100644 vendor/github.com/yeya24/promlinter/promlinter.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create 
mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/sys/windows/aliases.go create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/width/kind_string.go create mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/width/transform.go create mode 100644 vendor/golang.org/x/text/width/trieval.go create mode 100644 vendor/golang.org/x/text/width/width.go create mode 100644 vendor/golang.org/x/tools/go/analysis/analysis.go create mode 100644 vendor/golang.org/x/tools/go/analysis/diagnostic.go create mode 100644 vendor/golang.org/x/tools/go/analysis/doc.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag_old.go create mode 100644 
vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/composite/whitelist.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/printf/types.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/shift/dead.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go create mode 100644 vendor/golang.org/x/tools/go/analysis/validate.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 
vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 vendor/golang.org/x/tools/go/cfg/builder.go create mode 100644 vendor/golang.org/x/tools/go/cfg/cfg.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo.go create mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/go/loader/doc.go create mode 100644 vendor/golang.org/x/tools/go/loader/loader.go create mode 100644 vendor/golang.org/x/tools/go/loader/util.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/blockopt.go create mode 100644 vendor/golang.org/x/tools/go/ssa/builder.go create mode 100644 vendor/golang.org/x/tools/go/ssa/const.go create mode 100644 vendor/golang.org/x/tools/go/ssa/create.go create mode 100644 vendor/golang.org/x/tools/go/ssa/doc.go create mode 100644 vendor/golang.org/x/tools/go/ssa/dom.go create mode 100644 vendor/golang.org/x/tools/go/ssa/emit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/func.go create mode 100644 vendor/golang.org/x/tools/go/ssa/identical.go create mode 100644 vendor/golang.org/x/tools/go/ssa/identical_17.go create mode 100644 vendor/golang.org/x/tools/go/ssa/lift.go create mode 100644 vendor/golang.org/x/tools/go/ssa/lvalue.go create mode 100644 vendor/golang.org/x/tools/go/ssa/methods.go create mode 100644 vendor/golang.org/x/tools/go/ssa/mode.go create mode 100644 vendor/golang.org/x/tools/go/ssa/print.go create mode 100644 vendor/golang.org/x/tools/go/ssa/sanity.go create mode 100644 vendor/golang.org/x/tools/go/ssa/source.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssa.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/load.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/switch.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/visit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/testmain.go create mode 100644 vendor/golang.org/x/tools/go/ssa/util.go create mode 100644 vendor/golang.org/x/tools/go/ssa/wrappers.go create mode 100644 
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go create mode 100644 vendor/golang.org/x/tools/imports/forward.go create mode 100644 vendor/golang.org/x/tools/internal/analysisinternal/analysis.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/imports/fix.go create mode 100644 vendor/golang.org/x/tools/internal/imports/imports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod.go create mode 100644 vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 vendor/golang.org/x/tools/internal/imports/sortimports.go create mode 100644 vendor/golang.org/x/tools/internal/imports/zstdlib.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/doc.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/notypeparams.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 
100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go create mode 100644 vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/go12.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/go.mod create mode 100644 vendor/google.golang.org/grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 
vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 
vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/AUTHORS create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go create mode 100644 vendor/google.golang.org/protobuf/compiler/protogen/protogen.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 
vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 
vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml create mode 100644 vendor/honnef.co/go/tools/LICENSE create mode 100644 vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY create mode 100644 vendor/honnef.co/go/tools/analysis/code/code.go create mode 100644 vendor/honnef.co/go/tools/analysis/code/visit.go create mode 100644 vendor/honnef.co/go/tools/analysis/edit/edit.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/deprecated.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/directives.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/generated.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/purity.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/token.go create mode 100644 vendor/honnef.co/go/tools/analysis/facts/typedness/typedness.go create mode 100644 vendor/honnef.co/go/tools/analysis/lint/lint.go create mode 100644 vendor/honnef.co/go/tools/analysis/report/report.go create mode 100644 vendor/honnef.co/go/tools/config/config.go create 
mode 100644 vendor/honnef.co/go/tools/config/example.conf create mode 100644 vendor/honnef.co/go/tools/go/ast/astutil/upstream.go create mode 100644 vendor/honnef.co/go/tools/go/ast/astutil/util.go create mode 100644 vendor/honnef.co/go/tools/go/ir/LICENSE create mode 100644 vendor/honnef.co/go/tools/go/ir/UPSTREAM create mode 100644 vendor/honnef.co/go/tools/go/ir/blockopt.go create mode 100644 vendor/honnef.co/go/tools/go/ir/builder.go create mode 100644 vendor/honnef.co/go/tools/go/ir/const.go create mode 100644 vendor/honnef.co/go/tools/go/ir/create.go create mode 100644 vendor/honnef.co/go/tools/go/ir/doc.go create mode 100644 vendor/honnef.co/go/tools/go/ir/dom.go create mode 100644 vendor/honnef.co/go/tools/go/ir/emit.go create mode 100644 vendor/honnef.co/go/tools/go/ir/exits.go create mode 100644 vendor/honnef.co/go/tools/go/ir/func.go create mode 100644 vendor/honnef.co/go/tools/go/ir/html.go create mode 100644 vendor/honnef.co/go/tools/go/ir/identical.go create mode 100644 vendor/honnef.co/go/tools/go/ir/identical_17.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/load.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/loops.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/stub.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/switch.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/terminates.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/util.go create mode 100644 vendor/honnef.co/go/tools/go/ir/irutil/visit.go create mode 100644 vendor/honnef.co/go/tools/go/ir/lift.go create mode 100644 vendor/honnef.co/go/tools/go/ir/lvalue.go create mode 100644 vendor/honnef.co/go/tools/go/ir/methods.go create mode 100644 vendor/honnef.co/go/tools/go/ir/mode.go create mode 100644 vendor/honnef.co/go/tools/go/ir/print.go create mode 100644 vendor/honnef.co/go/tools/go/ir/sanity.go create mode 100644 vendor/honnef.co/go/tools/go/ir/source.go create mode 100644 vendor/honnef.co/go/tools/go/ir/ssa.go create mode 100644 vendor/honnef.co/go/tools/go/ir/staticcheck.conf create mode 100644 vendor/honnef.co/go/tools/go/ir/util.go create mode 100644 vendor/honnef.co/go/tools/go/ir/wrappers.go create mode 100644 vendor/honnef.co/go/tools/go/ir/write.go create mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/upstream.go create mode 100644 vendor/honnef.co/go/tools/go/types/typeutil/util.go create mode 100644 vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go create mode 100644 vendor/honnef.co/go/tools/internal/sharedcheck/lint.go create mode 100644 vendor/honnef.co/go/tools/knowledge/arg.go create mode 100644 vendor/honnef.co/go/tools/knowledge/deprecated.go create mode 100644 vendor/honnef.co/go/tools/pattern/convert.go create mode 100644 vendor/honnef.co/go/tools/pattern/doc.go create mode 100644 vendor/honnef.co/go/tools/pattern/fuzz.go create mode 100644 vendor/honnef.co/go/tools/pattern/lexer.go create mode 100644 vendor/honnef.co/go/tools/pattern/match.go create mode 100644 vendor/honnef.co/go/tools/pattern/parser.go create mode 100644 vendor/honnef.co/go/tools/pattern/pattern.go create mode 100644 vendor/honnef.co/go/tools/printf/fuzz.go create mode 100644 vendor/honnef.co/go/tools/printf/printf.go create mode 100644 vendor/honnef.co/go/tools/simple/analysis.go create mode 100644 vendor/honnef.co/go/tools/simple/doc.go create mode 100644 vendor/honnef.co/go/tools/simple/lint.go create mode 100644 vendor/honnef.co/go/tools/staticcheck/analysis.go create mode 100644 
vendor/honnef.co/go/tools/staticcheck/buildtag.go create mode 100644 vendor/honnef.co/go/tools/staticcheck/doc.go create mode 100644 vendor/honnef.co/go/tools/staticcheck/lint.go create mode 100644 vendor/honnef.co/go/tools/staticcheck/rules.go create mode 100644 vendor/honnef.co/go/tools/staticcheck/structtag.go create mode 100644 vendor/honnef.co/go/tools/stylecheck/analysis.go create mode 100644 vendor/honnef.co/go/tools/stylecheck/doc.go create mode 100644 vendor/honnef.co/go/tools/stylecheck/lint.go create mode 100644 vendor/honnef.co/go/tools/stylecheck/names.go create mode 100644 vendor/honnef.co/go/tools/unused/edge.go create mode 100644 vendor/honnef.co/go/tools/unused/edgekind_string.go create mode 100644 vendor/honnef.co/go/tools/unused/implements.go create mode 100644 vendor/honnef.co/go/tools/unused/typemap/identical.go create mode 100644 vendor/honnef.co/go/tools/unused/typemap/map.go create mode 100644 vendor/honnef.co/go/tools/unused/unused.go create mode 100644 vendor/mvdan.cc/gofumpt/LICENSE create mode 100644 vendor/mvdan.cc/gofumpt/LICENSE.google create mode 100644 vendor/mvdan.cc/gofumpt/format/format.go create mode 100644 vendor/mvdan.cc/interfacer/LICENSE create mode 100644 vendor/mvdan.cc/interfacer/check/cache.go create mode 100644 vendor/mvdan.cc/interfacer/check/check.go create mode 100644 vendor/mvdan.cc/interfacer/check/types.go create mode 100644 vendor/mvdan.cc/lint/.travis.yml create mode 100644 vendor/mvdan.cc/lint/LICENSE create mode 100644 vendor/mvdan.cc/lint/README.md create mode 100644 vendor/mvdan.cc/lint/lint.go create mode 100644 vendor/mvdan.cc/unparam/LICENSE create mode 100644 vendor/mvdan.cc/unparam/check/check.go

diff --git a/Makefile b/Makefile
index aa53459f3..a20853f73 100644
--- a/Makefile
+++ b/Makefile
@@ -8,6 +8,7 @@
 GOPATH?=~/go
 GO:=go
 GO_TEST_FLAGS:=-mod=vendor -count=1 -v -cover -race
+PROTO_TMP:=$(shell pwd)/proto.tmp
 
 export JAEGER_SERVICE_NAME:=unittest
 export JAEGER_SAMPLER_TYPE:=const
@@ -37,11 +38,19 @@ jsonapi:
 		-path tools/testserver/simple/open-api.go \
 		-source tools/testserver/simple/open-api.json
 
-lint: $(GOPATH)/bin/golangci-lint
-	$(GOPATH)/bin/golangci-lint run
+grpc: tools/testserver/math/math.pb.go
 
-$(GOPATH)/bin/golangci-lint:
-	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.21.0
+tools/testserver/math/math.pb.go: tools/testserver/math/math.proto
+	mkdir -p $(PROTO_TMP)
+	GOBIN=$(PROTO_TMP) $(GO) install -mod=vendor google.golang.org/grpc/cmd/protoc-gen-go-grpc
+	GOBIN=$(PROTO_TMP) $(GO) install -mod=vendor google.golang.org/protobuf/cmd/protoc-gen-go
+	protoc --plugin=$(PROTO_TMP)/protoc-gen-go-grpc \
+		--plugin=$(PROTO_TMP)/protoc-gen-go \
+		-I=./ --go-grpc_out=$(dir $@) --go_out=$(dir $@) $<
+	rm -rf $(PROTO_TMP)
+
+lint:
+	$(GO) run -mod=vendor github.com/golangci/golangci-lint/cmd/golangci-lint run --timeout 2m
 
 test:
 	$(GO) test $(GO_TEST_FLAGS) -short ./...
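The new `grpc` target above generates the Go bindings for tools/testserver/math/math.proto using the vendored protoc plugins. As a rough illustration only (this is not the patch's grpc/server.go), the sketch below shows how such generated code is typically served through a grpc.Server wired with the go-grpc-prometheus interceptors that this change pins in go.mod; the port 3001 mirrors the one newly published in docker-compose.yml, and the commented-out registration call uses an assumed name for the generated math service.

```go
// Hypothetical sketch of serving generated gRPC code with metrics interceptors.
// Assumptions: port 3001 and the math.RegisterMathServiceServer name are illustrative.
package main

import (
	"log"
	"net"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Chain the prometheus interceptors; in a full middleware setup the same
	// chains would also carry logging and tracing interceptors.
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.ChainStreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)

	// Register generated services here, e.g. the math test service built by
	// `make grpc` (registration function name assumed):
	// math.RegisterMathServiceServer(srv, &mathServer{})

	// Pre-initialize per-method metrics for all registered services.
	grpc_prometheus.Register(srv)

	lis, err := net.Listen("tcp", ":3001")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}
```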
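On the client side, the grpc-ecosystem dependencies added to go.mod further down provide matching unary and stream client interceptors. A minimal, hypothetical dial sketch (again not the patch's grpc/client.go; the target address and insecure transport are assumptions for local testing):

```go
// Hypothetical sketch of dialing a gRPC backend with client-side metrics interceptors.
package main

import (
	"log"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:3001",
		grpc.WithInsecure(), // real services would configure TLS / OAuth2 credentials here
		grpc.WithChainUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		grpc.WithChainStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// A generated client would be constructed from conn, e.g. (name assumed):
	// client := math.NewMathServiceClient(conn)
}
```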
diff --git a/README.md b/README.md index 7fee525d1..a1faa97e7 100644 --- a/README.md +++ b/README.md @@ -25,12 +25,16 @@ A pace/bricks microservice is: * **http** (logging, metrics, tracing, retries) * **s3** via http (logging, metrics, tracing, health) * **couchdb** via http (logging, metrics, tracing, retries, health) + * **grpc** (logging, metrics, tracing, retries) * provides two commands **control** and **daemon** * provides a **RESTful** API * code is generated from the **OpenAPIv3** spec * authenticated via **OAuth2** * encoded using **[json:api](https://jsonapi.org/)** * that supports **logging**, **tracing** and **metrics** +* optionally provides a **GRPC** API + * code is generated from the **protoc** spec + * that supports **logging**, **tracing** and **metrics** ## Install diff --git a/docker-compose.yml b/docker-compose.yml index dafa02ebe..89a5d9bd4 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -62,6 +62,7 @@ services: working_dir: /srv ports: - "3000:3000" + - "3001:3001" environment: - JAEGER_AGENT_HOST=jaeger - JAEGER_SAMPLER_TYPE=const diff --git a/go.mod b/go.mod index 9e4e0b82c..5d0085b0a 100644 --- a/go.mod +++ b/go.mod @@ -7,43 +7,37 @@ replace github.com/adjust/rmq/v3 => github.com/daemonfire300/rmq/v3 v3.0.2 require ( github.com/adjust/rmq/v3 v3.0.0 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect github.com/bsm/redislock v0.5.0 github.com/caarlos0/env v3.3.0+incompatible github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/dave/jennifer v1.0.2 github.com/getkin/kin-openapi v0.0.0-20180813063848-e1956e8013e5 - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kivik/couchdb/v3 v3.2.6 github.com/go-kivik/kivik/v3 v3.2.3 github.com/go-pg/pg v6.14.5+incompatible github.com/go-redis/redis/v7 v7.4.1 github.com/golang-jwt/jwt v3.2.2+incompatible - github.com/gorilla/context v1.1.1 // indirect - github.com/gorilla/mux v1.6.2 - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/golangci/golangci-lint v1.42.0 + github.com/gorilla/mux v1.8.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect github.com/jpillora/backoff v1.0.0 - github.com/mattn/go-isatty v0.0.11 + github.com/mattn/go-isatty v0.0.12 github.com/mattn/goveralls v0.0.9 - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/minio/minio-go/v7 v7.0.7 - github.com/opentracing/opentracing-go v1.0.2 + github.com/opentracing/opentracing-go v1.1.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/client_golang v0.8.0 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect - github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect - github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect + github.com/prometheus/client_golang v1.7.1 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.17.2 github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 github.com/sony/gobreaker v0.4.1 - github.com/spf13/cobra v0.0.3 - github.com/spf13/pflag v1.0.2 // indirect + github.com/spf13/cobra v1.2.1 github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 
github.com/uber-go/atomic v1.3.2 // indirect github.com/uber/jaeger-client-go v2.14.0+incompatible github.com/uber/jaeger-lib v1.5.0 @@ -51,6 +45,10 @@ require ( golang.org/x/mod v0.5.0 // indirect golang.org/x/tools v0.1.5 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 + google.golang.org/genproto v0.0.0-20210719143636-1d5a45f8e492 // indirect + google.golang.org/grpc v1.39.0 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 + google.golang.org/protobuf v1.27.1 ) replace github.com/codahale/hdrhistogram => github.com/HdrHistogram/hdrhistogram-go v0.9.0 diff --git a/go.sum b/go.sum index 5d6e16b48..560e38e1f 100644 --- a/go.sum +++ b/go.sum @@ -1,34 +1,202 @@ +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Antonboom/errname v0.1.3 h1:qKV8gSzPzBqrG/q0dgraZXJCymWt6KuD9+Y7K7xtzN8= +github.com/Antonboom/errname v0.1.3/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template 
v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= +github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde h1:YOsoVXsZQPA9aOTy1g0lAJv5VzZUvwQuZqug8XPeqfM= +github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= +github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bsm/redislock v0.5.0 h1:ODM11/cbuUXQqLgZWK6XQnufaTjsBE2UcwBc2EAFNDA= github.com/bsm/redislock v0.5.0/go.mod 
h1:qagqKlV+xiLy26iV34Y3zRPxRcJjQYbV7pZfWFeSZ8M= github.com/caarlos0/env v3.3.0+incompatible h1:jCfY0ilpzC2FFViyZyDKCxKybDESTwaR+ebh8zm6AOE= github.com/caarlos0/env v3.3.0+incompatible/go.mod h1:tdCsowwCzMLdkqRYDlHpZCp2UooDD3MspDBjZ2AD02Y= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261 h1:6/yVvBsKeAw05IUj4AzvrxaCnDjN4nUqKjW9+w5wixg= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0= +github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= +github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/daemonfire300/rmq/v3 v3.0.2 h1:kcbo8hfsAf9vTb/Cgbbf+tP3sMg51ycv0c22IP+o1Y0= github.com/daemonfire300/rmq/v3 v3.0.2/go.mod h1:dOsB6dfrpqSo1qa6wAyS96SpkVsVM5rchnge4pojujU= +github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294= +github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/dave/jennifer v1.0.2 h1:ixSwWgh8HCIJN9GlVNvdbKHrD/qfh5Mvd4ZCaFAJbr8= github.com/dave/jennifer v1.0.2/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= +github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod 
h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flimzy/diff v0.1.5/go.mod h1:lFJtC7SPsK0EroDmGTSrdtWKAxOk3rO+q+e04LL05Hs= github.com/flimzy/testy v0.1.17-0.20190521133342-95b386c3ece6/go.mod h1:3szguN8NXqgq9bt9Gu8TQVj698PJWmyx/VY1frwwKrM= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= +github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/getkin/kin-openapi v0.0.0-20180813063848-e1956e8013e5 h1:gXUMwj0PndSd+Ub1PptoPX0nAwrrS8/SVFTJwOIzjyk= github.com/getkin/kin-openapi v0.0.0-20180813063848-e1956e8013e5/go.mod h1:+0ZtELZf+SlWH8ZdA/IeFb3L/PKOKJx8eGxAlUZ/sOU= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-critic/go-critic v0.5.6 h1:siUR1+322iVikWXoV75I1YRfNaC/yaLzhdF9Zwd8Tus= +github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kivik/couchdb/v3 v3.2.6 h1:IzoAH5K7jsY1BFNibtdjAoPXRmm3rdQKJGjDMzXMvok= github.com/go-kivik/couchdb/v3 v3.2.6/go.mod h1:tUgf+ftTYkkNPyHskJW2O+6I1NUQvg7ucooVvhPQcxg= github.com/go-kivik/kivik/v3 v3.0.1/go.mod h1:7tmQDvkta/pcijpUjLMsQ9HJUELiKD5zm6jQ3Gb9cxE= @@ -37,56 +205,330 @@ github.com/go-kivik/kivik/v3 v3.2.3 h1:ZFGR3hMDa+AUmPUCQxq4da3+3C4awdFQwdOtjLS+M github.com/go-kivik/kivik/v3 v3.2.3/go.mod h1:chqVuHKAU9j2C7qL0cAH2FCO26oL+0B4aIBeCRMnLa8= github.com/go-kivik/kiviktest/v3 v3.0.3 h1:4zX1F1eLTbIvyKiylzdWDgPqofJWiuQzQhKwZwUXMB0= github.com/go-kivik/kiviktest/v3 v3.0.3/go.mod h1:sqsz3M2sJxTxAUdOj+2SU21y4phcpYc0FJIn+hbf1D0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-pg/pg v6.14.5+incompatible h1:Tc74MTCCIVd8sAJshYHqutcHhO64/EBHBTydzCGt3Js= github.com/go-pg/pg v6.14.5+incompatible/go.mod 
h1:a2oXow+aFOrvwcKs3eIA0lNFmMilrxK2sOkB5NWe0vA= +github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 
h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.42.0 h1:hqf1zo6zY3GKGjjBk3ttdH22tGwF6ZRpk6j6xyJmE8I= +github.com/golangci/golangci-lint v1.42.0/go.mod h1:wgkGQnU9lOUFvTFo5QBSOvaSSddEV21Z1zYkJSbppZA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI= +github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 h1:rx8127mFPqXXsfPSo8BwnIU97MKFZc89WHAHt8PwDVY= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jingyugao/rowserrcheck v1.1.0 h1:u6h4eiNuCLqk73Ic5TXQq9yZS+uEXTdusn7c3w1Mr6A= +github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k= github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0 h1:2Nx7XbdbE/BYZeoip2mURKUdtHQRuy6Ug+wR7K9ywNM= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= +github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk= +github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mattn/goveralls v0.0.9 h1:XmIwwrO9a9pqSW6IpI89BSCShzQxx0j/oKnnvELQNME= github.com/mattn/goveralls v0.0.9/go.mod h1:FRbM1PS8oVsOe9JtdzAAXM+DsvDMMHcM1C7drGJD8HY= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.0 h1:TvabpsolbtlzZTyJcgMRN38MHrgi8C0DhmGE5dhscGY= +github.com/mgechev/revive v1.1.0/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.35/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.7 h1:Qld/xb8C1Pwbu0jU46xAceyn9xXKCMW+3XfNbpmTB70= @@ -94,126 +536,605 @@ github.com/minio/minio-go/v7 v7.0.7/go.mod h1:pEZBUa+L2m9oECoIA6IcSK8bv/qggtQVLo github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.2.3 h1:+ANTMqRNrqwInnP9aszg/0jDo+zbXa4x66U19Bx/oTk= +github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/otiai10/copy v1.0.2 h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc= github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea h1:Sk6Xawg57ZkjXmFYD1xCHSKN6FtYM+km51MM7Lveyyc= +github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 
h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.4 h1:F6l5p6+7WBcTKS7foNQ4wqA39zjn2+RbdbyzGxIq1B0= +github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= +github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 
h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.17.2 h1:RMRHFw2+wF7LO0QqtELQwo8hqSmqISyCJeFeAAuWcRo= github.com/rs/zerolog v1.17.2/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8= +github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 
h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod 
h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e h1:mOtuXaRAbVZsxAHVdPR3IjfmN8T1h2iczJLynhLybf8= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tetafro/godot v1.4.8 h1:rhuUH+tBrx24yVAr6Ox3/UxcsiUPPJcGhinfLdbdew0= +github.com/tetafro/godot v1.4.8/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4= +github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= +github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE= +github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/uber-go/atomic v1.3.2 
h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.14.0+incompatible h1:1KGTNRby0tDiVDDhvzL0pz0N26M9DobVCfSqz4Z/UPc= github.com/uber/jaeger-client-go v2.14.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yeya24/promlinter v0.1.0 h1:goWULN0jH5Yajmu/K+v1xCqIREeB+48OiJ2uu2ssc7U= +github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0 h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= gitlab.com/flimzy/testy v0.0.3/go.mod h1:YObF4cq711ubd/3U0ydRQQVz7Cnq/ChgJpVwNr/AJac= gitlab.com/flimzy/testy v0.3.2 h1:4djQFwBJ1ayM681Zx7Y3+OKns/E9zAfGFsLc967jfdk= gitlab.com/flimzy/testy v0.3.2/go.mod 
h1:YObF4cq711ubd/3U0ydRQQVz7Cnq/ChgJpVwNr/AJac= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 h1:DZhuSZLsGlFL4CmhA8BcRA0mnthyA/nZ00AqCUo7vHg= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools 
v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -221,20 +1142,180 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= 
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210719143636-1d5a45f8e492 h1:7yQQsvnwjfEahbNNEKcBHv3mR+HnB1ctGY/z1JXzx8M= +google.golang.org/genproto v0.0.0-20210719143636-1d5a45f8e492/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= 
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= 
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6tvqeIdtsVSsxmNPqlFU=
+mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/grpc/README.md b/grpc/README.md
new file mode 100644
index 000000000..9230e1406
--- /dev/null
+++ b/grpc/README.md
@@ -0,0 +1,6 @@
+# GRPC server
+
+## Environment based configuration
+
+* `GRPC_ADDR`
+  * gRPC listen address in the [Dial format](https://golang.org/pkg/net/#Dial); defaults to `:3001`
diff --git a/grpc/client.go b/grpc/client.go
new file mode 100644
index 000000000..aad508b0b
--- /dev/null
+++ b/grpc/client.go
@@ -0,0 +1,56 @@
+// Copyright © 2021 by PACE Telematics GmbH. All rights reserved.
+// Created at 2021/09/03 by Vincent Landgraf
+
+package grpc
+
+import (
+  "context"
+  "time"
+
+  "github.com/pace/bricks/maintenance/log"
+  "google.golang.org/grpc"
+
+  grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
+  grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+  grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+)
+
+func Dial(address string) (*grpc.ClientConn, error) {
+  var conn *grpc.ClientConn
+  opts := []grpc_retry.CallOption{
+    grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)),
+  }
+  conn, err := grpc.Dial(address, grpc.WithInsecure(),
+    grpc.WithChainStreamInterceptor(
+      grpc_opentracing.StreamClientInterceptor(),
+      grpc_prometheus.StreamClientInterceptor,
+      grpc_retry.StreamClientInterceptor(opts...),
+      func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+        start := time.Now()
+        cs, err := streamer(ctx, desc, cc, method, opts...)
+        log.Ctx(ctx).Debug().Str("method", method).
+          Dur("duration", time.Since(start)).
+          Str("type", "stream").
+          Err(err).
+          Msg("GRPC requested")
+        return cs, err
+      },
+    ),
+    grpc.WithChainUnaryInterceptor(
+      grpc_opentracing.UnaryClientInterceptor(),
+      grpc_prometheus.UnaryClientInterceptor,
+      grpc_retry.UnaryClientInterceptor(opts...),
+      func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+        start := time.Now()
+        err := invoker(ctx, method, req, reply, cc, opts...)
+        log.Ctx(ctx).Debug().Str("method", method).
+          Dur("duration", time.Since(start)).
+          Str("type", "unary").
+          Err(err).
+          Msg("GRPC requested")
+        return err
+      },
+    ),
+  )
+  return conn, err
+}
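For orientation only (this note and sketch are not part of the patch): a minimal caller built on the Dial helper above might look roughly like the following, assuming the math test service that this patch adds under tools/testserver/math and the server's default listen address :3001. The token value is a placeholder; error handling is kept minimal.

package main

import (
	"context"

	"github.com/pace/bricks/grpc"
	"github.com/pace/bricks/http/security"
	"github.com/pace/bricks/maintenance/log"
	"github.com/pace/bricks/tools/testserver/math"
)

func main() {
	// Dial attaches the retry, tracing, metrics and request-logging
	// interceptors shown in grpc/client.go above.
	conn, err := grpc.Dial(":3001")
	if err != nil {
		log.Fatalf("did not connect: %s", err)
	}
	defer conn.Close()

	// ContextWithToken also mirrors the token into the outgoing gRPC
	// metadata (see the http/security change later in this patch), so the
	// server middleware can restore it from the "bearer_token" entry.
	ctx := security.ContextWithToken(context.Background(), security.TokenString("some-token"))

	c := math.NewMathServiceClient(conn)
	out, err := c.Add(ctx, &math.Input{A: 1, B: 2})
	if err != nil {
		log.Fatalf("add failed: %s", err)
	}
	log.Ctx(ctx).Info().Msgf("1 + 2 = %d", out.C)
}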
diff --git a/grpc/server.go b/grpc/server.go
new file mode 100644
index 000000000..550c94b28
--- /dev/null
+++ b/grpc/server.go
@@ -0,0 +1,161 @@
+// Copyright © 2021 by PACE Telematics GmbH. All rights reserved.
+// Created at 2021/09/03 by Vincent Landgraf
+
+package grpc
+
+import (
+  "context"
+  "fmt"
+  "net"
+  "strings"
+  "time"
+
+  grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+  grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
+  grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
+  grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+  grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+  "github.com/pace/bricks/http/security"
+  "github.com/pace/bricks/maintenance/errors"
+  "github.com/pace/bricks/maintenance/log"
+  "github.com/rs/zerolog"
+
+  "github.com/caarlos0/env"
+  "google.golang.org/grpc"
+  "google.golang.org/grpc/metadata"
+  "google.golang.org/grpc/peer"
+)
+
+var InternalServerError = errors.New("internal server error")
+
+type Config struct {
+  Address string `env:"GRPC_ADDR" envDefault:":3001"`
+}
+
+type AuthBackend interface {
+  AuthorizeStream(ctx context.Context) (context.Context, error)
+  AuthorizeUnary(ctx context.Context) (context.Context, error)
+}
+
+func ListenAndServe(gs *grpc.Server) error {
+  listener, err := Listener()
+  if err != nil {
+    return err
+  }
+  log.Logger().Info().Str("addr", listener.Addr().String()).Msg("Starting grpc server ...")
+  err = gs.Serve(listener)
+  if err != nil {
+    return err
+  }
+  return nil
+}
+
+func Listener() (net.Listener, error) {
+  var cfg Config
+  err := env.Parse(&cfg)
+  if err != nil {
+    return nil, fmt.Errorf("failed to parse grpc server environment: %w", err)
+  }
+
+  tcpListener, err := net.Listen("tcp", cfg.Address)
+  if err != nil {
+    return nil, fmt.Errorf("unable to create grpc listener for %q: %w", cfg.Address, err)
+  }
+  return tcpListener, nil
+}
+
+func Server(ab AuthBackend) *grpc.Server {
+  myServer := grpc.NewServer(
+    grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+      func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+        ctx := log.WithContext(stream.Context())
+        wrappedStream := grpc_middleware.WrapServerStream(stream)
+        wrappedStream.WrappedContext = ctx
+        var addr string
+        if p, ok := peer.FromContext(ctx); ok {
+          addr = p.Addr.String()
+        }
+
+        md, _ := metadata.FromIncomingContext(ctx)
+
+        bt := md.Get("bearer_token")
+        if len(bt) > 0 {
+          ctx = security.ContextWithToken(ctx, security.TokenString(bt[0]))
+        }
+
+        logger := zerolog.Ctx(ctx)
+        logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
+          return c.Str("req_id", strings.Join(md.Get("req_id"), ""))
+        })
+
+        start := time.Now()
+        err := handler(srv, wrappedStream)
+
+        log.Ctx(ctx).Info().Str("method", info.FullMethod).
+          Dur("duration", time.Since(start)).
+          Str("type", "stream").
+          Str("ip", addr).
+          Err(err).
+ Msg("GRPC completed") + return err + }, + func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { + defer errors.HandleWithCtx(stream.Context(), "GRPC"+info.FullMethod) + err = InternalServerError // default in case of a panic + err = handler(srv, stream) + return err + }, + grpc_ctxtags.StreamServerInterceptor(), + grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_auth.StreamServerInterceptor(ab.AuthorizeStream), + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + var addr string + if p, ok := peer.FromContext(ctx); ok { + addr = p.Addr.String() + } + + ctx = log.WithContext(ctx) + + md, _ := metadata.FromIncomingContext(ctx) + + bt := md.Get("bearer_token") + if len(bt) > 0 { + ctx = security.ContextWithToken(ctx, security.TokenString(bt[0])) + } + + logger := zerolog.Ctx(ctx) + logger.UpdateContext(func(c zerolog.Context) zerolog.Context { + return c.Str("req_id", strings.Join(md.Get("req_id"), "")) + }) + + start := time.Now() + resp, err = handler(ctx, req) + + log.Ctx(ctx).Info().Str("method", info.FullMethod). + Dur("duration", time.Since(start)). + Str("type", "unary"). + Str("ip", addr). + Interface("md", md). + Str("user_agent", strings.Join(md.Get("user-agent"), ",")). + Err(err). + Msg("GRPC completed") + return + }, + func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + defer errors.HandleWithCtx(ctx, "GRPC"+info.FullMethod) + err = InternalServerError // default in case of a panic + resp, err = handler(ctx, req) + return + }, + grpc_ctxtags.UnaryServerInterceptor(), + grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_auth.UnaryServerInterceptor(ab.AuthorizeUnary), + )), + ) + + return myServer +} diff --git a/http/security/helper.go b/http/security/helper.go index f4f56d937..ded7eeb9b 100644 --- a/http/security/helper.go +++ b/http/security/helper.go @@ -6,6 +6,8 @@ package security import ( "context" "strings" + + "google.golang.org/grpc/metadata" ) // Token represents an authentication token. @@ -15,6 +17,12 @@ type Token interface { } type ctx string +type TokenString string + +func (ts TokenString) GetValue() string { + return string(ts) +} + // prefix of the Authorization header const headerPrefix = "Bearer " @@ -34,6 +42,9 @@ func GetBearerTokenFromHeader(authHeader string) string { // ContextWithToken creates a new Context with the token func ContextWithToken(targetCtx context.Context, token Token) context.Context { + if token != nil { + targetCtx = metadata.AppendToOutgoingContext(targetCtx, "bearer_token", token.GetValue()) + } return context.WithValue(targetCtx, tokenKey, token) } diff --git a/maintenance/log/handler.go b/maintenance/log/handler.go index 55c43d034..22ea38fe4 100755 --- a/maintenance/log/handler.go +++ b/maintenance/log/handler.go @@ -4,13 +4,15 @@ package log import ( - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go" "net" "net/http" "strings" "time" + "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go" + "google.golang.org/grpc/metadata" + "github.com/pace/bricks/maintenance/log/hlog" "github.com/rs/xid" "github.com/rs/zerolog" @@ -124,6 +126,7 @@ func RequestIDHandler(fieldKey, headerName string) func(next http.Handler) http. 
} ctx = hlog.WithValue(ctx, id) + ctx = metadata.AppendToOutgoingContext(ctx, "req_id", id.String()) r = r.WithContext(ctx) // log requests with request id diff --git a/tools.go b/tools.go index f4626a01d..9e860d4e6 100644 --- a/tools.go +++ b/tools.go @@ -1,8 +1,12 @@ +//go:build tools // +build tools package bricks import ( + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/mattn/goveralls" _ "golang.org/x/tools/cmd/cover" + _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" + _ "google.golang.org/protobuf/cmd/protoc-gen-go" ) diff --git a/tools/testserver/main.go b/tools/testserver/main.go index d62d9bbb6..2184f88bd 100755 --- a/tools/testserver/main.go +++ b/tools/testserver/main.go @@ -10,6 +10,8 @@ import ( "net/http" "time" + "github.com/pace/bricks/grpc" + "github.com/pace/bricks/http/security" "github.com/pace/bricks/http/transport" "github.com/pace/bricks/maintenance/health/servicehealthcheck" @@ -26,6 +28,7 @@ import ( "github.com/pace/bricks/maintenance/log" _ "github.com/pace/bricks/maintenance/tracing" "github.com/pace/bricks/test/livetest" + "github.com/pace/bricks/tools/testserver/math" simple "github.com/pace/bricks/tools/testserver/simple" ) @@ -61,6 +64,37 @@ func (*TestService) GetTest(ctx context.Context, w simple.GetTestResponseWriter, return nil } +type GrpcAuthBackend struct{} + +func (*GrpcAuthBackend) AuthorizeStream(ctx context.Context) (context.Context, error) { + return ctx, nil +} + +func (*GrpcAuthBackend) AuthorizeUnary(ctx context.Context) (context.Context, error) { + token, ok := security.GetTokenFromContext(ctx) + if ok { + log.Ctx(ctx).Debug().Msgf("Token: %v", token.GetValue()) + } else { + return nil, fmt.Errorf("unauthenticated") + } + return ctx, nil +} + +type SimpleMathServer struct { + math.UnimplementedMathServiceServer +} + +func (*SimpleMathServer) Add(ctx context.Context, i *math.Input) (*math.Output, error) { + var o math.Output + o.C = i.A + i.B + log.Ctx(ctx).Debug().Msgf("A: %d + B: %d = C: %d", i.A, i.B, o.C) + return &o, nil +} + +func (*SimpleMathServer) Substract(ctx context.Context, i *math.Input) (*math.Output, error) { + panic("not implemented") +} + func main() { db := postgres.DefaultConnectionPool() rdb := redis.Client() @@ -83,6 +117,16 @@ func main() { return }) + ms := &SimpleMathServer{} + gs := grpc.Server(&GrpcAuthBackend{}) + math.RegisterMathServiceServer(gs, ms) + go func() { + err := grpc.ListenAndServe(gs) + if err != nil { + log.Fatal(err) + } + }() + h.Handle("/pay/beta/test", simple.Router(new(TestService))) h.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { @@ -114,6 +158,35 @@ func main() { fmt.Fprintf(w, `{"street":"Haid-und-Neu-Straße 18, 76131 Karlsruhe", "sunset": "%s"}`, fetchSunsetandSunrise(ctx)) }) + h.HandleFunc("/grpc", func(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + conn, err := grpc.Dial(":3001") + if err != nil { + log.Fatalf("did not connect: %s", err) + } + defer conn.Close() + + ctx = security.ContextWithToken(ctx, security.TokenString("test")) + + c := math.NewMathServiceClient(conn) + o, err := c.Add(ctx, &math.Input{ + A: 1, + B: 23, + }) + if err != nil { + log.Ctx(ctx).Debug().Err(err).Msg("failed to add") + return + } + log.Ctx(ctx).Info().Msgf("C: %d", o.C) + + _, err = c.Substract(ctx, &math.Input{}) + if err != nil { + log.Ctx(ctx).Debug().Err(err).Msg("failed to substract") + return + } + }) + h.HandleFunc("/couch", func(w http.ResponseWriter, r *http.Request) { row := cdb.Get(r.Context(), "$health_check") if row.Err != nil { 
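[editorial note] The math.pb.go and math_grpc.pb.go files in the next hunks are generated from tools/testserver/math/math.proto with protoc v3.17.3, protoc-gen-go v1.27.1 and protoc-gen-go-grpc (both plugins are pinned via the tools.go change above). The actual regeneration target lives in the Makefile and is not shown in this part of the patch; assuming the default output layout implied by the relative go_package "tools/testserver/math", a typical invocation from the repository root would be:

    $ protoc --go_out=. --go-grpc_out=. tools/testserver/math/math.proto

The command and paths here are an illustration only, not a quote of the project's Makefile.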
diff --git a/tools/testserver/math/math.pb.go b/tools/testserver/math/math.pb.go new file mode 100644 index 000000000..c4bfd46b9 --- /dev/null +++ b/tools/testserver/math/math.pb.go @@ -0,0 +1,222 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: tools/testserver/math/math.proto + +package math + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Input struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + A int64 `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"` + B int64 `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"` +} + +func (x *Input) Reset() { + *x = Input{} + if protoimpl.UnsafeEnabled { + mi := &file_tools_testserver_math_math_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Input) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Input) ProtoMessage() {} + +func (x *Input) ProtoReflect() protoreflect.Message { + mi := &file_tools_testserver_math_math_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Input.ProtoReflect.Descriptor instead. +func (*Input) Descriptor() ([]byte, []int) { + return file_tools_testserver_math_math_proto_rawDescGZIP(), []int{0} +} + +func (x *Input) GetA() int64 { + if x != nil { + return x.A + } + return 0 +} + +func (x *Input) GetB() int64 { + if x != nil { + return x.B + } + return 0 +} + +type Output struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + C int64 `protobuf:"varint,1,opt,name=c,proto3" json:"c,omitempty"` +} + +func (x *Output) Reset() { + *x = Output{} + if protoimpl.UnsafeEnabled { + mi := &file_tools_testserver_math_math_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Output) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Output) ProtoMessage() {} + +func (x *Output) ProtoReflect() protoreflect.Message { + mi := &file_tools_testserver_math_math_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Output.ProtoReflect.Descriptor instead. 
+func (*Output) Descriptor() ([]byte, []int) { + return file_tools_testserver_math_math_proto_rawDescGZIP(), []int{1} +} + +func (x *Output) GetC() int64 { + if x != nil { + return x.C + } + return 0 +} + +var File_tools_testserver_math_math_proto protoreflect.FileDescriptor + +var file_tools_testserver_math_math_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2f, 0x6d, 0x61, 0x74, 0x68, 0x2f, 0x6d, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x04, 0x4d, 0x61, 0x74, 0x68, 0x22, 0x23, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x61, 0x12, + 0x0c, 0x0a, 0x01, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x62, 0x22, 0x16, 0x0a, + 0x06, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x01, 0x63, 0x32, 0x57, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x68, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x0b, 0x2e, 0x4d, 0x61, + 0x74, 0x68, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x0c, 0x2e, 0x4d, 0x61, 0x74, 0x68, 0x2e, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x26, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x12, 0x0b, 0x2e, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x1a, 0x0c, 0x2e, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x42, 0x17, + 0x5a, 0x15, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2f, 0x6d, 0x61, 0x74, 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_tools_testserver_math_math_proto_rawDescOnce sync.Once + file_tools_testserver_math_math_proto_rawDescData = file_tools_testserver_math_math_proto_rawDesc +) + +func file_tools_testserver_math_math_proto_rawDescGZIP() []byte { + file_tools_testserver_math_math_proto_rawDescOnce.Do(func() { + file_tools_testserver_math_math_proto_rawDescData = protoimpl.X.CompressGZIP(file_tools_testserver_math_math_proto_rawDescData) + }) + return file_tools_testserver_math_math_proto_rawDescData +} + +var file_tools_testserver_math_math_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_tools_testserver_math_math_proto_goTypes = []interface{}{ + (*Input)(nil), // 0: Math.Input + (*Output)(nil), // 1: Math.Output +} +var file_tools_testserver_math_math_proto_depIdxs = []int32{ + 0, // 0: Math.MathService.Add:input_type -> Math.Input + 0, // 1: Math.MathService.Substract:input_type -> Math.Input + 1, // 2: Math.MathService.Add:output_type -> Math.Output + 1, // 3: Math.MathService.Substract:output_type -> Math.Output + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_tools_testserver_math_math_proto_init() } +func file_tools_testserver_math_math_proto_init() { + if File_tools_testserver_math_math_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tools_testserver_math_math_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Input); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tools_testserver_math_math_proto_msgTypes[1].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Output); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tools_testserver_math_math_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_tools_testserver_math_math_proto_goTypes, + DependencyIndexes: file_tools_testserver_math_math_proto_depIdxs, + MessageInfos: file_tools_testserver_math_math_proto_msgTypes, + }.Build() + File_tools_testserver_math_math_proto = out.File + file_tools_testserver_math_math_proto_rawDesc = nil + file_tools_testserver_math_math_proto_goTypes = nil + file_tools_testserver_math_math_proto_depIdxs = nil +} diff --git a/tools/testserver/math/math.proto b/tools/testserver/math/math.proto new file mode 100644 index 000000000..4970e0fcc --- /dev/null +++ b/tools/testserver/math/math.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +option go_package = "tools/testserver/math"; + +package Math; + +message Input { + int64 a = 1; + int64 b = 2; +} + +message Output { + int64 c = 1; +} + +service MathService { + rpc Add(Input) returns (Output); + rpc Substract(Input) returns (Output); +} \ No newline at end of file diff --git a/tools/testserver/math/math_grpc.pb.go b/tools/testserver/math/math_grpc.pb.go new file mode 100644 index 000000000..d234815b2 --- /dev/null +++ b/tools/testserver/math/math_grpc.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package math + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MathServiceClient is the client API for MathService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MathServiceClient interface { + Add(ctx context.Context, in *Input, opts ...grpc.CallOption) (*Output, error) + Substract(ctx context.Context, in *Input, opts ...grpc.CallOption) (*Output, error) +} + +type mathServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMathServiceClient(cc grpc.ClientConnInterface) MathServiceClient { + return &mathServiceClient{cc} +} + +func (c *mathServiceClient) Add(ctx context.Context, in *Input, opts ...grpc.CallOption) (*Output, error) { + out := new(Output) + err := c.cc.Invoke(ctx, "/Math.MathService/Add", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mathServiceClient) Substract(ctx context.Context, in *Input, opts ...grpc.CallOption) (*Output, error) { + out := new(Output) + err := c.cc.Invoke(ctx, "/Math.MathService/Substract", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MathServiceServer is the server API for MathService service. 
+// All implementations must embed UnimplementedMathServiceServer +// for forward compatibility +type MathServiceServer interface { + Add(context.Context, *Input) (*Output, error) + Substract(context.Context, *Input) (*Output, error) + mustEmbedUnimplementedMathServiceServer() +} + +// UnimplementedMathServiceServer must be embedded to have forward compatible implementations. +type UnimplementedMathServiceServer struct { +} + +func (UnimplementedMathServiceServer) Add(context.Context, *Input) (*Output, error) { + return nil, status.Errorf(codes.Unimplemented, "method Add not implemented") +} +func (UnimplementedMathServiceServer) Substract(context.Context, *Input) (*Output, error) { + return nil, status.Errorf(codes.Unimplemented, "method Substract not implemented") +} +func (UnimplementedMathServiceServer) mustEmbedUnimplementedMathServiceServer() {} + +// UnsafeMathServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MathServiceServer will +// result in compilation errors. +type UnsafeMathServiceServer interface { + mustEmbedUnimplementedMathServiceServer() +} + +func RegisterMathServiceServer(s grpc.ServiceRegistrar, srv MathServiceServer) { + s.RegisterService(&MathService_ServiceDesc, srv) +} + +func _MathService_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Input) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MathServiceServer).Add(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Math.MathService/Add", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MathServiceServer).Add(ctx, req.(*Input)) + } + return interceptor(ctx, in, info, handler) +} + +func _MathService_Substract_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Input) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MathServiceServer).Substract(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Math.MathService/Substract", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MathServiceServer).Substract(ctx, req.(*Input)) + } + return interceptor(ctx, in, info, handler) +} + +// MathService_ServiceDesc is the grpc.ServiceDesc for MathService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MathService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "Math.MathService", + HandlerType: (*MathServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Add", + Handler: _MathService_Add_Handler, + }, + { + MethodName: "Substract", + Handler: _MathService_Substract_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tools/testserver/math/math.proto", +} diff --git a/vendor/4d63.com/gochecknoglobals/LICENSE b/vendor/4d63.com/gochecknoglobals/LICENSE new file mode 100644 index 000000000..c401e6608 --- /dev/null +++ b/vendor/4d63.com/gochecknoglobals/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Leigh McCulloch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go new file mode 100644 index 000000000..5b6325dd9 --- /dev/null +++ b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go @@ -0,0 +1,154 @@ +package checknoglobals + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// allowedExpression is a struct representing packages and methods that will +// be an allowed combination to use as a global variable, f.ex. Name `regexp` +// and SelName `MustCompile`. +type allowedExpression struct { + Name string + SelName string +} + +const Doc = `check that no global variables exist + +This analyzer checks for global variables and errors on any found. + +A global variable is a variable declared in package scope and that can be read +and written to by any function within the package. Global variables can cause +side effects which are difficult to keep track of. A code in one function may +change the variables state while another unrelated chunk of code may be +effected by it.` + +// Analyzer provides an Analyzer that checks that there are no global +// variables, except for errors and variables containing regular +// expressions. 
+func Analyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "gochecknoglobals", + Doc: Doc, + Run: checkNoGlobals, + Flags: flags(), + RunDespiteErrors: true, + } +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.Bool("t", false, "Include tests") + + return *flags +} + +func isAllowed(v ast.Node) bool { + switch i := v.(type) { + case *ast.Ident: + return i.Name == "_" || i.Name == "version" || looksLikeError(i) + case *ast.CallExpr: + if expr, ok := i.Fun.(*ast.SelectorExpr); ok { + return isAllowedSelectorExpression(expr) + } + case *ast.CompositeLit: + if expr, ok := i.Type.(*ast.SelectorExpr); ok { + return isAllowedSelectorExpression(expr) + } + } + + return false +} + +func isAllowedSelectorExpression(v *ast.SelectorExpr) bool { + x, ok := v.X.(*ast.Ident) + if !ok { + return false + } + + allowList := []allowedExpression{ + {Name: "regexp", SelName: "MustCompile"}, + } + + for _, i := range allowList { + if x.Name == i.Name && v.Sel.Name == i.SelName { + return true + } + } + + return false +} + +// looksLikeError returns true if the AST identifier starts +// with 'err' or 'Err', or false otherwise. +// +// TODO: https://github.com/leighmcculloch/gochecknoglobals/issues/5 +func looksLikeError(i *ast.Ident) bool { + prefix := "err" + if i.IsExported() { + prefix = "Err" + } + return strings.HasPrefix(i.Name, prefix) +} + +func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { + includeTests := pass.Analyzer.Flags.Lookup("t").Value.(flag.Getter).Get().(bool) + + for _, file := range pass.Files { + filename := pass.Fset.Position(file.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { + continue + } + if !includeTests && strings.HasSuffix(filename, "_test.go") { + continue + } + + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + if genDecl.Tok != token.VAR { + continue + } + for _, spec := range genDecl.Specs { + valueSpec := spec.(*ast.ValueSpec) + onlyAllowedValues := false + + for _, vn := range valueSpec.Values { + if isAllowed(vn) { + onlyAllowedValues = true + continue + } + + onlyAllowedValues = false + break + } + + if onlyAllowedValues { + continue + } + + for _, vn := range valueSpec.Names { + if isAllowed(vn) { + continue + } + + message := fmt.Sprintf("%s is a global variable", vn.Name) + pass.Report(analysis.Diagnostic{ + Pos: vn.Pos(), + Category: "global", + Message: message, + }) + } + } + } + } + + return nil, nil +} diff --git a/vendor/github.com/Antonboom/errname/LICENSE b/vendor/github.com/Antonboom/errname/LICENSE new file mode 100644 index 000000000..e2002e4d4 --- /dev/null +++ b/vendor/github.com/Antonboom/errname/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..88cfc6e3b --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go @@ -0,0 +1,133 @@ +package analyzer + +import ( + "go/ast" + "go/token" + "strconv" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// New returns new errname analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "errname", + Doc: "Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +type stringSet = map[string]struct{} + +var ( + imports = []ast.Node{(*ast.ImportSpec)(nil)} + types = []ast.Node{(*ast.TypeSpec)(nil)} + funcs = []ast.Node{(*ast.FuncDecl)(nil)} +) + +func run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + pkgAliases := map[string]string{} + insp.Preorder(imports, func(node ast.Node) { + i := node.(*ast.ImportSpec) + if n := i.Name; n != nil && i.Path != nil { + if path, err := strconv.Unquote(i.Path.Value); err == nil { + pkgAliases[n.Name] = getPkgFromPath(path) + } + } + }) + + allTypes := stringSet{} + typesSpecs := map[string]*ast.TypeSpec{} + insp.Preorder(types, func(node ast.Node) { + t := node.(*ast.TypeSpec) + allTypes[t.Name.Name] = struct{}{} + typesSpecs[t.Name.Name] = t + }) + + errorTypes := stringSet{} + insp.Preorder(funcs, func(node ast.Node) { + f := node.(*ast.FuncDecl) + t, ok := isMethodError(f) + if !ok { + return + } + errorTypes[t] = struct{}{} + + tSpec, ok := typesSpecs[t] + if !ok { + panic("no specification for type " + t) + } + + var isValid bool + switch tSpec.Type.(type) { + case *ast.ArrayType: + isValid = isValidErrorArrayTypeName(t) + default: + isValid = isValidErrorTypeName(t) + } + if !isValid { + reportAboutErrorType(pass, tSpec.Pos(), t) + } + }) + + errorFuncs := stringSet{} + insp.Preorder(funcs, func(node ast.Node) { + f := node.(*ast.FuncDecl) + if isFuncReturningErr(f.Type, allTypes, errorTypes) { + errorFuncs[f.Name.Name] = struct{}{} + } + }) + + inspectPkgLevelVarsOnly := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.FuncDecl: + return false + + case *ast.ValueSpec: + if name, ok := isSentinelError(v, pkgAliases, allTypes, errorTypes, errorFuncs); ok && !isValidErrorVarName(name) { + reportAboutErrorVar(pass, v.Pos(), name) + } + } + return true + } + for _, f := range pass.Files { + ast.Inspect(f, inspectPkgLevelVarsOnly) + } + + return nil, nil +} + +func reportAboutErrorType(pass *analysis.Pass, typePos token.Pos, typeName string) { + var form string + if unicode.IsLower([]rune(typeName)[0]) { + form = "xxxError" + } else { + form = "XxxError" + } + pass.Reportf(typePos, "the type name `%s` should conform to the `%s` format", typeName, form) +} + +func 
reportAboutErrorVar(pass *analysis.Pass, pos token.Pos, varName string) { + var form string + if unicode.IsLower([]rune(varName)[0]) { + form = "errXxx" + } else { + form = "ErrXxx" + } + pass.Reportf(pos, "the variable name `%s` should conform to the `%s` format", varName, form) +} + +func getPkgFromPath(p string) string { + idx := strings.LastIndex(p, "/") + if idx == -1 { + return p + } + return p[idx+1:] +} diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go new file mode 100644 index 000000000..c1db0f31e --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go @@ -0,0 +1,237 @@ +package analyzer + +import ( + "go/ast" + "go/token" + "strings" + "unicode" +) + +func isMethodError(f *ast.FuncDecl) (typeName string, ok bool) { + if f.Recv == nil { + return "", false + } + if f.Name.Name != "Error" { + return "", false + } + + if f.Type == nil || f.Type.Results == nil || len(f.Type.Results.List) != 1 { + return "", false + } + + returnType, ok := f.Type.Results.List[0].Type.(*ast.Ident) + if !ok { + return "", false + } + + var receiverType string + + switch rt := f.Recv.List[0].Type.(type) { + case *ast.Ident: + receiverType = rt.Name + case *ast.StarExpr: + if i, ok := rt.X.(*ast.Ident); ok { + receiverType = i.Name + } + } + + return receiverType, returnType.Name == "string" +} + +func isValidErrorTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["error"] != 1 { + return false + } + return words[len(words)-1] == "error" +} + +func isValidErrorArrayTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["errors"] != 1 { + return false + } + return words[len(words)-1] == "errors" +} + +func isFuncReturningErr(fType *ast.FuncType, allTypes, errorTypes stringSet) bool { + if fType == nil || fType.Results == nil || len(fType.Results.List) != 1 { + return false + } + + var returnTypeName string + switch rt := fType.Results.List[0].Type.(type) { + case *ast.Ident: + returnTypeName = rt.Name + case *ast.StarExpr: + if i, ok := rt.X.(*ast.Ident); ok { + returnTypeName = i.Name + } + } + + return isErrorType(returnTypeName, allTypes, errorTypes) +} + +func isErrorType(tName string, allTypes, errorTypes stringSet) bool { + _, isUserType := allTypes[tName] + _, isErrType := errorTypes[tName] + return isErrType || (tName == "error" && !isUserType) +} + +var knownErrConstructors = stringSet{ + "fmt.Errorf": {}, + "errors.Errorf": {}, + "errors.New": {}, + "errors.Newf": {}, + "errors.NewWithDepth": {}, + "errors.NewWithDepthf": {}, + "errors.NewAssertionErrorWithWrappedErrf": {}, +} + +func isSentinelError( + v *ast.ValueSpec, + pkgAliases map[string]string, + allTypes, errorTypes, errorFuncs stringSet, +) (varName string, ok bool) { + if len(v.Names) != 1 { + return "", false + } + varName = v.Names[0].Name + + switch vv := v.Type.(type) { + // var ErrEndOfFile error + // var ErrEndOfFile SomeErrType + case *ast.Ident: + if isErrorType(vv.Name, allTypes, errorTypes) { + return varName, true + } + + // var ErrEndOfFile *SomeErrType + case *ast.StarExpr: + if i, ok := vv.X.(*ast.Ident); ok && isErrorType(i.Name, allTypes, errorTypes) { + return varName, true + } + } + + if len(v.Values) != 1 { + return "", false + } + + switch vv := v.Values[0].(type) { + case *ast.CallExpr: + switch fun := vv.Fun.(type) { + // var ErrEndOfFile = 
errors.New("end of file") + case *ast.SelectorExpr: + pkg, ok := fun.X.(*ast.Ident) + if !ok { + return "", false + } + pkgFun := fun.Sel + + pkgName := pkg.Name + if a, ok := pkgAliases[pkgName]; ok { + pkgName = a + } + + _, ok = knownErrConstructors[pkgName+"."+pkgFun.Name] + return varName, ok + + // var ErrEndOfFile = newErrEndOfFile() + // var ErrEndOfFile = new(EndOfFileError) + // const ErrEndOfFile = constError("end of file") + case *ast.Ident: + if isErrorType(fun.Name, allTypes, errorTypes) { + return varName, true + } + + if _, ok := errorFuncs[fun.Name]; ok { + return varName, true + } + + if fun.Name == "new" && len(vv.Args) == 1 { + if i, ok := vv.Args[0].(*ast.Ident); ok { + return varName, isErrorType(i.Name, allTypes, errorTypes) + } + } + + // var ErrEndOfFile = func() error { ... } + case *ast.FuncLit: + return varName, isFuncReturningErr(fun.Type, allTypes, errorTypes) + } + + // var ErrEndOfFile = &EndOfFileError{} + case *ast.UnaryExpr: + if vv.Op == token.AND { // & + if lit, ok := vv.X.(*ast.CompositeLit); ok { + if i, ok := lit.Type.(*ast.Ident); ok { + return varName, isErrorType(i.Name, allTypes, errorTypes) + } + } + } + + // var ErrEndOfFile = EndOfFileError{} + case *ast.CompositeLit: + if i, ok := vv.Type.(*ast.Ident); ok { + return varName, isErrorType(i.Name, allTypes, errorTypes) + } + } + + return "", false +} + +func isValidErrorVarName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["err"] != 1 { + return false + } + return words[0] == "err" +} + +func isInitialism(s string) bool { + return strings.ToLower(s) == s || strings.ToUpper(s) == s +} + +func split(s string) []string { + var words []string + ss := []rune(s) + + var b strings.Builder + b.WriteRune(ss[0]) + + for _, r := range ss[1:] { + if unicode.IsUpper(r) { + words = append(words, strings.ToLower(b.String())) + b.Reset() + } + b.WriteRune(r) + } + + words = append(words, strings.ToLower(b.String())) + return words +} + +func wordsCount(w []string) map[string]int { + result := make(map[string]int, len(w)) + for _, ww := range w { + result[ww]++ + } + return result +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..cd11be965 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000..f621b0119 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1 @@ +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). 
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000..64410cf75 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,220 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). + +This library requires Go 1.13 or newer; install it with: + + $ go get github.com/BurntSushi/toml + +It also comes with a TOML validator CLI tool: + + $ go get github.com/BurntSushi/toml/cmd/tomlv + $ tomlv some-toml-file.toml + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles XML and +JSON. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other most other decoders **only exported fields** are +considered when encoding and decoding; private fields are silently ignored. + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
+ diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..d3d3b8397 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,511 @@ +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use the PrimitiveDecode() function to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. 
That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// Decode TOML data in to the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + + // TODO: have parser should read from io.Reader? Or at the very least, make + // it read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// Decode the TOML data in to the pointer v. +// +// See the documentation on Decoder for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at path and decode it for you. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). 
+ + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return true + } + return false +} + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 000000000..38aa75fdc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,18 @@ +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS is just like Decode, except it will automatically read the contents +// of the file at `path` from a fs.FS instance. 
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 000000000..ad8899c6c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,123 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not be +// inferable via reflection. In particular, whether a key has been defined and +// the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use: +// +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key represents any TOML key, including key groups. Use (MetaData).Keys to get +// values of this type. +type Key []string + +func (k Key) String() string { return strings.Join(k, ".") } + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return `"` + quotedReplacer.Replace(k[i]) + `"` + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. 
+// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 000000000..db89eac1d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,33 @@ +package toml + +import ( + "encoding" + "io" +) + +// DEPRECATED! +// +// Use the identical encoding.TextMarshaler instead. It is defined here to +// support Go 1.1 and older. +type TextMarshaler encoding.TextMarshaler + +// DEPRECATED! +// +// Use the identical encoding.TextUnmarshaler instead. It is defined here to +// support Go 1.1 and older. +type TextUnmarshaler encoding.TextUnmarshaler + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// DEPRECATED! +// +// Use NewDecoder(reader).Decode(&v) instead. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + return NewDecoder(r).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 000000000..099c4a77d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,13 @@ +/* +Package toml implements decoding and encoding of TOML files. + +This package supports TOML v1.0.0, as listed on https://toml.io + +There is also support for delaying decoding with the Primitive type, and +querying the set of keys in a TOML document with the MetaData type. + +The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +and can be used to verify if TOML document is valid. It can also be used to +print the type of each key. 
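A sketch of how the MetaData accessors above fit together in practice; the TOML blob, table, and struct below are invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const blob = `
[server]
host = "localhost"
port = 8080
debug = true   # no matching field in the Go struct below
`

func main() {
	var cfg struct {
		Server struct {
			Host string
			Port int
		}
	}
	md, err := toml.Decode(blob, &cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("server", "port")) // true
	fmt.Println(md.Type("server", "port"))      // Integer
	fmt.Println(md.Undecoded())                 // [server.debug]
}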
+*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 000000000..10d88ac63 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,650 @@ +package toml + +import ( + "bufio" + "encoding" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: Only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + // The string to use for a single indentation level. The default is two + // spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the Encoder's writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. 
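A short usage sketch for the Encoder described above; the struct, field tags, and values are illustrative:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(server{Host: "localhost", Port: 8080}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Expected output:
	//   host = "localhost"
	//   port = 8080
}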
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch t := rv.Interface().(type) { + case time.Time, encoding.TextMarshaler: + enc.writeKeyValue(key, rv, false) + return + // TODO: #76 would make this superfluous after implemented. + case Primitive: + enc.encode(key, reflect.ValueOf(t.undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case encoding.TextMarshaler: + // Use text marshaler if it's available for this value. 
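The time.Time/TextMarshaler special-casing above is what turns text-marshalable types into plain quoted strings instead of tables or arrays. A sketch using net.IP, which implements encoding.TextMarshaler (the `bind` field name is an assumption for the example):

package main

import (
	"bytes"
	"fmt"
	"log"
	"net"

	"github.com/BurntSushi/toml"
)

func main() {
	// Without the TextMarshaler branch, a net.IP (a []byte) would be encoded
	// as an array of integers; with it, the value becomes a quoted string.
	var cfg struct {
		Bind net.IP `toml:"bind"`
	}
	cfg.Bind = net.ParseIP("127.0.0.1")

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // bind = "127.0.0.1"
}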
+ if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + continue + } + + frv := rv.Field(i) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + if getOptions(f.Tag).name == "" { + addFields(t, frv, append(start, f.Index...)) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) + } + continue + } + } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. 
The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case encoding.TextMarshaler: + return tomlString + default: + // Someone used a pointer receiver: we can make it work for pointer + // values. + if rv.CanAddr() { + _, ok := rv.Addr().Interface().(encoding.TextMarshaler) + if ok { + return tomlString + } + } + return tomlHash + } + default: + _, ok := rv.Interface().(encoding.TextMarshaler) + if ok { + return tomlString + } + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("") // Need *some* return value + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + + /// Don't allow nil. + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + if tomlTypeOfGo(rv.Index(i)) == nil { + encPanic(errArrayNilElement) + } + } + + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// If inline is true it won't add a newline at the end. 
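getOptions, isZero, and isEmpty above implement the struct-tag options (`-`, renaming, `omitempty`, `omitzero`) that the encoder consults for every field. A sketch of how they look from the user's side, with invented field names and values:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type settings struct {
	Name    string   `toml:"name"`
	Retries int      `toml:"retries,omitzero"` // dropped while zero
	Labels  []string `toml:"labels,omitempty"` // dropped while empty
	Secret  string   `toml:"-"`                // never encoded
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(settings{Name: "demo", Secret: "hunter2"}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // name = "demo"
}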
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + if len(key) == 0 { + encPanic(errNoKey) + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod new file mode 100644 index 000000000..82989481d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/go.mod @@ -0,0 +1,3 @@ +module github.com/BurntSushi/toml + +go 1.16 diff --git a/vendor/github.com/BurntSushi/toml/go.sum b/vendor/github.com/BurntSushi/toml/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go new file mode 100644 index 000000000..022f15bc2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/internal/tz.go @@ -0,0 +1,36 @@ +package internal + +import "time" + +// Timezones used for local datetime, date, and time TOML types. +// +// The exact way times and dates without a timezone should be interpreted is not +// well-defined in the TOML specification and left to the implementation. These +// defaults to current local timezone offset of the computer, but this can be +// changed by changing these variables before decoding. +// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use. +// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. 
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..adc4eb5d5 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1225 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to four runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos]) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. 
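The lexer is driven entirely through lex() and nextItem(); the parser simply pulls items until it sees itemEOF or itemError. A hypothetical in-package debugging helper (it only compiles inside package toml, because lexer, item, and the item* constants are unexported, and it relies on the fmt import already present in lex.go) makes that loop explicit:

// dumpItems is a hypothetical in-package helper: it runs the state machine to
// completion and prints every item the lexer emits.
func dumpItems(input string) {
	lx := lex(input)
	for {
		it := lx.nextItem()
		fmt.Printf("%-18s line %-3d %q\n", it.typ, it.line, it.val)
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
	}
}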
+func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf( + "expected end of table array name delimiter %q, but got %q instead", + arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. 
only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == stringStart: + lx.ignore() // ignore the '"' + return lexString + case r == rawStringStart: + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == stringStart || r == rawStringStart: + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator %q", keySep) + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) 
+ lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %s instead", + arrayEnd, runeOrEOF(r)) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + default: + return lx.errorf( + "expected a comma or an inline table terminator %q, but got %s instead", + inlineTableEnd, runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. 
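The inline-table states above accept the `{ ... }` form; from the caller's point of view decoding one is no different from a regular table. A sketch with an invented key and struct:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Point struct{ X, Y int }
	}
	if _, err := toml.Decode("point = { x = 1, y = 2 }", &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Point.X, v.Point.Y) // 1 2
}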
+func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isControl(r) || r == '\r': + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\r': + if lx.peek() != '\n' { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineString + case '\\': + return lexMultilineStringEscape + case stringEnd: + /// Found " → try to read two more "". + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == stringEnd { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), `"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + + if isControl(r) { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isControl(r) || r == '\r': + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\r': + if lx.peek() != '\n' { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineRawString + case rawStringEnd: + /// Found ' → try to read two more ''. + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. 
+ if lx.peek() == rawStringEnd { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + + if isControl(r) { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. 
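lexDatetime is deliberately loose; the parser later checks the value against the four accepted forms (offset datetime, local datetime, local date, local time). A decoding sketch with invented keys; the local forms come back in the synthetic zones ("date-local", "time-local", "datetime-local") defined in internal/tz.go:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

const blob = `
offset = 2021-09-03T15:07:57+02:00
local  = 2021-09-03T15:07:57
date   = 2021-09-03
clock  = 15:07:57
`

func main() {
	var v struct {
		Offset, Local, Date, Clock time.Time
	}
	if _, err := toml.Decode(blob, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Offset.Format(time.RFC3339)) // 2021-09-03T15:07:57+02:00
	fmt.Println(v.Date.Location())             // date-local
}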
+func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. 
+ return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHexadecimal(r) { + lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + case isControl(r): + return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r) + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
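The character-class helpers below (isBareKeyChar in particular) define exactly which keys may appear bare; anything else has to be quoted and goes through lexQuotedName. A small decoding sketch with an invented blob:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

const blob = `
bare-key = 1
"key with spaces" = 2
`

func main() {
	var v map[string]int
	if _, err := toml.Decode(blob, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v["bare-key"], v["key with spaces"]) // 1 2
}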
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +// Control characters except \n, \t +func isControl(r rune) bool { + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isOctal(r rune) bool { + return r >= '0' && r <= '7' +} + +func isBinary(r rune) bool { + return r == '0' || r == '1' +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..d9ae5db94 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,739 @@ +package toml + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + ordered []Key // List of keys in the order that they appear in the TOML data. + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + approxLine int // Rough approximation of line number + implicits map[string]bool // Record implied keys (e.g. 'key.group.names'). +} + +// ParseError is used when a file can't be parsed: for example invalid integer +// literals, duplicate keys, etc. +type ParseError struct { + Message string + Line int + LastKey string +} + +func (pe ParseError) Error() string { + return fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + pe.Line, pe.LastKey, pe.Message) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(ParseError); ok { + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. 
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if strings.ContainsRune(data[:ex], 0) { + return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8") + } + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf(format, v...) + panic(ParseError{ + Message: msg, + Line: p.approxLine, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). 
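Parse failures surface to callers as the exported ParseError defined above, so the approximate line and last key can be reported. A sketch with an invented, deliberately out-of-range integer:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	_, err := toml.Decode("port = 99999999999999999999", &v)

	if perr, ok := err.(toml.ParseError); ok {
		fmt.Printf("line %d, key %q: %s\n", perr.Line, perr.LastKey, perr.Message)
	}
}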
+func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicf("Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicf("Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
+ val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + array []interface{} + types []tomlType + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. 
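+// The nan/inf literals contain no digits, so they are special-cased and
+// accepted as-is.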
+func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHexadecimal is a superset of all the permissable characters + // surrounding an underscore. + accept = isHexadecimal(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType) { + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. 
+ hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true } +func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false } +func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] } +func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) addImplicitContext(key Key) { + p.addImplicit(key) + p.addContext(key, false) +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// Remove newlines inside triple-quoted strings if a line ends with "\". +func stripEscapedNewlines(s string) string { + split := strings.Split(s, "\n") + if len(split) < 1 { + return s + } + + escNL := false // Keep track of the last non-blank line was escaped. 
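+	// Re-join the lines, removing the trailing backslash and the next line's
+	// leading whitespace wherever a line ends in an unescaped backslash.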
+ for i, line := range split { + line = strings.TrimRight(line, " \t\r") + + if len(line) == 0 || line[len(line)-1] != '\\' { + split[i] = strings.TrimRight(split[i], "\r") + if !escNL && i != len(split)-1 { + split[i] += "\n" + } + continue + } + + escBS := true + for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { + escBS = !escBS + } + if escNL { + line = strings.TrimLeft(line, " \t\r") + } + escNL = !escBS + + if escBS { + split[i] += "\n" + continue + } + + split[i] = line[:len(line)-1] // Remove \ + if len(split)-1 > i { + split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + } + } + return strings.Join(split, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case ' ', '\t': + p.panicf("invalid escape: '\\%c'", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 000000000..d56aa80fa --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. 
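+// A nil type is never considered equal to anything, including another nil.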
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 000000000..608997c22 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. 
The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. 
+ length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/Djarvur/go-err113/.gitignore b/vendor/github.com/Djarvur/go-err113/.gitignore new file mode 100644 index 000000000..66fd13c90 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/Djarvur/go-err113/.golangci.yml b/vendor/github.com/Djarvur/go-err113/.golangci.yml new file mode 100644 index 000000000..2abdfc639 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/.golangci.yml @@ -0,0 +1,150 @@ +# This file contains all available configuration options +# with their default values. + +# options for analysis running +run: + # default concurrency is a available CPU number + concurrency: 4 + + # timeout for analysis, e.g. 30s, 5m, default is 1m + deadline: 15m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: false + + # list of build tags, all linters use it. Default is empty list. + #build-tags: + # - mytag + + # which dirs to skip: they won't be analyzed; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but next dirs are always skipped independently + # from this option's value: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs: + - /gen$ + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. 
+ skip-files: + - ".*\\.my\\.go$" + - lib/bad.go + - ".*\\.template\\.go$" + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number" + format: colored-line-number + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + govet: + # report about shadowed variables + check-shadowing: true + + # Obtain type information from installed (to $GOPATH/pkg) package files: + # golangci-lint will execute `go install -i` and `go test -i` for analyzed packages + # before analyzing them. + # By default this option is disabled and govet gets type information by loader from source code. + # Loading from source code is slow, but it's done only once for all linters. + # Go-installing of packages first time is much slower than loading them from source code, + # therefore this option is disabled by default. + # But repeated installation is fast in go >= 1.10 because of build caching. + # Enable this option only if all conditions are met: + # 1. you use only "fast" linters (--fast e.g.): no program loading occurs + # 2. you use go >= 1.10 + # 3. you do repeated runs (false for CI) or cache $GOPATH/pkg or `go env GOCACHE` dir in CI. + use-installed-packages: false + golint: + # minimal confidence for issues, default is 0.8 + min-confidence: 0.8 + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + gocyclo: + # minimal code complexity to report, 30 by default (but we recommend 10-20) + min-complexity: 10 + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + dupl: + # tokens count to trigger issue, 150 by default + threshold: 100 + goconst: + # minimal length of string constant, 3 by default + min-len: 3 + # minimal occurrences count to trigger, 3 by default + min-occurrences: 3 + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/davecgh/go-spew/spew + +linters: + #enable: + # - staticcheck + # - unused + # - gosimple + enable-all: true + disable: + - lll + disable-all: false + #presets: + # - bugs + # - unused + fast: false + +issues: + # List of regexps of issue texts to exclude, empty list by default. + # But independently from this option we use default exclude patterns, + # it can be disabled by `exclude-use-default: false`. To list all + # excluded by default patterns execute `golangci-lint run --help` + exclude: + - "`parseTained` is unused" + - "`parseState` is unused" + + # Independently from option `exclude` we use default exclude patterns, + # it can be disabled by this option. To list all + # excluded by default patterns execute `golangci-lint run --help`. + # Default value for this option is false. + exclude-use-default: false + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
+ max-same: 0 + + # Show only new issues: if there are unstaged changes or untracked files, + # only those changes are analyzed, else only changes in HEAD~ are analyzed. + # It's a super-useful option for integration of golangci-lint into existing + # large codebase. It's not practical to fix all existing issues at the moment + # of integration: much better don't allow issues in new code. + # Default is false. + new: false + + # Show only new issues created after git revision `REV` + #new-from-rev: REV + + # Show only new issues created in git patch with set file path. + #new-from-patch: path/to/patch/file \ No newline at end of file diff --git a/vendor/github.com/Djarvur/go-err113/.travis.yml b/vendor/github.com/Djarvur/go-err113/.travis.yml new file mode 100644 index 000000000..44fe77d53 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/.travis.yml @@ -0,0 +1,24 @@ +language: go + +go: + - "1.13" + - "1.14" + - tip + +env: + - GO111MODULE=on + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/goimports + - wget -O - -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh + +script: + - test -z "$(goimports -d ./ 2>&1)" + - ./bin/golangci-lint run + - go test -v -race ./... + +after_success: + - test "$TRAVIS_GO_VERSION" = "1.14" && goveralls -service=travis-ci diff --git a/vendor/github.com/Djarvur/go-err113/LICENSE b/vendor/github.com/Djarvur/go-err113/LICENSE new file mode 100644 index 000000000..a78ad8c77 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Djarvur + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/Djarvur/go-err113/README.adoc b/vendor/github.com/Djarvur/go-err113/README.adoc new file mode 100644 index 000000000..b26af4038 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/README.adoc @@ -0,0 +1,75 @@ += err113 image:https://godoc.org/github.com/Djarvur/go-err113?status.svg["GoDoc",link="http://godoc.org/github.com/Djarvur/go-err113"] image:https://travis-ci.org/Djarvur/go-err113.svg["Build Status",link="https://travis-ci.org/Djarvur/go-err113"] image:https://coveralls.io/repos/Djarvur/go-err113/badge.svg?branch=master&service=github["Coverage Status",link="https://coveralls.io/github/Djarvur/go-err113?branch=master"] +Daniel Podolsky +:toc: + +Golang linter to check the errors handling expressions + +== Details + +Starting from Go 1.13 the standard `error` type behaviour was changed: one `error` could be derived from another with `fmt.Errorf()` method using `%w` format specifier. + +So the errors hierarchy could be built for flexible and responsible errors processing. + +And to make this possible at least two simple rules should be followed: + +1. `error` values should not be compared directly but with `errors.Is()` method. +1. `error` should not be created dynamically from scratch but by the wrapping the static (package-level) error. + +This linter is checking the code for these 2 rules compliance. + +=== Reports + +So, `err113` reports every `==` and `!=` comparison for exact `error` type variables except comparison to `nil` and `io.EOF`. + +Also, any call of `errors.New()` and `fmt.Errorf()` methods are reported except the calls used to initialise package-level variables and the `fmt.Errorf()` calls wrapping the other errors. + +Note: non-standard packages, like `github.com/pkg/errors` are ignored completely. + +== Install + +``` +go get -u github.com/Djarvur/go-err113/cmd/err113 +``` + +== Usage + +Defined by link:https://pkg.go.dev/golang.org/x/tools/go/analysis/singlechecker[singlechecker] package. + +``` +err113: checks the error handling rules according to the Go 1.13 new error type + +Usage: err113 [-flag] [package] + + +Flags: + -V print version and exit + -all + no effect (deprecated) + -c int + display offending line with this many lines of context (default -1) + -cpuprofile string + write CPU profile to this file + -debug string + debug flags, any subset of "fpstv" + -fix + apply all suggested fixes + -flags + print analyzer flags in JSON + -json + emit JSON output + -memprofile string + write memory profile to this file + -source + no effect (deprecated) + -tags string + no effect (deprecated) + -trace string + write trace log to this file + -v no effect (deprecated) +``` + +== Thanks + +To link:https://github.com/quasilyte[Iskander (Alex) Sharipov] for the really useful advices. + +To link:https://github.com/jackwhelpton[Jack Whelpton] for the bugfix provided. 
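+
+== Example
+
+A minimal, hypothetical sketch of the patterns `err113` reports and their compliant counterparts; the identifiers below are illustrative only and are not part of this linter:
+
+```
+package example
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Not reported: a static, package-level error that callers can wrap and test for.
+var ErrNotFound = errors.New("not found")
+
+func findDynamic(id string) error {
+	// Reported: a dynamic error created from scratch inside a function.
+	return fmt.Errorf("id %s not found", id)
+}
+
+func findWrapped(id string) error {
+	// Not reported: wraps the static package-level error with %w.
+	return fmt.Errorf("id %s: %w", id, ErrNotFound)
+}
+
+func isNotFoundDirect(err error) bool {
+	// Reported: direct comparison of error values.
+	return err == ErrNotFound
+}
+
+func isNotFound(err error) bool {
+	// Not reported: unwrap-aware comparison via errors.Is().
+	return errors.Is(err, ErrNotFound)
+}
+```
+
+Only the package-level `errors.New()` assignment above falls under the initialisation exception described in the Reports section; the same call inside a function body would be reported.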
\ No newline at end of file diff --git a/vendor/github.com/Djarvur/go-err113/comparison.go b/vendor/github.com/Djarvur/go-err113/comparison.go new file mode 100644 index 000000000..8a8555783 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/comparison.go @@ -0,0 +1,123 @@ +package err113 + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +func inspectComparision(pass *analysis.Pass, n ast.Node) bool { // nolint: unparam + // check whether the call expression matches time.Now().Sub() + be, ok := n.(*ast.BinaryExpr) + if !ok { + return true + } + + // check if it is a comparison operation + if be.Op != token.EQL && be.Op != token.NEQ { + return true + } + + if !areBothErrors(be.X, be.Y, pass.TypesInfo) { + return true + } + + oldExpr := render(pass.Fset, be) + + negate := "" + if be.Op == token.NEQ { + negate = "!" + } + + newExpr := fmt.Sprintf("%s%s.Is(%s, %s)", negate, "errors", rawString(be.X), rawString(be.Y)) + + pass.Report( + analysis.Diagnostic{ + Pos: be.Pos(), + Message: fmt.Sprintf("do not compare errors directly %q, use %q instead", oldExpr, newExpr), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace %q with %q", oldExpr, newExpr), + TextEdits: []analysis.TextEdit{ + { + Pos: be.Pos(), + End: be.End(), + NewText: []byte(newExpr), + }, + }, + }, + }, + }, + ) + + return true +} + +func isError(v ast.Expr, info *types.Info) bool { + if intf, ok := info.TypeOf(v).Underlying().(*types.Interface); ok { + return intf.NumMethods() == 1 && intf.Method(0).FullName() == "(error).Error" + } + + return false +} + +func isEOF(ex ast.Expr, info *types.Info) bool { + se, ok := ex.(*ast.SelectorExpr) + if !ok || se.Sel.Name != "EOF" { + return false + } + + if ep, ok := asImportedName(se.X, info); !ok || ep != "io" { + return false + } + + return true +} + +func asImportedName(ex ast.Expr, info *types.Info) (string, bool) { + ei, ok := ex.(*ast.Ident) + if !ok { + return "", false + } + + ep, ok := info.ObjectOf(ei).(*types.PkgName) + if !ok { + return "", false + } + + return ep.Imported().Path(), true +} + +func areBothErrors(x, y ast.Expr, typesInfo *types.Info) bool { + // check that both left and right hand side are not nil + if typesInfo.Types[x].IsNil() || typesInfo.Types[y].IsNil() { + return false + } + + // check that both left and right hand side are not io.EOF + if isEOF(x, typesInfo) || isEOF(y, typesInfo) { + return false + } + + // check that both left and right hand side are errors + if !isError(x, typesInfo) && !isError(y, typesInfo) { + return false + } + + return true +} + +func rawString(x ast.Expr) string { + switch t := x.(type) { + case *ast.Ident: + return t.Name + case *ast.SelectorExpr: + return fmt.Sprintf("%s.%s", rawString(t.X), t.Sel.Name) + case *ast.CallExpr: + return fmt.Sprintf("%s()", rawString(t.Fun)) + } + return fmt.Sprintf("%s", x) +} diff --git a/vendor/github.com/Djarvur/go-err113/definition.go b/vendor/github.com/Djarvur/go-err113/definition.go new file mode 100644 index 000000000..689236bac --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/definition.go @@ -0,0 +1,74 @@ +package err113 + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" +) + +var methods2check = map[string]map[string]func(*ast.CallExpr, *types.Info) bool{ // nolint: gochecknoglobals + "errors": {"New": justTrue}, + "fmt": {"Errorf": checkWrap}, +} + +func justTrue(*ast.CallExpr, *types.Info) bool { + return true +} + +func checkWrap(ce *ast.CallExpr, info 
*types.Info) bool { + return !(len(ce.Args) > 0 && strings.Contains(toString(ce.Args[0], info), `%w`)) +} + +func inspectDefinition(pass *analysis.Pass, tlds map[*ast.CallExpr]struct{}, n ast.Node) bool { //nolint: unparam + // check whether the call expression matches time.Now().Sub() + ce, ok := n.(*ast.CallExpr) + if !ok { + return true + } + + if _, ok = tlds[ce]; ok { + return true + } + + fn, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + + fxName, ok := asImportedName(fn.X, pass.TypesInfo) + if !ok { + return true + } + + methods, ok := methods2check[fxName] + if !ok { + return true + } + + checkFunc, ok := methods[fn.Sel.Name] + if !ok { + return true + } + + if !checkFunc(ce, pass.TypesInfo) { + return true + } + + pass.Reportf( + ce.Pos(), + "do not define dynamic errors, use wrapped static errors instead: %q", + render(pass.Fset, ce), + ) + + return true +} + +func toString(ex ast.Expr, info *types.Info) string { + if tv, ok := info.Types[ex]; ok && tv.Value != nil { + return tv.Value.ExactString() + } + + return "" +} diff --git a/vendor/github.com/Djarvur/go-err113/err113.go b/vendor/github.com/Djarvur/go-err113/err113.go new file mode 100644 index 000000000..ec4f52ac7 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/err113.go @@ -0,0 +1,90 @@ +// Package err113 is a Golang linter to check the errors handling expressions +package err113 + +import ( + "bytes" + "go/ast" + "go/printer" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// NewAnalyzer creates a new analysis.Analyzer instance tuned to run err113 checks. +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "err113", + Doc: "checks the error handling rules according to the Go 1.13 new error type", + Run: run, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + tlds := enumerateFileDecls(file) + + ast.Inspect( + file, + func(n ast.Node) bool { + return inspectComparision(pass, n) && + inspectDefinition(pass, tlds, n) + }, + ) + } + + return nil, nil +} + +// render returns the pretty-print of the given node. +func render(fset *token.FileSet, x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, fset, x); err != nil { + panic(err) + } + + return buf.String() +} + +func enumerateFileDecls(f *ast.File) map[*ast.CallExpr]struct{} { + res := make(map[*ast.CallExpr]struct{}) + + var ces []*ast.CallExpr // nolint: prealloc + + for _, d := range f.Decls { + ces = append(ces, enumerateDeclVars(d)...) + } + + for _, ce := range ces { + res[ce] = struct{}{} + } + + return res +} + +func enumerateDeclVars(d ast.Decl) (res []*ast.CallExpr) { + td, ok := d.(*ast.GenDecl) + if !ok || td.Tok != token.VAR { + return nil + } + + for _, s := range td.Specs { + res = append(res, enumerateSpecValues(s)...) 
+ } + + return res +} + +func enumerateSpecValues(s ast.Spec) (res []*ast.CallExpr) { + vs, ok := s.(*ast.ValueSpec) + if !ok { + return nil + } + + for _, v := range vs.Values { + if ce, ok := v.(*ast.CallExpr); ok { + res = append(res, ce) + } + } + + return res +} diff --git a/vendor/github.com/Djarvur/go-err113/go.mod b/vendor/github.com/Djarvur/go-err113/go.mod new file mode 100644 index 000000000..6e28e9336 --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/go.mod @@ -0,0 +1,5 @@ +module github.com/Djarvur/go-err113 + +go 1.13 + +require golang.org/x/tools v0.0.0-20200324003944-a576cf524670 diff --git a/vendor/github.com/Djarvur/go-err113/go.sum b/vendor/github.com/Djarvur/go-err113/go.sum new file mode 100644 index 000000000..dab64209d --- /dev/null +++ b/vendor/github.com/Djarvur/go-err113/go.sum @@ -0,0 +1,20 @@ +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670 h1:fW7EP/GZqIvbHessHd1PLca+77TBOsRBqtaybMgXJq8= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml new file mode 100644 index 000000000..096369d44 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -0,0 +1,29 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 000000000..e405c9a84 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. 
+ +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 000000000..9ff7da9c4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 000000000..a7a1b4e36 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 000000000..1b52d2f43 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,194 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + +```go + v, err := semver.NewVersion("1.2.3-beta.1+build345") +``` + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the [documentation](https://godoc.org/github.com/Masterminds/semver). + +## Sorting Semantic Versions + +A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) +package from the standard library. For example, + +```go + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) +``` + +## Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +## Working With Pre-release Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precidence, pre-releases come before their associated releases. 
In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons without a pre-release comparator will skip pre-release versions. +For example, `>=1.2.3` will skip pre-releases when looking at a list of releases +while `>=1.2.3-0` will evaluate and find pre-releases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +## Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +## Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +## Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +## Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. + a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +``` + +# Fuzzing + + [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. + +1. 
`go-fuzz-build` +2. `go-fuzz -workdir=fuzz` + +# Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml new file mode 100644 index 000000000..b2778df15 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/appveyor.yml @@ -0,0 +1,44 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\semver +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - set PATH=%PATH%;%GOPATH%\bin + - gometalinter.v1.exe --install + +build_script: + - go install -v ./... + +test_script: + - "gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1" + - "gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || :" + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 000000000..a78235895 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 000000000..b94b93413 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,423 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. 
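+// The constraint groups are ORed together; within a group every constraint must match.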
+func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the restraint. It performs the logic for + // the constraint. + function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 000000000..6a6c24c6d --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
+ +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + + * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3, < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `~1.x` is equivalent to `>= 1, < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. 
For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 000000000..400d4f934 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,425 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. 
+func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. 
+ return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 000000000..b42bcd62b --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/.gitignore b/vendor/github.com/OpenPeeDeeP/depguard/.gitignore new file mode 100644 index 000000000..97cca67c6 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +.idea diff --git a/vendor/github.com/OpenPeeDeeP/depguard/LICENSE b/vendor/github.com/OpenPeeDeeP/depguard/LICENSE new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/vendor/github.com/OpenPeeDeeP/depguard/README.md b/vendor/github.com/OpenPeeDeeP/depguard/README.md new file mode 100644 index 000000000..d704ce6ad --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/README.md @@ -0,0 +1,77 @@ +# Depguard + +Go linter that checks package imports are in a list of acceptable packages. It +supports a white list and black list option and can do prefix or glob matching. +This allows you to allow imports from a whole organization or only +allow specific packages within a repository. It is recommended to use prefix +matching as it is faster than glob matching. The fewer glob matches the better.
+ +> If a pattern is matched by prefix it does not try to match via glob. + +## Install + +```bash +go get -u github.com/OpenPeeDeeP/depguard +``` + +## Config + +By default, Depguard looks for a file named `.depguard.json` in the current +working directory. If it is somewhere else, pass in the `-c` flag with +the location of your configuration file. + +The following is an example configuration file. + +```json +{ + "type": "whitelist", + "packages": ["github.com/OpenPeeDeeP/depguard"], + "packageErrorMessages": { + "github.com/OpenPeeDeeP/depguards": "Please use \"github.com/OpenPeeDeeP/depguard\"," + }, + "inTests": ["github.com/stretchr/testify"], + "includeGoStdLib": true +} +``` + +- `type` can be either `whitelist` or `blacklist`. This check is case insensitive. + If not specified the default is `blacklist`. +- `packages` is a list of packages for the list type specified. +- `packageErrorMessages` is a mapping from packages to the error message to display +- `inTests` is a list of packages allowed/disallowed only in test files. +- Set `includeGoStdLib` (`includeGoRoot` for backwards compatibility) to true if you want to check the list against standard lib. + If not specified the default is false. + +## Gometalinter + +The binary installation of this linter can be used with +[Gometalinter](github.com/alecthomas/gometalinter). + +If you use a configuration file for Gometalinter then the following will need to +be added to your configuration file. + +```json +{ + "linters": { + "depguard": { + "command": "depguard -c path/to/config.json", + "pattern": "PATH:LINE:COL:MESSAGE", + "installFrom": "github.com/OpenPeeDeeP/depguard", + "isFast": true, + "partitionStrategy": "packages" + } + } +} +``` + +If you prefer the command line way the following will work for you as well. + +```bash +gometalinter --linter='depguard:depguard -c path/to/config.json:PATH:LINE:COL:MESSAGE' +``` + +## Golangci-lint + +This linter was built with +[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible; +read their docs to see how to enable all of their linters, including this one. diff --git a/vendor/github.com/OpenPeeDeeP/depguard/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/depguard.go new file mode 100644 index 000000000..1dbffb7d6 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/depguard.go @@ -0,0 +1,241 @@ +package depguard + +import ( + "go/build" + "go/token" + "io/ioutil" + "path" + "sort" + "strings" + + "github.com/gobwas/glob" + "golang.org/x/tools/go/loader" +) + +// ListType states what kind of list is passed in. +type ListType int + +const ( + // LTBlacklist states the list given is a blacklist. (default) + LTBlacklist ListType = iota + // LTWhitelist states the list given is a whitelist. + LTWhitelist +) + +// StringToListType makes it easier to turn a string into a ListType. +// It assumes that the string representation is lower case. +var StringToListType = map[string]ListType{ + "whitelist": LTWhitelist, + "blacklist": LTBlacklist, +} + +// Issue with the package with PackageName at the Position. +type Issue struct { + PackageName string + Position token.Position +} + +// Depguard checks imports to make sure they follow the given list and constraints.
+type Depguard struct { + ListType ListType + IncludeGoRoot bool + + Packages []string + prefixPackages []string + globPackages []glob.Glob + + TestPackages []string + prefixTestPackages []string + globTestPackages []glob.Glob + + prefixRoot []string +} + +// Run checks for dependencies given the program and validates them against +// Packages. +func (dg *Depguard) Run(config *loader.Config, prog *loader.Program) ([]*Issue, error) { + // Shortcut execution on an empty blacklist as that means every package is allowed + if dg.ListType == LTBlacklist && len(dg.Packages) == 0 { + return nil, nil + } + + if err := dg.initialize(config, prog); err != nil { + return nil, err + } + directImports, err := dg.createImportMap(prog) + if err != nil { + return nil, err + } + var issues []*Issue + for pkg, positions := range directImports { + for _, pos := range positions { + + prefixList, globList := dg.prefixPackages, dg.globPackages + if len(dg.TestPackages) > 0 && strings.Index(pos.Filename, "_test.go") != -1 { + prefixList, globList = dg.prefixTestPackages, dg.globTestPackages + } + + if dg.flagIt(pkg, prefixList, globList) { + issues = append(issues, &Issue{ + PackageName: pkg, + Position: pos, + }) + } + } + } + return issues, nil +} + +func (dg *Depguard) initialize(config *loader.Config, prog *loader.Program) error { + // parse ordinary guarded packages + for _, pkg := range dg.Packages { + if strings.ContainsAny(pkg, "!?*[]{}") { + g, err := glob.Compile(pkg, '/') + if err != nil { + return err + } + dg.globPackages = append(dg.globPackages, g) + } else { + dg.prefixPackages = append(dg.prefixPackages, pkg) + } + } + + // Sort the packages so we can have a faster search in the array + sort.Strings(dg.prefixPackages) + + // parse guarded tests packages + for _, pkg := range dg.TestPackages { + if strings.ContainsAny(pkg, "!?*[]{}") { + g, err := glob.Compile(pkg, '/') + if err != nil { + return err + } + dg.globTestPackages = append(dg.globTestPackages, g) + } else { + dg.prefixTestPackages = append(dg.prefixTestPackages, pkg) + } + } + + // Sort the test packages so we can have a faster search in the array + sort.Strings(dg.prefixTestPackages) + + if !dg.IncludeGoRoot { + var err error + dg.prefixRoot, err = listRootPrefixs(config.Build) + if err != nil { + return err + } + } + + return nil +} + +func (dg *Depguard) createImportMap(prog *loader.Program) (map[string][]token.Position, error) { + importMap := make(map[string][]token.Position) + // For the directly imported packages + for _, imported := range prog.InitialPackages() { + // Go through their files + for _, file := range imported.Files { + // And populate a map of all direct imports and their positions + // This will filter out GoRoot depending on the Depguard.IncludeGoRoot + for _, fileImport := range file.Imports { + fileImportPath := cleanBasicLitString(fileImport.Path.Value) + if !dg.IncludeGoRoot && dg.isRoot(fileImportPath) { + continue + } + position := prog.Fset.Position(fileImport.Pos()) + positions, found := importMap[fileImportPath] + if !found { + importMap[fileImportPath] = []token.Position{ + position, + } + continue + } + importMap[fileImportPath] = append(positions, position) + } + } + } + return importMap, nil +} + +func pkgInList(pkg string, prefixList []string, globList []glob.Glob) bool { + if pkgInPrefixList(pkg, prefixList) { + return true + } + return pkgInGlobList(pkg, globList) +} + +func pkgInPrefixList(pkg string, prefixList []string) bool { + // Idx represents where in the package slice the passed in package 
would go + // when sorted. -1 Just means that it would be at the very front of the slice. + idx := sort.Search(len(prefixList), func(i int) bool { + return prefixList[i] > pkg + }) - 1 + // This means that the package passed in has no way to be prefixed by anything + // in the package list as it is already smaller then everything + if idx == -1 { + return false + } + return strings.HasPrefix(pkg, prefixList[idx]) +} + +func pkgInGlobList(pkg string, globList []glob.Glob) bool { + for _, g := range globList { + if g.Match(pkg) { + return true + } + } + return false +} + +// InList | WhiteList | BlackList +// y | | x +// n | x | +func (dg *Depguard) flagIt(pkg string, prefixList []string, globList []glob.Glob) bool { + return pkgInList(pkg, prefixList, globList) == (dg.ListType == LTBlacklist) +} + +func cleanBasicLitString(value string) string { + return strings.Trim(value, "\"\\") +} + +// We can do this as all imports that are not root are either prefixed with a domain +// or prefixed with `./` or `/` to dictate it is a local file reference +func listRootPrefixs(buildCtx *build.Context) ([]string, error) { + if buildCtx == nil { + buildCtx = &build.Default + } + root := path.Join(buildCtx.GOROOT, "src") + fs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + var pkgPrefix []string + for _, f := range fs { + if !f.IsDir() { + continue + } + pkgPrefix = append(pkgPrefix, f.Name()) + } + return pkgPrefix, nil +} + +func (dg *Depguard) isRoot(importPath string) bool { + // Idx represents where in the package slice the passed in package would go + // when sorted. -1 Just means that it would be at the very front of the slice. + idx := sort.Search(len(dg.prefixRoot), func(i int) bool { + return dg.prefixRoot[i] > importPath + }) - 1 + // This means that the package passed in has no way to be prefixed by anything + // in the package list as it is already smaller then everything + if idx == -1 { + return false + } + // if it is prefixed by a root prefix we need to check if it is an exact match + // or prefix with `/` as this could return false posative if the domain was + // `archive.com` for example as `archive` is a go root package. 
+ if strings.HasPrefix(importPath, dg.prefixRoot[idx]) { + return strings.HasPrefix(importPath, dg.prefixRoot[idx]+"/") || importPath == dg.prefixRoot[idx] + } + return false +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/go.mod b/vendor/github.com/OpenPeeDeeP/depguard/go.mod new file mode 100644 index 000000000..5ad37edb8 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/go.mod @@ -0,0 +1,9 @@ +module github.com/OpenPeeDeeP/depguard + +go 1.13 + +require ( + github.com/gobwas/glob v0.2.3 + github.com/kisielk/gotool v1.0.0 + golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b +) diff --git a/vendor/github.com/OpenPeeDeeP/depguard/go.sum b/vendor/github.com/OpenPeeDeeP/depguard/go.sum new file mode 100644 index 000000000..24693c36d --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/go.sum @@ -0,0 +1,6 @@ +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b h1:7tibmaEqrQYA+q6ri7NQjuxqSwechjtDHKq6/e85S38= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/alexkohler/prealloc/LICENSE b/vendor/github.com/alexkohler/prealloc/LICENSE new file mode 100644 index 000000000..9310fbcff --- /dev/null +++ b/vendor/github.com/alexkohler/prealloc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Alex Kohler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
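For readers skimming the vendored depguard source above: the prefix checks in `pkgInPrefixList` and `isRoot` lean entirely on `sort.Search` over a sorted prefix list. A minimal, self-contained sketch of that lookup (hypothetical example code, not part of the vendored package or of this patch) looks like this:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// hasPrefixIn reports whether pkg starts with any entry of the sorted prefix
// list, using the same sort.Search trick as depguard's pkgInPrefixList: the
// only candidate prefix is the last entry that sorts at or before pkg.
func hasPrefixIn(pkg string, sortedPrefixes []string) bool {
	idx := sort.Search(len(sortedPrefixes), func(i int) bool {
		return sortedPrefixes[i] > pkg
	}) - 1
	if idx == -1 {
		// pkg sorts before every prefix, so nothing can prefix it.
		return false
	}
	return strings.HasPrefix(pkg, sortedPrefixes[idx])
}

func main() {
	prefixes := []string{"github.com/OpenPeeDeeP", "golang.org/x"}
	sort.Strings(prefixes)

	fmt.Println(hasPrefixIn("github.com/OpenPeeDeeP/depguard", prefixes)) // true
	fmt.Println(hasPrefixIn("github.com/stretchr/testify", prefixes))     // false
}
```

Because any prefix of `pkg` must sort at or before `pkg` itself, a single `strings.HasPrefix` against that one candidate is enough after the binary search.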
diff --git a/vendor/github.com/alexkohler/prealloc/pkg/prealloc.go b/vendor/github.com/alexkohler/prealloc/pkg/prealloc.go new file mode 100644 index 000000000..72d8b95f7 --- /dev/null +++ b/vendor/github.com/alexkohler/prealloc/pkg/prealloc.go @@ -0,0 +1,267 @@ +package pkg + +import ( + "fmt" + "go/ast" + "go/token" +) + +type sliceDeclaration struct { + name string + // sType string + genD *ast.GenDecl +} + +type returnsVisitor struct { + // flags + simple bool + includeRangeLoops bool + includeForLoops bool + // visitor fields + sliceDeclarations []*sliceDeclaration + preallocHints []Hint + returnsInsideOfLoop bool + arrayTypes []string +} + +func Check(files []*ast.File, simple, includeRangeLoops, includeForLoops bool) []Hint { + hints := []Hint{} + for _, f := range files { + retVis := &returnsVisitor{ + simple: simple, + includeRangeLoops: includeRangeLoops, + includeForLoops: includeForLoops, + } + ast.Walk(retVis, f) + // if simple is true, then we actually have to check if we had returns + // inside of our loop. Otherwise, we can just report all messages. + if !retVis.simple || !retVis.returnsInsideOfLoop { + hints = append(hints, retVis.preallocHints...) + } + } + + return hints +} + +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + + return false +} + +func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { + + v.sliceDeclarations = nil + v.returnsInsideOfLoop = false + + switch n := node.(type) { + case *ast.TypeSpec: + if _, ok := n.Type.(*ast.ArrayType); ok { + if n.Name != nil { + v.arrayTypes = append(v.arrayTypes, n.Name.Name) + } + } + case *ast.FuncDecl: + if n.Body != nil { + for _, stmt := range n.Body.List { + switch s := stmt.(type) { + // Find non pre-allocated slices + case *ast.DeclStmt: + genD, ok := s.Decl.(*ast.GenDecl) + if !ok { + continue + } + if genD.Tok == token.TYPE { + for _, spec := range genD.Specs { + tSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + + if _, ok := tSpec.Type.(*ast.ArrayType); ok { + if tSpec.Name != nil { + v.arrayTypes = append(v.arrayTypes, tSpec.Name.Name) + } + } + } + } else if genD.Tok == token.VAR { + for _, spec := range genD.Specs { + vSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + var isArrType bool + switch val := vSpec.Type.(type) { + case *ast.ArrayType: + isArrType = true + case *ast.Ident: + isArrType = contains(v.arrayTypes, val.Name) + } + if isArrType { + if vSpec.Names != nil { + /*atID, ok := arrayType.Elt.(*ast.Ident) + if !ok { + continue + }*/ + + // We should handle multiple slices declared on same line e.g. var mySlice1, mySlice2 []uint32 + for _, vName := range vSpec.Names { + v.sliceDeclarations = append(v.sliceDeclarations, &sliceDeclaration{name: vName.Name /*sType: atID.Name,*/, genD: genD}) + } + } + } + } + } + + case *ast.RangeStmt: + if v.includeRangeLoops { + if len(v.sliceDeclarations) == 0 { + continue + } + // Check the value being ranged over and ensure it's not a channel (we cannot offer any recommendations on channel ranges). 
+ rangeIdent, ok := s.X.(*ast.Ident) + if ok && rangeIdent.Obj != nil { + valueSpec, ok := rangeIdent.Obj.Decl.(*ast.ValueSpec) + if ok { + if _, rangeTargetIsChannel := valueSpec.Type.(*ast.ChanType); rangeTargetIsChannel { + continue + } + } + } + if s.Body != nil { + v.handleLoops(s.Body) + } + } + + case *ast.ForStmt: + if v.includeForLoops { + if len(v.sliceDeclarations) == 0 { + continue + } + if s.Body != nil { + v.handleLoops(s.Body) + } + } + + default: + } + } + } + } + return v +} + +// handleLoops is a helper function to share the logic required for both *ast.RangeLoops and *ast.ForLoops +func (v *returnsVisitor) handleLoops(blockStmt *ast.BlockStmt) { + + for _, stmt := range blockStmt.List { + switch bodyStmt := stmt.(type) { + case *ast.AssignStmt: + asgnStmt := bodyStmt + for index, expr := range asgnStmt.Rhs { + if index >= len(asgnStmt.Lhs) { + continue + } + + lhsIdent, ok := asgnStmt.Lhs[index].(*ast.Ident) + if !ok { + continue + } + + callExpr, ok := expr.(*ast.CallExpr) + if !ok { + continue + } + + rhsFuncIdent, ok := callExpr.Fun.(*ast.Ident) + if !ok { + continue + } + + if rhsFuncIdent.Name != "append" { + continue + } + + // e.g., `x = append(x)` + // Pointless, but pre-allocation will not help. + if len(callExpr.Args) < 2 { + continue + } + + rhsIdent, ok := callExpr.Args[0].(*ast.Ident) + if !ok { + continue + } + + // e.g., `x = append(y, a)` + // This is weird (and maybe a logic error), + // but we cannot recommend pre-allocation. + if lhsIdent.Name != rhsIdent.Name { + continue + } + + // e.g., `x = append(x, y...)` + // we should ignore this. Pre-allocating in this case + // is confusing, and is not possible in general. + if callExpr.Ellipsis.IsValid() { + continue + } + + for _, sliceDecl := range v.sliceDeclarations { + if sliceDecl.name == lhsIdent.Name { + // This is a potential mark, we just need to make sure there are no returns/continues in the + // range loop. + // now we just need to grab whatever we're ranging over + /*sxIdent, ok := s.X.(*ast.Ident) + if !ok { + continue + }*/ + + v.preallocHints = append(v.preallocHints, Hint{ + Pos: sliceDecl.genD.Pos(), + DeclaredSliceName: sliceDecl.name, + }) + } + } + } + case *ast.IfStmt: + ifStmt := bodyStmt + if ifStmt.Body != nil { + for _, ifBodyStmt := range ifStmt.Body.List { + // TODO should probably handle embedded ifs here + switch /*ift :=*/ ifBodyStmt.(type) { + case *ast.BranchStmt, *ast.ReturnStmt: + v.returnsInsideOfLoop = true + default: + } + } + } + + default: + + } + } + +} + +// Hint stores the information about an occurrence of a slice that could be +// preallocated. +type Hint struct { + Pos token.Pos + DeclaredSliceName string +} + +func (h Hint) String() string { + return fmt.Sprintf("%v: Consider preallocating %v", h.Pos, h.DeclaredSliceName) +} + +func (h Hint) StringFromFS(f *token.FileSet) string { + file := f.File(h.Pos) + lineNumber := file.Position(h.Pos).Line + + return fmt.Sprintf("%v:%v Consider preallocating %v", file.Name(), lineNumber, h.DeclaredSliceName) +} diff --git a/vendor/github.com/ashanbrown/forbidigo/LICENSE b/vendor/github.com/ashanbrown/forbidigo/LICENSE new file mode 100644 index 000000000..dc1d47ad5 --- /dev/null +++ b/vendor/github.com/ashanbrown/forbidigo/LICENSE @@ -0,0 +1,13 @@ +Copyright 2019 Andrew Shannon Brown + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go new file mode 100644 index 000000000..a39f754f0 --- /dev/null +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go @@ -0,0 +1,45 @@ +package forbidigo + +// Code generated by github.com/launchdarkly/go-options. DO NOT EDIT. + +type ApplyOptionFunc func(c *config) error + +func (f ApplyOptionFunc) apply(c *config) error { + return f(c) +} + +func newConfig(options ...Option) (config, error) { + var c config + err := applyConfigOptions(&c, options...) + return c, err +} + +func applyConfigOptions(c *config, options ...Option) error { + c.ExcludeGodocExamples = true + for _, o := range options { + if err := o.apply(c); err != nil { + return err + } + } + return nil +} + +type Option interface { + apply(*config) error +} + +// OptionExcludeGodocExamples don't check inside Godoc examples (see https://blog.golang.org/examples) +func OptionExcludeGodocExamples(o bool) ApplyOptionFunc { + return func(c *config) error { + c.ExcludeGodocExamples = o + return nil + } +} + +// OptionIgnorePermitDirectives don't check for `permit` directives(for example, in favor of `nolint`) +func OptionIgnorePermitDirectives(o bool) ApplyOptionFunc { + return func(c *config) error { + c.IgnorePermitDirectives = o + return nil + } +} diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go new file mode 100644 index 000000000..2337404ae --- /dev/null +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go @@ -0,0 +1,193 @@ +// forbidigo provides a linter for forbidding the use of specific identifiers +package forbidigo + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "log" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +type Issue interface { + Details() string + Position() token.Position + String() string +} + +type UsedIssue struct { + identifier string + pattern string + position token.Position +} + +func (a UsedIssue) Details() string { + return fmt.Sprintf("use of `%s` forbidden by pattern `%s`", a.identifier, a.pattern) +} + +func (a UsedIssue) Position() token.Position { + return a.position +} + +func (a UsedIssue) String() string { return toString(a) } + +func toString(i Issue) string { + return fmt.Sprintf("%s at %s", i.Details(), i.Position()) +} + +type Linter struct { + cfg config + patterns []*regexp.Regexp +} + +func DefaultPatterns() []string { + return []string{`^(fmt\.Print(|f|ln)|print|println)$`} +} + +//go:generate go-options config +type config struct { + // don't check inside Godoc examples (see https://blog.golang.org/examples) + ExcludeGodocExamples bool `options:",true"` + IgnorePermitDirectives bool // don't check for `permit` directives(for example, in favor of `nolint`) +} + +func NewLinter(patterns []string, options ...Option) (*Linter, error) { + cfg, err := newConfig(options...) 
+ if err != nil { + return nil, errors.Wrapf(err, "failed to process options") + } + + if len(patterns) == 0 { + patterns = DefaultPatterns() + } + compiledPatterns := make([]*regexp.Regexp, 0, len(patterns)) + for _, p := range patterns { + re, err := regexp.Compile(p) + if err != nil { + return nil, fmt.Errorf("unable to compile pattern `%s`: %s", p, err) + } + compiledPatterns = append(compiledPatterns, re) + } + return &Linter{ + cfg: cfg, + patterns: compiledPatterns, + }, nil +} + +type visitor struct { + cfg config + isTestFile bool // godoc only runs on test files + + linter *Linter + comments []*ast.CommentGroup + + fset *token.FileSet + issues []Issue +} + +func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { + var issues []Issue //nolint:prealloc // we don't know how many there will be + for _, node := range nodes { + var comments []*ast.CommentGroup + isTestFile := false + isWholeFileExample := false + if file, ok := node.(*ast.File); ok { + comments = file.Comments + fileName := fset.Position(file.Pos()).Filename + isTestFile = strings.HasSuffix(fileName, "_test.go") + + // From https://blog.golang.org/examples, a "whole file example" is: + // a file that ends in _test.go and contains exactly one example function, + // no test or benchmark functions, and at least one other package-level declaration. + if l.cfg.ExcludeGodocExamples && isTestFile && len(file.Decls) > 1 { + numExamples := 0 + numTestsAndBenchmarks := 0 + for _, decl := range file.Decls { + funcDecl, isFuncDecl := decl.(*ast.FuncDecl) + // consider only functions, not methods + if !isFuncDecl || funcDecl.Recv != nil || funcDecl.Name == nil { + continue + } + funcName := funcDecl.Name.Name + if strings.HasPrefix(funcName, "Test") || strings.HasPrefix(funcName, "Benchmark") { + numTestsAndBenchmarks++ + break // not a whole file example + } + if strings.HasPrefix(funcName, "Example") { + numExamples++ + } + } + + // if this is a whole file example, skip this node + isWholeFileExample = numExamples == 1 && numTestsAndBenchmarks == 0 + } + } + if isWholeFileExample { + continue + } + visitor := visitor{ + cfg: l.cfg, + isTestFile: isTestFile, + linter: l, + fset: fset, + comments: comments, + } + ast.Walk(&visitor, node) + issues = append(issues, visitor.issues...) 
+ } + return issues, nil +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + switch node := node.(type) { + case *ast.FuncDecl: + // don't descend into godoc examples if we are ignoring them + isGodocExample := v.isTestFile && node.Recv == nil && node.Name != nil && strings.HasPrefix(node.Name.Name, "Example") + if isGodocExample && v.cfg.ExcludeGodocExamples { + return nil + } + return v + case *ast.SelectorExpr: + case *ast.Ident: + default: + return v + } + for _, p := range v.linter.patterns { + if p.MatchString(v.textFor(node)) && !v.permit(node) { + v.issues = append(v.issues, UsedIssue{ + identifier: v.textFor(node), + pattern: p.String(), + position: v.fset.Position(node.Pos()), + }) + } + } + return nil +} + +func (v *visitor) textFor(node ast.Node) string { + buf := new(bytes.Buffer) + if err := printer.Fprint(buf, v.fset, node); err != nil { + log.Fatalf("ERROR: unable to print node at %s: %s", v.fset.Position(node.Pos()), err) + } + return buf.String() +} + +func (v *visitor) permit(node ast.Node) bool { + if v.cfg.IgnorePermitDirectives { + return false + } + nodePos := v.fset.Position(node.Pos()) + var nolint = regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node)))) + for _, c := range v.comments { + commentPos := v.fset.Position(c.Pos()) + if commentPos.Line == nodePos.Line && len(c.List) > 0 && nolint.MatchString(c.List[0].Text) { + return true + } + } + return false +} diff --git a/vendor/github.com/ashanbrown/makezero/LICENSE b/vendor/github.com/ashanbrown/makezero/LICENSE new file mode 100644 index 000000000..dc1d47ad5 --- /dev/null +++ b/vendor/github.com/ashanbrown/makezero/LICENSE @@ -0,0 +1,13 @@ +Copyright 2019 Andrew Shannon Brown + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/ashanbrown/makezero/makezero/makezero.go b/vendor/github.com/ashanbrown/makezero/makezero/makezero.go new file mode 100644 index 000000000..db9b45adc --- /dev/null +++ b/vendor/github.com/ashanbrown/makezero/makezero/makezero.go @@ -0,0 +1,200 @@ +// makezero provides a linter for appends to slices initialized with non-zero length. 
+package makezero + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "log" + "regexp" +) + +type Issue interface { + Details() string + Position() token.Position + String() string +} + +type AppendIssue struct { + name string + position token.Position +} + +func (a AppendIssue) Details() string { + return fmt.Sprintf("append to slice `%s` with non-zero initialized length", a.name) +} + +func (a AppendIssue) Position() token.Position { + return a.position +} + +func (a AppendIssue) String() string { return toString(a) } + +type MustHaveNonZeroInitLenIssue struct { + name string + position token.Position +} + +func (i MustHaveNonZeroInitLenIssue) Details() string { + return fmt.Sprintf("slice `%s` does not have non-zero initial length", i.name) +} + +func (i MustHaveNonZeroInitLenIssue) Position() token.Position { + return i.position +} + +func (i MustHaveNonZeroInitLenIssue) String() string { return toString(i) } + +func toString(i Issue) string { + return fmt.Sprintf("%s at %s", i.Details(), i.Position()) +} + +type visitor struct { + initLenMustBeZero bool + + comments []*ast.CommentGroup // comments to apply during this visit + info *types.Info + + nonZeroLengthSliceDecls map[interface{}]struct{} + fset *token.FileSet + issues []Issue +} + +type Linter struct { + initLenMustBeZero bool +} + +func NewLinter(initialLengthMustBeZero bool) *Linter { + return &Linter{ + initLenMustBeZero: initialLengthMustBeZero, + } +} + +func (l Linter) Run(fset *token.FileSet, info *types.Info, nodes ...ast.Node) ([]Issue, error) { + var issues []Issue // nolint:prealloc // don't know how many there will be + for _, node := range nodes { + var comments []*ast.CommentGroup + if file, ok := node.(*ast.File); ok { + comments = file.Comments + } + visitor := visitor{ + nonZeroLengthSliceDecls: make(map[interface{}]struct{}), + initLenMustBeZero: l.initLenMustBeZero, + info: info, + fset: fset, + comments: comments, + } + ast.Walk(&visitor, node) + issues = append(issues, visitor.issues...) 
+ } + return issues, nil +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + switch node := node.(type) { + case *ast.CallExpr: + fun, ok := node.Fun.(*ast.Ident) + if !ok || fun.Name != "append" { + break + } + if sliceIdent, ok := node.Args[0].(*ast.Ident); ok && + v.hasNonZeroInitialLength(sliceIdent) && + !v.hasNoLintOnSameLine(fun) { + v.issues = append(v.issues, AppendIssue{name: sliceIdent.Name, position: v.fset.Position(fun.Pos())}) + } + case *ast.AssignStmt: + for i, right := range node.Rhs { + if right, ok := right.(*ast.CallExpr); ok { + fun, ok := right.Fun.(*ast.Ident) + if !ok || fun.Name != "make" { + continue + } + left := node.Lhs[i] + if len(right.Args) == 2 { + // ignore if not a slice or it has explicit zero length + if !v.isSlice(right.Args[0]) { + break + } else if lit, ok := right.Args[1].(*ast.BasicLit); ok && lit.Kind == token.INT && lit.Value == "0" { + break + } + if v.initLenMustBeZero && !v.hasNoLintOnSameLine(fun) { + v.issues = append(v.issues, MustHaveNonZeroInitLenIssue{ + name: v.textFor(left), + position: v.fset.Position(node.Pos()), + }) + } + v.recordNonZeroLengthSlices(left) + } + } + } + } + return v +} + +func (v *visitor) textFor(node ast.Node) string { + typeBuf := new(bytes.Buffer) + if err := printer.Fprint(typeBuf, v.fset, node); err != nil { + log.Fatalf("ERROR: unable to print type: %s", err) + } + return typeBuf.String() +} + +func (v *visitor) hasNonZeroInitialLength(ident *ast.Ident) bool { + if ident.Obj == nil { + log.Printf("WARNING: could not determine with %q at %s is a slice (missing object type)", + ident.Name, v.fset.Position(ident.Pos()).String()) + return false + } + _, exists := v.nonZeroLengthSliceDecls[ident.Obj.Decl] + return exists +} + +func (v *visitor) recordNonZeroLengthSlices(node ast.Node) { + ident, ok := node.(*ast.Ident) + if !ok { + return + } + if ident.Obj == nil { + return + } + v.nonZeroLengthSliceDecls[ident.Obj.Decl] = struct{}{} +} + +func (v *visitor) isSlice(node ast.Node) bool { + // determine type if this is a user-defined type + if ident, ok := node.(*ast.Ident); ok { + obj := ident.Obj + if obj == nil { + if v.info != nil { + _, ok := v.info.ObjectOf(ident).Type().(*types.Slice) + return ok + } + return false + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return false + } + node = spec.Type + } + + if node, ok := node.(*ast.ArrayType); ok { + return node.Len == nil // only slices have zero length + } + return false +} + +func (v *visitor) hasNoLintOnSameLine(node ast.Node) bool { + var nolint = regexp.MustCompile(`^\s*nozero\b`) + nodePos := v.fset.Position(node.Pos()) + for _, c := range v.comments { + commentPos := v.fset.Position(c.Pos()) + if commentPos.Line == nodePos.Line && nolint.MatchString(c.Text()) { + return true + } + } + return false +} diff --git a/vendor/github.com/bkielbasa/cyclop/LICENSE b/vendor/github.com/bkielbasa/cyclop/LICENSE new file mode 100644 index 000000000..b4a776a40 --- /dev/null +++ b/vendor/github.com/bkielbasa/cyclop/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Bartłomiej Klimczak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright 
notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..9b2801352 --- /dev/null +++ b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go @@ -0,0 +1,104 @@ +package analyzer + +import ( + "flag" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +var ( + flagSet flag.FlagSet +) + +var maxComplexity int +var packageAverage float64 +var skipTests bool + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "cyclop", + Doc: "calculates cyclomatic complexity", + Run: run, + Flags: flagSet, + } +} + +func init() { + flagSet.IntVar(&maxComplexity, "maxComplexity", 10, "max complexity the function can have") + flagSet.Float64Var(&packageAverage, "packageAverage", 0, "max average complexity in package") + flagSet.BoolVar(&skipTests, "skipTests", false, "should the linter execute on test files as well") +} + +func run(pass *analysis.Pass) (interface{}, error) { + var sum, count float64 + var pkgName string + var pkgPos token.Pos + + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + f, ok := node.(*ast.FuncDecl) + if !ok { + if node == nil { + return true + } + if file, ok := node.(*ast.File); ok { + pkgName = file.Name.Name + pkgPos = node.Pos() + } + // we check function by function + return true + } + + if skipTests && testFunc(f) { + return true + } + + count++ + comp := complexity(f) + sum += float64(comp) + if comp > maxComplexity { + pass.Reportf(node.Pos(), "calculated cyclomatic complexity for function %s is %d, max is %d", f.Name.Name, comp, maxComplexity) + } + + return true + }) + } + + if packageAverage > 0 { + avg := sum / count + if avg > packageAverage { + pass.Reportf(pkgPos, "the average complexity for the package %s is %f, max is %f", pkgName, avg, packageAverage) + } + } + + return nil, nil +} + +func testFunc(f *ast.FuncDecl) bool { + return strings.HasPrefix(f.Name.Name, "Test") +} + +func complexity(fn *ast.FuncDecl) int { + v := complexityVisitor{} + ast.Walk(&v, fn) + return v.Complexity +} + +type complexityVisitor struct { + Complexity int +} + +func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.FuncDecl, *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.CaseClause, *ast.CommClause: + v.Complexity++ + case *ast.BinaryExpr: + if n.Op == token.LAND || n.Op == token.LOR { + v.Complexity++ + } + } + return v +} diff --git a/vendor/github.com/bombsimon/wsl/v3/.gitignore b/vendor/github.com/bombsimon/wsl/v3/.gitignore new file mode 100644 index 000000000..1c8eba613 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/.gitignore @@ -0,0 +1,70 @@ + +# Created by https://www.gitignore.io/api/go,vim,macos + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage
tool, specifically when used with LiteIDE +*.out + +### Go Patch ### +/vendor/ +/Godeps/ + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + + +# End of https://www.gitignore.io/api/go,vim,macos diff --git a/vendor/github.com/bombsimon/wsl/v3/.travis.yml b/vendor/github.com/bombsimon/wsl/v3/.travis.yml new file mode 100644 index 000000000..5e2e26ed1 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/.travis.yml @@ -0,0 +1,25 @@ +--- +language: go + +go: + - 1.13.x + - 1.12.x + - 1.11.x + +env: + global: + - GO111MODULE=on + +install: + - go get -v golang.org/x/tools/cmd/cover github.com/mattn/goveralls + +script: + - go test -v -covermode=count -coverprofile=coverage.out + +after_script: + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci + +notifications: + email: false + +# vim: set ts=2 sw=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v3/LICENSE b/vendor/github.com/bombsimon/wsl/v3/LICENSE new file mode 100644 index 000000000..4dade6d1c --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Simon Sawert + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
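As a quick illustration of how the cyclop analyzer vendored above arrives at its numbers: the count is one point for the function itself plus one per branching construct and short-circuit operator. A small self-contained sketch (hypothetical example code, not part of the vendored package or of this patch) applying the same counting rule to a toy function:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// complexity counts decision points the way cyclop's complexityVisitor does:
// every func, if, for, range, case/comm clause and && or || adds one.
func complexity(fn *ast.FuncDecl) int {
	count := 0
	ast.Inspect(fn, func(n ast.Node) bool {
		switch n := n.(type) {
		case *ast.FuncDecl, *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.CaseClause, *ast.CommClause:
			count++
		case *ast.BinaryExpr:
			if n.Op == token.LAND || n.Op == token.LOR {
				count++
			}
		}
		return true
	})
	return count
}

func main() {
	src := `package p
func classify(n int) string {
	if n < 0 || n > 100 {
		return "out of range"
	}
	for i := 0; i < n; i++ {
		if i%2 == 0 {
			continue
		}
	}
	return "ok"
}`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, decl := range file.Decls {
		if fn, ok := decl.(*ast.FuncDecl); ok {
			// func(1) + if(1) + ||(1) + for(1) + if(1) = 5
			fmt.Printf("%s: complexity %d\n", fn.Name.Name, complexity(fn))
		}
	}
}
```

Running it prints `classify: complexity 5`: one for the function, one each for the two `if` statements, one for the `for` loop, and one for the `||`.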
diff --git a/vendor/github.com/bombsimon/wsl/v3/README.md b/vendor/github.com/bombsimon/wsl/v3/README.md new file mode 100644 index 000000000..9812f94a7 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/README.md @@ -0,0 +1,126 @@ +# WSL - Whitespace Linter + +[![forthebadge](https://forthebadge.com/images/badges/made-with-go.svg)](https://forthebadge.com) +[![forthebadge](https://forthebadge.com/images/badges/built-with-love.svg)](https://forthebadge.com) + +[![Build Status](https://travis-ci.org/bombsimon/wsl.svg?branch=master)](https://travis-ci.org/bombsimon/wsl) +[![Coverage Status](https://coveralls.io/repos/github/bombsimon/wsl/badge.svg?branch=master)](https://coveralls.io/github/bombsimon/wsl?branch=master) + +WSL is a linter that enforces a very **non scientific** vision of how to make +code more readable by enforcing empty lines at the right places. + +I think too much code out there is too cuddly and a bit too warm for its own +good, making it harder for other people to read and understand. The linter will +warn about newlines in and around blocks, in the beginning of files and other +places in the code. + +**I know this linter is aggressive** and a lot of projects I've tested it on +have failed miserably. For this linter to be useful at all I want to be open to +new ideas, configurations and discussions! Also note that some of the warnings +might be bugs or unintentional false positives so I would love an +[issue](https://github.com/bombsimon/wsl/issues/new) to fix, discuss, change or +make something configurable! + +## Installation + +### By `go get` (local installation) + +You can do that by using: + +```sh +go get -u github.com/bombsimon/wsl/cmd/... +``` + +### By golangci-lint (CI automation) + +`wsl` is already integrated with +[golangci-lint](https://github.com/golangci/golangci-lint). Please refer to the +instructions there. + +## Usage + +How to use depends on how you install `wsl`. + +### With local binary + +The general command format for `wsl` is: + +```sh +$ wsl [flags] [files...] +$ wsl [flags] + +# Examples + +$ wsl ./main.go +$ wsl --no-test ./main.go +$ wsl --allow-cuddle-declarations ./main.go +$ wsl --no-test --allow-cuddle-declaration ./main.go +$ wsl --no-test --allow-trailing-comment ./myProject/... +``` + +The "..." wildcard is not used like in other `go` commands; instead it can only +be appended to a relative or absolute path. + +By default, the linter will run on `./...` which means all go files in the +current path and all subsequent paths, including test files. To disable linting +test files, use `-n` or `--no-test`. + +### By `golangci-lint` (CI automation) + +The recommended command is: + +```sh +golangci-lint run --disable-all --enable wsl +``` + +For more information, please refer to +[golangci-lint](https://github.com/golangci/golangci-lint)'s documentation. + +## Issues and configuration + +The linter supports a few ways to configure it to satisfy more than one kind of +code style. These settings could be set either with flags or with YAML +configuration if used via `golangci-lint`. + +The supported configuration can be found [in the documentation](doc/configuration.md). + +Below is the checklist of rules behind any hit from `wsl`. If you do not see the one you hit, +feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new). + +> **Note**: this linter doesn't take into consideration the issues that will be +> fixed with `gofmt -s`, so ensure that the code is properly formatted before +> use.
+ +* [Anonymous switch statements should never be cuddled](doc/rules.md#anonymous-switch-statements-should-never-be-cuddled) +* [Append only allowed to cuddle with appended value](doc/rules.md#append-only-allowed-to-cuddle-with-appended-value) +* [Assignments should only be cuddled with other assignments](doc/rules.md#assignments-should-only-be-cuddled-with-other-assignments) +* [Block should not end with a whitespace (or comment)](doc/rules.md#block-should-not-end-with-a-whitespace-or-comment) +* [Block should not start with a whitespace](doc/rules.md#block-should-not-start-with-a-whitespace) +* [Case block should end with newline at this size](doc/rules.md#case-block-should-end-with-newline-at-this-size) +* [Branch statements should not be cuddled if block has more than two lines](doc/rules.md#branch-statements-should-not-be-cuddled-if-block-has-more-than-two-lines) +* [Declarations should never be cuddled](doc/rules.md#declarations-should-never-be-cuddled) +* [Defer statements should only be cuddled with expressions on same variable](doc/rules.md#defer-statements-should-only-be-cuddled-with-expressions-on-same-variable) +* [Expressions should not be cuddled with blocks](doc/rules.md#expressions-should-not-be-cuddled-with-blocks) +* [Expressions should not be cuddled with declarations or returns](doc/rules.md#expressions-should-not-be-cuddled-with-declarations-or-returns) +* [For statement without condition should never be cuddled](doc/rules.md#for-statement-without-condition-should-never-be-cuddled) +* [For statements should only be cuddled with assignments used in the iteration](doc/rules.md#for-statements-should-only-be-cuddled-with-assignments-used-in-the-iteration) +* [Go statements can only invoke functions assigned on line above](doc/rules.md#go-statements-can-only-invoke-functions-assigned-on-line-above) +* [If statements should only be cuddled with assignments](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments) +* [If statements should only be cuddled with assignments used in the if + statement + itself](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments-used-in-the-if-statement-itself) +* [If statements that check an error must be cuddled with the statement that assigned the error](doc/rules.md#if-statements-that-check-an-error-must-be-cuddled-with-the-statement-that-assigned-the-error) +* [Only cuddled expressions if assigning variable or using from line + above](doc/rules.md#only-cuddled-expressions-if-assigning-variable-or-using-from-line-above) +* [Only one cuddle assignment allowed before defer statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-defer-statement) +* [Only one cuddle assginment allowed before for statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-for-statement) +* [Only one cuddle assignment allowed before go statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-go-statement) +* [Only one cuddle assignment allowed before if statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-if-statement) +* [Only one cuddle assignment allowed before range statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-range-statement) +* [Only one cuddle assignment allowed before switch statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-switch-statement) +* [Only one cuddle assignment allowed before type switch statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-type-switch-statement) +* [Ranges should only be cuddled with assignments used in the 
iteration](doc/rules.md#ranges-should-only-be-cuddled-with-assignments-used-in-the-iteration) +* [Return statements should not be cuddled if block has more than two lines](doc/rules.md#return-statements-should-not-be-cuddled-if-block-has-more-than-two-lines) +* [Short declarations should cuddle only with other short declarations](doc/rules.md#short-declaration-should-cuddle-only-with-other-short-declarations) +* [Switch statements should only be cuddled with variables switched](doc/rules.md#switch-statements-should-only-be-cuddled-with-variables-switched) +* [Type switch statements should only be cuddled with variables switched](doc/rules.md#type-switch-statements-should-only-be-cuddled-with-variables-switched) diff --git a/vendor/github.com/bombsimon/wsl/v3/go.mod b/vendor/github.com/bombsimon/wsl/v3/go.mod new file mode 100644 index 000000000..0c325eda1 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/go.mod @@ -0,0 +1,12 @@ +module github.com/bombsimon/wsl/v3 + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/stretchr/testify v1.5.1 + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/vendor/github.com/bombsimon/wsl/v3/go.sum b/vendor/github.com/bombsimon/wsl/v3/go.sum new file mode 100644 index 000000000..3bdb59247 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/go.sum @@ -0,0 +1,25 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/bombsimon/wsl/v3/wsl.go b/vendor/github.com/bombsimon/wsl/v3/wsl.go new file mode 100644 index 000000000..313b52787 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v3/wsl.go @@ -0,0 +1,1247 @@ +package wsl + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "reflect" + "strings" +) + +// Error reason strings +const ( + reasonMustCuddleErrCheck = "if statements that check an error must be cuddled with the statement that assigned the error" + reasonOnlyCuddleIfWithAssign = "if statements should only be cuddled with assignments" + reasonOnlyOneCuddle = "only one cuddle assignment allowed before if statement" + reasonOnlyCuddleWithUsedAssign = "if statements should only be cuddled with assignments used in the if statement itself" + reasonOnlyCuddle2LineReturn = "return statements should not be cuddled if block has more than two lines" + reasonMultiLineBranchCuddle = "branch statements should not be cuddled if block has more than two lines" + reasonAppendCuddledWithoutUse = "append only allowed to cuddle with appended value" + reasonAssignsCuddleAssign = "assignments should only be cuddled with other assignments" + reasonNeverCuddleDeclare = "declarations should never be cuddled" + reasonExpressionCuddledWithDeclOrRet = "expressions should not be cuddled with declarations or returns" + reasonExpressionCuddledWithBlock = "expressions should not be cuddled with blocks" + reasonExprCuddlingNonAssignedVar = "only cuddled expressions if assigning variable or using from line above" + reasonOneCuddleBeforeRange = "only one cuddle assignment allowed before range statement" + reasonRangeCuddledWithoutUse = "ranges should only be cuddled with assignments used in the iteration" + reasonOneCuddleBeforeDefer = "only one cuddle assignment allowed before defer statement" + reasonDeferCuddledWithOtherVar = "defer statements should only be cuddled with expressions on same variable" + reasonForWithoutCondition = "for statement without condition should never be cuddled" + reasonForWithMoreThanOneCuddle = "only one cuddle assignment allowed before for statement" + reasonForCuddledAssignWithoutUse = "for statements should only be cuddled with assignments used in the iteration" + reasonOneCuddleBeforeGo = "only one cuddle assignment allowed before go statement" + reasonGoFuncWithoutAssign = "go statements can only invoke functions assigned on line above" + reasonSwitchManyCuddles = "only one cuddle assignment allowed before switch statement" + reasonAnonSwitchCuddled = "anonymous switch statements should never be cuddled" + reasonSwitchCuddledWithoutUse = "switch statements should only be cuddled with variables switched" + reasonTypeSwitchTooCuddled = "only one cuddle assignment allowed before type switch statement" + reasonTypeSwitchCuddledWithoutUse = "type switch statements should only be cuddled with variables switched" + reasonBlockStartsWithWS = "block should not start with a whitespace" + reasonBlockEndsWithWS = "block should not end with a whitespace (or comment)" + reasonCaseBlockTooCuddly = "case block should end with newline at this size" + reasonShortDeclNotExclusive = "short declaration should cuddle only with other short declarations" +) + +// Warning strings +const ( + warnTypeNotImplement = "type not implemented" + 
warnStmtNotImplemented = "stmt type not implemented" + warnBodyStmtTypeNotImplemented = "body statement type not implemented " + warnWSNodeTypeNotImplemented = "whitespace node type not implemented " + warnUnknownLHS = "UNKNOWN LHS" + warnUnknownRHS = "UNKNOWN RHS" +) + +type Configuration struct { + // StrictAppend will do strict checking when assigning from append (x = + // append(x, y)). If this is set to true the append call must append either + // a variable assigned, called or used on the line above. An example of what is not + // allowed when this is true: + // + // x := []string{} + // y := "not going in X" + // x = append(x, "not y") // This is not allowed with StrictAppend + // z := "going in X" + // + // x = append(x, z) // This is allowed with StrictAppend + // + // m := transform(z) + // x = append(x, z) // So is this because Z is used above. + StrictAppend bool + + // AllowAssignAndCallCuddle allows assignments to be cuddled with variables + // used in calls on line above and calls to be cuddled with assignments of + // variables used in call on line above. + // Example supported with this set to true: + // + // x.Call() + // x = Assign() + // x.AnotherCall() + // x = AnotherAssign() + AllowAssignAndCallCuddle bool + + // AllowAssignAndAnythingCuddle allows assignments to be cuddled with anything. + // Example supported with this set to true: + // if x == 1 { + // x = 0 + // } + // z := x + 2 + // fmt.Println("x") + // y := "x" + AllowAssignAndAnythingCuddle bool + + // AllowMultiLineAssignCuddle allows cuddling to assignments even if they + // span over multiple lines. This defaults to true which allows the + // following example: + // + // err := function( + // "multiple", "lines", + // ) + // if err != nil { + // // ... + // } + AllowMultiLineAssignCuddle bool + + // If the number of lines in a case block is equal to or larger than this + // number, the case *must* end with a newline. + ForceCaseTrailingWhitespaceLimit int + + // AllowTrailingComment will allow blocks to end with comments. + AllowTrailingComment bool + + // AllowSeparatedLeadingComment will allow multiple comments in the + // beginning of a block separated with newline. Example: + // func () { + // // Comment one + // + // // Comment two + // fmt.Println("x") + // } + AllowSeparatedLeadingComment bool + + // AllowCuddleDeclaration will allow multiple var/declaration statements to + // be cuddled. This defaults to false but setting it to true will enable the + // following example: + // var foo bool + // var err error + AllowCuddleDeclaration bool + + // AllowCuddleWithCalls is a list of call idents that everything can be + // cuddled with. Defaults to calls looking like locks to support a flow like + // this: + // + // mu.Lock() + // allow := thisAssignment + AllowCuddleWithCalls []string + + // AllowCuddleWithRHS is a list of right hand side variables that is allowed + // to be cuddled with anything. Defaults to assignments or calls looking + // like unlocks to support a flow like this: + // + // allow := thisAssignment() + // mu.Unlock() + AllowCuddleWithRHS []string + + // ForceCuddleErrCheckAndAssign will cause an error when an If statement that + // checks an error variable doesn't cuddle with the assignment of that variable.
+ // This defaults to false but setting it to true will cause the following + // to generate an error: + // + // err := ProduceError() + // + // if err != nil { + // return err + // } + ForceCuddleErrCheckAndAssign bool + + // When ForceCuddleErrCheckAndAssign is enabled this is a list of names + // used for error variables to check for in the conditional. + // Defaults to just "err" + ErrorVariableNames []string + + // ForceExclusiveShortDeclarations will cause an error if a short declaration + // (:=) cuddles with anything other than another short declaration. For example + // + // a := 2 + // b := 3 + // + // is allowed, but + // + // a := 2 + // b = 3 + // + // is not allowed. This logic overrides ForceCuddleErrCheckAndAssign among others. + ForceExclusiveShortDeclarations bool +} + +// DefaultConfig returns default configuration +func DefaultConfig() Configuration { + return Configuration{ + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowAssignAndAnythingCuddle: false, + AllowMultiLineAssignCuddle: true, + AllowTrailingComment: false, + AllowSeparatedLeadingComment: false, + ForceCuddleErrCheckAndAssign: false, + ForceExclusiveShortDeclarations: false, + ForceCaseTrailingWhitespaceLimit: 0, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + ErrorVariableNames: []string{"err"}, + } +} + +// Result represents the result of one error. +type Result struct { + FileName string + LineNumber int + Position token.Position + Reason string +} + +// String returns the filename, line number and reason of a Result. +func (r *Result) String() string { + return fmt.Sprintf("%s:%d: %s", r.FileName, r.LineNumber, r.Reason) +} + +type Processor struct { + config Configuration + result []Result + warnings []string + fileSet *token.FileSet + file *ast.File +} + +// NewProcessor will create a Processor. +func NewProcessorWithConfig(cfg Configuration) *Processor { + return &Processor{ + result: []Result{}, + config: cfg, + } +} + +// NewProcessor will create a Processor. +func NewProcessor() *Processor { + return NewProcessorWithConfig(DefaultConfig()) +} + +// ProcessFiles takes a string slice with file names (full paths) and lints +// them. +// nolint: gocritic +func (p *Processor) ProcessFiles(filenames []string) ([]Result, []string) { + for _, filename := range filenames { + data, err := ioutil.ReadFile(filename) + if err != nil { + panic(err) + } + + p.process(filename, data) + } + + return p.result, p.warnings +} + +func (p *Processor) process(filename string, data []byte) { + fileSet := token.NewFileSet() + file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) + + // If the file is not parsable let's add a syntax error and move on. + if err != nil { + p.result = append(p.result, Result{ + FileName: filename, + LineNumber: 0, + Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), + }) + + return + } + + p.fileSet = fileSet + p.file = file + + for _, d := range p.file.Decls { + switch v := d.(type) { + case *ast.FuncDecl: + p.parseBlockBody(v.Name, v.Body) + case *ast.GenDecl: + // `go fmt` will handle proper spacing for GenDecl such as imports, + // constants etc. + default: + p.addWarning(warnTypeNotImplement, d.Pos(), v) + } + } +} + +// parseBlockBody will parse any kind of block statements such as switch cases +// and if statements. A list of Result is returned. +func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { + // Nothing to do if there's no value. 
+ if reflect.ValueOf(block).IsNil() { + return + } + + // Start by finding leading and trailing whitespaces. + p.findLeadingAndTrailingWhitespaces(ident, block, nil) + + // Parse the block body contents. + p.parseBlockStatements(block.List) +} + +// parseBlockStatements will parse all the statements found in the body of a +// node. A list of Result is returned. +// nolint: gocognit +func (p *Processor) parseBlockStatements(statements []ast.Stmt) { + for i, stmt := range statements { + // Start by checking if this statement is another block (other than if, + // for and range). This could be assignment to a function, defer or go + // call with an inline function or similar. If this is found we start by + // parsing this body block before moving on. + for _, stmtBlocks := range p.findBlockStmt(stmt) { + p.parseBlockBody(nil, stmtBlocks) + } + + firstBodyStatement := p.firstBodyStatement(i, statements) + + // First statement, nothing to do. + if i == 0 { + continue + } + + previousStatement := statements[i-1] + previousStatementIsMultiline := p.nodeStart(previousStatement) != p.nodeEnd(previousStatement) + cuddledWithLastStmt := p.nodeEnd(previousStatement) == p.nodeStart(stmt)-1 + + // If we're not cuddled and we don't need to enforce err-check cuddling + // then we can bail out here + if !cuddledWithLastStmt && !p.config.ForceCuddleErrCheckAndAssign { + continue + } + + // We don't force error cuddling for multilines. (#86) + if p.config.ForceCuddleErrCheckAndAssign && previousStatementIsMultiline && !cuddledWithLastStmt { + continue + } + + // Extract assigned variables on the line above + // which is the only thing we allow cuddling with. If the assignment is + // made over multiple lines we should not allow cuddling. + var assignedOnLineAbove []string + + // We want to keep track of what was called on the line above to support + // special handling of things such as mutexes. + var calledOnLineAbove []string + + // Check if the previous statement spans over multiple lines. + var cuddledWithMultiLineAssignment = cuddledWithLastStmt && p.nodeStart(previousStatement) != p.nodeStart(stmt)-1 + + // Ensure previous line is not a multi line assignment and if not get + // rightAndLeftHandSide assigned variables. + if !cuddledWithMultiLineAssignment { + assignedOnLineAbove = p.findLHS(previousStatement) + calledOnLineAbove = p.findRHS(previousStatement) + } + + // If previous assignment is multi line and we allow it, fetch + // assignments (but only assignments). + if cuddledWithMultiLineAssignment && p.config.AllowMultiLineAssignCuddle { + if _, ok := previousStatement.(*ast.AssignStmt); ok { + assignedOnLineAbove = p.findLHS(previousStatement) + } + } + + // We could potentially have a block which require us to check the first + // argument before ruling out an allowed cuddle. + var calledOrAssignedFirstInBlock []string + + if firstBodyStatement != nil { + calledOrAssignedFirstInBlock = append(p.findLHS(firstBodyStatement), p.findRHS(firstBodyStatement)...) + } + + var ( + leftHandSide = p.findLHS(stmt) + rightHandSide = p.findRHS(stmt) + rightAndLeftHandSide = append(leftHandSide, rightHandSide...) + calledOrAssignedOnLineAbove = append(calledOnLineAbove, assignedOnLineAbove...) + ) + + // If we called some kind of lock on the line above we allow cuddling + // anything. + if atLeastOneInListsMatch(calledOnLineAbove, p.config.AllowCuddleWithCalls) { + continue + } + + // If we call some kind of unlock on this line we allow cuddling with + // anything. 
+ if atLeastOneInListsMatch(rightHandSide, p.config.AllowCuddleWithRHS) { + continue + } + + moreThanOneStatementAbove := func() bool { + if i < 2 { + return false + } + + statementBeforePreviousStatement := statements[i-2] + + return p.nodeStart(previousStatement)-1 == p.nodeEnd(statementBeforePreviousStatement) + } + + isLastStatementInBlockOfOnlyTwoLines := func() bool { + // If we're the last statement, check if there's no more than two + // lines from the starting statement and the end of this statement. + // This is to support short return functions such as: + // func (t *Typ) X() { + // t.X = true + // return t + // } + // nolint: gocritic + if i == len(statements)-1 && i == 1 { + if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { + return true + } + } + + return false + } + + // If it's a short declaration we should not cuddle with anything else + // if ForceExclusiveShortDeclarations is set on; either this or the + // previous statement could be the short decl, so we'll find out which + // it was and use *that* statement's position + if p.config.ForceExclusiveShortDeclarations && cuddledWithLastStmt { + if p.isShortDecl(stmt) && !p.isShortDecl(previousStatement) { + p.addError(stmt.Pos(), reasonShortDeclNotExclusive) + } else if p.isShortDecl(previousStatement) && !p.isShortDecl(stmt) { + p.addError(previousStatement.Pos(), reasonShortDeclNotExclusive) + } + } + + // If it's not an if statement and we're not cuddled move on. The only + // reason we need to keep going for if statements is to check if we + // should be cuddled with an error check. + if _, ok := stmt.(*ast.IfStmt); !ok { + if !cuddledWithLastStmt { + continue + } + } + + switch t := stmt.(type) { + case *ast.IfStmt: + checkingErrInitializedInline := func() bool { + if t.Init == nil { + return false + } + + // Variables were initialized inline in the if statement + // Let's make sure it's the err just to be safe + return atLeastOneInListsMatch(p.findLHS(t.Init), p.config.ErrorVariableNames) + } + + if !cuddledWithLastStmt { + checkingErr := atLeastOneInListsMatch(rightAndLeftHandSide, p.config.ErrorVariableNames) + if checkingErr { + // We only want to enforce cuddling error checks if the + // error was assigned on the line above. See + // https://github.com/bombsimon/wsl/issues/78. + // This is needed since `assignedOnLineAbove` is not + // actually just assignments but everything from LHS in the + // previous statement. This means that if previous line was + // `if err ...`, `err` will now be in the list + // `assignedOnLineAbove`. 
+ if _, ok := previousStatement.(*ast.AssignStmt); !ok { + continue + } + + if checkingErrInitializedInline() { + continue + } + + if atLeastOneInListsMatch(assignedOnLineAbove, p.config.ErrorVariableNames) { + p.addError(t.Pos(), reasonMustCuddleErrCheck) + } + } + + continue + } + + if len(assignedOnLineAbove) == 0 { + p.addError(t.Pos(), reasonOnlyCuddleIfWithAssign) + continue + } + + if moreThanOneStatementAbove() { + p.addError(t.Pos(), reasonOnlyOneCuddle) + continue + } + + if atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + continue + } + + if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + continue + } + + p.addError(t.Pos(), reasonOnlyCuddleWithUsedAssign) + case *ast.ReturnStmt: + if isLastStatementInBlockOfOnlyTwoLines() { + continue + } + + p.addError(t.Pos(), reasonOnlyCuddle2LineReturn) + case *ast.BranchStmt: + if isLastStatementInBlockOfOnlyTwoLines() { + continue + } + + p.addError(t.Pos(), reasonMultiLineBranchCuddle) + case *ast.AssignStmt: + // append is usually an assignment but should not be allowed to be + // cuddled with anything not appended. + if len(rightHandSide) > 0 && rightHandSide[len(rightHandSide)-1] == "append" { + if p.config.StrictAppend { + if !atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightHandSide) { + p.addError(t.Pos(), reasonAppendCuddledWithoutUse) + } + } + + continue + } + + if _, ok := previousStatement.(*ast.AssignStmt); ok { + continue + } + + if p.config.AllowAssignAndAnythingCuddle { + continue + } + + if _, ok := previousStatement.(*ast.DeclStmt); ok && p.config.AllowCuddleDeclaration { + continue + } + + // If the assignment is from a type or variable called on the line + // above we can allow it by setting AllowAssignAndCallCuddle to + // true. + // Example (x is used): + // x.function() + // a.Field = x.anotherFunction() + if p.config.AllowAssignAndCallCuddle { + if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { + continue + } + } + + p.addError(t.Pos(), reasonAssignsCuddleAssign) + case *ast.DeclStmt: + if !p.config.AllowCuddleDeclaration { + p.addError(t.Pos(), reasonNeverCuddleDeclare) + } + case *ast.ExprStmt: + switch previousStatement.(type) { + case *ast.DeclStmt, *ast.ReturnStmt: + if p.config.AllowAssignAndCallCuddle && p.config.AllowCuddleDeclaration { + continue + } + + p.addError(t.Pos(), reasonExpressionCuddledWithDeclOrRet) + case *ast.IfStmt, *ast.RangeStmt, *ast.SwitchStmt: + p.addError(t.Pos(), reasonExpressionCuddledWithBlock) + } + + // If the expression is called on a type or variable used or + // assigned on the line we can allow it by setting + // AllowAssignAndCallCuddle to true. + // Example of allowed cuddled (x is used): + // a.Field = x.func() + // x.function() + if p.config.AllowAssignAndCallCuddle { + if atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightAndLeftHandSide) { + continue + } + } + + // If we assigned variables on the line above but didn't use them in + // this expression there should probably be a newline between them. 
+			if len(assignedOnLineAbove) > 0 && !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				p.addError(t.Pos(), reasonExprCuddlingNonAssignedVar)
+			}
+		case *ast.RangeStmt:
+			if moreThanOneStatementAbove() {
+				p.addError(t.Pos(), reasonOneCuddleBeforeRange)
+				continue
+			}
+
+			if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) {
+					p.addError(t.Pos(), reasonRangeCuddledWithoutUse)
+				}
+			}
+		case *ast.DeferStmt:
+			if _, ok := previousStatement.(*ast.DeferStmt); ok {
+				// We may cuddle multiple defers to group logic.
+				continue
+			}
+
+			// Special treatment of deferring body closes after error checking
+			// according to best practices. See
+			// https://github.com/bombsimon/wsl/issues/31 which links to
+			// discussion about error handling after HTTP requests. This is hard
+			// coded and very specific but for now this is to be seen as a
+			// special case. What this does is that it *only* allows a defer
+			// statement with `Close` on the right hand side to be cuddled with
+			// an if-statement to support this:
+			//  resp, err := client.Do(req)
+			//  if err != nil {
+			//      return err
+			//  }
+			//  defer resp.Body.Close()
+			if _, ok := previousStatement.(*ast.IfStmt); ok {
+				if atLeastOneInListsMatch(rightHandSide, []string{"Close"}) {
+					continue
+				}
+			}
+
+			if moreThanOneStatementAbove() {
+				p.addError(t.Pos(), reasonOneCuddleBeforeDefer)
+
+				continue
+			}
+
+			// Be extra nice with RHS, it's common to use this for locks:
+			//  m.Lock()
+			//  defer m.Unlock()
+			previousRHS := p.findRHS(previousStatement)
+			if atLeastOneInListsMatch(rightHandSide, previousRHS) {
+				continue
+			}
+
+			// Allow us to cuddle defer func literals with usages on the line
+			// above. Example:
+			//  b := getB()
+			//  defer func() {
+			//      makesSenseToUse(b)
+			//  }()
+			if c, ok := t.Call.Fun.(*ast.FuncLit); ok {
+				funcLitFirstStmt := append(p.findLHS(c.Body), p.findRHS(c.Body)...)
+
+				if atLeastOneInListsMatch(assignedOnLineAbove, funcLitFirstStmt) {
+					continue
+				}
+			}
+
+			if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) {
+				continue
+			}
+
+			if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) {
+				p.addError(t.Pos(), reasonDeferCuddledWithOtherVar)
+			}
+		case *ast.ForStmt:
+			if len(rightAndLeftHandSide) == 0 {
+				p.addError(t.Pos(), reasonForWithoutCondition)
+
+				continue
+			}
+
+			if moreThanOneStatementAbove() {
+				p.addError(t.Pos(), reasonForWithMoreThanOneCuddle)
+
+				continue
+			}
+
+			// The same rule applies for ranges as for if statements, see
+			// comments regarding variable usages on the line before or as the
+			// first line in the block for details.
+ if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + p.addError(t.Pos(), reasonForCuddledAssignWithoutUse) + } + } + case *ast.GoStmt: + if _, ok := previousStatement.(*ast.GoStmt); ok { + continue + } + + if moreThanOneStatementAbove() { + p.addError(t.Pos(), reasonOneCuddleBeforeGo) + + continue + } + + if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + p.addError(t.Pos(), reasonGoFuncWithoutAssign) + } + case *ast.SwitchStmt: + if moreThanOneStatementAbove() { + p.addError(t.Pos(), reasonSwitchManyCuddles) + + continue + } + + if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { + if len(rightAndLeftHandSide) == 0 { + p.addError(t.Pos(), reasonAnonSwitchCuddled) + } else { + p.addError(t.Pos(), reasonSwitchCuddledWithoutUse) + } + } + case *ast.TypeSwitchStmt: + if moreThanOneStatementAbove() { + p.addError(t.Pos(), reasonTypeSwitchTooCuddled) + + continue + } + + // Allowed to type assert on variable assigned on line above. + if !atLeastOneInListsMatch(rightHandSide, assignedOnLineAbove) { + // Allow type assertion on variables used in the first case + // immediately. + if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + p.addError(t.Pos(), reasonTypeSwitchCuddledWithoutUse) + } + } + case *ast.CaseClause, *ast.CommClause: + // Case clauses will be checked by not allowing leading ot trailing + // whitespaces within the block. There's nothing in the case itself + // that may be cuddled. + default: + p.addWarning(warnStmtNotImplemented, t.Pos(), t) + } + } +} + +// firstBodyStatement returns the first statement inside a body block. This is +// because variables may be cuddled with conditions or statements if it's used +// directly as the first argument inside a body. +// The body will then be parsed as a *ast.BlockStmt (regular block) or as a list +// of []ast.Stmt (case block). +func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { + stmt := allStmt[i] + + // Start by checking if the statement has a body (probably if-statement, + // a range, switch case or similar. Whenever a body is found we start by + // parsing it before moving on in the AST. + statementBody := reflect.Indirect(reflect.ValueOf(stmt)).FieldByName("Body") + + // Some cases allow cuddling depending on the first statement in a body + // of a block or case. If possible extract the first statement. + var firstBodyStatement ast.Node + + if !statementBody.IsValid() { + return firstBodyStatement + } + + switch statementBodyContent := statementBody.Interface().(type) { + case *ast.BlockStmt: + if len(statementBodyContent.List) > 0 { + firstBodyStatement = statementBodyContent.List[0] + + // If the first body statement is a *ast.CaseClause we're + // actually interested in the **next** body to know what's + // inside the first case. + if x, ok := firstBodyStatement.(*ast.CaseClause); ok { + if len(x.Body) > 0 { + firstBodyStatement = x.Body[0] + } + } + } + + p.parseBlockBody(nil, statementBodyContent) + case []ast.Stmt: + // The Body field for an *ast.CaseClause or *ast.CommClause is of type + // []ast.Stmt. We must check leading and trailing whitespaces and then + // pass the statements to parseBlockStatements to parse it's content. + var nextStatement ast.Node + + // Check if there's more statements (potential cases) after the + // current one. 
+ if len(allStmt)-1 > i { + nextStatement = allStmt[i+1] + } + + p.findLeadingAndTrailingWhitespaces(nil, stmt, nextStatement) + p.parseBlockStatements(statementBodyContent) + default: + p.addWarning( + warnBodyStmtTypeNotImplemented, + stmt.Pos(), statementBodyContent, + ) + } + + return firstBodyStatement +} + +func (p *Processor) findLHS(node ast.Node) []string { + var lhs []string + + if node == nil { + return lhs + } + + switch t := node.(type) { + case *ast.BasicLit, *ast.FuncLit, *ast.SelectStmt, + *ast.LabeledStmt, *ast.ForStmt, *ast.SwitchStmt, + *ast.ReturnStmt, *ast.GoStmt, *ast.CaseClause, + *ast.CommClause, *ast.CallExpr, *ast.UnaryExpr, + *ast.BranchStmt, *ast.TypeSpec, *ast.ChanType, + *ast.DeferStmt, *ast.TypeAssertExpr, *ast.RangeStmt: + // Nothing to add to LHS + case *ast.IncDecStmt: + return p.findLHS(t.X) + case *ast.Ident: + return []string{t.Name} + case *ast.AssignStmt: + for _, v := range t.Lhs { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.GenDecl: + for _, v := range t.Specs { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.ValueSpec: + for _, v := range t.Names { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.BlockStmt: + for _, v := range t.List { + lhs = append(lhs, p.findLHS(v)...) + } + case *ast.BinaryExpr: + return append( + p.findLHS(t.X), + p.findLHS(t.Y)..., + ) + case *ast.DeclStmt: + return p.findLHS(t.Decl) + case *ast.IfStmt: + return p.findLHS(t.Cond) + case *ast.TypeSwitchStmt: + return p.findLHS(t.Assign) + case *ast.SendStmt: + return p.findLHS(t.Chan) + default: + if x, ok := maybeX(t); ok { + return p.findLHS(x) + } + + p.addWarning(warnUnknownLHS, t.Pos(), t) + } + + return lhs +} + +func (p *Processor) findRHS(node ast.Node) []string { + var rhs []string + + if node == nil { + return rhs + } + + switch t := node.(type) { + case *ast.BasicLit, *ast.SelectStmt, *ast.ChanType, + *ast.LabeledStmt, *ast.DeclStmt, *ast.BranchStmt, + *ast.TypeSpec, *ast.ArrayType, *ast.CaseClause, + *ast.CommClause, *ast.KeyValueExpr, *ast.MapType, + *ast.FuncLit: + // Nothing to add to RHS + case *ast.Ident: + return []string{t.Name} + case *ast.SelectorExpr: + // TODO: Should this be RHS? + // t.X is needed for defer as of now and t.Sel needed for special + // functions such as Lock() + rhs = p.findRHS(t.X) + rhs = append(rhs, p.findRHS(t.Sel)...) + case *ast.AssignStmt: + for _, v := range t.Rhs { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.CallExpr: + for _, v := range t.Args { + rhs = append(rhs, p.findRHS(v)...) + } + + rhs = append(rhs, p.findRHS(t.Fun)...) + case *ast.CompositeLit: + for _, v := range t.Elts { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.IfStmt: + rhs = append(rhs, p.findRHS(t.Cond)...) + rhs = append(rhs, p.findRHS(t.Init)...) + case *ast.BinaryExpr: + return append( + p.findRHS(t.X), + p.findRHS(t.Y)..., + ) + case *ast.TypeSwitchStmt: + return p.findRHS(t.Assign) + case *ast.ReturnStmt: + for _, v := range t.Results { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.BlockStmt: + for _, v := range t.List { + rhs = append(rhs, p.findRHS(v)...) + } + case *ast.SwitchStmt: + return p.findRHS(t.Tag) + case *ast.GoStmt: + return p.findRHS(t.Call) + case *ast.ForStmt: + return p.findRHS(t.Cond) + case *ast.DeferStmt: + return p.findRHS(t.Call) + case *ast.SendStmt: + return p.findLHS(t.Value) + case *ast.IndexExpr: + rhs = append(rhs, p.findRHS(t.Index)...) + rhs = append(rhs, p.findRHS(t.X)...) + case *ast.SliceExpr: + rhs = append(rhs, p.findRHS(t.X)...) + rhs = append(rhs, p.findRHS(t.Low)...) 
+ rhs = append(rhs, p.findRHS(t.High)...) + default: + if x, ok := maybeX(t); ok { + return p.findRHS(x) + } + + p.addWarning(warnUnknownRHS, t.Pos(), t) + } + + return rhs +} + +func (p *Processor) isShortDecl(node ast.Node) bool { + if t, ok := node.(*ast.AssignStmt); ok { + return t.Tok == token.DEFINE + } + + return false +} + +func (p *Processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { + var blocks []*ast.BlockStmt + + switch t := node.(type) { + case *ast.AssignStmt: + for _, x := range t.Rhs { + blocks = append(blocks, p.findBlockStmt(x)...) + } + case *ast.CallExpr: + blocks = append(blocks, p.findBlockStmt(t.Fun)...) + case *ast.FuncLit: + blocks = append(blocks, t.Body) + case *ast.ExprStmt: + blocks = append(blocks, p.findBlockStmt(t.X)...) + case *ast.ReturnStmt: + for _, x := range t.Results { + blocks = append(blocks, p.findBlockStmt(x)...) + } + case *ast.DeferStmt: + blocks = append(blocks, p.findBlockStmt(t.Call)...) + case *ast.GoStmt: + blocks = append(blocks, p.findBlockStmt(t.Call)...) + } + + return blocks +} + +// maybeX extracts the X field from an AST node and returns it with a true value +// if it exists. If the node doesn't have an X field nil and false is returned. +// Known fields with X that are handled: +// IndexExpr, ExprStmt, SelectorExpr, StarExpr, ParentExpr, TypeAssertExpr, +// RangeStmt, UnaryExpr, ParenExpr, SliceExpr, IncDecStmt. +func maybeX(node interface{}) (ast.Node, bool) { + maybeHasX := reflect.Indirect(reflect.ValueOf(node)).FieldByName("X") + if !maybeHasX.IsValid() { + return nil, false + } + + n, ok := maybeHasX.Interface().(ast.Node) + if !ok { + return nil, false + } + + return n, true +} + +func atLeastOneInListsMatch(listOne, listTwo []string) bool { + sliceToMap := func(s []string) map[string]struct{} { + m := map[string]struct{}{} + + for _, v := range s { + m[v] = struct{}{} + } + + return m + } + + m1 := sliceToMap(listOne) + m2 := sliceToMap(listTwo) + + for k1 := range m1 { + if _, ok := m2[k1]; ok { + return true + } + } + + for k2 := range m2 { + if _, ok := m1[k2]; ok { + return true + } + } + + return false +} + +// findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces +// in a node. The method takes comments in consideration which will make the +// parser more gentle. +// nolint: gocognit +func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { + var ( + allowedLinesBeforeFirstStatement = 1 + commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) + blockStatements []ast.Stmt + blockStartLine int + blockEndLine int + blockStartPos token.Pos + blockEndPos token.Pos + ) + + // Depending on the block type, get the statements in the block and where + // the block starts (and ends). + switch t := stmt.(type) { + case *ast.BlockStmt: + blockStatements = t.List + blockStartPos = t.Lbrace + blockEndPos = t.Rbrace + case *ast.CaseClause: + blockStatements = t.Body + blockStartPos = t.Colon + case *ast.CommClause: + blockStatements = t.Body + blockStartPos = t.Colon + default: + p.addWarning(warnWSNodeTypeNotImplemented, stmt.Pos(), stmt) + + return + } + + // Ignore empty blocks even if they have newlines or just comments. + if len(blockStatements) < 1 { + return + } + + blockStartLine = p.fileSet.Position(blockStartPos).Line + blockEndLine = p.fileSet.Position(blockEndPos).Line + + // No whitespace possible if LBrace and RBrace is on the same line. 
+	if blockStartLine == blockEndLine {
+		return
+	}
+
+	var (
+		firstStatement = blockStatements[0]
+		lastStatement = blockStatements[len(blockStatements)-1]
+		seenCommentGroups = 0
+	)
+
+	// Get the comment related to the first statement; we do allow comments in
+	// the beginning of a block before the first statement.
+	if c, ok := commentMap[firstStatement]; ok {
+		for _, commentGroup := range c {
+			// If the comment group is on the same line as the block start
+			// (LBrace) we should not consider it.
+			if p.nodeStart(commentGroup) == blockStartLine {
+				continue
+			}
+
+			// We only care about comments before our statement from the comment
+			// map. As soon as we hit comments after our statement let's break
+			// out!
+			if commentGroup.Pos() > firstStatement.Pos() {
+				break
+			}
+
+			// We store number of seen comment groups because we allow multiple
+			// groups with a newline between them; but if the first one has WS
+			// before it, we're not going to count it to force an error.
+			if p.config.AllowSeparatedLeadingComment {
+				cg := p.fileSet.Position(commentGroup.Pos()).Line
+
+				if seenCommentGroups > 0 || cg == blockStartLine+1 {
+					seenCommentGroups++
+				}
+			} else {
+				seenCommentGroups++
+			}
+
+			// Support both /* multiline */ and //single line comments
+			for _, c := range commentGroup.List {
+				allowedLinesBeforeFirstStatement += len(strings.Split(c.Text, "\n"))
+			}
+		}
+	}
+
+	// If we allow separated comments, allow for a space after each group
+	if p.config.AllowSeparatedLeadingComment {
+		if seenCommentGroups > 1 {
+			allowedLinesBeforeFirstStatement += seenCommentGroups - 1
+		} else if seenCommentGroups == 1 {
+			allowedLinesBeforeFirstStatement += 1
+		}
+	}
+
+	// And now if the first statement is past the number of allowed lines,
+	// then we had extra WS, possibly before the first comment group.
+	if p.nodeStart(firstStatement) > blockStartLine+allowedLinesBeforeFirstStatement {
+		p.addError(
+			blockStartPos,
+			reasonBlockStartsWithWS,
+		)
+	}
+
+	// If the blockEndLine is not 0 we're a regular block (not case).
+	if blockEndLine != 0 {
+		if p.config.AllowTrailingComment {
+			if lastComment, ok := commentMap[lastStatement]; ok {
+				var (
+					lastCommentGroup = lastComment[len(lastComment)-1]
+					lastCommentLine = lastCommentGroup.List[len(lastCommentGroup.List)-1]
+					countNewlines = 0
+				)
+
+				countNewlines += len(strings.Split(lastCommentLine.Text, "\n"))
+
+				// No newlines between trailing comments and end of block.
+				if p.nodeStart(lastCommentLine)+countNewlines != blockEndLine-1 {
+					return
+				}
+			}
+		}
+
+		if p.nodeEnd(lastStatement) != blockEndLine-1 && !isExampleFunc(ident) {
+			p.addError(blockEndPos, reasonBlockEndsWithWS)
+		}
+
+		return
+	}
+
+	// If we don't have any nextStatement the trailing whitespace will be
+	// handled when parsing the switch. If we do have a next statement we can
+	// see where it starts by getting its colon position. We set the end of the
+	// current case to the position of the next case.
+	switch n := nextStatement.(type) {
+	case *ast.CaseClause:
+		blockEndPos = n.Case
+	case *ast.CommClause:
+		blockEndPos = n.Case
+	default:
+		// No more cases
+		return
+	}
+
+	blockEndLine = p.fileSet.Position(blockEndPos).Line - 1
+
+	var (
+		blockSize = blockEndLine - blockStartLine
+		caseTrailingCommentLines int
+	)
+
+	// TODO: I don't know what comments are bound to in cases. For regular
+	// blocks the last comment is bound to the last statement but for cases
+	// they are bound to the case clause expression.
This will however get us all + // comments and depending on the case expression this gets tricky. + // + // To handle this I get the comment map from the current statement (the case + // itself) and iterate through all groups and all comment within all groups. + // I then get the comments after the last statement but before the next case + // clause and just map each line of comment that way. + for _, commentGroups := range commentMap { + for _, commentGroup := range commentGroups { + for _, comment := range commentGroup.List { + commentLine := p.fileSet.Position(comment.Pos()).Line + + // Ignore comments before the last statement. + if commentLine <= p.nodeStart(lastStatement) { + continue + } + + // Ignore comments after the end of this case. + if commentLine > blockEndLine { + continue + } + + // This allows /* multiline */ comments with newlines as well + // as regular (//) ones + caseTrailingCommentLines += len(strings.Split(comment.Text, "\n")) + } + } + } + + hasTrailingWhitespace := p.nodeEnd(lastStatement)+caseTrailingCommentLines != blockEndLine + + // If the force trailing limit is configured and we don't end with a newline. + if p.config.ForceCaseTrailingWhitespaceLimit > 0 && !hasTrailingWhitespace { + // Check if the block size is too big to miss the newline. + if blockSize >= p.config.ForceCaseTrailingWhitespaceLimit { + p.addError(lastStatement.Pos(), reasonCaseBlockTooCuddly) + } + } +} + +func isExampleFunc(ident *ast.Ident) bool { + return ident != nil && strings.HasPrefix(ident.Name, "Example") +} + +func (p *Processor) nodeStart(node ast.Node) int { + return p.fileSet.Position(node.Pos()).Line +} + +func (p *Processor) nodeEnd(node ast.Node) int { + var line = p.fileSet.Position(node.End()).Line + + if isEmptyLabeledStmt(node) { + return p.fileSet.Position(node.Pos()).Line + } + + return line +} + +func isEmptyLabeledStmt(node ast.Node) bool { + v, ok := node.(*ast.LabeledStmt) + if !ok { + return false + } + + _, empty := v.Stmt.(*ast.EmptyStmt) + + return empty +} + +// Add an error for the file and line number for the current token.Pos with the +// given reason. +func (p *Processor) addError(pos token.Pos, reason string) { + position := p.fileSet.Position(pos) + + p.result = append(p.result, Result{ + FileName: position.Filename, + LineNumber: position.Line, + Position: position, + Reason: reason, + }) +} + +func (p *Processor) addWarning(w string, pos token.Pos, t interface{}) { + position := p.fileSet.Position(pos) + + p.warnings = append(p.warnings, + fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), + ) +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 000000000..c516ea88d --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... 
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 000000000..2fd8693c2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 000000000..49f67608b --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 000000000..db0b35fbe --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. 
+ nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 000000000..ad14b807f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 000000000..d580e32ae --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 000000000..4a5a82160 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 000000000..fc9bea7a3 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 000000000..53bf76efb --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/charithe/durationcheck/.gitignore b/vendor/github.com/charithe/durationcheck/.gitignore new file mode 100644 index 000000000..c2b126a84 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/.gitignore @@ -0,0 +1 @@ +/durationcheck diff --git a/vendor/github.com/charithe/durationcheck/LICENSE b/vendor/github.com/charithe/durationcheck/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/charithe/durationcheck/Makefile b/vendor/github.com/charithe/durationcheck/Makefile new file mode 100644 index 000000000..8e2f81ae8 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/Makefile @@ -0,0 +1,5 @@ +build: + @GO111MODULE=on go build -ldflags '-s -w' -o durationcheck ./cmd/durationcheck/main.go + +install: + @GO111MODULE=on go install -ldflags '-s -w' ./cmd/durationcheck diff --git a/vendor/github.com/charithe/durationcheck/README.md b/vendor/github.com/charithe/durationcheck/README.md new file mode 100644 index 000000000..122edb745 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/README.md @@ -0,0 +1,48 @@ +[![CircleCI](https://circleci.com/gh/charithe/durationcheck.svg?style=svg)](https://circleci.com/gh/charithe/durationcheck) + + + +Duration Check +=============== + +A Go linter to detect cases where two `time.Duration` values are being multiplied in possibly erroneous ways. + +For example, consider the following (highly contrived) function: + +```go +func waitFor(someDuration time.Duration) { + timeToWait := someDuration * time.Second + time.Sleep(timeToWait) +} +``` + +Although the above code would compile without any errors, its runtime behaviour would almost certainly be incorrect. +A caller would reasonably expect `waitFor(5 * time.Second)` to wait for ~5 seconds but they would actually end up +waiting for ~1,388,889 hours. + +The above example is for illustration purposes only. The problem is glaringly obvious in such a simple function +and even the greenest Gopher would discover the issue immediately. However, imagine a much more complicated function +with many more lines and it is not inconceivable that such logic errors could go unnoticed. + +See the [test cases](testdata/src/a/a.go) for more examples of the types of errors detected by the linter. + + +Installation +------------- + +Requires Go 1.11 or above. + +``` +go get -u github.com/charithe/durationcheck/cmd/durationcheck +``` + +Usage +----- + +Invoke `durationcheck` with your package name + +``` +durationcheck ./... +# or +durationcheck github.com/you/yourproject/...
+``` diff --git a/vendor/github.com/charithe/durationcheck/durationcheck.go b/vendor/github.com/charithe/durationcheck/durationcheck.go new file mode 100644 index 000000000..7f7008e91 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/durationcheck.go @@ -0,0 +1,188 @@ +package durationcheck + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "durationcheck", + Doc: "check for two durations multiplied together", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + // if the package does not import time, it can be skipped from analysis + if !hasImport(pass.Pkg, "time") { + return nil, nil + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeTypes := []ast.Node{ + (*ast.BinaryExpr)(nil), + } + + inspect.Preorder(nodeTypes, check(pass)) + + return nil, nil +} + +func hasImport(pkg *types.Package, importPath string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == importPath { + return true + } + } + + return false +} + +// check contains the logic for checking that time.Duration is used correctly in the code being analysed +func check(pass *analysis.Pass) func(ast.Node) { + return func(node ast.Node) { + expr := node.(*ast.BinaryExpr) + // we are only interested in multiplication + if expr.Op != token.MUL { + return + } + + // get the types of the two operands + x, xOK := pass.TypesInfo.Types[expr.X] + y, yOK := pass.TypesInfo.Types[expr.Y] + + if !xOK || !yOK { + return + } + + if isDuration(x.Type) && isDuration(y.Type) { + // check that both sides are acceptable expressions + if isUnacceptableExpr(pass, expr.X) && isUnacceptableExpr(pass, expr.Y) { + pass.Reportf(expr.Pos(), "Multiplication of durations: `%s`", formatNode(expr)) + } + } + } +} + +func isDuration(x types.Type) bool { + return x.String() == "time.Duration" || x.String() == "*time.Duration" +} + +// isUnacceptableExpr returns true if the argument is not an acceptable time.Duration expression +func isUnacceptableExpr(pass *analysis.Pass, expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return false + case *ast.Ident: + return !isAcceptableNestedExpr(pass, e) + case *ast.CallExpr: + return !isAcceptableCast(pass, e) + case *ast.BinaryExpr: + return !isAcceptableNestedExpr(pass, e) + case *ast.UnaryExpr: + return !isAcceptableNestedExpr(pass, e) + case *ast.SelectorExpr: + return !isAcceptableNestedExpr(pass, e) + case *ast.StarExpr: + return !isAcceptableNestedExpr(pass, e) + case *ast.ParenExpr: + return !isAcceptableNestedExpr(pass, e) + case *ast.IndexExpr: + return !isAcceptableNestedExpr(pass, e) + default: + return true + } +} + +// isAcceptableCast returns true if the argument is an acceptable expression cast to time.Duration +func isAcceptableCast(pass *analysis.Pass, e *ast.CallExpr) bool { + // check that there's a single argument + if len(e.Args) != 1 { + return false + } + + // check that the argument is acceptable + if !isAcceptableNestedExpr(pass, e.Args[0]) { + return false + } + + // check for time.Duration cast + selector, ok := e.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + + return isDurationCast(selector) +} + +func isDurationCast(selector *ast.SelectorExpr) bool { + pkg, ok := selector.X.(*ast.Ident) + if !ok { + return false + } + 
+ if pkg.Name != "time" { + return false + } + + return selector.Sel.Name == "Duration" +} + +func isAcceptableNestedExpr(pass *analysis.Pass, n ast.Expr) bool { + switch e := n.(type) { + case *ast.BasicLit: + return true + case *ast.BinaryExpr: + return isAcceptableNestedExpr(pass, e.X) && isAcceptableNestedExpr(pass, e.Y) + case *ast.UnaryExpr: + return isAcceptableNestedExpr(pass, e.X) + case *ast.Ident: + return isAcceptableIdent(pass, e) + case *ast.CallExpr: + t := pass.TypesInfo.TypeOf(e) + return !isDuration(t) + case *ast.SelectorExpr: + return isAcceptableNestedExpr(pass, e.X) && isAcceptableIdent(pass, e.Sel) + case *ast.StarExpr: + return isAcceptableNestedExpr(pass, e.X) + case *ast.ParenExpr: + return isAcceptableNestedExpr(pass, e.X) + case *ast.IndexExpr: + t := pass.TypesInfo.TypeOf(e) + return !isDuration(t) + default: + return false + } +} + +func isAcceptableIdent(pass *analysis.Pass, ident *ast.Ident) bool { + obj := pass.TypesInfo.ObjectOf(ident) + return !isDuration(obj.Type()) +} + +func formatNode(node ast.Node) string { + buf := new(bytes.Buffer) + if err := format.Node(buf, token.NewFileSet(), node); err != nil { + log.Printf("Error formatting expression: %v", err) + return "" + } + + return buf.String() +} + +func printAST(msg string, node ast.Node) { + fmt.Printf(">>> %s:\n%s\n\n\n", msg, formatNode(node)) + ast.Fprint(os.Stdout, nil, node, nil) + fmt.Println("--------------") +} diff --git a/vendor/github.com/charithe/durationcheck/go.mod b/vendor/github.com/charithe/durationcheck/go.mod new file mode 100644 index 000000000..eb058f21d --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/go.mod @@ -0,0 +1,5 @@ +module github.com/charithe/durationcheck + +go 1.14 + +require golang.org/x/tools v0.1.0 diff --git a/vendor/github.com/charithe/durationcheck/go.sum b/vendor/github.com/charithe/durationcheck/go.sum new file mode 100644 index 000000000..21d696a65 --- /dev/null +++ b/vendor/github.com/charithe/durationcheck/go.sum @@ -0,0 +1,26 @@ +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= 
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/chavacava/garif/.gitignore b/vendor/github.com/chavacava/garif/.gitignore new file mode 100644 index 000000000..5dee1052c --- /dev/null +++ b/vendor/github.com/chavacava/garif/.gitignore @@ -0,0 +1,3 @@ +*.test +*.out +.devcontainer/ \ No newline at end of file diff --git a/vendor/github.com/chavacava/garif/LICENSE b/vendor/github.com/chavacava/garif/LICENSE new file mode 100644 index 000000000..2bba73fb7 --- /dev/null +++ b/vendor/github.com/chavacava/garif/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Salvador Cavadini + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/chavacava/garif/README.md b/vendor/github.com/chavacava/garif/README.md new file mode 100644 index 000000000..6a19c6147 --- /dev/null +++ b/vendor/github.com/chavacava/garif/README.md @@ -0,0 +1,52 @@ +# garif + +A GO package to create and manipulate SARIF logs. + +SARIF, from _Static Analysis Results Interchange Format_, is a standard JSON-based format for the output of static analysis tools defined and promoted by [OASIS](https://www.oasis-open.org/). + +Current supported version of the standard is [SARIF-v2.1.0](https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html +). + +## Usage + +The package provides access to every element of the SARIF model, therefore you are free to manipulate it at every detail. 
+ +The package also provides constructor functions (`New...`) and decorator methods (`With...`) that simplify the creation of SARIF files for common use cases. + +Using these constructors and decorators, we can easily create the example SARIF file from the [Microsoft SARIF pages](https://github.com/microsoft/sarif-tutorials/blob/master/docs/1-Introduction.md): + + +```go +import "github.com/chavacava/garif" + +// ... + +rule := garif.NewRule("no-unused-vars"). + WithHelpUri("https://eslint.org/docs/rules/no-unused-vars"). + WithShortDescription("disallow unused variables"). + WithProperties("category", "Variables") + +driver := garif.NewDriver("ESLint"). + WithInformationUri("https://eslint.org"). + WithRules(rule) + +run := garif.NewRun(garif.NewTool(driver)). + WithArtifactsURIs("file:///C:/dev/sarif/sarif-tutorials/samples/Introduction/simple-example.js") + +run.WithResult(rule.Id, "'x' is assigned a value but never used.", "file:///C:/dev/sarif/sarif-tutorials/samples/Introduction/simple-example.js", 1, 5) + +logFile := garif.NewLogFile([]*garif.Run{run}, garif.Version210) + +logFile.Write(os.Stdout) +``` + +## Why this package? +This package was initiated during my work on adding a SARIF output formatter to [`revive`](https://github.com/mgechev/revive). +I tried to use [go-sarif](https://github.com/owenrumney/go-sarif) by [Owen Rumney](https://github.com/owenrumney) but it is too focused on the use case of the static analyzer [tfsec](https://tfsec.dev), so I decided to create a package flexible enough to generate SARIF files in a broader range of cases. + +## More information about SARIF +For more information about SARIF, you can visit the [Oasis Open](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=sarif) site. + + +## Contributing +Of course, contributions are welcome!
\ No newline at end of file diff --git a/vendor/github.com/chavacava/garif/constructors.go b/vendor/github.com/chavacava/garif/constructors.go new file mode 100644 index 000000000..8910e396e --- /dev/null +++ b/vendor/github.com/chavacava/garif/constructors.go @@ -0,0 +1,338 @@ +package garif + +// NewAddress creates a valid Address +func NewAddress() *Address { + return &Address{} +} + +// NewArtifact creates a valid Artifact +func NewArtifact() *Artifact { + return &Artifact{} +} + +// NewArtifactChange creates a valid ArtifactChange +func NewArtifactChange(location *ArtifactLocation, replacements ...*Replacement) *ArtifactChange { + return &ArtifactChange{ + ArtifactLocation: location, + Replacements: replacements, + } +} + +// NewArtifactContent creates a valid ArtifactContent +func NewArtifactContent() *ArtifactContent { + return &ArtifactContent{} +} + +// NewArtifactLocation creates a valid ArtifactLocation +func NewArtifactLocation() *ArtifactLocation { + return &ArtifactLocation{} +} + +// NewAttachment creates a valid Attachment +func NewAttachment(location *ArtifactLocation) *Attachment { + return &Attachment{ArtifactLocation: location} +} + +// NewCodeFlow creates a valid CodeFlow +func NewCodeFlow(threadFlows ...*ThreadFlow) *CodeFlow { + return &CodeFlow{ThreadFlows: threadFlows} +} + +// NewConfigurationOverride creates a valid ConfigurationOverride +func NewConfigurationOverride(configuration *ReportingConfiguration, descriptor *ReportingDescriptorReference) *ConfigurationOverride { + return &ConfigurationOverride{ + Configuration: configuration, + Descriptor: descriptor, + } +} + +// NewConversion creates a valid Conversion +func NewConversion(tool *Tool) *Conversion { + return &Conversion{Tool: tool} +} + +// NewEdge creates a valid Edge +func NewEdge(id, sourceNodeId, targetNodeId string) *Edge { + return &Edge{ + Id: id, + SourceNodeId: sourceNodeId, + TargetNodeId: targetNodeId, + } +} + +// NewEdgeTraversal creates a valid EdgeTraversal +func NewEdgeTraversal(edgeId string) *EdgeTraversal { + return &EdgeTraversal{ + EdgeId: edgeId, + } +} + +// NewException creates a valid Exception +func NewException() *Exception { + return &Exception{} +} + +// NewExternalProperties creates a valid ExternalProperties +func NewExternalProperties() *ExternalProperties { + return &ExternalProperties{} +} + +// NewExternalPropertyFileReference creates a valid ExternalPropertyFileReference +func NewExternalPropertyFileReference() *ExternalPropertyFileReference { + return &ExternalPropertyFileReference{} +} + +// NewExternalPropertyFileReferences creates a valid ExternalPropertyFileReferences +func NewExternalPropertyFileReferences() *ExternalPropertyFileReferences { + return &ExternalPropertyFileReferences{} +} + +// NewFix creates a valid Fix +func NewFix(artifactChanges ...*ArtifactChange) *Fix { + return &Fix{ + ArtifactChanges: artifactChanges, + } +} + +// NewGraph creates a valid Graph +func NewGraph() *Graph { + return &Graph{} +} + +// NewGraphTraversal creates a valid GraphTraversal +func NewGraphTraversal() *GraphTraversal { + return &GraphTraversal{} +} + +// NewInvocation creates a valid Invocation +func NewInvocation(executionSuccessful bool) *Invocation { + return &Invocation{ + ExecutionSuccessful: executionSuccessful, + } +} + +// NewLocation creates a valid Location +func NewLocation() *Location { + return &Location{} +} + +// NewLocationRelationship creates a valid LocationRelationship +func NewLocationRelationship(target int) *LocationRelationship { + return 
&LocationRelationship{ + Target: target, + } +} + +type LogFileVersion string + +const Version210 LogFileVersion = "2.1.0" + +// NewLogFile creates a valid LogFile +func NewLogFile(runs []*Run, version LogFileVersion) *LogFile { + return &LogFile{ + Runs: runs, + Version: version, + } +} + +// NewLogicalLocation creates a valid LogicalLocation +func NewLogicalLocation() *LogicalLocation { + return &LogicalLocation{} +} + +// NewMessage creates a valid Message +func NewMessage() *Message { + return &Message{} +} + +// NewMessageFromText creates a valid Message with the given text +func NewMessageFromText(text string) *Message { + return &Message{ + Text: text, + } +} + +// NewMultiformatMessageString creates a valid MultiformatMessageString +func NewMultiformatMessageString(text string) *MultiformatMessageString { + return &MultiformatMessageString{ + Text: text, + } +} + +// NewNode creates a valid Node +func NewNode(id string) *Node { + return &Node{ + Id: id, + } +} + +// NewNotification creates a valid Notification +func NewNotification(message *Message) *Notification { + return &Notification{ + Message: message, + } +} + +// NewPhysicalLocation creates a valid PhysicalLocation +func NewPhysicalLocation() *PhysicalLocation { + return &PhysicalLocation{} +} + +// NewPropertyBag creates a valid PropertyBag +func NewPropertyBag() *PropertyBag { + return &PropertyBag{} +} + +// NewRectangle creates a valid Rectangle +func NewRectangle() *Rectangle { + return &Rectangle{} +} + +// NewRegion creates a valid Region +func NewRegion() *Region { + return &Region{} +} + +// NewReplacement creates a valid Replacement +func NewReplacement(deletedRegion *Region) *Replacement { + return &Replacement{ + DeletedRegion: deletedRegion, + } +} + +// NewReportingConfiguration creates a valid ReportingConfiguration +func NewReportingConfiguration() *ReportingConfiguration { + return &ReportingConfiguration{} +} + +// NewReportingDescriptor creates a valid ReportingDescriptor +func NewReportingDescriptor(id string) *ReportingDescriptor { + return &ReportingDescriptor{ + Id: id, + } +} + +// NewRule is an alias for NewReportingDescriptor +func NewRule(id string) *ReportingDescriptor { + return NewReportingDescriptor(id) +} + +// NewReportingDescriptorReference creates a valid ReportingDescriptorReference +func NewReportingDescriptorReference() *ReportingDescriptorReference { + return &ReportingDescriptorReference{} +} + +// NewReportingDescriptorRelationship creates a valid ReportingDescriptorRelationship +func NewReportingDescriptorRelationship(target *ReportingDescriptorReference) *ReportingDescriptorRelationship { + return &ReportingDescriptorRelationship{ + Target: target, + } +} + +// NewResult creates a valid Result +func NewResult(message *Message) *Result { + return &Result{ + Message: message, + } +} + +// NewResultProvenance creates a valid ResultProvenance +func NewResultProvenance() *ResultProvenance { + return &ResultProvenance{} +} + +// NewRun creates a valid Run +func NewRun(tool *Tool) *Run { + return &Run{ + Tool: tool, + } +} + +// NewRunAutomationDetails creates a valid RunAutomationDetails +func NewRunAutomationDetails() *RunAutomationDetails { + return &RunAutomationDetails{} +} + +// New creates a valid +func NewSpecialLocations() *SpecialLocations { + return &SpecialLocations{} +} + +// NewStack creates a valid Stack +func NewStack(frames ...*StackFrame) *Stack { + return &Stack{ + Frames: frames, + } +} + +// NewStackFrame creates a valid StackFrame +func NewStackFrame() *StackFrame { 
+ return &StackFrame{} +} + +// NewSuppression creates a valid Suppression +func NewSuppression(kind string) *Suppression { + return &Suppression{ + Kind: kind, + } +} + +// NewThreadFlow creates a valid ThreadFlow +func NewThreadFlow(locations []*ThreadFlowLocation) *ThreadFlow { + return &ThreadFlow{ + Locations: locations, + } +} + +// NewThreadFlowLocation creates a valid ThreadFlowLocation +func NewThreadFlowLocation() *ThreadFlowLocation { + return &ThreadFlowLocation{} +} + +// NewTool creates a valid Tool +func NewTool(driver *ToolComponent) *Tool { + return &Tool{ + Driver: driver, + } +} + +// NewToolComponent creates a valid ToolComponent +func NewToolComponent(name string) *ToolComponent { + return &ToolComponent{ + Name: name, + } +} + +// NewDriver is an alias for NewToolComponent +func NewDriver(name string) *ToolComponent { + return NewToolComponent(name) +} + +// NewToolComponentReference creates a valid ToolComponentReference +func NewToolComponentReference() *ToolComponentReference { + return &ToolComponentReference{} +} + +// NewTranslationMetadata creates a valid TranslationMetadata +func NewTranslationMetadata(name string) *TranslationMetadata { + return &TranslationMetadata{ + Name: name, + } +} + +// NewVersionControlDetails creates a valid VersionControlDetails +func NewVersionControlDetails(repositoryUri string) *VersionControlDetails { + return &VersionControlDetails{ + RepositoryUri: repositoryUri, + } +} + +// NewWebRequest creates a valid WebRequest +func NewWebRequest() *WebRequest { + return &WebRequest{} +} + +// NewWebResponse creates a valid WebResponse +func NewWebResponse() *WebResponse { + return &WebResponse{} +} diff --git a/vendor/github.com/chavacava/garif/decorators.go b/vendor/github.com/chavacava/garif/decorators.go new file mode 100644 index 000000000..00b599fb8 --- /dev/null +++ b/vendor/github.com/chavacava/garif/decorators.go @@ -0,0 +1,94 @@ +package garif + +// WithLineColumn sets a physical location with the given line and column +func (l *Location) WithLineColumn(line, column int) *Location { + if l.PhysicalLocation == nil { + l.PhysicalLocation = NewPhysicalLocation() + } + + l.PhysicalLocation.Region = NewRegion() + l.PhysicalLocation.Region.StartLine = line + l.PhysicalLocation.Region.StartColumn = column + + return l +} + +// WithURI sets a physical location with the given URI +func (l *Location) WithURI(uri string) *Location { + if l.PhysicalLocation == nil { + l.PhysicalLocation = NewPhysicalLocation() + } + + l.PhysicalLocation.ArtifactLocation = NewArtifactLocation() + l.PhysicalLocation.ArtifactLocation.Uri = uri + + return l +} + +// WithKeyValue sets (overwrites) the value of the given key +func (b PropertyBag) WithKeyValue(key string, value interface{}) PropertyBag { + b[key] = value + return b +} + +// WithHelpUri sets the help URI for this ReportingDescriptor +func (r *ReportingDescriptor) WithHelpUri(uri string) *ReportingDescriptor { + r.HelpUri = uri + return r +} + +// WithProperties adds the key & value to the properties of this ReportingDescriptor +func (r *ReportingDescriptor) WithProperties(key string, value interface{}) *ReportingDescriptor { + if r.Properties == nil { + r.Properties = NewPropertyBag() + } + + r.Properties.WithKeyValue(key, value) + + return r +} + +// WithArtifactsURIs adds the given URI as artifacts of this Run +func (r *Run) WithArtifactsURIs(uris ...string) *Run { + if r.Artifacts == nil { + r.Artifacts = []*Artifact{} + } + + for _, uri := range uris { + a := NewArtifact() + a.Location = 
NewArtifactLocation() + a.Location.Uri = uri + r.Artifacts = append(r.Artifacts, a) + } + + return r +} + +// WithResult adds a result to this Run +func (r *Run) WithResult(ruleId string, message string, uri string, line int, column int) *Run { + if r.Results == nil { + r.Results = []*Result{} + } + + msg := NewMessage() + msg.Text = message + result := NewResult(msg) + location := NewLocation().WithURI(uri).WithLineColumn(line, column) + + result.Locations = append(result.Locations, location) + result.RuleId = ruleId + r.Results = append(r.Results, result) + return r +} + +// WithInformationUri sets the information URI +func (t *ToolComponent) WithInformationUri(uri string) *ToolComponent { + t.InformationUri = uri + return t +} + +// WithRules sets (overwrites) the rules +func (t *ToolComponent) WithRules(rules ...*ReportingDescriptor) *ToolComponent { + t.Rules = rules + return t +} diff --git a/vendor/github.com/chavacava/garif/doc.go b/vendor/github.com/chavacava/garif/doc.go new file mode 100644 index 000000000..50fa6dfe5 --- /dev/null +++ b/vendor/github.com/chavacava/garif/doc.go @@ -0,0 +1,11 @@ +// Package garif defines all the GO structures required to model a SARIF log file. +// These structures were created using the JSON-schema sarif-schema-2.1.0.json of SARIF logfiles +// available at https://github.com/oasis-tcs/sarif-spec/tree/master/Schemata. +// +// The package provides constructors for all structures (see constructors.go) These constructors +// ensure that the returned structure instantiation is valid with respect to the JSON schema and +// should be used in place of plain structure instantiation. +// The root structure is LogFile. +// +// The package provides utility decorators for the most commonly used structures (see decorators.go) +package garif diff --git a/vendor/github.com/chavacava/garif/go.mod b/vendor/github.com/chavacava/garif/go.mod new file mode 100644 index 000000000..4c8a7f5ce --- /dev/null +++ b/vendor/github.com/chavacava/garif/go.mod @@ -0,0 +1,5 @@ +module github.com/chavacava/garif + +go 1.16 + +require github.com/stretchr/testify v1.7.0 diff --git a/vendor/github.com/chavacava/garif/go.sum b/vendor/github.com/chavacava/garif/go.sum new file mode 100644 index 000000000..acb88a48f --- /dev/null +++ b/vendor/github.com/chavacava/garif/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/chavacava/garif/io.go b/vendor/github.com/chavacava/garif/io.go new file mode 100644 index 000000000..ce5719c96 --- /dev/null +++ 
b/vendor/github.com/chavacava/garif/io.go @@ -0,0 +1,26 @@ +package garif + +import ( + "encoding/json" + "io" +) + +// Write writes the JSON +func (l *LogFile) Write(w io.Writer) error { + marshal, err := json.Marshal(l) + if err != nil { + return err + } + _, err = w.Write(marshal) + return err +} + +// PrettyWrite writes indented JSON +func (l *LogFile) PrettyWrite(w io.Writer) error { + marshal, err := json.MarshalIndent(l, "", " ") + if err != nil { + return err + } + _, err = w.Write(marshal) + return err +} diff --git a/vendor/github.com/chavacava/garif/models.go b/vendor/github.com/chavacava/garif/models.go new file mode 100644 index 000000000..3668436a3 --- /dev/null +++ b/vendor/github.com/chavacava/garif/models.go @@ -0,0 +1,1486 @@ +package garif + +// Address A physical or virtual address, or a range of addresses, in an 'addressable region' (memory or a binary file). +type Address struct { + + // The address expressed as a byte offset from the start of the addressable region. + AbsoluteAddress int `json:"absoluteAddress,omitempty"` + + // A human-readable fully qualified name that is associated with the address. + FullyQualifiedName string `json:"fullyQualifiedName,omitempty"` + + // The index within run.addresses of the cached object for this address. + Index int `json:"index,omitempty"` + + // An open-ended string that identifies the address kind. + // 'data', 'function', 'header','instruction', 'module', 'page', 'section', + // 'segment', 'stack', 'stackFrame', 'table' are well-known values. + Kind string `json:"kind,omitempty"` + + // The number of bytes in this range of addresses. + Length int `json:"length,omitempty"` + + // A name that is associated with the address, e.g., '.text'. + Name string `json:"name,omitempty"` + + // The byte offset of this address from the absolute or relative address of the parent object. + OffsetFromParent int `json:"offsetFromParent,omitempty"` + + // The index within run.addresses of the parent object. + ParentIndex int `json:"parentIndex,omitempty"` + + // Key/value pairs that provide additional information about the address. + Properties *PropertyBag `json:"properties,omitempty"` + + // The address expressed as a byte offset from the absolute address of the top-most parent object. + RelativeAddress int `json:"relativeAddress,omitempty"` +} + +// Artifact A single artifact. In some cases, this artifact might be nested within another artifact. +type Artifact struct { + + // The contents of the artifact. + Contents *ArtifactContent `json:"contents,omitempty"` + + // A short description of the artifact. + Description *Message `json:"description,omitempty"` + + // Specifies the encoding for an artifact object that refers to a text file. + Encoding string `json:"encoding,omitempty"` + + // A dictionary, each of whose keys is the name of a hash function and each of whose values is + // the hashed value of the artifact produced by the specified hash function. + Hashes map[string]string `json:"hashes,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the artifact was most recently modified. + // See "Date/time properties" in the SARIF spec for the required format. + LastModifiedTimeUtc string `json:"lastModifiedTimeUtc,omitempty"` + + // The length of the artifact in bytes. + Length int `json:"length,omitempty"` + + // The location of the artifact. + Location *ArtifactLocation `json:"location,omitempty"` + + // The MIME type (RFC 2045) of the artifact. 
+ MimeType string `json:"mimeType,omitempty"` + + // The offset in bytes of the artifact within its containing artifact. + Offset int `json:"offset,omitempty"` + + // Identifies the index of the immediate parent of the artifact, if this artifact is nested. + ParentIndex int `json:"parentIndex,omitempty"` + + // Key/value pairs that provide additional information about the artifact. + Properties *PropertyBag `json:"properties,omitempty"` + + // The role or roles played by the artifact in the analysis. + Roles []interface{} `json:"roles,omitempty"` + + // Specifies the source language for any artifact object that refers to a text file that contains source code. + SourceLanguage string `json:"sourceLanguage,omitempty"` +} + +// ArtifactChange A change to a single artifact. +type ArtifactChange struct { + + // The location of the artifact to change. + ArtifactLocation *ArtifactLocation `json:"artifactLocation"` + + // Key/value pairs that provide additional information about the change. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of replacement objects, each of which represents the replacement of a single region in a + // single artifact specified by 'artifactLocation'. + Replacements []*Replacement `json:"replacements"` +} + +// ArtifactContent Represents the contents of an artifact. +type ArtifactContent struct { + + // MIME Base64-encoded content from a binary artifact, or from a text artifact in its original encoding. + Binary string `json:"binary,omitempty"` + + // Key/value pairs that provide additional information about the artifact content. + Properties *PropertyBag `json:"properties,omitempty"` + + // An alternate rendered representation of the artifact (e.g., a decompiled representation of a binary region). + Rendered *MultiformatMessageString `json:"rendered,omitempty"` + + // UTF-8-encoded content from a text artifact. + Text string `json:"text,omitempty"` +} + +// ArtifactLocation Specifies the location of an artifact. +type ArtifactLocation struct { + + // A short description of the artifact location. + Description *Message `json:"description,omitempty"` + + // The index within the run artifacts array of the artifact object associated with the artifact location. + Index int `json:"index,omitempty"` + + // Key/value pairs that provide additional information about the artifact location. + Properties *PropertyBag `json:"properties,omitempty"` + + // A string containing a valid relative or absolute URI. + Uri string `json:"uri,omitempty"` + + // A string which indirectly specifies the absolute URI with respect to which a relative URI in the "uri" property is interpreted. + UriBaseId string `json:"uriBaseId,omitempty"` +} + +// Attachment An artifact relevant to a result. +type Attachment struct { + + // The location of the attachment. + ArtifactLocation *ArtifactLocation `json:"artifactLocation"` + + // A message describing the role played by the attachment. + Description *Message `json:"description,omitempty"` + + // Key/value pairs that provide additional information about the attachment. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of rectangles specifying areas of interest within the image. + Rectangles []*Rectangle `json:"rectangles,omitempty"` + + // An array of regions of interest within the attachment. + Regions []*Region `json:"regions,omitempty"` +} + +// CodeFlow A set of threadFlows which together describe a pattern of code execution relevant to detecting a result. 
+type CodeFlow struct { + + // A message relevant to the code flow. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the code flow. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of one or more unique threadFlow objects, each of which describes the progress of a program + // through a thread of execution. + ThreadFlows []*ThreadFlow `json:"threadFlows"` +} + +// ConfigurationOverride Information about how a specific rule or notification was reconfigured at runtime. +type ConfigurationOverride struct { + + // Specifies how the rule or notification was configured during the scan. + Configuration *ReportingConfiguration `json:"configuration"` + + // A reference used to locate the descriptor whose configuration was overridden. + Descriptor *ReportingDescriptorReference `json:"descriptor"` + + // Key/value pairs that provide additional information about the configuration override. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Conversion Describes how a converter transformed the output of a static analysis tool from the analysis tool's native output format into the SARIF format. +type Conversion struct { + + // The locations of the analysis tool's per-run log files. + AnalysisToolLogFiles []*ArtifactLocation `json:"analysisToolLogFiles,omitempty"` + + // An invocation object that describes the invocation of the converter. + Invocation *Invocation `json:"invocation,omitempty"` + + // Key/value pairs that provide additional information about the conversion. + Properties *PropertyBag `json:"properties,omitempty"` + + // A tool object that describes the converter. + Tool *Tool `json:"tool"` +} + +// Edge Represents a directed edge in a graph. +type Edge struct { + + // A string that uniquely identifies the edge within its graph. + Id string `json:"id"` + + // A short description of the edge. + Label *Message `json:"label,omitempty"` + + // Key/value pairs that provide additional information about the edge. + Properties *PropertyBag `json:"properties,omitempty"` + + // Identifies the source node (the node at which the edge starts). + SourceNodeId string `json:"sourceNodeId"` + + // Identifies the target node (the node at which the edge ends). + TargetNodeId string `json:"targetNodeId"` +} + +// EdgeTraversal Represents the traversal of a single edge during a graph traversal. +type EdgeTraversal struct { + + // Identifies the edge being traversed. + EdgeId string `json:"edgeId"` + + // The values of relevant expressions after the edge has been traversed. + FinalState map[string]*MultiformatMessageString `json:"finalState,omitempty"` + + // A message to display to the user as the edge is traversed. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the edge traversal. + Properties *PropertyBag `json:"properties,omitempty"` + + // The number of edge traversals necessary to return from a nested graph. + StepOverEdgeCount int `json:"stepOverEdgeCount,omitempty"` +} + +// Exception Describes a runtime exception encountered during the execution of an analysis tool. +type Exception struct { + + // An array of exception objects each of which is considered a cause of this exception. + InnerExceptions []*Exception `json:"innerExceptions,omitempty"` + + // A string that identifies the kind of exception, for example, the fully qualified type name of an object that was thrown, or the symbolic name of a signal. 
+ Kind string `json:"kind,omitempty"` + + // A message that describes the exception. + Message string `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the exception. + Properties *PropertyBag `json:"properties,omitempty"` + + // The sequence of function calls leading to the exception. + Stack *Stack `json:"stack,omitempty"` +} + +// ExternalProperties The top-level element of an external property file. +type ExternalProperties struct { + + // Addresses that will be merged with a separate run. + Addresses []*Address `json:"addresses,omitempty"` + + // An array of artifact objects that will be merged with a separate run. + Artifacts []*Artifact `json:"artifacts,omitempty"` + + // A conversion object that will be merged with a separate run. + Conversion *Conversion `json:"conversion,omitempty"` + + // The analysis tool object that will be merged with a separate run. + Driver *ToolComponent `json:"driver,omitempty"` + + // Tool extensions that will be merged with a separate run. + Extensions []*ToolComponent `json:"extensions,omitempty"` + + // Key/value pairs that provide additional information that will be merged with a separate run. + ExternalizedProperties *PropertyBag `json:"externalizedProperties,omitempty"` + + // An array of graph objects that will be merged with a separate run. + Graphs []*Graph `json:"graphs,omitempty"` + + // A stable, unique identifer for this external properties object, in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // Describes the invocation of the analysis tool that will be merged with a separate run. + Invocations []*Invocation `json:"invocations,omitempty"` + + // An array of logical locations such as namespaces, types or functions that will be merged with a separate run. + LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"` + + // Tool policies that will be merged with a separate run. + Policies []*ToolComponent `json:"policies,omitempty"` + + // Key/value pairs that provide additional information about the external properties. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of result objects that will be merged with a separate run. + Results []*Result `json:"results,omitempty"` + + // A stable, unique identifer for the run associated with this external properties object, in the form of a GUID. + RunGuid string `json:"runGuid,omitempty"` + + // The URI of the JSON schema corresponding to the version of the external property file format. + Schema string `json:"schema,omitempty"` + + // Tool taxonomies that will be merged with a separate run. + Taxonomies []*ToolComponent `json:"taxonomies,omitempty"` + + // An array of threadFlowLocation objects that will be merged with a separate run. + ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations,omitempty"` + + // Tool translations that will be merged with a separate run. + Translations []*ToolComponent `json:"translations,omitempty"` + + // The SARIF format version of this external properties object. + Version interface{} `json:"version,omitempty"` + + // Requests that will be merged with a separate run. + WebRequests []*WebRequest `json:"webRequests,omitempty"` + + // Responses that will be merged with a separate run. + WebResponses []*WebResponse `json:"webResponses,omitempty"` +} + +// ExternalPropertyFileReference Contains information that enables a SARIF consumer to locate the external property file that contains the value of an externalized property associated with the run. 
+type ExternalPropertyFileReference struct { + + // A stable, unique identifer for the external property file in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // A non-negative integer specifying the number of items contained in the external property file. + ItemCount int `json:"itemCount,omitempty"` + + // The location of the external property file. + Location *ArtifactLocation `json:"location,omitempty"` + + // Key/value pairs that provide additional information about the external property file. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// ExternalPropertyFileReferences References to external property files that should be inlined with the content of a root log file. +type ExternalPropertyFileReferences struct { + + // An array of external property files containing run.addresses arrays to be merged with the root log file. + Addresses []*ExternalPropertyFileReference `json:"addresses,omitempty"` + + // An array of external property files containing run.artifacts arrays to be merged with the root log file. + Artifacts []*ExternalPropertyFileReference `json:"artifacts,omitempty"` + + // An external property file containing a run.conversion object to be merged with the root log file. + Conversion *ExternalPropertyFileReference `json:"conversion,omitempty"` + + // An external property file containing a run.driver object to be merged with the root log file. + Driver *ExternalPropertyFileReference `json:"driver,omitempty"` + + // An array of external property files containing run.extensions arrays to be merged with the root log file. + Extensions []*ExternalPropertyFileReference `json:"extensions,omitempty"` + + // An external property file containing a run.properties object to be merged with the root log file. + ExternalizedProperties *ExternalPropertyFileReference `json:"externalizedProperties,omitempty"` + + // An array of external property files containing a run.graphs object to be merged with the root log file. + Graphs []*ExternalPropertyFileReference `json:"graphs,omitempty"` + + // An array of external property files containing run.invocations arrays to be merged with the root log file. + Invocations []*ExternalPropertyFileReference `json:"invocations,omitempty"` + + // An array of external property files containing run.logicalLocations arrays to be merged with the root log file. + LogicalLocations []*ExternalPropertyFileReference `json:"logicalLocations,omitempty"` + + // An array of external property files containing run.policies arrays to be merged with the root log file. + Policies []*ExternalPropertyFileReference `json:"policies,omitempty"` + + // Key/value pairs that provide additional information about the external property files. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of external property files containing run.results arrays to be merged with the root log file. + Results []*ExternalPropertyFileReference `json:"results,omitempty"` + + // An array of external property files containing run.taxonomies arrays to be merged with the root log file. + Taxonomies []*ExternalPropertyFileReference `json:"taxonomies,omitempty"` + + // An array of external property files containing run.threadFlowLocations arrays to be merged with the root log file. + ThreadFlowLocations []*ExternalPropertyFileReference `json:"threadFlowLocations,omitempty"` + + // An array of external property files containing run.translations arrays to be merged with the root log file. 
+ Translations []*ExternalPropertyFileReference `json:"translations,omitempty"` + + // An array of external property files containing run.requests arrays to be merged with the root log file. + WebRequests []*ExternalPropertyFileReference `json:"webRequests,omitempty"` + + // An array of external property files containing run.responses arrays to be merged with the root log file. + WebResponses []*ExternalPropertyFileReference `json:"webResponses,omitempty"` +} + +// Fix A proposed fix for the problem represented by a result object. +// A fix specifies a set of artifacts to modify. For each artifact, +// it specifies a set of bytes to remove, and provides a set of new bytes to replace them. +type Fix struct { + + // One or more artifact changes that comprise a fix for a result. + ArtifactChanges []*ArtifactChange `json:"artifactChanges"` + + // A message that describes the proposed fix, enabling viewers to present the proposed change to an end user. + Description *Message `json:"description,omitempty"` + + // Key/value pairs that provide additional information about the fix. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Graph A network of nodes and directed edges that describes some aspect of the +// structure of the code (for example, a call graph). +type Graph struct { + + // A description of the graph. + Description *Message `json:"description,omitempty"` + + // An array of edge objects representing the edges of the graph. + Edges []*Edge `json:"edges,omitempty"` + + // An array of node objects representing the nodes of the graph. + Nodes []*Node `json:"nodes,omitempty"` + + // Key/value pairs that provide additional information about the graph. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// GraphTraversal Represents a path through a graph. +type GraphTraversal struct { + + // A description of this graph traversal. + Description *Message `json:"description,omitempty"` + + // The sequences of edges traversed by this graph traversal. + EdgeTraversals []*EdgeTraversal `json:"edgeTraversals,omitempty"` + + // Values of relevant expressions at the start of the graph traversal that remain constant for the graph traversal. + ImmutableState map[string]*MultiformatMessageString `json:"immutableState,omitempty"` + + // Values of relevant expressions at the start of the graph traversal that may change during graph traversal. + InitialState map[string]*MultiformatMessageString `json:"initialState,omitempty"` + + // Key/value pairs that provide additional information about the graph traversal. + Properties *PropertyBag `json:"properties,omitempty"` + + // The index within the result.graphs to be associated with the result. + ResultGraphIndex int `json:"resultGraphIndex,omitempty"` + + // The index within the run.graphs to be associated with the result. + RunGraphIndex int `json:"runGraphIndex,omitempty"` +} + +// Invocation The runtime environment of the analysis tool run. +type Invocation struct { + + // The account under which the invocation occurred. + Account string `json:"account,omitempty"` + + // An array of strings, containing in order the command line arguments passed to the tool from the operating system. + Arguments []string `json:"arguments,omitempty"` + + // The command line used to invoke the tool. + CommandLine string `json:"commandLine,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the invocation ended. See "Date/time properties" in the SARIF spec for the required format. 
+ EndTimeUtc string `json:"endTimeUtc,omitempty"` + + // The environment variables associated with the analysis tool process, expressed as key/value pairs. + EnvironmentVariables map[string]string `json:"environmentVariables,omitempty"` + + // An absolute URI specifying the location of the executable that was invoked. + ExecutableLocation *ArtifactLocation `json:"executableLocation,omitempty"` + + // Specifies whether the tool's execution completed successfully. + ExecutionSuccessful bool `json:"executionSuccessful"` + + // The process exit code. + ExitCode int `json:"exitCode,omitempty"` + + // The reason for the process exit. + ExitCodeDescription string `json:"exitCodeDescription,omitempty"` + + // The name of the signal that caused the process to exit. + ExitSignalName string `json:"exitSignalName,omitempty"` + + // The numeric value of the signal that caused the process to exit. + ExitSignalNumber int `json:"exitSignalNumber,omitempty"` + + // The machine on which the invocation occurred. + Machine string `json:"machine,omitempty"` + + // An array of configurationOverride objects that describe notifications related runtime overrides. + NotificationConfigurationOverrides []*ConfigurationOverride `json:"notificationConfigurationOverrides,omitempty"` + + // The id of the process in which the invocation occurred. + ProcessId int `json:"processId,omitempty"` + + // The reason given by the operating system that the process failed to start. + ProcessStartFailureMessage string `json:"processStartFailureMessage,omitempty"` + + // Key/value pairs that provide additional information about the invocation. + Properties *PropertyBag `json:"properties,omitempty"` + + // The locations of any response files specified on the tool's command line. + ResponseFiles []*ArtifactLocation `json:"responseFiles,omitempty"` + + // An array of configurationOverride objects that describe rules related runtime overrides. + RuleConfigurationOverrides []*ConfigurationOverride `json:"ruleConfigurationOverrides,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the invocation started. See "Date/time properties" in the SARIF spec for the required format. + StartTimeUtc string `json:"startTimeUtc,omitempty"` + + // A file containing the standard error stream from the process that was invoked. + Stderr *ArtifactLocation `json:"stderr,omitempty"` + + // A file containing the standard input stream to the process that was invoked. + Stdin *ArtifactLocation `json:"stdin,omitempty"` + + // A file containing the standard output stream from the process that was invoked. + Stdout *ArtifactLocation `json:"stdout,omitempty"` + + // A file containing the interleaved standard output and standard error stream from the process that was invoked. + StdoutStderr *ArtifactLocation `json:"stdoutStderr,omitempty"` + + // A list of conditions detected by the tool that are relevant to the tool's configuration. + ToolConfigurationNotifications []*Notification `json:"toolConfigurationNotifications,omitempty"` + + // A list of runtime conditions detected by the tool during the analysis. + ToolExecutionNotifications []*Notification `json:"toolExecutionNotifications,omitempty"` + + // The working directory for the invocation. + WorkingDirectory *ArtifactLocation `json:"workingDirectory,omitempty"` +} + +// Location A location within a programming artifact. +type Location struct { + + // A set of regions relevant to the location. 
+ Annotations []*Region `json:"annotations,omitempty"` + + // Value that distinguishes this location from all other locations within a single result object. + Id int `json:"id,omitempty"` + + // The logical locations associated with the result. + LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"` + + // A message relevant to the location. + Message *Message `json:"message,omitempty"` + + // Identifies the artifact and region. + PhysicalLocation *PhysicalLocation `json:"physicalLocation,omitempty"` + + // Key/value pairs that provide additional information about the location. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of objects that describe relationships between this location and others. + Relationships []*LocationRelationship `json:"relationships,omitempty"` +} + +// LocationRelationship Information about the relation of one location to another. +type LocationRelationship struct { + + // A description of the location relationship. + Description *Message `json:"description,omitempty"` + + // A set of distinct strings that categorize the relationship. Well-known kinds include 'includes', 'isIncludedBy' and 'relevant'. + Kinds []string `json:"kinds,omitempty"` + + // Key/value pairs that provide additional information about the location relationship. + Properties *PropertyBag `json:"properties,omitempty"` + + // A reference to the related location. + Target int `json:"target"` +} + +// LogFile Static Analysis Results Format (SARIF) Version 2.1.0 JSON Schema. +type LogFile struct { + + // References to external property files that share data between runs. + InlineExternalProperties []*ExternalProperties `json:"inlineExternalProperties,omitempty"` + + // Key/value pairs that provide additional information about the log file. + Properties *PropertyBag `json:"properties,omitempty"` + + // The set of runs contained in this log file. + Runs []*Run `json:"runs"` + + // The URI of the JSON schema corresponding to the version. + Schema string `json:"$schema,omitempty"` + + // The SARIF format version of this log file. + Version interface{} `json:"version"` +} + +// LogicalLocation A logical location of a construct that produced a result. +type LogicalLocation struct { + + // The machine-readable name for the logical location, such as a mangled function name provided by a C++ compiler that encodes calling convention, return type and other details along with the function name. + DecoratedName string `json:"decoratedName,omitempty"` + + // The human-readable fully qualified name of the logical location. + FullyQualifiedName string `json:"fullyQualifiedName,omitempty"` + + // The index within the logical locations array. + Index int `json:"index,omitempty"` + + // The type of construct this logical location component refers to. Should be one of 'function', 'member', 'module', 'namespace', 'parameter', 'resource', 'returnType', 'type', 'variable', 'object', 'array', 'property', 'value', 'element', 'text', 'attribute', 'comment', 'declaration', 'dtd' or 'processingInstruction', if any of those accurately describe the construct. + Kind string `json:"kind,omitempty"` + + // Identifies the construct in which the result occurred. For example, this property might contain the name of a class or a method. + Name string `json:"name,omitempty"` + + // Identifies the index of the immediate parent of the construct in which the result was detected. For example, this property might point to a logical location that represents the namespace that holds a type. 
+ ParentIndex int `json:"parentIndex,omitempty"` + + // Key/value pairs that provide additional information about the logical location. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Message Encapsulates a message intended to be read by the end user. +type Message struct { + + // An array of strings to substitute into the message string. + Arguments []string `json:"arguments,omitempty"` + + // The identifier for this message. + Id string `json:"id,omitempty"` + + // A Markdown message string. + Markdown string `json:"markdown,omitempty"` + + // Key/value pairs that provide additional information about the message. + Properties *PropertyBag `json:"properties,omitempty"` + + // A plain text message string. + Text string `json:"text,omitempty"` +} + +// MultiformatMessageString A message string or message format string rendered in multiple formats. +type MultiformatMessageString struct { + + // A Markdown message string or format string. + Markdown string `json:"markdown,omitempty"` + + // Key/value pairs that provide additional information about the message. + Properties *PropertyBag `json:"properties,omitempty"` + + // A plain text message string or format string. + Text string `json:"text"` +} + +// Node Represents a node in a graph. +type Node struct { + + // Array of child nodes. + Children []*Node `json:"children,omitempty"` + + // A string that uniquely identifies the node within its graph. + Id string `json:"id"` + + // A short description of the node. + Label *Message `json:"label,omitempty"` + + // A code location associated with the node. + Location *Location `json:"location,omitempty"` + + // Key/value pairs that provide additional information about the node. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Notification Describes a condition relevant to the tool itself, as opposed to being relevant to a target being analyzed by the tool. +type Notification struct { + + // A reference used to locate the rule descriptor associated with this notification. + AssociatedRule *ReportingDescriptorReference `json:"associatedRule,omitempty"` + + // A reference used to locate the descriptor relevant to this notification. + Descriptor *ReportingDescriptorReference `json:"descriptor,omitempty"` + + // The runtime exception, if any, relevant to this notification. + Exception *Exception `json:"exception,omitempty"` + + // A value specifying the severity level of the notification. + Level interface{} `json:"level,omitempty"` + + // The locations relevant to this notification. + Locations []*Location `json:"locations,omitempty"` + + // A message that describes the condition that was encountered. + Message *Message `json:"message"` + + // Key/value pairs that provide additional information about the notification. + Properties *PropertyBag `json:"properties,omitempty"` + + // The thread identifier of the code that generated the notification. + ThreadId int `json:"threadId,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the analysis tool generated the notification. + TimeUtc string `json:"timeUtc,omitempty"` +} + +// PhysicalLocation A physical location relevant to a result. Specifies a reference to a programming artifact together with a range of bytes or characters within that artifact. +type PhysicalLocation struct { + + // The address of the location. + Address *Address `json:"address,omitempty"` + + // The location of the artifact. 
+ ArtifactLocation *ArtifactLocation `json:"artifactLocation,omitempty"` + + // Specifies a portion of the artifact that encloses the region. Allows a viewer to display additional context around the region. + ContextRegion *Region `json:"contextRegion,omitempty"` + + // Key/value pairs that provide additional information about the physical location. + Properties *PropertyBag `json:"properties,omitempty"` + + // Specifies a portion of the artifact. + Region *Region `json:"region,omitempty"` +} + +type PropertyBag map[string]interface{} + +/* +// PropertyBag Key/value pairs that provide additional information about the object. +type PropertyBag struct { + AdditionalProperties map[string]interface{} `json:"-,omitempty"` + + // A set of distinct strings that provide additional information. + Tags []string `json:"tags,omitempty"` +} +*/ +// Rectangle An area within an image. +type Rectangle struct { + + // The Y coordinate of the bottom edge of the rectangle, measured in the image's natural units. + Bottom float64 `json:"bottom,omitempty"` + + // The X coordinate of the left edge of the rectangle, measured in the image's natural units. + Left float64 `json:"left,omitempty"` + + // A message relevant to the rectangle. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the rectangle. + Properties *PropertyBag `json:"properties,omitempty"` + + // The X coordinate of the right edge of the rectangle, measured in the image's natural units. + Right float64 `json:"right,omitempty"` + + // The Y coordinate of the top edge of the rectangle, measured in the image's natural units. + Top float64 `json:"top,omitempty"` +} + +// Region A region within an artifact where a result was detected. +type Region struct { + + // The length of the region in bytes. + ByteLength int `json:"byteLength,omitempty"` + + // The zero-based offset from the beginning of the artifact of the first byte in the region. + ByteOffset int `json:"byteOffset,omitempty"` + + // The length of the region in characters. + CharLength int `json:"charLength,omitempty"` + + // The zero-based offset from the beginning of the artifact of the first character in the region. + CharOffset int `json:"charOffset,omitempty"` + + // The column number of the character following the end of the region. + EndColumn int `json:"endColumn,omitempty"` + + // The line number of the last character in the region. + EndLine int `json:"endLine,omitempty"` + + // A message relevant to the region. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the region. + Properties *PropertyBag `json:"properties,omitempty"` + + // The portion of the artifact contents within the specified region. + Snippet *ArtifactContent `json:"snippet,omitempty"` + + // Specifies the source language, if any, of the portion of the artifact specified by the region object. + SourceLanguage string `json:"sourceLanguage,omitempty"` + + // The column number of the first character in the region. + StartColumn int `json:"startColumn,omitempty"` + + // The line number of the first character in the region. + StartLine int `json:"startLine,omitempty"` +} + +// Replacement The replacement of a single region of an artifact. +type Replacement struct { + + // The region of the artifact to delete. + DeletedRegion *Region `json:"deletedRegion"` + + // The content to insert at the location specified by the 'deletedRegion' property. 
+ InsertedContent *ArtifactContent `json:"insertedContent,omitempty"` + + // Key/value pairs that provide additional information about the replacement. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// ReportingConfiguration Information about a rule or notification that can be configured at runtime. +type ReportingConfiguration struct { + + // Specifies whether the report may be produced during the scan. + Enabled bool `json:"enabled,omitempty"` + + // Specifies the failure level for the report. + Level interface{} `json:"level,omitempty"` + + // Contains configuration information specific to a report. + Parameters *PropertyBag `json:"parameters,omitempty"` + + // Key/value pairs that provide additional information about the reporting configuration. + Properties *PropertyBag `json:"properties,omitempty"` + + // Specifies the relative priority of the report. Used for analysis output only. + Rank float64 `json:"rank,omitempty"` +} + +// ReportingDescriptor Metadata that describes a specific report produced by the tool, as part of the analysis it provides or its runtime reporting. +type ReportingDescriptor struct { + + // Default reporting configuration information. + DefaultConfiguration *ReportingConfiguration `json:"defaultConfiguration,omitempty"` + + // An array of unique identifies in the form of a GUID by which this report was known in some previous version of the analysis tool. + DeprecatedGuids []string `json:"deprecatedGuids,omitempty"` + + // An array of stable, opaque identifiers by which this report was known in some previous version of the analysis tool. + DeprecatedIds []string `json:"deprecatedIds,omitempty"` + + // An array of readable identifiers by which this report was known in some previous version of the analysis tool. + DeprecatedNames []string `json:"deprecatedNames,omitempty"` + + // A description of the report. Should, as far as possible, provide details sufficient to enable resolution of any problem indicated by the result. + FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"` + + // A unique identifer for the reporting descriptor in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // Provides the primary documentation for the report, useful when there is no online documentation. + Help *MultiformatMessageString `json:"help,omitempty"` + + // A URI where the primary documentation for the report can be found. + HelpUri string `json:"helpUri,omitempty"` + + // A stable, opaque identifier for the report. + Id string `json:"id"` + + // A set of name/value pairs with arbitrary names. Each value is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments. + MessageStrings map[string]*MultiformatMessageString `json:"messageStrings,omitempty"` + + // A report identifier that is understandable to an end user. + Name string `json:"name,omitempty"` + + // Key/value pairs that provide additional information about the report. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of objects that describe relationships between this reporting descriptor and others. + Relationships []*ReportingDescriptorRelationship `json:"relationships,omitempty"` + + // A concise description of the report. Should be a single sentence that is understandable when visible space is limited to a single line of text. 
+ ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"` +} + +// ReportingDescriptorReference Information about how to locate a relevant reporting descriptor. +type ReportingDescriptorReference struct { + + // A guid that uniquely identifies the descriptor. + Guid string `json:"guid,omitempty"` + + // The id of the descriptor. + Id string `json:"id,omitempty"` + + // The index into an array of descriptors in toolComponent.ruleDescriptors, toolComponent.notificationDescriptors, or toolComponent.taxonomyDescriptors, depending on context. + Index int `json:"index,omitempty"` + + // Key/value pairs that provide additional information about the reporting descriptor reference. + Properties *PropertyBag `json:"properties,omitempty"` + + // A reference used to locate the toolComponent associated with the descriptor. + ToolComponent *ToolComponentReference `json:"toolComponent,omitempty"` +} + +// ReportingDescriptorRelationship Information about the relation of one reporting descriptor to another. +type ReportingDescriptorRelationship struct { + + // A description of the reporting descriptor relationship. + Description *Message `json:"description,omitempty"` + + // A set of distinct strings that categorize the relationship. Well-known kinds include 'canPrecede', 'canFollow', 'willPrecede', 'willFollow', 'superset', 'subset', 'equal', 'disjoint', 'relevant', and 'incomparable'. + Kinds []string `json:"kinds,omitempty"` + + // Key/value pairs that provide additional information about the reporting descriptor reference. + Properties *PropertyBag `json:"properties,omitempty"` + + // A reference to the related reporting descriptor. + Target *ReportingDescriptorReference `json:"target"` +} + +// Result A result produced by an analysis tool. +type Result struct { + + // Identifies the artifact that the analysis tool was instructed to scan. This need not be the same as the artifact where the result actually occurred. + AnalysisTarget *ArtifactLocation `json:"analysisTarget,omitempty"` + + // A set of artifacts relevant to the result. + Attachments []*Attachment `json:"attachments,omitempty"` + + // The state of a result relative to a baseline of a previous run. + BaselineState interface{} `json:"baselineState,omitempty"` + + // An array of 'codeFlow' objects relevant to the result. + CodeFlows []*CodeFlow `json:"codeFlows,omitempty"` + + // A stable, unique identifier for the equivalence class of logically identical results to which this result belongs, in the form of a GUID. + CorrelationGuid string `json:"correlationGuid,omitempty"` + + // A set of strings each of which individually defines a stable, unique identity for the result. + Fingerprints map[string]string `json:"fingerprints,omitempty"` + + // An array of 'fix' objects, each of which represents a proposed fix to the problem indicated by the result. + Fixes []*Fix `json:"fixes,omitempty"` + + // An array of one or more unique 'graphTraversal' objects. + GraphTraversals []*GraphTraversal `json:"graphTraversals,omitempty"` + + // An array of zero or more unique graph objects associated with the result. + Graphs []*Graph `json:"graphs,omitempty"` + + // A stable, unique identifer for the result in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // An absolute URI at which the result can be viewed. + HostedViewerUri string `json:"hostedViewerUri,omitempty"` + + // A value that categorizes results by evaluation state. 
+ Kind interface{} `json:"kind,omitempty"` + + // A value specifying the severity level of the result. + Level interface{} `json:"level,omitempty"` + + // The set of locations where the result was detected. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location. + Locations []*Location `json:"locations,omitempty"` + + // A message that describes the result. The first sentence of the message only will be displayed when visible space is limited. + Message *Message `json:"message"` + + // A positive integer specifying the number of times this logically unique result was observed in this run. + OccurrenceCount int `json:"occurrenceCount,omitempty"` + + // A set of strings that contribute to the stable, unique identity of the result. + PartialFingerprints map[string]string `json:"partialFingerprints,omitempty"` + + // Key/value pairs that provide additional information about the result. + Properties *PropertyBag `json:"properties,omitempty"` + + // Information about how and when the result was detected. + Provenance *ResultProvenance `json:"provenance,omitempty"` + + // A number representing the priority or importance of the result. + Rank float64 `json:"rank,omitempty"` + + // A set of locations relevant to this result. + RelatedLocations []*Location `json:"relatedLocations,omitempty"` + + // A reference used to locate the rule descriptor relevant to this result. + Rule *ReportingDescriptorReference `json:"rule,omitempty"` + + // The stable, unique identifier of the rule, if any, to which this result is relevant. + RuleId string `json:"ruleId,omitempty"` + + // The index within the tool component rules array of the rule object associated with this result. + RuleIndex int `json:"ruleIndex,omitempty"` + + // An array of 'stack' objects relevant to the result. + Stacks []*Stack `json:"stacks,omitempty"` + + // A set of suppressions relevant to this result. + Suppressions []*Suppression `json:"suppressions,omitempty"` + + // An array of references to taxonomy reporting descriptors that are applicable to the result. + Taxa []*ReportingDescriptorReference `json:"taxa,omitempty"` + + // A web request associated with this result. + WebRequest *WebRequest `json:"webRequest,omitempty"` + + // A web response associated with this result. + WebResponse *WebResponse `json:"webResponse,omitempty"` + + // The URIs of the work items associated with this result. + WorkItemUris []string `json:"workItemUris,omitempty"` +} + +// ResultProvenance Contains information about how and when a result was detected. +type ResultProvenance struct { + + // An array of physicalLocation objects which specify the portions of an analysis tool's output that a converter transformed into the result. + ConversionSources []*PhysicalLocation `json:"conversionSources,omitempty"` + + // A GUID-valued string equal to the automationDetails.guid property of the run in which the result was first detected. + FirstDetectionRunGuid string `json:"firstDetectionRunGuid,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the result was first detected. See "Date/time properties" in the SARIF spec for the required format. + FirstDetectionTimeUtc string `json:"firstDetectionTimeUtc,omitempty"` + + // The index within the run.invocations array of the invocation object which describes the tool invocation that detected the result. 
+ InvocationIndex int `json:"invocationIndex,omitempty"` + + // A GUID-valued string equal to the automationDetails.guid property of the run in which the result was most recently detected. + LastDetectionRunGuid string `json:"lastDetectionRunGuid,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which the result was most recently detected. See "Date/time properties" in the SARIF spec for the required format. + LastDetectionTimeUtc string `json:"lastDetectionTimeUtc,omitempty"` + + // Key/value pairs that provide additional information about the result. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Run Describes a single run of an analysis tool, and contains the reported output of that run. +type Run struct { + + // Addresses associated with this run instance, if any. + Addresses []*Address `json:"addresses,omitempty"` + + // An array of artifact objects relevant to the run. + Artifacts []*Artifact `json:"artifacts,omitempty"` + + // Automation details that describe this run. + AutomationDetails *RunAutomationDetails `json:"automationDetails,omitempty"` + + // The 'guid' property of a previous SARIF 'run' that comprises the baseline that was used to compute result 'baselineState' properties for the run. + BaselineGuid string `json:"baselineGuid,omitempty"` + + // Specifies the unit in which the tool measures columns. + ColumnKind interface{} `json:"columnKind,omitempty"` + + // A conversion object that describes how a converter transformed an analysis tool's native reporting format into the SARIF format. + Conversion *Conversion `json:"conversion,omitempty"` + + // Specifies the default encoding for any artifact object that refers to a text file. + DefaultEncoding string `json:"defaultEncoding,omitempty"` + + // Specifies the default source language for any artifact object that refers to a text file that contains source code. + DefaultSourceLanguage string `json:"defaultSourceLanguage,omitempty"` + + // References to external property files that should be inlined with the content of a root log file. + ExternalPropertyFileReferences *ExternalPropertyFileReferences `json:"externalPropertyFileReferences,omitempty"` + + // An array of zero or more unique graph objects associated with the run. + Graphs []*Graph `json:"graphs,omitempty"` + + // Describes the invocation of the analysis tool. + Invocations []*Invocation `json:"invocations,omitempty"` + + // The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase culture code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646). + Language string `json:"language,omitempty"` + + // An array of logical locations such as namespaces, types or functions. + LogicalLocations []*LogicalLocation `json:"logicalLocations,omitempty"` + + // An ordered list of character sequences that were treated as line breaks when computing region information for the run. + NewlineSequences []string `json:"newlineSequences,omitempty"` + + // The artifact location specified by each uriBaseId symbol on the machine where the tool originally ran. 
+ OriginalUriBaseIds map[string]*ArtifactLocation `json:"originalUriBaseIds,omitempty"` + + // Contains configurations that may potentially override both reportingDescriptor.defaultConfiguration (the tool's default severities) and invocation.configurationOverrides (severities established at run-time from the command line). + Policies []*ToolComponent `json:"policies,omitempty"` + + // Key/value pairs that provide additional information about the run. + Properties *PropertyBag `json:"properties,omitempty"` + + // An array of strings used to replace sensitive information in a redaction-aware property. + RedactionTokens []string `json:"redactionTokens,omitempty"` + + // The set of results contained in an SARIF log. The results array can be omitted when a run is solely exporting rules metadata. It must be present (but may be empty) if a log file represents an actual scan. + Results []*Result `json:"results,omitempty"` + + // Automation details that describe the aggregate of runs to which this run belongs. + RunAggregates []*RunAutomationDetails `json:"runAggregates,omitempty"` + + // A specialLocations object that defines locations of special significance to SARIF consumers. + SpecialLocations *SpecialLocations `json:"specialLocations,omitempty"` + + // An array of toolComponent objects relevant to a taxonomy in which results are categorized. + Taxonomies []*ToolComponent `json:"taxonomies,omitempty"` + + // An array of threadFlowLocation objects cached at run level. + ThreadFlowLocations []*ThreadFlowLocation `json:"threadFlowLocations,omitempty"` + + // Information about the tool or tool pipeline that generated the results in this run. A run can only contain results produced by a single tool or tool pipeline. A run can aggregate results from multiple log files, as long as context around the tool run (tool command-line arguments and the like) is identical for all aggregated files. + Tool *Tool `json:"tool"` + + // The set of available translations of the localized data provided by the tool. + Translations []*ToolComponent `json:"translations,omitempty"` + + // Specifies the revision in version control of the artifacts that were scanned. + VersionControlProvenance []*VersionControlDetails `json:"versionControlProvenance,omitempty"` + + // An array of request objects cached at run level. + WebRequests []*WebRequest `json:"webRequests,omitempty"` + + // An array of response objects cached at run level. + WebResponses []*WebResponse `json:"webResponses,omitempty"` +} + +// RunAutomationDetails Information that describes a run's identity and role within an engineering system process. +type RunAutomationDetails struct { + + // A stable, unique identifier for the equivalence class of runs to which this object's containing run object belongs in the form of a GUID. + CorrelationGuid string `json:"correlationGuid,omitempty"` + + // A description of the identity and role played within the engineering system by this object's containing run object. + Description *Message `json:"description,omitempty"` + + // A stable, unique identifer for this object's containing run object in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // A hierarchical string that uniquely identifies this object's containing run object. + Id string `json:"id,omitempty"` + + // Key/value pairs that provide additional information about the run automation details. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// SpecialLocations Defines locations of special significance to SARIF consumers. 
+type SpecialLocations struct { + + // Provides a suggestion to SARIF consumers to display file paths relative to the specified location. + DisplayBase *ArtifactLocation `json:"displayBase,omitempty"` + + // Key/value pairs that provide additional information about the special locations. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// Stack A call stack that is relevant to a result. +type Stack struct { + + // An array of stack frames that represents a sequence of calls, rendered in reverse chronological order, that comprise the call stack. + Frames []*StackFrame `json:"frames"` + + // A message relevant to this call stack. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the stack. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// StackFrame A function call within a stack trace. +type StackFrame struct { + + // The location to which this stack frame refers. + Location *Location `json:"location,omitempty"` + + // The name of the module that contains the code of this stack frame. + Module string `json:"module,omitempty"` + + // The parameters of the call that is executing. + Parameters []string `json:"parameters,omitempty"` + + // Key/value pairs that provide additional information about the stack frame. + Properties *PropertyBag `json:"properties,omitempty"` + + // The thread identifier of the stack frame. + ThreadId int `json:"threadId,omitempty"` +} + +// Suppression A suppression that is relevant to a result. +type Suppression struct { + + // A stable, unique identifer for the supression in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // A string representing the justification for the suppression. + Justification string `json:"justification,omitempty"` + + // A string that indicates where the suppression is persisted. + Kind string `json:"kind"` + + // Identifies the location associated with the suppression. + Location *Location `json:"location,omitempty"` + + // Key/value pairs that provide additional information about the suppression. + Properties *PropertyBag `json:"properties,omitempty"` + + // A string that indicates the review status of the suppression. + Status interface{} `json:"status,omitempty"` +} + +// ThreadFlow Describes a sequence of code locations that specify a path through a single thread of execution such as an operating system or fiber. +type ThreadFlow struct { + + // An string that uniquely identifies the threadFlow within the codeFlow in which it occurs. + Id string `json:"id,omitempty"` + + // Values of relevant expressions at the start of the thread flow that remain constant. + ImmutableState map[string]*MultiformatMessageString `json:"immutableState,omitempty"` + + // Values of relevant expressions at the start of the thread flow that may change during thread flow execution. + InitialState map[string]*MultiformatMessageString `json:"initialState,omitempty"` + + // A temporally ordered array of 'threadFlowLocation' objects, each of which describes a location visited by the tool while producing the result. + Locations []*ThreadFlowLocation `json:"locations"` + + // A message relevant to the thread flow. + Message *Message `json:"message,omitempty"` + + // Key/value pairs that provide additional information about the thread flow. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// ThreadFlowLocation A location visited by an analysis tool while simulating or monitoring the execution of a program. 
+type ThreadFlowLocation struct { + + // An integer representing the temporal order in which execution reached this location. + ExecutionOrder int `json:"executionOrder,omitempty"` + + // The Coordinated Universal Time (UTC) date and time at which this location was executed. + ExecutionTimeUtc string `json:"executionTimeUtc,omitempty"` + + // Specifies the importance of this location in understanding the code flow in which it occurs. The order from most to least important is "essential", "important", "unimportant". Default: "important". + Importance interface{} `json:"importance,omitempty"` + + // The index within the run threadFlowLocations array. + Index int `json:"index,omitempty"` + + // A set of distinct strings that categorize the thread flow location. Well-known kinds include 'acquire', 'release', 'enter', 'exit', 'call', 'return', 'branch', 'implicit', 'false', 'true', 'caution', 'danger', 'unknown', 'unreachable', 'taint', 'function', 'handler', 'lock', 'memory', 'resource', 'scope' and 'value'. + Kinds []string `json:"kinds,omitempty"` + + // The code location. + Location *Location `json:"location,omitempty"` + + // The name of the module that contains the code that is executing. + Module string `json:"module,omitempty"` + + // An integer representing a containment hierarchy within the thread flow. + NestingLevel int `json:"nestingLevel,omitempty"` + + // Key/value pairs that provide additional information about the threadflow location. + Properties *PropertyBag `json:"properties,omitempty"` + + // The call stack leading to this location. + Stack *Stack `json:"stack,omitempty"` + + // A dictionary, each of whose keys specifies a variable or expression, the associated value of which represents the variable or expression value. For an annotation of kind 'continuation', for example, this dictionary might hold the current assumed values of a set of global variables. + State map[string]*MultiformatMessageString `json:"state,omitempty"` + + // An array of references to rule or taxonomy reporting descriptors that are applicable to the thread flow location. + Taxa []*ReportingDescriptorReference `json:"taxa,omitempty"` + + // A web request associated with this thread flow location. + WebRequest *WebRequest `json:"webRequest,omitempty"` + + // A web response associated with this thread flow location. + WebResponse *WebResponse `json:"webResponse,omitempty"` +} + +// Tool The analysis tool that was run. +type Tool struct { + + // The analysis tool that was run. + Driver *ToolComponent `json:"driver"` + + // Tool extensions that contributed to or reconfigured the analysis tool that was run. + Extensions []*ToolComponent `json:"extensions,omitempty"` + + // Key/value pairs that provide additional information about the tool. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// ToolComponent A component, such as a plug-in or the driver, of the analysis tool that was run. +type ToolComponent struct { + + // The component which is strongly associated with this component. For a translation, this refers to the component which has been translated. For an extension, this is the driver that provides the extension's plugin model. + AssociatedComponent *ToolComponentReference `json:"associatedComponent,omitempty"` + + // The kinds of data contained in this object. 
+ Contents []interface{} `json:"contents,omitempty"` + + // The binary version of the tool component's primary executable file expressed as four non-negative integers separated by a period (for operating systems that express file versions in this way). + DottedQuadFileVersion string `json:"dottedQuadFileVersion,omitempty"` + + // The absolute URI from which the tool component can be downloaded. + DownloadUri string `json:"downloadUri,omitempty"` + + // A comprehensive description of the tool component. + FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"` + + // The name of the tool component along with its version and any other useful identifying information, such as its locale. + FullName string `json:"fullName,omitempty"` + + // A dictionary, each of whose keys is a resource identifier and each of whose values is a multiformatMessageString object, which holds message strings in plain text and (optionally) Markdown format. The strings can include placeholders, which can be used to construct a message in combination with an arbitrary number of additional string arguments. + GlobalMessageStrings map[string]*MultiformatMessageString `json:"globalMessageStrings,omitempty"` + + // A unique identifer for the tool component in the form of a GUID. + Guid string `json:"guid,omitempty"` + + // The absolute URI at which information about this version of the tool component can be found. + InformationUri string `json:"informationUri,omitempty"` + + // Specifies whether this object contains a complete definition of the localizable and/or non-localizable data for this component, as opposed to including only data that is relevant to the results persisted to this log file. + IsComprehensive bool `json:"isComprehensive,omitempty"` + + // The language of the messages emitted into the log file during this run (expressed as an ISO 639-1 two-letter lowercase language code) and an optional region (expressed as an ISO 3166-1 two-letter uppercase subculture code associated with a country or region). The casing is recommended but not required (in order for this data to conform to RFC5646). + Language string `json:"language,omitempty"` + + // The semantic version of the localized strings defined in this component; maintained by components that provide translations. + LocalizedDataSemanticVersion string `json:"localizedDataSemanticVersion,omitempty"` + + // An array of the artifactLocation objects associated with the tool component. + Locations []*ArtifactLocation `json:"locations,omitempty"` + + // The minimum value of localizedDataSemanticVersion required in translations consumed by this component; used by components that consume translations. + MinimumRequiredLocalizedDataSemanticVersion string `json:"minimumRequiredLocalizedDataSemanticVersion,omitempty"` + + // The name of the tool component. + Name string `json:"name"` + + // An array of reportingDescriptor objects relevant to the notifications related to the configuration and runtime execution of the tool component. + Notifications []*ReportingDescriptor `json:"notifications,omitempty"` + + // The organization or company that produced the tool component. + Organization string `json:"organization,omitempty"` + + // A product suite to which the tool component belongs. + Product string `json:"product,omitempty"` + + // A localizable string containing the name of the suite of products to which the tool component belongs. 
+ ProductSuite string `json:"productSuite,omitempty"` + + // Key/value pairs that provide additional information about the tool component. + Properties *PropertyBag `json:"properties,omitempty"` + + // A string specifying the UTC date (and optionally, the time) of the component's release. + ReleaseDateUtc string `json:"releaseDateUtc,omitempty"` + + // An array of reportingDescriptor objects relevant to the analysis performed by the tool component. + Rules []*ReportingDescriptor `json:"rules,omitempty"` + + // The tool component version in the format specified by Semantic Versioning 2.0. + SemanticVersion string `json:"semanticVersion,omitempty"` + + // A brief description of the tool component. + ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"` + + // An array of toolComponentReference objects to declare the taxonomies supported by the tool component. + SupportedTaxonomies []*ToolComponentReference `json:"supportedTaxonomies,omitempty"` + + // An array of reportingDescriptor objects relevant to the definitions of both standalone and tool-defined taxonomies. + Taxa []*ReportingDescriptor `json:"taxa,omitempty"` + + // Translation metadata, required for a translation, not populated by other component types. + TranslationMetadata *TranslationMetadata `json:"translationMetadata,omitempty"` + + // The tool component version, in whatever format the component natively provides. + Version string `json:"version,omitempty"` +} + +// ToolComponentReference Identifies a particular toolComponent object, either the driver or an extension. +type ToolComponentReference struct { + + // The 'guid' property of the referenced toolComponent. + Guid string `json:"guid,omitempty"` + + // An index into the referenced toolComponent in tool.extensions. + Index int `json:"index,omitempty"` + + // The 'name' property of the referenced toolComponent. + Name string `json:"name,omitempty"` + + // Key/value pairs that provide additional information about the toolComponentReference. + Properties *PropertyBag `json:"properties,omitempty"` +} + +// TranslationMetadata Provides additional metadata related to translation. +type TranslationMetadata struct { + + // The absolute URI from which the translation metadata can be downloaded. + DownloadUri string `json:"downloadUri,omitempty"` + + // A comprehensive description of the translation metadata. + FullDescription *MultiformatMessageString `json:"fullDescription,omitempty"` + + // The full name associated with the translation metadata. + FullName string `json:"fullName,omitempty"` + + // The absolute URI from which information related to the translation metadata can be downloaded. + InformationUri string `json:"informationUri,omitempty"` + + // The name associated with the translation metadata. + Name string `json:"name"` + + // Key/value pairs that provide additional information about the translation metadata. + Properties *PropertyBag `json:"properties,omitempty"` + + // A brief description of the translation metadata. + ShortDescription *MultiformatMessageString `json:"shortDescription,omitempty"` +} + +// VersionControlDetails Specifies the information necessary to retrieve a desired revision from a version control system. +type VersionControlDetails struct { + + // A Coordinated Universal Time (UTC) date and time that can be used to synchronize an enlistment to the state of the repository at that time. + AsOfTimeUtc string `json:"asOfTimeUtc,omitempty"` + + // The name of a branch containing the revision. 
+ Branch string `json:"branch,omitempty"` + + // The location in the local file system to which the root of the repository was mapped at the time of the analysis. + MappedTo *ArtifactLocation `json:"mappedTo,omitempty"` + + // Key/value pairs that provide additional information about the version control details. + Properties *PropertyBag `json:"properties,omitempty"` + + // The absolute URI of the repository. + RepositoryUri string `json:"repositoryUri"` + + // A string that uniquely and permanently identifies the revision within the repository. + RevisionId string `json:"revisionId,omitempty"` + + // A tag that has been applied to the revision. + RevisionTag string `json:"revisionTag,omitempty"` +} + +// WebRequest Describes an HTTP request. +type WebRequest struct { + + // The body of the request. + Body *ArtifactContent `json:"body,omitempty"` + + // The request headers. + Headers map[string]string `json:"headers,omitempty"` + + // The index within the run.webRequests array of the request object associated with this result. + Index int `json:"index,omitempty"` + + // The HTTP method. Well-known values are 'GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD', 'OPTIONS', 'TRACE', 'CONNECT'. + Method string `json:"method,omitempty"` + + // The request parameters. + Parameters map[string]string `json:"parameters,omitempty"` + + // Key/value pairs that provide additional information about the request. + Properties *PropertyBag `json:"properties,omitempty"` + + // The request protocol. Example: 'http'. + Protocol string `json:"protocol,omitempty"` + + // The target of the request. + Target string `json:"target,omitempty"` + + // The request version. Example: '1.1'. + Version string `json:"version,omitempty"` +} + +// WebResponse Describes the response to an HTTP request. +type WebResponse struct { + + // The body of the response. + Body *ArtifactContent `json:"body,omitempty"` + + // The response headers. + Headers map[string]string `json:"headers,omitempty"` + + // The index within the run.webResponses array of the response object associated with this result. + Index int `json:"index,omitempty"` + + // Specifies whether a response was received from the server. + NoResponseReceived bool `json:"noResponseReceived,omitempty"` + + // Key/value pairs that provide additional information about the response. + Properties *PropertyBag `json:"properties,omitempty"` + + // The response protocol. Example: 'http'. + Protocol string `json:"protocol,omitempty"` + + // The response reason. Example: 'Not found'. + ReasonPhrase string `json:"reasonPhrase,omitempty"` + + // The response status code. Example: 451. + StatusCode int `json:"statusCode,omitempty"` + + // The response version. Example: '1.1'. + Version string `json:"version,omitempty"` +} diff --git a/vendor/github.com/daixiang0/gci/LICENSE b/vendor/github.com/daixiang0/gci/LICENSE new file mode 100644 index 000000000..e1292f738 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, Xiang Dai +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go new file mode 100644 index 000000000..7efa576ca --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go @@ -0,0 +1,383 @@ +package gci + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" +) + +const ( + // pkg type: standard, remote, local + standard int = iota + // 3rd-party packages + remote + local + + commentFlag = "//" +) + +var ( + importStartFlag = []byte(` +import ( +`) + importEndFlag = []byte(` +) +`) +) + +type FlagSet struct { + LocalFlag []string + DoWrite, DoDiff *bool +} + +type pkg struct { + list map[int][]string + comment map[string]string + alias map[string]string +} + +// ParseLocalFlag takes a comma-separated list of +// package-name-prefixes (as passed to the "-local" flag), and splits +// it in to a list. This is different than strings.Split in that it +// handles the empty string and empty entries in the list. +func ParseLocalFlag(str string) []string { + return strings.FieldsFunc(str, func(c rune) bool { return c == ',' }) +} + +func newPkg(data [][]byte, localFlag []string) *pkg { + listMap := make(map[int][]string) + commentMap := make(map[string]string) + aliasMap := make(map[string]string) + p := &pkg{ + list: listMap, + comment: commentMap, + alias: aliasMap, + } + + formatData := make([]string, 0) + // remove all empty lines + for _, v := range data { + if len(v) > 0 { + formatData = append(formatData, strings.TrimSpace(string(v))) + } + } + + n := len(formatData) + for i := n - 1; i >= 0; i-- { + line := formatData[i] + + // check commentFlag: + // 1. one line commentFlag + // 2. 
commentFlag after import path + commentIndex := strings.Index(line, commentFlag) + if commentIndex == 0 { + // comment in the last line is useless, ignore it + if i+1 >= n { + continue + } + pkg, _, _ := getPkgInfo(formatData[i+1], strings.Index(formatData[i+1], commentFlag) >= 0) + p.comment[pkg] = line + continue + } else if commentIndex > 0 { + pkg, alias, comment := getPkgInfo(line, true) + if alias != "" { + p.alias[pkg] = alias + } + + p.comment[pkg] = comment + pkgType := getPkgType(pkg, localFlag) + p.list[pkgType] = append(p.list[pkgType], pkg) + continue + } + + pkg, alias, _ := getPkgInfo(line, false) + + if alias != "" { + p.alias[pkg] = alias + } + + pkgType := getPkgType(pkg, localFlag) + p.list[pkgType] = append(p.list[pkgType], pkg) + } + + return p +} + +// fmt format import pkgs as expected +func (p *pkg) fmt() []byte { + ret := make([]string, 0, 100) + + for pkgType := range []int{standard, remote, local} { + sort.Strings(p.list[pkgType]) + for _, s := range p.list[pkgType] { + if p.comment[s] != "" { + l := fmt.Sprintf("%s%s%s%s", linebreak, indent, p.comment[s], linebreak) + ret = append(ret, l) + } + + if p.alias[s] != "" { + s = fmt.Sprintf("%s%s%s%s%s", indent, p.alias[s], blank, s, linebreak) + } else { + s = fmt.Sprintf("%s%s%s", indent, s, linebreak) + } + + ret = append(ret, s) + } + + if len(p.list[pkgType]) > 0 { + ret = append(ret, linebreak) + } + } + if len(ret) > 0 && ret[len(ret)-1] == linebreak { + ret = ret[:len(ret)-1] + } + + // remove duplicate empty lines + s1 := fmt.Sprintf("%s%s%s%s", linebreak, linebreak, linebreak, indent) + s2 := fmt.Sprintf("%s%s%s", linebreak, linebreak, indent) + return []byte(strings.ReplaceAll(strings.Join(ret, ""), s1, s2)) +} + +// getPkgInfo assume line is a import path, and return (path, alias, comment) +func getPkgInfo(line string, comment bool) (string, string, string) { + if comment { + s := strings.Split(line, commentFlag) + pkgArray := strings.Split(s[0], blank) + if len(pkgArray) > 1 { + return pkgArray[1], pkgArray[0], fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) + } else { + return strings.TrimSpace(pkgArray[0]), "", fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) + } + } else { + pkgArray := strings.Split(line, blank) + if len(pkgArray) > 1 { + return pkgArray[1], pkgArray[0], "" + } else { + return pkgArray[0], "", "" + } + } +} + +func getPkgType(line string, localFlag []string) int { + pkgName := strings.Trim(line, "\"\\`") + + for _, localPkg := range localFlag { + if strings.HasPrefix(pkgName, localPkg) { + return local + } + } + + if isStandardPackage(pkgName) { + return standard + } + + return remote +} + +const ( + blank = " " + indent = "\t" + linebreak = "\n" +) + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gci", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gci", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. 
+ return replaceTempFilename(data, filename) + } + return +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +func visitFile(set *FlagSet) filepath.WalkFunc { + return func(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, os.Stdout, set) + } + return err + } +} + +func WalkDir(path string, set *FlagSet) error { + return filepath.Walk(path, visitFile(set)) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +func ProcessFile(filename string, out io.Writer, set *FlagSet) error { + return processFile(filename, out, set) +} + +func processFile(filename string, out io.Writer, set *FlagSet) error { + var err error + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + + ori := make([]byte, len(src)) + copy(ori, src) + start := bytes.Index(src, importStartFlag) + // in case no importStartFlag or importStartFlag exist in the commentFlag + if start < 0 { + fmt.Printf("skip file %s since no import\n", filename) + return nil + } + end := bytes.Index(src[start:], importEndFlag) + start + + ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + + p := newPkg(ret, set.LocalFlag) + + res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) + + if !bytes.Equal(ori, res) { + if *set.DoWrite { + // On Windows, we need to re-set the permissions from the file. See golang/go#38225. 
+ var perms os.FileMode + if fi, err := os.Stat(filename); err == nil { + perms = fi.Mode() & os.ModePerm + } + err = ioutil.WriteFile(filename, res, perms) + if err != nil { + return err + } + } + if *set.DoDiff { + data, err := diff(ori, res, filename) + if err != nil { + return fmt.Errorf("failed to diff: %v", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + if _, err := out.Write(data); err != nil { + return fmt.Errorf("failed to write: %v", err) + } + } + } + if !*set.DoWrite && !*set.DoDiff { + if _, err = out.Write(res); err != nil { + return fmt.Errorf("failed to write: %v", err) + } + } + + return err +} + +// Run return source and result in []byte if succeed +func Run(filename string, set *FlagSet) ([]byte, []byte, error) { + var err error + + f, err := os.Open(filename) + if err != nil { + return nil, nil, err + } + defer f.Close() + + src, err := ioutil.ReadAll(f) + if err != nil { + return nil, nil, err + } + + ori := make([]byte, len(src)) + copy(ori, src) + start := bytes.Index(src, importStartFlag) + // in case no importStartFlag or importStartFlag exist in the commentFlag + if start < 0 { + return nil, nil, nil + } + end := bytes.Index(src[start:], importEndFlag) + start + + // in case import flags are part of a codegen template, or otherwise "wrong" + if start+len(importStartFlag) > end { + return nil, nil, nil + } + + ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + + p := newPkg(ret, set.LocalFlag) + + res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) + + if bytes.Equal(ori, res) { + return ori, nil, nil + } + + return ori, res, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/std.go b/vendor/github.com/daixiang0/gci/pkg/gci/std.go new file mode 100644 index 000000000..ac96b55ab --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/gci/std.go @@ -0,0 +1,161 @@ +package gci + +// Code generated based on go1.16beta1. DO NOT EDIT. 
+ +var standardPackages = map[string]struct{}{ + "archive/tar": {}, + "archive/zip": {}, + "bufio": {}, + "bytes": {}, + "compress/bzip2": {}, + "compress/flate": {}, + "compress/gzip": {}, + "compress/lzw": {}, + "compress/zlib": {}, + "container/heap": {}, + "container/list": {}, + "container/ring": {}, + "context": {}, + "crypto": {}, + "crypto/aes": {}, + "crypto/cipher": {}, + "crypto/des": {}, + "crypto/dsa": {}, + "crypto/ecdsa": {}, + "crypto/ed25519": {}, + "crypto/elliptic": {}, + "crypto/hmac": {}, + "crypto/md5": {}, + "crypto/rand": {}, + "crypto/rc4": {}, + "crypto/rsa": {}, + "crypto/sha1": {}, + "crypto/sha256": {}, + "crypto/sha512": {}, + "crypto/subtle": {}, + "crypto/tls": {}, + "crypto/x509": {}, + "crypto/x509/pkix": {}, + "database/sql": {}, + "database/sql/driver": {}, + "debug/dwarf": {}, + "debug/elf": {}, + "debug/gosym": {}, + "debug/macho": {}, + "debug/pe": {}, + "debug/plan9obj": {}, + "embed": {}, + "encoding": {}, + "encoding/ascii85": {}, + "encoding/asn1": {}, + "encoding/base32": {}, + "encoding/base64": {}, + "encoding/binary": {}, + "encoding/csv": {}, + "encoding/gob": {}, + "encoding/hex": {}, + "encoding/json": {}, + "encoding/pem": {}, + "encoding/xml": {}, + "errors": {}, + "expvar": {}, + "flag": {}, + "fmt": {}, + "go/ast": {}, + "go/build": {}, + "go/constant": {}, + "go/doc": {}, + "go/format": {}, + "go/importer": {}, + "go/parser": {}, + "go/printer": {}, + "go/scanner": {}, + "go/token": {}, + "go/types": {}, + "hash": {}, + "hash/adler32": {}, + "hash/crc32": {}, + "hash/crc64": {}, + "hash/fnv": {}, + "hash/maphash": {}, + "html": {}, + "html/template": {}, + "image": {}, + "image/color": {}, + "image/color/palette": {}, + "image/draw": {}, + "image/gif": {}, + "image/jpeg": {}, + "image/png": {}, + "index/suffixarray": {}, + "io": {}, + "io/fs": {}, + "io/ioutil": {}, + "log": {}, + "log/syslog": {}, + "math": {}, + "math/big": {}, + "math/bits": {}, + "math/cmplx": {}, + "math/rand": {}, + "mime": {}, + "mime/multipart": {}, + "mime/quotedprintable": {}, + "net": {}, + "net/http": {}, + "net/http/cgi": {}, + "net/http/cookiejar": {}, + "net/http/fcgi": {}, + "net/http/httptest": {}, + "net/http/httptrace": {}, + "net/http/httputil": {}, + "net/http/pprof": {}, + "net/mail": {}, + "net/rpc": {}, + "net/rpc/jsonrpc": {}, + "net/smtp": {}, + "net/textproto": {}, + "net/url": {}, + "os": {}, + "os/exec": {}, + "os/signal": {}, + "os/user": {}, + "path": {}, + "path/filepath": {}, + "plugin": {}, + "reflect": {}, + "regexp": {}, + "regexp/syntax": {}, + "runtime": {}, + "runtime/cgo": {}, + "runtime/debug": {}, + "runtime/metrics": {}, + "runtime/pprof": {}, + "runtime/race": {}, + "runtime/trace": {}, + "sort": {}, + "strconv": {}, + "strings": {}, + "sync": {}, + "sync/atomic": {}, + "syscall": {}, + "testing": {}, + "testing/fstest": {}, + "testing/iotest": {}, + "testing/quick": {}, + "text/scanner": {}, + "text/tabwriter": {}, + "text/template": {}, + "text/template/parse": {}, + "time": {}, + "time/tzdata": {}, + "unicode": {}, + "unicode/utf16": {}, + "unicode/utf8": {}, + "unsafe": {}, +} + +func isStandardPackage(pkg string) bool { + _, ok := standardPackages[pkg] + return ok +} diff --git a/vendor/github.com/denis-tingajkin/go-header/.gitignore b/vendor/github.com/denis-tingajkin/go-header/.gitignore new file mode 100644 index 000000000..62c893550 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/.gitignore @@ -0,0 +1 @@ +.idea/ \ No newline at end of file diff --git 
a/vendor/github.com/denis-tingajkin/go-header/.go-header.yml b/vendor/github.com/denis-tingajkin/go-header/.go-header.yml new file mode 100644 index 000000000..446d7317e --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/.go-header.yml @@ -0,0 +1,19 @@ +values: + regexp: + copyright-holder: Copyright \(c\) {{year-range}} Denis Tingajkin +template: | + {{copyright-holder}} + + SPDX-License-Identifier: Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/denis-tingajkin/go-header/LICENSE b/vendor/github.com/denis-tingajkin/go-header/LICENSE new file mode 100644 index 000000000..a2c9fda21 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/vendor/github.com/denis-tingajkin/go-header/README.md b/vendor/github.com/denis-tingajkin/go-header/README.md new file mode 100644 index 000000000..1a2a3d9a6 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/README.md @@ -0,0 +1,81 @@ +# go-header +[![Actions Status](https://github.com/denis-tingajkin/go-header/workflows/ci/badge.svg)](https://github.com/denis-tingajkin/go-header/actions) + +Go source code linter providing checks for license headers. + +## Installation + +For installation you can simply use `go get`. + +```bash +go get github.com/denis-tingajkin/go-header/cmd/go-header +``` + +## Configuration + +To configuring `.go-header.yml` linter you simply need to fill the next fields: + +```yaml +--- +temaplte: # expects header template string. +tempalte-path: # expects path to file with license header string. +values: # expects `const` or `regexp` node with values where values is a map string to string. + const: + key1: value1 # const value just checks equality. Note `key1` should be used in template string as {{ key1 }} or {{ KEY1 }}. 
+ regexp: + key2: value2 # regexp value just checks regex match. The value should be a valid regexp pattern. Note `key2` should be used in template string as {{ key2 }} or {{ KEY2 }}. +``` + +Where `values` also can be used recursively. Example: + +```yaml +values: + const: + key1: "value" + regexp: + key2: "{{key1}} value1" # Reads as regex pattern "value value1" +``` + +## Bult-in values + +- **YEAR** - Expects current year. Example header value: `2020`. Example of template using: `{{YEAR}}` or `{{year}}`. +- **YEAR-RANGE** - Expects any valid year interval or current year. Example header value: `2020` or `2000-2020`. Example of template using: `{{year-range}}` or `{{YEAR-RANGE}}`. + +## Execution + +`go-header` linter expects file paths on input. If you want to run `go-header` only on diff files, then you can use this command: + +```bash +go-header $(git diff --name-only | grep -E '.*\.go') +``` + +## Setup example + +### Step 1 + +Create configuration file `.go-header.yml` in the root of project. + +```yaml +--- +values: + const: + MY COMPANY: mycompany.com +template: | + {{ MY COMPANY }} + SPDX-License-Identifier: Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +``` + +### Step 2 +You are ready! Execute `go-header ${PATH_TO_FILES}` from the root of the project. diff --git a/vendor/github.com/denis-tingajkin/go-header/analyzer.go b/vendor/github.com/denis-tingajkin/go-header/analyzer.go new file mode 100644 index 000000000..5707890b0 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/analyzer.go @@ -0,0 +1,146 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package goheader + +import ( + "fmt" + "go/ast" + "os" + "os/exec" + "strings" + "time" +) + +type Target struct { + Path string + File *ast.File +} + +const iso = "2006-01-02 15:04:05 -0700" + +func (t *Target) ModTime() (time.Time, error) { + diff, err := exec.Command("git", "diff", t.Path).CombinedOutput() + if err == nil && len(diff) == 0 { + line, err := exec.Command("git", "log", "-1", "--pretty=format:%cd", "--date=iso", "--", t.Path).CombinedOutput() + if err == nil { + return time.Parse(iso, string(line)) + } + } + info, err := os.Stat(t.Path) + if err != nil { + return time.Time{}, err + } + return info.ModTime(), nil +} + +type Analyzer struct { + values map[string]Value + template string +} + +func (a *Analyzer) Analyze(target *Target) Issue { + if a.template == "" { + return NewIssue("Missed template for check") + } + if t, err := target.ModTime(); err == nil { + if t.Year() != time.Now().Year() { + return nil + } + } + file := target.File + var header string + var offset = Location{ + Position: 1, + } + if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package { + if strings.HasPrefix(file.Comments[0].List[0].Text, "/*") { + header = (&ast.CommentGroup{List: []*ast.Comment{file.Comments[0].List[0]}}).Text() + } else { + header = file.Comments[0].Text() + offset.Position += 3 + } + } + header = strings.TrimSpace(header) + if header == "" { + return NewIssue("Missed header for check") + } + s := NewReader(header) + s.SetOffset(offset) + t := NewReader(a.template) + for !s.Done() && !t.Done() { + templateCh := t.Peek() + if templateCh == '{' { + name := a.readField(t) + if a.values[name] == nil { + return NewIssue(fmt.Sprintf("Template has unknown value: %v", name)) + } + if i := a.values[name].Read(s); i != nil { + return i + } + continue + } + sourceCh := s.Peek() + if sourceCh != templateCh { + l := s.Location() + notNextLine := func(r rune) bool { + return r != '\n' + } + actual := s.ReadWhile(notNextLine) + expected := t.ReadWhile(notNextLine) + return NewIssueWithLocation(fmt.Sprintf("Actual: %v\nExpected:%v", actual, expected), l) + } + s.Next() + t.Next() + } + if !s.Done() { + l := s.Location() + return NewIssueWithLocation(fmt.Sprintf("Unexpected string: %v", s.Finish()), l) + } + if !t.Done() { + l := s.Location() + return NewIssueWithLocation(fmt.Sprintf("Missed string: %v", t.Finish()), l) + } + return nil +} + +func (a *Analyzer) readField(reader *Reader) string { + _ = reader.Next() + _ = reader.Next() + + r := reader.ReadWhile(func(r rune) bool { + return r != '}' + }) + + _ = reader.Next() + _ = reader.Next() + + return strings.ToLower(strings.TrimSpace(r)) +} + +func New(options ...Option) *Analyzer { + a := &Analyzer{} + for _, o := range options { + o.apply(a) + } + for _, v := range a.values { + err := v.Calculate(a.values) + if err != nil { + panic(err.Error()) + } + } + return a +} diff --git a/vendor/github.com/denis-tingajkin/go-header/config.go b/vendor/github.com/denis-tingajkin/go-header/config.go new file mode 100644 index 000000000..fa8b23c2d --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/config.go @@ -0,0 +1,99 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goheader + +import ( + "errors" + "fmt" + "io/ioutil" + "strings" + "time" + + "gopkg.in/yaml.v2" +) + +// Configuration represents go-header linter setup parameters +type Configuration struct { + // Values is map of values. Supports two types 'const` and `regexp`. Values can be used recursively. + Values map[string]map[string]string `yaml:"values"'` + // Template is template for checking. Uses values. + Template string `yaml:"template"` + // TemplatePath path to the template file. Useful if need to load the template from a specific file. + TemplatePath string `yaml:"template-path"` +} + +func (c *Configuration) builtInValues() map[string]Value { + var result = make(map[string]Value) + year := fmt.Sprint(time.Now().Year()) + result["year-range"] = &RegexpValue{ + RawValue: strings.ReplaceAll(`(20\d\d\-YEAR)|(YEAR)`, "YEAR", year), + } + result["year"] = &ConstValue{ + RawValue: year, + } + return result +} + +func (c *Configuration) GetValues() (map[string]Value, error) { + var result = c.builtInValues() + createConst := func(raw string) Value { + return &ConstValue{RawValue: raw} + } + createRegexp := func(raw string) Value { + return &RegexpValue{RawValue: raw} + } + appendValues := func(m map[string]string, create func(string) Value) { + for k, v := range m { + key := strings.ToLower(k) + result[key] = create(v) + } + } + for k, v := range c.Values { + switch k { + case "const": + appendValues(v, createConst) + case "regexp": + appendValues(v, createRegexp) + default: + return nil, fmt.Errorf("unknown value type %v", k) + } + } + return result, nil +} + +func (c *Configuration) GetTemplate() (string, error) { + if c.Template != "" { + return c.Template, nil + } + if c.TemplatePath == "" { + return "", errors.New("template has not passed") + } + if b, err := ioutil.ReadFile(c.TemplatePath); err != nil { + return "", err + } else { + c.Template = strings.TrimSpace(string(b)) + return c.Template, nil + } +} + +func (c *Configuration) Parse(p string) error { + b, err := ioutil.ReadFile(p) + if err != nil { + return err + } + return yaml.Unmarshal(b, c) +} diff --git a/vendor/github.com/denis-tingajkin/go-header/go.mod b/vendor/github.com/denis-tingajkin/go-header/go.mod new file mode 100644 index 000000000..68984cb02 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/go.mod @@ -0,0 +1,10 @@ +module github.com/denis-tingajkin/go-header + +go 1.15 + +require ( + github.com/fatih/color v1.9.0 + github.com/sirupsen/logrus v1.6.0 + github.com/stretchr/testify v1.5.1 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/denis-tingajkin/go-header/go.sum b/vendor/github.com/denis-tingajkin/go-header/go.sum new file mode 100644 index 000000000..4033b08f0 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/denis-tingajkin/go-header/issue.go b/vendor/github.com/denis-tingajkin/go-header/issue.go new file mode 100644 index 000000000..2ff7bfd3c --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/issue.go @@ -0,0 +1,48 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package goheader + +type Issue interface { + Location() Location + Message() string +} + +type issue struct { + msg string + location Location +} + +func (i *issue) Location() Location { + return i.location +} + +func (i *issue) Message() string { + return i.msg +} + +func NewIssueWithLocation(msg string, location Location) Issue { + return &issue{ + msg: msg, + location: location, + } +} + +func NewIssue(msg string) Issue { + return &issue{ + msg: msg, + } +} diff --git a/vendor/github.com/denis-tingajkin/go-header/location.go b/vendor/github.com/denis-tingajkin/go-header/location.go new file mode 100644 index 000000000..ba4d1907b --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/location.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goheader + +import "fmt" + +type Location struct { + Line int + Position int +} + +func (l Location) String() string { + return fmt.Sprintf("%v:%v", l.Line+1, l.Position) +} + +func (l Location) Add(other Location) Location { + return Location{ + Line: l.Line + other.Line, + Position: l.Position + other.Position, + } +} diff --git a/vendor/github.com/denis-tingajkin/go-header/option.go b/vendor/github.com/denis-tingajkin/go-header/option.go new file mode 100644 index 000000000..afbcb62e1 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/option.go @@ -0,0 +1,44 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package goheader + +import "strings" + +type Option interface { + apply(*Analyzer) +} + +type applyAnalyzerOptionFunc func(*Analyzer) + +func (f applyAnalyzerOptionFunc) apply(a *Analyzer) { + f(a) +} + +func WithValues(values map[string]Value) Option { + return applyAnalyzerOptionFunc(func(a *Analyzer) { + a.values = make(map[string]Value) + for k, v := range values { + a.values[strings.ToLower(k)] = v + } + }) +} + +func WithTemplate(template string) Option { + return applyAnalyzerOptionFunc(func(a *Analyzer) { + a.template = template + }) +} diff --git a/vendor/github.com/denis-tingajkin/go-header/reader.go b/vendor/github.com/denis-tingajkin/go-header/reader.go new file mode 100644 index 000000000..2393c9488 --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/reader.go @@ -0,0 +1,116 @@ +/* +Copyright (c) 2020 Denis Tingajkin + +SPDX-License-Identifier: Apache-2.0 + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package goheader + +func NewReader(text string) *Reader { + return &Reader{source: text} +} + +type Reader struct { + source string + position int + location Location + offset Location +} + +func (r *Reader) SetOffset(offset Location) { + r.offset = offset +} + +func (r *Reader) Position() int { + return r.position +} + +func (r *Reader) Location() Location { + return r.location.Add(r.offset) +} + +func (r *Reader) Peek() rune { + if r.Done() { + return rune(0) + } + return rune(r.source[r.position]) +} + +func (r *Reader) Done() bool { + return r.position >= len(r.source) +} + +func (r *Reader) Next() rune { + if r.Done() { + return rune(0) + } + reuslt := r.Peek() + if reuslt == '\n' { + r.location.Line++ + r.location.Position = 0 + } else { + r.location.Position++ + } + r.position++ + return reuslt +} + +func (r *Reader) Finish() string { + if r.position >= len(r.source) { + return "" + } + defer r.till() + return r.source[r.position:] +} + +func (r *Reader) SetPosition(pos int) { + if pos < 0 { + r.position = 0 + } + r.position = pos + r.location = r.calculateLocation() +} + +func (r *Reader) ReadWhile(match func(rune) bool) string { + if match == nil { + return "" + } + start := r.position + for !r.Done() && match(r.Peek()) { + r.Next() + } + return r.source[start:r.position] +} + +func (r *Reader) till() { + r.position = len(r.source) + r.location = r.calculateLocation() +} + +func (r *Reader) calculateLocation() Location { + min := len(r.source) + if min > r.position { + min = r.position + } + x, y := 0, 0 + for i := 0; i < min; i++ { + if r.source[i] == '\n' { + y++ + x = 0 + } else { + x++ + } + } + return Location{Line: y, Position: x} +} diff --git a/vendor/github.com/denis-tingajkin/go-header/value.go b/vendor/github.com/denis-tingajkin/go-header/value.go new file mode 100644 index 000000000..2a3adcdce --- /dev/null +++ b/vendor/github.com/denis-tingajkin/go-header/value.go @@ -0,0 +1,128 @@ +// Copyright (c) 2020 Denis Tingajkin +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package goheader + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +type Calculable interface { + Calculate(map[string]Value) error + Get() string +} + +type Value interface { + Calculable + Read(*Reader) Issue +} + +func calculateValue(calculable Calculable, values map[string]Value) (string, error) { + sb := strings.Builder{} + r := calculable.Get() + var endIndex int + var startIndex int + for startIndex = strings.Index(r, "{{"); startIndex >= 0; startIndex = strings.Index(r, "{{") { + _, _ = sb.WriteString(r[:startIndex]) + endIndex = strings.Index(r, "}}") + if endIndex < 0 { + return "", errors.New("missed value ending") + } + subVal := strings.ToLower(strings.TrimSpace(r[startIndex+2 : endIndex])) + if val := values[subVal]; val != nil { + if err := val.Calculate(values); err != nil { + return "", err + } + sb.WriteString(val.Get()) + } else { + return "", fmt.Errorf("unknown value name %v", subVal) + } + endIndex += 2 + r = r[endIndex:] + } + _, _ = sb.WriteString(r) + return sb.String(), nil +} + +type ConstValue struct { + RawValue string +} + +func (c *ConstValue) Calculate(values map[string]Value) error { + v, err := calculateValue(c, values) + if err != nil { + return err + } + c.RawValue = v + return nil +} + +func (c *ConstValue) Get() string { + return c.RawValue +} + +func (c *ConstValue) Read(s *Reader) Issue { + l := s.Location() + p := s.Position() + for _, ch := range c.Get() { + if ch != s.Peek() { + s.SetPosition(p) + f := s.ReadWhile(func(r rune) bool { + return r != '\n' + }) + return NewIssueWithLocation(fmt.Sprintf("Expected:%v, Actual: %v", c.Get(), f), l) + } + s.Next() + } + return nil +} + +type RegexpValue struct { + RawValue string +} + +func (r *RegexpValue) Calculate(values map[string]Value) error { + v, err := calculateValue(r, values) + if err != nil { + return err + } + r.RawValue = v + return nil +} + +func (r *RegexpValue) Get() string { + return r.RawValue +} + +func (r *RegexpValue) Read(s *Reader) Issue { + l := s.Location() + p := regexp.MustCompile(r.Get()) + pos := s.Position() + str := s.Finish() + s.SetPosition(pos) + indexes := p.FindAllIndex([]byte(str), -1) + if len(indexes) == 0 { + return NewIssueWithLocation(fmt.Sprintf("Pattern %v doesn't match.", p.String()), l) + } + s.SetPosition(pos + indexes[0][1]) + return nil +} + +var _ Value = &ConstValue{} +var _ Value = &RegexpValue{} diff --git a/vendor/github.com/esimonov/ifshort/LICENSE b/vendor/github.com/esimonov/ifshort/LICENSE new file mode 100644 index 000000000..a04e339c0 --- /dev/null +++ b/vendor/github.com/esimonov/ifshort/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Eugene Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go b/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..7e4df7da1 --- /dev/null +++ b/vendor/github.com/esimonov/ifshort/pkg/analyzer/analyzer.go @@ -0,0 +1,247 @@ +package analyzer + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var maxDeclChars, maxDeclLines int + +const ( + maxDeclLinesUsage = `maximum length of variable declaration measured in number of lines, after which the linter won't suggest using short syntax. +Has precedence over max-decl-chars.` + maxDeclCharsUsage = `maximum length of variable declaration measured in number of characters, after which the linter won't suggest using short syntax.` +) + +func init() { + Analyzer.Flags.IntVar(&maxDeclLines, "max-decl-lines", 1, maxDeclLinesUsage) + Analyzer.Flags.IntVar(&maxDeclChars, "max-decl-chars", 30, maxDeclCharsUsage) +} + +// Analyzer is an analysis.Analyzer instance for ifshort linter. +var Analyzer = &analysis.Analyzer{ + Name: "ifshort", + Doc: "Checks that your code uses short syntax for if-statements whenever possible.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + fdecl := node.(*ast.FuncDecl) + + /*if fdecl.Name.Name != "notUsed_BinaryExpressionInIndex_OK" { + return + }*/ + + if fdecl == nil || fdecl.Body == nil { + return + } + + candidates := getNamedOccurrenceMap(fdecl, pass) + + for _, stmt := range fdecl.Body.List { + candidates.checkStatement(stmt, token.NoPos) + } + + for varName := range candidates { + for marker, occ := range candidates[varName] { + // If two or more vars with the same scope marker - skip them. 
+ if candidates.isFoundByScopeMarker(marker) { + continue + } + + pass.Reportf(occ.declarationPos, + "variable '%s' is only used in the if-statement (%s); consider using short syntax", + varName, pass.Fset.Position(occ.ifStmtPos)) + } + } + }) + return nil, nil +} + +func (nom namedOccurrenceMap) checkStatement(stmt ast.Stmt, ifPos token.Pos) { + switch v := stmt.(type) { + case *ast.AssignStmt: + for _, el := range v.Rhs { + nom.checkExpression(el, ifPos) + } + if isAssign(v.Tok) { + for _, el := range v.Lhs { + nom.checkExpression(el, ifPos) + } + } + case *ast.DeferStmt: + for _, a := range v.Call.Args { + nom.checkExpression(a, ifPos) + } + case *ast.ExprStmt: + if callExpr, ok := v.X.(*ast.CallExpr); ok { + nom.checkExpression(callExpr, ifPos) + } + case *ast.ForStmt: + for _, el := range v.Body.List { + nom.checkStatement(el, ifPos) + } + + if bexpr, ok := v.Cond.(*ast.BinaryExpr); ok { + nom.checkExpression(bexpr.X, ifPos) + nom.checkExpression(bexpr.Y, ifPos) + } + + nom.checkStatement(v.Post, ifPos) + case *ast.GoStmt: + for _, a := range v.Call.Args { + nom.checkExpression(a, ifPos) + } + case *ast.IfStmt: + for _, el := range v.Body.List { + nom.checkStatement(el, v.If) + } + + switch cond := v.Cond.(type) { + case *ast.BinaryExpr: + nom.checkExpression(cond.X, v.If) + nom.checkExpression(cond.Y, v.If) + case *ast.CallExpr: + nom.checkExpression(cond, v.If) + } + + if init, ok := v.Init.(*ast.AssignStmt); ok { + for _, e := range init.Rhs { + nom.checkExpression(e, v.If) + } + } + case *ast.IncDecStmt: + nom.checkExpression(v.X, ifPos) + case *ast.RangeStmt: + nom.checkExpression(v.X, ifPos) + if v.Body != nil { + for _, e := range v.Body.List { + nom.checkStatement(e, ifPos) + } + } + case *ast.ReturnStmt: + for _, r := range v.Results { + nom.checkExpression(r, ifPos) + } + case *ast.SendStmt: + nom.checkExpression(v.Chan, ifPos) + nom.checkExpression(v.Value, ifPos) + case *ast.SwitchStmt: + nom.checkExpression(v.Tag, ifPos) + + for _, el := range v.Body.List { + clauses, ok := el.(*ast.CaseClause) + if !ok { + continue + } + + for _, c := range clauses.List { + switch v := c.(type) { + case *ast.BinaryExpr: + nom.checkExpression(v.X, ifPos) + nom.checkExpression(v.Y, ifPos) + case *ast.Ident: + nom.checkExpression(v, ifPos) + } + } + + for _, c := range clauses.Body { + if est, ok := c.(*ast.ExprStmt); ok { + nom.checkExpression(est.X, ifPos) + } + + switch v := c.(type) { + case *ast.AssignStmt: + for _, el := range v.Rhs { + nom.checkExpression(el, ifPos) + } + case *ast.ExprStmt: + nom.checkExpression(v.X, ifPos) + } + } + } + } +} + +func (nom namedOccurrenceMap) checkExpression(candidate ast.Expr, ifPos token.Pos) { + switch v := candidate.(type) { + case *ast.BinaryExpr: + nom.checkExpression(v.X, ifPos) + nom.checkExpression(v.Y, ifPos) + case *ast.CallExpr: + for _, arg := range v.Args { + nom.checkExpression(arg, ifPos) + } + nom.checkExpression(v.Fun, ifPos) + if fun, ok := v.Fun.(*ast.SelectorExpr); ok { + nom.checkExpression(fun.X, ifPos) + } + case *ast.CompositeLit: + for _, el := range v.Elts { + switch v := el.(type) { + case *ast.Ident: + nom.checkExpression(v, ifPos) + case *ast.KeyValueExpr: + nom.checkExpression(v.Key, ifPos) + nom.checkExpression(v.Value, ifPos) + } + } + case *ast.FuncLit: + for _, el := range v.Body.List { + nom.checkStatement(el, ifPos) + } + case *ast.Ident: + if _, ok := nom[v.Name]; !ok || nom[v.Name].isEmponymousKey(ifPos) { + return + } + + scopeMarker1 := nom[v.Name].getScopeMarkerForPosition(v.Pos()) + + delete(nom[v.Name], 
scopeMarker1) + + for k := range nom { + for scopeMarker2 := range nom[k] { + if scopeMarker1 == scopeMarker2 { + delete(nom[k], scopeMarker2) + } + } + } + case *ast.IndexExpr: + nom.checkExpression(v.X, ifPos) + switch index := v.Index.(type) { + case *ast.BinaryExpr: + nom.checkExpression(index.X, ifPos) + case *ast.Ident: + nom.checkExpression(index, ifPos) + } + case *ast.SelectorExpr: + nom.checkExpression(v.X, ifPos) + case *ast.SliceExpr: + nom.checkExpression(v.High, ifPos) + nom.checkExpression(v.Low, ifPos) + nom.checkExpression(v.X, ifPos) + case *ast.TypeAssertExpr: + nom.checkExpression(v.X, ifPos) + case *ast.UnaryExpr: + nom.checkExpression(v.X, ifPos) + } +} + +func isAssign(tok token.Token) bool { + return (tok == token.ASSIGN || + tok == token.ADD_ASSIGN || tok == token.SUB_ASSIGN || + tok == token.MUL_ASSIGN || tok == token.QUO_ASSIGN || tok == token.REM_ASSIGN || + tok == token.AND_ASSIGN || tok == token.OR_ASSIGN || tok == token.XOR_ASSIGN || tok == token.AND_NOT_ASSIGN || + tok == token.SHL_ASSIGN || tok == token.SHR_ASSIGN) +} diff --git a/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go b/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go new file mode 100644 index 000000000..34224c93a --- /dev/null +++ b/vendor/github.com/esimonov/ifshort/pkg/analyzer/occurrences.go @@ -0,0 +1,259 @@ +package analyzer + +import ( + "go/ast" + "go/token" + "time" + + "golang.org/x/tools/go/analysis" +) + +// occurrence is a variable occurrence. +type occurrence struct { + declarationPos token.Pos + ifStmtPos token.Pos +} + +func (occ *occurrence) isComplete() bool { + return occ.ifStmtPos != token.NoPos && occ.declarationPos != token.NoPos +} + +// scopeMarkeredOccurences is a map of scope markers to variable occurrences. +type scopeMarkeredOccurences map[int64]occurrence + +func (smo scopeMarkeredOccurences) getGreatestMarker() int64 { + var maxScopeMarker int64 + + for marker := range smo { + if marker > maxScopeMarker { + maxScopeMarker = marker + } + } + return maxScopeMarker +} + +// find scope marker of the greatest token.Pos that is smaller than provided. +func (smo scopeMarkeredOccurences) getScopeMarkerForPosition(pos token.Pos) int64 { + var m int64 + var foundPos token.Pos + + for marker, occ := range smo { + if occ.declarationPos < pos && occ.declarationPos >= foundPos { + m = marker + foundPos = occ.declarationPos + } + } + return m +} + +func (smo scopeMarkeredOccurences) isEmponymousKey(pos token.Pos) bool { + if pos == token.NoPos { + return false + } + + for _, occ := range smo { + if occ.ifStmtPos == pos { + return true + } + } + return false +} + +// namedOccurrenceMap is a map of variable names to scopeMarkeredOccurences. 
+type namedOccurrenceMap map[string]scopeMarkeredOccurences + +func getNamedOccurrenceMap(fdecl *ast.FuncDecl, pass *analysis.Pass) namedOccurrenceMap { + nom := namedOccurrenceMap(map[string]scopeMarkeredOccurences{}) + + if fdecl == nil || fdecl.Body == nil { + return nom + } + + for _, stmt := range fdecl.Body.List { + switch v := stmt.(type) { + case *ast.AssignStmt: + nom.addFromAssignment(pass, v) + case *ast.IfStmt: + nom.addFromCondition(v) + nom.addFromIfClause(v) + nom.addFromElseClause(v) + } + } + + candidates := namedOccurrenceMap(map[string]scopeMarkeredOccurences{}) + + for varName, markeredOccs := range nom { + for marker, occ := range markeredOccs { + if !occ.isComplete() && !nom.isFoundByScopeMarker(marker) { + continue + } + if _, ok := candidates[varName]; !ok { + candidates[varName] = scopeMarkeredOccurences{ + marker: occ, + } + } else { + candidates[varName][marker] = occ + } + } + } + return candidates +} + +func (nom namedOccurrenceMap) isFoundByScopeMarker(scopeMarker int64) bool { + var i int + + for _, markeredOccs := range nom { + for marker := range markeredOccs { + if marker == scopeMarker { + i++ + } + } + } + return i >= 2 +} + +func (nom namedOccurrenceMap) addFromAssignment(pass *analysis.Pass, assignment *ast.AssignStmt) { + if assignment.Tok != token.DEFINE { + return + } + + scopeMarker := time.Now().UnixNano() + + for i, el := range assignment.Lhs { + ident, ok := el.(*ast.Ident) + if !ok { + continue + } + + if ident.Name == "_" || ident.Obj == nil || isUnshortenableAssignment(ident.Obj.Decl) { + continue + } + + if markeredOccs, ok := nom[ident.Name]; ok { + markeredOccs[scopeMarker] = occurrence{ + declarationPos: ident.Pos(), + } + nom[ident.Name] = markeredOccs + } else { + newOcc := occurrence{} + if areFlagSettingsSatisfied(pass, assignment, i) { + newOcc.declarationPos = ident.Pos() + } + nom[ident.Name] = scopeMarkeredOccurences{scopeMarker: newOcc} + } + } +} + +func isUnshortenableAssignment(decl interface{}) bool { + assign, ok := decl.(*ast.AssignStmt) + if !ok { + return false + } + + for _, el := range assign.Rhs { + u, ok := el.(*ast.UnaryExpr) + if !ok { + continue + } + + if u.Op == token.AND { + if _, ok := u.X.(*ast.CompositeLit); ok { + return true + } + } + } + return false +} + +func areFlagSettingsSatisfied(pass *analysis.Pass, assignment *ast.AssignStmt, i int) bool { + lh := assignment.Lhs[i] + rh := assignment.Rhs[len(assignment.Rhs)-1] + + if len(assignment.Rhs) == len(assignment.Lhs) { + rh = assignment.Rhs[i] + } + + if pass.Fset.Position(rh.End()).Line-pass.Fset.Position(rh.Pos()).Line > maxDeclLines { + return false + } + if int(rh.End()-lh.Pos()) > maxDeclChars { + return false + } + return true +} + +func (nom namedOccurrenceMap) addFromCondition(stmt *ast.IfStmt) { + switch v := stmt.Cond.(type) { + case *ast.BinaryExpr: + for _, v := range [2]ast.Expr{v.X, v.Y} { + switch e := v.(type) { + case *ast.Ident: + nom.addFromIdent(stmt.If, e) + case *ast.SelectorExpr: + nom.addFromIdent(stmt.If, e.X) + } + } + case *ast.Ident: + nom.addFromIdent(stmt.If, v) + case *ast.CallExpr: + for _, a := range v.Args { + switch e := a.(type) { + case *ast.Ident: + nom.addFromIdent(stmt.If, e) + case *ast.CallExpr: + nom.addFromCallExpr(stmt.If, e) + } + } + } +} + +func (nom namedOccurrenceMap) addFromIfClause(stmt *ast.IfStmt) { + nom.addFromBlockStmt(stmt.Body, stmt.If) +} + +func (nom namedOccurrenceMap) addFromElseClause(stmt *ast.IfStmt) { + nom.addFromBlockStmt(stmt.Else, stmt.If) +} + +func (nom namedOccurrenceMap) 
addFromBlockStmt(stmt ast.Stmt, ifPos token.Pos) { + blockStmt, ok := stmt.(*ast.BlockStmt) + if !ok { + return + } + + for _, el := range blockStmt.List { + exptStmt, ok := el.(*ast.ExprStmt) + if !ok { + continue + } + + if callExpr, ok := exptStmt.X.(*ast.CallExpr); ok { + nom.addFromCallExpr(ifPos, callExpr) + } + } +} + +func (nom namedOccurrenceMap) addFromCallExpr(ifPos token.Pos, callExpr *ast.CallExpr) { + for _, arg := range callExpr.Args { + nom.addFromIdent(ifPos, arg) + } +} + +func (nom namedOccurrenceMap) addFromIdent(ifPos token.Pos, v ast.Expr) { + ident, ok := v.(*ast.Ident) + if !ok { + return + } + + if markeredOccs, ok := nom[ident.Name]; ok { + marker := nom[ident.Name].getGreatestMarker() + + occ := markeredOccs[marker] + if occ.isComplete() { + return + } + + occ.ifStmtPos = ifPos + nom[ident.Name][marker] = occ + } +} diff --git a/vendor/github.com/ettle/strcase/.gitignore b/vendor/github.com/ettle/strcase/.gitignore new file mode 100644 index 000000000..54bc1fbff --- /dev/null +++ b/vendor/github.com/ettle/strcase/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# CPU and memory profiles +*.prof + +# Dependency directories +vendor/ diff --git a/vendor/github.com/ettle/strcase/.golangci.yml b/vendor/github.com/ettle/strcase/.golangci.yml new file mode 100644 index 000000000..4d31fcc5b --- /dev/null +++ b/vendor/github.com/ettle/strcase/.golangci.yml @@ -0,0 +1,88 @@ +linters-settings: + dupl: + threshold: 100 + gocyclo: + min-complexity: 15 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - ifElseChain + - whyNoLint + - wrapperFunc + golint: + min-confidence: 0.5 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + nolintlint: + allow-leading-space: false + allow-unused: false + require-specific: true + + require-explanation: true + allow-no-explanation: + - gocyclo + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - gochecknoinits + - gocritic + - gocyclo + - gofmt + - goimports + - golint + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - nolintlint + - rowserrcheck + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + + # don't enable: + # - asciicheck + # - gochecknoglobals + # - gocognit + # - godot + # - godox + # - goerr113 + # - maligned + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/vendor/github.com/ettle/strcase/.readme.tmpl b/vendor/github.com/ettle/strcase/.readme.tmpl new file mode 100644 index 000000000..135765c40 --- /dev/null +++ b/vendor/github.com/ettle/strcase/.readme.tmpl @@ -0,0 +1,80 @@ +{{with .PDoc}} +# Go Strcase + +[![Go Report Card](https://goreportcard.com/badge/github.com/ettle/strcase)](https://goreportcard.com/report/github.com/ettle/strcase) +[![Coverage](http://gocover.io/_badge/github.com/ettle/strcase?0)](http://gocover.io/github.com/ettle/strcase) +[![GoDoc](https://godoc.org/github.com/ettle/strcase?status.svg)](https://pkg.go.dev/github.com/ettle/strcase) + +Convert strings to `snake_case`, 
`camelCase`, `PascalCase`, `kebab-case` and more! Supports Go initialisms, customization, and Unicode. + +`import "{{.ImportPath}}"` + +## Overview +{{comment_md .Doc}} +{{example_html $ ""}} + +## Index{{if .Consts}} +* [Constants](#pkg-constants){{end}}{{if .Vars}} +* [Variables](#pkg-variables){{end}}{{- range .Funcs -}}{{$name_html := html .Name}} +* [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Types}}{{$tname_html := html .Name}} +* [type {{$tname_html}}](#{{$tname_html}}){{- range .Funcs}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Methods}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#{{$tname_html}}.{{$name_html}}){{- end}}{{- end}}{{- if $.Notes}}{{- range $marker, $item := $.Notes}} +* [{{noteTitle $marker | html}}s](#pkg-note-{{$marker}}){{end}}{{end}} +{{if $.Examples}} +#### Examples{{- range $.Examples}} +* [{{example_name .Name}}](#example_{{.Name}}){{- end}}{{- end}} + +{{with .Consts}}## Constants +{{range .}}{{node $ .Decl | pre}} +{{comment_md .Doc}}{{end}}{{end}} +{{with .Vars}}## Variables +{{range .}}{{node $ .Decl | pre}} +{{comment_md .Doc}}{{end}}{{end}} + +{{range .Funcs}}{{$name_html := html .Name}}## func [{{$name_html}}]({{gh_url $ .Decl}}) +{{node $ .Decl | pre}} +{{comment_md .Doc}} +{{example_html $ .Name}} +{{callgraph_html $ "" .Name}}{{end}} +{{range .Types}}{{$tname := .Name}}{{$tname_html := html .Name}}## type [{{$tname_html}}]({{gh_url $ .Decl}}) +{{node $ .Decl | pre}} +{{comment_md .Doc}}{{range .Consts}} +{{node $ .Decl | pre }} +{{comment_md .Doc}}{{end}}{{range .Vars}} +{{node $ .Decl | pre }} +{{comment_md .Doc}}{{end}} + +{{example_html $ $tname}} +{{implements_html $ $tname}} +{{methodset_html $ $tname}} + +{{range .Funcs}}{{$name_html := html .Name}}### func [{{$name_html}}]({{gh_url $ .Decl}}) +{{node $ .Decl | pre}} +{{comment_md .Doc}} +{{example_html $ .Name}}{{end}} +{{callgraph_html $ "" .Name}} + +{{range .Methods}}{{$name_html := html .Name}}### func ({{md .Recv}}) [{{$name_html}}]({{gh_url $ .Decl}}) +{{node $ .Decl | pre}} +{{comment_md .Doc}} +{{$name := printf "%s_%s" $tname .Name}}{{example_html $ $name}} +{{callgraph_html $ .Recv .Name}} +{{end}}{{end}}{{end}} + +{{with $.Notes}} +{{range $marker, $content := .}} +## {{noteTitle $marker | html}}s + +{{end}} +{{end}} +{{if .Dirs}} +## Subdirectories +{{range $.Dirs.List}} +{{indent .Depth}}* [{{.Name | html}}]({{print "./" .Path}}){{if .Synopsis}} {{ .Synopsis}}{{end -}} +{{end}} +{{end}} diff --git a/vendor/github.com/ettle/strcase/LICENSE b/vendor/github.com/ettle/strcase/LICENSE new file mode 100644 index 000000000..4f0116be2 --- /dev/null +++ b/vendor/github.com/ettle/strcase/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Liyan David Chang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ettle/strcase/Makefile b/vendor/github.com/ettle/strcase/Makefile new file mode 100644 index 000000000..462f8b473 --- /dev/null +++ b/vendor/github.com/ettle/strcase/Makefile @@ -0,0 +1,16 @@ +.PHONY: benchmark docs lint test + +docs: + which godoc2ghmd || ( go get github.com/DevotedHealth/godoc2ghmd && go mod tidy ) + godoc2ghmd -template .readme.tmpl github.com/ettle/strcase > README.md + +test: + go test -cover ./... + +lint: + which golangci-lint || ( go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.27.0 && go mod tidy ) + golangci-lint run + golangci-lint run benchmark/*.go + +benchmark: + cd benchmark && go test -bench=. -test.benchmem && go mod tidy diff --git a/vendor/github.com/ettle/strcase/README.md b/vendor/github.com/ettle/strcase/README.md new file mode 100644 index 000000000..ee165e3e5 --- /dev/null +++ b/vendor/github.com/ettle/strcase/README.md @@ -0,0 +1,542 @@ + +# Go Strcase + +[![Go Report Card](https://goreportcard.com/badge/github.com/ettle/strcase)](https://goreportcard.com/report/github.com/ettle/strcase) +[![Coverage](http://gocover.io/_badge/github.com/ettle/strcase?0)](http://gocover.io/github.com/ettle/strcase) +[![GoDoc](https://godoc.org/github.com/ettle/strcase?status.svg)](https://pkg.go.dev/github.com/ettle/strcase) + +Convert strings to `snake_case`, `camelCase`, `PascalCase`, `kebab-case` and more! Supports Go initialisms, customization, and Unicode. + +`import "github.com/ettle/strcase"` + +## Overview +Package strcase is a package for converting strings into various word cases +(e.g. snake_case, camelCase) + + + go get -u github.com/ettle/strcase + +Example usage + + + strcase.ToSnake("Hello World") // hello_world + strcase.ToSNAKE("Hello World") // HELLO_WORLD + + strcase.ToKebab("helloWorld") // hello-world + strcase.ToKEBAB("helloWorld") // HELLO-WORLD + + strcase.ToPascal("hello-world") // HelloWorld + strcase.ToCamel("hello-world") // helloWorld + + // Handle odd cases + strcase.ToSnake("FOOBar") // foo_bar + + // Support Go initialisms + strcase.ToGoCamel("http_response") // HTTPResponse + + // Specify case and delimiter + strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD + +### Why this package +String strcase is pretty straight forward and there are a number of methods to +do it. This package is fully featured, more customizable, better tested, and +faster* than other packages and what you would probably whip up yourself. + +### Unicode support +We work for with unicode strings and pay very little performance penalty for it +as we optimized for the common use case of ASCII only strings. + +### Customization +You can create a custom caser that changes the behavior to what you want. This +customization also reduces the pressure for us to change the default behavior +which means that things are more stable for everyone involved. The goal is to +make the common path easy and fast, while making the uncommon path possible. + + + c := NewCaser( + // Use Go's default initialisms e.g. 
ID, HTML + true, + // Override initialisms (e.g. don't initialize HTML but initialize SSL + map[string]bool{"SSL": true, "HTML": false}, + // Write your own custom SplitFn + // + NewSplitFn( + []rune{'*', '.', ','}, + SplitCase, + SplitAcronym, + PreserveNumberFormatting, + SplitBeforeNumber, + SplitAfterNumber, + )) + assert.Equal(t, "http_200", c.ToSnake("http200")) + +### Initialism support +By default, we use the golint intialisms list. You can customize and override +the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or +domain specific ones to your industry. + + + ToGoCamel("http_response") // HTTPResponse + ToGoSnake("http_response") // HTTP_response + +### Test coverage +We have a wide ranging test suite to make sure that we understand our behavior. +Test coverage isn't everything, but we aim for 100% coverage. + +### Fast +Optimized to reduce memory allocations with Builder. Benchmarked and optimized +around common cases. + +We're on par with the fastest packages (that have less features) and much +faster than others. We also benchmarked against code snippets. Using string +builders to reduce memory allocation and reordering boolean checks for the +common cases have a large performance impact. + +Hopefully I was fair to each library and happy to rerun benchmarks differently +or reword my commentary based on suggestions or updates. + + + // This package + // Go intialisms and custom casers are slower + BenchmarkToTitle-4 992491 1559 ns/op 32 B/op 1 allocs/op + BenchmarkToSnake-4 1000000 1475 ns/op 32 B/op 1 allocs/op + BenchmarkToSNAKE-4 1000000 1609 ns/op 32 B/op 1 allocs/op + BenchmarkToGoSnake-4 275010 3697 ns/op 44 B/op 4 allocs/op + BenchmarkToCustomCaser-4 342704 4191 ns/op 56 B/op 4 allocs/op + + // Segment has very fast snake case and camel case libraries + // No features or customization, but very very fast + BenchmarkSegment-4 1303809 938 ns/op 16 B/op 1 allocs/op + + // Stdlib strings.Title for comparison, even though it only splits on spaces + BenchmarkToTitleStrings-4 1213467 1164 ns/op 16 B/op 1 allocs/op + + // Other libraries or code snippets + // - Most are slower, by up to an order of magnitude + // - None support initialisms or customization + // - Some generate only camelCase or snake_case + // - Many lack unicode support + BenchmarkToSnakeStoewer-4 973200 2075 ns/op 64 B/op 2 allocs/op + // Copying small rune arrays is slow + BenchmarkToSnakeSiongui-4 264315 4229 ns/op 48 B/op 10 allocs/op + BenchmarkGoValidator-4 206811 5152 ns/op 184 B/op 9 allocs/op + // String alloction is slow + BenchmarkToSnakeFatih-4 82675 12280 ns/op 392 B/op 26 allocs/op + BenchmarkToSnakeIanColeman-4 83276 13903 ns/op 145 B/op 13 allocs/op + // Regexp is slow + BenchmarkToSnakeGolangPrograms-4 74448 18586 ns/op 176 B/op 11 allocs/op + + // These results aren't a surprise - my initial version of this library was + // painfully slow. I think most of us, without spending some time with + // profilers and benchmarks, would write also something on the slower side. + +### Why not this package +If every nanosecond matters and this is used in a tight loop, use segment.io's +libraries (https://github.com/segmentio/go-snakecase and +https://github.com/segmentio/go-camelcase). They lack features, but make up for +it by being blazing fast. Alternatively, if you need your code to work slightly +differently, fork them and tailor it for your use case. + +If you don't like having external imports, I get it. 
This package only imports +packages for testing, otherwise it only uses the standard library. If that's +not enough, you can use this repo as the foundation for your own. MIT Licensed. + +This package is still relatively new and while I've used it for a while +personally, it doesn't have the miles that other packages do. I've tested this +code agains't their test cases to make sure that there aren't any surprises. + +### Migrating from other packages +If you are migrating from from another package, you may find slight differences +in output. To reduce the delta, you may find it helpful to use the following +custom casers to mimic the behavior of the other package. + + + // From https://github.com/iancoleman/strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) + + // From https://github.com/stoewer/go-strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) + + + + +## Index +* [func ToCamel(s string) string](#ToCamel) +* [func ToCase(s string, wordCase WordCase, delimiter rune) string](#ToCase) +* [func ToGoCamel(s string) string](#ToGoCamel) +* [func ToGoCase(s string, wordCase WordCase, delimiter rune) string](#ToGoCase) +* [func ToGoKebab(s string) string](#ToGoKebab) +* [func ToGoPascal(s string) string](#ToGoPascal) +* [func ToGoSnake(s string) string](#ToGoSnake) +* [func ToKEBAB(s string) string](#ToKEBAB) +* [func ToKebab(s string) string](#ToKebab) +* [func ToPascal(s string) string](#ToPascal) +* [func ToSNAKE(s string) string](#ToSNAKE) +* [func ToSnake(s string) string](#ToSnake) +* [type Caser](#Caser) + * [func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser](#NewCaser) + * [func (c *Caser) ToCamel(s string) string](#Caser.ToCamel) + * [func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string](#Caser.ToCase) + * [func (c *Caser) ToKEBAB(s string) string](#Caser.ToKEBAB) + * [func (c *Caser) ToKebab(s string) string](#Caser.ToKebab) + * [func (c *Caser) ToPascal(s string) string](#Caser.ToPascal) + * [func (c *Caser) ToSNAKE(s string) string](#Caser.ToSNAKE) + * [func (c *Caser) ToSnake(s string) string](#Caser.ToSnake) +* [type SplitAction](#SplitAction) +* [type SplitFn](#SplitFn) + * [func NewSplitFn(delimiters []rune, splitOptions ...SplitOption) SplitFn](#NewSplitFn) +* [type SplitOption](#SplitOption) +* [type WordCase](#WordCase) + + + + + +## func [ToCamel](./strcase.go#L57) +``` go +func ToCamel(s string) string +``` +ToCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +Also known as lowerCamelCase or mixedCase. + + + +## func [ToCase](./strcase.go#L70) +``` go +func ToCase(s string, wordCase WordCase, delimiter rune) string +``` +ToCase returns words in given case and delimiter. + + + +## func [ToGoCamel](./strcase.go#L65) +``` go +func ToGoCamel(s string) string +``` +ToGoCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +Also known as lowerCamelCase or mixedCase. + +Respects Go's common initialisms (e.g. httpResponse -> HTTPResponse). + + + +## func [ToGoCase](./strcase.go#L77) +``` go +func ToGoCase(s string, wordCase WordCase, delimiter rune) string +``` +ToGoCase returns words in given case and delimiter. + +Respects Go's common initialisms (e.g. httpResponse -> HTTPResponse). 
+ + + +## func [ToGoKebab](./strcase.go#L31) +``` go +func ToGoKebab(s string) string +``` +ToGoKebab returns words in kebab-case (lower case words with dashes). +Also known as dash-case. + +Respects Go's common initialisms (e.g. http-response -> HTTP-response). + + + +## func [ToGoPascal](./strcase.go#L51) +``` go +func ToGoPascal(s string) string +``` +ToGoPascal returns words in PascalCase (capitalized words concatenated together). +Also known as UpperPascalCase. + +Respects Go's common initialisms (e.g. HttpResponse -> HTTPResponse). + + + +## func [ToGoSnake](./strcase.go#L11) +``` go +func ToGoSnake(s string) string +``` +ToGoSnake returns words in snake_case (lower case words with underscores). + +Respects Go's common initialisms (e.g. http_response -> HTTP_response). + + + +## func [ToKEBAB](./strcase.go#L37) +``` go +func ToKEBAB(s string) string +``` +ToKEBAB returns words in KEBAB-CASE (upper case words with dashes). +Also known as SCREAMING-KEBAB-CASE or SCREAMING-DASH-CASE. + + + +## func [ToKebab](./strcase.go#L23) +``` go +func ToKebab(s string) string +``` +ToKebab returns words in kebab-case (lower case words with dashes). +Also known as dash-case. + + + +## func [ToPascal](./strcase.go#L43) +``` go +func ToPascal(s string) string +``` +ToPascal returns words in PascalCase (capitalized words concatenated together). +Also known as UpperPascalCase. + + + +## func [ToSNAKE](./strcase.go#L17) +``` go +func ToSNAKE(s string) string +``` +ToSNAKE returns words in SNAKE_CASE (upper case words with underscores). +Also known as SCREAMING_SNAKE_CASE or UPPER_CASE. + + + +## func [ToSnake](./strcase.go#L4) +``` go +func ToSnake(s string) string +``` +ToSnake returns words in snake_case (lower case words with underscores). + + + + +## type [Caser](./caser.go#L4-L7) +``` go +type Caser struct { + // contains filtered or unexported fields +} + +``` +Caser allows for customization of parsing and intialisms + + + + + + + +### func [NewCaser](./caser.go#L24) +``` go +func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser +``` +NewCaser returns a configured Caser. + +A Caser should be created when you want fine grained control over how the words are split. + + + Notes on function arguments + + goInitialisms: Whether to use Golint's intialisms + + initialismOverrides: A mapping of extra initialisms + Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. + Setting a key to false will override Golint's. + + splitFn: How to separate words + Override the default split function. Consider using NewSplitFn to + configure one instead of writing your own. + + + + + +### func (\*Caser) [ToCamel](./caser.go#L80) +``` go +func (c *Caser) ToCamel(s string) string +``` +ToCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +Also known as lowerCamelCase or mixedCase. + + + + +### func (\*Caser) [ToCase](./caser.go#L85) +``` go +func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string +``` +ToCase returns words with a given case and delimiter. + + + + +### func (\*Caser) [ToKEBAB](./caser.go#L68) +``` go +func (c *Caser) ToKEBAB(s string) string +``` +ToKEBAB returns words in KEBAB-CASE (upper case words with dashes). +Also known as SCREAMING-KEBAB-CASE or SCREAMING-DASH-CASE. + + + + +### func (\*Caser) [ToKebab](./caser.go#L62) +``` go +func (c *Caser) ToKebab(s string) string +``` +ToKebab returns words in kebab-case (lower case words with dashes). +Also known as dash-case. 
+ + + + +### func (\*Caser) [ToPascal](./caser.go#L74) +``` go +func (c *Caser) ToPascal(s string) string +``` +ToPascal returns words in PascalCase (capitalized words concatenated together). +Also known as UpperPascalCase. + + + + +### func (\*Caser) [ToSNAKE](./caser.go#L56) +``` go +func (c *Caser) ToSNAKE(s string) string +``` +ToSNAKE returns words in SNAKE_CASE (upper case words with underscores). +Also known as SCREAMING_SNAKE_CASE or UPPER_CASE. + + + + +### func (\*Caser) [ToSnake](./caser.go#L50) +``` go +func (c *Caser) ToSnake(s string) string +``` +ToSnake returns words in snake_case (lower case words with underscores). + + + + +## type [SplitAction](./split.go#L110) +``` go +type SplitAction int +``` +SplitAction defines if and how to split a string + + +``` go +const ( + // Noop - Continue to next character + Noop SplitAction = iota + // Split - Split between words + // e.g. to split between wordsWithoutDelimiters + Split + // SkipSplit - Split the word and drop the character + // e.g. to split words with delimiters + SkipSplit + // Skip - Remove the character completely + Skip +) +``` + + + + + + + + + +## type [SplitFn](./split.go#L6) +``` go +type SplitFn func(prev, curr, next rune) SplitAction +``` +SplitFn defines how to split a string into words + + + + + + + +### func [NewSplitFn](./split.go#L14-L17) +``` go +func NewSplitFn( + delimiters []rune, + splitOptions ...SplitOption, +) SplitFn +``` +NewSplitFn returns a SplitFn based on the options provided. + +NewSplitFn covers the majority of common options that other strcase +libraries provide and should allow you to simply create a custom caser. +For more complicated use cases, feel free to write your own SplitFn +nolint:gocyclo + + + + + +## type [SplitOption](./split.go#L93) +``` go +type SplitOption int +``` +SplitOption are options that allow for configuring NewSplitFn + + +``` go +const ( + // SplitCase - FooBar -> Foo_Bar + SplitCase SplitOption = iota + // SplitAcronym - FOOBar -> Foo_Bar + // It won't preserve FOO's case. If you want, you can set the Caser's initialisms so FOO will be in all caps + SplitAcronym + // SplitBeforeNumber - port80 -> port_80 + SplitBeforeNumber + // SplitAfterNumber - 200status -> 200_status + SplitAfterNumber + // PreserveNumberFormatting - a.b.2,000.3.c -> a_b_2,000.3_c + PreserveNumberFormatting +) +``` + + + + + + + + + +## type [WordCase](./convert.go#L6) +``` go +type WordCase int +``` +WordCase is an enumeration of the ways to format a word. + + +``` go +const ( + // Original - Preserve the original input strcase + Original WordCase = iota + // LowerCase - All letters lower cased (example) + LowerCase + // UpperCase - All letters upper cased (EXAMPLE) + UpperCase + // TitleCase - Only first letter upper cased (Example) + TitleCase + // CamelCase - TitleCase except lower case first word (exampleText) + CamelCase +) +``` + + + + + + + + + + + + + diff --git a/vendor/github.com/ettle/strcase/caser.go b/vendor/github.com/ettle/strcase/caser.go new file mode 100644 index 000000000..891a67189 --- /dev/null +++ b/vendor/github.com/ettle/strcase/caser.go @@ -0,0 +1,87 @@ +package strcase + +// Caser allows for customization of parsing and intialisms +type Caser struct { + initialisms map[string]bool + splitFn SplitFn +} + +// NewCaser returns a configured Caser. +// +// A Caser should be created when you want fine grained control over how the words are split. 
+// +// Notes on function arguments +// +// goInitialisms: Whether to use Golint's intialisms +// +// initialismOverrides: A mapping of extra initialisms +// Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. +// Setting a key to false will override Golint's. +// +// splitFn: How to separate words +// Override the default split function. Consider using NewSplitFn to +// configure one instead of writing your own. +func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser { + c := &Caser{ + initialisms: golintInitialisms, + splitFn: splitFn, + } + + if c.splitFn == nil { + c.splitFn = defaultSplitFn + } + + if goInitialisms && initialismOverrides != nil { + c.initialisms = map[string]bool{} + for k, v := range golintInitialisms { + c.initialisms[k] = v + } + for k, v := range initialismOverrides { + c.initialisms[k] = v + } + } else if !goInitialisms { + c.initialisms = initialismOverrides + } + + return c +} + +// ToSnake returns words in snake_case (lower case words with underscores). +func (c *Caser) ToSnake(s string) string { + return convert(s, c.splitFn, '_', LowerCase, c.initialisms) +} + +// ToSNAKE returns words in SNAKE_CASE (upper case words with underscores). +// Also known as SCREAMING_SNAKE_CASE or UPPER_CASE. +func (c *Caser) ToSNAKE(s string) string { + return convert(s, c.splitFn, '_', UpperCase, c.initialisms) +} + +// ToKebab returns words in kebab-case (lower case words with dashes). +// Also known as dash-case. +func (c *Caser) ToKebab(s string) string { + return convert(s, c.splitFn, '-', LowerCase, c.initialisms) +} + +// ToKEBAB returns words in KEBAB-CASE (upper case words with dashes). +// Also known as SCREAMING-KEBAB-CASE or SCREAMING-DASH-CASE. +func (c *Caser) ToKEBAB(s string) string { + return convert(s, c.splitFn, '-', UpperCase, c.initialisms) +} + +// ToPascal returns words in PascalCase (capitalized words concatenated together). +// Also known as UpperPascalCase. +func (c *Caser) ToPascal(s string) string { + return convert(s, c.splitFn, '\x00', TitleCase, c.initialisms) +} + +// ToCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +// Also known as lowerCamelCase or mixedCase. +func (c *Caser) ToCamel(s string) string { + return convert(s, c.splitFn, '\x00', CamelCase, c.initialisms) +} + +// ToCase returns words with a given case and delimiter. +func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string { + return convert(s, c.splitFn, delimiter, wordCase, c.initialisms) +} diff --git a/vendor/github.com/ettle/strcase/convert.go b/vendor/github.com/ettle/strcase/convert.go new file mode 100644 index 000000000..70fedb144 --- /dev/null +++ b/vendor/github.com/ettle/strcase/convert.go @@ -0,0 +1,297 @@ +package strcase + +import "strings" + +// WordCase is an enumeration of the ways to format a word. +type WordCase int + +const ( + // Original - Preserve the original input strcase + Original WordCase = iota + // LowerCase - All letters lower cased (example) + LowerCase + // UpperCase - All letters upper cased (EXAMPLE) + UpperCase + // TitleCase - Only first letter upper cased (Example) + TitleCase + // CamelCase - TitleCase except lower case first word (exampleText) + // Notably, even if the first word is an initialism, it will be lower + // cased. This is important for code generators where capital letters + // mean exported functions. i.e. 
jsonString(), not JSONString() + CamelCase +) + +// We have 3 convert functions for performance reasons +// The general convert could handle everything, but is not optimized +// +// The other two functions are optimized for the general use cases - that is the non-custom caser functions +// Case 1: Any Case and supports Go Initialisms +// Case 2: UpperCase words, which don't need to support initialisms since everything is in upper case + +// convertWithoutInitialims only works for to UpperCase and LowerCase +//nolint:gocyclo +func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) string { + input = strings.TrimSpace(input) + runes := []rune(input) + if len(runes) == 0 { + return "" + } + + var b strings.Builder + b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + + var prev, curr rune + next := runes[0] // 0 length will have already returned so safe to index + inWord := false + firstWord := true + for i := 0; i < len(runes); i++ { + prev = curr + curr = next + if i+1 == len(runes) { + next = 0 + } else { + next = runes[i+1] + } + + switch defaultSplitFn(prev, curr, next) { + case SkipSplit: + if inWord && delimiter != 0 { + b.WriteRune(delimiter) + } + inWord = false + continue + case Split: + if inWord && delimiter != 0 { + b.WriteRune(delimiter) + } + inWord = false + } + switch wordCase { + case UpperCase: + b.WriteRune(toUpper(curr)) + case LowerCase: + b.WriteRune(toLower(curr)) + case TitleCase: + if inWord { + b.WriteRune(toLower(curr)) + } else { + b.WriteRune(toUpper(curr)) + } + case CamelCase: + if inWord { + b.WriteRune(toLower(curr)) + } else if firstWord { + b.WriteRune(toLower(curr)) + firstWord = false + } else { + b.WriteRune(toUpper(curr)) + } + default: + // Must be original case + b.WriteRune(curr) + } + inWord = inWord || true + } + return b.String() +} + +// convertWithGoInitialisms changes a input string to a certain case with a +// delimiter, respecting go initialisms but not skip runes +//nolint:gocyclo +func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) string { + input = strings.TrimSpace(input) + runes := []rune(input) + if len(runes) == 0 { + return "" + } + + var b strings.Builder + b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + + firstWord := true + + addWord := func(start, end int) { + if start == end { + return + } + + if !firstWord && delimiter != 0 { + b.WriteRune(delimiter) + } + + // Don't bother with initialisms if the word is longer than 5 + // A quick proxy to avoid the extra memory allocations + if end-start <= 5 { + key := strings.ToUpper(string(runes[start:end])) + if golintInitialisms[key] { + if !firstWord || wordCase != CamelCase { + b.WriteString(key) + firstWord = false + return + } + } + } + + for i := start; i < end; i++ { + r := runes[i] + switch wordCase { + case UpperCase: + panic("use convertWithoutInitialisms instead") + case LowerCase: + b.WriteRune(toLower(r)) + case TitleCase: + if i == start { + b.WriteRune(toUpper(r)) + } else { + b.WriteRune(toLower(r)) + } + case CamelCase: + if !firstWord && i == start { + b.WriteRune(toUpper(r)) + } else { + b.WriteRune(toLower(r)) + } + default: + b.WriteRune(r) + } + } + firstWord = false + } + + var prev, curr rune + next := runes[0] // 0 length will have already returned so safe to index + wordStart := 0 + for i := 0; i < len(runes); i++ { + prev = curr + curr = next + if i+1 == len(runes) { + next = 0 + } else { + next = runes[i+1] + } + + switch 
defaultSplitFn(prev, curr, next) { + case Split: + addWord(wordStart, i) + wordStart = i + case SkipSplit: + addWord(wordStart, i) + wordStart = i + 1 + } + } + + if wordStart != len(runes) { + addWord(wordStart, len(runes)) + } + return b.String() +} + +// convert changes a input string to a certain case with a delimiter, +// respecting arbitrary initialisms and skip characters +//nolint:gocyclo +func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, + initialisms map[string]bool) string { + input = strings.TrimSpace(input) + runes := []rune(input) + if len(runes) == 0 { + return "" + } + + var b strings.Builder + b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + + firstWord := true + var skipIndexes []int + + addWord := func(start, end int) { + // If you have nothing good to say, say nothing at all + if start == end || len(skipIndexes) == end-start { + skipIndexes = nil + return + } + + // If you have something to say, start with a delimiter + if !firstWord && delimiter != 0 { + b.WriteRune(delimiter) + } + + // Check if you're an initialism + // Note - we don't check skip characters here since initialisms + // will probably never have junk characters in between + // I'm open to it if there is a use case + if initialisms != nil { + var word strings.Builder + for i := start; i < end; i++ { + word.WriteRune(toUpper(runes[i])) + } + key := word.String() + if initialisms[key] { + if !firstWord || wordCase != CamelCase { + b.WriteString(key) + firstWord = false + return + } + } + } + + skipIdx := 0 + for i := start; i < end; i++ { + if len(skipIndexes) > 0 && skipIdx < len(skipIndexes) && i == skipIndexes[skipIdx] { + skipIdx++ + continue + } + r := runes[i] + switch wordCase { + case UpperCase: + b.WriteRune(toUpper(r)) + case LowerCase: + b.WriteRune(toLower(r)) + case TitleCase: + if i == start { + b.WriteRune(toUpper(r)) + } else { + b.WriteRune(toLower(r)) + } + case CamelCase: + if !firstWord && i == start { + b.WriteRune(toUpper(r)) + } else { + b.WriteRune(toLower(r)) + } + default: + b.WriteRune(r) + } + } + firstWord = false + skipIndexes = nil + } + + var prev, curr rune + next := runes[0] // 0 length will have already returned so safe to index + wordStart := 0 + for i := 0; i < len(runes); i++ { + prev = curr + curr = next + if i+1 == len(runes) { + next = 0 + } else { + next = runes[i+1] + } + + switch fn(prev, curr, next) { + case Skip: + skipIndexes = append(skipIndexes, i) + case Split: + addWord(wordStart, i) + wordStart = i + case SkipSplit: + addWord(wordStart, i) + wordStart = i + 1 + } + } + + if wordStart != len(runes) { + addWord(wordStart, len(runes)) + } + return b.String() +} diff --git a/vendor/github.com/ettle/strcase/doc.go b/vendor/github.com/ettle/strcase/doc.go new file mode 100644 index 000000000..b898a4e45 --- /dev/null +++ b/vendor/github.com/ettle/strcase/doc.go @@ -0,0 +1,155 @@ +/* +Package strcase is a package for converting strings into various word cases +(e.g. 
snake_case, camelCase) + + go get -u github.com/ettle/strcase + +Example usage + + strcase.ToSnake("Hello World") // hello_world + strcase.ToSNAKE("Hello World") // HELLO_WORLD + + strcase.ToKebab("helloWorld") // hello-world + strcase.ToKEBAB("helloWorld") // HELLO-WORLD + + strcase.ToPascal("hello-world") // HelloWorld + strcase.ToCamel("hello-world") // helloWorld + + // Handle odd cases + strcase.ToSnake("FOOBar") // foo_bar + + // Support Go initialisms + strcase.ToGoPascal("http_response") // HTTPResponse + + // Specify case and delimiter + strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD + +Why this package + +String strcase is pretty straight forward and there are a number of methods to +do it. This package is fully featured, more customizable, better tested, and +faster* than other packages and what you would probably whip up yourself. + +Unicode support + +We work for with unicode strings and pay very little performance penalty for it +as we optimized for the common use case of ASCII only strings. + +Customization + +You can create a custom caser that changes the behavior to what you want. This +customization also reduces the pressure for us to change the default behavior +which means that things are more stable for everyone involved. The goal is to +make the common path easy and fast, while making the uncommon path possible. + + c := NewCaser( + // Use Go's default initialisms e.g. ID, HTML + true, + // Override initialisms (e.g. don't initialize HTML but initialize SSL + map[string]bool{"SSL": true, "HTML": false}, + // Write your own custom SplitFn + // + NewSplitFn( + []rune{'*', '.', ','}, + SplitCase, + SplitAcronym, + PreserveNumberFormatting, + SplitBeforeNumber, + SplitAfterNumber, + )) + assert.Equal(t, "http_200", c.ToSnake("http200")) + +Initialism support + +By default, we use the golint intialisms list. You can customize and override +the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or +domain specific ones to your industry. + + ToGoPascal("http_response") // HTTPResponse + ToGoSnake("http_response") // HTTP_response + +Test coverage + +We have a wide ranging test suite to make sure that we understand our behavior. +Test coverage isn't everything, but we aim for 100% coverage. + +Fast + +Optimized to reduce memory allocations with Builder. Benchmarked and optimized +around common cases. + +We're on par with the fastest packages (that have less features) and much +faster than others. We also benchmarked against code snippets. Using string +builders to reduce memory allocation and reordering boolean checks for the +common cases have a large performance impact. + +Hopefully I was fair to each library and happy to rerun benchmarks differently +or reword my commentary based on suggestions or updates. 
+ + // This package - faster then almost all libraries + // Initialisms are more complicated and slightly slower, but still faster then other libraries that do less + BenchmarkToTitle-4 7821166 221 ns/op 32 B/op 1 allocs/op + BenchmarkToSnake-4 9378589 202 ns/op 32 B/op 1 allocs/op + BenchmarkToSNAKE-4 6174453 223 ns/op 32 B/op 1 allocs/op + BenchmarkToGoSnake-4 3114266 434 ns/op 44 B/op 4 allocs/op + BenchmarkToCustomCaser-4 2973855 448 ns/op 56 B/op 4 allocs/op + + // Segment has very fast snake case and camel case libraries + // No features or customization, but very very fast + BenchmarkSegment-4 24003495 64.9 ns/op 16 B/op 1 allocs/op + + // Stdlib strings.Title for comparison, even though it only splits on spaces + BenchmarkToTitleStrings-4 11259376 161 ns/op 16 B/op 1 allocs/op + + // Other libraries or code snippets + // - Most are slower, by up to an order of magnitude + // - None support initialisms or customization + // - Some generate only camelCase or snake_case + // - Many lack unicode support + BenchmarkToSnakeStoewer-4 7103268 297 ns/op 64 B/op 2 allocs/op + // Copying small rune arrays is slow + BenchmarkToSnakeSiongui-4 3710768 413 ns/op 48 B/op 10 allocs/op + BenchmarkGoValidator-4 2416479 1049 ns/op 184 B/op 9 allocs/op + // String alloction is slow + BenchmarkToSnakeFatih-4 1000000 2407 ns/op 624 B/op 26 allocs/op + BenchmarkToSnakeIanColeman-4 1005766 1426 ns/op 160 B/op 13 allocs/op + // Regexp is slow + BenchmarkToSnakeGolangPrograms-4 614689 2237 ns/op 225 B/op 11 allocs/op + + + + // These results aren't a surprise - my initial version of this library was + // painfully slow. I think most of us, without spending some time with + // profilers and benchmarks, would write also something on the slower side. + + +Why not this package + +If every nanosecond matters and this is used in a tight loop, use segment.io's +libraries (https://github.com/segmentio/go-snakecase and +https://github.com/segmentio/go-camelcase). They lack features, but make up for +it by being blazing fast. Alternatively, if you need your code to work slightly +differently, fork them and tailor it for your use case. + +If you don't like having external imports, I get it. This package only imports +packages for testing, otherwise it only uses the standard library. If that's +not enough, you can use this repo as the foundation for your own. MIT Licensed. + +This package is still relatively new and while I've used it for a while +personally, it doesn't have the miles that other packages do. I've tested this +code agains't their test cases to make sure that there aren't any surprises. + +Migrating from other packages + +If you are migrating from from another package, you may find slight differences +in output. To reduce the delta, you may find it helpful to use the following +custom casers to mimic the behavior of the other package. 
+ + // From https://github.com/iancoleman/strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) + + // From https://github.com/stoewer/go-strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) + +*/ +package strcase diff --git a/vendor/github.com/ettle/strcase/go.mod b/vendor/github.com/ettle/strcase/go.mod new file mode 100644 index 000000000..0219bfaa0 --- /dev/null +++ b/vendor/github.com/ettle/strcase/go.mod @@ -0,0 +1,5 @@ +module github.com/ettle/strcase + +go 1.12 + +require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/ettle/strcase/go.sum b/vendor/github.com/ettle/strcase/go.sum new file mode 100644 index 000000000..331fa6982 --- /dev/null +++ b/vendor/github.com/ettle/strcase/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ettle/strcase/initialism.go b/vendor/github.com/ettle/strcase/initialism.go new file mode 100644 index 000000000..3c313d3e9 --- /dev/null +++ b/vendor/github.com/ettle/strcase/initialism.go @@ -0,0 +1,43 @@ +package strcase + +// golintInitialisms are the golint initialisms +var golintInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} diff --git a/vendor/github.com/ettle/strcase/split.go b/vendor/github.com/ettle/strcase/split.go new file mode 100644 index 000000000..84381106b --- /dev/null +++ b/vendor/github.com/ettle/strcase/split.go @@ -0,0 +1,164 @@ +package strcase + +import "unicode" + +// SplitFn defines how to split a string into words +type SplitFn func(prev, curr, next rune) SplitAction + +// NewSplitFn returns a SplitFn based on the options provided. +// +// NewSplitFn covers the majority of common options that other strcase +// libraries provide and should allow you to simply create a custom caser. 
+// For more complicated use cases, feel free to write your own SplitFn +//nolint:gocyclo +func NewSplitFn( + delimiters []rune, + splitOptions ...SplitOption, +) SplitFn { + var splitCase, splitAcronym, splitBeforeNumber, splitAfterNumber, preserveNumberFormatting bool + + for _, option := range splitOptions { + switch option { + case SplitCase: + splitCase = true + case SplitAcronym: + splitAcronym = true + case SplitBeforeNumber: + splitBeforeNumber = true + case SplitAfterNumber: + splitAfterNumber = true + case PreserveNumberFormatting: + preserveNumberFormatting = true + } + } + + return func(prev, curr, next rune) SplitAction { + // The most common case will be that it's just a letter + // There are safe cases to process + if isLower(curr) && !isNumber(prev) { + return Noop + } + if isUpper(prev) && isUpper(curr) && isUpper(next) { + return Noop + } + + if preserveNumberFormatting { + if (curr == '.' || curr == ',') && + isNumber(prev) && isNumber(next) { + return Noop + } + } + + if unicode.IsSpace(curr) { + return SkipSplit + } + for _, d := range delimiters { + if curr == d { + return SkipSplit + } + } + + if splitBeforeNumber { + if isNumber(curr) && !isNumber(prev) { + if preserveNumberFormatting && (prev == '.' || prev == ',') { + return Noop + } + return Split + } + } + + if splitAfterNumber { + if isNumber(prev) && !isNumber(curr) { + return Split + } + } + + if splitCase { + if !isUpper(prev) && isUpper(curr) { + return Split + } + } + + if splitAcronym { + if isUpper(prev) && isUpper(curr) && isLower(next) { + return Split + } + } + + return Noop + } +} + +// SplitOption are options that allow for configuring NewSplitFn +type SplitOption int + +const ( + // SplitCase - FooBar -> Foo_Bar + SplitCase SplitOption = iota + // SplitAcronym - FOOBar -> Foo_Bar + // It won't preserve FOO's case. If you want, you can set the Caser's initialisms so FOO will be in all caps + SplitAcronym + // SplitBeforeNumber - port80 -> port_80 + SplitBeforeNumber + // SplitAfterNumber - 200status -> 200_status + SplitAfterNumber + // PreserveNumberFormatting - a.b.2,000.3.c -> a_b_2,000.3_c + PreserveNumberFormatting +) + +// SplitAction defines if and how to split a string +type SplitAction int + +const ( + // Noop - Continue to next character + Noop SplitAction = iota + // Split - Split between words + // e.g. to split between wordsWithoutDelimiters + Split + // SkipSplit - Split the word and drop the character + // e.g. to split words with delimiters + SkipSplit + // Skip - Remove the character completely + Skip +) + +//nolint:gocyclo +func defaultSplitFn(prev, curr, next rune) SplitAction { + // The most common case will be that it's just a letter so let lowercase letters return early since we know what they should do + if isLower(curr) { + return Noop + } + // Delimiters are _, -, ., and unicode spaces + // Handle . lower down as it needs to happen after number exceptions + if curr == '_' || curr == '-' || isSpace(curr) { + return SkipSplit + } + + if isUpper(curr) { + if isLower(prev) { + // fooBar + return Split + } else if isUpper(prev) && isLower(next) { + // FOOBar + return Split + } + } + + // Do numeric exceptions last to avoid perf penalty + if unicode.IsNumber(prev) { + // v4.3 is not split + if (curr == '.' || curr == ',') && unicode.IsNumber(next) { + return Noop + } + if !unicode.IsNumber(curr) && curr != '.' { + return Split + } + } + // While period is a default delimiter, keep it down here to avoid + // penalty for other delimiters + if curr == '.' 
{ + return SkipSplit + } + + return Noop +} diff --git a/vendor/github.com/ettle/strcase/strcase.go b/vendor/github.com/ettle/strcase/strcase.go new file mode 100644 index 000000000..46b4f7a68 --- /dev/null +++ b/vendor/github.com/ettle/strcase/strcase.go @@ -0,0 +1,81 @@ +package strcase + +// ToSnake returns words in snake_case (lower case words with underscores). +func ToSnake(s string) string { + return convertWithoutInitialisms(s, '_', LowerCase) +} + +// ToGoSnake returns words in snake_case (lower case words with underscores). +// +// Respects Go's common initialisms (e.g. http_response -> HTTP_response). +func ToGoSnake(s string) string { + return convertWithGoInitialisms(s, '_', LowerCase) +} + +// ToSNAKE returns words in SNAKE_CASE (upper case words with underscores). +// Also known as SCREAMING_SNAKE_CASE or UPPER_CASE. +func ToSNAKE(s string) string { + return convertWithoutInitialisms(s, '_', UpperCase) +} + +// ToKebab returns words in kebab-case (lower case words with dashes). +// Also known as dash-case. +func ToKebab(s string) string { + return convertWithoutInitialisms(s, '-', LowerCase) +} + +// ToGoKebab returns words in kebab-case (lower case words with dashes). +// Also known as dash-case. +// +// Respects Go's common initialisms (e.g. http-response -> HTTP-response). +func ToGoKebab(s string) string { + return convertWithGoInitialisms(s, '-', LowerCase) +} + +// ToKEBAB returns words in KEBAB-CASE (upper case words with dashes). +// Also known as SCREAMING-KEBAB-CASE or SCREAMING-DASH-CASE. +func ToKEBAB(s string) string { + return convertWithoutInitialisms(s, '-', UpperCase) +} + +// ToPascal returns words in PascalCase (capitalized words concatenated together). +// Also known as UpperPascalCase. +func ToPascal(s string) string { + return convertWithoutInitialisms(s, 0, TitleCase) +} + +// ToGoPascal returns words in PascalCase (capitalized words concatenated together). +// Also known as UpperPascalCase. +// +// Respects Go's common initialisms (e.g. HttpResponse -> HTTPResponse). +func ToGoPascal(s string) string { + return convertWithGoInitialisms(s, 0, TitleCase) +} + +// ToCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +// Also known as lowerCamelCase or mixedCase. +func ToCamel(s string) string { + return convertWithoutInitialisms(s, 0, CamelCase) +} + +// ToGoCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). +// Also known as lowerCamelCase or mixedCase. +// +// Respects Go's common initialisms, but first word remains lowercased which is +// important for code generator use cases (e.g. toJson -> toJSON, httpResponse +// -> httpResponse). +func ToGoCamel(s string) string { + return convertWithGoInitialisms(s, 0, CamelCase) +} + +// ToCase returns words in given case and delimiter. +func ToCase(s string, wordCase WordCase, delimiter rune) string { + return convertWithoutInitialisms(s, delimiter, wordCase) +} + +// ToGoCase returns words in given case and delimiter. +// +// Respects Go's common initialisms (e.g. httpResponse -> HTTPResponse). 
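+//
+// Illustrative example (not taken from the upstream documentation):
+//
+//	ToGoCase("httpResponse", TitleCase, 0) // "HTTPResponse"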
+func ToGoCase(s string, wordCase WordCase, delimiter rune) string { + return convertWithGoInitialisms(s, delimiter, wordCase) +} diff --git a/vendor/github.com/ettle/strcase/unicode.go b/vendor/github.com/ettle/strcase/unicode.go new file mode 100644 index 000000000..b75e25a51 --- /dev/null +++ b/vendor/github.com/ettle/strcase/unicode.go @@ -0,0 +1,48 @@ +package strcase + +import "unicode" + +// Unicode functions, optimized for the common case of ascii +// No performance lost by wrapping since these functions get inlined by the compiler + +func isUpper(r rune) bool { + return unicode.IsUpper(r) +} + +func isLower(r rune) bool { + return unicode.IsLower(r) +} + +func isNumber(r rune) bool { + if r >= '0' && r <= '9' { + return true + } + return unicode.IsNumber(r) +} + +func isSpace(r rune) bool { + if r == ' ' || r == '\t' || r == '\n' || r == '\r' { + return true + } else if r < 128 { + return false + } + return unicode.IsSpace(r) +} + +func toUpper(r rune) rune { + if r >= 'a' && r <= 'z' { + return r - 32 + } else if r < 128 { + return r + } + return unicode.ToUpper(r) +} + +func toLower(r rune) rune { + if r >= 'A' && r <= 'Z' { + return r + 32 + } else if r < 128 { + return r + } + return unicode.ToLower(r) +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..5c751f215 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,178 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. 
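+
+At its core the package just wraps text in ANSI SGR escape sequences; a
+minimal hand-rolled sketch of that idea (independent of this package, with
+`wrapSGR` being a throwaway helper name) looks like:
+
+```go
+package main
+
+import "fmt"
+
+// wrapSGR surrounds s with an SGR color sequence and a reset; 36 is the
+// standard code for a cyan foreground.
+func wrapSGR(s string, code int) string {
+	return fmt.Sprintf("\x1b[%dm%s\x1b[0m", code, s)
+}
+
+func main() {
+	fmt.Println(wrapSGR("Prints text in cyan.", 36))
+}
+```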
+ +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`). + +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set (regardless of its value). 
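+
+As a rough sketch of that default (the package computes its `NoColor` value
+the same way, additionally checking for Cygwin terminals and `TERM=dumb`),
+the decision can be reproduced or overridden with the `go-isatty` dependency:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/fatih/color"
+	"github.com/mattn/go-isatty"
+)
+
+func main() {
+	// Disable color when NO_COLOR is set or stdout is not a terminal;
+	// flip this assignment to force color output instead.
+	_, noColorSet := os.LookupEnv("NO_COLOR")
+	color.NoColor = noColorSet || !isatty.IsTerminal(os.Stdout.Fd())
+
+	color.Cyan("colorized only when color is allowed")
+}
+```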
+ +`Color` has support to disable/enable colors programatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`--no-color` bool flag. You can easily disable the color output with: + +```go +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..98a60f3c8 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,618 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorExists returns true if the environment variable NO_COLOR exists. +func noColorExists() bool { + _, exists := os.LookupEnv("NO_COLOR") + return exists +} + +// Color defines a custom color object which is defined by SGR parameters. 
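+// A Color is usually constructed with New and extended with Add, for example
+// New(FgCyan).Add(Underline), and then used through its Print, Fprint and
+// Sprint helpers.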
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorExists() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. 
+// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
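+//
+// For example, as shown in the README:
+//
+//	notice := New(Bold, FgGreen).PrintlnFunc()
+//	notice("Don't forget this...")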
+func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
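+// The comparison ignores attribute order, so New(FgRed, Bold) and
+// New(Bold, FgRed) are considered equal.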
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000..04541de78 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,135 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any value. + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod new file mode 100644 index 000000000..78872815e --- /dev/null +++ b/vendor/github.com/fatih/color/go.mod @@ -0,0 +1,8 @@ +module github.com/fatih/color + +go 1.13 + +require ( + github.com/mattn/go-colorable v0.1.8 + github.com/mattn/go-isatty v0.0.12 +) diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum new file mode 100644 index 000000000..54f7c46e8 --- /dev/null +++ b/vendor/github.com/fatih/color/go.sum @@ -0,0 +1,7 @@ +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fatih/structtag/LICENSE b/vendor/github.com/fatih/structtag/LICENSE new file mode 100644 index 000000000..4fd15f9f8 --- /dev/null +++ b/vendor/github.com/fatih/structtag/LICENSE @@ -0,0 +1,60 @@ +Copyright (c) 2017, Fatih Arslan +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of structtag nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +This software includes some portions from Go. Go is used under the terms of the +BSD like license. + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The Go gopher was designed by Renee French. http://reneefrench.blogspot.com/ The design is licensed under the Creative Commons 3.0 Attributions license. Read this article for more details: https://blog.golang.org/gopher diff --git a/vendor/github.com/fatih/structtag/README.md b/vendor/github.com/fatih/structtag/README.md new file mode 100644 index 000000000..c4e8b1e86 --- /dev/null +++ b/vendor/github.com/fatih/structtag/README.md @@ -0,0 +1,73 @@ +# structtag [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structtag) + +structtag provides an easy way of parsing and manipulating struct tag fields. +Please vendor the library as it might change in future versions. + +# Install + +```bash +go get github.com/fatih/structtag +``` + +# Example + +```go +package main + +import ( + "fmt" + "reflect" + "sort" + + "github.com/fatih/structtag" +) + +func main() { + type t struct { + t string `json:"foo,omitempty,string" xml:"foo"` + } + + // get field tag + tag := reflect.TypeOf(t{}).Field(0).Tag + + // ... 
and start using structtag by parsing the tag + tags, err := structtag.Parse(string(tag)) + if err != nil { + panic(err) + } + + // iterate over all tags + for _, t := range tags.Tags() { + fmt.Printf("tag: %+v\n", t) + } + + // get a single tag + jsonTag, err := tags.Get("json") + if err != nil { + panic(err) + } + fmt.Println(jsonTag) // Output: json:"foo,omitempty,string" + fmt.Println(jsonTag.Key) // Output: json + fmt.Println(jsonTag.Name) // Output: foo + fmt.Println(jsonTag.Options) // Output: [omitempty string] + + // change existing tag + jsonTag.Name = "foo_bar" + jsonTag.Options = nil + tags.Set(jsonTag) + + // add new tag + tags.Set(&structtag.Tag{ + Key: "hcl", + Name: "foo", + Options: []string{"squash"}, + }) + + // print the tags + fmt.Println(tags) // Output: json:"foo_bar" xml:"foo" hcl:"foo,squash" + + // sort tags according to keys + sort.Sort(tags) + fmt.Println(tags) // Output: hcl:"foo,squash" json:"foo_bar" xml:"foo" +} +``` diff --git a/vendor/github.com/fatih/structtag/go.mod b/vendor/github.com/fatih/structtag/go.mod new file mode 100644 index 000000000..660d6a1f1 --- /dev/null +++ b/vendor/github.com/fatih/structtag/go.mod @@ -0,0 +1,3 @@ +module github.com/fatih/structtag + +go 1.12 diff --git a/vendor/github.com/fatih/structtag/tags.go b/vendor/github.com/fatih/structtag/tags.go new file mode 100644 index 000000000..c168fb21c --- /dev/null +++ b/vendor/github.com/fatih/structtag/tags.go @@ -0,0 +1,315 @@ +package structtag + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + errTagSyntax = errors.New("bad syntax for struct tag pair") + errTagKeySyntax = errors.New("bad syntax for struct tag key") + errTagValueSyntax = errors.New("bad syntax for struct tag value") + + errKeyNotSet = errors.New("tag key does not exist") + errTagNotExist = errors.New("tag does not exist") + errTagKeyMismatch = errors.New("mismatch between key and tag.key") +) + +// Tags represent a set of tags from a single struct field +type Tags struct { + tags []*Tag +} + +// Tag defines a single struct's string literal tag +type Tag struct { + // Key is the tag key, such as json, xml, etc.. + // i.e: `json:"foo,omitempty". Here key is: "json" + Key string + + // Name is a part of the value + // i.e: `json:"foo,omitempty". Here name is: "foo" + Name string + + // Options is a part of the value. It contains a slice of tag options i.e: + // `json:"foo,omitempty". Here options is: ["omitempty"] + Options []string +} + +// Parse parses a single struct field tag and returns the set of tags. +func Parse(tag string) (*Tags, error) { + var tags []*Tag + + hasTag := tag != "" + + // NOTE(arslan) following code is from reflect and vet package with some + // modifications to collect all necessary information and extend it with + // usable methods + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax + // error. Strictly speaking, control chars include the range [0x7f, + // 0x9f], not just [0x00, 0x1f], but in practice, we ignore the + // multi-byte control characters as it is simpler to inspect the tag's + // bytes than the tag's runes. 
+ i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + + if i == 0 { + return nil, errTagKeySyntax + } + if i+1 >= len(tag) || tag[i] != ':' { + return nil, errTagSyntax + } + if tag[i+1] != '"' { + return nil, errTagValueSyntax + } + + key := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + return nil, errTagValueSyntax + } + + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, errTagValueSyntax + } + + res := strings.Split(value, ",") + name := res[0] + options := res[1:] + if len(options) == 0 { + options = nil + } + + tags = append(tags, &Tag{ + Key: key, + Name: name, + Options: options, + }) + } + + if hasTag && len(tags) == 0 { + return nil, nil + } + + return &Tags{ + tags: tags, + }, nil +} + +// Get returns the tag associated with the given key. If the key is present +// in the tag the value (which may be empty) is returned. Otherwise the +// returned value will be the empty string. The ok return value reports whether +// the tag exists or not (which the return value is nil). +func (t *Tags) Get(key string) (*Tag, error) { + for _, tag := range t.tags { + if tag.Key == key { + return tag, nil + } + } + + return nil, errTagNotExist +} + +// Set sets the given tag. If the tag key already exists it'll override it +func (t *Tags) Set(tag *Tag) error { + if tag.Key == "" { + return errKeyNotSet + } + + added := false + for i, tg := range t.tags { + if tg.Key == tag.Key { + added = true + t.tags[i] = tag + } + } + + if !added { + // this means this is a new tag, add it + t.tags = append(t.tags, tag) + } + + return nil +} + +// AddOptions adds the given option for the given key. If the option already +// exists it doesn't add it again. +func (t *Tags) AddOptions(key string, options ...string) { + for i, tag := range t.tags { + if tag.Key != key { + continue + } + + for _, opt := range options { + if !tag.HasOption(opt) { + tag.Options = append(tag.Options, opt) + } + } + + t.tags[i] = tag + } +} + +// DeleteOptions deletes the given options for the given key +func (t *Tags) DeleteOptions(key string, options ...string) { + hasOption := func(option string) bool { + for _, opt := range options { + if opt == option { + return true + } + } + return false + } + + for i, tag := range t.tags { + if tag.Key != key { + continue + } + + var updated []string + for _, opt := range tag.Options { + if !hasOption(opt) { + updated = append(updated, opt) + } + } + + tag.Options = updated + t.tags[i] = tag + } +} + +// Delete deletes the tag for the given keys +func (t *Tags) Delete(keys ...string) { + hasKey := func(key string) bool { + for _, k := range keys { + if k == key { + return true + } + } + return false + } + + var updated []*Tag + for _, tag := range t.tags { + if !hasKey(tag.Key) { + updated = append(updated, tag) + } + } + + t.tags = updated +} + +// Tags returns a slice of tags. The order is the original tag order unless it +// was changed. +func (t *Tags) Tags() []*Tag { + return t.tags +} + +// Tags returns a slice of tags. The order is the original tag order unless it +// was changed. 
+func (t *Tags) Keys() []string { + var keys []string + for _, tag := range t.tags { + keys = append(keys, tag.Key) + } + return keys +} + +// String reassembles the tags into a valid literal tag field representation +func (t *Tags) String() string { + tags := t.Tags() + if len(tags) == 0 { + return "" + } + + var buf bytes.Buffer + for i, tag := range t.Tags() { + buf.WriteString(tag.String()) + if i != len(tags)-1 { + buf.WriteString(" ") + } + } + return buf.String() +} + +// HasOption returns true if the given option is available in options +func (t *Tag) HasOption(opt string) bool { + for _, tagOpt := range t.Options { + if tagOpt == opt { + return true + } + } + + return false +} + +// Value returns the raw value of the tag, i.e. if the tag is +// `json:"foo,omitempty", the Value is "foo,omitempty" +func (t *Tag) Value() string { + options := strings.Join(t.Options, ",") + if options != "" { + return fmt.Sprintf(`%s,%s`, t.Name, options) + } + return t.Name +} + +// String reassembles the tag into a valid tag field representation +func (t *Tag) String() string { + return fmt.Sprintf(`%s:%q`, t.Key, t.Value()) +} + +// GoString implements the fmt.GoStringer interface +func (t *Tag) GoString() string { + template := `{ + Key: '%s', + Name: '%s', + Option: '%s', + }` + + if t.Options == nil { + return fmt.Sprintf(template, t.Key, t.Name, "nil") + } + + options := strings.Join(t.Options, ",") + return fmt.Sprintf(template, t.Key, t.Name, options) +} + +func (t *Tags) Len() int { + return len(t.tags) +} + +func (t *Tags) Less(i int, j int) bool { + return t.tags[i].Key < t.tags[j].Key +} + +func (t *Tags) Swap(i int, j int) { + t.tags[i], t.tags[j] = t.tags[j], t.tags[i] +} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 000000000..fad895851 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*.go] +indent_style = tab +indent_size = 4 +insert_final_newline = true + +[*.{yml,yaml}] +indent_style = space +indent_size = 2 +insert_final_newline = true +trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes new file mode 100644 index 000000000..32f1001be --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes @@ -0,0 +1 @@ +go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 000000000..4cd0cbaf4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 000000000..a9c30165c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,36 @@ +sudo: false +language: go + +go: + - "stable" + - "1.11.x" + - "1.10.x" + - "1.9.x" + +matrix: + include: + - go: "stable" + env: GOLINT=true + allow_failures: + - go: tip + fast_finish: true + + +before_install: + - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi + +script: + - go test --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - if [ ! 
-z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi + - go vet ./... + +os: + - linux + - osx + - windows + +notifications: + email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 000000000..5ab5d41c5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 000000000..be4d7ea2c --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, 
Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. 
+ +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 
2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 000000000..828a60b24 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 000000000..e180c8fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 000000000..b2629e522 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,130 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +| Adapter | OS | Status | +| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | +| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | +| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | +| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | +| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
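+With Go modules, a vendored copy with the same layout can also be produced with `go mod vendor`, for example:
+
+```console
+go get github.com/fsnotify/fsnotify
+go mod vendor
+```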
+ +## Usage + +```go +package main + +import ( + "log" + + "github.com/fsnotify/fsnotify" +) + +func main() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Op&fsnotify.Write == fsnotify.Write { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + err = watcher.Add("/tmp/foo") + if err != nil { + log.Fatal(err) + } + <-done +} +``` + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** + +fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 000000000..ced39cb88 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
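+// Note: the FEN backend is currently a stub; NewWatcher always returns an error and the remaining methods are no-ops.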
+func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 000000000..89cab046d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ( + ErrEventOverflow = errors.New("fsnotify queue overflow") +) diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod new file mode 100644 index 000000000..ff11e13f2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.mod @@ -0,0 +1,5 @@ +module github.com/fsnotify/fsnotify + +go 1.13 + +require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum new file mode 100644 index 000000000..f60af9855 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 000000000..d9fd1b88a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. 
readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. 
+ // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 000000000..b33f2b4d4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. 
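+// The pipe created here is registered with the same epoll instance so that wake() can interrupt a blocked wait() when the watcher is closed.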
+func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! 
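+// Each wake() writes a single byte, so a 100-byte read drains up to 100 pending wakeups in one call.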
+ buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 000000000..86e76a3d6 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). 
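+// Removing a watched directory also removes the internal watches that were added for the files inside it.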
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
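+// It also registers a kqueue watch for the new file so that subsequent events on it are delivered.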
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 000000000..2306c4620 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 000000000..870c4d6d1 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 000000000..09436f31d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
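+// The request is forwarded to the I/O thread via the input channel and the call blocks until the reply is received.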
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/fzipp/gocyclo/CHANGELOG.md b/vendor/github.com/fzipp/gocyclo/CHANGELOG.md new file mode 100644 index 000000000..3959a62a5 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/CHANGELOG.md @@ -0,0 +1,38 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.3.1] +### Added +- Test coverage + +### Fixed +- Fix cyclomatic complexity for function literals (base complexity of 1 was missing) + +## [0.3.0] - 2020-10-17 +### Added +- New `-avg-short` and `-total-short` options for printing average and total cyclomatic complexities without label +- Export the `AnalyzeASTFile` function in package API +- Doc comments for exported functions and types + +### Fixed +- Ignore `default` cases + +## [0.2.0] - 2020-10-17 +### Added +- Support for gocyclo as a package +- Support for ignoring of individual functions via a new `gocyclo:ignore` directive +- New `-total` option to compute total cyclomatic complexity +- New `-ignore` option to ignore files matching a regular expression +- Analysis of function literals at declaration level + +### Changed +- Breaking: installation changed to `go get github.com/fzipp/gocyclo/cmd/gocyclo` + +## [0.1.0] - 2020-10-17 + +### Added +- `go.mod` file; beginning of versioning + diff --git a/vendor/github.com/fzipp/gocyclo/CONTRIBUTORS b/vendor/github.com/fzipp/gocyclo/CONTRIBUTORS new file mode 100644 index 000000000..1c09f1a06 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/CONTRIBUTORS @@ -0,0 +1,7 @@ +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Frederik Zipp +Harshavardhana diff --git a/vendor/github.com/fzipp/gocyclo/LICENSE b/vendor/github.com/fzipp/gocyclo/LICENSE new file mode 100644 index 000000000..45f88d6cb --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Frederik Zipp. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of the copyright owner nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fzipp/gocyclo/README.md b/vendor/github.com/fzipp/gocyclo/README.md new file mode 100644 index 000000000..f1056934c --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/README.md @@ -0,0 +1,107 @@ +# gocyclo + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/fzipp/gocyclo)](https://pkg.go.dev/github.com/fzipp/gocyclo) +[![Go Report Card](https://goreportcard.com/badge/github.com/fzipp/gocyclo)](https://goreportcard.com/report/github.com/fzipp/gocyclo) + +Gocyclo calculates +[cyclomatic complexities](https://en.wikipedia.org/wiki/Cyclomatic_complexity) +of functions in Go source code. + +Cyclomatic complexity is a +[code quality metric](https://en.wikipedia.org/wiki/Software_metric) +which can be used to identify code that needs refactoring. +It measures the number of linearly independent paths through a function's +source code. + +The cyclomatic complexity of a function is calculated according to the +following rules: + +``` + 1 is the base complexity of a function ++1 for each 'if', 'for', 'case', '&&' or '||' +``` + +A function with a higher cyclomatic complexity requires more test cases to +cover all possible paths and is potentially harder to understand. The +complexity can be reduced by applying common refactoring techniques that lead +to smaller functions. + +## Installation + +To install the `gocyclo` command, run + +``` +$ go get github.com/fzipp/gocyclo/cmd/gocyclo +``` + +and put the resulting binary in one of your PATH directories if +`$GOPATH/bin` isn't already in your PATH. + +## Usage + +``` +Calculate cyclomatic complexities of Go functions. +Usage: + gocyclo [flags] ... 
+ +Flags: + -over N show functions with complexity > N only and + return exit code 1 if the set is non-empty + -top N show the top N most complex functions only + -avg, -avg-short show the average complexity over all functions; + the short option prints the value without a label + -total, -total-short show the total complexity for all functions; + the short option prints the value without a label + -ignore REGEX exclude files matching the given regular expression + +The output fields for each line are: + +``` + +## Examples + +``` +$ gocyclo . +$ gocyclo main.go +$ gocyclo -top 10 src/ +$ gocyclo -over 25 docker +$ gocyclo -avg . +$ gocyclo -top 20 -ignore "_test|Godeps|vendor/" . +$ gocyclo -over 3 -avg gocyclo/ +``` + +Example output: + +``` +9 gocyclo (*complexityVisitor).Visit complexity.go:30:1 +8 main main cmd/gocyclo/main.go:53:1 +7 gocyclo (*fileAnalyzer).analyzeDecl analyze.go:96:1 +4 gocyclo Analyze analyze.go:24:1 +4 gocyclo parseDirectives directives.go:27:1 +4 gocyclo (Stats).SortAndFilter stats.go:52:1 +Average: 2.72 +``` + +Note that the average is calculated over all analyzed functions, +not just the printed ones. + +### Ignoring individual functions + +Individual functions can be ignored with a `gocyclo:ignore` directive: + +``` +//gocyclo:ignore +func f1() { + // ... +} + +//gocyclo:ignore +var f2 = func() { + // ... +} +``` + +## License + +This project is free and open source software licensed under the +[BSD 3-Clause License](LICENSE). diff --git a/vendor/github.com/fzipp/gocyclo/analyze.go b/vendor/github.com/fzipp/gocyclo/analyze.go new file mode 100644 index 000000000..c053e83e6 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/analyze.go @@ -0,0 +1,151 @@ +// Copyright 2020 Frederik Zipp. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocyclo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "log" + "os" + "path/filepath" + "regexp" + "strings" +) + +// Analyze calculates the cyclomatic complexities of the functions and methods +// in the Go source code files in the given paths. If a path is a directory +// all Go files under that directory are analyzed recursively. +// Files with paths matching the 'ignore' regular expressions are skipped. +// The 'ignore' parameter can be nil, meaning that no files are skipped. 
+func Analyze(paths []string, ignore *regexp.Regexp) Stats { + var stats Stats + for _, path := range paths { + info, err := os.Stat(path) + if err != nil { + log.Printf("could not get file info for path %q: %s\n", path, err) + continue + } + if info.IsDir() { + stats = analyzeDir(path, ignore, stats) + } else { + stats = analyzeFile(path, ignore, stats) + } + } + return stats +} + +func analyzeDir(dirname string, ignore *regexp.Regexp, stats Stats) Stats { + filepath.Walk(dirname, func(path string, info os.FileInfo, err error) error { + if err == nil && isGoFile(info) { + stats = analyzeFile(path, ignore, stats) + } + return err + }) + return stats +} + +func isGoFile(f os.FileInfo) bool { + return !f.IsDir() && strings.HasSuffix(f.Name(), ".go") +} + +func analyzeFile(path string, ignore *regexp.Regexp, stats Stats) Stats { + if isIgnored(path, ignore) { + return stats + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, path, nil, parser.ParseComments) + if err != nil { + log.Fatal(err) + } + return AnalyzeASTFile(f, fset, stats) +} + +func isIgnored(path string, ignore *regexp.Regexp) bool { + return ignore != nil && ignore.MatchString(path) +} + +// AnalyzeASTFile calculates the cyclomatic complexities of the functions +// and methods in the abstract syntax tree (AST) of a parsed Go file and +// appends the results to the given Stats slice. +func AnalyzeASTFile(f *ast.File, fs *token.FileSet, s Stats) Stats { + analyzer := &fileAnalyzer{ + file: f, + fileSet: fs, + stats: s, + } + return analyzer.analyze() +} + +type fileAnalyzer struct { + file *ast.File + fileSet *token.FileSet + stats Stats +} + +func (a *fileAnalyzer) analyze() Stats { + for _, decl := range a.file.Decls { + a.analyzeDecl(decl) + } + return a.stats +} + +func (a *fileAnalyzer) analyzeDecl(d ast.Decl) { + switch decl := d.(type) { + case *ast.FuncDecl: + a.addStatIfNotIgnored(decl, funcName(decl), decl.Doc) + case *ast.GenDecl: + for _, spec := range decl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + for _, value := range valueSpec.Values { + funcLit, ok := value.(*ast.FuncLit) + if !ok { + continue + } + a.addStatIfNotIgnored(funcLit, valueSpec.Names[0].Name, decl.Doc) + } + } + } +} + +func (a *fileAnalyzer) addStatIfNotIgnored(node ast.Node, funcName string, doc *ast.CommentGroup) { + if parseDirectives(doc).HasIgnore() { + return + } + a.stats = append(a.stats, Stat{ + PkgName: a.file.Name.Name, + FuncName: funcName, + Complexity: Complexity(node), + Pos: a.fileSet.Position(node.Pos()), + }) +} + +// funcName returns the name representation of a function or method: +// "(Type).Name" for methods or simply "Name" for functions. +func funcName(fn *ast.FuncDecl) string { + if fn.Recv != nil { + if fn.Recv.NumFields() > 0 { + typ := fn.Recv.List[0].Type + return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) + } + } + return fn.Name.Name +} + +// recvString returns a string representation of recv of the +// form "T", "*T", or "BADRECV" (if not a proper receiver type). +func recvString(recv ast.Expr) string { + switch t := recv.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return "*" + recvString(t.X) + } + return "BADRECV" +} diff --git a/vendor/github.com/fzipp/gocyclo/complexity.go b/vendor/github.com/fzipp/gocyclo/complexity.go new file mode 100644 index 000000000..65f5077e8 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/complexity.go @@ -0,0 +1,48 @@ +// Copyright 2020 Frederik Zipp. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocyclo calculates the cyclomatic complexities of functions and +// methods in Go source code. +package gocyclo + +import ( + "go/ast" + "go/token" +) + +// Complexity calculates the cyclomatic complexity of a function. +// The 'fn' node is either a *ast.FuncDecl or a *ast.FuncLit. +func Complexity(fn ast.Node) int { + v := complexityVisitor{ + complexity: 1, + } + ast.Walk(&v, fn) + return v.complexity +} + +type complexityVisitor struct { + // complexity is the cyclomatic complexity + complexity int +} + +// Visit implements the ast.Visitor interface. +func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt: + v.complexity++ + case *ast.CaseClause: + if n.List != nil { // ignore default case + v.complexity++ + } + case *ast.CommClause: + if n.Comm != nil { // ignore default case + v.complexity++ + } + case *ast.BinaryExpr: + if n.Op == token.LAND || n.Op == token.LOR { + v.complexity++ + } + } + return v +} diff --git a/vendor/github.com/fzipp/gocyclo/directives.go b/vendor/github.com/fzipp/gocyclo/directives.go new file mode 100644 index 000000000..b4ee3c448 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/directives.go @@ -0,0 +1,39 @@ +// Copyright 2020 Frederik Zipp. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocyclo + +import ( + "go/ast" + "strings" +) + +type directives []string + +func (ds directives) HasIgnore() bool { + return ds.isPresent("ignore") +} + +func (ds directives) isPresent(name string) bool { + for _, d := range ds { + if d == name { + return true + } + } + return false +} + +func parseDirectives(doc *ast.CommentGroup) directives { + if doc == nil { + return directives{} + } + const prefix = "//gocyclo:" + var ds directives + for _, comment := range doc.List { + if strings.HasPrefix(comment.Text, prefix) { + ds = append(ds, strings.TrimSpace(strings.TrimPrefix(comment.Text, prefix))) + } + } + return ds +} diff --git a/vendor/github.com/fzipp/gocyclo/go.mod b/vendor/github.com/fzipp/gocyclo/go.mod new file mode 100644 index 000000000..c80982786 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/go.mod @@ -0,0 +1,3 @@ +module github.com/fzipp/gocyclo + +go 1.15 diff --git a/vendor/github.com/fzipp/gocyclo/stats.go b/vendor/github.com/fzipp/gocyclo/stats.go new file mode 100644 index 000000000..90f5eefc2 --- /dev/null +++ b/vendor/github.com/fzipp/gocyclo/stats.go @@ -0,0 +1,73 @@ +// Copyright 2020 Frederik Zipp. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocyclo + +import ( + "fmt" + "go/token" + "sort" +) + +// Stat holds the cyclomatic complexity of a function, along with its package +// and and function name and its position in the source code. +type Stat struct { + PkgName string + FuncName string + Complexity int + Pos token.Position +} + +// String formats the cyclomatic complexity information of a function in +// the following format: " " +func (s Stat) String() string { + return fmt.Sprintf("%d %s %s %s", s.Complexity, s.PkgName, s.FuncName, s.Pos) +} + +// Stats hold the cyclomatic complexities of many functions. +type Stats []Stat + +// AverageComplexity calculates the average cyclomatic complexity of the +// cyclomatic complexities in s. 
+func (s Stats) AverageComplexity() float64 { + return float64(s.TotalComplexity()) / float64(len(s)) +} + +// TotalComplexity calculates the total sum of all cyclomatic +// complexities in s. +func (s Stats) TotalComplexity() uint64 { + total := uint64(0) + for _, stat := range s { + total += uint64(stat.Complexity) + } + return total +} + +// SortAndFilter sorts the cyclomatic complexities in s in descending order +// and returns a slice of s limited to the 'top' N entries with a cyclomatic +// complexity greater than 'over'. If 'top' is negative, i.e. -1, it does +// not limit the result. If 'over' is <= 0 it does not limit the result either, +// because a function has a base cyclomatic complexity of at least 1. +func (s Stats) SortAndFilter(top, over int) Stats { + result := make(Stats, len(s)) + copy(result, s) + sort.Sort(byComplexityDesc(result)) + for i, stat := range result { + if i == top { + return result[:i] + } + if stat.Complexity <= over { + return result[:i] + } + } + return result +} + +type byComplexityDesc Stats + +func (s byComplexityDesc) Len() int { return len(s) } +func (s byComplexityDesc) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byComplexityDesc) Less(i, j int) bool { + return s[i].Complexity >= s[j].Complexity +} diff --git a/vendor/github.com/go-critic/go-critic/LICENSE b/vendor/github.com/go-critic/go-critic/LICENSE new file mode 100644 index 000000000..b944b4bbd --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2018-2019 Alekseev Artem +Copyright (c) 2018-2019 Ravil Bikbulatov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
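Note (editor's addition, not part of the vendored sources): besides the `gocyclo` CLI described in its README above, the vendored package exposes a small programmatic API — `Analyze`, `Stats`, `SortAndFilter`, `AverageComplexity` — whose signatures appear in the files vendored here. The following is a minimal, hypothetical usage sketch; the path list, ignore pattern, and the complexity threshold of 10 are illustrative assumptions, not values taken from this patch.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/fzipp/gocyclo"
)

func main() {
	// Skip test files and vendored code, mirroring the -ignore flag.
	ignore := regexp.MustCompile(`_test\.go$|vendor/`)

	// Analyze walks the given paths recursively and parses each Go file.
	stats := gocyclo.Analyze([]string{"."}, ignore)

	// Keep only functions with complexity > 10, sorted in descending order.
	// A negative 'top' means the number of results is not limited.
	for _, stat := range stats.SortAndFilter(-1, 10) {
		fmt.Println(stat) // "<complexity> <pkg> <func> <pos>"
	}
	fmt.Printf("Average: %.2f\n", stats.AverageComplexity())
}
```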
diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go new file mode 100644 index 000000000..a9324dd02 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go @@ -0,0 +1,102 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/astp" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "appendAssign" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious append result assignments" + info.Before = ` +p.positives = append(p.negatives, x) +p.negatives = append(p.negatives, y)` + info.After = ` +p.positives = append(p.positives, x) +p.negatives = append(p.negatives, y)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&appendAssignChecker{ctx: ctx}), nil + }) +} + +type appendAssignChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *appendAssignChecker) VisitStmt(stmt ast.Stmt) { + assign, ok := stmt.(*ast.AssignStmt) + if !ok || (assign.Tok != token.ASSIGN && assign.Tok != token.DEFINE) || len(assign.Lhs) != len(assign.Rhs) { + return + } + for i, rhs := range assign.Rhs { + call, ok := rhs.(*ast.CallExpr) + if !ok || qualifiedName(call.Fun) != "append" { + continue + } + c.checkAppend(assign.Lhs[i], call) + } +} + +func (c *appendAssignChecker) checkAppend(x ast.Expr, call *ast.CallExpr) { + if call.Ellipsis != token.NoPos { + // Try to detect `xs = append(ys, xs...)` idiom. + for _, arg := range call.Args[1:] { + y := arg + if arg, ok := arg.(*ast.SliceExpr); ok { + y = arg.X + } + if astequal.Expr(x, y) { + return + } + } + } + + switch x := x.(type) { + case *ast.Ident: + if x.Name == "_" { + return // Don't check assignments to blank ident + } + case *ast.IndexExpr: + if !astp.IsIndexExpr(call.Args[0]) { + // Most likely `m[k] = append(x, ...)` + // pattern, where x was retrieved by m[k] before. + // + // TODO: it's possible to record such map/slice reads + // and check whether it was done before this call. + // But for now, treat it like x belongs to m[k]. + return + } + } + + switch y := call.Args[0].(type) { + case *ast.SliceExpr: + if _, ok := c.ctx.TypeOf(y.X).(*types.Array); ok { + // Arrays are frequently used as scratch storages. 
+ return + } + c.matchSlices(call, x, y.X) + case *ast.IndexExpr, *ast.Ident, *ast.SelectorExpr: + c.matchSlices(call, x, y) + } +} + +func (c *appendAssignChecker) matchSlices(cause ast.Node, x, y ast.Expr) { + if !astequal.Expr(x, astutil.Unparen(y)) { + c.warn(cause) + } +} + +func (c *appendAssignChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "append result not assigned to the same slice") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go new file mode 100644 index 000000000..03662fc21 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go @@ -0,0 +1,102 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "appendCombine" + info.Tags = []string{"performance"} + info.Summary = "Detects `append` chains to the same slice that can be done in a single `append` call" + info.Before = ` +xs = append(xs, 1) +xs = append(xs, 2)` + info.After = `xs = append(xs, 1, 2)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmtList(&appendCombineChecker{ctx: ctx}), nil + }) +} + +type appendCombineChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *appendCombineChecker) VisitStmtList(list []ast.Stmt) { + var cause ast.Node // First append + var slice ast.Expr // Slice being appended to + chain := 0 // How much appends in a row we've seen + + // Break the chain. + // If enough appends are in chain, print warning. + flush := func() { + if chain > 1 { + c.warn(cause, chain) + } + chain = 0 + slice = nil + } + + for _, stmt := range list { + call := c.matchAppend(stmt, slice) + if call == nil { + flush() + continue + } + + if chain == 0 { + // First append in a chain. + chain = 1 + slice = call.Args[0] + cause = stmt + } else { + chain++ + } + } + + // Required for printing chains that consist of trailing + // statements from the list. + flush() +} + +func (c *appendCombineChecker) matchAppend(stmt ast.Stmt, slice ast.Expr) *ast.CallExpr { + // Seeking for: + // slice = append(slice, xs...) + // xs are 0-N append arguments, but not variadic argument, + // because it makes append combining impossible. + + assign := astcast.ToAssignStmt(stmt) + if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { + return nil + } + + call, ok := assign.Rhs[0].(*ast.CallExpr) + { + cond := ok && + qualifiedName(call.Fun) == "append" && + call.Ellipsis == token.NoPos && + astequal.Expr(assign.Lhs[0], call.Args[0]) + if !cond { + return nil + } + } + + // Check that current append slice match previous append slice. + // Otherwise we should break the chain. 
+ if slice == nil || astequal.Expr(slice, call.Args[0]) { + return call + } + return nil +} + +func (c *appendCombineChecker) warn(cause ast.Node, chain int) { + c.ctx.Warn(cause, "can combine chain of %d appends into one", chain) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go new file mode 100644 index 000000000..98cabc54f --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go @@ -0,0 +1,97 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "argOrder" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious arguments order" + info.Before = `strings.HasPrefix("#", userpass)` + info.After = `strings.HasPrefix(userpass, "#")` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&argOrderChecker{ctx: ctx}), nil + }) +} + +type argOrderChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *argOrderChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + + // For now only handle functions of 2 args. + // TODO(quasilyte): generalize the algorithm and add more patterns. + if len(call.Args) != 2 { + return + } + + calledExpr := astcast.ToSelectorExpr(call.Fun) + obj, ok := c.ctx.TypesInfo.ObjectOf(astcast.ToIdent(calledExpr.X)).(*types.PkgName) + if !ok || !isStdlibPkg(obj.Imported()) { + return + } + + x := call.Args[0] + y := call.Args[1] + switch calledExpr.Sel.Name { + case "HasPrefix", "HasSuffix", "Contains", "TrimPrefix", "TrimSuffix", "Split": + if obj.Name() != "bytes" && obj.Name() != "strings" { + return + } + if c.isConstLiteral(x) && !c.isConstLiteral(y) { + c.warn(call) + } + } +} + +func (c *argOrderChecker) isConstLiteral(x ast.Expr) bool { + // Also permit byte slices. + switch x := x.(type) { + case *ast.BasicLit: + return true + + case *ast.CallExpr: + // Handle `[]byte("abc")` as well. + if len(x.Args) != 1 || !astp.IsBasicLit(x.Args[0]) { + return false + } + typ, ok := c.ctx.TypeOf(x.Fun).(*types.Slice) + return ok && typep.HasUint8Kind(typ.Elem()) + + case *ast.CompositeLit: + // Check if it's a const byte slice. 
+ typ, ok := c.ctx.TypeOf(x).(*types.Slice) + if !ok || !typep.HasUint8Kind(typ.Elem()) { + return false + } + for _, elt := range x.Elts { + if !astp.IsBasicLit(elt) { + return false + } + } + return true + + default: + return false + } +} + +func (c *argOrderChecker) warn(call *ast.CallExpr) { + fixed := astcopy.CallExpr(call) + fixed.Args[0], fixed.Args[1] = fixed.Args[1], fixed.Args[0] + c.ctx.Warn(call, "probably meant `%s`", fixed) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go new file mode 100644 index 000000000..d0bf64417 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go @@ -0,0 +1,102 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "assignOp" + info.Tags = []string{"style"} + info.Summary = "Detects assignments that can be simplified by using assignment operators" + info.Before = `x = x * 2` + info.After = `x *= 2` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&assignOpChecker{ctx: ctx}), nil + }) +} + +type assignOpChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *assignOpChecker) VisitStmt(stmt ast.Stmt) { + assign, ok := stmt.(*ast.AssignStmt) + cond := ok && + assign.Tok == token.ASSIGN && + len(assign.Lhs) == 1 && + len(assign.Rhs) == 1 && + typep.SideEffectFree(c.ctx.TypesInfo, assign.Lhs[0]) + if !cond { + return + } + + // TODO(quasilyte): can take commutativity into account. + expr, ok := assign.Rhs[0].(*ast.BinaryExpr) + if !ok || !astequal.Expr(assign.Lhs[0], expr.X) { + return + } + + // TODO(quasilyte): perform unparen? 
+ switch expr.Op { + case token.MUL: + c.warn(assign, token.MUL_ASSIGN, expr.Y) + case token.QUO: + c.warn(assign, token.QUO_ASSIGN, expr.Y) + case token.REM: + c.warn(assign, token.REM_ASSIGN, expr.Y) + case token.ADD: + c.warn(assign, token.ADD_ASSIGN, expr.Y) + case token.SUB: + c.warn(assign, token.SUB_ASSIGN, expr.Y) + case token.AND: + c.warn(assign, token.AND_ASSIGN, expr.Y) + case token.OR: + c.warn(assign, token.OR_ASSIGN, expr.Y) + case token.XOR: + c.warn(assign, token.XOR_ASSIGN, expr.Y) + case token.SHL: + c.warn(assign, token.SHL_ASSIGN, expr.Y) + case token.SHR: + c.warn(assign, token.SHR_ASSIGN, expr.Y) + case token.AND_NOT: + c.warn(assign, token.AND_NOT_ASSIGN, expr.Y) + } +} + +func (c *assignOpChecker) warn(cause *ast.AssignStmt, op token.Token, rhs ast.Expr) { + suggestion := c.simplify(cause, op, rhs) + c.ctx.Warn(cause, "replace `%s` with `%s`", cause, suggestion) +} + +func (c *assignOpChecker) simplify(cause *ast.AssignStmt, op token.Token, rhs ast.Expr) ast.Stmt { + if lit, ok := rhs.(*ast.BasicLit); ok && lit.Kind == token.INT && lit.Value == "1" { + switch op { + case token.ADD_ASSIGN: + return &ast.IncDecStmt{ + X: cause.Lhs[0], + TokPos: cause.TokPos, + Tok: token.INC, + } + case token.SUB_ASSIGN: + return &ast.IncDecStmt{ + X: cause.Lhs[0], + TokPos: cause.TokPos, + Tok: token.DEC, + } + } + } + suggestion := astcopy.AssignStmt(cause) + suggestion.Tok = op + suggestion.Rhs[0] = rhs + return suggestion +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go new file mode 100644 index 000000000..7435ee57b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go @@ -0,0 +1,63 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" +) + +func init() { + var info linter.CheckerInfo + info.Name = "badCall" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious function calls" + info.Before = `strings.Replace(s, from, to, 0)` + info.After = `strings.Replace(s, from, to, -1)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&badCallChecker{ctx: ctx}), nil + }) +} + +type badCallChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *badCallChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + if len(call.Args) == 0 { + return + } + + // TODO(quasilyte): handle methods. 
+ + switch qualifiedName(call.Fun) { + case "strings.Replace", "bytes.Replace": + if n := astcast.ToBasicLit(call.Args[3]); n.Value == "0" { + c.warnBadArg(n, "-1") + } + case "strings.SplitN", "bytes.SplitN": + if n := astcast.ToBasicLit(call.Args[2]); n.Value == "0" { + c.warnBadArg(n, "-1") + } + case "append": + if len(call.Args) == 1 { + c.warnAppend(call) + } + } +} + +func (c *badCallChecker) warnBadArg(badArg *ast.BasicLit, correction string) { + goodArg := astcopy.BasicLit(badArg) + goodArg.Value = correction + c.ctx.Warn(badArg, "suspicious arg %s, probably meant %s", + badArg, goodArg) +} + +func (c *badCallChecker) warnAppend(call *ast.CallExpr) { + c.ctx.Warn(call, "no-op append call, probably missing arguments") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go new file mode 100644 index 000000000..149f0ac88 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go @@ -0,0 +1,147 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "badCond" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious condition expressions" + info.Before = ` +for i := 0; i > n; i++ { + xs[i] = 0 +}` + info.After = ` +for i := 0; i < n; i++ { + xs[i] = 0 +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(&badCondChecker{ctx: ctx}), nil + }) +} + +type badCondChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *badCondChecker) VisitFuncDecl(decl *ast.FuncDecl) { + ast.Inspect(decl.Body, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.ForStmt: + c.checkForStmt(n) + case ast.Expr: + c.checkExpr(n) + } + return true + }) +} + +func (c *badCondChecker) checkExpr(expr ast.Expr) { + // TODO(quasilyte): recognize more patterns. + + cond := astcast.ToBinaryExpr(expr) + lhs := astcast.ToBinaryExpr(astutil.Unparen(cond.X)) + rhs := astcast.ToBinaryExpr(astutil.Unparen(cond.Y)) + + if cond.Op != token.LAND { + return + } + + // Notes: + // `x != a || x != b` handled by go vet. + + // Pattern 1. + // `x < a && x > b`; Where `a` is less than `b`. + if c.lessAndGreater(lhs, rhs) { + c.warnCond(cond, "always false") + return + } + + // Pattern 2. + // `x == a && x == b` + // + // Valid when `b == a` is intended, but still reported. + // We can disable "just suspicious" warnings by default + // is users are upset with the current behavior. 
+ if c.equalToBoth(lhs, rhs) { + c.warnCond(cond, "suspicious") + return + } +} + +func (c *badCondChecker) equalToBoth(lhs, rhs *ast.BinaryExpr) bool { + return lhs.Op == token.EQL && rhs.Op == token.EQL && + astequal.Expr(lhs.X, rhs.X) +} + +func (c *badCondChecker) lessAndGreater(lhs, rhs *ast.BinaryExpr) bool { + if lhs.Op != token.LSS || rhs.Op != token.GTR { + return false + } + if !astequal.Expr(lhs.X, rhs.X) { + return false + } + a := c.ctx.TypesInfo.Types[lhs.Y].Value + b := c.ctx.TypesInfo.Types[rhs.Y].Value + return a != nil && b != nil && constant.Compare(a, token.LSS, b) +} + +func (c *badCondChecker) checkForStmt(stmt *ast.ForStmt) { + // TODO(quasilyte): handle other kinds of bad conditionals. + + init := astcast.ToAssignStmt(stmt.Init) + if init.Tok != token.DEFINE || len(init.Lhs) != 1 || len(init.Rhs) != 1 { + return + } + if astcast.ToBasicLit(init.Rhs[0]).Value != "0" { + return + } + + iter := astcast.ToIdent(init.Lhs[0]) + cond := astcast.ToBinaryExpr(stmt.Cond) + if cond.Op != token.GTR || !astequal.Expr(iter, cond.X) { + return + } + if !typep.SideEffectFree(c.ctx.TypesInfo, cond.Y) { + return + } + + post := astcast.ToIncDecStmt(stmt.Post) + if post.Tok != token.INC || !astequal.Expr(iter, post.X) { + return + } + + mutated := lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, cond.Y) || + lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, iter) + if mutated { + return + } + + c.warnForStmt(stmt, cond) +} + +func (c *badCondChecker) warnForStmt(cause ast.Node, cond *ast.BinaryExpr) { + suggest := astcopy.BinaryExpr(cond) + suggest.Op = token.LSS + c.ctx.Warn(cause, "`%s` in loop; probably meant `%s`?", + cond, suggest) +} + +func (c *badCondChecker) warnCond(cond *ast.BinaryExpr, tag string) { + c.ctx.Warn(cond, "`%s` condition is %s", cond, tag) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/badLock_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badLock_checker.go new file mode 100644 index 000000000..8628ff2d7 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/badLock_checker.go @@ -0,0 +1,116 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "badLock" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects suspicious mutex lock/unlock operations" + info.Before = ` +mu.Lock() +mu.Unlock()` + info.After = ` +mu.Lock() +defer mu.Unlock()` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmtList(&badLockChecker{ctx: ctx}), nil + }) +} + +type badLockChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *badLockChecker) VisitStmtList(list []ast.Stmt) { + if len(list) < 2 { + return + } + + for i := 0; i < len(list)-1; i++ { + current, ok := list[i].(*ast.ExprStmt) + if !ok { + continue + } + deferred := false + var next ast.Expr + switch x := list[i+1].(type) { + case *ast.ExprStmt: + next = x.X + case *ast.DeferStmt: + next = x.Call + deferred = true + default: + continue + } + + mutex1, lockFunc, ok := c.asLockedMutex(current.X) + if !ok { + continue + } + mutex2, unlockFunc, ok := c.asUnlockedMutex(next) + if !ok { + continue + } + if !astequal.Expr(mutex1, mutex2) { + continue + } + + switch { + case !deferred: + c.warnImmediateUnlock(mutex2) + case lockFunc == "Lock" && unlockFunc == 
"RUnlock": + c.warnMismatchingUnlock(mutex2, "Unlock") + case lockFunc == "RLock" && unlockFunc == "Unlock": + c.warnMismatchingUnlock(mutex2, "RUnlock") + } + } +} + +func (c *badLockChecker) asLockedMutex(e ast.Expr) (ast.Expr, string, bool) { + call, ok := e.(*ast.CallExpr) + if !ok || len(call.Args) != 0 { + return nil, "", false + } + switch fn := call.Fun.(type) { + case *ast.SelectorExpr: + if fn.Sel.Name == "Lock" || fn.Sel.Name == "RLock" { + return fn.X, fn.Sel.Name, true + } + return nil, "", false + default: + return nil, "", false + } +} + +func (c *badLockChecker) asUnlockedMutex(e ast.Expr) (ast.Expr, string, bool) { + call, ok := e.(*ast.CallExpr) + if !ok || len(call.Args) != 0 { + return nil, "", false + } + switch fn := call.Fun.(type) { + case *ast.SelectorExpr: + if fn.Sel.Name == "Unlock" || fn.Sel.Name == "RUnlock" { + return fn.X, fn.Sel.Name, true + } + return nil, "", false + default: + return nil, "", false + } +} + +func (c *badLockChecker) warnImmediateUnlock(cause ast.Node) { + c.ctx.Warn(cause, "defer is missing, mutex is unlocked immediately") +} + +func (c *badLockChecker) warnMismatchingUnlock(cause ast.Node, suggestion string) { + c.ctx.Warn(cause, "suspicious unlock, maybe %s was intended?", suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go new file mode 100644 index 000000000..e0d4b7487 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go @@ -0,0 +1,445 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "sort" + "strconv" + "unicode" + "unicode/utf8" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/quasilyte/regex/syntax" +) + +func init() { + var info linter.CheckerInfo + info.Name = "badRegexp" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects suspicious regexp patterns" + info.Before = "regexp.MustCompile(`(?:^aa|bb|cc)foo[aba]`)" + info.After = "regexp.MustCompile(`^(?:aa|bb|cc)foo[ab]`)" + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + opts := &syntax.ParserOptions{} + c := &badRegexpChecker{ + ctx: ctx, + parser: syntax.NewParser(opts), + } + return astwalk.WalkerForExpr(c), nil + }) +} + +type badRegexpChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + parser *syntax.Parser + cause ast.Expr + + flagStates []regexpFlagState + goodAnchors []syntax.Position +} + +type regexpFlagState [utf8.RuneSelf]bool + +func (c *badRegexpChecker) VisitExpr(x ast.Expr) { + call, ok := x.(*ast.CallExpr) + if !ok { + return + } + + switch qualifiedName(call.Fun) { + case "regexp.Compile", "regexp.MustCompile": + cv := c.ctx.TypesInfo.Types[call.Args[0]].Value + if cv == nil || cv.Kind() != constant.String { + return + } + pat := constant.StringVal(cv) + c.cause = call.Args[0] + c.checkPattern(pat) + } +} + +func (c *badRegexpChecker) checkPattern(pat string) { + re, err := c.parser.Parse(pat) + if err != nil { + return + } + + c.flagStates = c.flagStates[:0] + c.goodAnchors = c.goodAnchors[:0] + + // In Go all flags (modifiers) are set to false by default, + // so we start from the empty flag set. 
+ c.flagStates = append(c.flagStates, regexpFlagState{}) + + c.markGoodCarets(re.Expr) + c.walk(re.Expr) +} + +func (c *badRegexpChecker) markGoodCarets(e syntax.Expr) { + canSkip := func(e syntax.Expr) bool { + switch e.Op { + case syntax.OpFlagOnlyGroup: + return true + case syntax.OpGroup: + x := e.Args[0] + return x.Op == syntax.OpConcat && len(x.Args) == 0 + } + return false + } + + if e.Op == syntax.OpConcat && len(e.Args) > 1 { + i := 0 + for i < len(e.Args) && canSkip(e.Args[i]) { + i++ + } + if i < len(e.Args) { + c.markGoodCarets(e.Args[i]) + } + return + } + if e.Op == syntax.OpCaret { + c.addGoodAnchor(e.Pos) + } + for _, a := range e.Args { + c.markGoodCarets(a) + } +} + +func (c *badRegexpChecker) walk(e syntax.Expr) { + switch e.Op { + case syntax.OpAlt: + c.checkAltAnchor(e) + c.checkAltDups(e) + for _, a := range e.Args { + c.walk(a) + } + + case syntax.OpCharClass, syntax.OpNegCharClass: + if c.checkCharClassRanges(e) { + c.checkCharClassDups(e) + } + + case syntax.OpStar, syntax.OpPlus: + c.checkNestedQuantifier(e) + c.walk(e.Args[0]) + + case syntax.OpFlagOnlyGroup: + c.updateFlagState(c.currentFlagState(), e, e.Args[0].Value) + case syntax.OpGroupWithFlags: + // Creates a new context using the current context copy. + // New flags are evaluated inside a new context. + // After nested expressions are processed, previous context is restored. + nflags := len(c.flagStates) + c.flagStates = append(c.flagStates, *c.currentFlagState()) + c.updateFlagState(c.currentFlagState(), e, e.Args[1].Value) + c.walk(e.Args[0]) + c.flagStates = c.flagStates[:nflags] + case syntax.OpGroup, syntax.OpCapture, syntax.OpNamedCapture: + // Like with OpGroupWithFlags, but doesn't evaluate any new flags. + nflags := len(c.flagStates) + c.flagStates = append(c.flagStates, *c.currentFlagState()) + c.walk(e.Args[0]) + c.flagStates = c.flagStates[:nflags] + + case syntax.OpCaret: + if !c.isGoodAnchor(e) { + c.warn("dangling or redundant ^, maybe \\^ is intended?") + } + + default: + for _, a := range e.Args { + c.walk(a) + } + } +} + +func (c *badRegexpChecker) currentFlagState() *regexpFlagState { + return &c.flagStates[len(c.flagStates)-1] +} + +func (c *badRegexpChecker) updateFlagState(state *regexpFlagState, e syntax.Expr, flagString string) { + clearing := false + for i := 0; i < len(flagString); i++ { + ch := flagString[i] + if ch == '-' { + clearing = true + continue + } + if int(ch) >= len(state) { + continue // Should never happen in practice, but we don't want a panic + } + + if clearing { + if !state[ch] { + c.warn("clearing unset flag %c in %s", ch, e.Value) + } + } else { + if state[ch] { + c.warn("redundant flag %c in %s", ch, e.Value) + } + } + state[ch] = !clearing + } +} + +func (c *badRegexpChecker) checkNestedQuantifier(e syntax.Expr) { + x := e.Args[0] + switch x.Op { + case syntax.OpGroup, syntax.OpCapture, syntax.OpGroupWithFlags: + if len(e.Args) == 1 { + x = x.Args[0] + } + } + + switch x.Op { + case syntax.OpPlus, syntax.OpStar: + c.warn("repeated greedy quantifier in %s", e.Value) + } +} + +func (c *badRegexpChecker) checkAltDups(alt syntax.Expr) { + // Seek duplicated alternation expressions. 
+ + set := make(map[string]struct{}, len(alt.Args)) + for _, a := range alt.Args { + if _, ok := set[a.Value]; ok { + c.warn("`%s` is duplicated in %s", a.Value, alt.Value) + } + set[a.Value] = struct{}{} + } +} + +func (c *badRegexpChecker) isCharOrLit(e syntax.Expr) bool { + return e.Op == syntax.OpChar || e.Op == syntax.OpLiteral +} + +func (c *badRegexpChecker) checkAltAnchor(alt syntax.Expr) { + // Seek suspicious anchors. + + // Case 1: an alternation of literals where 1st expr begins with ^ anchor. + first := alt.Args[0] + if first.Op == syntax.OpConcat && len(first.Args) == 2 && first.Args[0].Op == syntax.OpCaret && c.isCharOrLit(first.Args[1]) { + matched := true + for _, a := range alt.Args[1:] { + if !c.isCharOrLit(a) { + matched = false + break + } + } + if matched { + c.warn("^ applied only to `%s` in %s", first.Value[len(`^`):], alt.Value) + } + } + + // Case 2: an alternation of literals where last expr ends with $ anchor. + last := alt.Args[len(alt.Args)-1] + if last.Op == syntax.OpConcat && len(last.Args) == 2 && last.Args[1].Op == syntax.OpDollar && c.isCharOrLit(last.Args[0]) { + matched := true + for _, a := range alt.Args[:len(alt.Args)-1] { + if !c.isCharOrLit(a) { + matched = false + break + } + } + if matched { + c.warn("$ applied only to `%s` in %s", last.Value[:len(last.Value)-len(`$`)], alt.Value) + } + } +} + +func (c *badRegexpChecker) checkCharClassRanges(cc syntax.Expr) bool { + // Seek for suspicious ranges like `!-_`. + // + // We permit numerical ranges (0-9, hex and octal literals) + // and simple ascii letter ranges. + + for _, e := range cc.Args { + if e.Op != syntax.OpCharRange { + continue + } + switch e.Args[0].Op { + case syntax.OpEscapeOctal, syntax.OpEscapeHex: + continue + } + ch := c.charClassBoundRune(e.Args[0]) + if ch == 0 { + return false + } + good := unicode.IsLetter(ch) || (ch >= '0' && ch <= '9') + if !good { + c.warnSloppyCharRange(e.Value, cc.Value) + } + } + + return true +} + +func (c *badRegexpChecker) checkCharClassDups(cc syntax.Expr) { + // Seek for excessive elements inside a character class. + // Report them as intersections. + + if len(cc.Args) == 1 { + return // Can't had duplicates. + } + + type charRange struct { + low rune + high rune + source string + } + ranges := make([]charRange, 0, 8) + addRange := func(source string, low, high rune) { + ranges = append(ranges, charRange{source: source, low: low, high: high}) + } + addRange1 := func(source string, ch rune) { + addRange(source, ch, ch) + } + + // 1. Collect ranges, O(n). + for _, e := range cc.Args { + switch e.Op { + case syntax.OpEscapeOctal: + addRange1(e.Value, c.octalToRune(e)) + case syntax.OpEscapeHex: + addRange1(e.Value, c.hexToRune(e)) + case syntax.OpChar: + addRange1(e.Value, c.stringToRune(e.Value)) + case syntax.OpCharRange: + addRange(e.Value, c.charClassBoundRune(e.Args[0]), c.charClassBoundRune(e.Args[1])) + case syntax.OpEscapeMeta: + addRange1(e.Value, rune(e.Value[1])) + case syntax.OpEscapeChar: + ch := c.stringToRune(e.Value[len(`\`):]) + if unicode.IsPunct(ch) { + addRange1(e.Value, ch) + break + } + switch e.Value { + case `\|`, `\<`, `\>`, `\+`, `\=`: // How to cover all symbols? 
+ addRange1(e.Value, c.stringToRune(e.Value[len(`\`):])) + case `\t`: + addRange1(e.Value, '\t') + case `\n`: + addRange1(e.Value, '\n') + case `\r`: + addRange1(e.Value, '\r') + case `\v`: + addRange1(e.Value, '\v') + case `\d`: + addRange(e.Value, '0', '9') + case `\D`: + addRange(e.Value, 0, '0'-1) + addRange(e.Value, '9'+1, utf8.MaxRune) + case `\s`: + addRange(e.Value, '\t', '\n') // 9-10 + addRange(e.Value, '\f', '\r') // 12-13 + addRange1(e.Value, ' ') // 32 + case `\S`: + addRange(e.Value, 0, '\t'-1) + addRange(e.Value, '\n'+1, '\f'-1) + addRange(e.Value, '\r'+1, ' '-1) + addRange(e.Value, ' '+1, utf8.MaxRune) + case `\w`: + addRange(e.Value, '0', '9') // 48-57 + addRange(e.Value, 'A', 'Z') // 65-90 + addRange1(e.Value, '_') // 95 + addRange(e.Value, 'a', 'z') // 97-122 + case `\W`: + addRange(e.Value, 0, '0'-1) + addRange(e.Value, '9'+1, 'A'-1) + addRange(e.Value, 'Z'+1, '_'-1) + addRange(e.Value, '_'+1, 'a'-1) + addRange(e.Value, 'z'+1, utf8.MaxRune) + default: + // Give up: unknown escape sequence. + return + } + default: + // Give up: unexpected operation inside char class. + return + } + } + + // 2. Sort ranges, O(nlogn). + sort.Slice(ranges, func(i, j int) bool { + return ranges[i].low < ranges[j].low + }) + + // 3. Search for duplicates, O(n). + for i := 0; i < len(ranges)-1; i++ { + x := ranges[i+0] + y := ranges[i+1] + if x.high >= y.low { + c.warnCharClassDup(x.source, y.source, cc.Value) + break + } + } +} + +func (c *badRegexpChecker) charClassBoundRune(e syntax.Expr) rune { + switch e.Op { + case syntax.OpChar: + return c.stringToRune(e.Value) + case syntax.OpEscapeHex: + return c.hexToRune(e) + case syntax.OpEscapeOctal: + return c.octalToRune(e) + default: + return 0 + } +} + +func (c *badRegexpChecker) octalToRune(e syntax.Expr) rune { + v, _ := strconv.ParseInt(e.Value[len(`\`):], 8, 32) + return rune(v) +} + +func (c *badRegexpChecker) hexToRune(e syntax.Expr) rune { + var s string + switch e.Form { + case syntax.FormEscapeHexFull: + s = e.Value[len(`\x{`) : len(e.Value)-len(`}`)] + default: + s = e.Value[len(`\x`):] + } + v, _ := strconv.ParseInt(s, 16, 32) + return rune(v) +} + +func (c *badRegexpChecker) stringToRune(s string) rune { + ch, _ := utf8.DecodeRuneInString(s) + return ch +} + +func (c *badRegexpChecker) addGoodAnchor(pos syntax.Position) { + c.goodAnchors = append(c.goodAnchors, pos) +} + +func (c *badRegexpChecker) isGoodAnchor(e syntax.Expr) bool { + for _, pos := range c.goodAnchors { + if e.Pos == pos { + return true + } + } + return false +} + +func (c *badRegexpChecker) warn(format string, args ...interface{}) { + c.ctx.Warn(c.cause, format, args...) 
+} + +func (c *badRegexpChecker) warnSloppyCharRange(rng, charClass string) { + c.ctx.Warn(c.cause, "suspicious char range `%s` in %s", rng, charClass) +} + +func (c *badRegexpChecker) warnCharClassDup(x, y, charClass string) { + if x == y { + c.ctx.Warn(c.cause, "`%s` is duplicated in %s", x, charClass) + } else { + c.ctx.Warn(c.cause, "`%s` intersects with `%s` in %s", x, y, charClass) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go new file mode 100644 index 000000000..325fb56a3 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go @@ -0,0 +1,346 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/typep" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "boolExprSimplify" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects bool expressions that can be simplified" + info.Before = ` +a := !(elapsed >= expectElapsedMin) +b := !(x) == !(y)` + info.After = ` +a := elapsed < expectElapsedMin +b := (x) == (y)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&boolExprSimplifyChecker{ctx: ctx}), nil + }) +} + +type boolExprSimplifyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + hasFloats bool +} + +func (c *boolExprSimplifyChecker) VisitExpr(x ast.Expr) { + if !astp.IsBinaryExpr(x) && !astp.IsUnaryExpr(x) { + return + } + + // Throw away non-bool expressions and avoid redundant + // AST copying below. + if typ := c.ctx.TypeOf(x); typ == nil || !typep.HasBoolKind(typ.Underlying()) { + return + } + + // We'll lose all types info after a copy, + // this is why we record valuable info before doing it. 
+ c.hasFloats = lintutil.ContainsNode(x, func(n ast.Node) bool { + if x, ok := n.(*ast.BinaryExpr); ok { + return typep.HasFloatProp(c.ctx.TypeOf(x.X).Underlying()) || + typep.HasFloatProp(c.ctx.TypeOf(x.Y).Underlying()) + } + return false + }) + + y := c.simplifyBool(astcopy.Expr(x)) + if !astequal.Expr(x, y) { + c.warn(x, y) + } +} + +func (c *boolExprSimplifyChecker) simplifyBool(x ast.Expr) ast.Expr { + return astutil.Apply(x, nil, func(cur *astutil.Cursor) bool { + return c.doubleNegation(cur) || + c.negatedEquals(cur) || + c.invertComparison(cur) || + c.combineChecks(cur) || + c.removeIncDec(cur) || + c.foldRanges(cur) || + true + }).(ast.Expr) +} + +func (c *boolExprSimplifyChecker) doubleNegation(cur *astutil.Cursor) bool { + neg1 := astcast.ToUnaryExpr(cur.Node()) + neg2 := astcast.ToUnaryExpr(astutil.Unparen(neg1.X)) + if neg1.Op == token.NOT && neg2.Op == token.NOT { + cur.Replace(astutil.Unparen(neg2.X)) + return true + } + return false +} + +func (c *boolExprSimplifyChecker) negatedEquals(cur *astutil.Cursor) bool { + x, ok := cur.Node().(*ast.BinaryExpr) + if !ok || x.Op != token.EQL { + return false + } + neg1 := astcast.ToUnaryExpr(x.X) + neg2 := astcast.ToUnaryExpr(x.Y) + if neg1.Op == token.NOT && neg2.Op == token.NOT { + x.X = neg1.X + x.Y = neg2.X + return true + } + return false +} + +func (c *boolExprSimplifyChecker) invertComparison(cur *astutil.Cursor) bool { + if c.hasFloats { // See #673 + return false + } + + neg := astcast.ToUnaryExpr(cur.Node()) + cmp := astcast.ToBinaryExpr(astutil.Unparen(neg.X)) + if neg.Op != token.NOT { + return false + } + + // Replace operator to its negated form. + switch cmp.Op { + case token.EQL: + cmp.Op = token.NEQ + case token.NEQ: + cmp.Op = token.EQL + case token.LSS: + cmp.Op = token.GEQ + case token.GTR: + cmp.Op = token.LEQ + case token.LEQ: + cmp.Op = token.GTR + case token.GEQ: + cmp.Op = token.LSS + + default: + return false + } + cur.Replace(cmp) + return true +} + +func (c *boolExprSimplifyChecker) isSafe(x ast.Expr) bool { + return typep.SideEffectFree(c.ctx.TypesInfo, x) +} + +func (c *boolExprSimplifyChecker) combineChecks(cur *astutil.Cursor) bool { + or, ok := cur.Node().(*ast.BinaryExpr) + if !ok || or.Op != token.LOR { + return false + } + + lhs := astcast.ToBinaryExpr(astutil.Unparen(or.X)) + rhs := astcast.ToBinaryExpr(astutil.Unparen(or.Y)) + + if !astequal.Expr(lhs.X, rhs.X) || !astequal.Expr(lhs.Y, rhs.Y) { + return false + } + if !c.isSafe(lhs.X) || !c.isSafe(lhs.Y) { + return false + } + + combTable := [...]struct { + x token.Token + y token.Token + result token.Token + }{ + {token.GTR, token.EQL, token.GEQ}, + {token.EQL, token.GTR, token.GEQ}, + {token.LSS, token.EQL, token.LEQ}, + {token.EQL, token.LSS, token.LEQ}, + } + for _, comb := range &combTable { + if comb.x == lhs.Op && comb.y == rhs.Op { + lhs.Op = comb.result + cur.Replace(lhs) + return true + } + } + return false +} + +func (c *boolExprSimplifyChecker) removeIncDec(cur *astutil.Cursor) bool { + cmp := astcast.ToBinaryExpr(cur.Node()) + + matchOneWay := func(op token.Token, x, y *ast.BinaryExpr) bool { + if x.Op != op || astcast.ToBasicLit(x.Y).Value != "1" { + return false + } + if y.Op == op && astcast.ToBasicLit(y.Y).Value == "1" { + return false + } + return true + } + replace := func(lhsOp, rhsOp, replacement token.Token) bool { + lhs := astcast.ToBinaryExpr(cmp.X) + rhs := astcast.ToBinaryExpr(cmp.Y) + switch { + case matchOneWay(lhsOp, lhs, rhs): + cmp.X = lhs.X + cmp.Op = replacement + cur.Replace(cmp) + return true + case 
matchOneWay(rhsOp, rhs, lhs): + cmp.Y = rhs.X + cmp.Op = replacement + cur.Replace(cmp) + return true + default: + return false + } + } + + switch cmp.Op { + case token.GTR: + // `x > y-1` => `x >= y` + // `x+1 > y` => `x >= y` + return replace(token.ADD, token.SUB, token.GEQ) + + case token.GEQ: + // `x >= y+1` => `x > y` + // `x-1 >= y` => `x > y` + return replace(token.SUB, token.ADD, token.GTR) + + case token.LSS: + // `x < y+1` => `x <= y` + // `x-1 < y` => `x <= y` + return replace(token.SUB, token.ADD, token.LEQ) + + case token.LEQ: + // `x <= y-1` => `x < y` + // `x+1 <= y` => `x < y` + return replace(token.ADD, token.SUB, token.LSS) + + default: + return false + } +} + +func (c *boolExprSimplifyChecker) foldRanges(cur *astutil.Cursor) bool { + if c.hasFloats { // See #848 + return false + } + + e, ok := cur.Node().(*ast.BinaryExpr) + if !ok { + return false + } + lhs := astcast.ToBinaryExpr(e.X) + rhs := astcast.ToBinaryExpr(e.Y) + if !c.isSafe(lhs.X) || !c.isSafe(rhs.X) { + return false + } + if !astequal.Expr(lhs.X, rhs.X) { + return false + } + + c1, ok := c.int64val(lhs.Y) + if !ok { + return false + } + c2, ok := c.int64val(rhs.Y) + if !ok { + return false + } + + type combination struct { + lhsOp token.Token + rhsOp token.Token + rhsDiff int64 + resDelta int64 + } + match := func(comb *combination) bool { + if lhs.Op != comb.lhsOp || rhs.Op != comb.rhsOp { + return false + } + if c2-c1 != comb.rhsDiff { + return false + } + return true + } + + switch e.Op { + case token.LAND: + combTable := [...]combination{ + // `x > c && x < c+2` => `x == c+1` + {token.GTR, token.LSS, 2, 1}, + // `x >= c && x < c+1` => `x == c` + {token.GEQ, token.LSS, 1, 0}, + // `x > c && x <= c+1` => `x == c+1` + {token.GTR, token.LEQ, 1, 1}, + // `x >= c && x <= c` => `x == c` + {token.GEQ, token.LEQ, 0, 0}, + } + for i := range combTable { + comb := combTable[i] + if match(&comb) { + lhs.Op = token.EQL + v := c1 + comb.resDelta + lhs.Y.(*ast.BasicLit).Value = fmt.Sprint(v) + cur.Replace(lhs) + return true + } + } + + case token.LOR: + combTable := [...]combination{ + // `x < c || x > c` => `x != c` + {token.LSS, token.GTR, 0, 0}, + // `x <= c || x > c+1` => `x != c+1` + {token.LEQ, token.GTR, 1, 1}, + // `x < c || x >= c+1` => `x != c` + {token.LSS, token.GEQ, 1, 0}, + // `x <= c || x >= c+2` => `x != c+1` + {token.LEQ, token.GEQ, 2, 1}, + } + for i := range combTable { + comb := combTable[i] + if match(&comb) { + lhs.Op = token.NEQ + v := c1 + comb.resDelta + lhs.Y.(*ast.BasicLit).Value = fmt.Sprint(v) + cur.Replace(lhs) + return true + } + } + } + + return false +} + +func (c *boolExprSimplifyChecker) int64val(x ast.Expr) (int64, bool) { + // TODO(quasilyte): if we had types info, we could use TypesInfo.Types[x].Value, + // but since copying erases it, only basic literals are handled 
+ lit, ok := x.(*ast.BasicLit) + if !ok { + return 0, false + } + v, err := strconv.ParseInt(lit.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +func (c *boolExprSimplifyChecker) warn(cause, suggestion ast.Expr) { + c.SkipChilds = true + c.ctx.Warn(cause, "can simplify `%s` to `%s`", cause, suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go new file mode 100644 index 000000000..94d51a996 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go @@ -0,0 +1,63 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "builtinShadowDecl" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects top-level declarations that shadow the predeclared identifiers" + info.Before = `type int struct {}` + info.After = `type myInt struct {}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return &builtinShadowDeclChecker{ctx: ctx}, nil + }) +} + +type builtinShadowDeclChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *builtinShadowDeclChecker) WalkFile(f *ast.File) { + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + // Don't check methods. They can shadow anything safely. + if decl.Recv == nil { + c.checkName(decl.Name) + } + case *ast.GenDecl: + c.visitGenDecl(decl) + } + } +} + +func (c *builtinShadowDeclChecker) visitGenDecl(decl *ast.GenDecl) { + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + for _, name := range spec.Names { + c.checkName(name) + } + case *ast.TypeSpec: + c.checkName(spec.Name) + } + } +} + +func (c *builtinShadowDeclChecker) checkName(name *ast.Ident) { + if isBuiltin(name.Name) { + c.warn(name) + } +} + +func (c *builtinShadowDeclChecker) warn(ident *ast.Ident) { + c.ctx.Warn(ident, "shadowing of predeclared identifier: %s", ident) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go new file mode 100644 index 000000000..1e1661deb --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go @@ -0,0 +1,36 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "builtinShadow" + info.Tags = []string{"style", "opinionated"} + info.Summary = "Detects when predeclared identifiers are shadowed in assignments" + info.Before = `len := 10` + info.After = `length := 10` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForLocalDef(&builtinShadowChecker{ctx: ctx}, ctx.TypesInfo), nil + }) +} + +type builtinShadowChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *builtinShadowChecker) VisitLocalDef(name astwalk.Name, _ ast.Expr) { + if isBuiltin(name.ID.Name) { + c.warn(name.ID) + } +} + +func (c *builtinShadowChecker) warn(ident *ast.Ident) { + c.ctx.Warn(ident, "shadowing of predeclared identifier: %s", ident) +} diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go new file mode 100644 index 000000000..d9b4b7e75 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go @@ -0,0 +1,49 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "captLocal" + info.Tags = []string{"style"} + info.Params = linter.CheckerParams{ + "paramsOnly": { + Value: true, + Usage: "whether to restrict checker to params only", + }, + } + info.Summary = "Detects capitalized names for local variables" + info.Before = `func f(IN int, OUT *int) (ERR error) {}` + info.After = `func f(in int, out *int) (err error) {}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &captLocalChecker{ctx: ctx} + c.paramsOnly = info.Params.Bool("paramsOnly") + return astwalk.WalkerForLocalDef(c, ctx.TypesInfo), nil + }) +} + +type captLocalChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + paramsOnly bool +} + +func (c *captLocalChecker) VisitLocalDef(def astwalk.Name, _ ast.Expr) { + if c.paramsOnly && def.Kind != astwalk.NameParam { + return + } + if ast.IsExported(def.ID.Name) { + c.warn(def.ID) + } +} + +func (c *captLocalChecker) warn(id ast.Node) { + c.ctx.Warn(id, "`%s' should not be capitalized", id) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go new file mode 100644 index 000000000..047ea4fee --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go @@ -0,0 +1,88 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "caseOrder" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects erroneous case order inside switch statements" + info.Before = ` +switch x.(type) { +case ast.Expr: + fmt.Println("expr") +case *ast.BasicLit: + fmt.Println("basic lit") // Never executed +}` + info.After = ` +switch x.(type) { +case *ast.BasicLit: + fmt.Println("basic lit") // Now reachable +case ast.Expr: + fmt.Println("expr") +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&caseOrderChecker{ctx: ctx}), nil + }) +} + +type caseOrderChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *caseOrderChecker) VisitStmt(stmt ast.Stmt) { + switch stmt := stmt.(type) { + case *ast.TypeSwitchStmt: + c.checkTypeSwitch(stmt) + case *ast.SwitchStmt: + c.checkSwitch(stmt) + } +} + +func (c *caseOrderChecker) checkTypeSwitch(s *ast.TypeSwitchStmt) { + type ifaceType struct { + node ast.Node + typ *types.Interface + } + var ifaces []ifaceType // Interfaces seen so far + for _, cc := range s.Body.List { + cc := cc.(*ast.CaseClause) + for _, x := range cc.List { + typ := c.ctx.TypeOf(x) + if typ == linter.UnknownType { + c.warnUnknownType(cc, x) + return + } + for _, iface := range ifaces { + if types.Implements(typ, iface.typ) { + c.warnTypeSwitch(cc, x, iface.node) + break + } + } + if iface, ok := typ.Underlying().(*types.Interface); ok { + ifaces = append(ifaces, ifaceType{node: x, typ: 
iface}) + } + } + } +} + +func (c *caseOrderChecker) warnTypeSwitch(cause, concrete, iface ast.Node) { + c.ctx.Warn(cause, "case %s must go before the %s case", concrete, iface) +} + +func (c *caseOrderChecker) warnUnknownType(cause, concrete ast.Node) { + c.ctx.Warn(cause, "type is not defined %s", concrete) +} + +func (c *caseOrderChecker) checkSwitch(s *ast.SwitchStmt) { + // TODO(quasilyte): can handle expression cases that overlap. + // Cases that have narrower value range should go before wider ones. +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/checkers.go b/vendor/github.com/go-critic/go-critic/checkers/checkers.go new file mode 100644 index 000000000..0c2ebc00c --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/checkers.go @@ -0,0 +1,19 @@ +// Package checkers is a gocritic linter main checkers collection. +package checkers + +import ( + "os" + + "github.com/go-critic/go-critic/framework/linter" +) + +var collection = &linter.CheckerCollection{ + URL: "https://github.com/go-critic/go-critic/checkers", +} + +var debug = func() func() bool { + v := os.Getenv("DEBUG") != "" + return func() bool { + return v + } +}() diff --git a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go new file mode 100644 index 000000000..52a72d28c --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go @@ -0,0 +1,61 @@ +package checkers + +import ( + "go/ast" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "codegenComment" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects malformed 'code generated' file comments" + info.Before = `// This file was automatically generated by foogen` + info.After = `// Code generated by foogen. DO NOT EDIT.` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + patterns := []string{ + "this (?:file|code) (?:was|is) auto(?:matically)? generated", + "this (?:file|code) (?:was|is) generated automatically", + "this (?:file|code) (?:was|is) generated by", + "this (?:file|code) (?:was|is) (?:auto(?:matically)? )?generated", + "this (?:file|code) (?:was|is) generated", + "code in this file (?:was|is) auto(?:matically)? generated", + "generated (?:file|code) - do not edit", + // TODO(quasilyte): more of these. 
+ } + re := regexp.MustCompile("(?i)" + strings.Join(patterns, "|")) + return &codegenCommentChecker{ + ctx: ctx, + badCommentRE: re, + }, nil + }) +} + +type codegenCommentChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + badCommentRE *regexp.Regexp +} + +func (c *codegenCommentChecker) WalkFile(f *ast.File) { + if f.Doc == nil { + return + } + + for _, comment := range f.Doc.List { + if c.badCommentRE.MatchString(comment.Text) { + c.warn(comment) + return + } + } +} + +func (c *codegenCommentChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "comment should match `Code generated .* DO NOT EDIT.` regexp") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go new file mode 100644 index 000000000..d4939f3f6 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go @@ -0,0 +1,79 @@ +package checkers + +import ( + "go/ast" + "regexp" + "strings" + "unicode" + "unicode/utf8" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "commentFormatting" + info.Tags = []string{"style"} + info.Summary = "Detects comments with non-idiomatic formatting" + info.Before = `//This is a comment` + info.After = `// This is a comment` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + parts := []string{ + `^//go:generate .*$`, // e.g.: go:generate value + `^//[\w-]+:.*$`, // e.g.: key: value + `^//nolint\b`, // e.g.: nolint + `^//line /.*:\d+`, // e.g.: line /path/to/file:123 + `^//export \w+$`, // e.g.: export Foo + } + pat := "(?m)" + strings.Join(parts, "|") + pragmaRE := regexp.MustCompile(pat) + return astwalk.WalkerForComment(&commentFormattingChecker{ + ctx: ctx, + pragmaRE: pragmaRE, + }), nil + }) +} + +type commentFormattingChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + pragmaRE *regexp.Regexp +} + +func (c *commentFormattingChecker) VisitComment(cg *ast.CommentGroup) { + if strings.HasPrefix(cg.List[0].Text, "/*") { + return + } + for _, comment := range cg.List { + if len(comment.Text) <= len("// ") { + continue + } + if c.pragmaRE.MatchString(comment.Text) { + continue + } + + // Make a decision based on a first comment text rune. + r, _ := utf8.DecodeRuneInString(comment.Text[len("//"):]) + if !c.specialChar(r) && !unicode.IsSpace(r) { + c.warn(comment) + return + } + } +} + +func (c *commentFormattingChecker) specialChar(r rune) bool { + // Permitted list to avoid false-positives. 
+ switch r { + case '+', '-', '#', '!': + return true + default: + return false + } +} + +func (c *commentFormattingChecker) warn(comment *ast.Comment) { + c.ctx.Warn(comment, "put a space between `//` and comment text") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go new file mode 100644 index 000000000..554e0621f --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go @@ -0,0 +1,157 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/strparse" +) + +func init() { + var info linter.CheckerInfo + info.Name = "commentedOutCode" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects commented-out code inside function bodies" + info.Before = ` +// fmt.Println("Debugging hard") +foo(1, 2)` + info.After = `foo(1, 2)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForLocalComment(&commentedOutCodeChecker{ + ctx: ctx, + notQuiteFuncCall: regexp.MustCompile(`\w+\s+\([^)]*\)\s*$`), + }), nil + }) +} + +type commentedOutCodeChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + fn *ast.FuncDecl + + notQuiteFuncCall *regexp.Regexp +} + +func (c *commentedOutCodeChecker) EnterFunc(fn *ast.FuncDecl) bool { + c.fn = fn // Need to store current function inside checker context + return fn.Body != nil +} + +func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { + s := cg.Text() // Collect text once + + // We do multiple heuristics to avoid false positives. + // Many things can be improved here. + + markers := []string{ + "TODO", // TODO comments with code are permitted. + + // "http://" is interpreted as a label with comment. + // There are other protocols we might want to include. + "http://", + "https://", + + "e.g. ", // Clearly not a "selector expr" (mostly due to extra space) + } + for _, m := range markers { + if strings.Contains(s, m) { + return + } + } + + // Some very short comment that can be skipped. + // Usually triggering on these results in false positive. + // Unless there is a very popular call like print/println. + cond := len(s) < len("quite too short") && + !strings.Contains(s, "print") && + !strings.Contains(s, "fmt.") && + !strings.Contains(s, "log.") + if cond { + return + } + + // Almost looks like a commented-out function call, + // but there is a whitespace between function name and + // parameters list. Skip these to avoid false positives. + if c.notQuiteFuncCall.MatchString(s) { + return + } + + stmt := strparse.Stmt(s) + + if c.isPermittedStmt(stmt) { + return + } + + if stmt != strparse.BadStmt { + c.warn(cg) + return + } + + // Don't try to parse one-liner as block statement + if len(cg.List) == 1 && !strings.Contains(s, "\n") { + return + } + + // Some attempts to avoid false positives. + if c.skipBlock(s) { + return + } + + // Add braces to make block statement from + // multiple statements. + stmt = strparse.Stmt(fmt.Sprintf("{ %s }", s)) + + if stmt, ok := stmt.(*ast.BlockStmt); ok && len(stmt.List) != 0 { + c.warn(cg) + } +} + +func (c *commentedOutCodeChecker) skipBlock(s string) bool { + lines := strings.Split(s, "\n") // There is at least 1 line, that's invariant + + // Special example test block. 
+ if isExampleTestFunc(c.fn) && strings.Contains(lines[0], "Output:") { + return true + } + + return false +} + +func (c *commentedOutCodeChecker) isPermittedStmt(stmt ast.Stmt) bool { + switch stmt := stmt.(type) { + case *ast.ExprStmt: + return c.isPermittedExpr(stmt.X) + case *ast.LabeledStmt: + return c.isPermittedStmt(stmt.Stmt) + case *ast.DeclStmt: + decl := stmt.Decl.(*ast.GenDecl) + return decl.Tok == token.TYPE + default: + return false + } +} + +func (c *commentedOutCodeChecker) isPermittedExpr(x ast.Expr) bool { + // Permit anything except expressions that can be used + // with complete result discarding. + switch x := x.(type) { + case *ast.CallExpr: + return false + case *ast.UnaryExpr: + // "<-" channel receive is not permitted. + return x.Op != token.ARROW + default: + return true + } +} + +func (c *commentedOutCodeChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "may want to remove commented-out code") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go new file mode 100644 index 000000000..3c086569b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go @@ -0,0 +1,76 @@ +package checkers + +import ( + "go/ast" + "go/token" + "regexp" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "commentedOutImport" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects commented-out imports" + info.Before = ` +import ( + "fmt" + //"os" +)` + info.After = ` +import ( + "fmt" +)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + const pattern = `(?m)^(?://|/\*)?\s*"([a-zA-Z0-9_/]+)"\s*(?:\*/)?$` + return &commentedOutImportChecker{ + ctx: ctx, + importStringRE: regexp.MustCompile(pattern), + }, nil + }) +} + +type commentedOutImportChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + importStringRE *regexp.Regexp +} + +func (c *commentedOutImportChecker) WalkFile(f *ast.File) { + // TODO(quasilyte): handle commented-out import spec, + // for example: // import "errors". + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.GenDecl) + if !ok || decl.Tok != token.IMPORT { + // Import decls can only be in the beginning of the file. + // If we've met some other decl, there will be no more + // import decls. + break + } + + // Find comments inside this import decl span. + for _, cg := range f.Comments { + if cg.Pos() > decl.Rparen { + break // Below the decl, stop. + } + if cg.Pos() < decl.Lparen { + continue // Before the decl, skip. 
+ } + + for _, comment := range cg.List { + for _, m := range c.importStringRE.FindAllStringSubmatch(comment.Text, -1) { + c.warn(comment, m[1]) + } + } + } + } +} + +func (c *commentedOutImportChecker) warn(cause ast.Node, path string) { + c.ctx.Warn(cause, "remove commented-out %q import", path) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go new file mode 100644 index 000000000..e06944d62 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go @@ -0,0 +1,65 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "defaultCaseOrder" + info.Tags = []string{"style"} + info.Summary = "Detects when default case in switch isn't on 1st or last position" + info.Before = ` +switch { +case x > y: + // ... +default: // <- not the best position + // ... +case x == 10: + // ... +}` + info.After = ` +switch { +case x > y: + // ... +case x == 10: + // ... +default: // <- last case (could also be the first one) + // ... +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&defaultCaseOrderChecker{ctx: ctx}), nil + }) +} + +type defaultCaseOrderChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *defaultCaseOrderChecker) VisitStmt(stmt ast.Stmt) { + swtch, ok := stmt.(*ast.SwitchStmt) + if !ok { + return + } + for i, stmt := range swtch.Body.List { + caseStmt, ok := stmt.(*ast.CaseClause) + if !ok { + continue + } + // is `default` case + if caseStmt.List == nil { + if i != 0 && i != len(swtch.Body.List)-1 { + c.warn(caseStmt) + } + } + } +} + +func (c *defaultCaseOrderChecker) warn(cause *ast.CaseClause) { + c.ctx.Warn(cause, "consider to make `default` case as first or as last case") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go new file mode 100644 index 000000000..b312bfb68 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go @@ -0,0 +1,94 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "deferUnlambda" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects deferred function literals that can be simplified" + info.Before = `defer func() { f() }()` + info.After = `f()` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&deferUnlambdaChecker{ctx: ctx}), nil + }) +} + +type deferUnlambdaChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *deferUnlambdaChecker) VisitStmt(x ast.Stmt) { + def, ok := x.(*ast.DeferStmt) + if !ok { + return + } + + // We don't analyze deferred function args. + // Most deferred calls don't have them, so it's not a big deal to skip them. 
+ if len(def.Call.Args) != 0 { + return + } + + fn, ok := def.Call.Fun.(*ast.FuncLit) + if !ok { + return + } + + if len(fn.Body.List) != 1 { + return + } + + call, ok := astcast.ToExprStmt(fn.Body.List[0]).X.(*ast.CallExpr) + if !ok || !c.isFunctionCall(call) { + return + } + + // Skip recover() as it can't be moved outside of the lambda. + // Skip panic() to avoid affecting the stack trace. + switch qualifiedName(call.Fun) { + case "recover", "panic": + return + } + + for _, arg := range call.Args { + if !c.isConstExpr(arg) { + return + } + } + + c.warn(def, call) +} + +func (c *deferUnlambdaChecker) isFunctionCall(e *ast.CallExpr) bool { + switch fnExpr := e.Fun.(type) { + case *ast.Ident: + return true + case *ast.SelectorExpr: + x, ok := fnExpr.X.(*ast.Ident) + if !ok { + return false + } + _, ok = c.ctx.TypesInfo.ObjectOf(x).(*types.PkgName) + return ok + default: + return false + } +} + +func (c *deferUnlambdaChecker) isConstExpr(e ast.Expr) bool { + return c.ctx.TypesInfo.Types[e].Value != nil +} + +func (c *deferUnlambdaChecker) warn(cause, suggestion ast.Node) { + c.ctx.Warn(cause, "can rewrite as `defer %s`", suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go new file mode 100644 index 000000000..f60e58b58 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go @@ -0,0 +1,149 @@ +package checkers + +import ( + "go/ast" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "deprecatedComment" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects malformed 'deprecated' doc-comments" + info.Before = ` +// deprecated, use FuncNew instead +func FuncOld() int` + info.After = ` +// Deprecated: use FuncNew instead +func FuncOld() int` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &deprecatedCommentChecker{ctx: ctx} + + c.commonPatterns = []*regexp.Regexp{ + regexp.MustCompile(`(?i)this (?:function|type) is deprecated`), + regexp.MustCompile(`(?i)deprecated[.!]? use \S* instead`), + regexp.MustCompile(`(?i)\[\[deprecated\]\].*`), + regexp.MustCompile(`(?i)note: deprecated\b.*`), + regexp.MustCompile(`(?i)deprecated in.*`), + // TODO(quasilyte): more of these? + } + + // TODO(quasilyte): may want to generate this list programmatically. + // + // TODO(quasilyte): currently it only handles a single missing letter. + // Might want to handle other kinds of common misspell/typo kinds. + c.commonTypos = []string{ + "Dprecated: ", + "Derecated: ", + "Depecated: ", + "Deprcated: ", + "Depreated: ", + "Deprected: ", + "Deprecaed: ", + "Deprecatd: ", + "Deprecate: ", + "Derpecate: ", + "Derpecated: ", + "Depreacted: ", + } + for i := range c.commonTypos { + c.commonTypos[i] = strings.ToUpper(c.commonTypos[i]) + } + + return astwalk.WalkerForDocComment(c), nil + }) +} + +type deprecatedCommentChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + commonPatterns []*regexp.Regexp + commonTypos []string +} + +func (c *deprecatedCommentChecker) VisitDocComment(doc *ast.CommentGroup) { + // There are 3 accepted forms of deprecation comments: + // + // 1. inline, that can't be handled with a DocCommentVisitor. + // Note that "Deprecated: " may not even be the comment prefix there. 
+ // Example: "The line number in the input. Deprecated: Kept for compatibility." + // TODO(quasilyte): fix it. + // + // 2. Longer form-1. It's a doc-comment that only contains "deprecation" notice. + // + // 3. Like form-2, but may also include doc-comment text. + // Distinguished by an empty line. + // + // See https://github.com/golang/go/issues/10909#issuecomment-136492606. + // + // It's desirable to see how people make mistakes with the format, + // this is why there is currently no special treatment for these cases. + // TODO(quasilyte): do more audits and grow the negative tests suite. + // + // TODO(quasilyte): there are also multi-line deprecation comments. + + for _, comment := range doc.List { + if strings.HasPrefix(comment.Text, "/*") { + // TODO(quasilyte): handle multi-line doc comments. + continue + } + l := comment.Text[len("//"):] + if len(l) < len("Deprecated: ") { + continue + } + l = strings.TrimSpace(l) + + // Check whether someone messed up with a prefix casing. + upcase := strings.ToUpper(l) + if strings.HasPrefix(upcase, "DEPRECATED: ") && !strings.HasPrefix(l, "Deprecated: ") { + c.warnCasing(comment, l) + return + } + + // Check is someone used comma instead of a colon. + if strings.HasPrefix(l, "Deprecated, ") { + c.warnComma(comment) + return + } + + // Check for other commonly used patterns. + for _, pat := range c.commonPatterns { + if pat.MatchString(l) { + c.warnPattern(comment) + return + } + } + + // Detect some simple typos. + for _, prefixWithTypo := range c.commonTypos { + if strings.HasPrefix(upcase, prefixWithTypo) { + c.warnTypo(comment, l) + return + } + } + } +} + +func (c *deprecatedCommentChecker) warnCasing(cause ast.Node, line string) { + prefix := line[:len("DEPRECATED: ")] + c.ctx.Warn(cause, "use `Deprecated: ` (note the casing) instead of `%s`", prefix) +} + +func (c *deprecatedCommentChecker) warnPattern(cause ast.Node) { + c.ctx.Warn(cause, "the proper format is `Deprecated: `") +} + +func (c *deprecatedCommentChecker) warnComma(cause ast.Node) { + c.ctx.Warn(cause, "use `:` instead of `,` in `Deprecated, `") +} + +func (c *deprecatedCommentChecker) warnTypo(cause ast.Node, line string) { + word := strings.Split(line, ":")[0] + c.ctx.Warn(cause, "typo in `%s`; should be `Deprecated`", word) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go new file mode 100644 index 000000000..d8aaaf743 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go @@ -0,0 +1,95 @@ +package checkers + +import ( + "go/ast" + "go/token" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "docStub" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects comments that silence go lint complaints about doc-comment" + info.Before = ` +// Foo ... +func Foo() { +}` + info.After = ` +// (A) - remove the doc-comment stub +func Foo() {} +// (B) - replace it with meaningful comment +// Foo is a demonstration-only function. 
+func Foo() {}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + re := `(?i)^\.\.\.$|^\.$|^xxx\.?$|^whatever\.?$` + c := &docStubChecker{ + ctx: ctx, + stubCommentRE: regexp.MustCompile(re), + } + return c, nil + }) +} + +type docStubChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + stubCommentRE *regexp.Regexp +} + +func (c *docStubChecker) WalkFile(f *ast.File) { + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + c.visitDoc(decl, decl.Name, decl.Doc, false) + case *ast.GenDecl: + if decl.Tok != token.TYPE { + continue + } + if len(decl.Specs) == 1 { + spec := decl.Specs[0].(*ast.TypeSpec) + // Only 1 spec, use doc from the decl itself. + c.visitDoc(spec, spec.Name, decl.Doc, true) + } + // N specs, use per-spec doc. + for _, spec := range decl.Specs { + spec := spec.(*ast.TypeSpec) + c.visitDoc(spec, spec.Name, spec.Doc, true) + } + } + } +} + +func (c *docStubChecker) visitDoc(decl ast.Node, sym *ast.Ident, doc *ast.CommentGroup, article bool) { + if !sym.IsExported() || doc == nil { + return + } + line := strings.TrimSpace(doc.List[0].Text[len("//"):]) + if article { + // Skip optional article. + for _, a := range []string{"The ", "An ", "A "} { + if strings.HasPrefix(line, a) { + line = line[len(a):] + break + } + } + } + if !strings.HasPrefix(line, sym.Name) { + return + } + line = strings.TrimSpace(line[len(sym.Name):]) + // Now try to detect the "stub" part. + if c.stubCommentRE.MatchString(line) { + c.warn(decl) + } +} + +func (c *docStubChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "silencing go lint doc-comment warnings is unadvised") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go new file mode 100644 index 000000000..9f116d781 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go @@ -0,0 +1,133 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "dupArg" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious duplicated arguments" + info.Before = `copy(dst, dst)` + info.After = `copy(dst, src)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &dupArgChecker{ctx: ctx} + // newMatcherFunc returns a function that matches a call if + // args[xIndex] and args[yIndex] are equal. + newMatcherFunc := func(xIndex, yIndex int) func(*ast.CallExpr) bool { + return func(call *ast.CallExpr) bool { + if len(call.Args) <= xIndex || len(call.Args) <= yIndex { + return false + } + x := call.Args[xIndex] + y := call.Args[yIndex] + return astequal.Expr(x, y) + } + } + + // m maps pattern string to a matching function. + // String patterns are used for documentation purposes (readability). + m := map[string]func(*ast.CallExpr) bool{ + "(x, x, ...)": newMatcherFunc(0, 1), + "(x, _, x, ...)": newMatcherFunc(0, 2), + "(_, x, x, ...)": newMatcherFunc(1, 2), + } + + // TODO(quasilyte): handle x.Equal(x) cases. + // Example: *math/Big.Int.Cmp method. + + // TODO(quasilyte): more perky mode that will also + // report things like io.Copy(x, x). 
+ // Probably safe thing to do even without that option + // if `x` is not interface (requires type checks + // that are not incorporated into this checker yet). + + c.matchers = map[string]func(*ast.CallExpr) bool{ + "copy": m["(x, x, ...)"], + + "math.Max": m["(x, x, ...)"], + "math.Min": m["(x, x, ...)"], + + "reflect.Copy": m["(x, x, ...)"], + "reflect.DeepEqual": m["(x, x, ...)"], + + "strings.Contains": m["(x, x, ...)"], + "strings.Compare": m["(x, x, ...)"], + "strings.EqualFold": m["(x, x, ...)"], + "strings.HasPrefix": m["(x, x, ...)"], + "strings.HasSuffix": m["(x, x, ...)"], + "strings.Index": m["(x, x, ...)"], + "strings.LastIndex": m["(x, x, ...)"], + "strings.Split": m["(x, x, ...)"], + "strings.SplitAfter": m["(x, x, ...)"], + "strings.SplitAfterN": m["(x, x, ...)"], + "strings.SplitN": m["(x, x, ...)"], + "strings.Replace": m["(_, x, x, ...)"], + "strings.ReplaceAll": m["(_, x, x, ...)"], + + "bytes.Contains": m["(x, x, ...)"], + "bytes.Compare": m["(x, x, ...)"], + "bytes.Equal": m["(x, x, ...)"], + "bytes.EqualFold": m["(x, x, ...)"], + "bytes.HasPrefix": m["(x, x, ...)"], + "bytes.HasSuffix": m["(x, x, ...)"], + "bytes.Index": m["(x, x, ...)"], + "bytes.LastIndex": m["(x, x, ...)"], + "bytes.Split": m["(x, x, ...)"], + "bytes.SplitAfter": m["(x, x, ...)"], + "bytes.SplitAfterN": m["(x, x, ...)"], + "bytes.SplitN": m["(x, x, ...)"], + "bytes.Replace": m["(_, x, x, ...)"], + "bytes.ReplaceAll": m["(_, x, x, ...)"], + + "types.Identical": m["(x, x, ...)"], + "types.IdenticalIgnoreTags": m["(x, x, ...)"], + + "draw.Draw": m["(x, _, x, ...)"], + + // TODO(quasilyte): more of these. + } + return astwalk.WalkerForExpr(c), nil + }) +} + +type dupArgChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + matchers map[string]func(*ast.CallExpr) bool +} + +func (c *dupArgChecker) VisitExpr(expr ast.Expr) { + call, ok := expr.(*ast.CallExpr) + if !ok { + return + } + + // TODO(quasilyte): this kind of check is needed in multiple + // places and the code is somewhat duplicated around. + // We probably need to stop using qualifiedName for non-experimental checkers. 
+ if calledExpr, ok := call.Fun.(*ast.SelectorExpr); ok { + obj, ok := c.ctx.TypesInfo.ObjectOf(astcast.ToIdent(calledExpr.X)).(*types.PkgName) + if !ok || !isStdlibPkg(obj.Imported()) { + return + } + } + + m := c.matchers[qualifiedName(call.Fun)] + if m != nil && m(call) { + c.warn(call) + } +} + +func (c *dupArgChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "suspicious duplicated args in `%s`", cause) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go new file mode 100644 index 000000000..83de50528 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go @@ -0,0 +1,58 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "dupBranchBody" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects duplicated branch bodies inside conditional statements" + info.Before = ` +if cond { + println("cond=true") +} else { + println("cond=true") +}` + info.After = ` +if cond { + println("cond=true") +} else { + println("cond=false") +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&dupBranchBodyChecker{ctx: ctx}), nil + }) +} + +type dupBranchBodyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *dupBranchBodyChecker) VisitStmt(stmt ast.Stmt) { + // TODO(quasilyte): extend to check switch statements as well. + // Should be very careful with type switches. + + if stmt, ok := stmt.(*ast.IfStmt); ok { + c.checkIf(stmt) + } +} + +func (c *dupBranchBodyChecker) checkIf(stmt *ast.IfStmt) { + thenBody := stmt.Body + elseBody, ok := stmt.Else.(*ast.BlockStmt) + if ok && astequal.Stmt(thenBody, elseBody) { + c.warnIf(stmt) + } +} + +func (c *dupBranchBodyChecker) warnIf(cause ast.Node) { + c.ctx.Warn(cause, "both branches in if statement has same body") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go new file mode 100644 index 000000000..0c1962682 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go @@ -0,0 +1,57 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "dupCase" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects duplicated case clauses inside switch statements" + info.Before = ` +switch x { +case ys[0], ys[1], ys[2], ys[0], ys[4]: +}` + info.After = ` +switch x { +case ys[0], ys[1], ys[2], ys[3], ys[4]: +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&dupCaseChecker{ctx: ctx}), nil + }) +} + +type dupCaseChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + astSet lintutil.AstSet +} + +func (c *dupCaseChecker) VisitStmt(stmt ast.Stmt) { + if stmt, ok := stmt.(*ast.SwitchStmt); ok { + c.checkSwitch(stmt) + } +} + +func (c *dupCaseChecker) checkSwitch(stmt *ast.SwitchStmt) { + c.astSet.Clear() + for i := range stmt.Body.List { + cc := 
stmt.Body.List[i].(*ast.CaseClause) + for _, x := range cc.List { + if !c.astSet.Insert(x) { + c.warn(x) + } + } + } +} + +func (c *dupCaseChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "'case %s' is duplicated", cause) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go new file mode 100644 index 000000000..54658eb9f --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go @@ -0,0 +1,63 @@ +package checkers + +import ( + "fmt" + "go/ast" + + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "dupImport" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects multiple imports of the same package under different aliases" + info.Before = ` +import ( + "fmt" + priting "fmt" // Imported the second time +)` + info.After = ` +import( + "fmt" +)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return &dupImportChecker{ctx: ctx}, nil + }) +} + +type dupImportChecker struct { + ctx *linter.CheckerContext +} + +func (c *dupImportChecker) WalkFile(f *ast.File) { + imports := make(map[string][]*ast.ImportSpec) + for _, importDcl := range f.Imports { + pkg := importDcl.Path.Value + imports[pkg] = append(imports[pkg], importDcl) + } + + for _, importList := range imports { + if len(importList) == 1 { + continue + } + c.warn(importList) + } +} + +func (c *dupImportChecker) warn(importList []*ast.ImportSpec) { + msg := fmt.Sprintf("package is imported %d times under different aliases on lines", len(importList)) + for idx, importDcl := range importList { + switch { + case idx == len(importList)-1: + msg += " and" + case idx > 0: + msg += "," + } + msg += fmt.Sprintf(" %d", c.ctx.FileSet.Position(importDcl.Pos()).Line) + } + for _, importDcl := range importList { + c.ctx.Warn(importDcl, msg) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go new file mode 100644 index 000000000..00f8fd0eb --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go @@ -0,0 +1,102 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "dupSubExpr" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious duplicated sub-expressions" + info.Before = ` +sort.Slice(xs, func(i, j int) bool { + return xs[i].v < xs[i].v // Duplicated index +})` + info.After = ` +sort.Slice(xs, func(i, j int) bool { + return xs[i].v < xs[j].v +})` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &dupSubExprChecker{ctx: ctx} + + ops := []struct { + op token.Token + float bool // Whether float args require special care + }{ + {op: token.LOR}, // x || x + {op: token.LAND}, // x && x + {op: token.OR}, // x | x + {op: token.AND}, // x & x + {op: token.XOR}, // x ^ x + {op: token.LSS}, // x < x + {op: token.GTR}, // x > x + {op: token.AND_NOT}, // x &^ x + {op: token.REM}, // x % x + + {op: token.EQL, float: true}, // x == x + {op: token.NEQ, float: true}, // x != x + {op: token.LEQ, float: true}, // x <= x + {op: token.GEQ, 
float: true}, // x >= x + {op: token.QUO, float: true}, // x / x + {op: token.SUB, float: true}, // x - x + } + + c.opSet = make(map[token.Token]bool) + c.floatOpsSet = make(map[token.Token]bool) + for _, opInfo := range ops { + c.opSet[opInfo.op] = true + if opInfo.float { + c.floatOpsSet[opInfo.op] = true + } + } + + return astwalk.WalkerForExpr(c), nil + }) +} + +type dupSubExprChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + // opSet is a set of binary operations that do not make + // sense with duplicated (same) RHS and LHS. + opSet map[token.Token]bool + + floatOpsSet map[token.Token]bool +} + +func (c *dupSubExprChecker) VisitExpr(expr ast.Expr) { + if expr, ok := expr.(*ast.BinaryExpr); ok { + c.checkBinaryExpr(expr) + } +} + +func (c *dupSubExprChecker) checkBinaryExpr(expr *ast.BinaryExpr) { + if !c.opSet[expr.Op] { + return + } + if c.resultIsFloat(expr.X) && c.floatOpsSet[expr.Op] { + return + } + if typep.SideEffectFree(c.ctx.TypesInfo, expr) && c.opSet[expr.Op] && astequal.Expr(expr.X, expr.Y) { + c.warn(expr) + } +} + +func (c *dupSubExprChecker) resultIsFloat(expr ast.Expr) bool { + typ, ok := c.ctx.TypeOf(expr).(*types.Basic) + return ok && typ.Info()&types.IsFloat != 0 +} + +func (c *dupSubExprChecker) warn(cause *ast.BinaryExpr) { + c.ctx.Warn(cause, "suspicious identical LHS and RHS for `%s` operator", cause.Op) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go new file mode 100644 index 000000000..d017ee6ca --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go @@ -0,0 +1,71 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "elseif" + info.Tags = []string{"style"} + info.Params = linter.CheckerParams{ + "skipBalanced": { + Value: true, + Usage: "whether to skip balanced if-else pairs", + }, + } + info.Summary = "Detects else with nested if statement that can be replaced with else-if" + info.Before = ` +if cond1 { +} else { + if x := cond2; x { + } +}` + info.After = ` +if cond1 { +} else if x := cond2; x { +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &elseifChecker{ctx: ctx} + c.skipBalanced = info.Params.Bool("skipBalanced") + return astwalk.WalkerForStmt(c), nil + }) +} + +type elseifChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + skipBalanced bool +} + +func (c *elseifChecker) VisitStmt(stmt ast.Stmt) { + if stmt, ok := stmt.(*ast.IfStmt); ok { + elseBody, ok := stmt.Else.(*ast.BlockStmt) + if !ok || len(elseBody.List) != 1 { + return + } + innerIfStmt, ok := elseBody.List[0].(*ast.IfStmt) + if !ok { + return + } + balanced := len(stmt.Body.List) == 1 && + astp.IsIfStmt(stmt.Body.List[0]) + if balanced && c.skipBalanced { + return // Configured to skip balanced statements + } + if innerIfStmt.Else != nil { + return + } + c.warn(stmt.Else) + } +} + +func (c *elseifChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "can replace 'else {if cond {}}' with 'else if cond {}'") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go new file mode 100644 index 000000000..ebb8dad45 --- /dev/null +++ 
b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go @@ -0,0 +1,70 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "emptyFallthrough" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects fallthrough that can be avoided by using multi case values" + info.Before = `switch kind { +case reflect.Int: + fallthrough +case reflect.Int32: + return Int +}` + info.After = `switch kind { +case reflect.Int, reflect.Int32: + return Int +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&emptyFallthroughChecker{ctx: ctx}), nil + }) +} + +type emptyFallthroughChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *emptyFallthroughChecker) VisitStmt(stmt ast.Stmt) { + ss, ok := stmt.(*ast.SwitchStmt) + if !ok { + return + } + + prevCaseDefault := false + for i := len(ss.Body.List) - 1; i >= 0; i-- { + if cc, ok := ss.Body.List[i].(*ast.CaseClause); ok { + warn := false + if len(cc.Body) == 1 { + if bs, ok := cc.Body[0].(*ast.BranchStmt); ok && bs.Tok == token.FALLTHROUGH { + warn = true + if prevCaseDefault { + c.warnDefault(bs) + } else if cc.List != nil { + c.warn(bs) + } + } + } + if !warn { + prevCaseDefault = cc.List == nil + } + } + } +} + +func (c *emptyFallthroughChecker) warnDefault(cause ast.Node) { + c.ctx.Warn(cause, "remove empty case containing only fallthrough to default case") +} + +func (c *emptyFallthroughChecker) warn(cause ast.Node) { + c.ctx.Warn(cause, "replace empty case containing only fallthrough with expression list") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go new file mode 100644 index 000000000..27ccbd2f2 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go @@ -0,0 +1,58 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "emptyStringTest" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects empty string checks that can be written more idiomatically" + info.Before = `len(s) == 0` + info.After = `s == ""` + info.Note = "See https://dmitri.shuralyov.com/idiomatic-go#empty-string-check." 
+ + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&emptyStringTestChecker{ctx: ctx}), nil + }) +} + +type emptyStringTestChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *emptyStringTestChecker) VisitExpr(e ast.Expr) { + cmp := astcast.ToBinaryExpr(e) + if cmp.Op != token.EQL && cmp.Op != token.NEQ { + return + } + lenCall := astcast.ToCallExpr(cmp.X) + if astcast.ToIdent(lenCall.Fun).Name != "len" { + return + } + s := lenCall.Args[0] + if !typep.HasStringProp(c.ctx.TypeOf(s)) { + return + } + zero := astcast.ToBasicLit(cmp.Y) + if zero.Value != "0" { + return + } + c.warn(cmp, s) +} + +func (c *emptyStringTestChecker) warn(cmp *ast.BinaryExpr, s ast.Expr) { + suggest := astcopy.BinaryExpr(cmp) + suggest.X = s + suggest.Y = &ast.BasicLit{Value: `""`} + c.ctx.Warn(cmp, "replace `%s` with `%s`", cmp, suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go b/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go new file mode 100644 index 000000000..13f7fdbe2 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go @@ -0,0 +1,87 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "equalFold" + info.Tags = []string{"performance", "experimental"} + info.Summary = "Detects unoptimal strings/bytes case-insensitive comparison" + info.Before = `strings.ToLower(x) == strings.ToLower(y)` + info.After = `strings.EqualFold(x, y)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&equalFoldChecker{ctx: ctx}), nil + }) +} + +type equalFoldChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *equalFoldChecker) VisitExpr(e ast.Expr) { + switch e := e.(type) { + case *ast.CallExpr: + c.checkBytes(e) + case *ast.BinaryExpr: + c.checkStrings(e) + } +} + +// uncaseCall simplifies lower(x) or upper(x) to x. +// If no simplification is applied, second return value is false. 
+func (c *equalFoldChecker) uncaseCall(x ast.Expr, lower, upper string) (ast.Expr, bool) { + call := astcast.ToCallExpr(x) + name := qualifiedName(call.Fun) + if name != lower && name != upper { + return x, false + } + return call.Args[0], true +} + +func (c *equalFoldChecker) checkBytes(expr *ast.CallExpr) { + if qualifiedName(expr.Fun) != "bytes.Equal" { + return + } + + x, ok1 := c.uncaseCall(expr.Args[0], "bytes.ToLower", "bytes.ToUpper") + y, ok2 := c.uncaseCall(expr.Args[1], "bytes.ToLower", "bytes.ToUpper") + if !ok1 && !ok2 { + return + } + if !astequal.Expr(x, y) { + c.warnBytes(expr, x, y) + } +} + +func (c *equalFoldChecker) checkStrings(expr *ast.BinaryExpr) { + if expr.Op != token.EQL && expr.Op != token.NEQ { + return + } + + x, ok1 := c.uncaseCall(expr.X, "strings.ToLower", "strings.ToUpper") + y, ok2 := c.uncaseCall(expr.Y, "strings.ToLower", "strings.ToUpper") + if !ok1 && !ok2 { + return + } + if !astequal.Expr(x, y) { + c.warnStrings(expr, x, y) + } +} + +func (c *equalFoldChecker) warnStrings(cause ast.Node, x, y ast.Expr) { + c.ctx.Warn(cause, "consider replacing with strings.EqualFold(%s, %s)", x, y) +} + +func (c *equalFoldChecker) warnBytes(cause ast.Node, x, y ast.Expr) { + c.ctx.Warn(cause, "consider replacing with bytes.EqualFold(%s, %s)", x, y) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go new file mode 100644 index 000000000..6ba07fe86 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go @@ -0,0 +1,87 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "evalOrder" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects unwanted dependencies on the evaluation order" + info.Before = `return x, f(&x)` + info.After = ` +err := f(&x) +return x, err +` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&evalOrderChecker{ctx: ctx}), nil + }) +} + +type evalOrderChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *evalOrderChecker) VisitStmt(stmt ast.Stmt) { + ret := astcast.ToReturnStmt(stmt) + if len(ret.Results) < 2 { + return + } + + // TODO(quasilyte): handle selector expressions like o.val in addition + // to bare identifiers. + addrTake := &ast.UnaryExpr{Op: token.AND} + for _, res := range ret.Results { + id, ok := res.(*ast.Ident) + if !ok { + continue + } + addrTake.X = id // addrTake is &id now + for _, res := range ret.Results { + call, ok := res.(*ast.CallExpr) + if !ok { + continue + } + + // 1. Check if there is a call in form of id.method() where + // method takes id by a pointer. + if sel, ok := call.Fun.(*ast.SelectorExpr); ok { + if astequal.Node(sel.X, id) && c.hasPtrRecv(sel.Sel) { + c.warn(call) + } + } + + // 2. Check that there is no call that uses &id as an argument. 
+ dependency := lintutil.ContainsNode(call, func(n ast.Node) bool { + return astequal.Node(addrTake, n) + }) + if dependency { + c.warn(call) + } + } + } +} + +func (c *evalOrderChecker) hasPtrRecv(fn *ast.Ident) bool { + sig, ok := c.ctx.TypeOf(fn).(*types.Signature) + if !ok { + return false + } + return typep.IsPointer(sig.Recv().Type()) +} + +func (c *evalOrderChecker) warn(call *ast.CallExpr) { + c.ctx.Warn(call, "may want to evaluate %s before the return statement", call) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go new file mode 100644 index 000000000..63e0049f2 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go @@ -0,0 +1,84 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astfmt" + "github.com/go-toolsmith/astp" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "exitAfterDefer" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects calls to exit/fatal inside functions that use defer" + info.Before = ` +defer os.Remove(filename) +if bad { + log.Fatalf("something bad happened") +}` + info.After = ` +defer os.Remove(filename) +if bad { + log.Printf("something bad happened") + return +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(&exitAfterDeferChecker{ctx: ctx}), nil + }) +} + +type exitAfterDeferChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *exitAfterDeferChecker) VisitFuncDecl(fn *ast.FuncDecl) { + // TODO(quasilyte): handle goto and other kinds of flow that break + // the algorithm below that expects the latter statement to be + // executed after the ones that come before it. + + var deferStmt *ast.DeferStmt + pre := func(cur *astutil.Cursor) bool { + // Don't recurse into local anonymous functions. + return !astp.IsFuncLit(cur.Node()) + } + post := func(cur *astutil.Cursor) bool { + switch n := cur.Node().(type) { + case *ast.DeferStmt: + deferStmt = n + case *ast.CallExpr: + // See #995. We allow `defer os.Exit()` calls + // as it's harder to determine whether they're going + // to clutter anything without actually trying to + // simulate the defer stack + understanding the control flow. + // TODO: can we use CFG here? + if _, ok := cur.Parent().(*ast.DeferStmt); ok { + return true + } + if deferStmt != nil { + switch qualifiedName(n.Fun) { + case "log.Fatal", "log.Fatalf", "log.Fatalln", "os.Exit": + c.warn(n, deferStmt) + return false + } + } + } + return true + } + astutil.Apply(fn.Body, pre, post) +} + +func (c *exitAfterDeferChecker) warn(cause *ast.CallExpr, deferStmt *ast.DeferStmt) { + s := astfmt.Sprint(deferStmt) + if fnlit, ok := deferStmt.Call.Fun.(*ast.FuncLit); ok { + // To avoid long and multi-line warning messages, + // collapse the function literals. 
+ s = "defer " + astfmt.Sprint(fnlit.Type) + "{...}(...)" + } + c.ctx.Warn(cause, "%s will exit, and `%s` will not run", cause.Fun, s) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go new file mode 100644 index 000000000..698f5366d --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go @@ -0,0 +1,50 @@ +package checkers + +import ( + "go/ast" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "filepathJoin" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects problems in filepath.Join() function calls" + info.Before = `filepath.Join("dir/", filename)` + info.After = `filepath.Join("dir", filename)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&filepathJoinChecker{ctx: ctx}), nil + }) +} + +type filepathJoinChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *filepathJoinChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + if qualifiedName(call.Fun) != "filepath.Join" { + return + } + + for _, arg := range call.Args { + arg, ok := arg.(*ast.BasicLit) + if ok && c.hasSeparator(arg) { + c.warnSeparator(arg) + } + } +} + +func (c *filepathJoinChecker) hasSeparator(v *ast.BasicLit) bool { + return strings.ContainsAny(v.Value, `/\`) +} + +func (c *filepathJoinChecker) warnSeparator(sep ast.Expr) { + c.ctx.Warn(sep, "%s contains a path separator", sep) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go new file mode 100644 index 000000000..3fe5e52fb --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go @@ -0,0 +1,65 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "flagDeref" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects immediate dereferencing of `flag` package pointers" + info.Details = "Suggests to use pointer to array to avoid the copy using `&` on range expression." 
+ info.Before = `b := *flag.Bool("b", false, "b docs")` + info.After = ` +var b bool +flag.BoolVar(&b, "b", false, "b docs")` + info.Note = ` +Dereferencing returned pointers will lead to hard to find errors +where flag values are not updated after flag.Parse().` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &flagDerefChecker{ + ctx: ctx, + flagPtrFuncs: map[string]bool{ + "flag.Bool": true, + "flag.Duration": true, + "flag.Float64": true, + "flag.Int": true, + "flag.Int64": true, + "flag.String": true, + "flag.Uint": true, + "flag.Uint64": true, + }, + } + return astwalk.WalkerForExpr(c), nil + }) +} + +type flagDerefChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + flagPtrFuncs map[string]bool +} + +func (c *flagDerefChecker) VisitExpr(expr ast.Expr) { + if expr, ok := expr.(*ast.StarExpr); ok { + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return + } + called := qualifiedName(call.Fun) + if c.flagPtrFuncs[called] { + c.warn(expr, called+"Var") + } + } +} + +func (c *flagDerefChecker) warn(x ast.Node, suggestion string) { + c.ctx.Warn(x, "immediate deref in %s is most likely an error; consider using %s", + x, suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go new file mode 100644 index 000000000..7f6ce3c01 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go @@ -0,0 +1,88 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "go/types" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "flagName" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious flag names" + info.Before = `b := flag.Bool(" foo ", false, "description")` + info.After = `b := flag.Bool("foo", false, "description")` + info.Note = "https://github.com/golang/go/issues/41792" + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&flagNameChecker{ctx: ctx}), nil + }) +} + +type flagNameChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *flagNameChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + calledExpr := astcast.ToSelectorExpr(call.Fun) + obj, ok := c.ctx.TypesInfo.ObjectOf(astcast.ToIdent(calledExpr.X)).(*types.PkgName) + if !ok { + return + } + sym := calledExpr.Sel + pkg := obj.Imported() + if pkg.Path() != "flag" { + return + } + + switch sym.Name { + case "Bool", "Duration", "Float64", "String", + "Int", "Int64", "Uint", "Uint64": + c.checkFlagName(call, call.Args[0]) + case "BoolVar", "DurationVar", "Float64Var", "StringVar", + "IntVar", "Int64Var", "UintVar", "Uint64Var": + c.checkFlagName(call, call.Args[1]) + } +} + +func (c *flagNameChecker) checkFlagName(call *ast.CallExpr, arg ast.Expr) { + cv := c.ctx.TypesInfo.Types[arg].Value + if cv == nil { + return // Non-constant name + } + name := constant.StringVal(cv) + switch { + case name == "": + c.warnEmpty(call) + case strings.HasPrefix(name, "-"): + c.warnHypenPrefix(call, name) + case strings.Contains(name, "="): + c.warnEq(call, name) + case strings.Contains(name, " "): + c.warnWhitespace(call, name) + } +} + +func (c *flagNameChecker) warnEmpty(cause ast.Node) { + c.ctx.Warn(cause, "empty flag name") +} + 
+func (c *flagNameChecker) warnHypenPrefix(cause ast.Node, name string) { + c.ctx.Warn(cause, "flag name %q should not start with a hypen", name) +} + +func (c *flagNameChecker) warnEq(cause ast.Node, name string) { + c.ctx.Warn(cause, "flag name %q should not contain '='", name) +} + +func (c *flagNameChecker) warnWhitespace(cause ast.Node, name string) { + c.ctx.Warn(cause, "flag name %q contains whitespace", name) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go new file mode 100644 index 000000000..ae61a1125 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go @@ -0,0 +1,60 @@ +package checkers + +import ( + "go/ast" + "go/token" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "hexLiteral" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects hex literals that have mixed case letter digits" + info.Before = ` +x := 0X12 +y := 0xfF` + info.After = ` +x := 0x12 +// (A) +y := 0xff +// (B) +y := 0xFF` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&hexLiteralChecker{ctx: ctx}), nil + }) +} + +type hexLiteralChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *hexLiteralChecker) warn0X(lit *ast.BasicLit) { + suggest := "0x" + lit.Value[len("0X"):] + c.ctx.Warn(lit, "prefer 0x over 0X, s/%s/%s/", lit.Value, suggest) +} + +func (c *hexLiteralChecker) warnMixedDigits(lit *ast.BasicLit) { + c.ctx.Warn(lit, "don't mix hex literal letter digits casing") +} + +func (c *hexLiteralChecker) VisitExpr(expr ast.Expr) { + lit := astcast.ToBasicLit(expr) + if lit.Kind != token.INT || len(lit.Value) < 3 { + return + } + if strings.HasPrefix(lit.Value, "0X") { + c.warn0X(lit) + return + } + digits := lit.Value[len("0x"):] + if strings.ToLower(digits) != digits && strings.ToUpper(digits) != digits { + c.warnMixedDigits(lit) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go new file mode 100644 index 000000000..c430431a7 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go @@ -0,0 +1,63 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "hugeParam" + info.Tags = []string{"performance"} + info.Params = linter.CheckerParams{ + "sizeThreshold": { + Value: 80, + Usage: "size in bytes that makes the warning trigger", + }, + } + info.Summary = "Detects params that incur excessive amount of copying" + info.Before = `func f(x [1024]int) {}` + info.After = `func f(x *[1024]int) {}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(&hugeParamChecker{ + ctx: ctx, + sizeThreshold: int64(info.Params.Int("sizeThreshold")), + }), nil + }) +} + +type hugeParamChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + sizeThreshold int64 +} + +func (c *hugeParamChecker) VisitFuncDecl(decl *ast.FuncDecl) { + // TODO(quasilyte): maybe it's worthwhile to permit skipping + // test files 
for this checker? + if decl.Recv != nil { + c.checkParams(decl.Recv.List) + } + c.checkParams(decl.Type.Params.List) +} + +func (c *hugeParamChecker) checkParams(params []*ast.Field) { + for _, p := range params { + for _, id := range p.Names { + typ := c.ctx.TypeOf(id) + size := c.ctx.SizesInfo.Sizeof(typ) + if size >= c.sizeThreshold { + c.warn(id, size) + } + } + } +} + +func (c *hugeParamChecker) warn(cause *ast.Ident, size int64) { + c.ctx.Warn(cause, "%s is heavy (%d bytes); consider passing it by pointer", + cause, size) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go new file mode 100644 index 000000000..b1fcf4147 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go @@ -0,0 +1,99 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "ifElseChain" + info.Tags = []string{"style"} + info.Summary = "Detects repeated if-else statements and suggests to replace them with switch statement" + info.Before = ` +if cond1 { + // Code A. +} else if cond2 { + // Code B. +} else { + // Code C. +}` + info.After = ` +switch { +case cond1: + // Code A. +case cond2: + // Code B. +default: + // Code C. +}` + info.Note = ` +Permits single else or else-if; repeated else-if or else + else-if +will trigger suggestion to use switch statement. +See [EffectiveGo#switch](https://golang.org/doc/effective_go.html#switch).` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&ifElseChainChecker{ctx: ctx}), nil + }) +} + +type ifElseChainChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + cause *ast.IfStmt + visited map[*ast.IfStmt]bool +} + +func (c *ifElseChainChecker) EnterFunc(fn *ast.FuncDecl) bool { + if fn.Body == nil { + return false + } + c.visited = make(map[*ast.IfStmt]bool) + return true +} + +func (c *ifElseChainChecker) VisitStmt(stmt ast.Stmt) { + if stmt, ok := stmt.(*ast.IfStmt); ok { + if c.visited[stmt] { + return + } + c.cause = stmt + c.checkIfStmt(stmt) + } +} + +func (c *ifElseChainChecker) checkIfStmt(stmt *ast.IfStmt) { + const minThreshold = 2 + if c.countIfelseLen(stmt) >= minThreshold { + c.warn() + } +} + +func (c *ifElseChainChecker) countIfelseLen(stmt *ast.IfStmt) int { + count := 0 + for { + switch e := stmt.Else.(type) { + case *ast.IfStmt: + if e.Init != nil { + return 0 // Give up + } + // Else if. + stmt = e + count++ + c.visited[e] = true + case *ast.BlockStmt: + // Else branch. + return count + 1 + default: + // No else or else if. 
+ return count + } + } +} + +func (c *ifElseChainChecker) warn() { + c.ctx.Warn(c.cause, "rewrite if-else to switch statement") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go new file mode 100644 index 000000000..5ac711fc1 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go @@ -0,0 +1,47 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "importShadow" + info.Tags = []string{"style", "opinionated"} + info.Summary = "Detects when imported package names shadowed in the assignments" + info.Before = ` +// "path/filepath" is imported. +filepath := "foo.txt"` + info.After = ` +filename := "foo.txt"` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + ctx.Require.PkgObjects = true + return astwalk.WalkerForLocalDef(&importShadowChecker{ctx: ctx}, ctx.TypesInfo), nil + }) +} + +type importShadowChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *importShadowChecker) VisitLocalDef(def astwalk.Name, _ ast.Expr) { + for pkgObj, name := range c.ctx.PkgObjects { + if name == def.ID.Name && name != "_" { + c.warn(def.ID, name, pkgObj.Imported()) + } + } +} + +func (c *importShadowChecker) warn(id ast.Node, importedName string, pkg *types.Package) { + if isStdlibPkg(pkg) { + c.ctx.Warn(id, "shadow of imported package '%s'", importedName) + } else { + c.ctx.Warn(id, "shadow of imported from '%s' package '%s'", pkg.Path(), importedName) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go b/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go new file mode 100644 index 000000000..908285c03 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go @@ -0,0 +1,50 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "indexAlloc" + info.Tags = []string{"performance"} + info.Summary = "Detects strings.Index calls that may cause unwanted allocs" + info.Before = `strings.Index(string(x), y)` + info.After = `bytes.Index(x, []byte(y))` + info.Note = `See Go issue for details: https://github.com/golang/go/issues/25864` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&indexAllocChecker{ctx: ctx}), nil + }) +} + +type indexAllocChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *indexAllocChecker) VisitExpr(e ast.Expr) { + call := astcast.ToCallExpr(e) + if qualifiedName(call.Fun) != "strings.Index" { + return + } + stringConv := astcast.ToCallExpr(call.Args[0]) + if qualifiedName(stringConv.Fun) != "string" { + return + } + x := stringConv.Args[0] + y := call.Args[1] + if typep.SideEffectFree(c.ctx.TypesInfo, x) && typep.SideEffectFree(c.ctx.TypesInfo, y) { + c.warn(e, x, y) + } +} + +func (c *indexAllocChecker) warn(cause ast.Node, x, y ast.Expr) { + c.ctx.Warn(cause, "consider replacing %s with bytes.Index(%s, []byte(%s))", + cause, x, y) +} diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go new file mode 100644 index 000000000..a1b6b2a8a --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go @@ -0,0 +1,56 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "initClause" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Summary = "Detects non-assignment statements inside if/switch init clause" + info.Before = `if sideEffect(); cond { +}` + info.After = `sideEffect() +if cond { +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&initClauseChecker{ctx: ctx}), nil + }) +} + +type initClauseChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *initClauseChecker) VisitStmt(stmt ast.Stmt) { + initClause := c.getInitClause(stmt) + if initClause != nil && !astp.IsAssignStmt(initClause) { + c.warn(stmt, initClause) + } +} + +func (c *initClauseChecker) getInitClause(x ast.Stmt) ast.Stmt { + switch x := x.(type) { + case *ast.IfStmt: + return x.Init + case *ast.SwitchStmt: + return x.Init + default: + return nil + } +} + +func (c *initClauseChecker) warn(stmt, clause ast.Stmt) { + name := "if" + if astp.IsSwitchStmt(stmt) { + name = "switch" + } + c.ctx.Warn(stmt, "consider to move `%s` before %s", clause, name) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go new file mode 100644 index 000000000..6c60e3fed --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go @@ -0,0 +1,41 @@ +package astwalk + +import ( + "go/ast" + "strings" +) + +type commentWalker struct { + visitor CommentVisitor +} + +func (w *commentWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, cg := range f.Comments { + visitCommentGroups(cg, w.visitor.VisitComment) + } +} + +func visitCommentGroups(cg *ast.CommentGroup, visit func(*ast.CommentGroup)) { + var group []*ast.Comment + visitGroup := func(list []*ast.Comment) { + if len(list) == 0 { + return + } + cg := &ast.CommentGroup{List: list} + visit(cg) + } + for _, comment := range cg.List { + if strings.HasPrefix(comment.Text, "/*") { + visitGroup(group) + group = group[:0] + visitGroup([]*ast.Comment{comment}) + } else { + group = append(group, comment) + } + } + visitGroup(group) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go new file mode 100644 index 000000000..39b536508 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go @@ -0,0 +1,48 @@ +package astwalk + +import ( + "go/ast" +) + +type docCommentWalker struct { + visitor DocCommentVisitor +} + +func (w *docCommentWalker) WalkFile(f *ast.File) { + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + if decl.Doc != nil { + w.visitor.VisitDocComment(decl.Doc) + } + case *ast.GenDecl: + if decl.Doc != nil { + w.visitor.VisitDocComment(decl.Doc) + } + for _, spec := range 
decl.Specs { + switch spec := spec.(type) { + case *ast.ImportSpec: + if spec.Doc != nil { + w.visitor.VisitDocComment(spec.Doc) + } + case *ast.ValueSpec: + if spec.Doc != nil { + w.visitor.VisitDocComment(spec.Doc) + } + case *ast.TypeSpec: + if spec.Doc != nil { + w.visitor.VisitDocComment(spec.Doc) + } + ast.Inspect(spec.Type, func(n ast.Node) bool { + if n, ok := n.(*ast.Field); ok { + if n.Doc != nil { + w.visitor.VisitDocComment(n.Doc) + } + } + return true + }) + } + } + } + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go new file mode 100644 index 000000000..de66c1081 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go @@ -0,0 +1,31 @@ +package astwalk + +import ( + "go/ast" +) + +type exprWalker struct { + visitor ExprVisitor +} + +func (w *exprWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + if !w.visitor.EnterFunc(decl) { + continue + } + } + + ast.Inspect(decl, func(x ast.Node) bool { + if x, ok := x.(ast.Expr); ok { + w.visitor.VisitExpr(x) + return !w.visitor.skipChilds() + } + return true + }) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go new file mode 100644 index 000000000..c7e3a4371 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go @@ -0,0 +1,23 @@ +package astwalk + +import ( + "go/ast" +) + +type funcDeclWalker struct { + visitor FuncDeclVisitor +} + +func (w *funcDeclWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + w.visitor.VisitFuncDecl(decl) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go new file mode 100644 index 000000000..e042f0d5e --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go @@ -0,0 +1,32 @@ +package astwalk + +import ( + "go/ast" +) + +type localCommentWalker struct { + visitor LocalCommentVisitor +} + +func (w *localCommentWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + + for _, cg := range f.Comments { + // Not sure that decls/comments are sorted + // by positions, so do a naive full scan for now. + if cg.Pos() < decl.Pos() || cg.Pos() > decl.End() { + continue + } + + visitCommentGroups(cg, w.visitor.VisitLocalComment) + } + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go new file mode 100644 index 000000000..bed0f44ab --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go @@ -0,0 +1,51 @@ +package astwalk + +import ( + "go/ast" +) + +// LocalDefVisitor visits every name definitions inside a function. 
+// +// Next elements are considered as name definitions: +// - Function parameters (input, output, receiver) +// - Every LHS of ":=" assignment that defines a new name +// - Every local var/const declaration. +// +// NOTE: this visitor is experimental. +// This is also why it lives in a separate file. +type LocalDefVisitor interface { + walkerEvents + VisitLocalDef(Name, ast.Expr) +} + +type ( + // NameKind describes what kind of name Name object holds. + NameKind int + + // Name holds ver/const/param definition symbol info. + Name struct { + ID *ast.Ident + Kind NameKind + + // Index is NameVar-specific field that is used to + // specify nth tuple element being assigned to the name. + Index int + } +) + +// NOTE: set of name kinds is not stable and may change over time. +// +// TODO(quasilyte): is NameRecv/NameParam/NameResult granularity desired? +// TODO(quasilyte): is NameVar/NameBind (var vs :=) granularity desired? +const ( + // NameParam is function/method receiver/input/output name. + // Initializing expression is always nil. + NameParam NameKind = iota + // NameVar is var or ":=" declared name. + // Initizlizing expression may be nil for var-declared names + // without explicit initializing expression. + NameVar + // NameConst is const-declared name. + // Initializing expression is never nil. + NameConst +) diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go new file mode 100644 index 000000000..f6808cbb4 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go @@ -0,0 +1,118 @@ +package astwalk + +import ( + "go/ast" + "go/token" + "go/types" +) + +type localDefWalker struct { + visitor LocalDefVisitor + info *types.Info +} + +func (w *localDefWalker) WalkFile(f *ast.File) { + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + w.walkFunc(decl) + } +} + +func (w *localDefWalker) walkFunc(decl *ast.FuncDecl) { + w.walkSignature(decl) + w.walkFuncBody(decl) +} + +func (w *localDefWalker) walkFuncBody(decl *ast.FuncDecl) { + ast.Inspect(decl.Body, func(x ast.Node) bool { + switch x := x.(type) { + case *ast.AssignStmt: + if x.Tok != token.DEFINE { + return false + } + if len(x.Lhs) != len(x.Rhs) { + // Multi-value assignment. + // Invariant: there is only 1 RHS. + for i, lhs := range x.Lhs { + id, ok := lhs.(*ast.Ident) + if !ok || w.info.Defs[id] == nil { + continue + } + def := Name{ID: id, Kind: NameVar, Index: i} + w.visitor.VisitLocalDef(def, x.Rhs[0]) + } + } else { + // Simple 1-1 assignments. + for i, lhs := range x.Lhs { + id, ok := lhs.(*ast.Ident) + if !ok || w.info.Defs[id] == nil { + continue + } + def := Name{ID: id, Kind: NameVar} + w.visitor.VisitLocalDef(def, x.Rhs[i]) + } + } + return false + + case *ast.GenDecl: + // Decls always introduce new names. + for _, spec := range x.Specs { + spec, ok := spec.(*ast.ValueSpec) + if !ok { // Ignore type/import specs + return false + } + switch { + case len(spec.Values) == 0: + // var-specific decls without explicit init. + for _, id := range spec.Names { + def := Name{ID: id, Kind: NameVar} + w.visitor.VisitLocalDef(def, nil) + } + case len(spec.Names) != len(spec.Values): + // var-specific decls that assign tuple results. 
+ for i, id := range spec.Names { + def := Name{ID: id, Kind: NameVar, Index: i} + w.visitor.VisitLocalDef(def, spec.Values[0]) + } + default: + // Can be either var or const decl. + kind := NameVar + if x.Tok == token.CONST { + kind = NameConst + } + for i, id := range spec.Names { + def := Name{ID: id, Kind: kind} + w.visitor.VisitLocalDef(def, spec.Values[i]) + } + } + } + return false + } + + return true + }) +} + +func (w *localDefWalker) walkSignature(decl *ast.FuncDecl) { + for _, p := range decl.Type.Params.List { + for _, id := range p.Names { + def := Name{ID: id, Kind: NameParam} + w.visitor.VisitLocalDef(def, nil) + } + } + if decl.Type.Results != nil { + for _, p := range decl.Type.Results.List { + for _, id := range p.Names { + def := Name{ID: id, Kind: NameParam} + w.visitor.VisitLocalDef(def, nil) + } + } + } + if decl.Recv != nil && len(decl.Recv.List[0].Names) != 0 { + def := Name{ID: decl.Recv.List[0].Names[0], Kind: NameParam} + w.visitor.VisitLocalDef(def, nil) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go new file mode 100644 index 000000000..e455b3f8b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go @@ -0,0 +1,29 @@ +package astwalk + +import ( + "go/ast" +) + +type localExprWalker struct { + visitor LocalExprVisitor +} + +func (w *localExprWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + ast.Inspect(decl.Body, func(x ast.Node) bool { + if x, ok := x.(ast.Expr); ok { + w.visitor.VisitLocalExpr(x) + return !w.visitor.skipChilds() + } + return true + }) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go new file mode 100644 index 000000000..45c406e7e --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go @@ -0,0 +1,33 @@ +package astwalk + +import ( + "go/ast" +) + +type stmtListWalker struct { + visitor StmtListVisitor +} + +func (w *stmtListWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + ast.Inspect(decl.Body, func(x ast.Node) bool { + switch x := x.(type) { + case *ast.BlockStmt: + w.visitor.VisitStmtList(x.List) + case *ast.CaseClause: + w.visitor.VisitStmtList(x.Body) + case *ast.CommClause: + w.visitor.VisitStmtList(x.Body) + } + return !w.visitor.skipChilds() + }) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go new file mode 100644 index 000000000..912de867d --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go @@ -0,0 +1,29 @@ +package astwalk + +import ( + "go/ast" +) + +type stmtWalker struct { + visitor StmtVisitor +} + +func (w *stmtWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || !w.visitor.EnterFunc(decl) { + continue + } + ast.Inspect(decl.Body, func(x ast.Node) bool { + if x, ok := 
x.(ast.Stmt); ok { + w.visitor.VisitStmt(x) + return !w.visitor.skipChilds() + } + return true + }) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go new file mode 100644 index 000000000..24c150084 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go @@ -0,0 +1,114 @@ +package astwalk + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/typep" +) + +type typeExprWalker struct { + visitor TypeExprVisitor + info *types.Info +} + +func (w *typeExprWalker) WalkFile(f *ast.File) { + if !w.visitor.EnterFile(f) { + return + } + + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + if !w.visitor.EnterFunc(decl) { + continue + } + } + switch decl := decl.(type) { + case *ast.FuncDecl: + if !w.visitor.EnterFunc(decl) { + continue + } + w.walkSignature(decl.Type) + ast.Inspect(decl.Body, w.walk) + case *ast.GenDecl: + if decl.Tok == token.IMPORT { + continue + } + ast.Inspect(decl, w.walk) + } + } +} + +func (w *typeExprWalker) visit(x ast.Expr) bool { + w.visitor.VisitTypeExpr(x) + return !w.visitor.skipChilds() +} + +func (w *typeExprWalker) walk(x ast.Node) bool { + switch x := x.(type) { + case *ast.ParenExpr: + if typep.IsTypeExpr(w.info, x.X) { + return w.visit(x) + } + return true + case *ast.CallExpr: + // Pointer conversions require parenthesis around pointer type. + // These casts are represented as call expressions. + // Because it's impossible for the visitor to distinguish such + // "required" parenthesis, walker skips outmost parenthesis in such cases. + return w.inspectInner(x.Fun) + case *ast.SelectorExpr: + // Like with conversions, method expressions are another special. + return w.inspectInner(x.X) + case *ast.StarExpr: + if typep.IsTypeExpr(w.info, x.X) { + return w.visit(x) + } + return true + case *ast.MapType: + return w.visit(x) + case *ast.FuncType: + return w.visit(x) + case *ast.StructType: + return w.visit(x) + case *ast.InterfaceType: + if !w.visit(x) { + return false + } + for _, method := range x.Methods.List { + switch x := method.Type.(type) { + case *ast.FuncType: + w.walkSignature(x) + default: + // Embedded interface. + w.walk(x) + } + } + return false + case *ast.ArrayType: + return w.visit(x) + } + return true +} + +func (w *typeExprWalker) inspectInner(x ast.Expr) bool { + parens, ok := x.(*ast.ParenExpr) + if ok && typep.IsTypeExpr(w.info, parens.X) && astp.IsStarExpr(parens.X) { + ast.Inspect(parens.X, w.walk) + return false + } + return true +} + +func (w *typeExprWalker) walkSignature(typ *ast.FuncType) { + for _, p := range typ.Params.List { + ast.Inspect(p.Type, w.walk) + } + if typ.Results != nil { + for _, p := range typ.Results.List { + ast.Inspect(p.Type, w.walk) + } + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go new file mode 100644 index 000000000..9f973a2b3 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go @@ -0,0 +1,80 @@ +package astwalk + +import ( + "go/ast" +) + +// Visitor interfaces. +type ( + // DocCommentVisitor visits every doc-comment. + // Does not visit doc-comments for function-local definitions (types, etc). + // Also does not visit package doc-comment (file-level doc-comments). 
+ DocCommentVisitor interface { + VisitDocComment(*ast.CommentGroup) + } + + // FuncDeclVisitor visits every top-level function declaration. + FuncDeclVisitor interface { + walkerEvents + VisitFuncDecl(*ast.FuncDecl) + } + + // ExprVisitor visits every expression inside AST file. + ExprVisitor interface { + walkerEvents + VisitExpr(ast.Expr) + } + + // LocalExprVisitor visits every expression inside function body. + LocalExprVisitor interface { + walkerEvents + VisitLocalExpr(ast.Expr) + } + + // StmtListVisitor visits every statement list inside function body. + // This includes block statement bodies as well as implicit blocks + // introduced by case clauses and alike. + StmtListVisitor interface { + walkerEvents + VisitStmtList([]ast.Stmt) + } + + // StmtVisitor visits every statement inside function body. + StmtVisitor interface { + walkerEvents + VisitStmt(ast.Stmt) + } + + // TypeExprVisitor visits every type describing expression. + // It also traverses struct types and interface types to run + // checker over their fields/method signatures. + TypeExprVisitor interface { + walkerEvents + VisitTypeExpr(ast.Expr) + } + + // LocalCommentVisitor visits every comment inside function body. + LocalCommentVisitor interface { + walkerEvents + VisitLocalComment(*ast.CommentGroup) + } + + // CommentVisitor visits every comment. + CommentVisitor interface { + walkerEvents + VisitComment(*ast.CommentGroup) + } +) + +// walkerEvents describes common hooks available for most visitor types. +type walkerEvents interface { + // EnterFile is called for every file that is about to be traversed. + // If false is returned, file is not visited. + EnterFile(*ast.File) bool + + // EnterFunc is called for every function declaration that is about + // to be traversed. If false is returned, function is not visited. + EnterFunc(*ast.FuncDecl) bool + + skipChilds() bool +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go new file mode 100644 index 000000000..1f6e948d5 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go @@ -0,0 +1,34 @@ +package astwalk + +import ( + "go/ast" +) + +// WalkHandler is a type to be embedded into every checker +// that uses astwalk walkers. +type WalkHandler struct { + // SkipChilds controls whether currently analyzed + // node childs should be traversed. + // + // Value is reset after each visitor invocation, + // so there is no need to set value back to false. + SkipChilds bool +} + +// EnterFile is a default walkerEvents.EnterFile implementation +// that reports every file as accepted candidate for checking. +func (w *WalkHandler) EnterFile(f *ast.File) bool { + return true +} + +// EnterFunc is a default walkerEvents.EnterFunc implementation +// that skips extern function (ones that do not have body). 
+func (w *WalkHandler) EnterFunc(decl *ast.FuncDecl) bool { + return decl.Body != nil +} + +func (w *WalkHandler) skipChilds() bool { + v := w.SkipChilds + w.SkipChilds = false + return v +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go new file mode 100644 index 000000000..cd5e1c979 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go @@ -0,0 +1,57 @@ +package astwalk + +import ( + "go/types" + + "github.com/go-critic/go-critic/framework/linter" +) + +// WalkerForFuncDecl returns file walker implementation for FuncDeclVisitor. +func WalkerForFuncDecl(v FuncDeclVisitor) linter.FileWalker { + return &funcDeclWalker{visitor: v} +} + +// WalkerForExpr returns file walker implementation for ExprVisitor. +func WalkerForExpr(v ExprVisitor) linter.FileWalker { + return &exprWalker{visitor: v} +} + +// WalkerForLocalExpr returns file walker implementation for LocalExprVisitor. +func WalkerForLocalExpr(v LocalExprVisitor) linter.FileWalker { + return &localExprWalker{visitor: v} +} + +// WalkerForStmtList returns file walker implementation for StmtListVisitor. +func WalkerForStmtList(v StmtListVisitor) linter.FileWalker { + return &stmtListWalker{visitor: v} +} + +// WalkerForStmt returns file walker implementation for StmtVisitor. +func WalkerForStmt(v StmtVisitor) linter.FileWalker { + return &stmtWalker{visitor: v} +} + +// WalkerForTypeExpr returns file walker implementation for TypeExprVisitor. +func WalkerForTypeExpr(v TypeExprVisitor, info *types.Info) linter.FileWalker { + return &typeExprWalker{visitor: v, info: info} +} + +// WalkerForLocalComment returns file walker implementation for LocalCommentVisitor. +func WalkerForLocalComment(v LocalCommentVisitor) linter.FileWalker { + return &localCommentWalker{visitor: v} +} + +// WalkerForComment returns file walker implementation for CommentVisitor. +func WalkerForComment(v CommentVisitor) linter.FileWalker { + return &commentWalker{visitor: v} +} + +// WalkerForDocComment returns file walker implementation for DocCommentVisitor. +func WalkerForDocComment(v DocCommentVisitor) linter.FileWalker { + return &docCommentWalker{visitor: v} +} + +// WalkerForLocalDef returns file walker implementation for LocalDefVisitor. +func WalkerForLocalDef(v LocalDefVisitor, info *types.Info) linter.FileWalker { + return &localDefWalker{visitor: v, info: info} +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astfind.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astfind.go new file mode 100644 index 000000000..3c0a95afc --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astfind.go @@ -0,0 +1,27 @@ +package lintutil + +import ( + "go/ast" + + "golang.org/x/tools/go/ast/astutil" +) + +// FindNode applies pred for root and all it's childs until it returns true. +// Matched node is returned. +// If none of the nodes matched predicate, nil is returned. +func FindNode(root ast.Node, pred func(ast.Node) bool) ast.Node { + var found ast.Node + astutil.Apply(root, nil, func(cur *astutil.Cursor) bool { + if pred(cur.Node()) { + found = cur.Node() + return false + } + return true + }) + return found +} + +// ContainsNode reports whether `FindNode(root, pred)!=nil`. 
+func ContainsNode(root ast.Node, pred func(ast.Node) bool) bool { + return FindNode(root, pred) != nil +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go new file mode 100644 index 000000000..63d181e5e --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go @@ -0,0 +1,86 @@ +package lintutil + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/typep" +) + +// Different utilities to make simple analysis over typed ast values flow. +// +// It's primitive and can't replace SSA, but the bright side is that +// it does not require building an additional IR eagerly. +// Expected to be used sparingly inside a few checkers. +// +// If proven really useful, can be moved to go-toolsmith library. + +// IsImmutable reports whether n can be midified through any operation. +func IsImmutable(info *types.Info, n ast.Expr) bool { + if astp.IsBasicLit(n) { + return true + } + tv, ok := info.Types[n] + return ok && !tv.Assignable() && !tv.Addressable() +} + +// CouldBeMutated reports whether dst can be modified inside body. +// +// Note that it does not take already existing pointers to dst. +// An example of safe and correct usage is checking of something +// that was just defined, so the dst is a result of that definition. +func CouldBeMutated(info *types.Info, body ast.Node, dst ast.Expr) bool { + if IsImmutable(info, dst) { // Fast path. + return false + } + + // We don't track pass-by-value. + // If it's already a pointer, passing it by value + // means that there can be a potential indirect modification. + // + // It's possible to be less conservative here and find at least + // one such value pass before giving up. + if typep.IsPointer(info.TypeOf(dst)) { + return true + } + + var isDst func(x ast.Expr) bool + if dst, ok := dst.(*ast.Ident); ok { + // Identifier can be shadowed, + // so we need to check the object as well. + obj := info.ObjectOf(dst) + if obj == nil { + return true // Being conservative + } + isDst = func(x ast.Expr) bool { + id, ok := x.(*ast.Ident) + return ok && id.Name == dst.Name && info.ObjectOf(id) == obj + } + } else { + isDst = func(x ast.Expr) bool { + return astequal.Expr(dst, x) + } + } + + return ContainsNode(body, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.UnaryExpr: + if n.Op == token.AND && isDst(n.X) { + return true // Address taken + } + case *ast.AssignStmt: + for _, lhs := range n.Lhs { + if isDst(lhs) { + return true + } + } + case *ast.IncDecStmt: + // Incremented or decremented. + return isDst(n.X) + } + return false + }) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astset.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astset.go new file mode 100644 index 000000000..ebe7835e5 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astset.go @@ -0,0 +1,44 @@ +package lintutil + +import ( + "go/ast" + + "github.com/go-toolsmith/astequal" +) + +// AstSet is a simple ast.Node set. +// Zero value is ready to use set. +// Can be reused after Clear call. +type AstSet struct { + items []ast.Node +} + +// Contains reports whether s contains x. 
+func (s *AstSet) Contains(x ast.Node) bool { + for i := range s.items { + if astequal.Node(s.items[i], x) { + return true + } + } + return false +} + +// Insert pushes x in s if it's not already there. +// Returns true if element was inserted. +func (s *AstSet) Insert(x ast.Node) bool { + if s.Contains(x) { + return false + } + s.items = append(s.items, x) + return true +} + +// Clear removes all element from set. +func (s *AstSet) Clear() { + s.items = s.items[:0] +} + +// Len returns the number of elements contained inside s. +func (s *AstSet) Len() int { + return len(s.items) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/zero_value.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/zero_value.go new file mode 100644 index 000000000..4370f5818 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/zero_value.go @@ -0,0 +1,94 @@ +package lintutil + +import ( + "go/ast" + "go/constant" + "go/token" + "go/types" +) + +// IsZeroValue reports whether x represents zero value of its type. +// +// The functions is conservative and may return false for zero values +// if some cases are not handled in a comprehensive way +// but is should never return true for something that's not a proper zv. +func IsZeroValue(info *types.Info, x ast.Expr) bool { + switch x := x.(type) { + case *ast.BasicLit: + typ := info.TypeOf(x).Underlying().(*types.Basic) + v := info.Types[x].Value + var z constant.Value + switch { + case typ.Kind() == types.String: + z = constant.MakeString("") + case typ.Info()&types.IsInteger != 0: + z = constant.MakeInt64(0) + case typ.Info()&types.IsUnsigned != 0: + z = constant.MakeUint64(0) + case typ.Info()&types.IsFloat != 0: + z = constant.MakeFloat64(0) + default: + return false + } + return constant.Compare(v, token.EQL, z) + + case *ast.CompositeLit: + return len(x.Elts) == 0 + + default: + // Note that this function is not comprehensive. + return false + } +} + +// ZeroValueOf returns a zero value expression for typeExpr of type typ. +// If function can't find such a value, nil is returned. 
+func ZeroValueOf(typeExpr ast.Expr, typ types.Type) ast.Expr { + switch utyp := typ.Underlying().(type) { + case *types.Basic: + info := utyp.Info() + var zv ast.Expr + switch { + case info&types.IsInteger != 0: + zv = &ast.BasicLit{Kind: token.INT, Value: "0"} + case info&types.IsFloat != 0: + zv = &ast.BasicLit{Kind: token.FLOAT, Value: "0.0"} + case info&types.IsString != 0: + zv = &ast.BasicLit{Kind: token.STRING, Value: `""`} + case info&types.IsBoolean != 0: + zv = &ast.Ident{Name: "false"} + } + if isDefaultLiteralType(typ) { + return zv + } + return &ast.CallExpr{ + Fun: typeExpr, + Args: []ast.Expr{zv}, + } + + case *types.Slice, *types.Map, *types.Pointer, *types.Interface: + return &ast.CallExpr{ + Fun: typeExpr, + Args: []ast.Expr{&ast.Ident{Name: "nil"}}, + } + + case *types.Array, *types.Struct: + return &ast.CompositeLit{Type: typeExpr} + + default: + return nil + } +} + +func isDefaultLiteralType(typ types.Type) bool { + btyp, ok := typ.(*types.Basic) + if !ok { + return false + } + switch btyp.Kind() { + case types.Bool, types.Int, types.Float64, types.String: + return true + default: + return false + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go new file mode 100644 index 000000000..64c2821dd --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go @@ -0,0 +1,124 @@ +package checkers + +import ( + "go/ast" + "go/types" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "mapKey" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects suspicious map literal keys" + info.Before = ` +_ = map[string]int{ + "foo": 1, + "bar ": 2, +}` + info.After = ` +_ = map[string]int{ + "foo": 1, + "bar": 2, +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&mapKeyChecker{ctx: ctx}), nil + }) +} + +type mapKeyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + astSet lintutil.AstSet +} + +func (c *mapKeyChecker) VisitExpr(expr ast.Expr) { + lit := astcast.ToCompositeLit(expr) + if len(lit.Elts) < 2 { + return + } + + typ, ok := c.ctx.TypeOf(lit).Underlying().(*types.Map) + if !ok { + return + } + if !typep.HasStringKind(typ.Key().Underlying()) { + return + } + + c.checkWhitespace(lit) + c.checkDuplicates(lit) +} + +func (c *mapKeyChecker) checkDuplicates(lit *ast.CompositeLit) { + c.astSet.Clear() + + for _, elt := range lit.Elts { + kv := astcast.ToKeyValueExpr(elt) + if astp.IsBasicLit(kv.Key) { + // Basic lits are handled by the compiler. + continue + } + if !typep.SideEffectFree(c.ctx.TypesInfo, kv.Key) { + continue + } + if !c.astSet.Insert(kv.Key) { + c.warnDupKey(kv.Key) + } + } +} + +func (c *mapKeyChecker) checkWhitespace(lit *ast.CompositeLit) { + var whitespaceKey ast.Node + for _, elt := range lit.Elts { + key := astcast.ToBasicLit(astcast.ToKeyValueExpr(elt).Key) + if len(key.Value) < len(`" "`) { + continue + } + // s is unquoted string literal value. + s := key.Value[len(`"`) : len(key.Value)-len(`"`)] + if !strings.Contains(s, " ") { + continue + } + if whitespaceKey != nil { + // Already seen something with a whitespace. 
+ // More than one entry => not suspicious. + return + } + if s == " " { + // If space is used as a key, maybe this map + // has something to do with spaces. Give up. + return + } + // Check if it has exactly 1 space prefix or suffix. + bad := strings.HasPrefix(s, " ") && !strings.HasPrefix(s, " ") || + strings.HasSuffix(s, " ") && !strings.HasSuffix(s, " ") + if !bad { + // These spaces can be a padding, + // or a legitimate part of a key. Give up. + return + } + whitespaceKey = key + } + + if whitespaceKey != nil { + c.warnWhitespace(whitespaceKey) + } +} + +func (c *mapKeyChecker) warnWhitespace(key ast.Node) { + c.ctx.Warn(key, "suspucious whitespace in %s key", key) +} + +func (c *mapKeyChecker) warnDupKey(key ast.Node) { + c.ctx.Warn(key, "suspicious duplicate %s key", key) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go new file mode 100644 index 000000000..2553def14 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go @@ -0,0 +1,57 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "methodExprCall" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects method expression call that can be replaced with a method call" + info.Before = `f := foo{} +foo.bar(f)` + info.After = `f := foo{} +f.bar()` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&methodExprCallChecker{ctx: ctx}), nil + }) +} + +type methodExprCallChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *methodExprCallChecker) VisitExpr(x ast.Expr) { + call := astcast.ToCallExpr(x) + s := astcast.ToSelectorExpr(call.Fun) + + if len(call.Args) < 1 || astcast.ToIdent(call.Args[0]).Name == "nil" { + return + } + + if typep.IsTypeExpr(c.ctx.TypesInfo, s.X) { + c.warn(call, s) + } +} + +func (c *methodExprCallChecker) warn(cause *ast.CallExpr, s *ast.SelectorExpr) { + selector := astcopy.SelectorExpr(s) + selector.X = cause.Args[0] + + // Remove "&" from the receiver (if any). 
+ if u, ok := selector.X.(*ast.UnaryExpr); ok && u.Op == token.AND { + selector.X = u.X + } + + c.ctx.Warn(cause, "consider to change `%s` to `%s`", cause.Fun, selector) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go new file mode 100644 index 000000000..a68acecca --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go @@ -0,0 +1,73 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "nestingReduce" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Params = linter.CheckerParams{ + "bodyWidth": { + Value: 5, + Usage: "min number of statements inside a branch to trigger a warning", + }, + } + info.Summary = "Finds where nesting level could be reduced" + info.Before = ` +for _, v := range a { + if v.Bool { + body() + } +}` + info.After = ` +for _, v := range a { + if !v.Bool { + continue + } + body() +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &nestingReduceChecker{ctx: ctx} + c.bodyWidth = info.Params.Int("bodyWidth") + return astwalk.WalkerForStmt(c), nil + }) +} + +type nestingReduceChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + bodyWidth int +} + +func (c *nestingReduceChecker) VisitStmt(stmt ast.Stmt) { + switch stmt := stmt.(type) { + case *ast.ForStmt: + c.checkLoopBody(stmt.Body.List) + case *ast.RangeStmt: + c.checkLoopBody(stmt.Body.List) + } +} + +func (c *nestingReduceChecker) checkLoopBody(body []ast.Stmt) { + if len(body) != 1 { + return + } + stmt, ok := body[0].(*ast.IfStmt) + if !ok { + return + } + if len(stmt.Body.List) >= c.bodyWidth && stmt.Else == nil { + c.warnLoop(stmt) + } +} + +func (c *nestingReduceChecker) warnLoop(cause ast.Node) { + c.ctx.Warn(cause, "invert if cond, replace body with `continue`, move old body after the statement") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go new file mode 100644 index 000000000..7e564b70f --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go @@ -0,0 +1,45 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "newDeref" + info.Tags = []string{"style"} + info.Summary = "Detects immediate dereferencing of `new` expressions" + info.Before = `x := *new(bool)` + info.After = `x := false` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&newDerefChecker{ctx: ctx}), nil + }) +} + +type newDerefChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *newDerefChecker) VisitExpr(expr ast.Expr) { + deref := astcast.ToStarExpr(expr) + call := astcast.ToCallExpr(deref.X) + if astcast.ToIdent(call.Fun).Name == "new" { + typ := c.ctx.TypeOf(call.Args[0]) + zv := lintutil.ZeroValueOf(astutil.Unparen(call.Args[0]), typ) + if zv != nil { + c.warn(expr, zv) + } + } +} + +func (c 
*newDerefChecker) warn(cause, suggestion ast.Expr) { + c.ctx.Warn(cause, "replace `%s` with `%s`", cause, suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go new file mode 100644 index 000000000..0a8e793ee --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go @@ -0,0 +1,71 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "nilValReturn" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects return statements those results evaluate to nil" + info.Before = ` +if err == nil { + return err +}` + info.After = ` +// (A) - return nil explicitly +if err == nil { + return nil +} +// (B) - typo in "==", change to "!=" +if err != nil { + return err +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&nilValReturnChecker{ctx: ctx}), nil + }) +} + +type nilValReturnChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *nilValReturnChecker) VisitStmt(stmt ast.Stmt) { + ifStmt, ok := stmt.(*ast.IfStmt) + if !ok || len(ifStmt.Body.List) != 1 { + return + } + ret, ok := ifStmt.Body.List[0].(*ast.ReturnStmt) + if !ok { + return + } + expr, ok := ifStmt.Cond.(*ast.BinaryExpr) + if !ok { + return + } + xIsNil := expr.Op == token.EQL && + typep.SideEffectFree(c.ctx.TypesInfo, expr.X) && + qualifiedName(expr.Y) == "nil" + if !xIsNil { + return + } + for _, res := range ret.Results { + if astequal.Expr(expr.X, res) { + c.warn(ret, expr.X) + break + } + } +} + +func (c *nilValReturnChecker) warn(cause, val ast.Node) { + c.ctx.Warn(cause, "returned expr is always nil; replace %s with nil", val) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go new file mode 100644 index 000000000..486940452 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go @@ -0,0 +1,82 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "octalLiteral" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects octal literals passed to functions" + info.Before = `foo(02)` + info.After = `foo(2)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &octalLiteralChecker{ + ctx: ctx, + octFriendlyPkg: map[string]bool{ + "os": true, + "io/ioutil": true, + }, + } + return astwalk.WalkerForExpr(c), nil + }) +} + +type octalLiteralChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + octFriendlyPkg map[string]bool +} + +func (c *octalLiteralChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + calledExpr := astcast.ToSelectorExpr(call.Fun) + ident := astcast.ToIdent(calledExpr.X) + + if obj, ok := c.ctx.TypesInfo.ObjectOf(ident).(*types.PkgName); ok { + pkg := obj.Imported() + if c.octFriendlyPkg[pkg.Path()] { + return + } + } + + for _, arg 
:= range call.Args { + if lit := astcast.ToBasicLit(c.unsign(arg)); len(lit.Value) > 1 && + c.isIntLiteral(lit) && + c.isOctalLiteral(lit) { + c.warn(call) + return + } + } +} + +func (c *octalLiteralChecker) unsign(e ast.Expr) ast.Expr { + u, ok := e.(*ast.UnaryExpr) + if !ok { + return e + } + return u.X +} + +func (c *octalLiteralChecker) isIntLiteral(lit *ast.BasicLit) bool { + return lit.Kind == token.INT +} + +func (c *octalLiteralChecker) isOctalLiteral(lit *ast.BasicLit) bool { + return lit.Value[0] == '0' && + lit.Value[1] != 'x' && + lit.Value[1] != 'X' +} + +func (c *octalLiteralChecker) warn(expr ast.Expr) { + c.ctx.Warn(expr, "suspicious octal args in `%s`", expr) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go b/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go new file mode 100644 index 000000000..ece3fdfdb --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go @@ -0,0 +1,66 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "offBy1" + info.Tags = []string{"diagnostic"} + info.Summary = "Detects various off-by-one kind of errors" + info.Before = `xs[len(xs)]` + info.After = `xs[len(xs)-1]` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&offBy1Checker{ctx: ctx}), nil + }) +} + +type offBy1Checker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *offBy1Checker) VisitExpr(e ast.Expr) { + // TODO(quasilyte): handle more off-by-1 patterns. + // TODO(quasilyte): check whether go/analysis can help here. + + // Detect s[len(s)] expressions that always panic. + // The correct form is s[len(s)-1]. 
+ + indexExpr := astcast.ToIndexExpr(e) + indexed := indexExpr.X + if !typep.IsSlice(c.ctx.TypeOf(indexed)) { + return + } + if !typep.SideEffectFree(c.ctx.TypesInfo, indexed) { + return + } + call := astcast.ToCallExpr(indexExpr.Index) + if astcast.ToIdent(call.Fun).Name != "len" { + return + } + if len(call.Args) != 1 || !astequal.Expr(call.Args[0], indexed) { + return + } + c.warnLenIndex(indexExpr) +} + +func (c *offBy1Checker) warnLenIndex(cause *ast.IndexExpr) { + suggest := astcopy.IndexExpr(cause) + suggest.Index = &ast.BinaryExpr{ + Op: token.SUB, + X: cause.Index, + Y: &ast.BasicLit{Value: "1"}, + } + c.ctx.Warn(cause, "index expr always panics; maybe you wanted %s?", suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go new file mode 100644 index 000000000..8cdad4eee --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go @@ -0,0 +1,93 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "paramTypeCombine" + info.Tags = []string{"style", "opinionated"} + info.Summary = "Detects if function parameters could be combined by type and suggest the way to do it" + info.Before = `func foo(a, b int, c, d int, e, f int, g int) {}` + info.After = `func foo(a, b, c, d, e, f, g int) {}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(¶mTypeCombineChecker{ctx: ctx}), nil + }) +} + +type paramTypeCombineChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *paramTypeCombineChecker) EnterFunc(*ast.FuncDecl) bool { + return true +} + +func (c *paramTypeCombineChecker) VisitFuncDecl(decl *ast.FuncDecl) { + typ := c.optimizeFuncType(decl.Type) + if !astequal.Expr(typ, decl.Type) { + c.warn(decl.Type, typ) + } +} + +func (c *paramTypeCombineChecker) optimizeFuncType(f *ast.FuncType) *ast.FuncType { + return &ast.FuncType{ + Params: c.optimizeParams(f.Params), + Results: c.optimizeParams(f.Results), + } +} +func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.FieldList { + // To avoid false positives, skip unnamed param lists. + // + // We're using a property that Go only permits unnamed params + // for the whole list, so it's enough to check whether any of + // ast.Field have empty name list. + skip := params == nil || + len(params.List) < 2 || + len(params.List[0].Names) == 0 || + c.paramsAreMultiLine(params) + if skip { + return params + } + + list := []*ast.Field{} + names := make([]*ast.Ident, len(params.List[0].Names)) + copy(names, params.List[0].Names) + list = append(list, &ast.Field{ + Names: names, + Type: params.List[0].Type, + }) + for i, p := range params.List[1:] { + names = make([]*ast.Ident, len(p.Names)) + copy(names, p.Names) + if astequal.Expr(p.Type, params.List[i].Type) { + list[len(list)-1].Names = + append(list[len(list)-1].Names, names...) 
+ } else { + list = append(list, &ast.Field{ + Names: names, + Type: params.List[i+1].Type, + }) + } + } + return &ast.FieldList{ + List: list, + } +} + +func (c *paramTypeCombineChecker) warn(f1, f2 *ast.FuncType) { + c.ctx.Warn(f1, "%s could be replaced with %s", f1, f2) +} + +func (c *paramTypeCombineChecker) paramsAreMultiLine(params *ast.FieldList) bool { + startPos := c.ctx.FileSet.Position(params.Opening) + endPos := c.ctx.FileSet.Position(params.Closing) + return startPos.Line != endPos.Line +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go new file mode 100644 index 000000000..88c8f4cb3 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go @@ -0,0 +1,70 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "ptrToRefParam" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Summary = "Detects input and output parameters that have a type of pointer to referential type" + info.Before = `func f(m *map[string]int) (*chan *int)` + info.After = `func f(m map[string]int) (chan *int)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(&ptrToRefParamChecker{ctx: ctx}), nil + }) +} + +type ptrToRefParamChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *ptrToRefParamChecker) VisitFuncDecl(fn *ast.FuncDecl) { + c.checkParams(fn.Type.Params.List) + if fn.Type.Results != nil { + c.checkParams(fn.Type.Results.List) + } +} + +func (c *ptrToRefParamChecker) checkParams(params []*ast.Field) { + for _, param := range params { + ptr, ok := c.ctx.TypeOf(param.Type).(*types.Pointer) + if !ok { + continue + } + + if c.isRefType(ptr.Elem()) { + if len(param.Names) == 0 { + c.ctx.Warn(param, "consider to make non-pointer type for `%s`", param.Type) + } else { + for i := range param.Names { + c.warn(param.Names[i]) + } + } + } + } +} + +func (c *ptrToRefParamChecker) isRefType(x types.Type) bool { + switch typ := x.(type) { + case *types.Map, *types.Chan, *types.Interface: + return true + case *types.Named: + // Handle underlying type only for interfaces. 
+ if _, ok := typ.Underlying().(*types.Interface); ok { + return true + } + } + return false +} + +func (c *ptrToRefParamChecker) warn(id *ast.Ident) { + c.ctx.Warn(id, "consider `%s' to be of non-pointer type", id) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go new file mode 100644 index 000000000..5615af467 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go @@ -0,0 +1,80 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "rangeExprCopy" + info.Tags = []string{"performance"} + info.Params = linter.CheckerParams{ + "sizeThreshold": { + Value: 512, + Usage: "size in bytes that makes the warning trigger", + }, + "skipTestFuncs": { + Value: true, + Usage: "whether to check test functions", + }, + } + info.Summary = "Detects expensive copies of `for` loop range expressions" + info.Details = "Suggests to use pointer to array to avoid the copy using `&` on range expression." + info.Before = ` +var xs [2048]byte +for _, x := range xs { // Copies 2048 bytes + // Loop body. +}` + info.After = ` +var xs [2048]byte +for _, x := range &xs { // No copy + // Loop body. +}` + info.Note = "See Go issue for details: https://github.com/golang/go/issues/15812." + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &rangeExprCopyChecker{ctx: ctx} + c.sizeThreshold = int64(info.Params.Int("sizeThreshold")) + c.skipTestFuncs = info.Params.Bool("skipTestFuncs") + return astwalk.WalkerForStmt(c), nil + }) +} + +type rangeExprCopyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + sizeThreshold int64 + skipTestFuncs bool +} + +func (c *rangeExprCopyChecker) EnterFunc(fn *ast.FuncDecl) bool { + return fn.Body != nil && + !(c.skipTestFuncs && isUnitTestFunc(c.ctx, fn)) +} + +func (c *rangeExprCopyChecker) VisitStmt(stmt ast.Stmt) { + rng, ok := stmt.(*ast.RangeStmt) + if !ok || rng.Key == nil || rng.Value == nil { + return + } + tv := c.ctx.TypesInfo.Types[rng.X] + if !tv.Addressable() { + return + } + if _, ok := tv.Type.(*types.Array); !ok { + return + } + if size := c.ctx.SizesInfo.Sizeof(tv.Type); size >= c.sizeThreshold { + c.warn(rng, size) + } +} + +func (c *rangeExprCopyChecker) warn(rng *ast.RangeStmt, size int64) { + c.ctx.Warn(rng, "copy of %s (%d bytes) can be avoided with &%s", + rng.X, size, rng.X) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go new file mode 100644 index 000000000..b34aa5c28 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go @@ -0,0 +1,75 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "rangeValCopy" + info.Tags = []string{"performance"} + info.Params = linter.CheckerParams{ + "sizeThreshold": { + Value: 128, + Usage: "size in bytes that makes the warning trigger", + }, + "skipTestFuncs": { + Value: true, + Usage: "whether to check test functions", + }, + } + info.Summary = "Detects loops that copy big objects during each iteration" + info.Details = "Suggests to 
use index access or take address and make use pointer instead." + info.Before = ` +xs := make([][1024]byte, length) +for _, x := range xs { + // Loop body. +}` + info.After = ` +xs := make([][1024]byte, length) +for i := range xs { + x := &xs[i] + // Loop body. +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &rangeValCopyChecker{ctx: ctx} + c.sizeThreshold = int64(info.Params.Int("sizeThreshold")) + c.skipTestFuncs = info.Params.Bool("skipTestFuncs") + return astwalk.WalkerForStmt(c), nil + }) +} + +type rangeValCopyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + sizeThreshold int64 + skipTestFuncs bool +} + +func (c *rangeValCopyChecker) EnterFunc(fn *ast.FuncDecl) bool { + return fn.Body != nil && + !(c.skipTestFuncs && isUnitTestFunc(c.ctx, fn)) +} + +func (c *rangeValCopyChecker) VisitStmt(stmt ast.Stmt) { + rng, ok := stmt.(*ast.RangeStmt) + if !ok || rng.Value == nil { + return + } + typ := c.ctx.TypeOf(rng.Value) + if typ == nil { + return + } + if size := c.ctx.SizesInfo.Sizeof(typ); size >= c.sizeThreshold { + c.warn(rng, size) + } +} + +func (c *rangeValCopyChecker) warn(n ast.Node, size int64) { + c.ctx.Warn(n, "each iteration copies %d bytes (consider pointers or indexing)", size) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go new file mode 100644 index 000000000..600aa73d0 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go @@ -0,0 +1,47 @@ +package checkers + +import ( + "go/ast" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astp" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "regexpMust" + info.Tags = []string{"style"} + info.Summary = "Detects `regexp.Compile*` that can be replaced with `regexp.MustCompile*`" + info.Before = `re, _ := regexp.Compile("const pattern")` + info.After = `re := regexp.MustCompile("const pattern")` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(®expMustChecker{ctx: ctx}), nil + }) +} + +type regexpMustChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *regexpMustChecker) VisitExpr(x ast.Expr) { + if x, ok := x.(*ast.CallExpr); ok { + switch name := qualifiedName(x.Fun); name { + case "regexp.Compile", "regexp.CompilePOSIX": + // Only check for trivial string args, permit parenthesis. 
+ if !astp.IsBasicLit(astutil.Unparen(x.Args[0])) { + return + } + c.warn(x, strings.Replace(name, "Compile", "MustCompile", 1)) + } + } +} + +func (c *regexpMustChecker) warn(cause *ast.CallExpr, suggestion string) { + c.ctx.Warn(cause, "for const patterns like %s, use %s", + cause.Args[0], suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go new file mode 100644 index 000000000..31dc4aad3 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go @@ -0,0 +1,68 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "regexpPattern" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects suspicious regexp patterns" + info.Before = "regexp.MustCompile(`google.com|yandex.ru`)" + info.After = "regexp.MustCompile(`google\\.com|yandex\\.ru`)" + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + domains := []string{ + "com", + "org", + "info", + "net", + "ru", + "de", + } + + allDomains := strings.Join(domains, "|") + domainRE := regexp.MustCompile(`[^\\]\.(` + allDomains + `)\b`) + return astwalk.WalkerForExpr(®expPatternChecker{ + ctx: ctx, + domainRE: domainRE, + }), nil + }) +} + +type regexpPatternChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + domainRE *regexp.Regexp +} + +func (c *regexpPatternChecker) VisitExpr(x ast.Expr) { + call, ok := x.(*ast.CallExpr) + if !ok { + return + } + + switch qualifiedName(call.Fun) { + case "regexp.Compile", "regexp.CompilePOSIX", "regexp.MustCompile", "regexp.MustCompilePosix": + cv := c.ctx.TypesInfo.Types[call.Args[0]].Value + if cv == nil || cv.Kind() != constant.String { + return + } + s := constant.StringVal(cv) + if m := c.domainRE.FindStringSubmatch(s); m != nil { + c.warnDomain(call.Args[0], m[1]) + } + } +} + +func (c *regexpPatternChecker) warnDomain(cause ast.Expr, domain string) { + c.ctx.Warn(cause, "'.%s' should probably be '\\.%s'", domain, domain) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go new file mode 100644 index 000000000..b7dd15948 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go @@ -0,0 +1,511 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/constant" + "log" + "strings" + "unicode/utf8" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/quasilyte/regex/syntax" +) + +func init() { + var info linter.CheckerInfo + info.Name = "regexpSimplify" + info.Tags = []string{"style", "experimental", "opinionated"} + info.Summary = "Detects regexp patterns that can be simplified" + info.Before = "regexp.MustCompile(`(?:a|b|c) [a-z][a-z]*`)" + info.After = "regexp.MustCompile(`[abc] {3}[a-z]+`)" + + // TODO(quasilyte): add params to control most opinionated replacements + // like `[0-9] -> \d` + // `[[:digit:]] -> \d` + // `[A-Za-z0-9_]` -> `\w` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + opts := &syntax.ParserOptions{ + NoLiterals: true, + } + c := ®expSimplifyChecker{ + ctx: ctx, + parser: 
syntax.NewParser(opts), + out: &strings.Builder{}, + } + return astwalk.WalkerForExpr(c), nil + }) +} + +type regexpSimplifyChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + parser *syntax.Parser + + // out is a tmp buffer where we build a simplified regexp pattern. + out *strings.Builder + // score is a number of applied simplifications + score int +} + +func (c *regexpSimplifyChecker) VisitExpr(x ast.Expr) { + call, ok := x.(*ast.CallExpr) + if !ok { + return + } + + switch qualifiedName(call.Fun) { + case "regexp.Compile", "regexp.MustCompile": + cv := c.ctx.TypesInfo.Types[call.Args[0]].Value + if cv == nil || cv.Kind() != constant.String { + return + } + pat := constant.StringVal(cv) + if len(pat) > 60 { + // Skip scary regexp patterns for now. + break + } + + // Only do 2 passes. + simplified := pat + for pass := 0; pass < 2; pass++ { + candidate := c.simplify(pass, simplified) + if candidate == "" { + break + } + simplified = candidate + } + if simplified != "" && simplified != pat { + c.warn(call.Args[0], pat, simplified) + } + } +} + +func (c *regexpSimplifyChecker) simplify(pass int, pat string) string { + re, err := c.parser.Parse(pat) + if err != nil { + return "" + } + + c.score = 0 + c.out.Reset() + + // TODO(quasilyte): suggest char ranges for things like [012345689]? + // TODO(quasilyte): evaluate char range to suggest better replacements. + // TODO(quasilyte): (?:ab|ac) -> a[bc] + // TODO(quasilyte): suggest "s" and "." flag if things like [\w\W] are used. + // TODO(quasilyte): x{n}x? -> x{n,n+1} + + c.walk(re.Expr) + + if debug() { + // This happens only in one of two cases: + // 1. Parser has a bug and we got invalid AST for the given pattern. + // 2. Simplifier incorrectly built a replacement string from the AST. + if c.score == 0 && c.out.String() != pat { + log.Printf("pass %d: unexpected pattern diff:\n\thave: %q\n\twant: %q", + pass, c.out.String(), pat) + } + } + + if c.score > 0 { + return c.out.String() + } + return "" +} + +func (c *regexpSimplifyChecker) walk(e syntax.Expr) { + out := c.out + + switch e.Op { + case syntax.OpConcat: + c.walkConcat(e) + + case syntax.OpAlt: + c.walkAlt(e) + + case syntax.OpCharRange: + s := c.simplifyCharRange(e) + if s != "" { + out.WriteString(s) + c.score++ + } else { + out.WriteString(e.Value) + } + + case syntax.OpGroupWithFlags: + out.WriteString("(") + out.WriteString(e.Args[1].Value) + out.WriteString(":") + c.walk(e.Args[0]) + out.WriteString(")") + case syntax.OpGroup: + c.walkGroup(e) + case syntax.OpCapture: + out.WriteString("(") + c.walk(e.Args[0]) + out.WriteString(")") + case syntax.OpNamedCapture: + out.WriteString("(?P<") + out.WriteString(e.Args[1].Value) + out.WriteString(">") + c.walk(e.Args[0]) + out.WriteString(")") + + case syntax.OpRepeat: + // TODO(quasilyte): is it worth it to analyze repeat argument + // more closely and handle `{n,n} -> {n}` cases? + rep := e.Args[1].Value + switch rep { + case "{0,1}": + c.walk(e.Args[0]) + out.WriteString("?") + c.score++ + case "{1,}": + c.walk(e.Args[0]) + out.WriteString("+") + c.score++ + case "{0,}": + c.walk(e.Args[0]) + out.WriteString("*") + c.score++ + case "{0}": + // Maybe {0} should be reported by another check, regexpLint? 
+ c.score++ + case "{1}": + c.walk(e.Args[0]) + c.score++ + default: + c.walk(e.Args[0]) + out.WriteString(rep) + } + + case syntax.OpPosixClass: + out.WriteString(e.Value) + + case syntax.OpNegCharClass: + s := c.simplifyNegCharClass(e) + if s != "" { + c.out.WriteString(s) + c.score++ + } else { + out.WriteString("[^") + for _, e := range e.Args { + c.walk(e) + } + out.WriteString("]") + } + + case syntax.OpCharClass: + s := c.simplifyCharClass(e) + if s != "" { + c.out.WriteString(s) + c.score++ + } else { + out.WriteString("[") + for _, e := range e.Args { + c.walk(e) + } + out.WriteString("]") + } + + case syntax.OpEscapeChar: + switch e.Value { + case `\&`, `\#`, `\!`, `\@`, `\%`, `\<`, `\>`, `\:`, `\;`, `\/`, `\,`, `\=`, `\.`: + c.score++ + out.WriteString(e.Value[len(`\`):]) + default: + out.WriteString(e.Value) + } + + case syntax.OpQuestion, syntax.OpNonGreedy: + c.walk(e.Args[0]) + out.WriteString("?") + case syntax.OpStar: + c.walk(e.Args[0]) + out.WriteString("*") + case syntax.OpPlus: + c.walk(e.Args[0]) + out.WriteString("+") + + default: + out.WriteString(e.Value) + } +} + +func (c *regexpSimplifyChecker) walkGroup(g syntax.Expr) { + switch g.Args[0].Op { + case syntax.OpChar, syntax.OpEscapeChar, syntax.OpEscapeMeta, syntax.OpCharClass: + c.walk(g.Args[0]) + c.score++ + return + } + + c.out.WriteString("(?:") + c.walk(g.Args[0]) + c.out.WriteString(")") +} + +func (c *regexpSimplifyChecker) simplifyNegCharClass(e syntax.Expr) string { + switch e.Value { + case `[^0-9]`: + return `\D` + case `[^\s]`: + return `\S` + case `[^\S]`: + return `\s` + case `[^\w]`: + return `\W` + case `[^\W]`: + return `\w` + case `[^\d]`: + return `\D` + case `[^\D]`: + return `\d` + case `[^[:^space:]]`: + return `\s` + case `[^[:space:]]`: + return `\S` + case `[^[:^word:]]`: + return `\w` + case `[^[:word:]]`: + return `\W` + case `[^[:^digit:]]`: + return `\d` + case `[^[:digit:]]`: + return `\D` + } + + return "" +} + +func (c *regexpSimplifyChecker) simplifyCharClass(e syntax.Expr) string { + switch e.Value { + case `[0-9]`: + return `\d` + case `[[:word:]]`: + return `\w` + case `[[:^word:]]`: + return `\W` + case `[[:digit:]]`: + return `\d` + case `[[:^digit:]]`: + return `\D` + case `[[:space:]]`: + return `\s` + case `[[:^space:]]`: + return `\S` + case `[][]`: + return `\]\[` + case `[]]`: + return `\]` + } + + if len(e.Args) == 1 { + switch e.Args[0].Op { + case syntax.OpChar: + switch v := e.Args[0].Value; v { + case "|", "*", "+", "?", ".", "[", "^", "$", "(", ")": + // Can't take outside of the char group without escaping. 
+ default: + return v + } + case syntax.OpEscapeChar: + return e.Args[0].Value + } + } + + return "" +} + +func (c *regexpSimplifyChecker) canMerge(x, y syntax.Expr) bool { + if x.Op != y.Op { + return false + } + switch x.Op { + case syntax.OpChar, syntax.OpCharClass, syntax.OpEscapeMeta, syntax.OpEscapeChar, syntax.OpNegCharClass, syntax.OpGroup: + return x.Value == y.Value + default: + return false + } +} + +func (c *regexpSimplifyChecker) canCombine(x, y syntax.Expr) (threshold int, ok bool) { + if x.Op != y.Op { + return 0, false + } + + switch x.Op { + case syntax.OpDot: + return 3, true + + case syntax.OpChar: + if x.Value != y.Value { + return 0, false + } + if x.Value == " " { + return 1, true + } + return 4, true + + case syntax.OpEscapeMeta, syntax.OpEscapeChar: + if x.Value == y.Value { + return 2, true + } + + case syntax.OpCharClass, syntax.OpNegCharClass, syntax.OpGroup: + if x.Value == y.Value { + return 1, true + } + } + + return 0, false +} + +func (c *regexpSimplifyChecker) concatLiteral(e syntax.Expr) string { + if e.Op == syntax.OpConcat && c.allChars(e) { + return e.Value + } + return "" +} + +func (c *regexpSimplifyChecker) allChars(e syntax.Expr) bool { + for _, a := range e.Args { + if a.Op != syntax.OpChar { + return false + } + } + return true +} + +func (c *regexpSimplifyChecker) factorPrefixSuffix(alt syntax.Expr) bool { + // TODO: more forms of prefixes/suffixes? + // + // A more generalized algorithm could handle `fo|fo1|fo2` -> `fo[12]?`. + // but it's an open question whether the latter form universally better. + // + // Right now it handles only the simplest cases: + // `http|https` -> `https?` + // `xfoo|foo` -> `x?foo` + if len(alt.Args) != 2 { + return false + } + x := c.concatLiteral(alt.Args[0]) + y := c.concatLiteral(alt.Args[1]) + if x == y { + return false // Reject non-literals and identical strings early + } + + // Let x be a shorter string. + if len(x) > len(y) { + x, y = y, x + } + // Do we have a common prefix? + tail := strings.TrimPrefix(y, x) + if len(tail) <= utf8.UTFMax && utf8.RuneCountInString(tail) == 1 { + c.out.WriteString(x + tail + "?") + c.score++ + return true + } + // Do we have a common suffix? + head := strings.TrimSuffix(y, x) + if len(head) <= utf8.UTFMax && utf8.RuneCountInString(head) == 1 { + c.out.WriteString(head + "?" + x) + c.score++ + return true + } + return false +} + +func (c *regexpSimplifyChecker) walkAlt(alt syntax.Expr) { + // `x|y|z` -> `[xyz]`. + if c.allChars(alt) { + c.score++ + c.out.WriteString("[") + for _, e := range alt.Args { + c.out.WriteString(e.Value) + } + c.out.WriteString("]") + return + } + + if c.factorPrefixSuffix(alt) { + return + } + + for i, e := range alt.Args { + c.walk(e) + if i != len(alt.Args)-1 { + c.out.WriteString("|") + } + } +} + +func (c *regexpSimplifyChecker) walkConcat(concat syntax.Expr) { + i := 0 + for i < len(concat.Args) { + x := concat.Args[i] + c.walk(x) + i++ + + if i >= len(concat.Args) { + break + } + + // Try merging `xy*` into `x+` where x=y. + if concat.Args[i].Op == syntax.OpStar { + if c.canMerge(x, concat.Args[i].Args[0]) { + c.out.WriteString("+") + c.score++ + i++ + continue + } + } + + // Try combining `xy` into `x{2}` where x=y. + threshold, ok := c.canCombine(x, concat.Args[i]) + if !ok { + continue + } + n := 1 // Can combine at least 1 pair. 
+ for j := i + 1; j < len(concat.Args); j++ { + _, ok := c.canCombine(x, concat.Args[j]) + if !ok { + break + } + n++ + } + if n >= threshold { + fmt.Fprintf(c.out, "{%d}", n+1) + c.score++ + i += n + } + } +} + +func (c *regexpSimplifyChecker) simplifyCharRange(rng syntax.Expr) string { + if rng.Args[0].Op != syntax.OpChar || rng.Args[1].Op != syntax.OpChar { + return "" + } + + lo := rng.Args[0].Value + hi := rng.Args[1].Value + if len(lo) == 1 && len(hi) == 1 { + switch hi[0] - lo[0] { + case 0: + return lo + case 1: + return fmt.Sprintf("%s%s", lo, hi) + case 2: + return fmt.Sprintf("%s%s%s", lo, string(lo[0]+1), hi) + } + } + + return "" +} + +func (c *regexpSimplifyChecker) warn(cause ast.Expr, orig, suggest string) { + c.ctx.Warn(cause, "can re-write `%s` as `%s`", orig, suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go new file mode 100644 index 000000000..ecb3dc9ee --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go @@ -0,0 +1,157 @@ +package checkers + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/go-critic/go-critic/framework/linter" + "github.com/quasilyte/go-ruleguard/ruleguard" +) + +func init() { + var info linter.CheckerInfo + info.Name = "ruleguard" + info.Tags = []string{"style", "experimental"} + info.Params = linter.CheckerParams{ + "rules": { + Value: "", + Usage: "comma-separated list of gorule file paths. Glob patterns such as 'rules-*.go' may be specified", + }, + "debug": { + Value: "", + Usage: "enable debug for the specified named rules group", + }, + "failOnError": { + Value: false, + Usage: "If true, panic when the gorule files contain a syntax error. If false, log and skip rules that contain an error", + }, + } + info.Summary = "Runs user-defined rules using ruleguard linter" + info.Details = "Reads a rules file and turns them into go-critic checkers." + info.Before = `N/A` + info.After = `N/A` + info.Note = "See https://github.com/quasilyte/go-ruleguard." + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return newRuleguardChecker(&info, ctx) + }) +} + +func newRuleguardChecker(info *linter.CheckerInfo, ctx *linter.CheckerContext) (*ruleguardChecker, error) { + c := &ruleguardChecker{ + ctx: ctx, + debugGroup: info.Params.String("debug"), + } + rulesFlag := info.Params.String("rules") + if rulesFlag == "" { + return c, nil + } + failOnErrorFlag := info.Params.Bool("failOnError") + + // TODO(quasilyte): handle initialization errors better when we make + // a transition to the go/analysis framework. + // + // For now, we log error messages and return a ruleguard checker + // with an empty rules set. + + engine := ruleguard.NewEngine() + fset := token.NewFileSet() + filePatterns := strings.Split(rulesFlag, ",") + + parseContext := &ruleguard.ParseContext{ + Fset: fset, + } + + loaded := 0 + for _, filePattern := range filePatterns { + filenames, err := filepath.Glob(strings.TrimSpace(filePattern)) + if err != nil { + // The only possible returned error is ErrBadPattern, when pattern is malformed. 
+ log.Printf("ruleguard init error: %+v", err) + continue + } + for _, filename := range filenames { + data, err := ioutil.ReadFile(filename) + if err != nil { + if failOnErrorFlag { + return nil, fmt.Errorf("ruleguard init error: %+v", err) + } + log.Printf("ruleguard init error: %+v", err) + continue + } + if err := engine.Load(parseContext, filename, bytes.NewReader(data)); err != nil { + if failOnErrorFlag { + return nil, fmt.Errorf("ruleguard init error: %+v", err) + } + log.Printf("ruleguard init error: %+v", err) + continue + } + loaded++ + } + } + + if loaded != 0 { + c.engine = engine + } + return c, nil +} + +type ruleguardChecker struct { + ctx *linter.CheckerContext + + debugGroup string + engine *ruleguard.Engine +} + +func (c *ruleguardChecker) WalkFile(f *ast.File) { + if c.engine == nil { + return + } + + type ruleguardReport struct { + node ast.Node + message string + } + var reports []ruleguardReport + + ctx := &ruleguard.RunContext{ + Debug: c.debugGroup, + DebugPrint: func(s string) { + fmt.Fprintln(os.Stderr, s) + }, + Pkg: c.ctx.Pkg, + Types: c.ctx.TypesInfo, + Sizes: c.ctx.SizesInfo, + Fset: c.ctx.FileSet, + Report: func(_ ruleguard.GoRuleInfo, n ast.Node, msg string, _ *ruleguard.Suggestion) { + // TODO(quasilyte): investigate whether we should add a rule name as + // a message prefix here. + reports = append(reports, ruleguardReport{ + node: n, + message: msg, + }) + }, + } + + if err := c.engine.Run(ctx, f); err != nil { + // Normally this should never happen, but since + // we don't have a better mechanism to report errors, + // emit a warning. + c.ctx.Warn(f, "execution error: %v", err) + } + + sort.Slice(reports, func(i, j int) bool { + return reports[i].message < reports[j].message + }) + for _, report := range reports { + c.ctx.Warn(report.node, report.message) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go new file mode 100644 index 000000000..b369a4344 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go @@ -0,0 +1,84 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "singleCaseSwitch" + info.Tags = []string{"style"} + info.Summary = "Detects switch statements that could be better written as if statement" + info.Before = ` +switch x := x.(type) { +case int: + body() +}` + info.After = ` +if x, ok := x.(int); ok { + body() +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&singleCaseSwitchChecker{ctx: ctx}), nil + }) +} + +type singleCaseSwitchChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *singleCaseSwitchChecker) VisitStmt(stmt ast.Stmt) { + switch stmt := stmt.(type) { + case *ast.SwitchStmt: + c.checkSwitchStmt(stmt, stmt.Body) + case *ast.TypeSwitchStmt: + c.checkSwitchStmt(stmt, stmt.Body) + } +} + +func (c *singleCaseSwitchChecker) checkSwitchStmt(stmt ast.Stmt, body *ast.BlockStmt) { + if len(body.List) != 1 { + return + } + cc := body.List[0].(*ast.CaseClause) + if c.hasBreak(cc) { + return + } + switch { + case cc.List == nil: + c.warnDefault(stmt) + case len(cc.List) == 1: + c.warn(stmt) + } +} + +func (c *singleCaseSwitchChecker) hasBreak(stmt 
ast.Stmt) bool { + found := false + astutil.Apply(stmt, func(cur *astutil.Cursor) bool { + switch n := cur.Node().(type) { + case *ast.BranchStmt: + if n.Tok == token.BREAK { + found = true + } + case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt, *ast.SwitchStmt: + return false + } + return true + }, nil) + return found +} + +func (c *singleCaseSwitchChecker) warn(stmt ast.Stmt) { + c.ctx.Warn(stmt, "should rewrite switch statement to if statement") +} + +func (c *singleCaseSwitchChecker) warnDefault(stmt ast.Stmt) { + c.ctx.Warn(stmt, "found switch with default case only") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go new file mode 100644 index 000000000..a08ef0a5c --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go @@ -0,0 +1,72 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astfmt" +) + +func init() { + var info linter.CheckerInfo + info.Name = "sloppyLen" + info.Tags = []string{"style"} + info.Summary = "Detects usage of `len` when result is obvious or doesn't make sense" + info.Before = ` +len(arr) >= 0 // Sloppy +len(arr) <= 0 // Sloppy +len(arr) < 0 // Doesn't make sense at all` + info.After = ` +len(arr) > 0 +len(arr) == 0` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&sloppyLenChecker{ctx: ctx}), nil + }) +} + +type sloppyLenChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *sloppyLenChecker) VisitExpr(x ast.Expr) { + expr, ok := x.(*ast.BinaryExpr) + if !ok { + return + } + + if expr.Op == token.LSS || expr.Op == token.GEQ || expr.Op == token.LEQ { + if c.isLenCall(expr.X) && c.isZero(expr.Y) { + c.warn(expr) + } + } +} + +func (c *sloppyLenChecker) isLenCall(x ast.Expr) bool { + call, ok := x.(*ast.CallExpr) + return ok && qualifiedName(call.Fun) == "len" && len(call.Args) == 1 +} + +func (c *sloppyLenChecker) isZero(x ast.Expr) bool { + value, ok := x.(*ast.BasicLit) + return ok && value.Value == "0" +} + +func (c *sloppyLenChecker) warn(cause *ast.BinaryExpr) { + info := "" + switch cause.Op { + case token.LSS: + info = "is always false" + case token.GEQ: + info = "is always true" + case token.LEQ: + expr := astcopy.BinaryExpr(cause) + expr.Op = token.EQL + info = astfmt.Sprintf("can be %s", expr) + } + c.ctx.Warn(cause, "%s %s", cause, info) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go new file mode 100644 index 000000000..2f9ac62e1 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go @@ -0,0 +1,80 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "sloppyReassign" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects suspicious/confusing re-assignments" + info.Before = `if err = f(); err != nil { return err }` + info.After = `if err := f(); err != nil { return err }` + 
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&sloppyReassignChecker{ctx: ctx}), nil + }) +} + +type sloppyReassignChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *sloppyReassignChecker) VisitStmt(stmt ast.Stmt) { + // Right now only check assignments in if statements init. + ifStmt := astcast.ToIfStmt(stmt) + assign := astcast.ToAssignStmt(ifStmt.Init) + if assign.Tok != token.ASSIGN { + return + } + + // TODO(quasilyte): is handling of multi-value assignments worthwhile? + if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 { + return + } + + // TODO(quasilyte): handle not only the simplest, return-only case. + body := ifStmt.Body.List + if len(body) != 1 { + return + } + + // Variable that is being re-assigned. + reAssigned := astcast.ToIdent(assign.Lhs[0]) + if reAssigned.Name == "" { + return + } + + // TODO(quasilyte): handle not only nil comparisons. + eqToNil := &ast.BinaryExpr{ + Op: token.NEQ, + X: reAssigned, + Y: &ast.Ident{Name: "nil"}, + } + if !astequal.Expr(ifStmt.Cond, eqToNil) { + return + } + + results := astcast.ToReturnStmt(body[0]).Results + for _, res := range results { + if astequal.Expr(reAssigned, res) { + c.warnAssignToDefine(assign, reAssigned.Name) + break + } + } +} + +func (c *sloppyReassignChecker) warnAssignToDefine(assign *ast.AssignStmt, name string) { + suggest := astcopy.AssignStmt(assign) + suggest.Tok = token.DEFINE + c.ctx.Warn(assign, "re-assignment to `%s` can be replaced with `%s`", name, suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go new file mode 100644 index 000000000..243925368 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go @@ -0,0 +1,75 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "sloppyTypeAssert" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects redundant type assertions" + info.Before = ` +func f(r io.Reader) interface{} { + return r.(interface{}) +} +` + info.After = ` +func f(r io.Reader) interface{} { + return r +} +` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&sloppyTypeAssertChecker{ctx: ctx}), nil + }) +} + +type sloppyTypeAssertChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *sloppyTypeAssertChecker) VisitExpr(expr ast.Expr) { + assert := astcast.ToTypeAssertExpr(expr) + if assert.Type == nil { + return + } + + toType := c.ctx.TypeOf(expr) + fromType := c.ctx.TypeOf(assert.X) + + if types.Identical(toType, fromType) { + c.warnIdentical(expr) + return + } + + toIface, ok := toType.Underlying().(*types.Interface) + if !ok { + return + } + + switch { + case toIface.Empty(): + c.warnEmpty(expr) + case types.Implements(fromType, toIface): + c.warnImplements(expr, assert.X) + } +} + +func (c *sloppyTypeAssertChecker) warnIdentical(cause ast.Expr) { + c.ctx.Warn(cause, "type assertion from/to types are identical") +} + +func (c *sloppyTypeAssertChecker) warnEmpty(cause ast.Expr) { + c.ctx.Warn(cause, "type assertion to interface{} may be redundant") +} + +func (c *sloppyTypeAssertChecker) 
warnImplements(cause, val ast.Expr) { + c.ctx.Warn(cause, "type assertion may be redundant as %s always implements selected interface", val) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go new file mode 100644 index 000000000..29550da3f --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go @@ -0,0 +1,135 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "sortSlice" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects suspicious sort.Slice calls" + info.Before = `sort.Slice(xs, func(i, j) bool { return keys[i] < keys[j] })` + info.After = `sort.Slice(kv, func(i, j) bool { return kv[i].key < kv[j].key })` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&sortSliceChecker{ctx: ctx}), nil + }) +} + +type sortSliceChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *sortSliceChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + if len(call.Args) != 2 { + return + } + switch qualifiedName(call.Fun) { + case "sort.Slice", "sort.SliceStable": + // OK. + default: + return + } + + slice := c.unwrapSlice(call.Args[0]) + lessFunc, ok := call.Args[1].(*ast.FuncLit) + if !ok { + return + } + if !typep.SideEffectFree(c.ctx.TypesInfo, slice) { + return // Don't check unpredictable slice values + } + + ivar, jvar := c.paramIdents(lessFunc.Type) + if ivar == nil || jvar == nil { + return + } + + if len(lessFunc.Body.List) != 1 { + return + } + ret, ok := lessFunc.Body.List[0].(*ast.ReturnStmt) + if !ok { + return + } + cmp := astcast.ToBinaryExpr(astutil.Unparen(ret.Results[0])) + if !typep.SideEffectFree(c.ctx.TypesInfo, cmp) { + return + } + switch cmp.Op { + case token.LSS, token.LEQ, token.GTR, token.GEQ: + // Both cmp.X and cmp.Y are expected to be some expressions + // over the `slice` expression. In the simplest case, + // it's a `slice[i] slice[j]`. + if !c.containsSlice(cmp.X, slice) && !c.containsSlice(cmp.Y, slice) { + c.warnSlice(cmp, slice) + } + + // This one is more about the style, but can reveal potential issue + // or misprint in sorting condition. + // We give a warn if X contains indexing with `i` index and Y + // contains indexing with `j`. + if c.containsIndex(cmp.X, jvar) && c.containsIndex(cmp.Y, ivar) { + c.warnIndex(cmp, ivar, jvar) + } + } +} + +func (c *sortSliceChecker) paramIdents(e *ast.FuncType) (ivar, jvar *ast.Ident) { + // Covers both `i, j int` and `i int, j int`. + idents := make([]*ast.Ident, 0, 2) + for _, field := range e.Params.List { + idents = append(idents, field.Names...) 
+ } + if len(idents) == 2 { + return idents[0], idents[1] + } + return nil, nil +} + +func (c *sortSliceChecker) unwrapSlice(e ast.Expr) ast.Expr { + switch e := e.(type) { + case *ast.ParenExpr: + return c.unwrapSlice(e.X) + case *ast.SliceExpr: + return e.X + default: + return e + } +} + +func (c *sortSliceChecker) containsIndex(e, index ast.Expr) bool { + return lintutil.ContainsNode(e, func(n ast.Node) bool { + indexing, ok := n.(*ast.IndexExpr) + if !ok { + return false + } + return astequal.Expr(indexing.Index, index) + }) +} + +func (c *sortSliceChecker) containsSlice(e, slice ast.Expr) bool { + return lintutil.ContainsNode(e, func(n ast.Node) bool { + return astequal.Node(n, slice) + }) +} + +func (c *sortSliceChecker) warnSlice(cause ast.Node, slice ast.Expr) { + c.ctx.Warn(cause, "cmp func must use %s slice in comparison", slice) +} + +func (c *sortSliceChecker) warnIndex(cause ast.Node, ivar, jvar *ast.Ident) { + c.ctx.Warn(cause, "unusual order of {%s,%s} params in comparison", ivar, jvar) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go new file mode 100644 index 000000000..eb3b49d88 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go @@ -0,0 +1,167 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "sqlQuery" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects issue in Query() and Exec() calls" + info.Before = `_, err := db.Query("UPDATE ...")` + info.After = `_, err := db.Exec("UPDATE ...")` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&sqlQueryChecker{ctx: ctx}), nil + }) +} + +type sqlQueryChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *sqlQueryChecker) VisitStmt(stmt ast.Stmt) { + assign := astcast.ToAssignStmt(stmt) + if len(assign.Lhs) != 2 { // Query() has 2 return values. + return + } + if len(assign.Rhs) != 1 { + return + } + + // If Query() is called, but first return value is ignored, + // there is no way to close/read the returned rows. + // This can cause a connection leak. + if id, ok := assign.Lhs[0].(*ast.Ident); ok && id.Name != "_" { + return + } + + call := astcast.ToCallExpr(assign.Rhs[0]) + funcExpr := astcast.ToSelectorExpr(call.Fun) + if !c.funcIsQuery(funcExpr) { + return + } + + if c.typeHasExecMethod(c.ctx.TypeOf(funcExpr.X)) { + c.warnAndSuggestExec(funcExpr) + } else { + c.warnRowsIgnored(funcExpr) + } +} + +func (c *sqlQueryChecker) funcIsQuery(funcExpr *ast.SelectorExpr) bool { + if funcExpr.Sel == nil { + return false + } + switch funcExpr.Sel.Name { + case "Query", "QueryContext": + // Stdlib and friends. + case "Queryx", "QueryxContext": + // sqlx. + default: + return false + } + + // To avoid false positives (unrelated types can have Query method) + // check that the 1st returned type has Row-like name. 
+ typ, ok := c.ctx.TypeOf(funcExpr).Underlying().(*types.Signature) + if !ok || typ.Results() == nil || typ.Results().Len() != 2 { + return false + } + if !c.typeIsRowsLike(typ.Results().At(0).Type()) { + return false + } + + return true +} + +func (c *sqlQueryChecker) typeIsRowsLike(typ types.Type) bool { + switch typ := typ.(type) { + case *types.Pointer: + return c.typeIsRowsLike(typ.Elem()) + case *types.Named: + return typ.Obj().Name() == "Rows" + default: + return false + } +} + +func (c *sqlQueryChecker) funcIsExec(fn *types.Func) bool { + if fn.Name() != "Exec" { + return false + } + + // Expect exactly 2 results. + sig := fn.Type().(*types.Signature) + if sig.Results() == nil || sig.Results().Len() != 2 { + return false + } + + // Expect at least 1 param and it should be a string (query). + params := sig.Params() + if params == nil || params.Len() == 0 { + return false + } + if typ, ok := params.At(0).Type().(*types.Basic); !ok || typ.Kind() != types.String { + return false + } + + return true +} + +func (c *sqlQueryChecker) typeHasExecMethod(typ types.Type) bool { + switch typ := typ.(type) { + case *types.Struct: + for i := 0; i < typ.NumFields(); i++ { + if c.typeHasExecMethod(typ.Field(i).Type()) { + return true + } + } + case *types.Interface: + for i := 0; i < typ.NumMethods(); i++ { + if c.funcIsExec(typ.Method(i)) { + return true + } + } + case *types.Pointer: + return c.typeHasExecMethod(typ.Elem()) + case *types.Named: + for i := 0; i < typ.NumMethods(); i++ { + if c.funcIsExec(typ.Method(i)) { + return true + } + } + switch ut := typ.Underlying().(type) { + case *types.Interface: + return c.typeHasExecMethod(ut) + case *types.Struct: + // Check embedded types. + for i := 0; i < ut.NumFields(); i++ { + field := ut.Field(i) + if !field.Embedded() { + continue + } + if c.typeHasExecMethod(field.Type()) { + return true + } + } + } + } + + return false +} + +func (c *sqlQueryChecker) warnAndSuggestExec(funcExpr *ast.SelectorExpr) { + c.ctx.Warn(funcExpr, "use %s.Exec() if returned result is not needed", funcExpr.X) +} + +func (c *sqlQueryChecker) warnRowsIgnored(funcExpr *ast.SelectorExpr) { + c.ctx.Warn(funcExpr, "ignoring Query() rows result may lead to a connection leak") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go b/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go new file mode 100644 index 000000000..bb9f16c07 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go @@ -0,0 +1,47 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "stringXbytes" + info.Tags = []string{"style"} + info.Summary = "Detects redundant conversions between string and []byte" + info.Before = `copy(b, []byte(s))` + info.After = `copy(b, s)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&stringXbytes{ctx: ctx}), nil + }) +} + +type stringXbytes struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *stringXbytes) VisitExpr(expr ast.Expr) { + x, ok := expr.(*ast.CallExpr) + if !ok || qualifiedName(x.Fun) != "copy" || len(x.Args) != 2 { + return + } + + src := x.Args[1] + + byteCast, ok := src.(*ast.CallExpr) + if ok && typep.IsTypeExpr(c.ctx.TypesInfo, byteCast.Fun) && + 
typep.HasStringProp(c.ctx.TypeOf(byteCast.Args[0])) { + + c.warn(byteCast, byteCast.Args[0]) + } +} + +func (c *stringXbytes) warn(cause *ast.CallExpr, suggestion ast.Expr) { + c.ctx.Warn(cause, "can simplify `%s` to `%s`", cause, suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go b/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go new file mode 100644 index 000000000..0501a0ba1 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go @@ -0,0 +1,49 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "switchTrue" + info.Tags = []string{"style"} + info.Summary = "Detects switch-over-bool statements that use explicit `true` tag value" + info.Before = ` +switch true { +case x > y: +}` + info.After = ` +switch { +case x > y: +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&switchTrueChecker{ctx: ctx}), nil + }) +} + +type switchTrueChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *switchTrueChecker) VisitStmt(stmt ast.Stmt) { + if stmt, ok := stmt.(*ast.SwitchStmt); ok { + if qualifiedName(stmt.Tag) == "true" { + c.warn(stmt) + } + } +} + +func (c *switchTrueChecker) warn(cause *ast.SwitchStmt) { + if cause.Init == nil { + c.ctx.Warn(cause, "replace 'switch true {}' with 'switch {}'") + } else { + c.ctx.Warn(cause, "replace 'switch %s; true {}' with 'switch %s; {}'", + cause.Init, cause.Init) + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go new file mode 100644 index 000000000..4d4dcc26e --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go @@ -0,0 +1,54 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "tooManyResultsChecker" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Params = linter.CheckerParams{ + "maxResults": { + Value: 5, + Usage: "maximum number of results", + }, + } + info.Summary = "Detects function with too many results" + info.Before = `func fn() (a, b, c, d float32, _ int, _ bool)` + info.After = `func fn() (resultStruct, bool)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := astwalk.WalkerForFuncDecl(&tooManyResultsChecker{ + ctx: ctx, + maxParams: info.Params.Int("maxResults"), + }) + return c, nil + }) +} + +type tooManyResultsChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + maxParams int +} + +func (c *tooManyResultsChecker) VisitFuncDecl(decl *ast.FuncDecl) { + typ := c.ctx.TypeOf(decl.Name) + sig, ok := typ.(*types.Signature) + if !ok { + return + } + + if count := sig.Results().Len(); count > c.maxParams { + c.warn(decl) + } +} + +func (c *tooManyResultsChecker) warn(n ast.Node) { + c.ctx.Warn(n, "function has more than %d results, consider to simplify the function", c.maxParams) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go new file mode 100644 index 
000000000..cd2346c78 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go @@ -0,0 +1,117 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "truncateCmp" + info.Tags = []string{"diagnostic", "experimental"} + info.Params = linter.CheckerParams{ + "skipArchDependent": { + Value: true, + Usage: "whether to skip int/uint/uintptr types", + }, + } + info.Summary = "Detects potential truncation issues when comparing ints of different sizes" + info.Before = ` +func f(x int32, y int16) bool { + return int16(x) < y +}` + info.After = ` +func f(x int32, int16) bool { + return x < int32(y) +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &truncateCmpChecker{ctx: ctx} + c.skipArchDependent = info.Params.Bool("skipArchDependent") + return astwalk.WalkerForExpr(c), nil + }) +} + +type truncateCmpChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + skipArchDependent bool +} + +func (c *truncateCmpChecker) VisitExpr(expr ast.Expr) { + cmp := astcast.ToBinaryExpr(expr) + switch cmp.Op { + case token.LSS, token.GTR, token.LEQ, token.GEQ, token.EQL, token.NEQ: + if astp.IsBasicLit(cmp.X) || astp.IsBasicLit(cmp.Y) { + return // Don't bother about untyped consts + } + leftCast := c.isTruncCast(cmp.X) + rightCast := c.isTruncCast(cmp.Y) + switch { + case leftCast && rightCast: + return + case leftCast: + c.checkCmp(cmp.X, cmp.Y) + case rightCast: + c.checkCmp(cmp.Y, cmp.X) + } + default: + return + } +} + +func (c *truncateCmpChecker) isTruncCast(x ast.Expr) bool { + switch astcast.ToIdent(astcast.ToCallExpr(x).Fun).Name { + case "int8", "int16", "int32", "uint8", "uint16", "uint32": + return true + default: + return false + } +} + +func (c *truncateCmpChecker) checkCmp(cmpX, cmpY ast.Expr) { + // Check if we have a cast to a type that can truncate. + xcast := astcast.ToCallExpr(cmpX) + if len(xcast.Args) != 1 { + return // Just in case of the shadowed builtin + } + + x := xcast.Args[0] + y := cmpY + + // Check that both x and y are signed or unsigned int-typed. 
+ xtyp, ok := c.ctx.TypeOf(x).Underlying().(*types.Basic) + if !ok || xtyp.Info()&types.IsInteger == 0 { + return + } + ytyp, ok := c.ctx.TypeOf(y).Underlying().(*types.Basic) + if !ok || xtyp.Info() != ytyp.Info() { + return + } + + xsize := c.ctx.SizesInfo.Sizeof(xtyp) + ysize := c.ctx.SizesInfo.Sizeof(ytyp) + if xsize <= ysize { + return + } + + if c.skipArchDependent { + switch xtyp.Kind() { + case types.Int, types.Uint, types.Uintptr: + return + } + } + + c.warn(xcast, xsize*8, ysize*8, xtyp.String()) +} + +func (c *truncateCmpChecker) warn(cause ast.Expr, xsize, ysize int64, suggest string) { + c.ctx.Warn(cause, "truncation in comparison %d->%d bit; cast the other operand to %s instead", xsize, ysize, suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go new file mode 100644 index 000000000..d87657c3b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go @@ -0,0 +1,132 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "typeAssertChain" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects repeated type assertions and suggests to replace them with type switch statement" + info.Before = ` +if x, ok := v.(T1); ok { + // Code A, uses x. +} else if x, ok := v.(T2); ok { + // Code B, uses x. +} else if x, ok := v.(T3); ok { + // Code C, uses x. +}` + info.After = ` +switch x := v.(T1) { +case cond1: + // Code A, uses x. +case cond2: + // Code B, uses x. +default: + // Code C, uses x. 
+}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&typeAssertChainChecker{ctx: ctx}), nil + }) +} + +type typeAssertChainChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + cause *ast.IfStmt + visited map[*ast.IfStmt]bool + typeSet lintutil.AstSet +} + +func (c *typeAssertChainChecker) EnterFunc(fn *ast.FuncDecl) bool { + if fn.Body == nil { + return false + } + c.visited = make(map[*ast.IfStmt]bool) + return true +} + +func (c *typeAssertChainChecker) VisitStmt(stmt ast.Stmt) { + ifstmt, ok := stmt.(*ast.IfStmt) + if !ok || c.visited[ifstmt] || ifstmt.Init == nil { + return + } + assertion := c.getTypeAssert(ifstmt) + if assertion == nil { + return + } + c.cause = ifstmt + c.checkIfStmt(ifstmt, assertion) +} + +func (c *typeAssertChainChecker) getTypeAssert(ifstmt *ast.IfStmt) *ast.TypeAssertExpr { + assign := astcast.ToAssignStmt(ifstmt.Init) + if len(assign.Lhs) != 2 || len(assign.Rhs) != 1 { + return nil + } + if !astp.IsIdent(assign.Lhs[0]) || assign.Tok != token.DEFINE { + return nil + } + if !astequal.Expr(assign.Lhs[1], ifstmt.Cond) { + return nil + } + + assertion, ok := assign.Rhs[0].(*ast.TypeAssertExpr) + if !ok { + return nil + } + return assertion +} + +func (c *typeAssertChainChecker) checkIfStmt(stmt *ast.IfStmt, assertion *ast.TypeAssertExpr) { + if c.countTypeAssertions(stmt, assertion) >= 2 { + c.warn() + } +} + +func (c *typeAssertChainChecker) countTypeAssertions(stmt *ast.IfStmt, assertion *ast.TypeAssertExpr) int { + c.typeSet.Clear() + + count := 1 + x := assertion.X + c.typeSet.Insert(assertion.Type) + for { + e, ok := stmt.Else.(*ast.IfStmt) + if !ok { + return count + } + assertion = c.getTypeAssert(e) + if assertion == nil { + return count + } + if !c.typeSet.Insert(assertion.Type) { + // Asserted type is duplicated. + // Type switch does not permit duplicate cases, + // so give up. + return 0 + } + if !astequal.Expr(x, assertion.X) { + // Mixed type asserting chain. + // Can't be easily translated to a type switch. 
+ return 0 + } + stmt = e + count++ + c.visited[e] = true + } +} + +func (c *typeAssertChainChecker) warn() { + c.ctx.Warn(c.cause, "rewrite if-else to type switch statement") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go new file mode 100644 index 000000000..491e71dfd --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go @@ -0,0 +1,88 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "typeDefFirst" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects method declarations preceding the type definition itself" + info.Before = ` +func (r rec) Method() {} +type rec struct{} +` + info.After = ` +type rec struct{} +func (r rec) Method() {} +` + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return &typeDefFirstChecker{ + ctx: ctx, + }, nil + }) +} + +type typeDefFirstChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + trackedTypes map[string]bool +} + +func (c *typeDefFirstChecker) WalkFile(f *ast.File) { + if len(f.Decls) == 0 { + return + } + + c.trackedTypes = make(map[string]bool) + for _, decl := range f.Decls { + c.walkDecl(decl) + } +} + +func (c *typeDefFirstChecker) walkDecl(decl ast.Decl) { + switch decl := decl.(type) { + case *ast.FuncDecl: + if decl.Recv == nil { + return + } + receiver := decl.Recv.List[0] + typeName := c.receiverType(receiver.Type) + c.trackedTypes[typeName] = true + + case *ast.GenDecl: + if decl.Tok != token.TYPE { + return + } + for _, spec := range decl.Specs { + spec, ok := spec.(*ast.TypeSpec) + if !ok { + return + } + typeName := spec.Name.Name + if val, ok := c.trackedTypes[typeName]; ok && val { + c.warn(decl, typeName) + } + } + } +} + +func (c *typeDefFirstChecker) receiverType(e ast.Expr) string { + switch e := e.(type) { + case *ast.StarExpr: + return c.receiverType(e.X) + case *ast.Ident: + return e.Name + default: + panic("unreachable") + } +} + +func (c *typeDefFirstChecker) warn(cause ast.Node, typeName string) { + c.ctx.Warn(cause, "definition of type '%s' should appear before its methods", typeName) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go new file mode 100644 index 000000000..6bbec5037 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go @@ -0,0 +1,97 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "typeSwitchVar" + info.Tags = []string{"style"} + info.Summary = "Detects type switches that can benefit from type guard clause with variable" + info.Before = ` +switch v.(type) { +case int: + return v.(int) +case point: + return v.(point).x + v.(point).y +default: + return 0 +}` + info.After = ` +switch v := v.(type) { +case int: + return v +case point: + return v.x + v.y +default: + return 0 +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) 
(linter.FileWalker, error) { + return astwalk.WalkerForStmt(&typeSwitchVarChecker{ctx: ctx}), nil + }) +} + +type typeSwitchVarChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + count int +} + +func (c *typeSwitchVarChecker) VisitStmt(stmt ast.Stmt) { + if stmt, ok := stmt.(*ast.TypeSwitchStmt); ok { + c.count = 0 + c.checkTypeSwitch(stmt) + } +} + +func (c *typeSwitchVarChecker) checkTypeSwitch(root *ast.TypeSwitchStmt) { + if astp.IsAssignStmt(root.Assign) { + return // Already with type guard + } + // Must be a *ast.ExprStmt then. + expr := root.Assign.(*ast.ExprStmt).X.(*ast.TypeAssertExpr).X + object := c.ctx.TypesInfo.ObjectOf(identOf(expr)) + if object == nil { + return // Give up: can't handle shadowing without object + } + + for _, clause := range root.Body.List { + clause := clause.(*ast.CaseClause) + // Multiple types in a list mean that assert.X will have + // a type of interface{} inside clause body. + // We are looking for precise type case. + if len(clause.List) != 1 { + continue + } + // Create artificial node just for matching. + assert1 := ast.TypeAssertExpr{X: expr, Type: clause.List[0]} + for _, stmt := range clause.Body { + assert2 := lintutil.FindNode(stmt, func(x ast.Node) bool { + return astequal.Node(&assert1, x) + }) + if object == c.ctx.TypesInfo.ObjectOf(identOf(assert2)) { + c.count++ + break + } + } + } + if c.count > 0 { + c.warn(root) + } +} + +func (c *typeSwitchVarChecker) warn(n ast.Node) { + msg := "case" + if c.count > 1 { + msg = "cases" + } + c.ctx.Warn(n, "%d "+msg+" can benefit from type switch with assignment", c.count) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go new file mode 100644 index 000000000..a3f02e14c --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go @@ -0,0 +1,86 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astp" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "typeUnparen" + info.Tags = []string{"style", "opinionated"} + info.Summary = "Detects unneded parenthesis inside type expressions and suggests to remove them" + info.Before = `type foo [](func([](func())))` + info.After = `type foo []func([]func())` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForTypeExpr(&typeUnparenChecker{ctx: ctx}, ctx.TypesInfo), nil + }) +} + +type typeUnparenChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *typeUnparenChecker) VisitTypeExpr(x ast.Expr) { + switch x := x.(type) { + case *ast.ParenExpr: + switch x.X.(type) { + case *ast.StructType: + c.ctx.Warn(x, "could simplify (struct{...}) to struct{...}") + case *ast.InterfaceType: + c.ctx.Warn(x, "could simplify (interface{...}) to interface{...}") + default: + c.warn(x, c.unparenExpr(astcopy.Expr(x))) + } + default: + c.checkTypeExpr(x) + } +} + +func (c *typeUnparenChecker) checkTypeExpr(x ast.Expr) { + switch x := x.(type) { + case *ast.ArrayType: + // Arrays require extra care: we don't want to unparen + // length expression as they are not type expressions. 
+ if !c.hasParens(x.Elt) { + return + } + noParens := astcopy.ArrayType(x) + noParens.Elt = c.unparenExpr(noParens.Elt) + c.warn(x, noParens) + case *ast.StructType, *ast.InterfaceType: + // Only nested fields are to be reported. + default: + if !c.hasParens(x) { + return + } + c.warn(x, c.unparenExpr(astcopy.Expr(x))) + } +} + +func (c *typeUnparenChecker) hasParens(x ast.Expr) bool { + return lintutil.ContainsNode(x, astp.IsParenExpr) +} + +func (c *typeUnparenChecker) unparenExpr(x ast.Expr) ast.Expr { + // Replace every paren expr with expression it encloses. + return astutil.Apply(x, nil, func(cur *astutil.Cursor) bool { + if paren, ok := cur.Node().(*ast.ParenExpr); ok { + cur.Replace(paren.X) + } + return true + }).(ast.Expr) +} + +func (c *typeUnparenChecker) warn(cause, noParens ast.Expr) { + c.SkipChilds = true + c.ctx.Warn(cause, "could simplify %s to %s", cause, noParens) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go new file mode 100644 index 000000000..d0426a9a5 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go @@ -0,0 +1,127 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "underef" + info.Tags = []string{"style"} + info.Params = linter.CheckerParams{ + "skipRecvDeref": { + Value: true, + Usage: "whether to skip (*x).method() calls where x is a pointer receiver", + }, + } + info.Summary = "Detects dereference expressions that can be omitted" + info.Before = ` +(*k).field = 5 +v := (*a)[5] // only if a is array` + info.After = ` +k.field = 5 +v := a[5]` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &underefChecker{ctx: ctx} + c.skipRecvDeref = info.Params.Bool("skipRecvDeref") + return astwalk.WalkerForExpr(c), nil + }) +} + +type underefChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + skipRecvDeref bool +} + +func (c *underefChecker) VisitExpr(expr ast.Expr) { + switch n := expr.(type) { + case *ast.SelectorExpr: + expr := astcast.ToParenExpr(n.X) + if c.skipRecvDeref && c.isPtrRecvMethodCall(n.Sel) { + return + } + + if expr, ok := expr.X.(*ast.StarExpr); ok { + if c.checkStarExpr(expr) { + c.warnSelect(n) + } + } + case *ast.IndexExpr: + expr := astcast.ToParenExpr(n.X) + if expr, ok := expr.X.(*ast.StarExpr); ok { + if !c.checkStarExpr(expr) { + return + } + if c.checkArray(expr) { + c.warnArray(n) + } + } + } +} + +func (c *underefChecker) isPtrRecvMethodCall(fn *ast.Ident) bool { + typ, ok := c.ctx.TypeOf(fn).(*types.Signature) + if ok && typ != nil && typ.Recv() != nil { + _, ok := typ.Recv().Type().(*types.Pointer) + return ok + } + return false +} + +func (c *underefChecker) underef(x *ast.ParenExpr) ast.Expr { + // If there is only 1 deref, can remove parenthesis, + // otherwise can remove StarExpr only. + dereferenced := x.X.(*ast.StarExpr).X + if astp.IsStarExpr(dereferenced) { + return &ast.ParenExpr{X: dereferenced} + } + return dereferenced +} + +func (c *underefChecker) warnSelect(expr *ast.SelectorExpr) { + // TODO: add () to function output. 
+ c.ctx.Warn(expr, "could simplify %s to %s.%s", + expr, + c.underef(expr.X.(*ast.ParenExpr)), + expr.Sel.Name) +} + +func (c *underefChecker) warnArray(expr *ast.IndexExpr) { + c.ctx.Warn(expr, "could simplify %s to %s[%s]", + expr, + c.underef(expr.X.(*ast.ParenExpr)), + expr.Index) +} + +// checkStarExpr checks if ast.StarExpr could be simplified. +func (c *underefChecker) checkStarExpr(expr *ast.StarExpr) bool { + typ, ok := c.ctx.TypeOf(expr.X).Underlying().(*types.Pointer) + if !ok { + return false + } + + switch typ.Elem().Underlying().(type) { + case *types.Pointer, *types.Interface: + return false + default: + return true + } +} + +func (c *underefChecker) checkArray(expr *ast.StarExpr) bool { + typ, ok := c.ctx.TypeOf(expr.X).(*types.Pointer) + if !ok { + return false + } + _, ok = typ.Elem().(*types.Array) + return ok +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go new file mode 100644 index 000000000..fab864ec5 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go @@ -0,0 +1,170 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unlabelStmt" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects redundant statement labels" + info.Before = ` +derp: +for x := range xs { + if x == 0 { + break derp + } +}` + info.After = ` +for x := range xs { + if x == 0 { + break + } +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmt(&unlabelStmtChecker{ctx: ctx}), nil + }) +} + +type unlabelStmtChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *unlabelStmtChecker) EnterFunc(fn *ast.FuncDecl) bool { + if fn.Body == nil { + return false + } + // TODO(quasilyte): should not do additional traversal here. + // For now, skip all functions that contain goto statement. + return !lintutil.ContainsNode(fn.Body, func(n ast.Node) bool { + br, ok := n.(*ast.BranchStmt) + return ok && br.Tok == token.GOTO + }) +} + +func (c *unlabelStmtChecker) VisitStmt(stmt ast.Stmt) { + labeled, ok := stmt.(*ast.LabeledStmt) + if !ok || !c.canBreakFrom(labeled.Stmt) { + return + } + + // We have a labeled statement from that have labeled continue/break. + // This is an invariant, since unused label is a compile-time error + // and we're currently skipping functions containing goto. + // + // Also note that Go labels are function-scoped and there + // can be no re-definitions. This means that we don't + // need to care about label shadowing or things like that. + // + // The task is to find cases where labeled branch (continue/break) + // is redundant and can be re-written, decreasing the label usages + // and potentially leading to its redundancy, + // or finding the redundant labels right away. + + name := labeled.Label.Name + + // Simplest case that can prove that label is redundant. + // + // If labeled branch is somewhere inside the statement block itself + // and none of the nested break'able statements refer to that label, + // the label can be removed. 
+ matchUsage := func(n ast.Node) bool { + return c.canBreakFrom(n) && c.usesLabel(c.blockStmtOf(n), name) + } + if !lintutil.ContainsNode(c.blockStmtOf(labeled.Stmt), matchUsage) { + c.warnRedundant(labeled) + return + } + + // Only for loops: if last stmt in list is a loop + // that contains labeled "continue" to the outer loop label, + // it can be refactored to use "break" instead. + if c.isLoop(labeled.Stmt) { + body := c.blockStmtOf(labeled.Stmt) + if len(body.List) == 0 { + return + } + last := body.List[len(body.List)-1] + if !c.isLoop(last) { + return + } + br := lintutil.FindNode(c.blockStmtOf(last), func(n ast.Node) bool { + br, ok := n.(*ast.BranchStmt) + return ok && br.Label != nil && + br.Label.Name == name && br.Tok == token.CONTINUE + }) + if br != nil { + c.warnLabeledContinue(br, name) + } + } +} + +// isLoop reports whether n is a loop of some kind. +// In other words, it tells whether n body can contain "continue" +// associated with n. +func (c *unlabelStmtChecker) isLoop(n ast.Node) bool { + switch n.(type) { + case *ast.ForStmt, *ast.RangeStmt: + return true + default: + return false + } +} + +// canBreakFrom reports whether it is possible to "break" or "continue" from n body. +func (c *unlabelStmtChecker) canBreakFrom(n ast.Node) bool { + switch n.(type) { + case *ast.RangeStmt, *ast.ForStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + return true + default: + return false + } +} + +// blockStmtOf returns body of specified node. +// +// TODO(quasilyte): handle other statements and see if it can be useful +// in other checkers. +func (c *unlabelStmtChecker) blockStmtOf(n ast.Node) *ast.BlockStmt { + switch n := n.(type) { + case *ast.RangeStmt: + return n.Body + case *ast.ForStmt: + return n.Body + case *ast.SwitchStmt: + return n.Body + case *ast.TypeSwitchStmt: + return n.Body + case *ast.SelectStmt: + return n.Body + + default: + return nil + } +} + +// usesLabel reports whether n contains a usage of label. 
+func (c *unlabelStmtChecker) usesLabel(n *ast.BlockStmt, label string) bool { + return lintutil.ContainsNode(n, func(n ast.Node) bool { + branch, ok := n.(*ast.BranchStmt) + return ok && branch.Label != nil && + branch.Label.Name == label && + (branch.Tok == token.CONTINUE || branch.Tok == token.BREAK) + }) +} + +func (c *unlabelStmtChecker) warnRedundant(cause *ast.LabeledStmt) { + c.ctx.Warn(cause, "label %s is redundant", cause.Label) +} + +func (c *unlabelStmtChecker) warnLabeledContinue(cause ast.Node, label string) { + c.ctx.Warn(cause, "change `continue %s` to `break`", label) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go new file mode 100644 index 000000000..cce995d7a --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go @@ -0,0 +1,100 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unlambda" + info.Tags = []string{"style"} + info.Summary = "Detects function literals that can be simplified" + info.Before = `func(x int) int { return fn(x) }` + info.After = `fn` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&unlambdaChecker{ctx: ctx}), nil + }) +} + +type unlambdaChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *unlambdaChecker) VisitExpr(x ast.Expr) { + fn, ok := x.(*ast.FuncLit) + if !ok || len(fn.Body.List) != 1 { + return + } + + ret, ok := fn.Body.List[0].(*ast.ReturnStmt) + if !ok || len(ret.Results) != 1 { + return + } + + result := astcast.ToCallExpr(ret.Results[0]) + callable := qualifiedName(result.Fun) + if callable == "" { + return // Skip tricky cases; only handle simple calls + } + if isBuiltin(callable) { + return // See #762 + } + hasVars := lintutil.ContainsNode(result.Fun, func(n ast.Node) bool { + id, ok := n.(*ast.Ident) + if !ok { + return false + } + obj, ok := c.ctx.TypesInfo.ObjectOf(id).(*types.Var) + if !ok { + return false + } + // Permit only non-pointer struct method values. + return !typep.IsStruct(obj.Type().Underlying()) + }) + if hasVars { + return // See #888 #1007 + } + + fnType := c.ctx.TypeOf(fn) + resultType := c.ctx.TypeOf(result.Fun) + if !types.Identical(fnType, resultType) { + return + } + // Now check that all arguments match the parameters. 
+ n := 0 + for _, params := range fn.Type.Params.List { + if _, ok := params.Type.(*ast.Ellipsis); ok { + if result.Ellipsis == token.NoPos { + return + } + n++ + continue + } + + for _, id := range params.Names { + if !astequal.Expr(id, result.Args[n]) { + return + } + n++ + } + } + + if len(result.Args) == n { + c.warn(fn, callable) + } +} + +func (c *unlambdaChecker) warn(cause ast.Node, suggestion string) { + c.ctx.Warn(cause, "replace `%s` with `%s`", cause, suggestion) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go new file mode 100644 index 000000000..3149d9e87 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go @@ -0,0 +1,103 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unnamedResult" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Params = linter.CheckerParams{ + "checkExported": { + Value: false, + Usage: "whether to check exported functions", + }, + } + info.Summary = "Detects unnamed results that may benefit from names" + info.Before = `func f() (float64, float64)` + info.After = `func f() (x, y float64)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + c := &unnamedResultChecker{ctx: ctx} + c.checkExported = info.Params.Bool("checkExported") + return astwalk.WalkerForFuncDecl(c), nil + }) +} + +type unnamedResultChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + checkExported bool +} + +func (c *unnamedResultChecker) VisitFuncDecl(decl *ast.FuncDecl) { + if c.checkExported && !ast.IsExported(decl.Name.Name) { + return + } + results := decl.Type.Results + switch { + case results == nil: + return // Function has no results + case len(results.List) != 0 && results.List[0].Names != nil: + return // Skip named results + } + + typeName := func(x ast.Expr) string { return c.typeName(c.ctx.TypeOf(x)) } + isError := func(x ast.Expr) bool { return qualifiedName(x) == "error" } + isBool := func(x ast.Expr) bool { return qualifiedName(x) == "bool" } + + // Main difference with case of len=2 is that we permit any + // typ1 as long as second type is either error or bool. 
+ if results.NumFields() == 2 { + typ1, typ2 := results.List[0].Type, results.List[1].Type + name1, name2 := typeName(typ1), typeName(typ2) + cond := (name1 != name2 && name2 != "") || + (!isError(typ1) && isError(typ2)) || + (!isBool(typ1) && isBool(typ2)) + if !cond { + c.warn(decl) + } + return + } + + seen := make(map[string]bool, len(results.List)) + for i := range results.List { + typ := results.List[i].Type + name := typeName(typ) + isLast := i == len(results.List)-1 + + cond := !seen[name] || + (isLast && (isError(typ) || isBool(typ))) + if !cond { + c.warn(decl) + return + } + + seen[name] = true + } +} + +func (c *unnamedResultChecker) typeName(typ types.Type) string { + switch typ := typ.(type) { + case *types.Array: + return c.typeName(typ.Elem()) + case *types.Pointer: + return c.typeName(typ.Elem()) + case *types.Slice: + return c.typeName(typ.Elem()) + case *types.Named: + return typ.Obj().Name() + default: + return "" + } +} + +func (c *unnamedResultChecker) warn(n ast.Node) { + c.ctx.Warn(n, "consider giving a name to these results") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go new file mode 100644 index 000000000..72807ddbf --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go @@ -0,0 +1,69 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unnecessaryBlock" + info.Tags = []string{"style", "opinionated", "experimental"} + info.Summary = "Detects unnecessary braced statement blocks" + info.Before = ` +x := 1 +{ + print(x) +}` + info.After = ` +x := 1 +print(x)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmtList(&unnecessaryBlockChecker{ctx: ctx}), nil + }) +} + +type unnecessaryBlockChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *unnecessaryBlockChecker) VisitStmtList(statements []ast.Stmt) { + // Using StmtListVisitor instead of StmtVisitor makes it easier to avoid + // false positives on IfStmt, RangeStmt, ForStmt and alike. + // We only inspect BlockStmt inside statement lists, so this method is not + // called for IfStmt itself, for example. 
+ + for _, stmt := range statements { + stmt, ok := stmt.(*ast.BlockStmt) + if ok && !c.hasDefinitions(stmt) { + c.warn(stmt) + } + } +} + +func (c *unnecessaryBlockChecker) hasDefinitions(stmt *ast.BlockStmt) bool { + for _, bs := range stmt.List { + switch stmt := bs.(type) { + case *ast.AssignStmt: + if stmt.Tok == token.DEFINE { + return true + } + case *ast.DeclStmt: + decl := stmt.Decl.(*ast.GenDecl) + if len(decl.Specs) != 0 { + return true + } + } + } + + return false +} + +func (c *unnecessaryBlockChecker) warn(expr ast.Stmt) { + c.ctx.Warn(expr, "block doesn't have definitions, can be simply deleted") +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go new file mode 100644 index 000000000..ef72142a1 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go @@ -0,0 +1,111 @@ +package checkers + +import ( + "go/ast" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astfmt" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unnecessaryDefer" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects redundantly deferred calls" + info.Before = ` +func() { + defer os.Remove(filename) +}` + info.After = ` +func() { + os.Remove(filename) +}` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForFuncDecl(&unnecessaryDeferChecker{ctx: ctx}), nil + }) +} + +type unnecessaryDeferChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + isFunc bool +} + +// Visit implements the ast.Visitor. This visitor keeps track of whether the +// block statement belongs to a function or any other block. If the block is +// not a function and ends with a defer statement, that should be OK since +// it's deferring the outer function. +func (c *unnecessaryDeferChecker) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl, *ast.FuncLit: + c.isFunc = true + case *ast.BlockStmt: + c.checkDeferBeforeReturn(n) + default: + c.isFunc = false + } + + return c +} + +func (c *unnecessaryDeferChecker) VisitFuncDecl(funcDecl *ast.FuncDecl) { + // We always start as a function (*ast.FuncDecl.Body passed) + c.isFunc = true + + ast.Walk(c, funcDecl.Body) +} + +func (c *unnecessaryDeferChecker) checkDeferBeforeReturn(funcDecl *ast.BlockStmt) { + // Check if we have an explicit return or if it's just the end of the scope. + explicitReturn := false + retIndex := len(funcDecl.List) + for i, stmt := range funcDecl.List { + retStmt, ok := stmt.(*ast.ReturnStmt) + if !ok { + continue + } + explicitReturn = true + if !c.isTrivialReturn(retStmt) { + continue + } + retIndex = i + break + } + if retIndex == 0 { + return + } + + if deferStmt, ok := funcDecl.List[retIndex-1].(*ast.DeferStmt); ok { + // If the block is a function and ends with a return, or if we have an + // explicit return in any other block, we should warn about an + // unnecessary defer.
+ if c.isFunc || explicitReturn { + c.warn(deferStmt) + } + } +} + +func (c *unnecessaryDeferChecker) isTrivialReturn(ret *ast.ReturnStmt) bool { + for _, e := range ret.Results { + if !c.isConstExpr(e) { + return false + } + } + return true +} + +func (c *unnecessaryDeferChecker) isConstExpr(e ast.Expr) bool { + return c.ctx.TypesInfo.Types[e].Value != nil +} + +func (c *unnecessaryDeferChecker) warn(deferStmt *ast.DeferStmt) { + s := astfmt.Sprint(deferStmt) + if fnlit, ok := deferStmt.Call.Fun.(*ast.FuncLit); ok { + // To avoid long and multi-line warning messages, + // collapse the function literals. + s = "defer " + astfmt.Sprint(fnlit.Type) + "{...}(...)" + } + c.ctx.Warn(deferStmt, "%s is placed just before return", s) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go new file mode 100644 index 000000000..26a4de061 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go @@ -0,0 +1,59 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "unslice" + info.Tags = []string{"style"} + info.Summary = "Detects slice expressions that can be simplified to sliced expression itself" + info.Before = ` +f(s[:]) // s is string +copy(b[:], values...) // b is []byte` + info.After = ` +f(s) +copy(b, values...)` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&unsliceChecker{ctx: ctx}), nil + }) +} + +type unsliceChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *unsliceChecker) VisitExpr(expr ast.Expr) { + unsliced := c.unslice(expr) + if !astequal.Expr(expr, unsliced) { + c.warn(expr, unsliced) + c.SkipChilds = true + } +} + +func (c *unsliceChecker) unslice(expr ast.Expr) ast.Expr { + slice, ok := expr.(*ast.SliceExpr) + if !ok || slice.Low != nil || slice.High != nil { + // No need to worry about 3-index slicing, + // because it's only permitted if expr.High is not nil. + return expr + } + switch c.ctx.TypeOf(slice.X).(type) { + case *types.Slice, *types.Basic: + // Basic kind catches strings, Slice cathes everything else. + return c.unslice(slice.X) + } + return expr +} + +func (c *unsliceChecker) warn(cause, unsliced ast.Expr) { + c.ctx.Warn(cause, "could simplify %s to %s", cause, unsliced) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/utils.go b/vendor/github.com/go-critic/go-critic/checkers/utils.go new file mode 100644 index 000000000..b71f24d74 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/utils.go @@ -0,0 +1,309 @@ +package checkers + +import ( + "go/ast" + "go/types" + "strings" + + "github.com/go-critic/go-critic/framework/linter" +) + +// goStdlib contains `go list std` command output list. +// Used to detect packages that belong to standard Go packages distribution. 
+var goStdlib = map[string]bool{ + "archive/tar": true, + "archive/zip": true, + "bufio": true, + "bytes": true, + "compress/bzip2": true, + "compress/flate": true, + "compress/gzip": true, + "compress/lzw": true, + "compress/zlib": true, + "container/heap": true, + "container/list": true, + "container/ring": true, + "context": true, + "crypto": true, + "crypto/aes": true, + "crypto/cipher": true, + "crypto/des": true, + "crypto/dsa": true, + "crypto/ecdsa": true, + "crypto/elliptic": true, + "crypto/hmac": true, + "crypto/internal/randutil": true, + "crypto/internal/subtle": true, + "crypto/md5": true, + "crypto/rand": true, + "crypto/rc4": true, + "crypto/rsa": true, + "crypto/sha1": true, + "crypto/sha256": true, + "crypto/sha512": true, + "crypto/subtle": true, + "crypto/tls": true, + "crypto/x509": true, + "crypto/x509/pkix": true, + "database/sql": true, + "database/sql/driver": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/gosym": true, + "debug/macho": true, + "debug/pe": true, + "debug/plan9obj": true, + "encoding": true, + "encoding/ascii85": true, + "encoding/asn1": true, + "encoding/base32": true, + "encoding/base64": true, + "encoding/binary": true, + "encoding/csv": true, + "encoding/gob": true, + "encoding/hex": true, + "encoding/json": true, + "encoding/pem": true, + "encoding/xml": true, + "errors": true, + "expvar": true, + "flag": true, + "fmt": true, + "go/ast": true, + "go/build": true, + "go/constant": true, + "go/doc": true, + "go/format": true, + "go/importer": true, + "go/internal/gccgoimporter": true, + "go/internal/gcimporter": true, + "go/internal/srcimporter": true, + "go/parser": true, + "go/printer": true, + "go/scanner": true, + "go/token": true, + "go/types": true, + "hash": true, + "hash/adler32": true, + "hash/crc32": true, + "hash/crc64": true, + "hash/fnv": true, + "html": true, + "html/template": true, + "image": true, + "image/color": true, + "image/color/palette": true, + "image/draw": true, + "image/gif": true, + "image/internal/imageutil": true, + "image/jpeg": true, + "image/png": true, + "index/suffixarray": true, + "internal/bytealg": true, + "internal/cpu": true, + "internal/nettrace": true, + "internal/poll": true, + "internal/race": true, + "internal/singleflight": true, + "internal/syscall/unix": true, + "internal/syscall/windows": true, + "internal/syscall/windows/registry": true, + "internal/syscall/windows/sysdll": true, + "internal/testenv": true, + "internal/testlog": true, + "internal/trace": true, + "io": true, + "io/ioutil": true, + "log": true, + "log/syslog": true, + "math": true, + "math/big": true, + "math/bits": true, + "math/cmplx": true, + "math/rand": true, + "mime": true, + "mime/multipart": true, + "mime/quotedprintable": true, + "net": true, + "net/http": true, + "net/http/cgi": true, + "net/http/cookiejar": true, + "net/http/fcgi": true, + "net/http/httptest": true, + "net/http/httptrace": true, + "net/http/httputil": true, + "net/http/internal": true, + "net/http/pprof": true, + "net/internal/socktest": true, + "net/mail": true, + "net/rpc": true, + "net/rpc/jsonrpc": true, + "net/smtp": true, + "net/textproto": true, + "net/url": true, + "os": true, + "os/exec": true, + "os/signal": true, + "os/signal/internal/pty": true, + "os/user": true, + "path": true, + "path/filepath": true, + "plugin": true, + "reflect": true, + "regexp": true, + "regexp/syntax": true, + "runtime": true, + "runtime/cgo": true, + "runtime/debug": true, + "runtime/internal/atomic": true, + "runtime/internal/sys": true, + 
"runtime/pprof": true, + "runtime/pprof/internal/profile": true, + "runtime/race": true, + "runtime/trace": true, + "sort": true, + "strconv": true, + "strings": true, + "sync": true, + "sync/atomic": true, + "syscall": true, + "testing": true, + "testing/internal/testdeps": true, + "testing/iotest": true, + "testing/quick": true, + "text/scanner": true, + "text/tabwriter": true, + "text/template": true, + "text/template/parse": true, + "time": true, + "unicode": true, + "unicode/utf16": true, + "unicode/utf8": true, + "unsafe": true, +} + +var goBuiltins = map[string]bool{ + // Types + "bool": true, + "byte": true, + "complex64": true, + "complex128": true, + "error": true, + "float32": true, + "float64": true, + "int": true, + "int8": true, + "int16": true, + "int32": true, + "int64": true, + "rune": true, + "string": true, + "uint": true, + "uint8": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uintptr": true, + + // Constants + "true": true, + "false": true, + "iota": true, + + // Zero value + "nil": true, + + // Functions + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "imag": true, + "len": true, + "make": true, + "new": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, +} + +// isBuiltin reports whether sym belongs to a predefined identifier set. +func isBuiltin(sym string) bool { + return goBuiltins[sym] +} + +// isStdlibPkg reports whether pkg is a package from the Go standard library. +func isStdlibPkg(pkg *types.Package) bool { + return pkg != nil && goStdlib[pkg.Path()] +} + +// isExampleTestFunc reports whether FuncDecl looks like a testable example function. +func isExampleTestFunc(fn *ast.FuncDecl) bool { + return len(fn.Type.Params.List) == 0 && strings.HasPrefix(fn.Name.String(), "Example") +} + +// isUnitTestFunc reports whether FuncDecl declares testing function. +func isUnitTestFunc(ctx *linter.CheckerContext, fn *ast.FuncDecl) bool { + if !strings.HasPrefix(fn.Name.Name, "Test") { + return false + } + typ := ctx.TypesInfo.TypeOf(fn.Name) + if sig, ok := typ.(*types.Signature); ok { + return sig.Results().Len() == 0 && + sig.Params().Len() == 1 && + sig.Params().At(0).Type().String() == "*testing.T" + } + return false +} + +// qualifiedName returns called expr fully-quallified name. +// +// It works for simple identifiers like f => "f" and identifiers +// from other package like pkg.f => "pkg.f". +// +// For all unexpected expressions returns empty string. +func qualifiedName(x ast.Expr) string { + switch x := x.(type) { + case *ast.SelectorExpr: + pkg, ok := x.X.(*ast.Ident) + if !ok { + return "" + } + return pkg.Name + "." + x.Sel.Name + case *ast.Ident: + return x.Name + default: + return "" + } +} + +// identOf returns identifier for x that can be used to obtain associated types.Object. +// Returns nil for expressions that yield temporary results, like `f().field`. +func identOf(x ast.Node) *ast.Ident { + switch x := x.(type) { + case *ast.Ident: + return x + case *ast.SelectorExpr: + return identOf(x.Sel) + case *ast.TypeAssertExpr: + // x.(type) - x may contain ident. + return identOf(x.X) + case *ast.IndexExpr: + // x[i] - x may contain ident. + return identOf(x.X) + case *ast.StarExpr: + // *x - x may contain ident. + return identOf(x.X) + case *ast.SliceExpr: + // x[:] - x may contain ident. + return identOf(x.X) + + default: + // Note that this function is not comprehensive. 
+ return nil + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go b/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go new file mode 100644 index 000000000..d03e11223 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go @@ -0,0 +1,64 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" +) + +func init() { + var info linter.CheckerInfo + info.Name = "valSwap" + info.Tags = []string{"style"} + info.Summary = "Detects value swapping code that are not using parallel assignment" + info.Before = ` +tmp := *x +*x = *y +*y = tmp` + info.After = `*x, *y = *y, *x` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForStmtList(&valSwapChecker{ctx: ctx}), nil + }) +} + +type valSwapChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *valSwapChecker) VisitStmtList(list []ast.Stmt) { + for len(list) >= 3 { + tmpAssign := astcast.ToAssignStmt(list[0]) + assignX := astcast.ToAssignStmt(list[1]) + assignY := astcast.ToAssignStmt(list[2]) + + cond := c.isSimpleAssign(tmpAssign) && + c.isSimpleAssign(assignX) && + c.isSimpleAssign(assignY) && + assignX.Tok == token.ASSIGN && + assignY.Tok == token.ASSIGN && + astequal.Expr(assignX.Lhs[0], tmpAssign.Rhs[0]) && + astequal.Expr(assignX.Rhs[0], assignY.Lhs[0]) && + astequal.Expr(assignY.Rhs[0], tmpAssign.Lhs[0]) + if cond { + c.warn(tmpAssign, assignX.Lhs[0], assignY.Lhs[0]) + list = list[3:] + } else { + list = list[1:] + } + } +} + +func (c *valSwapChecker) isSimpleAssign(x *ast.AssignStmt) bool { + return len(x.Lhs) == 1 && len(x.Rhs) == 1 +} + +func (c *valSwapChecker) warn(cause, x, y ast.Node) { + c.ctx.Warn(cause, "can re-write as `%s, %s = %s, %s`", + x, y, y, x) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go new file mode 100644 index 000000000..831857c41 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go @@ -0,0 +1,77 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/typep" + "golang.org/x/tools/go/ast/astutil" +) + +func init() { + var info linter.CheckerInfo + info.Name = "weakCond" + info.Tags = []string{"diagnostic", "experimental"} + info.Summary = "Detects conditions that are unsafe due to not being exhaustive" + info.Before = `xs != nil && xs[0] != nil` + info.After = `len(xs) != 0 && xs[0] != nil` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForExpr(&weakCondChecker{ctx: ctx}), nil + }) +} + +type weakCondChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *weakCondChecker) VisitExpr(expr ast.Expr) { + // TODO(Quasilyte): more patterns. + // TODO(Quasilyte): analyze and fix false positives. + + cond := astcast.ToBinaryExpr(expr) + lhs := astcast.ToBinaryExpr(astutil.Unparen(cond.X)) + rhs := astutil.Unparen(cond.Y) + + // Pattern 1. 
+ // `x != nil && usageOf(x[i])` + // Pattern 2. + // `x == nil || usageOf(x[i])` + + // lhs is `x nil` + x := lhs.X + if !typep.IsSlice(c.ctx.TypeOf(x)) { + return + } + if astcast.ToIdent(lhs.Y).Name != "nil" { + return + } + + pat1prefix := cond.Op == token.LAND && lhs.Op == token.NEQ + pat2prefix := cond.Op == token.LOR && lhs.Op == token.EQL + if !pat1prefix && !pat2prefix { + return + } + + if c.isIndexed(rhs, x) { + c.warn(expr, "nil check may not be enough, check for len") + } +} + +// isIndexed reports whether x is indexed inside given expr tree. +func (c *weakCondChecker) isIndexed(tree, x ast.Expr) bool { + return lintutil.ContainsNode(tree, func(n ast.Node) bool { + indexing := astcast.ToIndexExpr(n) + return astequal.Expr(x, indexing.X) + }) +} + +func (c *weakCondChecker) warn(cause ast.Node, suggest string) { + c.ctx.Warn(cause, "suspicious `%s`; %s", cause, suggest) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go new file mode 100644 index 000000000..260039f2b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go @@ -0,0 +1,52 @@ +package checkers + +import ( + "go/ast" + "regexp" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" +) + +func init() { + info := linter.CheckerInfo{ + Name: "whyNoLint", + Tags: []string{"style", "experimental"}, + Summary: "Ensures that `//nolint` comments include an explanation", + Before: `//nolint`, + After: `//nolint // reason`, + } + re := regexp.MustCompile(`^// *nolint(?::[^ ]+)? *(.*)$`) + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForComment(&whyNoLintChecker{ + ctx: ctx, + re: re, + }), nil + }) +} + +type whyNoLintChecker struct { + astwalk.WalkHandler + + ctx *linter.CheckerContext + re *regexp.Regexp +} + +func (c whyNoLintChecker) VisitComment(cg *ast.CommentGroup) { + if strings.HasPrefix(cg.List[0].Text, "/*") { + return + } + for _, comment := range cg.List { + sl := c.re.FindStringSubmatch(comment.Text) + if len(sl) < 2 { + continue + } + + if s := sl[1]; !strings.HasPrefix(s, "//") || strings.TrimPrefix(s, "//") == "" { + c.ctx.Warn(cg, "include an explanation for nolint directive") + return + } + } +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go b/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go new file mode 100644 index 000000000..d474989d0 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go @@ -0,0 +1,229 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + "strings" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcast" +) + +func init() { + var info linter.CheckerInfo + info.Name = "wrapperFunc" + info.Tags = []string{"style"} + info.Summary = "Detects function calls that can be replaced with convenience wrappers" + info.Before = `wg.Add(-1)` + info.After = `wg.Done()` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + type arg struct { + index int + value string + } + type pattern struct { + pkg string + typ string // Only for typ patterns + args []arg + suggestion string + } + type matcher struct { + pkgPatterns []pattern + typPatterns []pattern + } + + typPatterns := 
map[string][]arg{ + "sync.WaitGroup.Add => WaitGroup.Done": { + {0, "-1"}, + }, + + "bytes.Buffer.Truncate => Buffer.Reset": { + {0, "0"}, + }, + } + + pkgPatterns := map[string][]arg{ + "http.HandlerFunc => http.NotFoundHandler": { + {0, "http.NotFound"}, + }, + + "strings.SplitN => strings.Split": { + {2, "-1"}, + }, + "strings.Replace => strings.ReplaceAll": { + {3, "-1"}, + }, + "strings.TrimFunc => strings.TrimSpace": { + {1, "unicode.IsSpace"}, + }, + "strings.Map => strings.ToTitle": { + {0, "unicode.ToTitle"}, + }, + + "bytes.SplitN => bytes.Split": { + {2, "-1"}, + }, + "bytes.Replace => bytes.ReplaceAll": { + {3, "-1"}, + }, + "bytes.TrimFunc => bytes.TrimSpace": { + {1, "unicode.IsSpace"}, + }, + "bytes.Map => bytes.ToUpper": { + {0, "unicode.ToUpper"}, + }, + "bytes.Map => bytes.ToLower": { + {0, "unicode.ToLower"}, + }, + "bytes.Map => bytes.ToTitle": { + {0, "unicode.ToTitle"}, + }, + + "draw.DrawMask => draw.Draw": { + {4, "nil"}, + {5, "image.Point{}"}, + }, + } + + matchers := make(map[string]*matcher) + + type templateKey struct { + from string + to string + } + decodeKey := func(key string) templateKey { + parts := strings.Split(key, " => ") + return templateKey{from: parts[0], to: parts[1]} + } + + // Expand pkg patterns. + for key, args := range pkgPatterns { + key := decodeKey(key) + parts := strings.Split(key.from, ".") + fn := parts[1] + m := matchers[fn] + if m == nil { + m = &matcher{} + matchers[fn] = m + } + m.pkgPatterns = append(m.pkgPatterns, pattern{ + pkg: parts[0], + args: args, + suggestion: key.to, + }) + } + // Expand typ patterns. + for key, args := range typPatterns { + key := decodeKey(key) + parts := strings.Split(key.from, ".") + fn := parts[2] + m := matchers[fn] + if m == nil { + m = &matcher{} + matchers[fn] = m + } + m.typPatterns = append(m.typPatterns, pattern{ + pkg: parts[0], + typ: parts[1], + args: args, + suggestion: key.to, + }) + } + + var valueOf func(x ast.Expr) string + valueOf = func(x ast.Expr) string { + switch x := x.(type) { + case *ast.Ident: + return x.Name + case *ast.SelectorExpr: + id, ok := x.X.(*ast.Ident) + if ok { + return id.Name + "." 
+ x.Sel.Name + } + case *ast.BasicLit: + return x.Value + case *ast.UnaryExpr: + switch x.Op { + case token.SUB: + return "-" + valueOf(x.X) + case token.ADD: + return valueOf(x.X) + } + } + return "" + } + + findSuggestion := func(call *ast.CallExpr, pkg, typ string, patterns []pattern) string { + for _, pat := range patterns { + if pat.pkg != pkg || pat.typ != typ { + continue + } + for _, arg := range pat.args { + if arg.value == valueOf(call.Args[arg.index]) { + return pat.suggestion + } + } + } + return "" + } + + c := &wrapperFuncChecker{ctx: ctx} + c.findSuggestion = func(call *ast.CallExpr) string { + sel := astcast.ToSelectorExpr(call.Fun).Sel + if sel == nil { + return "" + } + x := astcast.ToSelectorExpr(call.Fun).X + + m := matchers[sel.Name] + if m == nil { + return "" + } + + if x, ok := x.(*ast.Ident); ok { + obj, ok := c.ctx.TypesInfo.ObjectOf(x).(*types.PkgName) + if ok { + return findSuggestion(call, obj.Name(), "", m.pkgPatterns) + } + } + + typ := c.ctx.TypeOf(x) + tn, ok := typ.(*types.Named) + if !ok { + return "" + } + return findSuggestion( + call, + tn.Obj().Pkg().Name(), + tn.Obj().Name(), + m.typPatterns) + } + + return astwalk.WalkerForExpr(c), nil + }) +} + +type wrapperFuncChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext + + findSuggestion func(*ast.CallExpr) string +} + +func (c *wrapperFuncChecker) VisitExpr(expr ast.Expr) { + call := astcast.ToCallExpr(expr) + if len(call.Args) == 0 { + return + } + + if suggest := c.findSuggestion(call); suggest != "" { + c.warn(call, suggest) + } +} + +func (c *wrapperFuncChecker) warn(cause ast.Node, suggest string) { + c.ctx.Warn(cause, "use %s method in `%s`", suggest, cause) +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go new file mode 100644 index 000000000..c533d143b --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go @@ -0,0 +1,66 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/framework/linter" + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astp" +) + +func init() { + var info linter.CheckerInfo + info.Name = "yodaStyleExpr" + info.Tags = []string{"style", "experimental"} + info.Summary = "Detects Yoda style expressions and suggests to replace them" + info.Before = `return nil != ptr` + info.After = `return ptr != nil` + + collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { + return astwalk.WalkerForLocalExpr(&yodaStyleExprChecker{ctx: ctx}), nil + }) +} + +type yodaStyleExprChecker struct { + astwalk.WalkHandler + ctx *linter.CheckerContext +} + +func (c *yodaStyleExprChecker) VisitLocalExpr(expr ast.Expr) { + binexpr, ok := expr.(*ast.BinaryExpr) + if !ok { + return + } + switch binexpr.Op { + case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GEQ, token.GTR: + if c.isConstExpr(binexpr.X) && !c.isConstExpr(binexpr.Y) { + c.warn(binexpr) + } + } +} + +func (c *yodaStyleExprChecker) isConstExpr(expr ast.Expr) bool { + return qualifiedName(expr) == "nil" || astp.IsBasicLit(expr) +} + +func (c *yodaStyleExprChecker) invert(expr *ast.BinaryExpr) { + expr.X, expr.Y = expr.Y, expr.X + switch expr.Op { + case token.LSS: + expr.Op = token.GEQ + case token.LEQ: + expr.Op = token.GTR + case token.GEQ: + expr.Op = token.LSS + case token.GTR: + expr.Op = token.LEQ + } +} + +func (c 
*yodaStyleExprChecker) warn(expr *ast.BinaryExpr) { + e := astcopy.BinaryExpr(expr) + c.invert(e) + c.ctx.Warn(expr, "consider to change order in expression to %s", e) +} diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go b/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go new file mode 100644 index 000000000..0a3fc0292 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go @@ -0,0 +1,136 @@ +package linter + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "github.com/go-toolsmith/astfmt" +) + +type checkerProto struct { + info *CheckerInfo + constructor func(*Context) (*Checker, error) +} + +// prototypes is a set of registered checkers that are not yet instantiated. +// Registration should be done with AddChecker function. +// Initialized checkers can be obtained with NewChecker function. +var prototypes = make(map[string]checkerProto) + +func getCheckersInfo() []*CheckerInfo { + infoList := make([]*CheckerInfo, 0, len(prototypes)) + for _, proto := range prototypes { + infoCopy := *proto.info + infoList = append(infoList, &infoCopy) + } + sort.Slice(infoList, func(i, j int) bool { + return infoList[i].Name < infoList[j].Name + }) + return infoList +} + +func addChecker(info *CheckerInfo, constructor func(*CheckerContext) (FileWalker, error)) { + if _, ok := prototypes[info.Name]; ok { + panic(fmt.Sprintf("checker with name %q already registered", info.Name)) + } + + // Validate param value type. + for pname, param := range info.Params { + switch param.Value.(type) { + case string, int, bool: + // OK. + default: + panic(fmt.Sprintf("unsupported %q param type value: %T", + pname, param.Value)) + } + } + + trimDocumentation := func(info *CheckerInfo) { + fields := []*string{ + &info.Summary, + &info.Details, + &info.Before, + &info.After, + &info.Note, + } + for _, f := range fields { + *f = strings.TrimSpace(*f) + } + } + + trimDocumentation(info) + + if err := validateCheckerInfo(info); err != nil { + panic(err) + } + + proto := checkerProto{ + info: info, + constructor: func(ctx *Context) (*Checker, error) { + var c Checker + c.Info = info + c.ctx = CheckerContext{ + Context: ctx, + printer: astfmt.NewPrinter(ctx.FileSet), + } + var err error + c.fileWalker, err = constructor(&c.ctx) + return &c, err + }, + } + + prototypes[info.Name] = proto +} + +func newChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { + proto, ok := prototypes[info.Name] + if !ok { + panic(fmt.Sprintf("checker with name %q not registered", info.Name)) + } + return proto.constructor(ctx) +} + +func validateCheckerInfo(info *CheckerInfo) error { + steps := []func(*CheckerInfo) error{ + validateCheckerName, + validateCheckerDocumentation, + validateCheckerTags, + } + + for _, step := range steps { + if err := step(info); err != nil { + return fmt.Errorf("%q validation error: %v", info.Name, err) + } + } + return nil +} + +var validIdentRE = regexp.MustCompile(`^\w+$`) + +func validateCheckerName(info *CheckerInfo) error { + if !validIdentRE.MatchString(info.Name) { + return fmt.Errorf("checker name contains illegal chars") + } + return nil +} + +func validateCheckerDocumentation(info *CheckerInfo) error { + // TODO(quasilyte): validate documentation. 
+ return nil +} + +func validateCheckerTags(info *CheckerInfo) error { + tagSet := make(map[string]bool) + for _, tag := range info.Tags { + if tagSet[tag] { + return fmt.Errorf("duplicated tag %q", tag) + } + if !validIdentRE.MatchString(tag) { + return fmt.Errorf("checker tag %q contains illegal chars", tag) + } + tagSet[tag] = true + } + return nil +} diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/context.go b/vendor/github.com/go-critic/go-critic/framework/linter/context.go new file mode 100644 index 000000000..6e108ab6a --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/framework/linter/context.go @@ -0,0 +1,35 @@ +package linter + +import ( + "go/ast" + "go/types" + "strconv" +) + +func resolvePkgObjects(ctx *Context, f *ast.File) { + ctx.PkgObjects = make(map[*types.PkgName]string, len(f.Imports)) + + for _, spec := range f.Imports { + if spec.Name != nil { + obj := ctx.TypesInfo.ObjectOf(spec.Name) + ctx.PkgObjects[obj.(*types.PkgName)] = spec.Name.Name + } else { + obj := ctx.TypesInfo.Implicits[spec] + ctx.PkgObjects[obj.(*types.PkgName)] = obj.Name() + } + } +} + +func resolvePkgRenames(ctx *Context, f *ast.File) { + ctx.PkgRenames = make(map[string]string) + + for _, spec := range f.Imports { + if spec.Name != nil { + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + panic(err) + } + ctx.PkgRenames[path] = spec.Name.Name + } + } +} diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go b/vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go new file mode 100644 index 000000000..5c8662c69 --- /dev/null +++ b/vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go @@ -0,0 +1,269 @@ +package linter + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/go-toolsmith/astfmt" +) + +// CheckerCollection provides additional information for a group of checkers. +type CheckerCollection struct { + // URL is a link for a main source of information on the collection. + URL string +} + +// AddChecker registers a new checker into a checkers pool. +// Constructor is used to create a new checker instance. +// Checker name (defined in CheckerInfo.Name) must be unique. +// +// CheckerInfo.Collection is automatically set to the coll (the receiver). +// +// If checker is never needed, for example if it is disabled, +// constructor will not be called. +func (coll *CheckerCollection) AddChecker(info *CheckerInfo, constructor func(*CheckerContext) (FileWalker, error)) { + if coll == nil { + panic("adding checker to a nil collection") + } + info.Collection = coll + addChecker(info, constructor) +} + +// CheckerParam describes a single checker customizable parameter. +type CheckerParam struct { + // Value holds parameter bound value. + // It might be overwritten by the integrating linter. + // + // Permitted types include: + // - int + // - bool + // - string + Value interface{} + + // Usage gives an overview about what parameter does. + Usage string +} + +// CheckerParams holds all checker-specific parameters. +// +// Provides convenient access to the loosely typed underlying map. +type CheckerParams map[string]*CheckerParam + +// Int lookups pname key in underlying map and type-asserts it to int. +func (params CheckerParams) Int(pname string) int { return params[pname].Value.(int) } + +// Bool lookups pname key in underlying map and type-asserts it to bool. 
+func (params CheckerParams) Bool(pname string) bool { return params[pname].Value.(bool) } + +// String lookups pname key in underlying map and type-asserts it to string. +func (params CheckerParams) String(pname string) string { return params[pname].Value.(string) } + +// CheckerInfo holds checker metadata and structured documentation. +type CheckerInfo struct { + // Name is a checker name. + Name string + + // Tags is a list of labels that can be used to enable or disable checker. + // Common tags are "experimental" and "performance". + Tags []string + + // Params declares checker-specific parameters. Optional. + Params CheckerParams + + // Summary is a short one sentence description. + // Should not end with a period. + Summary string + + // Details extends summary with additional info. Optional. + Details string + + // Before is a code snippet of code that will violate rule. + Before string + + // After is a code snippet of fixed code that complies to the rule. + After string + + // Note is an optional caution message or advice. + Note string + + // Collection establishes a checker-to-collection relationship. + Collection *CheckerCollection +} + +// GetCheckersInfo returns a checkers info list for all registered checkers. +// The slice is sorted by a checker name. +// +// Info objects can be used to instantiate checkers with NewChecker function. +func GetCheckersInfo() []*CheckerInfo { + return getCheckersInfo() +} + +// HasTag reports whether checker described by the info has specified tag. +func (info *CheckerInfo) HasTag(tag string) bool { + for i := range info.Tags { + if info.Tags[i] == tag { + return true + } + } + return false +} + +// Checker is an implementation of a check that is described by the associated info. +type Checker struct { + // Info is an info object that was used to instantiate this checker. + Info *CheckerInfo + + ctx CheckerContext + + fileWalker FileWalker +} + +// Check runs rule checker over file f. +func (c *Checker) Check(f *ast.File) []Warning { + c.ctx.warnings = c.ctx.warnings[:0] + c.fileWalker.WalkFile(f) + return c.ctx.warnings +} + +// Warning represents issue that is found by checker. +type Warning struct { + // Node is an AST node that caused warning to trigger. + // Can be used to obtain proper error location. + Node ast.Node + + // Text is warning message without source location info. + Text string +} + +// NewChecker returns initialized checker identified by an info. +// info must be non-nil. +// Returns an error if info describes a checker that was not properly registered, +// or if checker fails to initialize. +func NewChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { + return newChecker(ctx, info) +} + +// Context is a readonly state shared among every checker. +type Context struct { + // TypesInfo carries parsed packages types information. + TypesInfo *types.Info + + // SizesInfo carries alignment and type size information. + // Arch-dependent. + SizesInfo types.Sizes + + // FileSet is a file set that was used during the program loading. + FileSet *token.FileSet + + // Pkg describes package that is being checked. + Pkg *types.Package + + // Filename is a currently checked file name. + Filename string + + // Require records what optional resources are required + // by the checkers set that use this context. + // + // Every require fields makes associated context field + // to be properly initialized. + // For example, Context.require.PkgObjects => Context.PkgObjects. 
+ Require struct { + PkgObjects bool + PkgRenames bool + } + + // PkgObjects stores all imported packages and their local names. + PkgObjects map[*types.PkgName]string + + // PkgRenames maps package path to its local renaming. + // Contains no entries for packages that were imported without + // explicit local names. + PkgRenames map[string]string +} + +// NewContext returns new shared context to be used by every checker. +// +// All data carried by the context is readonly for checkers, +// but can be modified by the integrating application. +func NewContext(fset *token.FileSet, sizes types.Sizes) *Context { + return &Context{ + FileSet: fset, + SizesInfo: sizes, + TypesInfo: &types.Info{}, + } +} + +// SetPackageInfo sets package-related metadata. +// +// Must be called for every package being checked. +func (c *Context) SetPackageInfo(info *types.Info, pkg *types.Package) { + if info != nil { + // We do this kind of assignment to avoid + // changing c.typesInfo field address after + // every re-assignment. + *c.TypesInfo = *info + } + c.Pkg = pkg +} + +// SetFileInfo sets file-related metadata. +// +// Must be called for every source code file being checked. +func (c *Context) SetFileInfo(name string, f *ast.File) { + c.Filename = name + if c.Require.PkgObjects { + resolvePkgObjects(c, f) + } + if c.Require.PkgRenames { + resolvePkgRenames(c, f) + } +} + +// CheckerContext is checker-local context copy. +// Fields that are not from Context itself are writeable. +type CheckerContext struct { + *Context + + // printer used to format warning text. + printer *astfmt.Printer + + warnings []Warning +} + +// Warn adds a Warning to checker output. +func (ctx *CheckerContext) Warn(node ast.Node, format string, args ...interface{}) { + ctx.warnings = append(ctx.warnings, Warning{ + Text: ctx.printer.Sprintf(format, args...), + Node: node, + }) +} + +// UnknownType is a special sentinel value that is returned from the CheckerContext.TypeOf +// method instead of the nil type. +var UnknownType types.Type = types.Typ[types.Invalid] + +// TypeOf returns the type of expression x. +// +// Unlike TypesInfo.TypeOf, it never returns nil. +// Instead, it returns the Invalid type as a sentinel UnknownType value. +func (ctx *CheckerContext) TypeOf(x ast.Expr) types.Type { + typ := ctx.TypesInfo.TypeOf(x) + if typ != nil { + return typ + } + // Usually it means that some incorrect type info was loaded + // or the analyzed package was only partially (?) correct. + // To avoid nil pointer panics we can return a sentinel value + // that will fail most type assertions as well as kind checks + // (if the call side expects a *types.Basic). + return UnknownType +} + +// FileWalker is an interface every checker should implement. +// +// The WalkFile method is executed for every Go file inside the +// package that is being checked. +type FileWalker interface { + WalkFile(*ast.File) +} diff --git a/vendor/github.com/go-toolsmith/astcast/.travis.yml b/vendor/github.com/go-toolsmith/astcast/.travis.yml new file mode 100644 index 000000000..c32ac0062 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... 
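The exported surface of the linter framework vendored above (NewContext, SetPackageInfo, SetFileInfo, GetCheckersInfo, NewChecker, Checker.Check) is all an integrating tool needs. The sketch below is illustrative only and not part of the vendored sources: it assumes a hypothetical host package in which loading (file set, parsed files, *types.Info, *types.Package, sizes) has already happened elsewhere.

```go
// Hypothetical driver, not part of the vendored code: package loading
// (files, type info, sizes) is assumed to be done elsewhere by the host.
package gocritichost

import (
	"fmt"
	"go/ast"
	"go/token"
	"go/types"

	"github.com/go-critic/go-critic/framework/linter"
)

// runCheckers instantiates every registered checker once per package and
// reports the warnings collected for each file.
func runCheckers(fset *token.FileSet, filenames []string, files []*ast.File,
	typesInfo *types.Info, pkg *types.Package, sizes types.Sizes) error {

	ctx := linter.NewContext(fset, sizes)
	ctx.SetPackageInfo(typesInfo, pkg)

	var checkers []*linter.Checker
	for _, info := range linter.GetCheckersInfo() {
		c, err := linter.NewChecker(ctx, info)
		if err != nil {
			return err
		}
		checkers = append(checkers, c)
	}

	for i, f := range files {
		ctx.SetFileInfo(filenames[i], f)
		for _, c := range checkers {
			for _, warn := range c.Check(f) {
				fmt.Printf("%s: %s: %s\n",
					fset.Position(warn.Node.Pos()), c.Info.Name, warn.Text)
			}
		}
	}
	return nil
}
```

Warnings come back as plain Node/Text pairs, so position formatting stays with the host, which matches how CheckerContext.Warn above records only the node and the rendered text.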
diff --git a/vendor/github.com/go-toolsmith/astcast/LICENSE b/vendor/github.com/go-toolsmith/astcast/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-toolsmith/astcast/README.md b/vendor/github.com/go-toolsmith/astcast/README.md new file mode 100644 index 000000000..b618da461 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/README.md @@ -0,0 +1,86 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcast)](https://goreportcard.com/report/github.com/go-toolsmith/astcast) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcast?status.svg)](https://godoc.org/github.com/go-toolsmith/astcast) + +# astcast + +Package astcast wraps type assertion operations in such way that you don't have +to worry about nil pointer results anymore. + +## Installation + +```bash +go get -v github.com/go-toolsmith/astcast +``` + +## Example + +```go +package main + +import ( + "fmt" + + "github.com/go-toolsmith/astcast" + "github.com/go-toolsmith/strparse" +) + +func main() { + x := strparse.Expr(`(foo * bar) + 1`) + + // x type is ast.Expr, we want to access bar operand + // that is a RHS of the LHS of the addition. + // Note that addition LHS (X field) is has parenthesis, + // so we have to remove them too. + + add := astcast.ToBinaryExpr(x) + mul := astcast.ToBinaryExpr(astcast.ToParenExpr(add.X).X) + bar := astcast.ToIdent(mul.Y) + fmt.Printf("%T %s\n", bar, bar.Name) // => *ast.Ident bar + + // If argument has different dynamic type, + // non-nil sentinel object of requested type is returned. + // Those sentinel objects are exported so if you need + // to know whether it was a nil interface value of + // failed type assertion, you can compare returned + // object with such a sentinel. 
+ + y := astcast.ToCallExpr(strparse.Expr(`x`)) + if y == astcast.NilCallExpr { + fmt.Println("it is a sentinel, type assertion failed") + } +} +``` + +Without `astcast`, you would have to do a lots of type assertions: + +```go +package main + +import ( + "fmt" + + "github.com/go-toolsmith/strparse" +) + +func main() { + x := strparse.Expr(`(foo * bar) + 1`) + + add, ok := x.(*ast.BinaryExpr) + if !ok || add == nil { + return + } + additionLHS, ok := add.X.(*ast.ParenExpr) + if !ok || additionLHS == nil { + return + } + mul, ok := additionLHS.X.(*ast.BinaryExpr) + if !ok || mul == nil { + return + } + bar, ok := mul.Y.(*ast.Ident) + if !ok || bar == nil { + return + } + fmt.Printf("%T %s\n", bar, bar.Name) +} +``` diff --git a/vendor/github.com/go-toolsmith/astcast/astcast.go b/vendor/github.com/go-toolsmith/astcast/astcast.go new file mode 100644 index 000000000..746d568aa --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/astcast.go @@ -0,0 +1,590 @@ +// Code generated by astcast_generate.go; DO NOT EDIT + +// Package astcast wraps type assertion operations in such way that you don't have +// to worry about nil pointer results anymore. +package astcast + +import ( + "go/ast" +) + +// A set of sentinel nil-like values that are returned +// by all "casting" functions in case of failed type assertion. +var ( + NilArrayType = &ast.ArrayType{} + NilBadExpr = &ast.BadExpr{} + NilBasicLit = &ast.BasicLit{} + NilBinaryExpr = &ast.BinaryExpr{} + NilCallExpr = &ast.CallExpr{} + NilChanType = &ast.ChanType{} + NilCompositeLit = &ast.CompositeLit{} + NilEllipsis = &ast.Ellipsis{} + NilFuncLit = &ast.FuncLit{} + NilFuncType = &ast.FuncType{} + NilIdent = &ast.Ident{} + NilIndexExpr = &ast.IndexExpr{} + NilInterfaceType = &ast.InterfaceType{} + NilKeyValueExpr = &ast.KeyValueExpr{} + NilMapType = &ast.MapType{} + NilParenExpr = &ast.ParenExpr{} + NilSelectorExpr = &ast.SelectorExpr{} + NilSliceExpr = &ast.SliceExpr{} + NilStarExpr = &ast.StarExpr{} + NilStructType = &ast.StructType{} + NilTypeAssertExpr = &ast.TypeAssertExpr{} + NilUnaryExpr = &ast.UnaryExpr{} + NilAssignStmt = &ast.AssignStmt{} + NilBadStmt = &ast.BadStmt{} + NilBlockStmt = &ast.BlockStmt{} + NilBranchStmt = &ast.BranchStmt{} + NilCaseClause = &ast.CaseClause{} + NilCommClause = &ast.CommClause{} + NilDeclStmt = &ast.DeclStmt{} + NilDeferStmt = &ast.DeferStmt{} + NilEmptyStmt = &ast.EmptyStmt{} + NilExprStmt = &ast.ExprStmt{} + NilForStmt = &ast.ForStmt{} + NilGoStmt = &ast.GoStmt{} + NilIfStmt = &ast.IfStmt{} + NilIncDecStmt = &ast.IncDecStmt{} + NilLabeledStmt = &ast.LabeledStmt{} + NilRangeStmt = &ast.RangeStmt{} + NilReturnStmt = &ast.ReturnStmt{} + NilSelectStmt = &ast.SelectStmt{} + NilSendStmt = &ast.SendStmt{} + NilSwitchStmt = &ast.SwitchStmt{} + NilTypeSwitchStmt = &ast.TypeSwitchStmt{} + NilComment = &ast.Comment{} + NilCommentGroup = &ast.CommentGroup{} + NilFieldList = &ast.FieldList{} + NilFile = &ast.File{} + NilPackage = &ast.Package{} +) + +// ToArrayType returns x as a non-nil *ast.ArrayType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilArrayType. +func ToArrayType(x ast.Node) *ast.ArrayType { + if x, ok := x.(*ast.ArrayType); ok { + return x + } + return NilArrayType +} + +// ToBadExpr returns x as a non-nil *ast.BadExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. 
In case if it has +// different type, the returned value is NilBadExpr. +func ToBadExpr(x ast.Node) *ast.BadExpr { + if x, ok := x.(*ast.BadExpr); ok { + return x + } + return NilBadExpr +} + +// ToBasicLit returns x as a non-nil *ast.BasicLit. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilBasicLit. +func ToBasicLit(x ast.Node) *ast.BasicLit { + if x, ok := x.(*ast.BasicLit); ok { + return x + } + return NilBasicLit +} + +// ToBinaryExpr returns x as a non-nil *ast.BinaryExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilBinaryExpr. +func ToBinaryExpr(x ast.Node) *ast.BinaryExpr { + if x, ok := x.(*ast.BinaryExpr); ok { + return x + } + return NilBinaryExpr +} + +// ToCallExpr returns x as a non-nil *ast.CallExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilCallExpr. +func ToCallExpr(x ast.Node) *ast.CallExpr { + if x, ok := x.(*ast.CallExpr); ok { + return x + } + return NilCallExpr +} + +// ToChanType returns x as a non-nil *ast.ChanType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilChanType. +func ToChanType(x ast.Node) *ast.ChanType { + if x, ok := x.(*ast.ChanType); ok { + return x + } + return NilChanType +} + +// ToCompositeLit returns x as a non-nil *ast.CompositeLit. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilCompositeLit. +func ToCompositeLit(x ast.Node) *ast.CompositeLit { + if x, ok := x.(*ast.CompositeLit); ok { + return x + } + return NilCompositeLit +} + +// ToEllipsis returns x as a non-nil *ast.Ellipsis. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilEllipsis. +func ToEllipsis(x ast.Node) *ast.Ellipsis { + if x, ok := x.(*ast.Ellipsis); ok { + return x + } + return NilEllipsis +} + +// ToFuncLit returns x as a non-nil *ast.FuncLit. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilFuncLit. +func ToFuncLit(x ast.Node) *ast.FuncLit { + if x, ok := x.(*ast.FuncLit); ok { + return x + } + return NilFuncLit +} + +// ToFuncType returns x as a non-nil *ast.FuncType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilFuncType. +func ToFuncType(x ast.Node) *ast.FuncType { + if x, ok := x.(*ast.FuncType); ok { + return x + } + return NilFuncType +} + +// ToIdent returns x as a non-nil *ast.Ident. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilIdent. +func ToIdent(x ast.Node) *ast.Ident { + if x, ok := x.(*ast.Ident); ok { + return x + } + return NilIdent +} + +// ToIndexExpr returns x as a non-nil *ast.IndexExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. 
In case if it has +// different type, the returned value is NilIndexExpr. +func ToIndexExpr(x ast.Node) *ast.IndexExpr { + if x, ok := x.(*ast.IndexExpr); ok { + return x + } + return NilIndexExpr +} + +// ToInterfaceType returns x as a non-nil *ast.InterfaceType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilInterfaceType. +func ToInterfaceType(x ast.Node) *ast.InterfaceType { + if x, ok := x.(*ast.InterfaceType); ok { + return x + } + return NilInterfaceType +} + +// ToKeyValueExpr returns x as a non-nil *ast.KeyValueExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilKeyValueExpr. +func ToKeyValueExpr(x ast.Node) *ast.KeyValueExpr { + if x, ok := x.(*ast.KeyValueExpr); ok { + return x + } + return NilKeyValueExpr +} + +// ToMapType returns x as a non-nil *ast.MapType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilMapType. +func ToMapType(x ast.Node) *ast.MapType { + if x, ok := x.(*ast.MapType); ok { + return x + } + return NilMapType +} + +// ToParenExpr returns x as a non-nil *ast.ParenExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilParenExpr. +func ToParenExpr(x ast.Node) *ast.ParenExpr { + if x, ok := x.(*ast.ParenExpr); ok { + return x + } + return NilParenExpr +} + +// ToSelectorExpr returns x as a non-nil *ast.SelectorExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilSelectorExpr. +func ToSelectorExpr(x ast.Node) *ast.SelectorExpr { + if x, ok := x.(*ast.SelectorExpr); ok { + return x + } + return NilSelectorExpr +} + +// ToSliceExpr returns x as a non-nil *ast.SliceExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilSliceExpr. +func ToSliceExpr(x ast.Node) *ast.SliceExpr { + if x, ok := x.(*ast.SliceExpr); ok { + return x + } + return NilSliceExpr +} + +// ToStarExpr returns x as a non-nil *ast.StarExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilStarExpr. +func ToStarExpr(x ast.Node) *ast.StarExpr { + if x, ok := x.(*ast.StarExpr); ok { + return x + } + return NilStarExpr +} + +// ToStructType returns x as a non-nil *ast.StructType. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilStructType. +func ToStructType(x ast.Node) *ast.StructType { + if x, ok := x.(*ast.StructType); ok { + return x + } + return NilStructType +} + +// ToTypeAssertExpr returns x as a non-nil *ast.TypeAssertExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilTypeAssertExpr. 
+func ToTypeAssertExpr(x ast.Node) *ast.TypeAssertExpr { + if x, ok := x.(*ast.TypeAssertExpr); ok { + return x + } + return NilTypeAssertExpr +} + +// ToUnaryExpr returns x as a non-nil *ast.UnaryExpr. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilUnaryExpr. +func ToUnaryExpr(x ast.Node) *ast.UnaryExpr { + if x, ok := x.(*ast.UnaryExpr); ok { + return x + } + return NilUnaryExpr +} + +// ToAssignStmt returns x as a non-nil *ast.AssignStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilAssignStmt. +func ToAssignStmt(x ast.Node) *ast.AssignStmt { + if x, ok := x.(*ast.AssignStmt); ok { + return x + } + return NilAssignStmt +} + +// ToBadStmt returns x as a non-nil *ast.BadStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilBadStmt. +func ToBadStmt(x ast.Node) *ast.BadStmt { + if x, ok := x.(*ast.BadStmt); ok { + return x + } + return NilBadStmt +} + +// ToBlockStmt returns x as a non-nil *ast.BlockStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilBlockStmt. +func ToBlockStmt(x ast.Node) *ast.BlockStmt { + if x, ok := x.(*ast.BlockStmt); ok { + return x + } + return NilBlockStmt +} + +// ToBranchStmt returns x as a non-nil *ast.BranchStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilBranchStmt. +func ToBranchStmt(x ast.Node) *ast.BranchStmt { + if x, ok := x.(*ast.BranchStmt); ok { + return x + } + return NilBranchStmt +} + +// ToCaseClause returns x as a non-nil *ast.CaseClause. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilCaseClause. +func ToCaseClause(x ast.Node) *ast.CaseClause { + if x, ok := x.(*ast.CaseClause); ok { + return x + } + return NilCaseClause +} + +// ToCommClause returns x as a non-nil *ast.CommClause. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilCommClause. +func ToCommClause(x ast.Node) *ast.CommClause { + if x, ok := x.(*ast.CommClause); ok { + return x + } + return NilCommClause +} + +// ToDeclStmt returns x as a non-nil *ast.DeclStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilDeclStmt. +func ToDeclStmt(x ast.Node) *ast.DeclStmt { + if x, ok := x.(*ast.DeclStmt); ok { + return x + } + return NilDeclStmt +} + +// ToDeferStmt returns x as a non-nil *ast.DeferStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilDeferStmt. +func ToDeferStmt(x ast.Node) *ast.DeferStmt { + if x, ok := x.(*ast.DeferStmt); ok { + return x + } + return NilDeferStmt +} + +// ToEmptyStmt returns x as a non-nil *ast.EmptyStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. 
In case if it has +// different type, the returned value is NilEmptyStmt. +func ToEmptyStmt(x ast.Node) *ast.EmptyStmt { + if x, ok := x.(*ast.EmptyStmt); ok { + return x + } + return NilEmptyStmt +} + +// ToExprStmt returns x as a non-nil *ast.ExprStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilExprStmt. +func ToExprStmt(x ast.Node) *ast.ExprStmt { + if x, ok := x.(*ast.ExprStmt); ok { + return x + } + return NilExprStmt +} + +// ToForStmt returns x as a non-nil *ast.ForStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilForStmt. +func ToForStmt(x ast.Node) *ast.ForStmt { + if x, ok := x.(*ast.ForStmt); ok { + return x + } + return NilForStmt +} + +// ToGoStmt returns x as a non-nil *ast.GoStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilGoStmt. +func ToGoStmt(x ast.Node) *ast.GoStmt { + if x, ok := x.(*ast.GoStmt); ok { + return x + } + return NilGoStmt +} + +// ToIfStmt returns x as a non-nil *ast.IfStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilIfStmt. +func ToIfStmt(x ast.Node) *ast.IfStmt { + if x, ok := x.(*ast.IfStmt); ok { + return x + } + return NilIfStmt +} + +// ToIncDecStmt returns x as a non-nil *ast.IncDecStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilIncDecStmt. +func ToIncDecStmt(x ast.Node) *ast.IncDecStmt { + if x, ok := x.(*ast.IncDecStmt); ok { + return x + } + return NilIncDecStmt +} + +// ToLabeledStmt returns x as a non-nil *ast.LabeledStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilLabeledStmt. +func ToLabeledStmt(x ast.Node) *ast.LabeledStmt { + if x, ok := x.(*ast.LabeledStmt); ok { + return x + } + return NilLabeledStmt +} + +// ToRangeStmt returns x as a non-nil *ast.RangeStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilRangeStmt. +func ToRangeStmt(x ast.Node) *ast.RangeStmt { + if x, ok := x.(*ast.RangeStmt); ok { + return x + } + return NilRangeStmt +} + +// ToReturnStmt returns x as a non-nil *ast.ReturnStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilReturnStmt. +func ToReturnStmt(x ast.Node) *ast.ReturnStmt { + if x, ok := x.(*ast.ReturnStmt); ok { + return x + } + return NilReturnStmt +} + +// ToSelectStmt returns x as a non-nil *ast.SelectStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilSelectStmt. +func ToSelectStmt(x ast.Node) *ast.SelectStmt { + if x, ok := x.(*ast.SelectStmt); ok { + return x + } + return NilSelectStmt +} + +// ToSendStmt returns x as a non-nil *ast.SendStmt. 
+// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilSendStmt. +func ToSendStmt(x ast.Node) *ast.SendStmt { + if x, ok := x.(*ast.SendStmt); ok { + return x + } + return NilSendStmt +} + +// ToSwitchStmt returns x as a non-nil *ast.SwitchStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilSwitchStmt. +func ToSwitchStmt(x ast.Node) *ast.SwitchStmt { + if x, ok := x.(*ast.SwitchStmt); ok { + return x + } + return NilSwitchStmt +} + +// ToTypeSwitchStmt returns x as a non-nil *ast.TypeSwitchStmt. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilTypeSwitchStmt. +func ToTypeSwitchStmt(x ast.Node) *ast.TypeSwitchStmt { + if x, ok := x.(*ast.TypeSwitchStmt); ok { + return x + } + return NilTypeSwitchStmt +} + +// ToComment returns x as a non-nil *ast.Comment. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilComment. +func ToComment(x ast.Node) *ast.Comment { + if x, ok := x.(*ast.Comment); ok { + return x + } + return NilComment +} + +// ToCommentGroup returns x as a non-nil *ast.CommentGroup. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilCommentGroup. +func ToCommentGroup(x ast.Node) *ast.CommentGroup { + if x, ok := x.(*ast.CommentGroup); ok { + return x + } + return NilCommentGroup +} + +// ToFieldList returns x as a non-nil *ast.FieldList. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilFieldList. +func ToFieldList(x ast.Node) *ast.FieldList { + if x, ok := x.(*ast.FieldList); ok { + return x + } + return NilFieldList +} + +// ToFile returns x as a non-nil *ast.File. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilFile. +func ToFile(x ast.Node) *ast.File { + if x, ok := x.(*ast.File); ok { + return x + } + return NilFile +} + +// ToPackage returns x as a non-nil *ast.Package. +// If ast.Node actually has such dynamic type, the result is +// identical to normal type assertion. In case if it has +// different type, the returned value is NilPackage. 
+func ToPackage(x ast.Node) *ast.Package { + if x, ok := x.(*ast.Package); ok { + return x + } + return NilPackage +} diff --git a/vendor/github.com/go-toolsmith/astcast/go.mod b/vendor/github.com/go-toolsmith/astcast/go.mod new file mode 100644 index 000000000..3e431993e --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/go.mod @@ -0,0 +1,6 @@ +module github.com/go-toolsmith/astcast + +require ( + github.com/go-toolsmith/astequal v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 +) diff --git a/vendor/github.com/go-toolsmith/astcast/go.sum b/vendor/github.com/go-toolsmith/astcast/go.sum new file mode 100644 index 000000000..aa0857030 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcast/go.sum @@ -0,0 +1,4 @@ +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= diff --git a/vendor/github.com/go-toolsmith/astcopy/.travis.yml b/vendor/github.com/go-toolsmith/astcopy/.travis.yml new file mode 100644 index 000000000..8994d395c --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astcopy/LICENSE b/vendor/github.com/go-toolsmith/astcopy/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
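The astcast README above demonstrates the package through the companion strparse helper. As a hedged aside, the same sentinel behaviour can be shown with nothing but go/parser from the standard library; the identifiers foo and bar below are arbitrary placeholders.

```go
package main

import (
	"fmt"
	"go/parser"

	"github.com/go-toolsmith/astcast"
)

func main() {
	// Parse a small expression with the standard library instead of strparse.
	expr, err := parser.ParseExpr(`(foo * bar) + 1`)
	if err != nil {
		panic(err)
	}

	// Chained casts never panic: a failed assertion yields a non-nil,
	// typed sentinel instead of a nil pointer.
	add := astcast.ToBinaryExpr(expr)
	mul := astcast.ToBinaryExpr(astcast.ToParenExpr(add.X).X)
	fmt.Println(astcast.ToIdent(mul.Y).Name) // => bar

	// A mismatched cast returns the exported sentinel value, which can be
	// compared against directly.
	if astcast.ToCallExpr(expr) == astcast.NilCallExpr {
		fmt.Println("not a call expression")
	}
}
```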
diff --git a/vendor/github.com/go-toolsmith/astcopy/README.md b/vendor/github.com/go-toolsmith/astcopy/README.md new file mode 100644 index 000000000..4dae5c41b --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/README.md @@ -0,0 +1,41 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcopy)](https://goreportcard.com/report/github.com/go-toolsmith/astcopy) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcopy?status.svg)](https://godoc.org/github.com/go-toolsmith/astcopy) +[![Build Status](https://travis-ci.org/go-toolsmith/astcopy.svg?branch=master)](https://travis-ci.org/go-toolsmith/astcopy) + +# astcopy + +Package astcopy implements Go AST reflection-free deep copy operations. + +## Installation: + +```bash +go get github.com/go-toolsmith/astcopy +``` + +## Example + +```go +package main + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/go-toolsmith/astcopy" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/strparse" +) + +func main() { + x := strparse.Expr(`1 + 2`).(*ast.BinaryExpr) + y := astcopy.BinaryExpr(x) + fmt.Println(astequal.Expr(x, y)) // => true + + // Now modify x and make sure y is not modified. + z := astcopy.BinaryExpr(y) + x.Op = token.SUB + fmt.Println(astequal.Expr(y, z)) // => true + fmt.Println(astequal.Expr(x, y)) // => false +} +``` diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy.go b/vendor/github.com/go-toolsmith/astcopy/astcopy.go new file mode 100644 index 000000000..2feffb199 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy.go @@ -0,0 +1,955 @@ +// Package astcopy implements Go AST reflection-free deep copy operations. +package astcopy + +import ( + "go/ast" +) + +// Node returns x node deep copy. +// Copy of nil argument is nil. +func Node(x ast.Node) ast.Node { + return copyNode(x) +} + +// NodeList returns xs node slice deep copy. +// Copy of nil argument is nil. +func NodeList(xs []ast.Node) []ast.Node { + if xs == nil { + return nil + } + cp := make([]ast.Node, len(xs)) + for i := range xs { + cp[i] = copyNode(xs[i]) + } + return cp +} + +// Expr returns x expression deep copy. +// Copy of nil argument is nil. +func Expr(x ast.Expr) ast.Expr { + return copyExpr(x) +} + +// ExprList returns xs expression slice deep copy. +// Copy of nil argument is nil. +func ExprList(xs []ast.Expr) []ast.Expr { + if xs == nil { + return nil + } + cp := make([]ast.Expr, len(xs)) + for i := range xs { + cp[i] = copyExpr(xs[i]) + } + return cp +} + +// Stmt returns x statement deep copy. +// Copy of nil argument is nil. +func Stmt(x ast.Stmt) ast.Stmt { + return copyStmt(x) +} + +// StmtList returns xs statement slice deep copy. +// Copy of nil argument is nil. +func StmtList(xs []ast.Stmt) []ast.Stmt { + if xs == nil { + return nil + } + cp := make([]ast.Stmt, len(xs)) + for i := range xs { + cp[i] = copyStmt(xs[i]) + } + return cp +} + +// Decl returns x declaration deep copy. +// Copy of nil argument is nil. +func Decl(x ast.Decl) ast.Decl { + return copyDecl(x) +} + +// DeclList returns xs declaration slice deep copy. +// Copy of nil argument is nil. +func DeclList(xs []ast.Decl) []ast.Decl { + if xs == nil { + return nil + } + cp := make([]ast.Decl, len(xs)) + for i := range xs { + cp[i] = copyDecl(xs[i]) + } + return cp +} + +// BadExpr returns x deep copy. +// Copy of nil argument is nil. +func BadExpr(x *ast.BadExpr) *ast.BadExpr { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// Ident returns x deep copy. +// Copy of nil argument is nil. 
+func Ident(x *ast.Ident) *ast.Ident { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// IdentList returns xs identifier slice deep copy. +// Copy of nil argument is nil. +func IdentList(xs []*ast.Ident) []*ast.Ident { + if xs == nil { + return nil + } + cp := make([]*ast.Ident, len(xs)) + for i := range xs { + cp[i] = Ident(xs[i]) + } + return cp +} + +// Ellipsis returns x deep copy. +// Copy of nil argument is nil. +func Ellipsis(x *ast.Ellipsis) *ast.Ellipsis { + if x == nil { + return nil + } + cp := *x + cp.Elt = copyExpr(x.Elt) + return &cp +} + +// BasicLit returns x deep copy. +// Copy of nil argument is nil. +func BasicLit(x *ast.BasicLit) *ast.BasicLit { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// FuncLit returns x deep copy. +// Copy of nil argument is nil. +func FuncLit(x *ast.FuncLit) *ast.FuncLit { + if x == nil { + return nil + } + cp := *x + cp.Type = FuncType(x.Type) + cp.Body = BlockStmt(x.Body) + return &cp +} + +// CompositeLit returns x deep copy. +// Copy of nil argument is nil. +func CompositeLit(x *ast.CompositeLit) *ast.CompositeLit { + if x == nil { + return nil + } + cp := *x + cp.Type = copyExpr(x.Type) + cp.Elts = ExprList(x.Elts) + return &cp +} + +// ParenExpr returns x deep copy. +// Copy of nil argument is nil. +func ParenExpr(x *ast.ParenExpr) *ast.ParenExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + return &cp +} + +// SelectorExpr returns x deep copy. +// Copy of nil argument is nil. +func SelectorExpr(x *ast.SelectorExpr) *ast.SelectorExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + cp.Sel = Ident(x.Sel) + return &cp +} + +// IndexExpr returns x deep copy. +// Copy of nil argument is nil. +func IndexExpr(x *ast.IndexExpr) *ast.IndexExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + cp.Index = copyExpr(x.Index) + return &cp +} + +// SliceExpr returns x deep copy. +// Copy of nil argument is nil. +func SliceExpr(x *ast.SliceExpr) *ast.SliceExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + cp.Low = copyExpr(x.Low) + cp.High = copyExpr(x.High) + cp.Max = copyExpr(x.Max) + return &cp +} + +// TypeAssertExpr returns x deep copy. +// Copy of nil argument is nil. +func TypeAssertExpr(x *ast.TypeAssertExpr) *ast.TypeAssertExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + cp.Type = copyExpr(x.Type) + return &cp +} + +// CallExpr returns x deep copy. +// Copy of nil argument is nil. +func CallExpr(x *ast.CallExpr) *ast.CallExpr { + if x == nil { + return nil + } + cp := *x + cp.Fun = copyExpr(x.Fun) + cp.Args = ExprList(x.Args) + return &cp +} + +// StarExpr returns x deep copy. +// Copy of nil argument is nil. +func StarExpr(x *ast.StarExpr) *ast.StarExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + return &cp +} + +// UnaryExpr returns x deep copy. +// Copy of nil argument is nil. +func UnaryExpr(x *ast.UnaryExpr) *ast.UnaryExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + return &cp +} + +// BinaryExpr returns x deep copy. +// Copy of nil argument is nil. +func BinaryExpr(x *ast.BinaryExpr) *ast.BinaryExpr { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + cp.Y = copyExpr(x.Y) + return &cp +} + +// KeyValueExpr returns x deep copy. +// Copy of nil argument is nil. 
+func KeyValueExpr(x *ast.KeyValueExpr) *ast.KeyValueExpr { + if x == nil { + return nil + } + cp := *x + cp.Key = copyExpr(x.Key) + cp.Value = copyExpr(x.Value) + return &cp +} + +// ArrayType returns x deep copy. +// Copy of nil argument is nil. +func ArrayType(x *ast.ArrayType) *ast.ArrayType { + if x == nil { + return nil + } + cp := *x + cp.Len = copyExpr(x.Len) + cp.Elt = copyExpr(x.Elt) + return &cp +} + +// StructType returns x deep copy. +// Copy of nil argument is nil. +func StructType(x *ast.StructType) *ast.StructType { + if x == nil { + return nil + } + cp := *x + cp.Fields = FieldList(x.Fields) + return &cp +} + +// Field returns x deep copy. +// Copy of nil argument is nil. +func Field(x *ast.Field) *ast.Field { + if x == nil { + return nil + } + cp := *x + cp.Names = IdentList(x.Names) + cp.Type = copyExpr(x.Type) + cp.Tag = BasicLit(x.Tag) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} + +// FieldList returns x deep copy. +// Copy of nil argument is nil. +func FieldList(x *ast.FieldList) *ast.FieldList { + if x == nil { + return nil + } + cp := *x + if x.List != nil { + cp.List = make([]*ast.Field, len(x.List)) + for i := range x.List { + cp.List[i] = Field(x.List[i]) + } + } + return &cp +} + +// FuncType returns x deep copy. +// Copy of nil argument is nil. +func FuncType(x *ast.FuncType) *ast.FuncType { + if x == nil { + return nil + } + cp := *x + cp.Params = FieldList(x.Params) + cp.Results = FieldList(x.Results) + return &cp +} + +// InterfaceType returns x deep copy. +// Copy of nil argument is nil. +func InterfaceType(x *ast.InterfaceType) *ast.InterfaceType { + if x == nil { + return nil + } + cp := *x + cp.Methods = FieldList(x.Methods) + return &cp +} + +// MapType returns x deep copy. +// Copy of nil argument is nil. +func MapType(x *ast.MapType) *ast.MapType { + if x == nil { + return nil + } + cp := *x + cp.Key = copyExpr(x.Key) + cp.Value = copyExpr(x.Value) + return &cp +} + +// ChanType returns x deep copy. +// Copy of nil argument is nil. +func ChanType(x *ast.ChanType) *ast.ChanType { + if x == nil { + return nil + } + cp := *x + cp.Value = copyExpr(x.Value) + return &cp +} + +// BlockStmt returns x deep copy. +// Copy of nil argument is nil. +func BlockStmt(x *ast.BlockStmt) *ast.BlockStmt { + if x == nil { + return nil + } + cp := *x + cp.List = StmtList(x.List) + return &cp +} + +// ImportSpec returns x deep copy. +// Copy of nil argument is nil. +func ImportSpec(x *ast.ImportSpec) *ast.ImportSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Path = BasicLit(x.Path) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} + +// ValueSpec returns x deep copy. +// Copy of nil argument is nil. +func ValueSpec(x *ast.ValueSpec) *ast.ValueSpec { + if x == nil { + return nil + } + cp := *x + cp.Names = IdentList(x.Names) + cp.Values = ExprList(x.Values) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} + +// TypeSpec returns x deep copy. +// Copy of nil argument is nil. +func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} + +// Spec returns x deep copy. +// Copy of nil argument is nil. 
+func Spec(x ast.Spec) ast.Spec { + if x == nil { + return nil + } + + switch x := x.(type) { + case *ast.ImportSpec: + return ImportSpec(x) + case *ast.ValueSpec: + return ValueSpec(x) + case *ast.TypeSpec: + return TypeSpec(x) + default: + panic("unhandled spec") + } +} + +// SpecList returns xs spec slice deep copy. +// Copy of nil argument is nil. +func SpecList(xs []ast.Spec) []ast.Spec { + if xs == nil { + return nil + } + cp := make([]ast.Spec, len(xs)) + for i := range xs { + cp[i] = Spec(xs[i]) + } + return cp +} + +// BadStmt returns x deep copy. +// Copy of nil argument is nil. +func BadStmt(x *ast.BadStmt) *ast.BadStmt { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// DeclStmt returns x deep copy. +// Copy of nil argument is nil. +func DeclStmt(x *ast.DeclStmt) *ast.DeclStmt { + if x == nil { + return nil + } + cp := *x + cp.Decl = copyDecl(x.Decl) + return &cp +} + +// EmptyStmt returns x deep copy. +// Copy of nil argument is nil. +func EmptyStmt(x *ast.EmptyStmt) *ast.EmptyStmt { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// LabeledStmt returns x deep copy. +// Copy of nil argument is nil. +func LabeledStmt(x *ast.LabeledStmt) *ast.LabeledStmt { + if x == nil { + return nil + } + cp := *x + cp.Label = Ident(x.Label) + cp.Stmt = copyStmt(x.Stmt) + return &cp +} + +// ExprStmt returns x deep copy. +// Copy of nil argument is nil. +func ExprStmt(x *ast.ExprStmt) *ast.ExprStmt { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + return &cp +} + +// SendStmt returns x deep copy. +// Copy of nil argument is nil. +func SendStmt(x *ast.SendStmt) *ast.SendStmt { + if x == nil { + return nil + } + cp := *x + cp.Chan = copyExpr(x.Chan) + cp.Value = copyExpr(x.Value) + return &cp +} + +// IncDecStmt returns x deep copy. +// Copy of nil argument is nil. +func IncDecStmt(x *ast.IncDecStmt) *ast.IncDecStmt { + if x == nil { + return nil + } + cp := *x + cp.X = copyExpr(x.X) + return &cp +} + +// AssignStmt returns x deep copy. +// Copy of nil argument is nil. +func AssignStmt(x *ast.AssignStmt) *ast.AssignStmt { + if x == nil { + return nil + } + cp := *x + cp.Lhs = ExprList(x.Lhs) + cp.Rhs = ExprList(x.Rhs) + return &cp +} + +// GoStmt returns x deep copy. +// Copy of nil argument is nil. +func GoStmt(x *ast.GoStmt) *ast.GoStmt { + if x == nil { + return nil + } + cp := *x + cp.Call = CallExpr(x.Call) + return &cp +} + +// DeferStmt returns x deep copy. +// Copy of nil argument is nil. +func DeferStmt(x *ast.DeferStmt) *ast.DeferStmt { + if x == nil { + return nil + } + cp := *x + cp.Call = CallExpr(x.Call) + return &cp +} + +// ReturnStmt returns x deep copy. +// Copy of nil argument is nil. +func ReturnStmt(x *ast.ReturnStmt) *ast.ReturnStmt { + if x == nil { + return nil + } + cp := *x + cp.Results = ExprList(x.Results) + return &cp +} + +// BranchStmt returns x deep copy. +// Copy of nil argument is nil. +func BranchStmt(x *ast.BranchStmt) *ast.BranchStmt { + if x == nil { + return nil + } + cp := *x + cp.Label = Ident(x.Label) + return &cp +} + +// IfStmt returns x deep copy. +// Copy of nil argument is nil. +func IfStmt(x *ast.IfStmt) *ast.IfStmt { + if x == nil { + return nil + } + cp := *x + cp.Init = copyStmt(x.Init) + cp.Cond = copyExpr(x.Cond) + cp.Body = BlockStmt(x.Body) + cp.Else = copyStmt(x.Else) + return &cp +} + +// CaseClause returns x deep copy. +// Copy of nil argument is nil. 
+func CaseClause(x *ast.CaseClause) *ast.CaseClause { + if x == nil { + return nil + } + cp := *x + cp.List = ExprList(x.List) + cp.Body = StmtList(x.Body) + return &cp +} + +// SwitchStmt returns x deep copy. +// Copy of nil argument is nil. +func SwitchStmt(x *ast.SwitchStmt) *ast.SwitchStmt { + if x == nil { + return nil + } + cp := *x + cp.Init = copyStmt(x.Init) + cp.Tag = copyExpr(x.Tag) + cp.Body = BlockStmt(x.Body) + return &cp +} + +// TypeSwitchStmt returns x deep copy. +// Copy of nil argument is nil. +func TypeSwitchStmt(x *ast.TypeSwitchStmt) *ast.TypeSwitchStmt { + if x == nil { + return nil + } + cp := *x + cp.Init = copyStmt(x.Init) + cp.Assign = copyStmt(x.Assign) + cp.Body = BlockStmt(x.Body) + return &cp +} + +// CommClause returns x deep copy. +// Copy of nil argument is nil. +func CommClause(x *ast.CommClause) *ast.CommClause { + if x == nil { + return nil + } + cp := *x + cp.Comm = copyStmt(x.Comm) + cp.Body = StmtList(x.Body) + return &cp +} + +// SelectStmt returns x deep copy. +// Copy of nil argument is nil. +func SelectStmt(x *ast.SelectStmt) *ast.SelectStmt { + if x == nil { + return nil + } + cp := *x + cp.Body = BlockStmt(x.Body) + return &cp +} + +// ForStmt returns x deep copy. +// Copy of nil argument is nil. +func ForStmt(x *ast.ForStmt) *ast.ForStmt { + if x == nil { + return nil + } + cp := *x + cp.Init = copyStmt(x.Init) + cp.Cond = copyExpr(x.Cond) + cp.Post = copyStmt(x.Post) + cp.Body = BlockStmt(x.Body) + return &cp +} + +// RangeStmt returns x deep copy. +// Copy of nil argument is nil. +func RangeStmt(x *ast.RangeStmt) *ast.RangeStmt { + if x == nil { + return nil + } + cp := *x + cp.Key = copyExpr(x.Key) + cp.Value = copyExpr(x.Value) + cp.X = copyExpr(x.X) + cp.Body = BlockStmt(x.Body) + return &cp +} + +// Comment returns x deep copy. +// Copy of nil argument is nil. +func Comment(x *ast.Comment) *ast.Comment { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// CommentGroup returns x deep copy. +// Copy of nil argument is nil. +func CommentGroup(x *ast.CommentGroup) *ast.CommentGroup { + if x == nil { + return nil + } + cp := *x + if x.List != nil { + cp.List = make([]*ast.Comment, len(x.List)) + for i := range x.List { + cp.List[i] = Comment(x.List[i]) + } + } + return &cp +} + +// File returns x deep copy. +// Copy of nil argument is nil. +func File(x *ast.File) *ast.File { + if x == nil { + return nil + } + cp := *x + cp.Doc = CommentGroup(x.Doc) + cp.Name = Ident(x.Name) + cp.Decls = DeclList(x.Decls) + cp.Imports = make([]*ast.ImportSpec, len(x.Imports)) + for i := range x.Imports { + cp.Imports[i] = ImportSpec(x.Imports[i]) + } + cp.Unresolved = IdentList(x.Unresolved) + cp.Comments = make([]*ast.CommentGroup, len(x.Comments)) + for i := range x.Comments { + cp.Comments[i] = CommentGroup(x.Comments[i]) + } + return &cp +} + +// Package returns x deep copy. +// Copy of nil argument is nil. +func Package(x *ast.Package) *ast.Package { + if x == nil { + return nil + } + cp := *x + cp.Files = make(map[string]*ast.File) + for filename, f := range x.Files { + cp.Files[filename] = f + } + return &cp +} + +// BadDecl returns x deep copy. +// Copy of nil argument is nil. +func BadDecl(x *ast.BadDecl) *ast.BadDecl { + if x == nil { + return nil + } + cp := *x + return &cp +} + +// GenDecl returns x deep copy. +// Copy of nil argument is nil. 
+func GenDecl(x *ast.GenDecl) *ast.GenDecl { + if x == nil { + return nil + } + cp := *x + cp.Specs = SpecList(x.Specs) + cp.Doc = CommentGroup(x.Doc) + return &cp +} + +// FuncDecl returns x deep copy. +// Copy of nil argument is nil. +func FuncDecl(x *ast.FuncDecl) *ast.FuncDecl { + if x == nil { + return nil + } + cp := *x + cp.Recv = FieldList(x.Recv) + cp.Name = Ident(x.Name) + cp.Type = FuncType(x.Type) + cp.Body = BlockStmt(x.Body) + cp.Doc = CommentGroup(x.Doc) + return &cp +} + +func copyNode(x ast.Node) ast.Node { + switch x := x.(type) { + case ast.Expr: + return copyExpr(x) + case ast.Stmt: + return copyStmt(x) + case ast.Decl: + return copyDecl(x) + + case ast.Spec: + return Spec(x) + case *ast.FieldList: + return FieldList(x) + case *ast.Comment: + return Comment(x) + case *ast.CommentGroup: + return CommentGroup(x) + case *ast.File: + return File(x) + case *ast.Package: + return Package(x) + + default: + panic("unhandled node") + } +} + +func copyExpr(x ast.Expr) ast.Expr { + if x == nil { + return nil + } + + switch x := x.(type) { + case *ast.BadExpr: + return BadExpr(x) + case *ast.Ident: + return Ident(x) + case *ast.Ellipsis: + return Ellipsis(x) + case *ast.BasicLit: + return BasicLit(x) + case *ast.FuncLit: + return FuncLit(x) + case *ast.CompositeLit: + return CompositeLit(x) + case *ast.ParenExpr: + return ParenExpr(x) + case *ast.SelectorExpr: + return SelectorExpr(x) + case *ast.IndexExpr: + return IndexExpr(x) + case *ast.SliceExpr: + return SliceExpr(x) + case *ast.TypeAssertExpr: + return TypeAssertExpr(x) + case *ast.CallExpr: + return CallExpr(x) + case *ast.StarExpr: + return StarExpr(x) + case *ast.UnaryExpr: + return UnaryExpr(x) + case *ast.BinaryExpr: + return BinaryExpr(x) + case *ast.KeyValueExpr: + return KeyValueExpr(x) + case *ast.ArrayType: + return ArrayType(x) + case *ast.StructType: + return StructType(x) + case *ast.FuncType: + return FuncType(x) + case *ast.InterfaceType: + return InterfaceType(x) + case *ast.MapType: + return MapType(x) + case *ast.ChanType: + return ChanType(x) + + default: + panic("unhandled expr") + } +} + +func copyStmt(x ast.Stmt) ast.Stmt { + if x == nil { + return nil + } + + switch x := x.(type) { + case *ast.BadStmt: + return BadStmt(x) + case *ast.DeclStmt: + return DeclStmt(x) + case *ast.EmptyStmt: + return EmptyStmt(x) + case *ast.LabeledStmt: + return LabeledStmt(x) + case *ast.ExprStmt: + return ExprStmt(x) + case *ast.SendStmt: + return SendStmt(x) + case *ast.IncDecStmt: + return IncDecStmt(x) + case *ast.AssignStmt: + return AssignStmt(x) + case *ast.GoStmt: + return GoStmt(x) + case *ast.DeferStmt: + return DeferStmt(x) + case *ast.ReturnStmt: + return ReturnStmt(x) + case *ast.BranchStmt: + return BranchStmt(x) + case *ast.BlockStmt: + return BlockStmt(x) + case *ast.IfStmt: + return IfStmt(x) + case *ast.CaseClause: + return CaseClause(x) + case *ast.SwitchStmt: + return SwitchStmt(x) + case *ast.TypeSwitchStmt: + return TypeSwitchStmt(x) + case *ast.CommClause: + return CommClause(x) + case *ast.SelectStmt: + return SelectStmt(x) + case *ast.ForStmt: + return ForStmt(x) + case *ast.RangeStmt: + return RangeStmt(x) + + default: + panic("unhandled stmt") + } +} + +func copyDecl(x ast.Decl) ast.Decl { + if x == nil { + return nil + } + + switch x := x.(type) { + case *ast.BadDecl: + return BadDecl(x) + case *ast.GenDecl: + return GenDecl(x) + case *ast.FuncDecl: + return FuncDecl(x) + + default: + panic("unhandled decl") + } +} diff --git a/vendor/github.com/go-toolsmith/astcopy/go.mod 
b/vendor/github.com/go-toolsmith/astcopy/go.mod new file mode 100644 index 000000000..6f3b3027a --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/go.mod @@ -0,0 +1,6 @@ +module github.com/go-toolsmith/astcopy + +require ( + github.com/go-toolsmith/astequal v1.0.0 + github.com/go-toolsmith/strparse v1.0.0 +) diff --git a/vendor/github.com/go-toolsmith/astcopy/go.sum b/vendor/github.com/go-toolsmith/astcopy/go.sum new file mode 100644 index 000000000..aa0857030 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/go.sum @@ -0,0 +1,4 @@ +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= diff --git a/vendor/github.com/go-toolsmith/astequal/.gitignore b/vendor/github.com/go-toolsmith/astequal/.gitignore new file mode 100644 index 000000000..f38c2b852 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/.gitignore @@ -0,0 +1,5 @@ +bin +pkg +src/main +tmp + diff --git a/vendor/github.com/go-toolsmith/astequal/.travis.yml b/vendor/github.com/go-toolsmith/astequal/.travis.yml new file mode 100644 index 000000000..8994d395c --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astequal/LICENSE b/vendor/github.com/go-toolsmith/astequal/LICENSE new file mode 100644 index 000000000..717f894f5 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Iskander Sharipov / Quasilyte + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
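
For orientation, the exported astcopy helpers above pair naturally with astequal, the dependency pinned in astcopy's go.mod above: copy a node, mutate the copy, and the original stays intact. A minimal, illustrative sketch follows; the `add`/`sum` function names and the source snippet are invented for the example.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"

	"github.com/go-toolsmith/astcopy"
	"github.com/go-toolsmith/astequal"
)

func main() {
	const src = `package p

func add(a, b int) int { return a + b }`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	orig := f.Decls[0].(*ast.FuncDecl)

	// Deep-copy the declaration, then rename only the copy.
	cp := astcopy.FuncDecl(orig)
	cp.Name = ast.NewIdent("sum")

	// The original is untouched; astequal compares structure, not positions.
	fmt.Println(orig.Name.Name)                              // => add
	fmt.Println(astequal.Decl(orig, cp))                     // => false
	fmt.Println(astequal.Decl(orig, astcopy.FuncDecl(orig))) // => true
}
```

Because each copy function returns a fresh node graph, edits to the copy never alias the source tree, which is what makes the position-insensitive comparison meaningful here.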
diff --git a/vendor/github.com/go-toolsmith/astequal/README.md b/vendor/github.com/go-toolsmith/astequal/README.md new file mode 100644 index 000000000..b14f80f6f --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/README.md @@ -0,0 +1,67 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astequal)](https://goreportcard.com/report/github.com/go-toolsmith/astequal) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/astequal?status.svg)](https://godoc.org/github.com/go-toolsmith/astequal) +[![Build Status](https://travis-ci.org/go-toolsmith/astequal.svg?branch=master)](https://travis-ci.org/go-toolsmith/astequal) + + +# astequal + +Package astequal provides AST (deep) equallity check operations. + +## Installation: + +```bash +go get github.com/go-toolsmith/astequal +``` + +## Example + +```go +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "log" + "reflect" + + "github.com/go-toolsmith/astequal" +) + +func main() { + const code = ` + package foo + + func main() { + x := []int{1, 2, 3} + x := []int{1, 2, 3} + }` + + fset := token.NewFileSet() + pkg, err := parser.ParseFile(fset, "string", code, 0) + if err != nil { + log.Fatalf("parse error: %+v", err) + } + + fn := pkg.Decls[0].(*ast.FuncDecl) + x := fn.Body.List[0] + y := fn.Body.List[1] + + // Reflect DeepEqual will fail due to different Pos values. + // astequal only checks whether two nodes describe AST. + fmt.Println(reflect.DeepEqual(x, y)) // => false + fmt.Println(astequal.Node(x, y)) // => true + fmt.Println(astequal.Stmt(x, y)) // => true +} +``` + +## Performance + +`astequal` outperforms reflection-based comparison by a big margin: + +``` +BenchmarkEqualExpr/astequal.Expr-8 5000000 298 ns/op 0 B/op 0 allocs/op +BenchmarkEqualExpr/astequal.Node-8 3000000 409 ns/op 0 B/op 0 allocs/op +BenchmarkEqualExpr/reflect.DeepEqual-8 50000 38898 ns/op 10185 B/op 156 allocs/op +``` diff --git a/vendor/github.com/go-toolsmith/astequal/astequal.go b/vendor/github.com/go-toolsmith/astequal/astequal.go new file mode 100644 index 000000000..6a32d7218 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/astequal.go @@ -0,0 +1,734 @@ +// Package astequal provides AST (deep) equallity check operations. +package astequal + +import ( + "go/ast" + "go/token" +) + +// Node reports whether two AST nodes are structurally (deep) equal. +// +// Nil arguments are permitted: true is returned if x and y are both nils. +// +// See also: Expr, Stmt, Decl functions. +func Node(x, y ast.Node) bool { + return astNodeEq(x, y) +} + +// Expr reports whether two AST expressions are structurally (deep) equal. +// +// Nil arguments are permitted: true is returned if x and y are both nils. +// ast.BadExpr comparison always yields false. +func Expr(x, y ast.Expr) bool { + return astExprEq(x, y) +} + +// Stmt reports whether two AST statements are structurally (deep) equal. +// +// Nil arguments are permitted: true is returned if x and y are both nils. +// ast.BadStmt comparison always yields false. +func Stmt(x, y ast.Stmt) bool { + return astStmtEq(x, y) +} + +// Decl reports whether two AST declarations are structurally (deep) equal. +// +// Nil arguments are permitted: true is returned if x and y are both nils. +// ast.BadDecl comparison always yields false. +func Decl(x, y ast.Decl) bool { + return astDeclEq(x, y) +} + +// Functions to perform deep equallity checks between arbitrary AST nodes. + +// Compare interface node types. +// +// Interfaces, as well as their values, can be nil. 
+// +// Even if AST does expect field X to be mandatory, +// nil checks are required as nodes can be constructed +// manually, or be partially invalid/incomplete. + +func astNodeEq(x, y ast.Node) bool { + switch x := x.(type) { + case ast.Expr: + y, ok := y.(ast.Expr) + return ok && astExprEq(x, y) + case ast.Stmt: + y, ok := y.(ast.Stmt) + return ok && astStmtEq(x, y) + case ast.Decl: + y, ok := y.(ast.Decl) + return ok && astDeclEq(x, y) + default: + return false + } +} + +func astExprEq(x, y ast.Expr) bool { + if x == nil || y == nil { + return x == y + } + + switch x := x.(type) { + case *ast.Ident: + y, ok := y.(*ast.Ident) + return ok && astIdentEq(x, y) + + case *ast.BasicLit: + y, ok := y.(*ast.BasicLit) + return ok && astBasicLitEq(x, y) + + case *ast.FuncLit: + y, ok := y.(*ast.FuncLit) + return ok && astFuncLitEq(x, y) + + case *ast.CompositeLit: + y, ok := y.(*ast.CompositeLit) + return ok && astCompositeLitEq(x, y) + + case *ast.ParenExpr: + y, ok := y.(*ast.ParenExpr) + return ok && astParenExprEq(x, y) + + case *ast.SelectorExpr: + y, ok := y.(*ast.SelectorExpr) + return ok && astSelectorExprEq(x, y) + + case *ast.IndexExpr: + y, ok := y.(*ast.IndexExpr) + return ok && astIndexExprEq(x, y) + + case *ast.SliceExpr: + y, ok := y.(*ast.SliceExpr) + return ok && astSliceExprEq(x, y) + + case *ast.TypeAssertExpr: + y, ok := y.(*ast.TypeAssertExpr) + return ok && astTypeAssertExprEq(x, y) + + case *ast.CallExpr: + y, ok := y.(*ast.CallExpr) + return ok && astCallExprEq(x, y) + + case *ast.StarExpr: + y, ok := y.(*ast.StarExpr) + return ok && astStarExprEq(x, y) + + case *ast.UnaryExpr: + y, ok := y.(*ast.UnaryExpr) + return ok && astUnaryExprEq(x, y) + + case *ast.BinaryExpr: + y, ok := y.(*ast.BinaryExpr) + return ok && astBinaryExprEq(x, y) + + case *ast.KeyValueExpr: + y, ok := y.(*ast.KeyValueExpr) + return ok && astKeyValueExprEq(x, y) + + case *ast.ArrayType: + y, ok := y.(*ast.ArrayType) + return ok && astArrayTypeEq(x, y) + + case *ast.StructType: + y, ok := y.(*ast.StructType) + return ok && astStructTypeEq(x, y) + + case *ast.FuncType: + y, ok := y.(*ast.FuncType) + return ok && astFuncTypeEq(x, y) + + case *ast.InterfaceType: + y, ok := y.(*ast.InterfaceType) + return ok && astInterfaceTypeEq(x, y) + + case *ast.MapType: + y, ok := y.(*ast.MapType) + return ok && astMapTypeEq(x, y) + + case *ast.ChanType: + y, ok := y.(*ast.ChanType) + return ok && astChanTypeEq(x, y) + + case *ast.Ellipsis: + y, ok := y.(*ast.Ellipsis) + return ok && astEllipsisEq(x, y) + + default: + return false + } +} + +func astStmtEq(x, y ast.Stmt) bool { + if x == nil || y == nil { + return x == y + } + + switch x := x.(type) { + case *ast.ExprStmt: + y, ok := y.(*ast.ExprStmt) + return ok && astExprStmtEq(x, y) + + case *ast.SendStmt: + y, ok := y.(*ast.SendStmt) + return ok && astSendStmtEq(x, y) + + case *ast.IncDecStmt: + y, ok := y.(*ast.IncDecStmt) + return ok && astIncDecStmtEq(x, y) + + case *ast.AssignStmt: + y, ok := y.(*ast.AssignStmt) + return ok && astAssignStmtEq(x, y) + + case *ast.GoStmt: + y, ok := y.(*ast.GoStmt) + return ok && astGoStmtEq(x, y) + + case *ast.DeferStmt: + y, ok := y.(*ast.DeferStmt) + return ok && astDeferStmtEq(x, y) + + case *ast.ReturnStmt: + y, ok := y.(*ast.ReturnStmt) + return ok && astReturnStmtEq(x, y) + + case *ast.BranchStmt: + y, ok := y.(*ast.BranchStmt) + return ok && astBranchStmtEq(x, y) + + case *ast.BlockStmt: + y, ok := y.(*ast.BlockStmt) + return ok && astBlockStmtEq(x, y) + + case *ast.IfStmt: + y, ok := y.(*ast.IfStmt) + return ok && 
astIfStmtEq(x, y) + + case *ast.CaseClause: + y, ok := y.(*ast.CaseClause) + return ok && astCaseClauseEq(x, y) + + case *ast.SwitchStmt: + y, ok := y.(*ast.SwitchStmt) + return ok && astSwitchStmtEq(x, y) + + case *ast.TypeSwitchStmt: + y, ok := y.(*ast.TypeSwitchStmt) + return ok && astTypeSwitchStmtEq(x, y) + + case *ast.CommClause: + y, ok := y.(*ast.CommClause) + return ok && astCommClauseEq(x, y) + + case *ast.SelectStmt: + y, ok := y.(*ast.SelectStmt) + return ok && astSelectStmtEq(x, y) + + case *ast.ForStmt: + y, ok := y.(*ast.ForStmt) + return ok && astForStmtEq(x, y) + + case *ast.RangeStmt: + y, ok := y.(*ast.RangeStmt) + return ok && astRangeStmtEq(x, y) + + case *ast.DeclStmt: + y, ok := y.(*ast.DeclStmt) + return ok && astDeclStmtEq(x, y) + + case *ast.LabeledStmt: + y, ok := y.(*ast.LabeledStmt) + return ok && astLabeledStmtEq(x, y) + + case *ast.EmptyStmt: + y, ok := y.(*ast.EmptyStmt) + return ok && astEmptyStmtEq(x, y) + + default: + return false + } +} + +func astDeclEq(x, y ast.Decl) bool { + if x == nil || y == nil { + return x == y + } + + switch x := x.(type) { + case *ast.GenDecl: + y, ok := y.(*ast.GenDecl) + return ok && astGenDeclEq(x, y) + + case *ast.FuncDecl: + y, ok := y.(*ast.FuncDecl) + return ok && astFuncDeclEq(x, y) + + default: + return false + } +} + +// Compare concrete nodes for equallity. +// +// Any node of pointer type permitted to be nil, +// hence nil checks are mandatory. + +func astIdentEq(x, y *ast.Ident) bool { + if x == nil || y == nil { + return x == y + } + return x.Name == y.Name +} + +func astKeyValueExprEq(x, y *ast.KeyValueExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Key, y.Key) && astExprEq(x.Value, y.Value) +} + +func astArrayTypeEq(x, y *ast.ArrayType) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Len, y.Len) && astExprEq(x.Elt, y.Elt) +} + +func astStructTypeEq(x, y *ast.StructType) bool { + if x == nil || y == nil { + return x == y + } + return astFieldListEq(x.Fields, y.Fields) +} + +func astFuncTypeEq(x, y *ast.FuncType) bool { + if x == nil || y == nil { + return x == y + } + return astFieldListEq(x.Params, y.Params) && + astFieldListEq(x.Results, y.Results) +} + +func astBasicLitEq(x, y *ast.BasicLit) bool { + if x == nil || y == nil { + return x == y + } + return x.Kind == y.Kind && x.Value == y.Value +} + +func astBlockStmtEq(x, y *ast.BlockStmt) bool { + if x == nil || y == nil { + return x == y + } + return astStmtSliceEq(x.List, y.List) +} + +func astFieldEq(x, y *ast.Field) bool { + if x == nil || y == nil { + return x == y + } + return astIdentSliceEq(x.Names, y.Names) && + astExprEq(x.Type, y.Type) +} + +func astFuncLitEq(x, y *ast.FuncLit) bool { + if x == nil || y == nil { + return x == y + } + return astFuncTypeEq(x.Type, y.Type) && + astBlockStmtEq(x.Body, y.Body) +} + +func astCompositeLitEq(x, y *ast.CompositeLit) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Type, y.Type) && + astExprSliceEq(x.Elts, y.Elts) +} + +func astSelectorExprEq(x, y *ast.SelectorExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) && astIdentEq(x.Sel, y.Sel) +} + +func astIndexExprEq(x, y *ast.IndexExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) && astExprEq(x.Index, y.Index) +} + +func astSliceExprEq(x, y *ast.SliceExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) && + astExprEq(x.Low, y.Low) && + astExprEq(x.High, y.High) 
&& + astExprEq(x.Max, y.Max) +} + +func astTypeAssertExprEq(x, y *ast.TypeAssertExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) && astExprEq(x.Type, y.Type) +} + +func astInterfaceTypeEq(x, y *ast.InterfaceType) bool { + if x == nil || y == nil { + return x == y + } + return astFieldListEq(x.Methods, y.Methods) +} + +func astMapTypeEq(x, y *ast.MapType) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Key, y.Key) && astExprEq(x.Value, y.Value) +} + +func astChanTypeEq(x, y *ast.ChanType) bool { + if x == nil || y == nil { + return x == y + } + return x.Dir == y.Dir && astExprEq(x.Value, y.Value) +} + +func astCallExprEq(x, y *ast.CallExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Fun, y.Fun) && + astExprSliceEq(x.Args, y.Args) && + (x.Ellipsis == 0) == (y.Ellipsis == 0) +} + +func astEllipsisEq(x, y *ast.Ellipsis) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Elt, y.Elt) +} + +func astUnaryExprEq(x, y *ast.UnaryExpr) bool { + if x == nil || y == nil { + return x == y + } + return x.Op == y.Op && astExprEq(x.X, y.X) +} + +func astBinaryExprEq(x, y *ast.BinaryExpr) bool { + if x == nil || y == nil { + return x == y + } + return x.Op == y.Op && + astExprEq(x.X, y.X) && + astExprEq(x.Y, y.Y) +} + +func astParenExprEq(x, y *ast.ParenExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) +} + +func astStarExprEq(x, y *ast.StarExpr) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) +} + +func astFieldListEq(x, y *ast.FieldList) bool { + if x == nil || y == nil { + return x == y + } + return astFieldSliceEq(x.List, y.List) +} + +func astEmptyStmtEq(x, y *ast.EmptyStmt) bool { + if x == nil || y == nil { + return x == y + } + return x.Implicit == y.Implicit +} + +func astLabeledStmtEq(x, y *ast.LabeledStmt) bool { + if x == nil || y == nil { + return x == y + } + return astIdentEq(x.Label, y.Label) && astStmtEq(x.Stmt, y.Stmt) +} + +func astExprStmtEq(x, y *ast.ExprStmt) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.X, y.X) +} + +func astSendStmtEq(x, y *ast.SendStmt) bool { + if x == nil || y == nil { + return x == y + } + return astExprEq(x.Chan, y.Chan) && astExprEq(x.Value, y.Value) +} + +func astDeclStmtEq(x, y *ast.DeclStmt) bool { + if x == nil || y == nil { + return x == y + } + return astDeclEq(x.Decl, y.Decl) +} + +func astIncDecStmtEq(x, y *ast.IncDecStmt) bool { + if x == nil || y == nil { + return x == y + } + return x.Tok == y.Tok && astExprEq(x.X, y.X) +} + +func astAssignStmtEq(x, y *ast.AssignStmt) bool { + if x == nil || y == nil { + return x == y + } + return x.Tok == y.Tok && + astExprSliceEq(x.Lhs, y.Lhs) && + astExprSliceEq(x.Rhs, y.Rhs) +} + +func astGoStmtEq(x, y *ast.GoStmt) bool { + if x == nil || y == nil { + return x == y + } + return astCallExprEq(x.Call, y.Call) +} + +func astDeferStmtEq(x, y *ast.DeferStmt) bool { + if x == nil || y == nil { + return x == y + } + return astCallExprEq(x.Call, y.Call) +} + +func astReturnStmtEq(x, y *ast.ReturnStmt) bool { + if x == nil || y == nil { + return x == y + } + return astExprSliceEq(x.Results, y.Results) +} + +func astBranchStmtEq(x, y *ast.BranchStmt) bool { + if x == nil || y == nil { + return x == y + } + return x.Tok == y.Tok && astIdentEq(x.Label, y.Label) +} + +func astIfStmtEq(x, y *ast.IfStmt) bool { + if x == nil || y == nil { + return x == y + } + return astStmtEq(x.Init, y.Init) && + 
astExprEq(x.Cond, y.Cond) && + astBlockStmtEq(x.Body, y.Body) && + astStmtEq(x.Else, y.Else) +} + +func astCaseClauseEq(x, y *ast.CaseClause) bool { + if x == nil || y == nil { + return x == y + } + return astExprSliceEq(x.List, y.List) && + astStmtSliceEq(x.Body, y.Body) +} + +func astSwitchStmtEq(x, y *ast.SwitchStmt) bool { + if x == nil || y == nil { + return x == y + } + return astStmtEq(x.Init, y.Init) && + astExprEq(x.Tag, y.Tag) && + astBlockStmtEq(x.Body, y.Body) +} + +func astTypeSwitchStmtEq(x, y *ast.TypeSwitchStmt) bool { + if x == nil || y == nil { + return x == y + } + return astStmtEq(x.Init, y.Init) && + astStmtEq(x.Assign, y.Assign) && + astBlockStmtEq(x.Body, y.Body) +} + +func astCommClauseEq(x, y *ast.CommClause) bool { + if x == nil || y == nil { + return x == y + } + return astStmtEq(x.Comm, y.Comm) && astStmtSliceEq(x.Body, y.Body) +} + +func astSelectStmtEq(x, y *ast.SelectStmt) bool { + if x == nil || y == nil { + return x == y + } + return astBlockStmtEq(x.Body, y.Body) +} + +func astForStmtEq(x, y *ast.ForStmt) bool { + if x == nil || y == nil { + return x == y + } + return astStmtEq(x.Init, y.Init) && + astExprEq(x.Cond, y.Cond) && + astStmtEq(x.Post, y.Post) && + astBlockStmtEq(x.Body, y.Body) +} + +func astRangeStmtEq(x, y *ast.RangeStmt) bool { + if x == nil || y == nil { + return x == y + } + return x.Tok == y.Tok && + astExprEq(x.Key, y.Key) && + astExprEq(x.Value, y.Value) && + astExprEq(x.X, y.X) && + astBlockStmtEq(x.Body, y.Body) +} + +func astFuncDeclEq(x, y *ast.FuncDecl) bool { + if x == nil || y == nil { + return x == y + } + return astFieldListEq(x.Recv, y.Recv) && + astIdentEq(x.Name, y.Name) && + astFuncTypeEq(x.Type, y.Type) && + astBlockStmtEq(x.Body, y.Body) +} + +func astGenDeclEq(x, y *ast.GenDecl) bool { + if x == nil || y == nil { + return x == y + } + + if x.Tok != y.Tok { + return false + } + if len(x.Specs) != len(y.Specs) { + return false + } + + switch x.Tok { + case token.IMPORT: + for i := range x.Specs { + xspec := x.Specs[i].(*ast.ImportSpec) + yspec := y.Specs[i].(*ast.ImportSpec) + if !astImportSpecEq(xspec, yspec) { + return false + } + } + case token.TYPE: + for i := range x.Specs { + xspec := x.Specs[i].(*ast.TypeSpec) + yspec := y.Specs[i].(*ast.TypeSpec) + if !astTypeSpecEq(xspec, yspec) { + return false + } + } + default: + for i := range x.Specs { + xspec := x.Specs[i].(*ast.ValueSpec) + yspec := y.Specs[i].(*ast.ValueSpec) + if !astValueSpecEq(xspec, yspec) { + return false + } + } + } + + return true +} + +func astImportSpecEq(x, y *ast.ImportSpec) bool { + if x == nil || y == nil { + return x == y + } + return astIdentEq(x.Name, y.Name) && astBasicLitEq(x.Path, y.Path) +} + +func astTypeSpecEq(x, y *ast.TypeSpec) bool { + if x == nil || y == nil { + return x == y + } + return astIdentEq(x.Name, y.Name) && astExprEq(x.Type, y.Type) +} + +func astValueSpecEq(x, y *ast.ValueSpec) bool { + if x == nil || y == nil { + return x == y + } + return astIdentSliceEq(x.Names, y.Names) && + astExprEq(x.Type, y.Type) && + astExprSliceEq(x.Values, y.Values) +} + +// Compare slices for equallity. +// +// Each slice element that has pointer type permitted to be nil, +// hence instead of using adhoc comparison of values, +// equallity functions that are defined above are used. 
+ +func astIdentSliceEq(xs, ys []*ast.Ident) bool { + if len(xs) != len(ys) { + return false + } + for i := range xs { + if !astIdentEq(xs[i], ys[i]) { + return false + } + } + return true +} + +func astFieldSliceEq(xs, ys []*ast.Field) bool { + if len(xs) != len(ys) { + return false + } + for i := range xs { + if !astFieldEq(xs[i], ys[i]) { + return false + } + } + return true +} + +func astStmtSliceEq(xs, ys []ast.Stmt) bool { + if len(xs) != len(ys) { + return false + } + for i := range xs { + if !astStmtEq(xs[i], ys[i]) { + return false + } + } + return true +} + +func astExprSliceEq(xs, ys []ast.Expr) bool { + if len(xs) != len(ys) { + return false + } + for i := range xs { + if !astExprEq(xs[i], ys[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/go-toolsmith/astequal/go.mod b/vendor/github.com/go-toolsmith/astequal/go.mod new file mode 100644 index 000000000..86fa40772 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astequal/go.mod @@ -0,0 +1 @@ +module github.com/go-toolsmith/astequal diff --git a/vendor/github.com/go-toolsmith/astfmt/.travis.yml b/vendor/github.com/go-toolsmith/astfmt/.travis.yml new file mode 100644 index 000000000..c32ac0062 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/astfmt/LICENSE b/vendor/github.com/go-toolsmith/astfmt/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-toolsmith/astfmt/README.md b/vendor/github.com/go-toolsmith/astfmt/README.md new file mode 100644 index 000000000..954c92bf4 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/README.md @@ -0,0 +1,39 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) + + +# astfmt + +Package astfmt implements ast.Node formatting with fmt-like API. 
+ +## Installation + +```bash +go get github.com/go-toolsmith/astfmt +``` + +## Example + +```go +package main + +import ( + "go/token" + "os" + + "github.com/go-toolsmith/astfmt" + "github.com/go-toolsmith/strparse" +) + +func Example() { + x := strparse.Expr(`foo(bar(baz(1+2)))`) + // astfmt functions add %s support for ast.Node arguments. + astfmt.Println(x) // => foo(bar(baz(1 + 2))) + astfmt.Fprintf(os.Stdout, "node=%s\n", x) // => node=foo(bar(baz(1 + 2))) + + // Can use specific file set with printer. + fset := token.NewFileSet() // Suppose this fset is used when parsing + pp := astfmt.NewPrinter(fset) + pp.Println(x) // => foo(bar(baz(1 + 2))) +} +``` diff --git a/vendor/github.com/go-toolsmith/astfmt/astfmt.go b/vendor/github.com/go-toolsmith/astfmt/astfmt.go new file mode 100644 index 000000000..ca993e033 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/astfmt.go @@ -0,0 +1,111 @@ +// Package astfmt implements `ast.Node` formatting with fmt-like API. +package astfmt + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "io" +) + +// Println calls fmt.Println with additional support of %s format +// for ast.Node arguments. +// +// Uses empty file set for AST printing. +func Println(args ...interface{}) error { + return defaultPrinter.Println(args...) +} + +// Fprintf calls fmt.Fprintf with additional support of %s format +// for ast.Node arguments. +// +// Uses empty file set for AST printing. +func Fprintf(w io.Writer, format string, args ...interface{}) error { + return defaultPrinter.Fprintf(w, format, args...) +} + +// Sprintf calls fmt.Sprintf with additional support of %s format +// for ast.Node arguments. +// +// Uses empty file set for AST printing. +func Sprintf(format string, args ...interface{}) string { + return defaultPrinter.Sprintf(format, args...) +} + +// Sprint calls fmt.Sprint with additional support of %s format +// for ast.Node arguments. +// +// Uses empty file set for AST printing. +func Sprint(args ...interface{}) string { + return defaultPrinter.Sprint(args...) +} + +// NewPrinter returns printer that uses bound file set when printing AST nodes. +func NewPrinter(fset *token.FileSet) *Printer { + return &Printer{fset: fset} +} + +// Printer provides API close to fmt package for printing AST nodes. +// Unlike freestanding functions from this package, it makes it possible +// to associate appropriate file set for better output. +type Printer struct { + fset *token.FileSet +} + +// Println printer method is like Println function, but uses bound file set when printing. +func (p *Printer) Println(args ...interface{}) error { + _, err := fmt.Println(wrapArgs(p.fset, args)...) + return err +} + +// Fprintf printer method is like Fprintf function, but uses bound file set when printing. +func (p *Printer) Fprintf(w io.Writer, format string, args ...interface{}) error { + _, err := fmt.Fprintf(w, format, wrapArgs(p.fset, args)...) + return err +} + +// Sprintf printer method is like Sprintf function, but uses bound file set when printing. +func (p *Printer) Sprintf(format string, args ...interface{}) string { + return fmt.Sprintf(format, wrapArgs(p.fset, args)...) +} + +// Sprint printer method is like Sprint function, but uses bound file set when printing. +func (p *Printer) Sprint(args ...interface{}) string { + return fmt.Sprint(wrapArgs(p.fset, args)...) +} + +// defaultPrinter is used in printing functions like Println. +// Uses empty file set. 
+var defaultPrinter = NewPrinter(token.NewFileSet()) + +// wrapArgs returns arguments slice with every ast.Node element +// replaced with fmtNode wrapper that supports additional formatting. +func wrapArgs(fset *token.FileSet, args []interface{}) []interface{} { + for i := range args { + if x, ok := args[i].(ast.Node); ok { + args[i] = fmtNode{fset: fset, node: x} + } + } + return args +} + +type fmtNode struct { + fset *token.FileSet + node ast.Node +} + +func (n fmtNode) String() string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, n.fset, n.node); err != nil { + return fmt.Sprintf("%%!s(ast.Node=%s)", err) + } + return buf.String() +} + +func (n fmtNode) GoString() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%#v", n.node) + return buf.String() +} diff --git a/vendor/github.com/go-toolsmith/astfmt/go.mod b/vendor/github.com/go-toolsmith/astfmt/go.mod new file mode 100644 index 000000000..d23db1566 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/go.mod @@ -0,0 +1,6 @@ +module github.com/go-toolsmith/astfmt + +require ( + github.com/go-toolsmith/astequal v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 +) diff --git a/vendor/github.com/go-toolsmith/astfmt/go.sum b/vendor/github.com/go-toolsmith/astfmt/go.sum new file mode 100644 index 000000000..aa0857030 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astfmt/go.sum @@ -0,0 +1,4 @@ +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= diff --git a/vendor/github.com/go-toolsmith/astp/.gitignore b/vendor/github.com/go-toolsmith/astp/.gitignore new file mode 100644 index 000000000..1f6187ecd --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/.gitignore @@ -0,0 +1,4 @@ +bin +pkg +src/main +tmp \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/.travis.yml b/vendor/github.com/go-toolsmith/astp/.travis.yml new file mode 100644 index 000000000..8994d395c --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/LICENSE b/vendor/github.com/go-toolsmith/astp/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-toolsmith/astp/README.md b/vendor/github.com/go-toolsmith/astp/README.md new file mode 100644 index 000000000..7313c6ab8 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/README.md @@ -0,0 +1,39 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astp)](https://goreportcard.com/report/github.com/go-toolsmith/astp) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/astp?status.svg)](https://godoc.org/github.com/go-toolsmith/astp) +[![Build Status](https://travis-ci.org/go-toolsmith/astp.svg?branch=master)](https://travis-ci.org/go-toolsmith/astp) + + +# astp + +Package astp provides AST predicates. + +## Installation: + +```bash +go get github.com/go-toolsmith/astp +``` + +## Example + +```go +package main + +import ( + "fmt" + + "github.com/go-toolsmith/astp" + "github.com/go-toolsmith/strparse" +) + +func main() { + if astp.IsIdent(strparse.Expr(`x`)) { + fmt.Println("ident") + } + if astp.IsBlockStmt(strparse.Stmt(`{f()}`)) { + fmt.Println("block stmt") + } + if astp.IsGenDecl(strparse.Decl(`var x int = 10`)) { + fmt.Println("gen decl") + } +} +``` diff --git a/vendor/github.com/go-toolsmith/astp/decl.go b/vendor/github.com/go-toolsmith/astp/decl.go new file mode 100644 index 000000000..4654ad95c --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/decl.go @@ -0,0 +1,39 @@ +package astp + +import "go/ast" + +// IsDecl reports whether a node is a ast.Decl. +func IsDecl(node ast.Node) bool { + _, ok := node.(ast.Decl) + return ok +} + +// IsFuncDecl reports whether a given ast.Node is a function declaration (*ast.FuncDecl). +func IsFuncDecl(node ast.Node) bool { + _, ok := node.(*ast.FuncDecl) + return ok +} + +// IsGenDecl reports whether a given ast.Node is a generic declaration (*ast.GenDecl). +func IsGenDecl(node ast.Node) bool { + _, ok := node.(*ast.GenDecl) + return ok +} + +// IsImportSpec reports whether a given ast.Node is an import declaration (*ast.ImportSpec). +func IsImportSpec(node ast.Node) bool { + _, ok := node.(*ast.ImportSpec) + return ok +} + +// IsValueSpec reports whether a given ast.Node is a value declaration (*ast.ValueSpec). +func IsValueSpec(node ast.Node) bool { + _, ok := node.(*ast.ValueSpec) + return ok +} + +// IsTypeSpec reports whether a given ast.Node is a type declaration (*ast.TypeSpec). +func IsTypeSpec(node ast.Node) bool { + _, ok := node.(*ast.TypeSpec) + return ok +} diff --git a/vendor/github.com/go-toolsmith/astp/expr.go b/vendor/github.com/go-toolsmith/astp/expr.go new file mode 100644 index 000000000..adf9668ce --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/expr.go @@ -0,0 +1,141 @@ +package astp + +import "go/ast" + +// IsExpr reports whether a given ast.Node is an expression(ast.Expr). +func IsExpr(node ast.Node) bool { + _, ok := node.(ast.Expr) + return ok +} + +// IsBadExpr reports whether a given ast.Node is a bad expression (*ast.IsBadExpr). 
+func IsBadExpr(node ast.Node) bool { + _, ok := node.(*ast.BadExpr) + return ok +} + +// IsIdent reports whether a given ast.Node is an identifier (*ast.IsIdent). +func IsIdent(node ast.Node) bool { + _, ok := node.(*ast.Ident) + return ok +} + +// IsEllipsis reports whether a given ast.Node is an `...` (ellipsis) (*ast.IsEllipsis). +func IsEllipsis(node ast.Node) bool { + _, ok := node.(*ast.Ellipsis) + return ok +} + +// IsBasicLit reports whether a given ast.Node is a literal of basic type (*ast.IsBasicLit). +func IsBasicLit(node ast.Node) bool { + _, ok := node.(*ast.BasicLit) + return ok +} + +// IsFuncLit reports whether a given ast.Node is a function literal (*ast.IsFuncLit). +func IsFuncLit(node ast.Node) bool { + _, ok := node.(*ast.FuncLit) + return ok +} + +// IsCompositeLit reports whether a given ast.Node is a composite literal (*ast.IsCompositeLit). +func IsCompositeLit(node ast.Node) bool { + _, ok := node.(*ast.CompositeLit) + return ok +} + +// IsParenExpr reports whether a given ast.Node is a parenthesized expression (*ast.IsParenExpr). +func IsParenExpr(node ast.Node) bool { + _, ok := node.(*ast.ParenExpr) + return ok +} + +// IsSelectorExpr reports whether a given ast.Node is a selector expression (*ast.IsSelectorExpr). +func IsSelectorExpr(node ast.Node) bool { + _, ok := node.(*ast.SelectorExpr) + return ok +} + +// IsIndexExpr reports whether a given ast.Node is an index expression (*ast.IsIndexExpr). +func IsIndexExpr(node ast.Node) bool { + _, ok := node.(*ast.IndexExpr) + return ok +} + +// IsSliceExpr reports whether a given ast.Node is a slice expression (*ast.IsSliceExpr). +func IsSliceExpr(node ast.Node) bool { + _, ok := node.(*ast.SliceExpr) + return ok +} + +// IsTypeAssertExpr reports whether a given ast.Node is a type assert expression (*ast.IsTypeAssertExpr). +func IsTypeAssertExpr(node ast.Node) bool { + _, ok := node.(*ast.TypeAssertExpr) + return ok +} + +// IsCallExpr reports whether a given ast.Node is an expression followed by an argument list (*ast.IsCallExpr). +func IsCallExpr(node ast.Node) bool { + _, ok := node.(*ast.CallExpr) + return ok +} + +// IsStarExpr reports whether a given ast.Node is a star expression(unary "*" or apointer) (*ast.IsStarExpr) +func IsStarExpr(node ast.Node) bool { + _, ok := node.(*ast.StarExpr) + return ok +} + +// IsUnaryExpr reports whether a given ast.Node is a unary expression (*ast.IsUnaryExpr). +func IsUnaryExpr(node ast.Node) bool { + _, ok := node.(*ast.UnaryExpr) + return ok +} + +// IsBinaryExpr reports whether a given ast.Node is a binary expression (*ast.IsBinaryExpr). +func IsBinaryExpr(node ast.Node) bool { + _, ok := node.(*ast.BinaryExpr) + return ok +} + +// IsKeyValueExpr reports whether a given ast.Node is a (key:value) pair (*ast.IsKeyValueExpr). +func IsKeyValueExpr(node ast.Node) bool { + _, ok := node.(*ast.KeyValueExpr) + return ok +} + +// IsArrayType reports whether a given ast.Node is an array or slice type (*ast.IsArrayType). +func IsArrayType(node ast.Node) bool { + _, ok := node.(*ast.ArrayType) + return ok +} + +// IsStructType reports whether a given ast.Node is a struct type (*ast.IsStructType). +func IsStructType(node ast.Node) bool { + _, ok := node.(*ast.StructType) + return ok +} + +// IsFuncType reports whether a given ast.Node is a function type (*ast.IsFuncType). +func IsFuncType(node ast.Node) bool { + _, ok := node.(*ast.FuncType) + return ok +} + +// IsInterfaceType reports whether a given ast.Node is an interface type (*ast.IsInterfaceType). 
+func IsInterfaceType(node ast.Node) bool { + _, ok := node.(*ast.InterfaceType) + return ok +} + +// IsMapType reports whether a given ast.Node is a map type (*ast.IsMapType). +func IsMapType(node ast.Node) bool { + _, ok := node.(*ast.MapType) + return ok +} + +// IsChanType reports whether a given ast.Node is a channel type (*ast.IsChanType). +func IsChanType(node ast.Node) bool { + _, ok := node.(*ast.ChanType) + return ok +} diff --git a/vendor/github.com/go-toolsmith/astp/go.mod b/vendor/github.com/go-toolsmith/astp/go.mod new file mode 100644 index 000000000..023a09392 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/go.mod @@ -0,0 +1,6 @@ +module github.com/go-toolsmith/astp + +require ( + github.com/go-toolsmith/astequal v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 +) diff --git a/vendor/github.com/go-toolsmith/astp/go.sum b/vendor/github.com/go-toolsmith/astp/go.sum new file mode 100644 index 000000000..aa0857030 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/go.sum @@ -0,0 +1,4 @@ +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= diff --git a/vendor/github.com/go-toolsmith/astp/stmt.go b/vendor/github.com/go-toolsmith/astp/stmt.go new file mode 100644 index 000000000..19645d212 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astp/stmt.go @@ -0,0 +1,135 @@ +package astp + +import "go/ast" + +// IsStmt reports whether a given ast.Node is a statement(ast.Stmt). +func IsStmt(node ast.Node) bool { + _, ok := node.(ast.Stmt) + return ok +} + +// IsBadStmt reports whether a given ast.Node is a bad statement(*ast.BadStmt) +func IsBadStmt(node ast.Node) bool { + _, ok := node.(*ast.BadStmt) + return ok +} + +// IsDeclStmt reports whether a given ast.Node is a declaration statement(*ast.DeclStmt) +func IsDeclStmt(node ast.Node) bool { + _, ok := node.(*ast.DeclStmt) + return ok +} + +// IsEmptyStmt reports whether a given ast.Node is an empty statement(*ast.EmptyStmt) +func IsEmptyStmt(node ast.Node) bool { + _, ok := node.(*ast.EmptyStmt) + return ok +} + +// IsLabeledStmt reports whether a given ast.Node is a label statement(*ast.LabeledStmt) +func IsLabeledStmt(node ast.Node) bool { + _, ok := node.(*ast.LabeledStmt) + return ok +} + +// IsExprStmt reports whether a given ast.Node is an expression statement(*ast.ExprStmt) +func IsExprStmt(node ast.Node) bool { + _, ok := node.(*ast.ExprStmt) + return ok +} + +// IsSendStmt reports whether a given ast.Node is a send to chan statement(*ast.SendStmt) +func IsSendStmt(node ast.Node) bool { + _, ok := node.(*ast.SendStmt) + return ok +} + +// IsIncDecStmt reports whether a given ast.Node is a increment/decrement statement(*ast.IncDecStmt) +func IsIncDecStmt(node ast.Node) bool { + _, ok := node.(*ast.IncDecStmt) + return ok +} + +// IsAssignStmt reports whether a given ast.Node is an assignment statement(*ast.AssignStmt) +func IsAssignStmt(node ast.Node) bool { + _, ok := node.(*ast.AssignStmt) + return ok +} + +// IsGoStmt reports whether a given ast.Node is a go statement(*ast.GoStmt) +func IsGoStmt(node ast.Node) bool { + _, ok := node.(*ast.GoStmt) + return ok +} + +// IsDeferStmt reports whether a given ast.Node is a defer statement(*ast.DeferStmt) +func IsDeferStmt(node ast.Node) bool { 
+ _, ok := node.(*ast.DeferStmt) + return ok +} + +// IsReturnStmt reports whether a given ast.Node is a return statement(*ast.ReturnStmt) +func IsReturnStmt(node ast.Node) bool { + _, ok := node.(*ast.ReturnStmt) + return ok +} + +// IsBranchStmt reports whether a given ast.Node is a branch(goto/continue/break/fallthrough)statement(*ast.BranchStmt) +func IsBranchStmt(node ast.Node) bool { + _, ok := node.(*ast.BranchStmt) + return ok +} + +// IsBlockStmt reports whether a given ast.Node is a block statement(*ast.BlockStmt) +func IsBlockStmt(node ast.Node) bool { + _, ok := node.(*ast.BlockStmt) + return ok +} + +// IsIfStmt reports whether a given ast.Node is an if statement(*ast.IfStmt) +func IsIfStmt(node ast.Node) bool { + _, ok := node.(*ast.IfStmt) + return ok +} + +// IsCaseClause reports whether a given ast.Node is a case statement(*ast.CaseClause) +func IsCaseClause(node ast.Node) bool { + _, ok := node.(*ast.CaseClause) + return ok +} + +// IsSwitchStmt reports whether a given ast.Node is a switch statement(*ast.SwitchStmt) +func IsSwitchStmt(node ast.Node) bool { + _, ok := node.(*ast.SwitchStmt) + return ok +} + +// IsTypeSwitchStmt reports whether a given ast.Node is a type switch statement(*ast.TypeSwitchStmt) +func IsTypeSwitchStmt(node ast.Node) bool { + _, ok := node.(*ast.TypeSwitchStmt) + return ok +} + +// IsCommClause reports whether a given ast.Node is a select statement(*ast.CommClause) +func IsCommClause(node ast.Node) bool { + _, ok := node.(*ast.CommClause) + return ok +} + +// IsSelectStmt reports whether a given ast.Node is a selection statement(*ast.SelectStmt) +func IsSelectStmt(node ast.Node) bool { + _, ok := node.(*ast.SelectStmt) + return ok +} + +// IsForStmt reports whether a given ast.Node is a for statement(*ast.ForStmt) +func IsForStmt(node ast.Node) bool { + _, ok := node.(*ast.ForStmt) + return ok +} + +// IsRangeStmt reports whether a given ast.Node is a range statement(*ast.RangeStmt) +func IsRangeStmt(node ast.Node) bool { + _, ok := node.(*ast.RangeStmt) + return ok +} diff --git a/vendor/github.com/go-toolsmith/strparse/.travis.yml b/vendor/github.com/go-toolsmith/strparse/.travis.yml new file mode 100644 index 000000000..8994d395c --- /dev/null +++ b/vendor/github.com/go-toolsmith/strparse/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go tool vet . + - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/strparse/LICENSE b/vendor/github.com/go-toolsmith/strparse/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/strparse/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-toolsmith/strparse/README.md b/vendor/github.com/go-toolsmith/strparse/README.md new file mode 100644 index 000000000..ae80a5398 --- /dev/null +++ b/vendor/github.com/go-toolsmith/strparse/README.md @@ -0,0 +1,34 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) +[![Build Status](https://travis-ci.org/go-toolsmith/strparse.svg?branch=master)](https://travis-ci.org/go-toolsmith/strparse) + + +# strparse + +Package strparse provides convenience wrappers around `go/parser` for simple +expression, statement and declaretion parsing from string. + +## Installation + +```bash +go get github.com/go-toolsmith/strparse +``` + +## Example + +```go +package main + +import ( + "go-toolsmith/astequal" + "go-toolsmith/strparse" +) + +func main() { + // Comparing AST strings for equallity (note different spacing): + x := strparse.Expr(`1 + f(v[0].X)`) + y := strparse.Expr(` 1+f( v[0].X ) `) + fmt.Println(astequal.Expr(x, y)) // => true +} + +``` diff --git a/vendor/github.com/go-toolsmith/strparse/go.mod b/vendor/github.com/go-toolsmith/strparse/go.mod new file mode 100644 index 000000000..ed9d88136 --- /dev/null +++ b/vendor/github.com/go-toolsmith/strparse/go.mod @@ -0,0 +1 @@ +module github.com/go-toolsmith/strparse diff --git a/vendor/github.com/go-toolsmith/strparse/strparse.go b/vendor/github.com/go-toolsmith/strparse/strparse.go new file mode 100644 index 000000000..894c7ebac --- /dev/null +++ b/vendor/github.com/go-toolsmith/strparse/strparse.go @@ -0,0 +1,59 @@ +// Package strparse provides convenience wrappers around `go/parser` for simple +// expression, statement and declaration parsing from string. +// +// Can be used to construct AST nodes using source syntax. +package strparse + +import ( + "go/ast" + "go/parser" + "go/token" +) + +var ( + // BadExpr is returned as a parse result for malformed expressions. + // Should be treated as constant or readonly variable. + BadExpr = &ast.BadExpr{} + + // BadStmt is returned as a parse result for malformed statmenents. + // Should be treated as constant or readonly variable. + BadStmt = &ast.BadStmt{} + + // BadDecl is returned as a parse result for malformed declarations. + // Should be treated as constant or readonly variable. + BadDecl = &ast.BadDecl{} +) + +// Expr parses single expression node from s. +// In case of parse error, BadExpr is returned. +func Expr(s string) ast.Expr { + node, err := parser.ParseExpr(s) + if err != nil { + return BadExpr + } + return node +} + +// Stmt parses single statement node from s. +// In case of parse error, BadStmt is returned. 
+func Stmt(s string) ast.Stmt { + node, err := parser.ParseFile(token.NewFileSet(), "", "package main;func main() {"+s+"}", 0) + if err != nil { + return BadStmt + } + fn := node.Decls[0].(*ast.FuncDecl) + if len(fn.Body.List) != 1 { + return BadStmt + } + return fn.Body.List[0] +} + +// Decl parses single declaration node from s. +// In case of parse error, BadDecl is returned. +func Decl(s string) ast.Decl { + node, err := parser.ParseFile(token.NewFileSet(), "", "package main;"+s, 0) + if err != nil || len(node.Decls) != 1 { + return BadDecl + } + return node.Decls[0] +} diff --git a/vendor/github.com/go-toolsmith/typep/.travis.yml b/vendor/github.com/go-toolsmith/typep/.travis.yml new file mode 100644 index 000000000..d3ff3cca8 --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.x +install: + - # Prevent default install action "go get -t -v ./...". +script: + - go get -t -v ./... + - go vet ./... + - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/typep/LICENSE b/vendor/github.com/go-toolsmith/typep/LICENSE new file mode 100644 index 000000000..eef17180f --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 go-toolsmith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-toolsmith/typep/README.md b/vendor/github.com/go-toolsmith/typep/README.md new file mode 100644 index 000000000..f7979148f --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/README.md @@ -0,0 +1,37 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/typep)](https://goreportcard.com/report/github.com/go-toolsmith/typep) +[![GoDoc](https://godoc.org/github.com/go-toolsmith/typep?status.svg)](https://godoc.org/github.com/go-toolsmith/typep) +[![Build Status](https://travis-ci.org/go-toolsmith/typep.svg?branch=master)](https://travis-ci.org/go-toolsmith/typep) + +# typep + +Package typep provides type predicates. 
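
As a quick, illustrative sketch of the strparse Stmt and Decl wrappers defined above: well-formed snippets come back as the single parsed node, while malformed input collapses to the shared Bad* sentinels rather than an error. The snippet literals below are made up for the example.

```go
package main

import (
	"fmt"

	"github.com/go-toolsmith/strparse"
)

func main() {
	// Well-formed input: the wrappers splice the snippet into a synthetic
	// "package main" file and return the single parsed node.
	stmt := strparse.Stmt(`x := make([]int, 0, 8)`)
	decl := strparse.Decl(`type point struct{ X, Y float64 }`)
	fmt.Printf("%T %T\n", stmt, decl) // => *ast.AssignStmt *ast.GenDecl

	// Malformed input: no error value, just the shared BadStmt sentinel.
	fmt.Println(strparse.Stmt(`if {`) == strparse.BadStmt) // => true
}
```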
+ +## Installation: + +```bash +go get -v github.com/go-toolsmith/typep +``` + +## Example + +```go +package main + +import ( + "fmt" + + "github.com/go-toolsmith/typep" + "github.com/go-toolsmith/strparse" +) + +func main() { + floatTyp := types.Typ[types.Float32] + intTyp := types.Typ[types.Int] + ptr := types.NewPointer(intTyp) + arr := types.NewArray(intTyp, 64) + fmt.Println(typep.HasFloatProp(floatTyp)) // => true + fmt.Println(typep.HasFloatProp(intTyp)) // => false + fmt.Println(typep.IsPointer(ptr)) // => true + fmt.Println(typep.IsArray(arr)) // => true +} +``` diff --git a/vendor/github.com/go-toolsmith/typep/doc.go b/vendor/github.com/go-toolsmith/typep/doc.go new file mode 100644 index 000000000..990bc402c --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/doc.go @@ -0,0 +1,2 @@ +// Package typep provides type predicates. +package typep diff --git a/vendor/github.com/go-toolsmith/typep/go.mod b/vendor/github.com/go-toolsmith/typep/go.mod new file mode 100644 index 000000000..197a57d30 --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/go.mod @@ -0,0 +1 @@ +module github.com/go-toolsmith/typep diff --git a/vendor/github.com/go-toolsmith/typep/predicates.go b/vendor/github.com/go-toolsmith/typep/predicates.go new file mode 100644 index 000000000..b07325a72 --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/predicates.go @@ -0,0 +1,36 @@ +package typep + +import ( + "go/ast" + "go/types" +) + +// IsTypeExpr reports whether x represents a type expression. +// +// Type expression does not evaluate to any run time value, +// but rather describes a type that is used inside Go expression. +// +// For example, (*T)(v) is a CallExpr that "calls" (*T). +// (*T) is a type expression that tells Go compiler type v should be converted to. +func IsTypeExpr(info *types.Info, x ast.Expr) bool { + switch x := x.(type) { + case *ast.StarExpr: + return IsTypeExpr(info, x.X) + case *ast.ParenExpr: + return IsTypeExpr(info, x.X) + case *ast.SelectorExpr: + return IsTypeExpr(info, x.Sel) + + case *ast.Ident: + // Identifier may be a type expression if object + // it reffers to is a type name. + _, ok := info.ObjectOf(x).(*types.TypeName) + return ok + + case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType: + return true + + default: + return false + } +} diff --git a/vendor/github.com/go-toolsmith/typep/safeExpr.go b/vendor/github.com/go-toolsmith/typep/safeExpr.go new file mode 100644 index 000000000..d5835d97b --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/safeExpr.go @@ -0,0 +1,73 @@ +package typep + +import ( + "go/ast" + "go/token" + "go/types" +) + +// SideEffectFree reports whether expr is softly safe expression and contains +// no significant side-effects. As opposed to strictly safe expressions, +// soft safe expressions permit some forms of side-effects, like +// panic possibility during indexing or nil pointer dereference. +// +// Uses types info to determine type conversion expressions that +// are the only permitted kinds of call expressions. +// Note that is does not check whether called function really +// has any side effects. The analysis is very conservative. +func SideEffectFree(info *types.Info, expr ast.Expr) bool { + // This list switch is not comprehensive and uses + // whitelist to be on the conservative side. + // Can be extended as needed. 
+ + if expr == nil { + return true + } + + switch expr := expr.(type) { + case *ast.StarExpr: + return SideEffectFree(info, expr.X) + case *ast.BinaryExpr: + return SideEffectFree(info, expr.X) && + SideEffectFree(info, expr.Y) + case *ast.UnaryExpr: + return expr.Op != token.ARROW && + SideEffectFree(info, expr.X) + case *ast.BasicLit, *ast.Ident: + return true + case *ast.SliceExpr: + return SideEffectFree(info, expr.X) && + SideEffectFree(info, expr.Low) && + SideEffectFree(info, expr.High) && + SideEffectFree(info, expr.Max) + case *ast.IndexExpr: + return SideEffectFree(info, expr.X) && + SideEffectFree(info, expr.Index) + case *ast.SelectorExpr: + return SideEffectFree(info, expr.X) + case *ast.ParenExpr: + return SideEffectFree(info, expr.X) + case *ast.TypeAssertExpr: + return SideEffectFree(info, expr.X) + case *ast.CompositeLit: + return SideEffectFreeList(info, expr.Elts) + case *ast.CallExpr: + return IsTypeExpr(info, expr.Fun) && + SideEffectFreeList(info, expr.Args) + + default: + return false + } +} + +// SideEffectFreeList reports whether every expr in list is safe. +// +// See SideEffectFree. +func SideEffectFreeList(info *types.Info, list []ast.Expr) bool { + for _, expr := range list { + if !SideEffectFree(info, expr) { + return false + } + } + return true +} diff --git a/vendor/github.com/go-toolsmith/typep/simplePredicates.go b/vendor/github.com/go-toolsmith/typep/simplePredicates.go new file mode 100644 index 000000000..3bc9c29c8 --- /dev/null +++ b/vendor/github.com/go-toolsmith/typep/simplePredicates.go @@ -0,0 +1,359 @@ +// Code generated by simplePredicates_generate.go; DO NOT EDIT + +package typep + +import ( + "go/types" +) + +// Simple 1-to-1 type predicates via type assertion. + +// IsBasic reports whether a given type has *types.Basic type. +func IsBasic(typ types.Type) bool { + _, ok := typ.(*types.Basic) + return ok +} + +// IsArray reports whether a given type has *types.Array type. +func IsArray(typ types.Type) bool { + _, ok := typ.(*types.Array) + return ok +} + +// IsSlice reports whether a given type has *types.Slice type. +func IsSlice(typ types.Type) bool { + _, ok := typ.(*types.Slice) + return ok +} + +// IsStruct reports whether a given type has *types.Struct type. +func IsStruct(typ types.Type) bool { + _, ok := typ.(*types.Struct) + return ok +} + +// IsPointer reports whether a given type has *types.Pointer type. +func IsPointer(typ types.Type) bool { + _, ok := typ.(*types.Pointer) + return ok +} + +// IsTuple reports whether a given type has *types.Tuple type. +func IsTuple(typ types.Type) bool { + _, ok := typ.(*types.Tuple) + return ok +} + +// IsSignature reports whether a given type has *types.Signature type. +func IsSignature(typ types.Type) bool { + _, ok := typ.(*types.Signature) + return ok +} + +// IsInterface reports whether a given type has *types.Interface type. +func IsInterface(typ types.Type) bool { + _, ok := typ.(*types.Interface) + return ok +} + +// IsMap reports whether a given type has *types.Map type. +func IsMap(typ types.Type) bool { + _, ok := typ.(*types.Map) + return ok +} + +// IsChan reports whether a given type has *types.Chan type. +func IsChan(typ types.Type) bool { + _, ok := typ.(*types.Chan) + return ok +} + +// IsNamed reports whether a given type has *types.Named type. +func IsNamed(typ types.Type) bool { + _, ok := typ.(*types.Named) + return ok +} + +// *types.Basic predicates for the info field. + +// HasBooleanProp reports whether typ is a *types.Basic has IsBoolean property. 
+func HasBooleanProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsBoolean != 0 + } + return false +} + +// HasIntegerProp reports whether typ is a *types.Basic has IsInteger property. +func HasIntegerProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsInteger != 0 + } + return false +} + +// HasUnsignedProp reports whether typ is a *types.Basic has IsUnsigned property. +func HasUnsignedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsUnsigned != 0 + } + return false +} + +// HasFloatProp reports whether typ is a *types.Basic has IsFloat property. +func HasFloatProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsFloat != 0 + } + return false +} + +// HasComplexProp reports whether typ is a *types.Basic has IsComplex property. +func HasComplexProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsComplex != 0 + } + return false +} + +// HasStringProp reports whether typ is a *types.Basic has IsString property. +func HasStringProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsString != 0 + } + return false +} + +// HasUntypedProp reports whether typ is a *types.Basic has IsUntyped property. +func HasUntypedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsUntyped != 0 + } + return false +} + +// HasOrderedProp reports whether typ is a *types.Basic has IsOrdered property. +func HasOrderedProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsOrdered != 0 + } + return false +} + +// HasNumericProp reports whether typ is a *types.Basic has IsNumeric property. +func HasNumericProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsNumeric != 0 + } + return false +} + +// HasConstTypeProp reports whether typ is a *types.Basic has IsConstType property. +func HasConstTypeProp(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Info()&types.IsConstType != 0 + } + return false +} + +// *types.Basic predicates for the kind field. + +// HasBoolKind reports whether typ is a *types.Basic with its kind set to types.Bool. +func HasBoolKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Bool + } + return false +} + +// HasIntKind reports whether typ is a *types.Basic with its kind set to types.Int. +func HasIntKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int + } + return false +} + +// HasInt8Kind reports whether typ is a *types.Basic with its kind set to types.Int8. +func HasInt8Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int8 + } + return false +} + +// HasInt16Kind reports whether typ is a *types.Basic with its kind set to types.Int16. +func HasInt16Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int16 + } + return false +} + +// HasInt32Kind reports whether typ is a *types.Basic with its kind set to types.Int32. +func HasInt32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int32 + } + return false +} + +// HasInt64Kind reports whether typ is a *types.Basic with its kind set to types.Int64. 
+func HasInt64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Int64 + } + return false +} + +// HasUintKind reports whether typ is a *types.Basic with its kind set to types.Uint. +func HasUintKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint + } + return false +} + +// HasUint8Kind reports whether typ is a *types.Basic with its kind set to types.Uint8. +func HasUint8Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint8 + } + return false +} + +// HasUint16Kind reports whether typ is a *types.Basic with its kind set to types.Uint16. +func HasUint16Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint16 + } + return false +} + +// HasUint32Kind reports whether typ is a *types.Basic with its kind set to types.Uint32. +func HasUint32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint32 + } + return false +} + +// HasUint64Kind reports whether typ is a *types.Basic with its kind set to types.Uint64. +func HasUint64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uint64 + } + return false +} + +// HasUintptrKind reports whether typ is a *types.Basic with its kind set to types.Uintptr. +func HasUintptrKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Uintptr + } + return false +} + +// HasFloat32Kind reports whether typ is a *types.Basic with its kind set to types.Float32. +func HasFloat32Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Float32 + } + return false +} + +// HasFloat64Kind reports whether typ is a *types.Basic with its kind set to types.Float64. +func HasFloat64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Float64 + } + return false +} + +// HasComplex64Kind reports whether typ is a *types.Basic with its kind set to types.Complex64. +func HasComplex64Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Complex64 + } + return false +} + +// HasComplex128Kind reports whether typ is a *types.Basic with its kind set to types.Complex128. +func HasComplex128Kind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.Complex128 + } + return false +} + +// HasStringKind reports whether typ is a *types.Basic with its kind set to types.String. +func HasStringKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.String + } + return false +} + +// HasUnsafePointerKind reports whether typ is a *types.Basic with its kind set to types.UnsafePointer. +func HasUnsafePointerKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UnsafePointer + } + return false +} + +// HasUntypedBoolKind reports whether typ is a *types.Basic with its kind set to types.UntypedBool. +func HasUntypedBoolKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedBool + } + return false +} + +// HasUntypedIntKind reports whether typ is a *types.Basic with its kind set to types.UntypedInt. 
+func HasUntypedIntKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedInt + } + return false +} + +// HasUntypedRuneKind reports whether typ is a *types.Basic with its kind set to types.UntypedRune. +func HasUntypedRuneKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedRune + } + return false +} + +// HasUntypedFloatKind reports whether typ is a *types.Basic with its kind set to types.UntypedFloat. +func HasUntypedFloatKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedFloat + } + return false +} + +// HasUntypedComplexKind reports whether typ is a *types.Basic with its kind set to types.UntypedComplex. +func HasUntypedComplexKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedComplex + } + return false +} + +// HasUntypedStringKind reports whether typ is a *types.Basic with its kind set to types.UntypedString. +func HasUntypedStringKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedString + } + return false +} + +// HasUntypedNilKind reports whether typ is a *types.Basic with its kind set to types.UntypedNil. +func HasUntypedNilKind(typ types.Type) bool { + if typ, ok := typ.(*types.Basic); ok { + return typ.Kind() == types.UntypedNil + } + return false +} diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE new file mode 100644 index 000000000..890776ab7 --- /dev/null +++ b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 go-xmlfmt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
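Note on the typep README example above: as vendored, it uses the `go/types` package without importing it and imports `strparse` without using it, so it does not compile as shown. Below is a minimal self-contained variant of the same sketch, assuming only the vendored typep package and the standard library:

```go
package main

import (
	"fmt"
	"go/types"

	"github.com/go-toolsmith/typep"
)

func main() {
	floatTyp := types.Typ[types.Float32]
	intTyp := types.Typ[types.Int]
	ptr := types.NewPointer(intTyp)
	arr := types.NewArray(intTyp, 64)

	fmt.Println(typep.HasFloatProp(floatTyp)) // => true
	fmt.Println(typep.HasFloatProp(intTyp))   // => false
	fmt.Println(typep.IsPointer(ptr))         // => true
	fmt.Println(typep.IsArray(arr))           // => true
}
```

Each predicate is a plain type assertion (plus a bit-flag check for the `Has*Prop`/`Has*Kind` helpers), so calling them on analyzer hot paths is cheap.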
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md new file mode 100644 index 000000000..4eb6d69a0 --- /dev/null +++ b/vendor/github.com/go-xmlfmt/xmlfmt/README.md @@ -0,0 +1,178 @@ +# Go XML Formatter + +[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) +[![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt) +[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297) + +## Synopsis + +The Go XML Formatter, xmlfmt, will format the XML string in a readable way. + +```go +package main + +import "github.com/go-xmlfmt/xmlfmt" + +func main() { + xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia` + x := xmlfmt.FormatXML(xml1, "\t", " ") + print(x) +} + +``` + +Output: + +```xml + + + a + + + + + + Some org-or-other + + Wouldnt you like to know + + + + Pat + + Califia + + + + + +``` + +There is no XML decoding and encoding involved, only pure regular expression matching and replacing. So it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved, instead of being changed by the encoder. This is why this package exists in the first place. + +## Command + +To use it on command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt): + + +``` +$ xmlfmt +XML Formatter +built on 2019-12-08 + +The xmlfmt will format the XML string without rewriting the document + +Options: + + -h, --help display help information + -f, --file *The xml file to read from (or stdin) + -p, --prefix each element begins on a new line and this prefix + -i, --indent[= ] indent string for nested elements +``` + + +## Justification + +### The format + +The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- some, but not all, closing tags stays on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e., + +When it comes to very big XML strings, which is what I’m dealing every day, saving spaces by not allowing those closing tags taking extra lines is plus instead of negative to me. + +### The alternative + +To format it “properly”, i.e., as what people would normally see, is very hard using pure regular expression. In fact, according to Sam Whited from the go-nuts mlist, + +> Regular expression is, well, regular. This means that they can parse regular grammars, but can't parse context free grammars (like XML). It is actually impossible to use a regex to do this task; it will always be fragile, unfortunately. + +So if the output format is so important to you, then unfortunately you have to go through decoding and encoding procedures. But there are some drawbacks as well, as put by James McGill, in http://stackoverflow.com/questions/21117161, besides such method being slow: + +> I like this solution, but am still in search of a Golang XML formatter/prettyprinter that doesn't rewrite the document (other than formatting whitespace). Marshalling or using the Encoder will change namespace declarations. 
+> +> For example an element like "< ns1:Element />" will be translated to something like '< Element xmlns="http://bla...bla/ns1" >< /Element >' which seems harmless enough except when the intent is to not alter the xml other than formatting. -- James McGill Nov 12 '15 + +Using Sam's code as an example, + +https://play.golang.org/p/JUqQY3WpW5 + +The above code formats the following XML + +```xml + + + + + + 123 + John Brown + + + + +``` + +into this: + +```xml + +
+[formatted output omitted: the element markup was lost in extraction; the visible text content is "123" and "John Brown"]
+```
+
+I know they are syntactically the same, however the problem is that they *look* totally different.
+
+That's why there is this package, an XML Beautifier that doesn't rewrite the document.
+
+## Credit
+
+The credit goes to **diotalevi** from his post at http://www.perlmonks.org/?node_id=261292.
+
+However, it does not work for all cases. For example,
+
+```sh
+$ echo '[sample XML omitted: markup lost in extraction]' | perl -pe '[diotalevi one-liner: truncated in extraction]'
+[output omitted: markup lost in extraction; visible text content is "123" and "John Brown"]
+```
+
+I simplified the algorithm, and now it should work for all cases:
+
+```sh
+echo '[sample XML omitted: markup lost in extraction]' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge'
+```
+```xml
+[formatted output omitted: element markup lost in extraction; visible text content is "123" and "John Brown"]
+``` + +This package is a direct translate from above Perl code into Go, +then further enhanced by @ruandao. diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go new file mode 100644 index 000000000..b744f5b35 --- /dev/null +++ b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go @@ -0,0 +1,56 @@ +//////////////////////////////////////////////////////////////////////////// +// Porgram: xmlfmt.go +// Purpose: Go XML Beautify from XML string using pure string manipulation +// Authors: Antonio Sun (c) 2016-2019, All rights reserved +//////////////////////////////////////////////////////////////////////////// + +package xmlfmt + +import ( + "regexp" + "strings" +) + +var ( + reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) + // NL is the newline string used in XML output, define for DOS-convenient. + NL = "\r\n" +) + +// FormatXML will (purly) reformat the XML string in a readable way, without any rewriting/altering the structure +func FormatXML(xmls, prefix, indent string) string { + src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + + rf := replaceTag(prefix, indent) + return (prefix + reg.ReplaceAllStringFunc(src, rf)) +} + +// replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl +// and deal with comments as well +func replaceTag(prefix, indent string) func(string) string { + indentLevel := 0 + return func(m string) string { + // head elem + if strings.HasPrefix(m, "") { + return NL + prefix + strings.Repeat(indent, indentLevel) + m + } + // comment elem + if strings.HasPrefix(m, " "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 000000000..02e7de80a --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + 
return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m != nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) 
+} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) + } + + if len(next) == len(matchers) { + return next + } + + return minimizeMatchers(next) +} + +// minimizeAnyOf tries to apply some heuristics to minimize number of nodes in given tree +func minimizeTree(tree *ast.Node) *ast.Node { + switch tree.Kind { + case ast.KindAnyOf: + return minimizeTreeAnyOf(tree) + default: + return nil + } +} + +// minimizeAnyOf tries to find common children of given node of AnyOf pattern +// it searches for common children from left and from right +// if any common children are found – then it returns new optimized ast tree +// else it returns nil +func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { + if !areOfSameKind(tree.Children, ast.KindPattern) { + return nil + } + + commonLeft, commonRight := commonChildren(tree.Children) + commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) + if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts + return nil + } + + var result []*ast.Node + if commonLeftCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) + } + + var anyOf []*ast.Node + for _, child := range tree.Children { + reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] + var node *ast.Node + if len(reuse) == 0 { + // this pattern is completely reduced by commonLeft and commonRight patterns + // so it become nothing + node = ast.NewNode(ast.KindNothing, nil) + } else { + node = ast.NewNode(ast.KindPattern, nil, reuse...) 
+ } + anyOf = appendIfUnique(anyOf, node) + } + switch { + case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: + result = append(result, anyOf[0]) + case len(anyOf) > 1: + result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) + } + + if commonRightCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) + } + + return ast.NewNode(ast.KindPattern, nil, result...) +} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = 
match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 000000000..2afde343a --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string { + b := make([]byte, 2*len(s)) + + // a byte loop is correct because all meta characters are ASCII + j := 0 + for i := 0; i < len(s); i++ { + if syntax.Special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + + return string(b[0:j]) +} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go new file mode 100644 index 000000000..514a9a5c4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/strings" +) + +type Any struct { + Separators []rune +} + +func NewAny(s []rune) Any { + return Any{s} +} + +func (self Any) Match(s string) bool { + return strings.IndexAnyRunes(s, self.Separators) == -1 +} + +func (self Any) Index(s string) (int, []int) { + found := strings.IndexAnyRunes(s, self.Separators) + switch found { + case -1: + case 0: + return 0, segments0 + default: + s = s[:found] + } + + segments := acquireSegments(len(s)) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Any) Len() int { + return lenNo +} + +func (self Any) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go new file mode 100644 index 000000000..8e65356cd --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any_of.go @@ -0,0 +1,82 @@ +package match + +import "fmt" + +type AnyOf struct { + Matchers Matchers +} + +func NewAnyOf(m ...Matcher) AnyOf { + return AnyOf{Matchers(m)} +} + +func (self *AnyOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self AnyOf) Match(s string) bool { + for _, m := range self.Matchers { + if m.Match(s) { + return true + } + } + + return false +} + +func (self AnyOf) Index(s string) (int, []int) { + index := -1 + + segments := acquireSegments(len(s)) + for _, m := range self.Matchers { + idx, seg := m.Index(s) + if idx == -1 { + continue + } + + if index == -1 || idx < index { + index = idx + segments = append(segments[:0], seg...) 
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 000000000..a8130e93e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,146 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (int, []int) { + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + + // self.Length, self.RLen and self.LLen are values meaning the length of runes for each part + // here we manipulating byte length for better optimizations + // but these checks still works, cause minLen of 1-rune string is 1 byte. 
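+	// (LengthRunes counts runes while inputLen counts bytes; since every rune
+	// occupies at least one byte, LengthRunes > inputLen still reliably means
+	// the input is too short to match.)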
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen { + return false + } + + // try to cut unnecessary parts + // by knowledge of length of right and left part + var offset, limit int + if self.LeftLengthRunes >= 0 { + offset = self.LeftLengthRunes + } + if self.RightLengthRunes >= 0 { + limit = inputLen - self.RightLengthRunes + } else { + limit = inputLen + } + + for offset < limit { + // search for matching part in substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := utf8.DecodeRuneInString(s[offset+index:]) + offset += index + step + + releaseSegments(segments) + } + + return false +} + +func (self BTree) String() string { + const n string = "" + var l, r string + if self.Left == nil { + l = n + } else { + l = self.Left.String() + } + if self.Right == nil { + r = n + } else { + r = self.Right.String() + } + + return fmt.Sprintf("%s]>", l, self.Value, r) +} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go new file mode 100644 index 000000000..0998e95b0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/contains.go @@ -0,0 +1,58 @@ +package match + +import ( + "fmt" + "strings" +) + +type Contains struct { + Needle string + Not bool +} + +func NewContains(needle string, not bool) Contains { + return Contains{needle, not} +} + +func (self Contains) Match(s string) bool { + return strings.Contains(s, self.Needle) != self.Not +} + +func (self Contains) Index(s string) (int, []int) { + var offset int + + idx := strings.Index(s, self.Needle) + + if !self.Not { + if idx == -1 { + return -1, nil + } + + offset = idx + len(self.Needle) + if len(s) <= offset { + return 0, []int{offset} + } + s = s[offset:] + } else if idx != -1 { + s = s[:idx] + } + + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, offset+i) + } + + return 0, append(segments, offset+len(s)) +} + +func (self Contains) Len() int { + return lenNo +} + +func (self Contains) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("", not, self.Needle) +} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go new file mode 100644 index 000000000..7c968ee36 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/every_of.go @@ -0,0 +1,99 @@ +package match + +import ( + "fmt" +) + +type EveryOf struct { + Matchers Matchers +} + +func NewEveryOf(m ...Matcher) EveryOf { + return EveryOf{Matchers(m)} +} + +func (self *EveryOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self EveryOf) Len() (l int) { + for _, m := range self.Matchers { + if ml := m.Len(); l > 0 { + l += ml + } else { + return -1 + } + } + + return +} + +func (self EveryOf) Index(s string) (int, []int) { + var index int + var offset int + + // make `in` with cap as len(s), + // cause it is the maximum size of output segments values + next := acquireSegments(len(s)) + current := acquireSegments(len(s)) + + sub := s + for i, m := range self.Matchers { + idx, seg := m.Index(sub) + if idx == -1 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + if i == 0 { + // we use copy here instead of `current = seg` + // cause seg is a slice from reusable buffer `in` + // and it could be overwritten in next iteration + current = append(current, seg...) + } else { + // clear the next + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 000000000..7fd763ecd --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!" 
+ } + + return fmt.Sprintf("", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 000000000..f80e007fb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) + + return target +} + +func reverseSegments(input []int) { + l := len(input) + m := l / 2 + + for i := 0; i < m; i++ { + input[i], input[l-i-1] = input[l-i-1], input[i] + } +} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go new file mode 100644 index 000000000..d72f69eff --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/max.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Max struct { + Limit int +} + +func NewMax(l int) Max { + return Max{l} +} + +func (self Max) Match(s string) bool { + var l int + for range s { + l += 1 + if l > self.Limit { + return false + } + } + + return true +} + +func (self Max) Index(s string) (int, []int) { + segments := acquireSegments(self.Limit + 1) + segments = append(segments, 0) + var count int + for i, r := range s { + count++ + if count > self.Limit { + break + } + segments = append(segments, i+utf8.RuneLen(r)) + } + + return 0, segments +} + +func (self Max) Len() int { + return lenNo +} + +func (self Max) String() string { + return fmt.Sprintf("", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go new file mode 100644 index 000000000..db57ac8eb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/min.go @@ -0,0 +1,57 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Min struct { + Limit int +} + +func NewMin(l int) Min { + return Min{l} +} + +func (self Min) Match(s string) bool { + var l int + for range s { + l += 1 + if l >= self.Limit { + return true + } + } + + return false +} + +func (self Min) Index(s string) (int, []int) { + var count int + + c := len(s) - self.Limit + 1 + if c <= 0 { + return -1, nil + } + + segments := acquireSegments(c) + for i, r := range s { + count++ + if count >= self.Limit { + segments = append(segments, i+utf8.RuneLen(r)) + } + } + + if len(segments) == 0 { + return -1, nil + } + + return 0, segments +} + +func (self Min) Len() int { + return lenNo +} + +func (self Min) String() string { + return fmt.Sprintf("", self.Limit) +} diff --git 
a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 000000000..0d4ecd36b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 000000000..a7347250e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 000000000..8ee58fe1b --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 000000000..8208085a1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if prefixIdx 
== -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 000000000..ce30245a4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 000000000..4379042e4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 000000000..9ea6f3094 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 000000000..ee6e3954c --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + 
"unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 000000000..85bea8c68 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 000000000..c5106f819 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 000000000..3875950bb --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 000000000..0a17616d3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match 
+ +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// raw represents raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 000000000..f58144e73 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.com") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.com")) + g.Match("*.github.com") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is created for compile-once patterns. This means, that compilation could take time, but +strings matching is done faster, than in case when always parsing template. + +If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower. 
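As a small illustration of the compile-once pattern recommended above, the sketch below caches a single compiled `Glob` and reuses it for every match; the package layout, pattern, and hostnames are illustrative assumptions, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

// Compiled once at package initialization, then reused for every call.
var hostPattern = glob.MustCompile("api.*.example.com", '.')

// isAPIHost reuses the compiled Glob instead of re-parsing the pattern
// on each call, which is where the speedup described above comes from.
func isAPIHost(host string) bool {
	return hostPattern.Match(host)
}

func main() {
	fmt.Println(isAPIHost("api.eu.example.com")) // true
	fmt.Println(isAPIHost("api.eu.other.com"))   // false
}
```

`MustCompile` suits patterns fixed at build time; for user-supplied patterns, `Compile` plus error handling is the safer choice, and `QuoteMeta` can escape fragments that must match literally.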
+ +Run `go test -bench=.` from source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same things with `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` is aka super-asterisk, that do not sensitive for separators. 
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 000000000..3220a694a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 000000000..429b40943 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return parserRange, tree, 
nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 000000000..a1c8d1962 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 000000000..2797c4e83 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 000000000..1d168b148 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 000000000..a72355641 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; i-- { + 
for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 000000000..e8ee1920b --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/gofrs/flock/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml new file mode 100644 index 000000000..b16d040fa --- /dev/null +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.14.x + - 1.15.x +script: go test -v -check.vv -race ./... 
+sudo: false +notifications: + email: + on_success: never + on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE new file mode 100644 index 000000000..8b8ff36fe --- /dev/null +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015-2020, Tim Heckman +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of gofrs nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md new file mode 100644 index 000000000..71ce63692 --- /dev/null +++ b/vendor/github.com/gofrs/flock/README.md @@ -0,0 +1,41 @@ +# flock +[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/gofrs/flock +``` + +## Usage +```Go +import "github.com/gofrs/flock" + +fileLock := flock.New("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/gofrs/flock). 
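As a complement to the README usage above, a minimal sketch (editor's addition, under stated assumptions) of the context-based retry variant that the vendored flock.go documents, `TryLockContext`; the lock path, timeout, and retry delay are illustrative values only:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	fileLock := flock.New("/tmp/go-lock.lock")

	// Give up after 5 seconds, retrying every 100ms in between.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	locked, err := fileLock.TryLockContext(ctx, 100*time.Millisecond)
	if err != nil {
		// err includes ctx.Err() when the timeout expires before the lock is won.
		fmt.Println("locking failed:", err)
		return
	}
	if locked {
		defer fileLock.Unlock()
		// do work while holding the lock
	}
}
```

This keeps the non-blocking semantics of `TryLock` while bounding how long the caller is willing to wait.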
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml new file mode 100644 index 000000000..909b4bf7c --- /dev/null +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\gofrs\flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.15' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go new file mode 100644 index 000000000..95c784ca5 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock.go @@ -0,0 +1,144 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidentally drop the exclusive lock. +package flock + +import ( + "context" + "os" + "runtime" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// New returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func New(path string) *Flock { + return &Flock{path: path} +} + +// NewFlock returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +// +// Deprecated: Use New instead. +func NewFlock(path string) *Flock { + return New(path) +} + +// Close is equivalent to calling Unlock. +// +// This will release the lock and close the underlying file descriptor. +// It will not remove the file from disk, that's up to your application. +func (f *Flock) Close() error { + return f.Unlock() +} + +// Path returns the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked returns the lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked returns the read lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. 
+func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryLock, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryRLock, retryDelay) +} + +func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + flags := os.O_CREATE + if runtime.GOOS == "aix" { + // AIX cannot preform write-lock (ie exclusive) on a + // read-only file. + flags |= os.O_RDWR + } else { + flags |= os.O_RDONLY + } + fh, err := os.OpenFile(f.path, flags, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} + +// ensure the file handle is closed if no lock is held +func (f *Flock) ensureFhState() { + if !f.l && !f.r && f.fh != nil { + f.fh.Close() + f.fh = nil + } +} diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go new file mode 100644 index 000000000..7277c1b6b --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_aix.go @@ -0,0 +1,281 @@ +// Copyright 2019 Tim Heckman. All rights reserved. Use of this source code is +// governed by the BSD 3-Clause license that can be found in the LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// This code is adapted from the Go package: +// cmd/go/internal/lockedfile/internal/filelock + +//+build aix + +package flock + +import ( + "errors" + "io" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type lockType int16 + +const ( + readLock lockType = unix.F_RDLCK + writeLock lockType = unix.F_WRLCK +) + +type cmdType int + +const ( + tryLock cmdType = unix.F_SETLK + waitLock cmdType = unix.F_SETLKW +) + +type inode = uint64 + +type inodeLock struct { + owner *Flock + queue []<-chan *Flock +} + +var ( + mu sync.Mutex + inodes = map[*Flock]inode{} + locks = map[inode]inodeLock{} +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. 
This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, writeLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, readLock) +} + +func (f *Flock) lock(locked *bool, flag lockType) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, err := f.doLock(waitLock, flag, true); err != nil { + return err + } + + *locked = true + return nil +} + +func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.fh.Stat() + if err != nil { + return false, err + } + ino := inode(fi.Sys().(*syscall.Stat_t).Ino) + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return false, &os.PathError{ + Path: f.Path(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + + inodes[f] = ino + + var wait chan *Flock + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else if !blocking { + // Already owned: cannot take the lock. + mu.Unlock() + return false, nil + } else { + // Already owned: add a channel to wait on. 
+ wait = make(chan *Flock) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + err = setlkw(f.fh.Fd(), cmd, lt) + + if err != nil { + f.doUnlock() + if cmd == tryLock && err == unix.EACCES { + return false, nil + } + return false, err + } + + return true, nil +} + +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + if err := f.doUnlock(); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +func (f *Flock) doUnlock() (err error) { + var owner *Flock + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner == f { + err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK) + } + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, writeLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, readLock) +} + +func (f *Flock) try(locked *bool, flag lockType) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + haslock, err := f.doLock(tryLock, flag, false) + if err != nil { + return false, err + } + + *locked = haslock + return haslock, nil +} + +// setlkw calls FcntlFlock with cmd for the entire file indicated by fd. +func setlkw(fd uintptr, cmd cmdType, lt lockType) error { + for { + err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. + }) + if err != unix.EINTR { + return err + } + } +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go new file mode 100644 index 000000000..c315a3e29 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -0,0 +1,197 @@ +// Copyright 2015 Tim Heckman. All rights reserved. 
+// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !aix,!windows + +package flock + +import ( + "os" + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) lock(locked *bool, flag int) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { + shouldRetry, reopenErr := f.reopenFDOnError(err) + if reopenErr != nil { + return reopenErr + } + + if !shouldRetry { + return err + } + + if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { + return err + } + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// syscall.LOCK_UN on the file and closes the file descriptor. It does not +// remove the file from disk. It's up to your application to do. +// +// Please note, if your shared lock became an exclusive lock this may +// unintentionally drop the exclusive lock if called by the consumer that +// believes they have a shared lock. Please see Lock() for more details. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. 
If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, syscall.LOCK_EX) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) try(locked *bool, flag int) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + var retried bool +retry: + err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) + + switch err { + case syscall.EWOULDBLOCK: + return false, nil + case nil: + *locked = true + return true, nil + } + if !retried { + if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { + return false, reopenErr + } else if shouldRetry { + retried = true + goto retry + } + } + + return false, err +} + +// reopenFDOnError determines whether we should reopen the file handle +// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: +// Since Linux 3.4 (commit 55725513) +// Probably NFSv4 where flock() is emulated by fcntl(). +func (f *Flock) reopenFDOnError(err error) (bool, error) { + if err != syscall.EIO && err != syscall.EBADF { + return false, nil + } + if st, err := f.fh.Stat(); err == nil { + // if the file is able to be read and written + if st.Mode()&0600 == 0600 { + f.fh.Close() + f.fh = nil + + // reopen in read-write mode and set the filehandle + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) + if err != nil { + return false, err + } + f.fh = fh + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go new file mode 100644 index 000000000..fe405a255 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_winapi.go @@ -0,0 +1,76 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build windows + +package flock + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows +// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: +// +// > The function requests an exclusive lock. Otherwise, it requests a shared +// > lock. 
+// +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procLockFileEx), + 6, + uintptr(handle), + uintptr(flags), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset))) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procUnlockFileEx), + 5, + uintptr(handle), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset)), + 0) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go new file mode 100644 index 000000000..ddb534cce --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -0,0 +1,142 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +package flock + +import ( + "syscall" +) + +// ErrorLockViolation is the error code returned from the Windows syscall when a +// lock would block and you ask to fail immediately. +const ErrorLockViolation syscall.Errno = 0x21 // 33 + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, winLockfileExclusiveLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, winLockfileSharedLock) +} + +func (f *Flock) lock(locked *bool, flag uint32) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// UnlockFileEx() on the file and closes the file descriptor. 
It does not remove +// the file from disk. It's up to your application to do. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, winLockfileExclusiveLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being shared-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, winLockfileSharedLock) +} + +func (f *Flock) try(locked *bool, flag uint32) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) + + if errNo > 0 { + if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { + return false, nil + } + + return false, errNo + } + + *locked = true + + return true, nil +} diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. 
+// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. 
+func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. 
+func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). +func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
-func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c09..e8db57e09 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. 
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617ce..2187e877f 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. 
-func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. 
+ case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. 
- errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. 
If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. 
- return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. 
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30..42fc120c9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. 
-type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. 
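For orientation while reading the new shim above: HasExtension, ClearExtension and ClearAllExtensions keep the long-standing v1 signatures but now delegate to protoreflect, and presence covers both populated fields and matching unknown fields. The sketch below is illustrative only; pb.MyMessage and pb.E_Note stand in for code that protoc-gen-go would generate and are not part of this patch.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/gen/pb" // hypothetical generated package
    )

    func clearNote(msg *pb.MyMessage) {
        // HasExtension reports the extension as present whether it is an
        // explicitly populated field or still sits in the unknown fields.
        if proto.HasExtension(msg, pb.E_Note) {
            proto.ClearExtension(msg, pb.E_Note) // removes both representations
        }
        // ClearAllExtensions drops every extension in the declared ranges.
        proto.ClearAllExtensions(msg)
        fmt.Println(proto.HasExtension(msg, pb.E_Note)) // false
    }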
+// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. 
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. 
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. 
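A companion sketch for the read/write path added above. It again uses the hypothetical generated descriptor pb.E_Note (a proto2 optional string extension) rather than anything defined in this patch; note that the v1 API returns scalar extensions as pointers, which is what the isScalarKind special-casing preserves.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/gen/pb" // hypothetical generated package
    )

    func roundTripNote(msg *pb.MyMessage) error {
        // SetExtension type-checks the value against ExtensionDesc.ExtensionType.
        if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
            return err
        }
        v, err := proto.GetExtension(msg, pb.E_Note)
        if err == proto.ErrMissingExtension {
            return nil // unset and no default declared in the .proto
        }
        if err != nil {
            return err
        }
        fmt.Println(*v.(*string)) // scalars come back as *T in the v1 API
        return nil
    }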
-func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. 
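The unknown-field handling in SetRawExtension, ExtensionDescs and clearUnknown above all reduces to walking the raw bytes with protowire.ConsumeField. The helper below shows that walk in isolation; it is a sketch for readers of this patch, built only from proto.MessageReflect and the protowire package the new file already imports.

    package example

    import (
        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/encoding/protowire"
    )

    // listUnknownFieldNumbers returns the field numbers found in m's unknown
    // fields, the same scan ExtensionDescs uses to surface type-incomplete
    // descriptors.
    func listUnknownFieldNumbers(m proto.Message) ([]protowire.Number, error) {
        var nums []protowire.Number
        b := proto.MessageReflect(m).GetUnknown()
        for len(b) > 0 {
            num, _, n := protowire.ConsumeField(b)
            if n < 0 {
                return nil, protowire.ParseError(n)
            }
            nums = append(nums, num)
            b = b[n:]
        }
        return nums, nil
    }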
-func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328bb7..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
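The doc comments being removed here still describe behaviour the replacement module keeps: pointer helpers for proto2 optional fields and opt-in deterministic output on Buffer. A hedged sketch of that combination follows, reusing the Test message from the removed package documentation as a hypothetical generated type and assuming the Buffer API is unchanged in the replacement package.

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto"

        pb "example.com/project/gen/pb" // hypothetical generated package
    )

    func marshalDeterministic() []byte {
        msg := &pb.Test{
            Label: proto.String("hello"), // proto2 optional fields take *T
            Type:  proto.Int32(17),
        }
        var buf proto.Buffer       // zero value is ready to use
        buf.SetDeterministic(true) // stable (not canonical) map ordering
        if err := buf.Marshal(msg); err != nil {
            log.Fatalf("marshaling error: %v", err)
        }
        return buf.Bytes()
    }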
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
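The comment block above spells out the MessageSet wire format: each item is group 1 containing a varint type_id at field 2 and the embedded message bytes at field 3. Purely to illustrate that framing (this is not code from the patch), the same item can be built with the protowire package that is vendored alongside these changes:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    // appendMessageSetItem frames one MessageSet item:
    //   group 1 { varint type_id = 2; bytes message = 3; }
    func appendMessageSetItem(b []byte, typeID int32, msg []byte) []byte {
        b = protowire.AppendTag(b, 1, protowire.StartGroupType)
        b = protowire.AppendTag(b, 2, protowire.VarintType)
        b = protowire.AppendVarint(b, uint64(typeID))
        b = protowire.AppendTag(b, 3, protowire.BytesType)
        b = protowire.AppendBytes(b, msg)
        return protowire.AppendTag(b, 1, protowire.EndGroupType)
    }

    func main() {
        fmt.Printf("%x\n", appendMessageSetItem(nil, 12345, []byte("payload")))
    }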
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194a..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe071..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0cd3..dcdc2202f 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. 
OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. 
+ // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
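(Illustrative aside, not part of the vendored file above.) The rewritten Parse walks the struct tag segment by segment instead of splitting it once up front, which is what lets the trailing def= segment keep any embedded commas. A minimal sketch of how the exported Properties type consumes a protoc-gen-go struct tag, assuming only that the deprecated github.com/golang/protobuf/proto package is importable:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Tag string in the form protoc-gen-go emits for a proto3 string field.
	var p proto.Properties
	p.Parse("bytes,1,opt,name=user_name,json=userName,proto3")

	// The parsed pieces land in the exported fields documented in the diff above.
	fmt.Println(p.OrigName) // user_name
	fmt.Println(p.JSONName) // userName
	fmt.Println(p.Tag)      // 1
	fmt.Println(p.Wire)     // bytes
	fmt.Println(p.Proto3)   // true
}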
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..066b4323b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,317 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
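(Illustrative aside, not part of the vendored files.) The proto.go shim above turns the v1 package into a thin wrapper over google.golang.org/protobuf: MessageV1 and MessageV2 bridge the two message interfaces, while Clone, Merge and Equal delegate to the v2 implementations. A minimal usage sketch, assuming google.golang.org/protobuf/types/known/wrapperspb is also vendored:

package main

import (
	"fmt"

	protov1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A v2 message from the well-known wrapper types.
	v2msg := wrapperspb.String("hello")

	// Wrap it for legacy call sites that still expect the v1 interface.
	v1msg := protov1.MessageV1(v2msg)

	// Clone returns a deep copy; Equal compares the populated fields.
	copied := protov1.Clone(v1msg)
	fmt.Println(protov1.Equal(v1msg, copied)) // true

	// And back to the v2 interface for code that has already migrated.
	_ = protov1.MessageV2(copied)
}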
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa95..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. 
- t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. 
- sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
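For orientation: the wiretype helper that follows maps the encoding token from the generated "protobuf" struct tag ("varint", "fixed32", "bytes", "group", ...) to a protobuf wire type, and setTag then packs the field number and wire type into the single varint key tag<<3 | wiretype that precedes each value on the wire. A small self-contained sketch of that packing — the constants mirror the standard wire-type values and sizeVarint is a local stand-in for SizeVarint; both are editorial assumptions rather than code from this package:

package main

import "fmt"

// Standard protobuf wire-type values (the same numbers the WireVarint,
// WireFixed64, WireBytes, WireStartGroup, WireEndGroup and WireFixed32
// constants carry).
const (
	wireVarint     = 0
	wireFixed64    = 1
	wireBytes      = 2
	wireStartGroup = 3
	wireEndGroup   = 4
	wireFixed32    = 5
)

// sizeVarint reports how many bytes the varint encoding of v occupies.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	// A field declared with a tag like "bytes,49,opt,name=foo" uses field
	// number 49 and the length-delimited (bytes) wire type.
	const fieldNum = 49
	key := uint64(fieldNum)<<3 | wireBytes
	fmt.Printf("wire key: %#x, key size in bytes: %d\n", key, sizeVarint(key))
}
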
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case 
reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := 
*ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func 
sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. 
-func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc52..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
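Note on the wire-format primitives used by the deleted unmarshalers that follow: they all rely on the same three steps — base-128 varint decoding, splitting the decoded key into field tag and wire type (tag = x>>3, wire = x&7), and the zigzag transform int64(x>>1) ^ int64(x)<<63>>63 for sint fields. The sketch below is a minimal, standalone illustration of those steps using only the standard library; the helper names are illustrative and are not part of the vendored package being removed.

    package main

    import "fmt"

    // decodeVarint reads a base-128 varint from b and returns the value and
    // the number of bytes consumed (0 if b is truncated or overlong).
    func decodeVarint(b []byte) (uint64, int) {
    	var x uint64
    	for i := 0; i < len(b) && i < 10; i++ {
    		x |= uint64(b[i]&0x7f) << uint(7*i)
    		if b[i] < 0x80 {
    			return x, i + 1
    		}
    	}
    	return 0, 0
    }

    // splitKey separates a decoded key into field tag and wire type.
    func splitKey(x uint64) (tag uint64, wire int) {
    	return x >> 3, int(x & 7)
    }

    // zigzagDecode64 undoes the sint64 zigzag encoding, using the same
    // expression as the sint64 unmarshalers: int64(x>>1) ^ int64(x)<<63>>63.
    func zigzagDecode64(x uint64) int64 {
    	return int64(x>>1) ^ int64(x)<<63>>63
    }

    func main() {
    	// Field 1, varint wire type, carrying the zigzag-encoded value -3
    	// (which encodes to 5 on the wire).
    	msg := []byte{0x08, 0x05}
    	key, n := decodeVarint(msg)
    	tag, wire := splitKey(key)
    	val, _ := decodeVarint(msg[n:])
    	fmt.Println(tag, wire, zigzagDecode64(val)) // prints: 1 0 -3
    }
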
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
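[Editor's aside] The fixed64/fixed32 decoders in this hunk assemble little-endian integers with a chain of byte shifts. As a standalone cross-check (not part of the patch), the hand-written OR chain matches encoding/binary from the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

	// Hand-rolled little-endian assembly, as in unmarshalFixed64Value.
	manual := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56

	// The standard library computes the same value.
	fmt.Println(manual == binary.LittleEndian.Uint64(b)) // true
}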
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
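[Editor's aside] makeUnmarshalOneof above fills the single-field wrapper structs that protoc-gen-go emits for each oneof case. A rough, illustrative sketch of that generated shape for the Msg/F example in its comment; the names mirror the comment and are not taken from this patch:

package main

import "fmt"

// Illustrative (not generated by this patch) shape for:
//   message Msg { oneof F { int64 X = 1; double Y = 2; } }

// isMsg_F is the oneof interface type (ityp in makeUnmarshalOneof).
type isMsg_F interface{ isMsg_F() }

// Msg_X and Msg_Y are the one-field wrappers (typ in makeUnmarshalOneof); the
// unmarshaler allocates one, decodes into its field, and stores it in F.
type Msg_X struct{ X int64 }
type Msg_Y struct{ Y float64 }

func (*Msg_X) isMsg_F() {}
func (*Msg_Y) isMsg_F() {}

type Msg struct{ F isMsg_F }

func main() {
	m := Msg{F: &Msg_X{X: 7}}
	switch c := m.F.(type) {
	case *Msg_X:
		fmt.Println("X =", c.X) // X = 7
	case *Msg_Y:
		fmt.Println("Y =", c.Y)
	}
}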
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
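[Editor's aside] encodeVarint and decodeVarint here implement the base-128 varint wire encoding, and the sint decoders earlier in the hunk undo zigzag with x>>1 ^ x<<63>>63. A standalone round-trip sketch, using the standard library for the varint step:

package main

import (
	"encoding/binary"
	"fmt"
)

// zigzag maps signed values to unsigned so small negatives stay short varints.
func zigzag(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag is the inverse, the same expression used by unmarshalSint64Value.
func unzigzag(u uint64) int64 { return int64(u>>1) ^ int64(u)<<63>>63 }

func main() {
	v := int64(-3)

	// Encode: zigzag, then varint (the job of encodeVarint in the diff).
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, zigzag(v))

	// Decode: varint, then un-zigzag.
	u, m := binary.Uvarint(buf[:n])
	fmt.Println(n, m, unzigzag(u)) // 1 1 -3
}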
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
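[Editor's aside] The text.go deleted here was the hand-written text-format writer; the replacement files in this patch delegate to the v2 protobuf modules that are now vendored. A hedged sketch of producing the same name/value syntax with the v2 prototext package, using the well-known Duration type as a stand-in message:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90*time.Second + 500*time.Millisecond)

	// Prints each field in the familiar text form, roughly:
	//   seconds: 90
	//   nanos: 500000000
	fmt.Println(prototext.Format(d))
}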
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
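[Editor's aside] As the comment above notes, map fields are rendered as repeated entries with key and value subfields. A standalone sketch using the well-known Struct type (whose fields member is a map) to show that shape; output formatting is approximate:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	s, err := structpb.NewStruct(map[string]interface{}{"answer": 42})
	if err != nil {
		panic(err)
	}

	// Each map entry prints as a nested message with key/value fields,
	// roughly: fields:{key:"answer" value:{number_value:42}}
	fmt.Println(prototext.Format(s))
}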
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
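[Editor's aside] writeAny special-cases non-finite floats and emits the tokens inf, -inf and nan instead of numeric literals. A minimal sketch of just that branch:

package main

import (
	"fmt"
	"math"
)

// floatToken mirrors the special-case switch in writeAny: non-finite floats
// become text-format tokens, everything else falls through to normal printing.
func floatToken(x float64) string {
	switch {
	case math.IsInf(x, 1):
		return "inf"
	case math.IsInf(x, -1):
		return "-inf"
	case math.IsNaN(x):
		return "nan"
	}
	return fmt.Sprint(x)
}

func main() {
	fmt.Println(floatToken(math.Inf(1)), floatToken(math.Inf(-1)), floatToken(math.NaN()), floatToken(1.5))
	// Output: inf -inf nan 1.5
}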
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
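[Editor's aside] writeString escapes byte by byte and falls back to three-digit octal escapes for non-printable bytes, for interoperability with the other language implementations. A standalone sketch of the same escaping rule:

package main

import "fmt"

// escapeTextString mirrors the writeString loop: a handful of named escapes,
// printable ASCII copied through, and everything else as \NNN octal.
func escapeTextString(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ { // bytes, not runes
		switch c := s[i]; c {
		case '\n':
			out = append(out, `\n`...)
		case '\r':
			out = append(out, `\r`...)
		case '\t':
			out = append(out, `\t`...)
		case '"':
			out = append(out, `\"`...)
		case '\\':
			out = append(out, `\\`...)
		default:
			if c >= 0x20 && c < 0x7f { // isprint
				out = append(out, c)
			} else {
				out = append(out, fmt.Sprintf(`\%03o`, c)...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	fmt.Println(escapeTextString("a\tb\x01ü"))
	// Output: "a\tb\001\303\274"  (ü is two UTF-8 bytes, each escaped as octal)
}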
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..47eb3e445 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
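[Editor's aside] UnmarshalText above keeps the old v1 entry point but parses through protoreflect, so it works against any v2-backed message. A small usage sketch, with the well-known Duration type standing in for a real message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var d durationpb.Duration

	// The same "name: value" grammar that unmarshalMessage walks.
	if err := proto.UnmarshalText(`seconds: 90 nanos: 500000000`, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30.5s
}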
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go 
new file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..85f9f5736 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. +// +// The target message m may be a *DynamicAny message. 
If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { + var err error + dm.Message, err = Empty(any) + if err != nil { + return err + } + } + m = dm.Message + } + + anyName, err := AnyMessageName(any) + if err != nil { + return err + } + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) + } + return proto.Unmarshal(any.Value, m) +} + +// Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { + return false + } + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..0ef27d33d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. 
+ +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..d3c33259d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..b2b55dd85 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. +const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..d0079ee3e --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. 
+ +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..8368a3f70 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" +) + +// Range of google.protobuf.Duration as specified in timestamp.proto. +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. 
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// Timestamp converts a timestamppb.Timestamp to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. +func TimestampNow() *timestamppb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := &timestamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. +func TimestampString(ts *timestamppb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..a76f80760 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/golangci/check/LICENSE b/vendor/github.com/golangci/check/LICENSE new file mode 100644 index 000000000..5a1774b8e --- /dev/null +++ b/vendor/github.com/golangci/check/LICENSE @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/} + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see {http://www.gnu.org/licenses/}. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + opennota Copyright (C) 2013 opennota + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +{http://www.gnu.org/licenses/}. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +{http://www.gnu.org/philosophy/why-not-lgpl.html}. diff --git a/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go b/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go new file mode 100644 index 000000000..5dc5f8380 --- /dev/null +++ b/vendor/github.com/golangci/check/cmd/structcheck/structcheck.go @@ -0,0 +1,193 @@ +// structcheck +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +package structcheck + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/loader" +) + +var ( + assignmentsOnly = flag.Bool("structcheck.a", false, "Count assignments only") + loadTestFiles = flag.Bool("structcheck.t", false, "Load test files too") + buildTags = flag.String("structcheck.tags", "", "Build tags") +) + +type visitor struct { + prog *loader.Program + pkg *loader.PackageInfo + m map[types.Type]map[string]int + skip map[types.Type]struct{} +} + +func (v *visitor) decl(t types.Type, fieldName string) { + if _, ok := v.m[t]; !ok { + v.m[t] = make(map[string]int) + } + if _, ok := v.m[t][fieldName]; !ok { + v.m[t][fieldName] = 0 + } +} + +func (v *visitor) assignment(t types.Type, fieldName string) { + if _, ok := v.m[t]; !ok { + v.m[t] = make(map[string]int) + } + if _, ok := v.m[t][fieldName]; ok { + v.m[t][fieldName]++ + } else { + v.m[t][fieldName] = 1 + } +} + +func (v *visitor) typeSpec(node *ast.TypeSpec) { + if strukt, ok := node.Type.(*ast.StructType); ok { + t := v.pkg.Info.Defs[node.Name].Type() + for _, f := range strukt.Fields.List { + if len(f.Names) > 0 { + fieldName := f.Names[0].Name + v.decl(t, fieldName) + } + } + } +} + +func (v *visitor) typeAndFieldName(expr *ast.SelectorExpr) (types.Type, string, bool) { + selection := v.pkg.Info.Selections[expr] + if selection == nil { + return nil, "", false + } + recv := selection.Recv() + if ptr, ok := recv.(*types.Pointer); ok { + recv = ptr.Elem() + } + return recv, selection.Obj().Name(), true +} + +func (v *visitor) assignStmt(node *ast.AssignStmt) { + for _, lhs := range node.Lhs { + var selector *ast.SelectorExpr + switch expr := lhs.(type) { + case *ast.SelectorExpr: + selector = expr + case *ast.IndexExpr: + if expr, ok := expr.X.(*ast.SelectorExpr); ok { + selector = expr + } + } + if selector != nil { + if t, fn, ok := v.typeAndFieldName(selector); ok { + v.assignment(t, fn) + } + } + } +} + +func (v *visitor) compositeLiteral(node *ast.CompositeLit) { + t := v.pkg.Info.Types[node.Type].Type + for _, expr := range node.Elts { + if kv, ok := expr.(*ast.KeyValueExpr); ok { + if ident, ok := kv.Key.(*ast.Ident); ok { + v.assignment(t, ident.Name) + } + } else { + // Struct literal with positional values. + // All the fields are assigned.
+ v.skip[t] = struct{}{} + break + } + } +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + switch node := node.(type) { + case *ast.TypeSpec: + v.typeSpec(node) + + case *ast.AssignStmt: + if *assignmentsOnly { + v.assignStmt(node) + } + + case *ast.SelectorExpr: + if !*assignmentsOnly { + if t, fn, ok := v.typeAndFieldName(node); ok { + v.assignment(t, fn) + } + } + + case *ast.CompositeLit: + v.compositeLiteral(node) + } + + return v +} + +type Issue struct { + Pos token.Position + Type string + FieldName string +} + +func Run(program *loader.Program, reportExported bool) []Issue { + var issues []Issue + for _, pkg := range program.InitialPackages() { + visitor := &visitor{ + m: make(map[types.Type]map[string]int), + skip: make(map[types.Type]struct{}), + prog: program, + pkg: pkg, + } + for _, f := range pkg.Files { + ast.Walk(visitor, f) + } + + for t := range visitor.m { + if _, skip := visitor.skip[t]; skip { + continue + } + for fieldName, v := range visitor.m[t] { + if !reportExported && ast.IsExported(fieldName) { + continue + } + if v == 0 { + field, _, _ := types.LookupFieldOrMethod(t, false, pkg.Pkg, fieldName) + if field == nil { + fmt.Printf("%s: unknown field or method: %s.%s\n", pkg.Pkg.Path(), t, fieldName) + continue + } + if fieldName == "XMLName" { + if named, ok := field.Type().(*types.Named); ok && named.Obj().Pkg().Path() == "encoding/xml" { + continue + } + } + pos := program.Fset.Position(field.Pos()) + issues = append(issues, Issue{ + Pos: pos, + Type: types.TypeString(t, nil), + FieldName: fieldName, + }) + } + } + } + } + + return issues +} diff --git a/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go b/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go new file mode 100644 index 000000000..8e93e0473 --- /dev/null +++ b/vendor/github.com/golangci/check/cmd/varcheck/varcheck.go @@ -0,0 +1,163 @@ +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +package varcheck + +import ( + "flag" + "go/ast" + "go/token" + "strings" + + "go/types" + + "golang.org/x/tools/go/loader" +) + +var ( + buildTags = flag.String("varcheck.tags", "", "Build tags") +) + +type object struct { + pkgPath string + name string +} + +type visitor struct { + prog *loader.Program + pkg *loader.PackageInfo + uses map[object]int + positions map[object]token.Position + insideFunc bool +} + +func getKey(obj types.Object) object { + if obj == nil { + return object{} + } + + pkg := obj.Pkg() + pkgPath := "" + if pkg != nil { + pkgPath = pkg.Path() + } + + return object{ + pkgPath: pkgPath, + name: obj.Name(), + } +} + +func (v *visitor) decl(obj types.Object) { + key := getKey(obj) + if _, ok := v.uses[key]; !ok { + v.uses[key] = 0 + } + if _, ok := v.positions[key]; !ok { + v.positions[key] = v.prog.Fset.Position(obj.Pos()) + } +} + +func (v *visitor) use(obj types.Object) { + key := getKey(obj) + if _, ok := v.uses[key]; ok { + v.uses[key]++ + } else { + v.uses[key] = 1 + } +} + +func isReserved(name string) bool { + return name == "_" || strings.HasPrefix(strings.ToLower(name), "_cgo_") +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + switch node := node.(type) { + case *ast.Ident: + v.use(v.pkg.Info.Uses[node]) + + case *ast.ValueSpec: + if !v.insideFunc { + for _, ident := range node.Names { + if !isReserved(ident.Name) { + v.decl(v.pkg.Info.Defs[ident]) + } + } + } + for _, val := range node.Values { + ast.Walk(v, val) + } + if node.Type != nil { + ast.Walk(v, node.Type) + } + return nil + + case *ast.FuncDecl: + if node.Body != nil { + v.insideFunc = true + ast.Walk(v, node.Body) + v.insideFunc = false + } + + if node.Recv != nil { + ast.Walk(v, node.Recv) + } + if node.Type != nil { + ast.Walk(v, node.Type) + } + + return nil + } + + return v +} + +type Issue struct { + Pos token.Position + VarName string +} + +func Run(program *loader.Program, reportExported bool) []Issue { + var issues []Issue + uses := make(map[object]int) + positions := make(map[object]token.Position) + + for _, pkgInfo := range program.InitialPackages() { + if pkgInfo.Pkg.Path() == "unsafe" { + continue + } + + v := &visitor{ + prog: program, + pkg: pkgInfo, + uses: uses, + positions: positions, + } + + for _, f := range v.pkg.Files { + ast.Walk(v, f) + } + } + + for obj, useCount := range uses { + if useCount == 0 && (reportExported || !ast.IsExported(obj.name)) { + pos := positions[obj] + issues = append(issues, Issue{ + Pos: pos, + VarName: obj.name, + }) + } + } + + return issues +} diff --git a/vendor/github.com/golangci/dupl/.travis.yml b/vendor/github.com/golangci/dupl/.travis.yml new file mode 100644 index 000000000..33de24c0f --- /dev/null +++ b/vendor/github.com/golangci/dupl/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.3 + - 1.8 + - 1.9 diff --git a/vendor/github.com/golangci/dupl/LICENSE b/vendor/github.com/golangci/dupl/LICENSE new file mode 100644 index 000000000..ab317d841 --- /dev/null +++ b/vendor/github.com/golangci/dupl/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Michal Bohuslávek + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The 
above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/golangci/dupl/README.md b/vendor/github.com/golangci/dupl/README.md new file mode 100644 index 000000000..f34901d7a --- /dev/null +++ b/vendor/github.com/golangci/dupl/README.md @@ -0,0 +1,63 @@ +# dupl [![Build Status](https://travis-ci.org/mibk/dupl.png)](https://travis-ci.org/mibk/dupl) + +**dupl** is a tool written in Go for finding code clones. So far it can find clones only +in the Go source files. The method uses suffix tree for serialized ASTs. It ignores values +of AST nodes. It just operates with their types (e.g. `if a == 13 {}` and `if x == 100 {}` are +considered the same provided it exceeds the minimal token sequence size). + +Due to the used method dupl can report so called "false positives" on the output. These are +the ones we do not consider clones (whether they are too small, or the values of the matched +tokens are completely different). + +## Installation + +```bash +go get -u github.com/golangci/dupl +``` + +## Usage + +``` +Usage of dupl: + dupl [flags] [paths] + +Paths: + If the given path is a file, dupl will use it regardless of + the file extension. If it is a directory it will recursively + search for *.go files in that directory. + + If no path is given dupl will recursively search for *.go + files in the current directory. + +Flags: + -files + read file names from stdin one at each line + -html + output the results as HTML, including duplicate code fragments + -plumbing + plumbing (easy-to-parse) output for consumption by scripts or tools + -t, -threshold size + minimum token sequence size as a clone (default 15) + -vendor + check files in vendor directory + -v, -verbose + explain what is being done + +Examples: + dupl -t 100 + Search clones in the current directory of size at least + 100 tokens. + dupl $(find app/ -name '*_test.go') + Search for clones in tests in the app directory. + find app/ -name '*_test.go' |dupl -files + The same as above. +``` + +## Example + +The reduced output of this command with the following parameters for the [Docker](https://www.docker.com) source code +looks like [this](http://htmlpreview.github.io/?https://github.com/golangci/dupl/blob/master/_output_example/docker.html). + +```bash +$ dupl -t 200 -html >docker.html +``` diff --git a/vendor/github.com/golangci/dupl/job/buildtree.go b/vendor/github.com/golangci/dupl/job/buildtree.go new file mode 100644 index 000000000..e9aad54c0 --- /dev/null +++ b/vendor/github.com/golangci/dupl/job/buildtree.go @@ -0,0 +1,22 @@ +package job + +import ( + "github.com/golangci/dupl/suffixtree" + "github.com/golangci/dupl/syntax" +) + +func BuildTree(schan chan []*syntax.Node) (t *suffixtree.STree, d *[]*syntax.Node, done chan bool) { + t = suffixtree.New() + data := make([]*syntax.Node, 0, 100) + done = make(chan bool) + go func() { + for seq := range schan { + data = append(data, seq...) 
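+ // Every node of the serialized sequence is streamed into the suffix tree,
+ // while the flat data slice keeps the original order so matches can later
+ // be mapped back to syntax nodes.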
+ for _, node := range seq { + t.Update(node) + } + } + done <- true + }() + return t, &data, done +} diff --git a/vendor/github.com/golangci/dupl/job/parse.go b/vendor/github.com/golangci/dupl/job/parse.go new file mode 100644 index 000000000..eb9d7c625 --- /dev/null +++ b/vendor/github.com/golangci/dupl/job/parse.go @@ -0,0 +1,36 @@ +package job + +import ( + "log" + + "github.com/golangci/dupl/syntax" + "github.com/golangci/dupl/syntax/golang" +) + +func Parse(fchan chan string) chan []*syntax.Node { + + // parse AST + achan := make(chan *syntax.Node) + go func() { + for file := range fchan { + ast, err := golang.Parse(file) + if err != nil { + log.Println(err) + continue + } + achan <- ast + } + close(achan) + }() + + // serialize + schan := make(chan []*syntax.Node) + go func() { + for ast := range achan { + seq := syntax.Serialize(ast) + schan <- seq + } + close(schan) + }() + return schan +} diff --git a/vendor/github.com/golangci/dupl/main.go b/vendor/github.com/golangci/dupl/main.go new file mode 100644 index 000000000..3030a97ae --- /dev/null +++ b/vendor/github.com/golangci/dupl/main.go @@ -0,0 +1,148 @@ +package dupl + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/golangci/dupl/job" + "github.com/golangci/dupl/printer" + "github.com/golangci/dupl/syntax" +) + +const defaultThreshold = 15 + +var ( + paths = []string{"."} + vendor = flag.Bool("dupl.vendor", false, "") + verbose = flag.Bool("dupl.verbose", false, "") + files = flag.Bool("dupl.files", false, "") + + html = flag.Bool("dupl.html", false, "") + plumbing = flag.Bool("dupl.plumbing", false, "") +) + +const ( + vendorDirPrefix = "vendor" + string(filepath.Separator) + vendorDirInPath = string(filepath.Separator) + vendorDirPrefix +) + +func init() { + flag.BoolVar(verbose, "dupl.v", false, "alias for -verbose") +} + +func Run(files []string, threshold int) ([]printer.Issue, error) { + fchan := make(chan string, 1024) + go func() { + for _, f := range files { + fchan <- f + } + close(fchan) + }() + schan := job.Parse(fchan) + t, data, done := job.BuildTree(schan) + <-done + + // finish stream + t.Update(&syntax.Node{Type: -1}) + + mchan := t.FindDuplOver(threshold) + duplChan := make(chan syntax.Match) + go func() { + for m := range mchan { + match := syntax.FindSyntaxUnits(*data, m, threshold) + if len(match.Frags) > 0 { + duplChan <- match + } + } + close(duplChan) + }() + + return makeIssues(duplChan) +} + +func makeIssues(duplChan <-chan syntax.Match) ([]printer.Issue, error) { + groups := make(map[string][][]*syntax.Node) + for dupl := range duplChan { + groups[dupl.Hash] = append(groups[dupl.Hash], dupl.Frags...) + } + keys := make([]string, 0, len(groups)) + for k := range groups { + keys = append(keys, k) + } + sort.Strings(keys) + + p := printer.NewPlumbing(ioutil.ReadFile) + + var issues []printer.Issue + for _, k := range keys { + uniq := unique(groups[k]) + if len(uniq) > 1 { + i, err := p.MakeIssues(uniq) + if err != nil { + return nil, err + } + issues = append(issues, i...) 
+ } + } + + return issues, nil +} + +func unique(group [][]*syntax.Node) [][]*syntax.Node { + fileMap := make(map[string]map[int]struct{}) + + var newGroup [][]*syntax.Node + for _, seq := range group { + node := seq[0] + file, ok := fileMap[node.Filename] + if !ok { + file = make(map[int]struct{}) + fileMap[node.Filename] = file + } + if _, ok := file[node.Pos]; !ok { + file[node.Pos] = struct{}{} + newGroup = append(newGroup, seq) + } + } + return newGroup +} + +func usage() { + fmt.Fprintln(os.Stderr, `Usage: dupl [flags] [paths] + +Paths: + If the given path is a file, dupl will use it regardless of + the file extension. If it is a directory, it will recursively + search for *.go files in that directory. + + If no path is given, dupl will recursively search for *.go + files in the current directory. + +Flags: + -files + read file names from stdin one at each line + -html + output the results as HTML, including duplicate code fragments + -plumbing + plumbing (easy-to-parse) output for consumption by scripts or tools + -t, -threshold size + minimum token sequence size as a clone (default 15) + -vendor + check files in vendor directory + -v, -verbose + explain what is being done + +Examples: + dupl -t 100 + Search clones in the current directory of size at least + 100 tokens. + dupl $(find app/ -name '*_test.go') + Search for clones in tests in the app directory. + find app/ -name '*_test.go' |dupl -files + The same as above.`) + os.Exit(2) +} diff --git a/vendor/github.com/golangci/dupl/printer/html.go b/vendor/github.com/golangci/dupl/printer/html.go new file mode 100644 index 000000000..5ad9e25c7 --- /dev/null +++ b/vendor/github.com/golangci/dupl/printer/html.go @@ -0,0 +1,120 @@ +package printer + +import ( + "bytes" + "fmt" + "io" + "regexp" + "sort" + + "github.com/golangci/dupl/syntax" +) + +type html struct { + iota int + w io.Writer + ReadFile +} + +func NewHTML(w io.Writer, fread ReadFile) Printer { + return &html{w: w, ReadFile: fread} +} + +func (p *html) PrintHeader() error { + _, err := fmt.Fprint(p.w, ` + +Duplicates + +`) + return err +} + +func (p *html) PrintClones(dups [][]*syntax.Node) error { + p.iota++ + fmt.Fprintf(p.w, "

<h1>#%d found %d clones</h1>

\n", p.iota, len(dups)) + + clones := make([]clone, len(dups)) + for i, dup := range dups { + cnt := len(dup) + if cnt == 0 { + panic("zero length dup") + } + nstart := dup[0] + nend := dup[cnt-1] + + file, err := p.ReadFile(nstart.Filename) + if err != nil { + return err + } + + lineStart, _ := blockLines(file, nstart.Pos, nend.End) + cl := clone{filename: nstart.Filename, lineStart: lineStart} + start := findLineBeg(file, nstart.Pos) + content := append(toWhitespace(file[start:nstart.Pos]), file[nstart.Pos:nend.End]...) + cl.fragment = deindent(content) + clones[i] = cl + } + + sort.Sort(byNameAndLine(clones)) + for _, cl := range clones { + fmt.Fprintf(p.w, "

<h2>%s:%d</h2>\n<pre>%s</pre>
\n", cl.filename, cl.lineStart, cl.fragment) + } + return nil +} + +func (*html) PrintFooter() error { return nil } + +func findLineBeg(file []byte, index int) int { + for i := index; i >= 0; i-- { + if file[i] == '\n' { + return i + 1 + } + } + return 0 +} + +func toWhitespace(str []byte) []byte { + var out []byte + for _, c := range bytes.Runes(str) { + if c == '\t' { + out = append(out, '\t') + } else { + out = append(out, ' ') + } + } + return out +} + +func deindent(block []byte) []byte { + const maxVal = 99 + min := maxVal + re := regexp.MustCompile(`(^|\n)(\t*)\S`) + for _, line := range re.FindAllSubmatch(block, -1) { + indent := line[2] + if len(indent) < min { + min = len(indent) + } + } + if min == 0 || min == maxVal { + return block + } + block = block[min:] +Loop: + for i := 0; i < len(block); i++ { + if block[i] == '\n' && i != len(block)-1 { + for j := 0; j < min; j++ { + if block[i+j+1] != '\t' { + continue Loop + } + } + block = append(block[:i+1], block[i+1+min:]...) + } + } + return block +} diff --git a/vendor/github.com/golangci/dupl/printer/plumbing.go b/vendor/github.com/golangci/dupl/printer/plumbing.go new file mode 100644 index 000000000..cf39d01b7 --- /dev/null +++ b/vendor/github.com/golangci/dupl/printer/plumbing.go @@ -0,0 +1,50 @@ +package printer + +import ( + "sort" + + "github.com/golangci/dupl/syntax" +) + +type Clone clone + +func (c Clone) Filename() string { + return c.filename +} + +func (c Clone) LineStart() int { + return c.lineStart +} + +func (c Clone) LineEnd() int { + return c.lineEnd +} + +type Issue struct { + From, To Clone +} + +type Plumbing struct { + ReadFile +} + +func NewPlumbing(fread ReadFile) *Plumbing { + return &Plumbing{fread} +} + +func (p *Plumbing) MakeIssues(dups [][]*syntax.Node) ([]Issue, error) { + clones, err := prepareClonesInfo(p.ReadFile, dups) + if err != nil { + return nil, err + } + sort.Sort(byNameAndLine(clones)) + var issues []Issue + for i, cl := range clones { + nextCl := clones[(i+1)%len(clones)] + issues = append(issues, Issue{ + From: Clone(cl), + To: Clone(nextCl), + }) + } + return issues, nil +} diff --git a/vendor/github.com/golangci/dupl/printer/printer.go b/vendor/github.com/golangci/dupl/printer/printer.go new file mode 100644 index 000000000..385217bfc --- /dev/null +++ b/vendor/github.com/golangci/dupl/printer/printer.go @@ -0,0 +1,11 @@ +package printer + +import "github.com/golangci/dupl/syntax" + +type ReadFile func(filename string) ([]byte, error) + +type Printer interface { + PrintHeader() error + PrintClones(dups [][]*syntax.Node) error + PrintFooter() error +} diff --git a/vendor/github.com/golangci/dupl/printer/text.go b/vendor/github.com/golangci/dupl/printer/text.go new file mode 100644 index 000000000..8359fa76f --- /dev/null +++ b/vendor/github.com/golangci/dupl/printer/text.go @@ -0,0 +1,100 @@ +package printer + +import ( + "fmt" + "io" + "sort" + + "github.com/golangci/dupl/syntax" +) + +type text struct { + cnt int + w io.Writer + ReadFile +} + +func NewText(w io.Writer, fread ReadFile) Printer { + return &text{w: w, ReadFile: fread} +} + +func (p *text) PrintHeader() error { return nil } + +func (p *text) PrintClones(dups [][]*syntax.Node) error { + p.cnt++ + fmt.Fprintf(p.w, "found %d clones:\n", len(dups)) + clones, err := prepareClonesInfo(p.ReadFile, dups) + if err != nil { + return err + } + sort.Sort(byNameAndLine(clones)) + for _, cl := range clones { + fmt.Fprintf(p.w, " %s:%d,%d\n", cl.filename, cl.lineStart, cl.lineEnd) + } + return nil +} + +func (p *text) PrintFooter() 
error { + _, err := fmt.Fprintf(p.w, "\nFound total %d clone groups.\n", p.cnt) + return err +} + +func prepareClonesInfo(fread ReadFile, dups [][]*syntax.Node) ([]clone, error) { + clones := make([]clone, len(dups)) + for i, dup := range dups { + cnt := len(dup) + if cnt == 0 { + panic("zero length dup") + } + nstart := dup[0] + nend := dup[cnt-1] + + file, err := fread(nstart.Filename) + if err != nil { + return nil, err + } + + cl := clone{filename: nstart.Filename} + cl.lineStart, cl.lineEnd = blockLines(file, nstart.Pos, nend.End) + clones[i] = cl + } + return clones, nil +} + +func blockLines(file []byte, from, to int) (int, int) { + line := 1 + lineStart, lineEnd := 0, 0 + for offset, b := range file { + if b == '\n' { + line++ + } + if offset == from { + lineStart = line + } + if offset == to-1 { + lineEnd = line + break + } + } + return lineStart, lineEnd +} + +type clone struct { + filename string + lineStart int + lineEnd int + fragment []byte +} + +type byNameAndLine []clone + +func (c byNameAndLine) Len() int { return len(c) } + +func (c byNameAndLine) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +func (c byNameAndLine) Less(i, j int) bool { + if c[i].filename == c[j].filename { + return c[i].lineStart < c[j].lineStart + } + return c[i].filename < c[j].filename +} diff --git a/vendor/github.com/golangci/dupl/suffixtree/dupl.go b/vendor/github.com/golangci/dupl/suffixtree/dupl.go new file mode 100644 index 000000000..ab145b4f3 --- /dev/null +++ b/vendor/github.com/golangci/dupl/suffixtree/dupl.go @@ -0,0 +1,98 @@ +package suffixtree + +import "sort" + +type Match struct { + Ps []Pos + Len Pos +} + +type posList struct { + positions []Pos +} + +func newPosList() *posList { + return &posList{make([]Pos, 0)} +} + +func (p *posList) append(p2 *posList) { + p.positions = append(p.positions, p2.positions...) +} + +func (p *posList) add(pos Pos) { + p.positions = append(p.positions, pos) +} + +type contextList struct { + lists map[int]*posList +} + +func newContextList() *contextList { + return &contextList{make(map[int]*posList)} +} + +func (c *contextList) getAll() []Pos { + keys := make([]int, 0, len(c.lists)) + for k := range c.lists { + keys = append(keys, k) + } + sort.Ints(keys) + var ps []Pos + for _, k := range keys { + ps = append(ps, c.lists[k].positions...) + } + return ps +} + +func (c *contextList) append(c2 *contextList) { + for lc, pl := range c2.lists { + if _, ok := c.lists[lc]; ok { + c.lists[lc].append(pl) + } else { + c.lists[lc] = pl + } + } +} + +// FindDuplOver find pairs of maximal duplicities over a threshold +// length. 
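+// Each Match sent on the returned channel carries the start positions (Ps) of
+// the repeated token sequences and their common length (Len); the channel is
+// closed once the whole tree has been walked.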
+func (t *STree) FindDuplOver(threshold int) <-chan Match { + auxTran := newTran(0, 0, t.root) + ch := make(chan Match) + go func() { + walkTrans(auxTran, 0, threshold, ch) + close(ch) + }() + return ch +} + +func walkTrans(parent *tran, length, threshold int, ch chan<- Match) *contextList { + s := parent.state + + cl := newContextList() + + if len(s.trans) == 0 { + pl := newPosList() + start := parent.end + 1 - Pos(length) + pl.add(start) + ch := 0 + if start > 0 { + ch = s.tree.data[start-1].Val() + } + cl.lists[ch] = pl + return cl + } + + for _, t := range s.trans { + ln := length + t.len() + cl2 := walkTrans(t, ln, threshold, ch) + if ln >= threshold { + cl.append(cl2) + } + } + if length >= threshold && len(cl.lists) > 1 { + m := Match{cl.getAll(), Pos(length)} + ch <- m + } + return cl +} diff --git a/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go b/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go new file mode 100644 index 000000000..738015025 --- /dev/null +++ b/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go @@ -0,0 +1,216 @@ +package suffixtree + +import ( + "bytes" + "fmt" + "math" + "strings" +) + +const infinity = math.MaxInt32 + +// Pos denotes position in data slice. +type Pos int32 + +type Token interface { + Val() int +} + +// STree is a struct representing a suffix tree. +type STree struct { + data []Token + root *state + auxState *state // auxiliary state + + // active point + s *state + start, end Pos +} + +// New creates new suffix tree. +func New() *STree { + t := new(STree) + t.data = make([]Token, 0, 50) + t.root = newState(t) + t.auxState = newState(t) + t.root.linkState = t.auxState + t.s = t.root + return t +} + +// Update refreshes the suffix tree to by new data. +func (t *STree) Update(data ...Token) { + t.data = append(t.data, data...) + for _ = range data { + t.update() + t.s, t.start = t.canonize(t.s, t.start, t.end) + t.end++ + } +} + +// update transforms suffix tree T(n) to T(n+1). +func (t *STree) update() { + oldr := t.root + + // (s, (start, end)) is the canonical reference pair for the active point + s := t.s + start, end := t.start, t.end + var r *state + for { + var endPoint bool + r, endPoint = t.testAndSplit(s, start, end-1) + if endPoint { + break + } + r.fork(end) + if oldr != t.root { + oldr.linkState = r + } + oldr = r + s, start = t.canonize(s.linkState, start, end-1) + } + if oldr != t.root { + oldr.linkState = r + } + + // update active point + t.s = s + t.start = start +} + +// testAndSplit tests whether a state with canonical ref. pair +// (s, (start, end)) is the end point, that is, a state that have +// a c-transition. If not, then state (exs, (start, end)) is made +// explicit (if not already so). +func (t *STree) testAndSplit(s *state, start, end Pos) (exs *state, endPoint bool) { + c := t.data[t.end] + if start <= end { + tr := s.findTran(t.data[start]) + splitPoint := tr.start + end - start + 1 + if t.data[splitPoint].Val() == c.Val() { + return s, true + } + // make the (s, (start, end)) state explicit + newSt := newState(s.tree) + newSt.addTran(splitPoint, tr.end, tr.state) + tr.end = splitPoint - 1 + tr.state = newSt + return newSt, false + } + if s == t.auxState || s.findTran(c) != nil { + return s, true + } + return s, false +} + +// canonize returns updated state and start position for ref. pair +// (s, (start, end)) of state r so the new ref. pair is canonical, +// that is, referenced from the closest explicit ancestor of r. 
+func (t *STree) canonize(s *state, start, end Pos) (*state, Pos) { + if s == t.auxState { + s, start = t.root, start+1 + } + if start > end { + return s, start + } + + var tr *tran + for { + if start <= end { + tr = s.findTran(t.data[start]) + if tr == nil { + panic(fmt.Sprintf("there should be some transition for '%d' at %d", + t.data[start].Val(), start)) + } + } + if tr.end-tr.start > end-start { + break + } + start += tr.end - tr.start + 1 + s = tr.state + } + if s == nil { + panic("there should always be some suffix link resolution") + } + return s, start +} + +func (t *STree) At(p Pos) Token { + if p < 0 || p >= Pos(len(t.data)) { + panic("position out of bounds") + } + return t.data[p] +} + +func (t *STree) String() string { + buf := new(bytes.Buffer) + printState(buf, t.root, 0) + return buf.String() +} + +func printState(buf *bytes.Buffer, s *state, ident int) { + for _, tr := range s.trans { + fmt.Fprint(buf, strings.Repeat(" ", ident)) + fmt.Fprintf(buf, "* (%d, %d)\n", tr.start, tr.ActEnd()) + printState(buf, tr.state, ident+1) + } +} + +// state is an explicit state of the suffix tree. +type state struct { + tree *STree + trans []*tran + linkState *state +} + +func newState(t *STree) *state { + return &state{ + tree: t, + trans: make([]*tran, 0), + linkState: nil, + } +} + +func (s *state) addTran(start, end Pos, r *state) { + s.trans = append(s.trans, newTran(start, end, r)) +} + +// fork creates a new branch from the state s. +func (s *state) fork(i Pos) *state { + r := newState(s.tree) + s.addTran(i, infinity, r) + return r +} + +// findTran finds c-transition. +func (s *state) findTran(c Token) *tran { + for _, tran := range s.trans { + if s.tree.data[tran.start].Val() == c.Val() { + return tran + } + } + return nil +} + +// tran represents a state's transition. +type tran struct { + start, end Pos + state *state +} + +func newTran(start, end Pos, s *state) *tran { + return &tran{start, end, s} +} + +func (t *tran) len() int { + return int(t.end - t.start + 1) +} + +// ActEnd returns actual end position as consistent with +// the actual length of the data in the STree. +func (t *tran) ActEnd() Pos { + if t.end == infinity { + return Pos(len(t.state.tree.data)) - 1 + } + return t.end +} diff --git a/vendor/github.com/golangci/dupl/syntax/golang/golang.go b/vendor/github.com/golangci/dupl/syntax/golang/golang.go new file mode 100644 index 000000000..a0b1e77e1 --- /dev/null +++ b/vendor/github.com/golangci/dupl/syntax/golang/golang.go @@ -0,0 +1,392 @@ +package golang + +import ( + "go/ast" + "go/parser" + "go/token" + + "github.com/golangci/dupl/syntax" +) + +const ( + BadNode = iota + File + ArrayType + AssignStmt + BasicLit + BinaryExpr + BlockStmt + BranchStmt + CallExpr + CaseClause + ChanType + CommClause + CompositeLit + DeclStmt + DeferStmt + Ellipsis + EmptyStmt + ExprStmt + Field + FieldList + ForStmt + FuncDecl + FuncLit + FuncType + GenDecl + GoStmt + Ident + IfStmt + IncDecStmt + IndexExpr + InterfaceType + KeyValueExpr + LabeledStmt + MapType + ParenExpr + RangeStmt + ReturnStmt + SelectStmt + SelectorExpr + SendStmt + SliceExpr + StarExpr + StructType + SwitchStmt + TypeAssertExpr + TypeSpec + TypeSwitchStmt + UnaryExpr + ValueSpec +) + +// Parse the given file and return uniform syntax tree. 
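+// The resulting nodes are what the rest of dupl consumes: job.Parse calls Parse
+// per file, syntax.Serialize flattens the tree into a node stream, and
+// job.BuildTree feeds that stream into a suffixtree.STree for duplicate search.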
+func Parse(filename string) (*syntax.Node, error) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, filename, nil, 0) + if err != nil { + return nil, err + } + t := &transformer{ + fileset: fset, + filename: filename, + } + return t.trans(file), nil +} + +type transformer struct { + fileset *token.FileSet + filename string +} + +// trans transforms given golang AST to uniform tree structure. +func (t *transformer) trans(node ast.Node) (o *syntax.Node) { + o = syntax.NewNode() + o.Filename = t.filename + st, end := node.Pos(), node.End() + o.Pos, o.End = t.fileset.File(st).Offset(st), t.fileset.File(end).Offset(end) + + switch n := node.(type) { + case *ast.ArrayType: + o.Type = ArrayType + if n.Len != nil { + o.AddChildren(t.trans(n.Len)) + } + o.AddChildren(t.trans(n.Elt)) + + case *ast.AssignStmt: + o.Type = AssignStmt + for _, e := range n.Rhs { + o.AddChildren(t.trans(e)) + } + + for _, e := range n.Lhs { + o.AddChildren(t.trans(e)) + } + + case *ast.BasicLit: + o.Type = BasicLit + + case *ast.BinaryExpr: + o.Type = BinaryExpr + o.AddChildren(t.trans(n.X), t.trans(n.Y)) + + case *ast.BlockStmt: + o.Type = BlockStmt + for _, stmt := range n.List { + o.AddChildren(t.trans(stmt)) + } + + case *ast.BranchStmt: + o.Type = BranchStmt + if n.Label != nil { + o.AddChildren(t.trans(n.Label)) + } + + case *ast.CallExpr: + o.Type = CallExpr + o.AddChildren(t.trans(n.Fun)) + for _, arg := range n.Args { + o.AddChildren(t.trans(arg)) + } + + case *ast.CaseClause: + o.Type = CaseClause + for _, e := range n.List { + o.AddChildren(t.trans(e)) + } + for _, stmt := range n.Body { + o.AddChildren(t.trans(stmt)) + } + + case *ast.ChanType: + o.Type = ChanType + o.AddChildren(t.trans(n.Value)) + + case *ast.CommClause: + o.Type = CommClause + if n.Comm != nil { + o.AddChildren(t.trans(n.Comm)) + } + for _, stmt := range n.Body { + o.AddChildren(t.trans(stmt)) + } + + case *ast.CompositeLit: + o.Type = CompositeLit + if n.Type != nil { + o.AddChildren(t.trans(n.Type)) + } + for _, e := range n.Elts { + o.AddChildren(t.trans(e)) + } + + case *ast.DeclStmt: + o.Type = DeclStmt + o.AddChildren(t.trans(n.Decl)) + + case *ast.DeferStmt: + o.Type = DeferStmt + o.AddChildren(t.trans(n.Call)) + + case *ast.Ellipsis: + o.Type = Ellipsis + if n.Elt != nil { + o.AddChildren(t.trans(n.Elt)) + } + + case *ast.EmptyStmt: + o.Type = EmptyStmt + + case *ast.ExprStmt: + o.Type = ExprStmt + o.AddChildren(t.trans(n.X)) + + case *ast.Field: + o.Type = Field + for _, name := range n.Names { + o.AddChildren(t.trans(name)) + } + o.AddChildren(t.trans(n.Type)) + + case *ast.FieldList: + o.Type = FieldList + for _, field := range n.List { + o.AddChildren(t.trans(field)) + } + + case *ast.File: + o.Type = File + for _, decl := range n.Decls { + if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT { + // skip import declarations + continue + } + o.AddChildren(t.trans(decl)) + } + + case *ast.ForStmt: + o.Type = ForStmt + if n.Init != nil { + o.AddChildren(t.trans(n.Init)) + } + if n.Cond != nil { + o.AddChildren(t.trans(n.Cond)) + } + if n.Post != nil { + o.AddChildren(t.trans(n.Post)) + } + o.AddChildren(t.trans(n.Body)) + + case *ast.FuncDecl: + o.Type = FuncDecl + if n.Recv != nil { + o.AddChildren(t.trans(n.Recv)) + } + o.AddChildren(t.trans(n.Name), t.trans(n.Type)) + if n.Body != nil { + o.AddChildren(t.trans(n.Body)) + } + + case *ast.FuncLit: + o.Type = FuncLit + o.AddChildren(t.trans(n.Type), t.trans(n.Body)) + + case *ast.FuncType: + o.Type = FuncType + 
o.AddChildren(t.trans(n.Params)) + if n.Results != nil { + o.AddChildren(t.trans(n.Results)) + } + + case *ast.GenDecl: + o.Type = GenDecl + for _, spec := range n.Specs { + o.AddChildren(t.trans(spec)) + } + + case *ast.GoStmt: + o.Type = GoStmt + o.AddChildren(t.trans(n.Call)) + + case *ast.Ident: + o.Type = Ident + + case *ast.IfStmt: + o.Type = IfStmt + if n.Init != nil { + o.AddChildren(t.trans(n.Init)) + } + o.AddChildren(t.trans(n.Cond), t.trans(n.Body)) + if n.Else != nil { + o.AddChildren(t.trans(n.Else)) + } + + case *ast.IncDecStmt: + o.Type = IncDecStmt + o.AddChildren(t.trans(n.X)) + + case *ast.IndexExpr: + o.Type = IndexExpr + o.AddChildren(t.trans(n.X), t.trans(n.Index)) + + case *ast.InterfaceType: + o.Type = InterfaceType + o.AddChildren(t.trans(n.Methods)) + + case *ast.KeyValueExpr: + o.Type = KeyValueExpr + o.AddChildren(t.trans(n.Key), t.trans(n.Value)) + + case *ast.LabeledStmt: + o.Type = LabeledStmt + o.AddChildren(t.trans(n.Label), t.trans(n.Stmt)) + + case *ast.MapType: + o.Type = MapType + o.AddChildren(t.trans(n.Key), t.trans(n.Value)) + + case *ast.ParenExpr: + o.Type = ParenExpr + o.AddChildren(t.trans(n.X)) + + case *ast.RangeStmt: + o.Type = RangeStmt + if n.Key != nil { + o.AddChildren(t.trans(n.Key)) + } + if n.Value != nil { + o.AddChildren(t.trans(n.Value)) + } + o.AddChildren(t.trans(n.X), t.trans(n.Body)) + + case *ast.ReturnStmt: + o.Type = ReturnStmt + for _, e := range n.Results { + o.AddChildren(t.trans(e)) + } + + case *ast.SelectStmt: + o.Type = SelectStmt + o.AddChildren(t.trans(n.Body)) + + case *ast.SelectorExpr: + o.Type = SelectorExpr + o.AddChildren(t.trans(n.X), t.trans(n.Sel)) + + case *ast.SendStmt: + o.Type = SendStmt + o.AddChildren(t.trans(n.Chan), t.trans(n.Value)) + + case *ast.SliceExpr: + o.Type = SliceExpr + o.AddChildren(t.trans(n.X)) + if n.Low != nil { + o.AddChildren(t.trans(n.Low)) + } + if n.High != nil { + o.AddChildren(t.trans(n.High)) + } + if n.Max != nil { + o.AddChildren(t.trans(n.Max)) + } + + case *ast.StarExpr: + o.Type = StarExpr + o.AddChildren(t.trans(n.X)) + + case *ast.StructType: + o.Type = StructType + o.AddChildren(t.trans(n.Fields)) + + case *ast.SwitchStmt: + o.Type = SwitchStmt + if n.Init != nil { + o.AddChildren(t.trans(n.Init)) + } + if n.Tag != nil { + o.AddChildren(t.trans(n.Tag)) + } + o.AddChildren(t.trans(n.Body)) + + case *ast.TypeAssertExpr: + o.Type = TypeAssertExpr + o.AddChildren(t.trans(n.X)) + if n.Type != nil { + o.AddChildren(t.trans(n.Type)) + } + + case *ast.TypeSpec: + o.Type = TypeSpec + o.AddChildren(t.trans(n.Name), t.trans(n.Type)) + + case *ast.TypeSwitchStmt: + o.Type = TypeSwitchStmt + if n.Init != nil { + o.AddChildren(t.trans(n.Init)) + } + o.AddChildren(t.trans(n.Assign), t.trans(n.Body)) + + case *ast.UnaryExpr: + o.Type = UnaryExpr + o.AddChildren(t.trans(n.X)) + + case *ast.ValueSpec: + o.Type = ValueSpec + for _, name := range n.Names { + o.AddChildren(t.trans(name)) + } + if n.Type != nil { + o.AddChildren(t.trans(n.Type)) + } + for _, val := range n.Values { + o.AddChildren(t.trans(val)) + } + + default: + o.Type = BadNode + + } + + return o +} diff --git a/vendor/github.com/golangci/dupl/syntax/syntax.go b/vendor/github.com/golangci/dupl/syntax/syntax.go new file mode 100644 index 000000000..e2c750afd --- /dev/null +++ b/vendor/github.com/golangci/dupl/syntax/syntax.go @@ -0,0 +1,175 @@ +package syntax + +import ( + "crypto/sha1" + + "github.com/golangci/dupl/suffixtree" +) + +type Node struct { + Type int + Filename string + Pos, End int + Children []*Node + Owns 
int +} + +func NewNode() *Node { + return &Node{} +} + +func (n *Node) AddChildren(children ...*Node) { + n.Children = append(n.Children, children...) +} + +func (n *Node) Val() int { + return n.Type +} + +type Match struct { + Hash string + Frags [][]*Node +} + +func Serialize(n *Node) []*Node { + stream := make([]*Node, 0, 10) + serial(n, &stream) + return stream +} + +func serial(n *Node, stream *[]*Node) int { + *stream = append(*stream, n) + var count int + for _, child := range n.Children { + count += serial(child, stream) + } + n.Owns = count + return count + 1 +} + +// FindSyntaxUnits finds all complete syntax units in the match group and returns them +// with the corresponding hash. +func FindSyntaxUnits(data []*Node, m suffixtree.Match, threshold int) Match { + if len(m.Ps) == 0 { + return Match{} + } + firstSeq := data[m.Ps[0] : m.Ps[0]+m.Len] + indexes := getUnitsIndexes(firstSeq, threshold) + + // TODO: is this really working? + indexCnt := len(indexes) + if indexCnt > 0 { + lasti := indexes[indexCnt-1] + firstn := firstSeq[lasti] + for i := 1; i < len(m.Ps); i++ { + n := data[int(m.Ps[i])+lasti] + if firstn.Owns != n.Owns { + indexes = indexes[:indexCnt-1] + break + } + } + } + if len(indexes) == 0 || isCyclic(indexes, firstSeq) || spansMultipleFiles(indexes, firstSeq) { + return Match{} + } + + match := Match{Frags: make([][]*Node, len(m.Ps))} + for i, pos := range m.Ps { + match.Frags[i] = make([]*Node, len(indexes)) + for j, index := range indexes { + match.Frags[i][j] = data[int(pos)+index] + } + } + + lastIndex := indexes[len(indexes)-1] + match.Hash = hashSeq(firstSeq[indexes[0] : lastIndex+firstSeq[lastIndex].Owns]) + return match +} + +func getUnitsIndexes(nodeSeq []*Node, threshold int) []int { + var indexes []int + var split bool + for i := 0; i < len(nodeSeq); { + n := nodeSeq[i] + switch { + case n.Owns >= len(nodeSeq)-i: + // not complete syntax unit + i++ + split = true + continue + case n.Owns+1 < threshold: + split = true + default: + if split { + indexes = indexes[:0] + split = false + } + indexes = append(indexes, i) + } + i += n.Owns + 1 + } + return indexes +} + +// isCyclic finds out whether there is a repetive pattern in the found clone. If positive, +// it return false to point out that the clone would be redundant. 
+func isCyclic(indexes []int, nodes []*Node) bool { + cnt := len(indexes) + if cnt <= 1 { + return false + } + + alts := make(map[int]bool) + for i := 1; i <= cnt/2; i++ { + if cnt%i == 0 { + alts[i] = true + } + } + + for i := 0; i < indexes[cnt/2]; i++ { + nstart := nodes[i+indexes[0]] + AltLoop: + for alt := range alts { + for j := alt; j < cnt; j += alt { + index := i + indexes[j] + if index < len(nodes) { + nalt := nodes[index] + if nstart.Owns == nalt.Owns && nstart.Type == nalt.Type { + continue + } + } else if i >= indexes[alt] { + return true + } + delete(alts, alt) + continue AltLoop + } + } + if len(alts) == 0 { + return false + } + } + return true +} + +func spansMultipleFiles(indexes []int, nodes []*Node) bool { + if len(indexes) < 2 { + return false + } + f := nodes[indexes[0]].Filename + for i := 1; i < len(indexes); i++ { + if nodes[indexes[i]].Filename != f { + return true + } + } + return false +} + +func hashSeq(nodes []*Node) string { + h := sha1.New() + bytes := make([]byte, len(nodes)) + for i, node := range nodes { + bytes[i] = byte(node.Type) + } + h.Write(bytes) + return string(h.Sum(nil)) +} diff --git a/vendor/github.com/golangci/go-misc/LICENSE b/vendor/github.com/golangci/go-misc/LICENSE new file mode 100644 index 000000000..cc42dd45d --- /dev/null +++ b/vendor/github.com/golangci/go-misc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rémy Oudompheng. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * The name of Rémy Oudompheng may not be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golangci/go-misc/deadcode/README.md b/vendor/github.com/golangci/go-misc/deadcode/README.md new file mode 100644 index 000000000..550423128 --- /dev/null +++ b/vendor/github.com/golangci/go-misc/deadcode/README.md @@ -0,0 +1,18 @@ +# deadcode + +`deadcode` is a very simple utility which detects unused declarations in a Go package. 
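+
+The copy vendored here also exposes a library entry point, `Run` (see
+`deadcode.go` below), which takes a `*loader.Program` and reports unused
+identifiers. A minimal sketch of driving it directly (the analyzed package
+path below is a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/golangci/go-misc/deadcode"
+	"golang.org/x/tools/go/loader"
+)
+
+func main() {
+	var conf loader.Config
+	conf.Import("example.com/yourmodule/yourpkg") // placeholder package path
+	prog, err := conf.Load()
+	if err != nil {
+		log.Fatal(err)
+	}
+	issues, err := deadcode.Run(prog)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, issue := range issues {
+		fmt.Printf("%s: %s is unused\n", issue.Pos, issue.UnusedIdentName)
+	}
+}
+```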
+ +## Usage +``` +deadcode [-test] [packages] + + -test Include test files + packages A list of packages using the same conventions as the go tool +``` + +## Limitations + +* Self-referential unused code is not currently reported +* A single package can be tested at a time +* Unused methods are not reported + diff --git a/vendor/github.com/golangci/go-misc/deadcode/deadcode.go b/vendor/github.com/golangci/go-misc/deadcode/deadcode.go new file mode 100644 index 000000000..2e7cfc962 --- /dev/null +++ b/vendor/github.com/golangci/go-misc/deadcode/deadcode.go @@ -0,0 +1,135 @@ +package deadcode + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/loader" +) + +var exitCode int + +var ( + withTestFiles bool +) + +type Issue struct { + Pos token.Position + UnusedIdentName string +} + +func Run(program *loader.Program) ([]Issue, error) { + ctx := &Context{ + program: program, + } + report := ctx.Process() + var issues []Issue + for _, obj := range report { + issues = append(issues, Issue{ + Pos: program.Fset.Position(obj.Pos()), + UnusedIdentName: obj.Name(), + }) + } + + return issues, nil +} + +func fatalf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} + +type Context struct { + cwd string + withTests bool + + program *loader.Program +} + +// pos resolves a compact position encoding into a verbose one +func (ctx *Context) pos(pos token.Pos) token.Position { + if ctx.cwd == "" { + ctx.cwd, _ = os.Getwd() + } + p := ctx.program.Fset.Position(pos) + f, err := filepath.Rel(ctx.cwd, p.Filename) + if err == nil { + p.Filename = f + } + return p +} + +// error formats the error to standard error, adding program +// identification and a newline +func (ctx *Context) errorf(pos token.Pos, format string, args ...interface{}) { + p := ctx.pos(pos) + fmt.Fprintf(os.Stderr, p.String()+": "+format+"\n", args...) + exitCode = 2 +} + +func (ctx *Context) Load(args ...string) { + // TODO +} + +func (ctx *Context) Process() []types.Object { + prog := ctx.program + var allUnused []types.Object + for _, pkg := range prog.Imported { + unused := ctx.doPackage(prog, pkg) + allUnused = append(allUnused, unused...) + } + for _, pkg := range prog.Created { + unused := ctx.doPackage(prog, pkg) + allUnused = append(allUnused, unused...) 
+ } + sort.Sort(objects(allUnused)) + return allUnused +} + +func isTestFuncByName(name string) bool { + return strings.HasPrefix(name, "Test") || strings.HasPrefix(name, "Benchmark") || strings.HasPrefix(name, "Example") +} + +func (ctx *Context) doPackage(prog *loader.Program, pkg *loader.PackageInfo) []types.Object { + used := make(map[types.Object]bool) + for _, file := range pkg.Files { + ast.Inspect(file, func(n ast.Node) bool { + id, ok := n.(*ast.Ident) + if !ok { + return true + } + obj := pkg.Info.Uses[id] + if obj != nil { + used[obj] = true + } + return false + }) + } + + global := pkg.Pkg.Scope() + var unused []types.Object + for _, name := range global.Names() { + if pkg.Pkg.Name() == "main" && name == "main" { + continue + } + obj := global.Lookup(name) + _, isSig := obj.Type().(*types.Signature) + pos := ctx.pos(obj.Pos()) + isTestMethod := isSig && isTestFuncByName(obj.Name()) && strings.HasSuffix(pos.Filename, "_test.go") + if !used[obj] && ((pkg.Pkg.Name() == "main" && !isTestMethod) || !ast.IsExported(name)) { + unused = append(unused, obj) + } + } + return unused +} + +type objects []types.Object + +func (s objects) Len() int { return len(s) } +func (s objects) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s objects) Less(i, j int) bool { return s[i].Pos() < s[j].Pos() } diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/golangci/gofmt/gofmt/LICENSE similarity index 83% rename from vendor/github.com/gorilla/context/LICENSE rename to vendor/github.com/golangci/gofmt/gofmt/LICENSE index 0e5fb8728..6a66aea5e 100644 --- a/vendor/github.com/gorilla/context/LICENSE +++ b/vendor/github.com/golangci/gofmt/gofmt/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/golangci/gofmt/gofmt/doc.go b/vendor/github.com/golangci/gofmt/gofmt/doc.go new file mode 100644 index 000000000..da0c8581d --- /dev/null +++ b/vendor/github.com/golangci/gofmt/gofmt/doc.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Gofmt formats Go programs. +It uses tabs for indentation and blanks for alignment. +Alignment assumes that an editor is using a fixed-width font. + +Without an explicit path, it processes the standard input. Given a file, +it operates on that file; given a directory, it operates on all .go files in +that directory, recursively. (Files starting with a period are ignored.) +By default, gofmt prints the reformatted sources to standard output. + +Usage: + gofmt [flags] [path ...] 
+ +The flags are: + -d + Do not print reformatted sources to standard output. + If a file's formatting is different than gofmt's, print diffs + to standard output. + -e + Print all (including spurious) errors. + -l + Do not print reformatted sources to standard output. + If a file's formatting is different from gofmt's, print its name + to standard output. + -r rule + Apply the rewrite rule to the source before reformatting. + -s + Try to simplify code (after applying the rewrite rule, if any). + -w + Do not print reformatted sources to standard output. + If a file's formatting is different from gofmt's, overwrite it + with gofmt's version. If an error occurred during overwriting, + the original file is restored from an automatic backup. + +Debugging support: + -cpuprofile filename + Write cpu profile to the specified file. + + +The rewrite rule specified with the -r flag must be a string of the form: + + pattern -> replacement + +Both pattern and replacement must be valid Go expressions. +In the pattern, single-character lowercase identifiers serve as +wildcards matching arbitrary sub-expressions; those expressions +will be substituted for the same identifiers in the replacement. + +When gofmt reads from standard input, it accepts either a full Go program +or a program fragment. A program fragment must be a syntactically +valid declaration list, statement list, or expression. When formatting +such a fragment, gofmt preserves leading indentation as well as leading +and trailing spaces, so that individual sections of a Go program can be +formatted by piping them through gofmt. + +Examples + +To check files for unnecessary parentheses: + + gofmt -r '(a) -> a' -l *.go + +To remove the parentheses: + + gofmt -r '(a) -> a' -w *.go + +To convert the package tree from explicit slice upper bounds to implicit ones: + + gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src + +The simplify command + +When invoked with -s gofmt will make the following source transformations where possible. + + An array, slice, or map composite literal of the form: + []T{T{}, T{}} + will be simplified to: + []T{{}, {}} + + A slice expression of the form: + s[a:len(s)] + will be simplified to: + s[a:] + + A range of the form: + for x, _ = range v {...} + will be simplified to: + for x = range v {...} + + A range of the form: + for _ = range v {...} + will be simplified to: + for range v {...} + +This may result in changes that are incompatible with earlier versions of Go. +*/ +package gofmt + +// BUG(rsc): The implementation of -r is a bit slow. +// BUG(gri): If -w fails, the restored original file may not have some of the +// original file attributes. diff --git a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go new file mode 100644 index 000000000..fb9c8cb37 --- /dev/null +++ b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go @@ -0,0 +1,327 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gofmt + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/scanner" + "go/token" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + "sync" +) + +var ( + // main operation modes + list = flag.Bool("gofmt.l", false, "list files whose formatting differs from gofmt's") + write = flag.Bool("gofmt.w", false, "write result to (source) file instead of stdout") + rewriteRule = flag.String("gofmt.r", "", "rewrite rule (e.g., 'a[b:len(a)] -> a[b:]')") + simplifyAST = flag.Bool("gofmt.s", false, "simplify code") + doDiff = flag.Bool("gofmt.d", false, "display diffs instead of rewriting files") + allErrors = flag.Bool("gofmt.e", false, "report all errors (not just the first 10 on different lines)") + + // debugging + cpuprofile = flag.String("gofmt.cpuprofile", "", "write cpu profile to this file") +) + +const ( + tabWidth = 8 + printerMode = printer.UseSpaces | printer.TabIndent +) + +var ( + fileSet = token.NewFileSet() // per process FileSet + exitCode = 0 + rewrite func(*ast.File) *ast.File + parserMode parser.Mode + parserModeInitOnce sync.Once +) + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: gofmt [flags] [path ...]\n") + flag.PrintDefaults() +} + +func initParserMode() { + parserModeInitOnce.Do(func() { + parserMode = parser.ParseComments + if *allErrors { + parserMode |= parser.AllErrors + } + }) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// If in == nil, the source is the contents of the file with the given filename. +func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error { + var perm os.FileMode = 0644 + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return err + } + in = f + perm = fi.Mode().Perm() + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + file, sourceAdj, indentAdj, err := parse(fileSet, filename, src, stdin) + if err != nil { + return err + } + + if rewrite != nil { + if sourceAdj == nil { + file = rewrite(file) + } else { + fmt.Fprintf(os.Stderr, "warning: rewrite ignored for incomplete programs\n") + } + } + + ast.SortImports(fileSet, file) + + if *simplifyAST { + simplify(file) + } + + res, err := format(fileSet, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + // make a temporary backup before overwriting original + bakname, err := backupFile(filename+".", src, perm) + if err != nil { + return err + } + err = ioutil.WriteFile(filename, res, perm) + if err != nil { + os.Rename(bakname, filename) + return err + } + err = os.Remove(bakname) + if err != nil { + return err + } + } + if *doDiff { + data, err := diff(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, 
os.Stdout, false) + } + // Don't complain if a file was deleted in the meantime (i.e. + // the directory changed concurrently while running gofmt). + if err != nil && !os.IsNotExist(err) { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func gofmtMain() { + flag.Usage = usage + flag.Parse() + + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Fprintf(os.Stderr, "creating cpu profile: %s\n", err) + exitCode = 2 + return + } + defer f.Close() + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + initParserMode() + initRewrite() + + if flag.NArg() == 0 { + if *write { + fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input") + exitCode = 2 + return + } + if err := processFile("", os.Stdin, os.Stdout, true); err != nil { + report(err) + } + return + } + + for i := 0; i < flag.NArg(); i++ { + path := flag.Arg(i) + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, false); err != nil { + report(err) + } + } + } +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gofmt", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gofmt", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + return replaceTempFilename(data, filename) + } + return +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +const chmodSupported = runtime.GOOS != "windows" + +// backupFile writes data to a new file named filename with permissions perm, +// with 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +// isSpace reports whether the byte is a space character. +// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. 
+func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} diff --git a/vendor/github.com/golangci/gofmt/gofmt/rewrite.go b/vendor/github.com/golangci/gofmt/gofmt/rewrite.go new file mode 100644 index 000000000..73741e0a9 --- /dev/null +++ b/vendor/github.com/golangci/gofmt/gofmt/rewrite.go @@ -0,0 +1,303 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gofmt + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +func initRewrite() { + if *rewriteRule == "" { + rewrite = nil // disable any previous rewrite + return + } + f := strings.Split(*rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + rewrite = func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + val = apply(rewriteVal, val) + for k := range m { + delete(m, k) + } + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. 
+// To avoid extra conversions, f operates on the reflect.Value form. +func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match reports whether pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. 
+ return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/github.com/golangci/gofmt/gofmt/simplify.go b/vendor/github.com/golangci/gofmt/gofmt/simplify.go new file mode 100644 index 000000000..2c75495a6 --- /dev/null +++ b/vendor/github.com/golangci/gofmt/gofmt/simplify.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gofmt + +import ( + "go/ast" + "go/token" + "reflect" +) + +type simplifier struct{} + +func (s simplifier) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.CompositeLit: + // array, slice, and map composite literals may be simplified + outer := n + var keyType, eltType ast.Expr + switch typ := outer.Type.(type) { + case *ast.ArrayType: + eltType = typ.Elt + case *ast.MapType: + keyType = typ.Key + eltType = typ.Value + } + + if eltType != nil { + var ktyp reflect.Value + if keyType != nil { + ktyp = reflect.ValueOf(keyType) + } + typ := reflect.ValueOf(eltType) + for i, x := range outer.Elts { + px := &outer.Elts[i] + // look at value of indexed/named elements + if t, ok := x.(*ast.KeyValueExpr); ok { + if keyType != nil { + s.simplifyLiteral(ktyp, keyType, t.Key, &t.Key) + } + x = t.Value + px = &t.Value + } + s.simplifyLiteral(typ, eltType, x, px) + } + // node was simplified - stop walk (there are no subnodes to simplify) + return nil + } + + case *ast.SliceExpr: + // a slice expression of the form: s[a:len(s)] + // can be simplified to: s[a:] + // if s is "simple enough" (for now we only accept identifiers) + // + // Note: This may not be correct because len may have been redeclared in another + // file belonging to the same package. However, this is extremely unlikely + // and so far (April 2016, after years of supporting this rewrite feature) + // has never come up, so let's keep it working as is (see also #15153). 
+ if n.Max != nil { + // - 3-index slices always require the 2nd and 3rd index + break + } + if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil { + // the array/slice object is a single, resolved identifier + if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() { + // the high expression is a function call with a single argument + if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" && fun.Obj == nil { + // the function called is "len" and it is not locally defined; and + // because we don't have dot imports, it must be the predefined len() + if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Obj == s.Obj { + // the len argument is the array/slice object + n.High = nil + } + } + } + } + // Note: We could also simplify slice expressions of the form s[0:b] to s[:b] + // but we leave them as is since sometimes we want to be very explicit + // about the lower bound. + // An example where the 0 helps: + // x, y, z := b[0:2], b[2:4], b[4:6] + // An example where it does not: + // x, y := b[:n], b[n:] + + case *ast.RangeStmt: + // - a range of the form: for x, _ = range v {...} + // can be simplified to: for x = range v {...} + // - a range of the form: for _ = range v {...} + // can be simplified to: for range v {...} + if isBlank(n.Value) { + n.Value = nil + } + if isBlank(n.Key) && n.Value == nil { + n.Key = nil + } + } + + return s +} + +func (s simplifier) simplifyLiteral(typ reflect.Value, astType, x ast.Expr, px *ast.Expr) { + ast.Walk(s, x) // simplify x + + // if the element is a composite literal and its literal type + // matches the outer literal's element type exactly, the inner + // literal type may be omitted + if inner, ok := x.(*ast.CompositeLit); ok { + if match(nil, typ, reflect.ValueOf(inner.Type)) { + inner.Type = nil + } + } + // if the outer literal's element type is a pointer type *T + // and the element is & of a composite literal of type T, + // the inner &T may be omitted. + if ptr, ok := astType.(*ast.StarExpr); ok { + if addr, ok := x.(*ast.UnaryExpr); ok && addr.Op == token.AND { + if inner, ok := addr.X.(*ast.CompositeLit); ok { + if match(nil, reflect.ValueOf(ptr.X), reflect.ValueOf(inner.Type)) { + inner.Type = nil // drop T + *px = inner // drop & + } + } + } + } +} + +func isBlank(x ast.Expr) bool { + ident, ok := x.(*ast.Ident) + return ok && ident.Name == "_" +} + +func simplify(f *ast.File) { + // remove empty declarations such as "const ()", etc + removeEmptyDeclGroups(f) + + var s simplifier + ast.Walk(s, f) +} + +func removeEmptyDeclGroups(f *ast.File) { + i := 0 + for _, d := range f.Decls { + if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) { + f.Decls[i] = d + i++ + } + } + f.Decls = f.Decls[:i] +} + +func isEmpty(f *ast.File, g *ast.GenDecl) bool { + if g.Doc != nil || g.Specs != nil { + return false + } + + for _, c := range f.Comments { + // if there is a comment in the declaration, it is not considered empty + if g.Pos() <= c.Pos() && c.End() <= g.End() { + return false + } + } + + return true +} diff --git a/vendor/github.com/golangci/gofmt/goimports/LICENSE b/vendor/github.com/golangci/gofmt/goimports/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/golangci/gofmt/goimports/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golangci/gofmt/goimports/goimports.go b/vendor/github.com/golangci/gofmt/goimports/goimports.go new file mode 100644 index 000000000..8878b700d --- /dev/null +++ b/vendor/github.com/golangci/gofmt/goimports/goimports.go @@ -0,0 +1,358 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goimports + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/scanner" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + + "golang.org/x/tools/imports" +) + +var ( + // main operation modes + list = flag.Bool("goimports.l", false, "list files whose formatting differs from goimport's") + write = flag.Bool("goimports.w", false, "write result to (source) file instead of stdout") + doDiff = flag.Bool("goimports.d", false, "display diffs instead of rewriting files") + srcdir = flag.String("goimports.srcdir", "", "choose imports as if source code is from `dir`. 
When operating on a single file, dir may instead be the complete file name.") + verbose bool // verbose logging + + cpuProfile = flag.String("goimports.cpuprofile", "", "CPU profile output") + memProfile = flag.String("goimports.memprofile", "", "memory profile output") + memProfileRate = flag.Int("goimports.memrate", 0, "if > 0, sets runtime.MemProfileRate") + + options = &imports.Options{ + TabWidth: 8, + TabIndent: true, + Comments: true, + Fragment: true, + } + exitCode = 0 +) + +func init() { + flag.BoolVar(&options.AllErrors, "goimports.e", false, "report all errors (not just the first 10 on different lines)") + flag.StringVar(&imports.LocalPrefix, "goimports.local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(2) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// argumentType is which mode goimports was invoked as. +type argumentType int + +const ( + // fromStdin means the user is piping their source into goimports. + fromStdin argumentType = iota + + // singleArg is the common case from editors, when goimports is run on + // a single file. + singleArg + + // multipleArg is when the user ran "goimports file1.go file2.go" + // or ran goimports on a directory tree. + multipleArg +) + +func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error { + opt := options + if argType == fromStdin { + nopt := *options + nopt.Fragment = true + opt = &nopt + } + + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + target := filename + if *srcdir != "" { + // Determine whether the provided -srcdirc is a directory or file + // and then use it to override the target. + // + // See https://github.com/dominikh/go-mode.el/issues/146 + if isFile(*srcdir) { + if argType == multipleArg { + return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories") + } + target = *srcdir + } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) { + // For a file which doesn't exist on disk yet, but might shortly. + // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk. + // The goimports on-save hook writes the buffer to a temp file + // first and runs goimports before the actual save to newfile.go. + // The editor's buffer is named "newfile.go" so that is passed to goimports as: + // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go + // and then the editor reloads the result from the tmp file and writes + // it to newfile.go. + target = *srcdir + } else { + // Pretend that file is from *srcdir in order to decide + // visible imports correctly. 
+ target = filepath.Join(*srcdir, filepath.Base(filename)) + } + } + + res, err := imports.Process(target, src, opt) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + if argType == fromStdin { + // filename is "" + return errors.New("can't use -w on stdin") + } + err = ioutil.WriteFile(filename, res, 0) + if err != nil { + return err + } + } + if *doDiff { + if argType == fromStdin { + filename = "stdin.go" // because .orig looks silly + } + data, err := diff(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, os.Stdout, multipleArg) + } + if err != nil { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +// parseFlags parses command line flags and returns the paths to process. +// It's a var so that custom implementations can replace it in other files. +var parseFlags = func() []string { + flag.BoolVar(&verbose, "v", false, "verbose logging") + + flag.Parse() + return flag.Args() +} + +func bufferedFileWriter(dest string) (w io.Writer, close func()) { + f, err := os.Create(dest) + if err != nil { + log.Fatal(err) + } + bw := bufio.NewWriter(f) + return bw, func() { + if err := bw.Flush(); err != nil { + log.Fatalf("error flushing %v: %v", dest, err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + } +} + +func gofmtMain() { + flag.Usage = usage + paths := parseFlags() + + if *cpuProfile != "" { + bw, flush := bufferedFileWriter(*cpuProfile) + pprof.StartCPUProfile(bw) + defer flush() + defer pprof.StopCPUProfile() + } + // doTrace is a conditionally compiled wrapper around runtime/trace. It is + // used to allow goimports to compile under gccgo, which does not support + // runtime/trace. See https://golang.org/issue/15544. 
+ if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + bw, flush := bufferedFileWriter(*memProfile) + defer func() { + runtime.GC() // materialize all statistics + if err := pprof.WriteHeapProfile(bw); err != nil { + log.Fatal(err) + } + flush() + }() + } + + if verbose { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + imports.Debug = true + } + if options.TabWidth < 0 { + fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) + exitCode = 2 + return + } + + if len(paths) == 0 { + if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil { + report(err) + } + return + } + + argType := singleArg + if len(paths) > 1 { + argType = multipleArg + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, argType); err != nil { + report(err) + } + } + } +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gofmt", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gofmt", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + return replaceTempFilename(data, filename) + } + return +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +// isFile reports whether name is a file. +func isFile(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.Mode().IsRegular() +} + +// isDir reports whether name is a directory. 
+func isDir(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.IsDir() +} diff --git a/vendor/github.com/golangci/gofmt/goimports/golangci.go b/vendor/github.com/golangci/gofmt/goimports/golangci.go new file mode 100644 index 000000000..e9d013d58 --- /dev/null +++ b/vendor/github.com/golangci/gofmt/goimports/golangci.go @@ -0,0 +1,33 @@ +package goimports + +import ( + "bytes" + "fmt" + "io/ioutil" + + "golang.org/x/tools/imports" +) + +func Run(filename string) ([]byte, error) { + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + res, err := imports.Process(filename, src, options) + if err != nil { + return nil, err + } + + if bytes.Equal(src, res) { + return nil, nil + } + + // formatting has changed + data, err := diff(src, res, filename) + if err != nil { + return nil, fmt.Errorf("error computing diff: %s", err) + } + + return data, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/LICENSE b/vendor/github.com/golangci/golangci-lint/LICENSE new file mode 100644 index 000000000..e72bfddab --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. \ No newline at end of file diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go new file mode 100644 index 000000000..282d794b8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "fmt" + "os" + + "github.com/golangci/golangci-lint/pkg/commands" + "github.com/golangci/golangci-lint/pkg/exitcodes" +) + +var ( + // Populated by goreleaser during build + version = "master" + commit = "?" 
+ date = "" +) + +func main() { + e := commands.NewExecutor(version, commit, date) + + if err := e.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "failed executing command with error %v\n", err) + os.Exit(exitcodes.Failure) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go new file mode 100644 index 000000000..119a8a60d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + "runtime/debug" +) + +//nolint:gochecknoinits +func init() { + if info, available := debug.ReadBuildInfo(); available { + if date == "" { + version = info.Main.Version + commit = fmt.Sprintf("(unknown, mod sum: %q)", info.Main.Sum) + date = "(unknown)" + } + } +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go new file mode 100644 index 000000000..51c75a77d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -0,0 +1,527 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cache implements a build artifact cache. +// +// This package is a slightly modified fork of Go's +// cmd/go/internal/cache package. +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/internal/renameio" + "github.com/golangci/golangci-lint/internal/robustio" +) + +// An ActionID is a cache action key, the hash of a complete description of a +// repeatable computation (command line, environment variables, +// input file contents, executable contents). +type ActionID [HashSize]byte + +// An OutputID is a cache output key, the hash of an output of a computation. +type OutputID [HashSize]byte + +// A Cache is a package cache, backed by a file system directory tree. +type Cache struct { + dir string + now func() time.Time +} + +// Open opens and returns the cache in the given directory. +// +// It is safe for multiple processes on a single machine to use the +// same cache directory in a local file system simultaneously. +// They will coordinate using operating system file locks and may +// duplicate effort but will not corrupt the cache. +// +// However, it is NOT safe for multiple processes on different machines +// to share a cache directory (for example, if the directory were stored +// in a network file system). File locking is notoriously unreliable in +// network file systems and may not suffice to protect the cache. +// +func Open(dir string) (*Cache, error) { + info, err := os.Stat(dir) + if err != nil { + return nil, err + } + if !info.IsDir() { + return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + } + for i := 0; i < 256; i++ { + name := filepath.Join(dir, fmt.Sprintf("%02x", i)) + if err := os.MkdirAll(name, 0744); err != nil { + return nil, err + } + } + c := &Cache{ + dir: dir, + now: time.Now, + } + return c, nil +} + +// fileName returns the name of the file corresponding to the given id. 
+func (c *Cache) fileName(id [HashSize]byte, key string) string { + return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) +} + +var errMissing = errors.New("cache entry not found") + +func IsErrMissing(err error) bool { + return errors.Cause(err) == errMissing +} + +const ( + // action entry file is "v1 \n" + hexSize = HashSize * 2 + entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 +) + +// verify controls whether to run the cache in verify mode. +// In verify mode, the cache always returns errMissing from Get +// but then double-checks in Put that the data being written +// exactly matches any existing entry. This provides an easy +// way to detect program behavior that would have been different +// had the cache entry been returned from Get. +// +// verify is enabled by setting the environment variable +// GODEBUG=gocacheverify=1. +var verify = false + +// DebugTest is set when GODEBUG=gocachetest=1 is in the environment. +var DebugTest = false + +func init() { initEnv() } + +func initEnv() { + verify = false + debugHash = false + debug := strings.Split(os.Getenv("GODEBUG"), ",") + for _, f := range debug { + if f == "gocacheverify=1" { + verify = true + } + if f == "gocachehash=1" { + debugHash = true + } + if f == "gocachetest=1" { + DebugTest = true + } + } +} + +// Get looks up the action ID in the cache, +// returning the corresponding output ID and file size, if any. +// Note that finding an output ID does not guarantee that the +// saved file for that output ID is still available. +func (c *Cache) Get(id ActionID) (Entry, error) { + if verify { + return Entry{}, errMissing + } + return c.get(id) +} + +type Entry struct { + OutputID OutputID + Size int64 + Time time.Time +} + +// get is Get but does not respect verify mode, so that Put can use it. 
+func (c *Cache) get(id ActionID) (Entry, error) { + missing := func() (Entry, error) { + return Entry{}, errMissing + } + failed := func(err error) (Entry, error) { + return Entry{}, err + } + fileName := c.fileName(id, "a") + f, err := os.Open(fileName) + if err != nil { + if os.IsNotExist(err) { + return missing() + } + return failed(err) + } + defer f.Close() + entry := make([]byte, entrySize+1) // +1 to detect whether f is too long + if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF { + return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr)) + } + if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { + return failed(fmt.Errorf("bad data in %s", fileName)) + } + eid, entry := entry[3:3+hexSize], entry[3+hexSize:] + eout, entry := entry[1:1+hexSize], entry[1+hexSize:] + esize, entry := entry[1:1+20], entry[1+20:] + etime := entry[1 : 1+20] + var buf [HashSize]byte + if _, err = hex.Decode(buf[:], eid); err != nil || buf != id { + return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName)) + } + if _, err = hex.Decode(buf[:], eout); err != nil { + return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName)) + } + i := 0 + for i < len(esize) && esize[i] == ' ' { + i++ + } + size, err := strconv.ParseInt(string(esize[i:]), 10, 64) + if err != nil || size < 0 { + return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err)) + } + i = 0 + for i < len(etime) && etime[i] == ' ' { + i++ + } + tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) + if err != nil || tm < 0 { + return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err)) + } + + if err = c.used(fileName); err != nil { + return failed(errors.Wrapf(err, "failed to mark %s as used", fileName)) + } + + return Entry{buf, size, time.Unix(0, tm)}, nil +} + +// GetBytes looks up the action ID in the cache and returns +// the corresponding output bytes. +// GetBytes should only be used for data that can be expected to fit in memory. +func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { + entry, err := c.Get(id) + if err != nil { + return nil, entry, err + } + outputFile, err := c.OutputFile(entry.OutputID) + if err != nil { + return nil, entry, err + } + + data, err := robustio.ReadFile(outputFile) + if err != nil { + return nil, entry, err + } + + if sha256.Sum256(data) != entry.OutputID { + return nil, entry, errMissing + } + return data, entry, nil +} + +// OutputFile returns the name of the cache file storing output with the given OutputID. +func (c *Cache) OutputFile(out OutputID) (string, error) { + file := c.fileName(out, "d") + if err := c.used(file); err != nil { + return "", err + } + return file, nil +} + +// Time constants for cache expiration. +// +// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), +// to avoid causing many unnecessary inode updates. The mtimes therefore +// roughly reflect "time of last use" but may in fact be older by at most an hour. +// +// We scan the cache for entries to delete at most once per trimInterval (1 day). +// +// When we do scan the cache, we delete entries that have not been used for +// at least trimLimit (5 days). 
Statistics gathered from a month of usage by +// Go developers found that essentially all reuse of cached entries happened +// within 5 days of the previous reuse. See golang.org/issue/22990. +const ( + mtimeInterval = 1 * time.Hour + trimInterval = 24 * time.Hour + trimLimit = 5 * 24 * time.Hour +) + +// used makes a best-effort attempt to update mtime on file, +// so that mtime reflects cache access time. +// +// Because the reflection only needs to be approximate, +// and to reduce the amount of disk activity caused by using +// cache entries, used only updates the mtime if the current +// mtime is more than an hour old. This heuristic eliminates +// nearly all of the mtime updates that would otherwise happen, +// while still keeping the mtimes useful for cache trimming. +func (c *Cache) used(file string) error { + info, err := os.Stat(file) + if err != nil { + if os.IsNotExist(err) { + return errMissing + } + return errors.Wrapf(err, "failed to stat file %s", file) + } + + if c.now().Sub(info.ModTime()) < mtimeInterval { + return nil + } + + if err := os.Chtimes(file, c.now(), c.now()); err != nil { + return errors.Wrapf(err, "failed to change time of file %s", file) + } + + return nil +} + +// Trim removes old cache entries that are likely not to be reused. +func (c *Cache) Trim() { + now := c.now() + + // We maintain in dir/trim.txt the time of the last completed cache trim. + // If the cache has been trimmed recently enough, do nothing. + // This is the common case. + data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt")) + t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) + if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { + return + } + + // Trim each of the 256 subdirectories. + // We subtract an additional mtimeInterval + // to account for the imprecision of our "last used" mtimes. + cutoff := now.Add(-trimLimit - mtimeInterval) + for i := 0; i < 256; i++ { + subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i)) + c.trimSubdir(subdir, cutoff) + } + + // Ignore errors from here: if we don't write the complete timestamp, the + // cache will appear older than it is, and we'll trim it again next time. + _ = renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) +} + +// trimSubdir trims a single cache subdirectory. +func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { + // Read all directory entries from subdir before removing + // any files, in case removing files invalidates the file offset + // in the directory scan. Also, ignore error from f.Readdirnames, + // because we don't care about reporting the error and we still + // want to process any entries found before the error. + f, err := os.Open(subdir) + if err != nil { + return + } + names, _ := f.Readdirnames(-1) + f.Close() + + for _, name := range names { + // Remove only cache entries (xxxx-a and xxxx-d). + if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { + continue + } + entry := filepath.Join(subdir, name) + info, err := os.Stat(entry) + if err == nil && info.ModTime().Before(cutoff) { + os.Remove(entry) + } + } +} + +// putIndexEntry adds an entry to the cache recording that executing the action +// with the given id produces an output with the given output id (hash) and size. 
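// Illustrative aside, not part of the vendored file: the index entries read by
// get above and written by putIndexEntry below are fixed-width text lines of
// the form "v1 <hex id> <hex out> <size> <unixnano>\n", with both integers
// right-aligned in 20-character fields. A minimal standalone sketch of that
// layout (IDs and size here are made up):
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	var id, out [32]byte // 32-byte hashes render as 64 hex characters
	id[0], out[0] = 0xab, 0xcd
	size := int64(1024)

	// Same layout as the "-a" index files.
	entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
	fmt.Printf("entry is %d bytes\n", len(entry)) // 3+64+1+64+1+20+1+20+1 = 175

	// Parsing relies on fixed offsets rather than splitting on spaces.
	fields := entry[3:]
	eout := fields[64+1 : 64+1+64]
	esize := strings.TrimSpace(fields[64+1+64+1 : 64+1+64+1+20])
	n, err := strconv.ParseInt(esize, 10, 64)
	fmt.Println(eout[:8], n, err)
}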
+func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { + // Note: We expect that for one reason or another it may happen + // that repeating an action produces a different output hash + // (for example, if the output contains a time stamp or temp dir name). + // While not ideal, this is also not a correctness problem, so we + // don't make a big deal about it. In particular, we leave the action + // cache entries writable specifically so that they can be overwritten. + // + // Setting GODEBUG=gocacheverify=1 does make a big deal: + // in verify mode we are double-checking that the cache entries + // are entirely reproducible. As just noted, this may be unrealistic + // in some cases but the check is also useful for shaking out real bugs. + entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()) + + if verify && allowVerify { + old, err := c.get(id) + if err == nil && (old.OutputID != out || old.Size != size) { + // panic to show stack trace, so we can see what code is generating this cache entry. + msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size) + panic(msg) + } + } + file := c.fileName(id, "a") + + // Copy file to cache directory. + mode := os.O_WRONLY | os.O_CREATE + f, err := os.OpenFile(file, mode, 0666) + if err != nil { + return err + } + _, err = f.WriteString(entry) + if err == nil { + // Truncate the file only *after* writing it. + // (This should be a no-op, but truncate just in case of previous corruption.) + // + // This differs from ioutil.WriteFile, which truncates to 0 *before* writing + // via os.O_TRUNC. Truncating only after writing ensures that a second write + // of the same content to the same file is idempotent, and does not — even + // temporarily! — undo the effect of the first write. + err = f.Truncate(int64(len(entry))) + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { + // TODO(bcmills): This Remove potentially races with another go command writing to file. + // Can we eliminate it? + os.Remove(file) + return err + } + if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", file) + } + + return nil +} + +// Put stores the given output in the cache as the output for the action ID. +// It may read file twice. The content of file must not change between the two passes. +func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, true) +} + +// PutNoVerify is like Put but disables the verify check +// when GODEBUG=goverifycache=1 is set. +// It is meant for data that is OK to cache but that we expect to vary slightly from run to run, +// like test output containing times and the like. +func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, false) +} + +func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { + // Compute output ID. + h := sha256.New() + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + size, err := io.Copy(h, file) + if err != nil { + return OutputID{}, 0, err + } + var out OutputID + h.Sum(out[:0]) + + // Copy to cached output file (if not already present). + if err := c.copyFile(file, out, size); err != nil { + return out, size, err + } + + // Add to cache index. 
+ return out, size, c.putIndexEntry(id, out, size, allowVerify) +} + +// PutBytes stores the given bytes in the cache as the output for the action ID. +func (c *Cache) PutBytes(id ActionID, data []byte) error { + _, _, err := c.Put(id, bytes.NewReader(data)) + return err +} + +// copyFile copies file into the cache, expecting it to have the given +// output ID and size, if that file is not present already. +func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { + name := c.fileName(out, "d") + info, err := os.Stat(name) + if err == nil && info.Size() == size { + // Check hash. + if f, openErr := os.Open(name); openErr == nil { + h := sha256.New() + if _, copyErr := io.Copy(h, f); copyErr != nil { + return errors.Wrap(copyErr, "failed to copy to sha256") + } + + f.Close() + var out2 OutputID + h.Sum(out2[:0]) + if out == out2 { + return nil + } + } + // Hash did not match. Fall through and rewrite file. + } + + // Copy file to cache directory. + mode := os.O_RDWR | os.O_CREATE + if err == nil && info.Size() > size { // shouldn't happen but fix in case + mode |= os.O_TRUNC + } + f, err := os.OpenFile(name, mode, 0666) + if err != nil { + return err + } + defer f.Close() + if size == 0 { + // File now exists with correct size. + // Only one possible zero-length file, so contents are OK too. + // Early return here makes sure there's a "last byte" for code below. + return nil + } + + // From here on, if any of the I/O writing the file fails, + // we make a best-effort attempt to truncate the file f + // before returning, to avoid leaving bad bytes in the file. + + // Copy file to f, but also into h to double-check hash. + if _, err = file.Seek(0, 0); err != nil { + _ = f.Truncate(0) + return err + } + h := sha256.New() + w := io.MultiWriter(f, h) + if _, err = io.CopyN(w, file, size-1); err != nil { + _ = f.Truncate(0) + return err + } + // Check last byte before writing it; writing it will make the size match + // what other processes expect to find and might cause them to start + // using the file. + buf := make([]byte, 1) + if _, err = file.Read(buf); err != nil { + _ = f.Truncate(0) + return err + } + if n, wErr := h.Write(buf); n != len(buf) { + return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr) + } + + sum := h.Sum(nil) + if !bytes.Equal(sum, out[:]) { + _ = f.Truncate(0) + return fmt.Errorf("file content changed underfoot") + } + + // Commit cache file entry. + if _, err = f.Write(buf); err != nil { + _ = f.Truncate(0) + return err + } + if err = f.Close(); err != nil { + // Data might not have been written, + // but file may look like it is the right size. + // To be extra careful, remove cached file. + os.Remove(name) + return err + } + if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", name) + } + + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go new file mode 100644 index 000000000..e8866cb30 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sync" +) + +// Default returns the default cache to use. 
+func Default() (*Cache, error) { + defaultOnce.Do(initDefaultCache) + return defaultCache, defaultDirErr +} + +var ( + defaultOnce sync.Once + defaultCache *Cache +) + +// cacheREADME is a message stored in a README in the cache directory. +// Because the cache lives outside the normal Go trees, we leave the +// README as a courtesy to explain where it came from. +const cacheREADME = `This directory holds cached build artifacts from golangci-lint. +` + +// initDefaultCache does the work of finding the default cache +// the first time Default is called. +func initDefaultCache() { + dir := DefaultDir() + if err := os.MkdirAll(dir, 0744); err != nil { + log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { + // Best effort. + if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { + log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err) + } + } + + c, err := Open(dir) + if err != nil { + log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + defaultCache = c +} + +var ( + defaultDirOnce sync.Once + defaultDir string + defaultDirErr error +) + +// DefaultDir returns the effective GOLANGCI_LINT_CACHE setting. +func DefaultDir() string { + // Save the result of the first call to DefaultDir for later use in + // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that + // subprocesses will inherit it, but that means initDefaultCache can't + // otherwise distinguish between an explicit "off" and a UserCacheDir error. + + defaultDirOnce.Do(func() { + defaultDir = os.Getenv("GOLANGCI_LINT_CACHE") + if filepath.IsAbs(defaultDir) { + return + } + if defaultDir != "" { + defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not an absolute path") + return + } + + // Compute default location. + dir, err := os.UserCacheDir() + if err != nil { + defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not defined and %v", err) + return + } + defaultDir = filepath.Join(dir, "golangci-lint") + }) + + return defaultDir +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go new file mode 100644 index 000000000..4ce79e325 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go @@ -0,0 +1,186 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io" + "os" + "sync" +) + +var debugHash = false // set when GODEBUG=gocachehash=1 + +// HashSize is the number of bytes in a hash. +const HashSize = 32 + +// A Hash provides access to the canonical hash function used to index the cache. +// The current implementation uses salted SHA256, but clients must not assume this. +type Hash struct { + h hash.Hash + name string // for debugging + buf *bytes.Buffer // for verify +} + +// hashSalt is a salt string added to the beginning of every hash +// created by NewHash. Using the golangci-lint version makes sure that different +// versions of the command do not address the same cache +// entries, so that a bug in one version does not affect the execution +// of other versions. This salt will result in additional ActionID files +// in the cache, but not additional copies of the large output files, +// which are still addressed by unsalted SHA256. 
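// Illustrative aside, not part of the vendored file: a minimal sketch of why a
// version-derived salt, mixed in first as NewHash does below, keeps different
// binary versions from addressing the same cache entries. The version strings
// and description are invented.
package main

import (
	"crypto/sha256"
	"fmt"
)

func saltedID(salt []byte, desc string) [32]byte {
	h := sha256.New()
	h.Write(salt)         // the salt goes in first
	h.Write([]byte(desc)) // then the actual key material
	var out [32]byte
	h.Sum(out[:0])
	return out
}

func main() {
	a := saltedID([]byte("v1.42.0"), "run analyzer on pkg/foo")
	b := saltedID([]byte("v1.43.0"), "run analyzer on pkg/foo")
	// Same description, different salt => different action IDs.
	fmt.Printf("%x\n%x\n", a, b)
}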
+var hashSalt []byte + +func SetSalt(b []byte) { + hashSalt = b +} + +// Subkey returns an action ID corresponding to mixing a parent +// action ID with a string description of the subkey. +func Subkey(parent ActionID, desc string) (ActionID, error) { + h := sha256.New() + const subkeyPrefix = "subkey:" + if n, err := h.Write([]byte(subkeyPrefix)); n != len(subkeyPrefix) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of subkey prefix with error %s", n, len(subkeyPrefix), err) + } + if n, err := h.Write(parent[:]); n != len(parent) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of parent with error %s", n, len(parent), err) + } + if n, err := h.Write([]byte(desc)); n != len(desc) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of desc with error %s", n, len(desc), err) + } + + var out ActionID + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) + } + if verify { + hashDebug.Lock() + hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) + hashDebug.Unlock() + } + return out, nil +} + +// NewHash returns a new Hash. +// The caller is expected to Write data to it and then call Sum. +func NewHash(name string) (*Hash, error) { + h := &Hash{h: sha256.New(), name: name} + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) + } + if n, err := h.Write(hashSalt); n != len(hashSalt) { + return nil, fmt.Errorf("wrote %d/%d bytes of hash salt with error %s", n, len(hashSalt), err) + } + if verify { + h.buf = new(bytes.Buffer) + } + return h, nil +} + +// Write writes data to the running hash. +func (h *Hash) Write(b []byte) (int, error) { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) + } + if h.buf != nil { + h.buf.Write(b) + } + return h.h.Write(b) +} + +// Sum returns the hash of the data written previously. +func (h *Hash) Sum() [HashSize]byte { + var out [HashSize]byte + h.h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) + } + if h.buf != nil { + hashDebug.Lock() + if hashDebug.m == nil { + hashDebug.m = make(map[[HashSize]byte]string) + } + hashDebug.m[out] = h.buf.String() + hashDebug.Unlock() + } + return out +} + +// In GODEBUG=gocacheverify=1 mode, +// hashDebug holds the input to every computed hash ID, +// so that we can work backward from the ID involved in a +// cache entry mismatch to a description of what should be there. +var hashDebug struct { + sync.Mutex + m map[[HashSize]byte]string +} + +// reverseHash returns the input used to compute the hash id. +func reverseHash(id [HashSize]byte) string { + hashDebug.Lock() + s := hashDebug.m[id] + hashDebug.Unlock() + return s +} + +var hashFileCache struct { + sync.Mutex + m map[string][HashSize]byte +} + +// FileHash returns the hash of the named file. +// It caches repeated lookups for a given file, +// and the cache entry for a file can be initialized +// using SetFileHash. +// The hash used by FileHash is not the same as +// the hash used by NewHash. 
+func FileHash(file string) ([HashSize]byte, error) { + hashFileCache.Lock() + out, ok := hashFileCache.m[file] + hashFileCache.Unlock() + + if ok { + return out, nil + } + + h := sha256.New() + f, err := os.Open(file) + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + _, err = io.Copy(h, f) + f.Close() + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) + } + + SetFileHash(file, out) + return out, nil +} + +// SetFileHash sets the hash returned by FileHash for file. +func SetFileHash(file string, sum [HashSize]byte) { + hashFileCache.Lock() + if hashFileCache.m == nil { + hashFileCache.m = make(map[string][HashSize]byte) + } + hashFileCache.m[file] = sum + hashFileCache.Unlock() +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go new file mode 100644 index 000000000..5cb86d669 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go @@ -0,0 +1,23 @@ +package errorutil + +import ( + "fmt" +) + +// PanicError can be used to not print stacktrace twice +type PanicError struct { + recovered interface{} + stack []byte +} + +func NewPanicError(recovered interface{}, stack []byte) *PanicError { + return &PanicError{recovered: recovered, stack: stack} +} + +func (e PanicError) Error() string { + return fmt.Sprint(e.recovered) +} + +func (e PanicError) Stack() []byte { + return e.stack +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go new file mode 100644 index 000000000..86007d042 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go @@ -0,0 +1,229 @@ +package pkgcache + +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" + "runtime" + "sort" + "sync" + + "github.com/pkg/errors" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/cache" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +type HashMode int + +const ( + HashModeNeedOnlySelf HashMode = iota + HashModeNeedDirectDeps + HashModeNeedAllDeps +) + +// Cache is a per-package data cache. A cached data is invalidated when +// package or it's dependencies change. 
+type Cache struct { + lowLevelCache *cache.Cache + pkgHashes sync.Map + sw *timeutils.Stopwatch + log logutils.Log // not used now, but may be needed for future debugging purposes + ioSem chan struct{} // semaphore limiting parallel IO +} + +func NewCache(sw *timeutils.Stopwatch, log logutils.Log) (*Cache, error) { + c, err := cache.Default() + if err != nil { + return nil, err + } + return &Cache{ + lowLevelCache: c, + sw: sw, + log: log, + ioSem: make(chan struct{}, runtime.GOMAXPROCS(-1)), + }, nil +} + +func (c *Cache) Trim() { + c.sw.TrackStage("trim", func() { + c.lowLevelCache.Trim() + }) +} + +func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data interface{}) error { + var err error + buf := &bytes.Buffer{} + c.sw.TrackStage("gob", func() { + err = gob.NewEncoder(buf).Encode(data) + }) + if err != nil { + return errors.Wrap(err, "failed to gob encode") + } + + var aID cache.ActionID + + c.sw.TrackStage("key build", func() { + aID, err = c.pkgActionID(pkg, mode) + if err == nil { + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey + } + }) + if err != nil { + return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + } + c.ioSem <- struct{}{} + c.sw.TrackStage("cache io", func() { + err = c.lowLevelCache.PutBytes(aID, buf.Bytes()) + }) + <-c.ioSem + if err != nil { + return errors.Wrapf(err, "failed to save data to low-level cache by key %s for package %s", key, pkg.Name) + } + + return nil +} + +var ErrMissing = errors.New("missing data") + +func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data interface{}) error { + var aID cache.ActionID + var err error + c.sw.TrackStage("key build", func() { + aID, err = c.pkgActionID(pkg, mode) + if err == nil { + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey + } + }) + if err != nil { + return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + } + + var b []byte + c.ioSem <- struct{}{} + c.sw.TrackStage("cache io", func() { + b, _, err = c.lowLevelCache.GetBytes(aID) + }) + <-c.ioSem + if err != nil { + if cache.IsErrMissing(err) { + return ErrMissing + } + return errors.Wrapf(err, "failed to get data from low-level cache by key %s for package %s", key, pkg.Name) + } + + c.sw.TrackStage("gob", func() { + err = gob.NewDecoder(bytes.NewReader(b)).Decode(data) + }) + if err != nil { + return errors.Wrap(err, "failed to gob decode") + } + + return nil +} + +func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) { + hash, err := c.packageHash(pkg, mode) + if err != nil { + return cache.ActionID{}, errors.Wrap(err, "failed to get package hash") + } + + key, err := cache.NewHash("action ID") + if err != nil { + return cache.ActionID{}, errors.Wrap(err, "failed to make a hash") + } + fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) + fmt.Fprintf(key, "pkghash %s\n", hash) + + return key.Sum(), nil +} + +// packageHash computes a package's hash. The hash is based on all Go +// files that make up the package, as well as the hashes of imported +// packages. 
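// Illustrative aside, not part of the vendored file: the gob round trip that
// Put and Get above perform around the low-level byte cache, reduced to a
// standalone sketch. The issue type and values are invented; in the real code
// the encoded bytes are stored under an ActionID derived from the package hash
// plus a caller-provided subkey.
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

type issue struct {
	File string
	Line int
	Text string
}

func main() {
	in := []issue{{File: "main.go", Line: 10, Text: "unused variable"}}

	// Put side: gob-encode the data into a buffer.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		log.Fatal(err)
	}

	// Get side: decode the stored bytes back into the caller's pointer.
	var out []issue
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}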
+func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error) { + type hashResults map[HashMode]string + hashResI, ok := c.pkgHashes.Load(pkg) + if ok { + hashRes := hashResI.(hashResults) + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("no mode %d in hash result", mode) + } + return hashRes[mode], nil + } + + hashRes := hashResults{} + + key, err := cache.NewHash("package hash") + if err != nil { + return "", errors.Wrap(err, "failed to make a hash") + } + + fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) + for _, f := range pkg.CompiledGoFiles { + c.ioSem <- struct{}{} + h, fErr := cache.FileHash(f) + <-c.ioSem + if fErr != nil { + return "", errors.Wrapf(fErr, "failed to calculate file %s hash", f) + } + fmt.Fprintf(key, "file %s %x\n", f, h) + } + curSum := key.Sum() + hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:]) + + imps := make([]*packages.Package, 0, len(pkg.Imports)) + for _, imp := range pkg.Imports { + imps = append(imps, imp) + } + sort.Slice(imps, func(i, j int) bool { + return imps[i].PkgPath < imps[j].PkgPath + }) + + calcDepsHash := func(depMode HashMode) error { + for _, dep := range imps { + if dep.PkgPath == "unsafe" { + continue + } + + depHash, depErr := c.packageHash(dep, depMode) + if depErr != nil { + return errors.Wrapf(depErr, "failed to calculate hash for dependency %s with mode %d", dep.Name, depMode) + } + + fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) + } + return nil + } + + if err := calcDepsHash(HashModeNeedOnlySelf); err != nil { + return "", err + } + + curSum = key.Sum() + hashRes[HashModeNeedDirectDeps] = hex.EncodeToString(curSum[:]) + + if err := calcDepsHash(HashModeNeedAllDeps); err != nil { + return "", err + } + curSum = key.Sum() + hashRes[HashModeNeedAllDeps] = hex.EncodeToString(curSum[:]) + + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("invalid mode %d", mode) + } + + c.pkgHashes.Store(pkg, hashRes) + return hashRes[mode], nil +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go new file mode 100644 index 000000000..fa9d93bf7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go @@ -0,0 +1,93 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package renameio writes files atomically by renaming temporary files. +package renameio + +import ( + "bytes" + "io" + "math/rand" + "os" + "path/filepath" + "strconv" + + "github.com/golangci/golangci-lint/internal/robustio" +) + +const patternSuffix = ".tmp" + +// Pattern returns a glob pattern that matches the unrenamed temporary files +// created when writing to filename. +func Pattern(filename string) string { + return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) +} + +// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// file in the same directory as filename, then renames it atomically to the +// final name. +// +// That ensures that the final location, if it exists, is always a complete file. +func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { + return WriteToFile(filename, bytes.NewReader(data), perm) +} + +// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader +// instead of a slice. 
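// Illustrative aside, not part of the vendored file: a minimal sketch of the
// idea behind packageHash above, where a package's hash mixes in its own file
// hashes plus the recursively computed hashes of its imports, so a change
// anywhere in the dependency graph invalidates the cache. Package and file
// names below are invented.
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

type pkg struct {
	Path    string
	Files   map[string][32]byte // file name -> content hash
	Imports []*pkg
}

func packageHash(p *pkg) [32]byte {
	h := sha256.New()
	fmt.Fprintf(h, "pkgpath %s\n", p.Path)

	// Files in a stable order.
	names := make([]string, 0, len(p.Files))
	for name := range p.Files {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Fprintf(h, "file %s %x\n", name, p.Files[name])
	}

	// Imports in a stable order, each contributing its own hash.
	sort.Slice(p.Imports, func(i, j int) bool { return p.Imports[i].Path < p.Imports[j].Path })
	for _, dep := range p.Imports {
		fmt.Fprintf(h, "import %s %x\n", dep.Path, packageHash(dep))
	}

	var out [32]byte
	h.Sum(out[:0])
	return out
}

func main() {
	dep := &pkg{Path: "example.com/util", Files: map[string][32]byte{"util.go": sha256.Sum256([]byte("package util"))}}
	root := &pkg{Path: "example.com/app", Files: map[string][32]byte{"main.go": sha256.Sum256([]byte("package main"))}, Imports: []*pkg{dep}}
	fmt.Printf("%x\n", packageHash(root))
}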
+func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) { + f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // that. + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := io.Copy(f, data); err != nil { + return err + } + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. + // See https://golang.org/issue/22397#issuecomment-380831736. + if err := f.Sync(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + + return robustio.Rename(f.Name(), filename) +} + +// tempFile creates a new temporary file with given permission bits. +func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) { + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix) + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if os.IsExist(err) { + continue + } + break + } + return +} + +// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that +// may occur if the file is concurrently replaced. +// +// Errors are classified heuristically and retries are bounded, so even this +// function may occasionally return a spurious error on Windows. +// If so, the error will likely wrap one of: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +func ReadFile(filename string) ([]byte, error) { + return robustio.ReadFile(filename) +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go new file mode 100644 index 000000000..76e47ad1f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go @@ -0,0 +1,53 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package robustio wraps I/O functions that are prone to failure on Windows, +// transparently retrying errors up to an arbitrary timeout. +// +// Errors are classified heuristically and retries are bounded, so the functions +// in this package do not completely eliminate spurious errors. However, they do +// significantly reduce the rate of failure in practice. +// +// If so, the error will likely wrap one of: +// The functions in this package do not completely eliminate spurious errors, +// but substantially reduce their rate of occurrence in practice. +package robustio + +// Rename is like os.Rename, but on Windows retries errors that may occur if the +// file is concurrently read or overwritten. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func Rename(oldpath, newpath string) error { + return rename(oldpath, newpath) +} + +// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may +// occur if the file is concurrently replaced. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) 
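// Illustrative aside, not part of the vendored file: the write-to-temp, sync,
// close, rename sequence used by renameio.WriteToFile above, so the final path
// only ever holds a complete file. A minimal standalone sketch; the file name
// and contents are placeholders.
package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

func writeAtomically(filename string, data []byte) error {
	// Create the temporary file in the target directory so the final
	// rename stays on the same filesystem.
	tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".tmp")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless after a successful rename

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	// Sync before renaming so a crash cannot expose an empty file.
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}

func main() {
	if err := writeAtomically("trim.txt", []byte("1630670877\n")); err != nil {
		log.Fatal(err)
	}
}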
+func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. +func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go new file mode 100644 index 000000000..1ac0d10d7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "os" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + switch werr := err.(type) { + case *os.PathError: + err = werr.Err + case *os.LinkError: + err = werr.Err + case *os.SyscallError: + err = werr.Err + + } + if errno, ok := err.(syscall.Errno); ok { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go new file mode 100644 index 000000000..e0bf5b9b3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go @@ -0,0 +1,93 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows darwin + +package robustio + +import ( + "io/ioutil" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 500 * time.Millisecond + +const ERROR_SHARING_VIOLATION = 32 + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. 
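// Illustrative aside, not part of the vendored file: the bounded, jittered
// retry loop from retry above, reduced to its core (the real one also tracks
// the lowest errno seen to pick the most useful error to report). The flaky
// operation here is simulated.
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

func retrySketch(f func() (err error, mayRetry bool)) error {
	const timeout = 500 * time.Millisecond
	start := time.Now()
	sleep := 1 * time.Millisecond
	for {
		err, mayRetry := f()
		if err == nil || !mayRetry {
			return err
		}
		if time.Since(start)+sleep >= timeout {
			return err // give up and report the last error
		}
		time.Sleep(sleep)
		// Roughly double the delay, with jitter, like the vendored retry.
		sleep += time.Duration(rand.Int63n(int64(sleep)))
	}
}

func main() {
	attempts := 0
	err := retrySketch(func() (error, bool) {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure"), true
		}
		return nil, false
	})
	fmt.Println(attempts, err)
}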
+// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like ioutil.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = ioutil.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + + return err, isEphemeralError(err) && err != errFileNotFound + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go new file mode 100644 index 000000000..a2428856f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !windows,!darwin + +package robustio + +import ( + "io/ioutil" + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go new file mode 100644 index 000000000..a35237d44 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "os" + "syscall" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. 
+func isEphemeralError(err error) bool { + switch werr := err.(type) { + case *os.PathError: + err = werr.Err + case *os.LinkError: + err = werr.Err + case *os.SyscallError: + err = werr.Err + } + if errno, ok := err.(syscall.Errno); ok { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go new file mode 100644 index 000000000..359e2d63c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go @@ -0,0 +1,84 @@ +package commands + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/internal/cache" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +func (e *Executor) initCache() { + cacheCmd := &cobra.Command{ + Use: "cache", + Short: "Cache control and information", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run cache: %s", err) + } + }, + } + e.rootCmd.AddCommand(cacheCmd) + + cacheCmd.AddCommand(&cobra.Command{ + Use: "clean", + Short: "Clean cache", + Run: e.executeCleanCache, + }) + cacheCmd.AddCommand(&cobra.Command{ + Use: "status", + Short: "Show cache status", + Run: e.executeCacheStatus, + }) + + // TODO: add trim command? +} + +func (e *Executor) executeCleanCache(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache clean") + } + + cacheDir := cache.DefaultDir() + if err := os.RemoveAll(cacheDir); err != nil { + e.log.Fatalf("Failed to remove dir %s: %s", cacheDir, err) + } + + os.Exit(0) +} + +func (e *Executor) executeCacheStatus(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache status") + } + + cacheDir := cache.DefaultDir() + fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + cacheSizeBytes, err := dirSizeBytes(cacheDir) + if err == nil { + fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) + } + + os.Exit(0) +} + +func dirSizeBytes(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return err + }) + return size, err +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go new file mode 100644 index 000000000..4b63e2e52 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go @@ -0,0 +1,66 @@ +package commands + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/fsutils" +) + +func (e *Executor) initConfig() { + cmd := &cobra.Command{ + Use: "config", + Short: "Config", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint config") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run help: %s", err) + } + }, + } + e.rootCmd.AddCommand(cmd) + + pathCmd := &cobra.Command{ + Use: "path", + Short: "Print used config path", + Run: e.executePathCmd, + } + 
e.initRunConfiguration(pathCmd) // allow --config + cmd.AddCommand(pathCmd) +} + +func (e *Executor) getUsedConfig() string { + usedConfigFile := viper.ConfigFileUsed() + if usedConfigFile == "" { + return "" + } + + prettyUsedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") + if err != nil { + e.log.Warnf("Can't pretty print config file path: %s", err) + return usedConfigFile + } + + return prettyUsedConfigFile +} + +func (e *Executor) executePathCmd(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint config path") + } + + usedConfigFile := e.getUsedConfig() + if usedConfigFile == "" { + e.log.Warnf("No config file detected") + os.Exit(exitcodes.NoConfigFileDetected) + } + + fmt.Println(usedConfigFile) + os.Exit(0) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go new file mode 100644 index 000000000..3edb6e4b0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go @@ -0,0 +1,250 @@ +package commands + +import ( + "bytes" + "context" + "crypto/sha256" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/fatih/color" + "github.com/gofrs/flock" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v3" + + "github.com/golangci/golangci-lint/internal/cache" + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/goutil" + "github.com/golangci/golangci-lint/pkg/lint" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/report" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +type Executor struct { + rootCmd *cobra.Command + runCmd *cobra.Command + lintersCmd *cobra.Command + + exitCode int + version, commit, date string + + cfg *config.Config + log logutils.Log + reportData report.Data + DBManager *lintersdb.Manager + EnabledLintersSet *lintersdb.EnabledSet + contextLoader *lint.ContextLoader + goenv *goutil.Env + fileCache *fsutils.FileCache + lineCache *fsutils.LineCache + pkgCache *pkgcache.Cache + debugf logutils.DebugFunc + sw *timeutils.Stopwatch + + loadGuard *load.Guard + flock *flock.Flock +} + +func NewExecutor(version, commit, date string) *Executor { + startedAt := time.Now() + e := &Executor{ + cfg: config.NewDefault(), + version: version, + commit: commit, + date: date, + DBManager: lintersdb.NewManager(nil, nil), + debugf: logutils.Debug("exec"), + } + + e.debugf("Starting execution...") + e.log = report.NewLogWrapper(logutils.NewStderrLog(""), &e.reportData) + + // to setup log level early we need to parse config from command line extra time to + // find `-v` option + commandLineCfg, err := e.getConfigForCommandLine() + if err != nil && err != pflag.ErrHelp { + e.log.Fatalf("Can't get config for command line: %s", err) + } + if commandLineCfg != nil { + logutils.SetupVerboseLog(e.log, commandLineCfg.Run.IsVerbose) + + switch commandLineCfg.Output.Color { + case "always": + color.NoColor = false + case "never": + color.NoColor = true + case "auto": + // nothing + default: + e.log.Fatalf("invalid value %q for --color; must be 'always', 'auto', or 'never'", commandLineCfg.Output.Color) + } + } + + // init of commands must be done 
before config file reading because + // init sets config with the default values of flags + e.initRoot() + e.initRun() + e.initHelp() + e.initLinters() + e.initConfig() + e.initVersion() + e.initCache() + + // init e.cfg by values from config: flags parse will see these values + // like the default ones. It will overwrite them only if the same option + // is found in command-line: it's ok, command-line has higher priority. + + r := config.NewFileReader(e.cfg, commandLineCfg, e.log.Child("config_reader")) + if err = r.Read(); err != nil { + e.log.Fatalf("Can't read config: %s", err) + } + + // recreate after getting config + e.DBManager = lintersdb.NewManager(e.cfg, e.log).WithCustomLinters() + + e.cfg.LintersSettings.Gocritic.InferEnabledChecks(e.log) + if err = e.cfg.LintersSettings.Gocritic.Validate(e.log); err != nil { + e.log.Fatalf("Invalid gocritic settings: %s", err) + } + + // Slice options must be explicitly set for proper merging of config and command-line options. + fixSlicesFlags(e.runCmd.Flags()) + fixSlicesFlags(e.lintersCmd.Flags()) + + e.EnabledLintersSet = lintersdb.NewEnabledSet(e.DBManager, + lintersdb.NewValidator(e.DBManager), e.log.Child("lintersdb"), e.cfg) + e.goenv = goutil.NewEnv(e.log.Child("goenv")) + e.fileCache = fsutils.NewFileCache() + e.lineCache = fsutils.NewLineCache(e.fileCache) + + e.sw = timeutils.NewStopwatch("pkgcache", e.log.Child("stopwatch")) + e.pkgCache, err = pkgcache.NewCache(e.sw, e.log.Child("pkgcache")) + if err != nil { + e.log.Fatalf("Failed to build packages cache: %s", err) + } + e.loadGuard = load.NewGuard() + e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, + e.lineCache, e.fileCache, e.pkgCache, e.loadGuard) + if err = e.initHashSalt(version); err != nil { + e.log.Fatalf("Failed to init hash salt: %s", err) + } + e.debugf("Initialized executor in %s", time.Since(startedAt)) + return e +} + +func (e *Executor) Execute() error { + return e.rootCmd.Execute() +} + +func (e *Executor) initHashSalt(version string) error { + binSalt, err := computeBinarySalt(version) + if err != nil { + return errors.Wrap(err, "failed to calculate binary salt") + } + + configSalt, err := computeConfigSalt(e.cfg) + if err != nil { + return errors.Wrap(err, "failed to calculate config salt") + } + + var b bytes.Buffer + b.Write(binSalt) + b.Write(configSalt) + cache.SetSalt(b.Bytes()) + return nil +} + +func computeBinarySalt(version string) ([]byte, error) { + if version != "" && version != "(devel)" { + return []byte(version), nil + } + + if logutils.HaveDebugTag("bin_salt") { + return []byte("debug"), nil + } + + p, err := os.Executable() + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +func computeConfigSalt(cfg *config.Config) ([]byte, error) { + // We don't hash all config fields to reduce meaningless cache + // invalidations. At least, it has a huge impact on tests speed. 
+ + lintersSettingsBytes, err := yaml.Marshal(cfg.LintersSettings) + if err != nil { + return nil, errors.Wrap(err, "failed to json marshal config linter settings") + } + + var configData bytes.Buffer + configData.WriteString("linters-settings=") + configData.Write(lintersSettingsBytes) + configData.WriteString("\nbuild-tags=%s" + strings.Join(cfg.Run.BuildTags, ",")) + + h := sha256.New() + if _, err := h.Write(configData.Bytes()); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +func (e *Executor) acquireFileLock() bool { + if e.cfg.Run.AllowParallelRunners { + e.debugf("Parallel runners are allowed, no locking") + return true + } + + lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock") + e.debugf("Locking on file %s...", lockFile) + f := flock.New(lockFile) + const retryDelay = time.Second + + ctx := context.Background() + if !e.cfg.Run.AllowSerialRunners { + const totalTimeout = 5 * time.Second + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, totalTimeout) + defer cancel() + } + if ok, _ := f.TryLockContext(ctx, retryDelay); !ok { + return false + } + + e.flock = f + return true +} + +func (e *Executor) releaseFileLock() { + if e.cfg.Run.AllowParallelRunners { + return + } + + if err := e.flock.Unlock(); err != nil { + e.debugf("Failed to unlock on file: %s", err) + } + if err := os.Remove(e.flock.Path()); err != nil { + e.debugf("Failed to remove lock file: %s", err) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go new file mode 100644 index 000000000..ef276481c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -0,0 +1,92 @@ +package commands + +import ( + "fmt" + "os" + "sort" + "strings" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +func (e *Executor) initHelp() { + helpCmd := &cobra.Command{ + Use: "help", + Short: "Help", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint help") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run help: %s", err) + } + }, + } + e.rootCmd.SetHelpCommand(helpCmd) + + lintersHelpCmd := &cobra.Command{ + Use: "linters", + Short: "Help about linters", + Run: e.executeLintersHelp, + } + helpCmd.AddCommand(lintersHelpCmd) +} + +func printLinterConfigs(lcs []*linter.Config) { + sort.Slice(lcs, func(i, j int) bool { + return strings.Compare(lcs[i].Name(), lcs[j].Name()) < 0 + }) + for _, lc := range lcs { + altNamesStr := "" + if len(lc.AlternativeNames) != 0 { + altNamesStr = fmt.Sprintf(" (%s)", strings.Join(lc.AlternativeNames, ", ")) + } + + // If the linter description spans multiple lines, truncate everything following the first newline + linterDescription := lc.Linter.Desc() + firstNewline := strings.IndexRune(linterDescription, '\n') + if firstNewline > 0 { + linterDescription = linterDescription[:firstNewline] + } + + fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", color.YellowString(lc.Name()), + altNamesStr, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) + } +} + +func (e *Executor) executeLintersHelp(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint help linters") + } + + var enabledLCs, disabledLCs []*linter.Config + for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + if lc.EnabledByDefault 
{ + enabledLCs = append(enabledLCs, lc) + } else { + disabledLCs = append(disabledLCs, lc) + } + } + + color.Green("Enabled by default linters:\n") + printLinterConfigs(enabledLCs) + color.Red("\nDisabled by default linters:\n") + printLinterConfigs(disabledLCs) + + color.Green("\nLinters presets:") + for _, p := range e.DBManager.AllPresets() { + linters := e.DBManager.GetAllLinterConfigsForPreset(p) + linterNames := []string{} + for _, lc := range linters { + linterNames = append(linterNames, lc.Name()) + } + sort.Strings(linterNames) + fmt.Fprintf(logutils.StdOut, "%s: %s\n", color.YellowString(p), strings.Join(linterNames, ", ")) + } + + os.Exit(0) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go new file mode 100644 index 000000000..873dab817 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go @@ -0,0 +1,51 @@ +package commands + +import ( + "log" + "os" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +func (e *Executor) initLinters() { + e.lintersCmd = &cobra.Command{ + Use: "linters", + Short: "List current linters configuration", + Run: e.executeLinters, + } + e.rootCmd.AddCommand(e.lintersCmd) + e.initRunConfiguration(e.lintersCmd) +} + +func (e *Executor) executeLinters(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint linters") + } + + enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap() + if err != nil { + log.Fatalf("Can't get enabled linters: %s", err) + } + + color.Green("Enabled by your configuration linters:\n") + enabledLinters := make([]*linter.Config, 0, len(enabledLintersMap)) + for _, linter := range enabledLintersMap { + enabledLinters = append(enabledLinters, linter) + } + printLinterConfigs(enabledLinters) + + var disabledLCs []*linter.Config + for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + if enabledLintersMap[lc.Name()] == nil { + disabledLCs = append(disabledLCs, lc) + } + } + + color.Red("\nDisabled by your configuration linters:\n") + printLinterConfigs(disabledLCs) + + os.Exit(0) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go new file mode 100644 index 000000000..f90df9901 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go @@ -0,0 +1,165 @@ +package commands + +import ( + "fmt" + "os" + "runtime" + "runtime/pprof" + "runtime/trace" + "strconv" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) { + if e.cfg.Run.PrintVersion { + fmt.Fprintf(logutils.StdOut, "golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date) + os.Exit(0) + } + + runtime.GOMAXPROCS(e.cfg.Run.Concurrency) + + if e.cfg.Run.CPUProfilePath != "" { + f, err := os.Create(e.cfg.Run.CPUProfilePath) + if err != nil { + e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.CPUProfilePath, err) + } + if err := pprof.StartCPUProfile(f); err != nil { + e.log.Fatalf("Can't start CPU profiling: %s", err) + } + } + + if e.cfg.Run.MemProfilePath != "" { + if rate := os.Getenv("GL_MEMPROFILE_RATE"); rate != "" { + runtime.MemProfileRate, _ = strconv.Atoi(rate) + } + } + + if e.cfg.Run.TracePath != "" { + f, err 
:= os.Create(e.cfg.Run.TracePath) + if err != nil { + e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.TracePath, err) + } + if err = trace.Start(f); err != nil { + e.log.Fatalf("Can't start tracing: %s", err) + } + } +} + +func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { + if e.cfg.Run.CPUProfilePath != "" { + pprof.StopCPUProfile() + } + if e.cfg.Run.MemProfilePath != "" { + f, err := os.Create(e.cfg.Run.MemProfilePath) + if err != nil { + e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.MemProfilePath, err) + } + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + printMemStats(&ms, e.log) + + if err := pprof.WriteHeapProfile(f); err != nil { + e.log.Fatalf("Can't write heap profile: %s", err) + } + f.Close() + } + if e.cfg.Run.TracePath != "" { + trace.Stop() + } + + os.Exit(e.exitCode) +} + +func printMemStats(ms *runtime.MemStats, logger logutils.Log) { + logger.Infof("Mem stats: alloc=%s total_alloc=%s sys=%s "+ + "heap_alloc=%s heap_sys=%s heap_idle=%s heap_released=%s heap_in_use=%s "+ + "stack_in_use=%s stack_sys=%s "+ + "mspan_sys=%s mcache_sys=%s buck_hash_sys=%s gc_sys=%s other_sys=%s "+ + "mallocs_n=%d frees_n=%d heap_objects_n=%d gc_cpu_fraction=%.2f", + formatMemory(ms.Alloc), formatMemory(ms.TotalAlloc), formatMemory(ms.Sys), + formatMemory(ms.HeapAlloc), formatMemory(ms.HeapSys), + formatMemory(ms.HeapIdle), formatMemory(ms.HeapReleased), formatMemory(ms.HeapInuse), + formatMemory(ms.StackInuse), formatMemory(ms.StackSys), + formatMemory(ms.MSpanSys), formatMemory(ms.MCacheSys), formatMemory(ms.BuckHashSys), + formatMemory(ms.GCSys), formatMemory(ms.OtherSys), + ms.Mallocs, ms.Frees, ms.HeapObjects, ms.GCCPUFraction) +} + +func formatMemory(memBytes uint64) string { + const Kb = 1024 + const Mb = Kb * 1024 + + if memBytes < Kb { + return fmt.Sprintf("%db", memBytes) + } + if memBytes < Mb { + return fmt.Sprintf("%dkb", memBytes/Kb) + } + return fmt.Sprintf("%dmb", memBytes/Mb) +} + +func getDefaultConcurrency() int { + if os.Getenv("HELP_RUN") == "1" { + // Make stable concurrency for README help generating builds. + const prettyConcurrency = 8 + return prettyConcurrency + } + + return runtime.NumCPU() +} + +func (e *Executor) initRoot() { + rootCmd := &cobra.Command{ + Use: "golangci-lint", + Short: "golangci-lint is a smart linters runner.", + Long: `Smart, fast linters runner. 
Run it in cloud for every GitHub pull request on https://golangci.com`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run help: %s", err) + } + }, + PersistentPreRun: e.persistentPreRun, + PersistentPostRun: e.persistentPostRun, + } + + initRootFlagSet(rootCmd.PersistentFlags(), e.cfg, e.needVersionOption()) + e.rootCmd = rootCmd +} + +func (e *Executor) needVersionOption() bool { + return e.date != "" +} + +func initRootFlagSet(fs *pflag.FlagSet, cfg *config.Config, needVersionOption bool) { + fs.BoolVarP(&cfg.Run.IsVerbose, "verbose", "v", false, wh("verbose output")) + + var silent bool + fs.BoolVarP(&silent, "silent", "s", false, wh("disables congrats outputs")) + if err := fs.MarkHidden("silent"); err != nil { + panic(err) + } + err := fs.MarkDeprecated("silent", + "now golangci-lint by default is silent: it doesn't print Congrats message") + if err != nil { + panic(err) + } + + fs.StringVar(&cfg.Run.CPUProfilePath, "cpu-profile-path", "", wh("Path to CPU profile output file")) + fs.StringVar(&cfg.Run.MemProfilePath, "mem-profile-path", "", wh("Path to memory profile output file")) + fs.StringVar(&cfg.Run.TracePath, "trace-path", "", wh("Path to trace output file")) + fs.IntVarP(&cfg.Run.Concurrency, "concurrency", "j", getDefaultConcurrency(), wh("Concurrency (default NumCPU)")) + if needVersionOption { + fs.BoolVar(&cfg.Run.PrintVersion, "version", false, wh("Print version")) + } + + fs.StringVar(&cfg.Output.Color, "color", "auto", wh("Use color when printing; can be 'always', 'auto', or 'never'")) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go new file mode 100644 index 000000000..271fffe94 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -0,0 +1,566 @@ +package commands + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "runtime" + "strings" + "time" + + "github.com/fatih/color" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/lint" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/packages" + "github.com/golangci/golangci-lint/pkg/printers" + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/golangci-lint/pkg/result/processors" +) + +func getDefaultIssueExcludeHelp() string { + parts := []string{"Use or not use default excludes:"} + for _, ep := range config.DefaultExcludePatterns { + parts = append(parts, + fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why), + fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)), + "", + ) + } + return strings.Join(parts, "\n") +} + +func getDefaultDirectoryExcludeHelp() string { + parts := []string{"Use or not use default excluded directories:"} + for _, dir := range packages.StdExcludeDirRegexps { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) + } + parts = append(parts, "") + return strings.Join(parts, "\n") +} + +func wh(text string) string { + return color.GreenString(text) +} + +const defaultTimeout = time.Minute + +//nolint:funlen +func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) { + hideFlag := func(name string) { 
+ if err := fs.MarkHidden(name); err != nil { + panic(err) + } + + // we run initFlagSet multiple times, but we wouldn't like to see deprecation message multiple times + if isFinalInit { + const deprecateMessage = "flag will be removed soon, please, use .golangci.yml config" + if err := fs.MarkDeprecated(name, deprecateMessage); err != nil { + panic(err) + } + } + } + + // Output config + oc := &cfg.Output + fs.StringVar(&oc.Format, "out-format", + config.OutFormatColoredLineNumber, + wh(fmt.Sprintf("Format of output: %s", strings.Join(config.OutFormats, "|")))) + fs.BoolVar(&oc.PrintIssuedLine, "print-issued-lines", true, wh("Print lines of code with issue")) + fs.BoolVar(&oc.PrintLinterName, "print-linter-name", true, wh("Print linter name in issue line")) + fs.BoolVar(&oc.UniqByLine, "uniq-by-line", true, wh("Make issues output unique by line")) + fs.BoolVar(&oc.SortResults, "sort-results", false, wh("Sort linter results")) + fs.BoolVar(&oc.PrintWelcomeMessage, "print-welcome", false, wh("Print welcome message")) + fs.StringVar(&oc.PathPrefix, "path-prefix", "", wh("Path prefix to add to output")) + hideFlag("print-welcome") // no longer used + + fs.BoolVar(&cfg.InternalCmdTest, "internal-cmd-test", false, wh("Option is used only for testing golangci-lint command, don't use it")) + if err := fs.MarkHidden("internal-cmd-test"); err != nil { + panic(err) + } + + // Run config + rc := &cfg.Run + fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "", + "Modules download mode. If not empty, passed as -mod= to go tools") + fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code", + exitcodes.IssuesFound, wh("Exit code when issues were found")) + fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags")) + + fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work")) + if err := fs.MarkHidden("deadline"); err != nil { + panic(err) + } + fs.DurationVar(&rc.Timeout, "timeout", defaultTimeout, wh("Timeout for total work")) + + fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)")) + fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false, + wh("Print avg and max memory usage of golangci-lint and total time")) + fs.StringVarP(&rc.Config, "config", "c", "", wh("Read config from file path `PATH`")) + fs.BoolVar(&rc.NoConfig, "no-config", false, wh("Don't read config")) + fs.StringSliceVar(&rc.SkipDirs, "skip-dirs", nil, wh("Regexps of directories to skip")) + fs.BoolVar(&rc.UseDefaultSkipDirs, "skip-dirs-use-default", true, getDefaultDirectoryExcludeHelp()) + fs.StringSliceVar(&rc.SkipFiles, "skip-files", nil, wh("Regexps of files to skip")) + + const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " + + "If false (default) - golangci-lint acquires file lock on start." + fs.BoolVar(&rc.AllowParallelRunners, "allow-parallel-runners", false, wh(allowParallelDesc)) + const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " + + "If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start." + fs.BoolVar(&rc.AllowSerialRunners, "allow-serial-runners", false, wh(allowSerialDesc)) + + // Linters settings config + lsc := &cfg.LintersSettings + + // Hide all linters settings flags: they were initially visible, + // but when number of linters started to grow it became obvious that + // we can't fill 90% of flags by linters settings: common flags became hard to find. 
+ // New linters settings should be done only through config file. + fs.BoolVar(&lsc.Errcheck.CheckTypeAssertions, "errcheck.check-type-assertions", + false, "Errcheck: check for ignored type assertion results") + hideFlag("errcheck.check-type-assertions") + fs.BoolVar(&lsc.Errcheck.CheckAssignToBlank, "errcheck.check-blank", false, + "Errcheck: check for errors assigned to blank identifier: _ = errFunc()") + hideFlag("errcheck.check-blank") + fs.StringVar(&lsc.Errcheck.Exclude, "errcheck.exclude", "", + "Path to a file containing a list of functions to exclude from checking") + hideFlag("errcheck.exclude") + fs.StringVar(&lsc.Errcheck.Ignore, "errcheck.ignore", "fmt:.*", + `Comma-separated list of pairs of the form pkg:regex. The regex is used to ignore names within pkg`) + hideFlag("errcheck.ignore") + + fs.BoolVar(&lsc.Govet.CheckShadowing, "govet.check-shadowing", false, + "Govet: check for shadowed variables") + hideFlag("govet.check-shadowing") + + fs.Float64Var(&lsc.Golint.MinConfidence, "golint.min-confidence", 0.8, + "Golint: minimum confidence of a problem to print it") + hideFlag("golint.min-confidence") + + fs.BoolVar(&lsc.Gofmt.Simplify, "gofmt.simplify", true, "Gofmt: simplify code") + hideFlag("gofmt.simplify") + + fs.IntVar(&lsc.Gocyclo.MinComplexity, "gocyclo.min-complexity", + 30, "Minimal complexity of function to report it") + hideFlag("gocyclo.min-complexity") + + fs.BoolVar(&lsc.Maligned.SuggestNewOrder, "maligned.suggest-new", false, + "Maligned: print suggested more optimal struct fields ordering") + hideFlag("maligned.suggest-new") + + fs.IntVar(&lsc.Dupl.Threshold, "dupl.threshold", + 150, "Dupl: Minimal threshold to detect copy-paste") + hideFlag("dupl.threshold") + + fs.BoolVar(&lsc.Goconst.MatchWithConstants, "goconst.match-constant", + true, "Goconst: look for existing constants matching the values") + hideFlag("goconst.match-constant") + fs.IntVar(&lsc.Goconst.MinStringLen, "goconst.min-len", + 3, "Goconst: minimum constant string length") + hideFlag("goconst.min-len") + fs.IntVar(&lsc.Goconst.MinOccurrencesCount, "goconst.min-occurrences", + 3, "Goconst: minimum occurrences of constant string count to trigger issue") + hideFlag("goconst.min-occurrences") + fs.BoolVar(&lsc.Goconst.ParseNumbers, "goconst.numbers", + false, "Goconst: search also for duplicated numbers") + hideFlag("goconst.numbers") + fs.IntVar(&lsc.Goconst.NumberMin, "goconst.min", + 3, "minimum value, only works with goconst.numbers") + hideFlag("goconst.min") + fs.IntVar(&lsc.Goconst.NumberMax, "goconst.max", + 3, "maximum value, only works with goconst.numbers") + hideFlag("goconst.max") + fs.BoolVar(&lsc.Goconst.IgnoreCalls, "goconst.ignore-calls", + true, "Goconst: ignore when constant is not used as function argument") + hideFlag("goconst.ignore-calls") + + // (@dixonwille) These flag is only used for testing purposes. 
+ fs.StringSliceVar(&lsc.Depguard.Packages, "depguard.packages", nil, + "Depguard: packages to add to the list") + hideFlag("depguard.packages") + + fs.BoolVar(&lsc.Depguard.IncludeGoRoot, "depguard.include-go-root", false, + "Depguard: check list against standard lib") + hideFlag("depguard.include-go-root") + + fs.IntVar(&lsc.Lll.TabWidth, "lll.tab-width", 1, + "Lll: tab width in spaces") + hideFlag("lll.tab-width") + + // Linters config + lc := &cfg.Linters + fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter")) + fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter")) + fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters")) + if err := fs.MarkHidden("enable-all"); err != nil { + panic(err) + } + + fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters")) + fs.StringSliceVarP(&lc.Presets, "presets", "p", nil, + wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+ + "them. This option implies option --disable-all", strings.Join(m.AllPresets(), "|")))) + fs.BoolVar(&lc.Fast, "fast", false, wh("Run only fast linters from enabled linters set (first run won't be fast)")) + + // Issues config + ic := &cfg.Issues + fs.StringSliceVarP(&ic.ExcludePatterns, "exclude", "e", nil, wh("Exclude issue by regexp")) + fs.BoolVar(&ic.UseDefaultExcludes, "exclude-use-default", true, getDefaultIssueExcludeHelp()) + fs.BoolVar(&ic.ExcludeCaseSensitive, "exclude-case-sensitive", false, wh("If set to true exclude "+ + "and exclude rules regular expressions are case sensitive")) + + fs.IntVar(&ic.MaxIssuesPerLinter, "max-issues-per-linter", 50, + wh("Maximum issues count per one linter. Set to 0 to disable")) + fs.IntVar(&ic.MaxSameIssues, "max-same-issues", 3, + wh("Maximum count of issues with the same text. Set to 0 to disable")) + + fs.BoolVarP(&ic.Diff, "new", "n", false, + wh("Show only new issues: if there are unstaged changes or untracked files, only those changes "+ + "are analyzed, else only changes in HEAD~ are analyzed.\nIt's a super-useful option for integration "+ + "of golangci-lint into existing large codebase.\nIt's not practical to fix all existing issues at "+ + "the moment of integration: much better to not allow issues in new code.\nFor CI setups, prefer "+ + "--new-from-rev=HEAD~, as --new can skip linting the current patch if any scripts generate "+ + "unstaged files before golangci-lint runs.")) + fs.StringVar(&ic.DiffFromRevision, "new-from-rev", "", + wh("Show only new issues created after git revision `REV`")) + fs.StringVar(&ic.DiffPatchFilePath, "new-from-patch", "", + wh("Show only new issues created in git patch with file path `PATH`")) + fs.BoolVar(&ic.NeedFix, "fix", false, "Fix found issues (if it's supported by the linter)") +} + +func (e *Executor) initRunConfiguration(cmd *cobra.Command) { + fs := cmd.Flags() + fs.SortFlags = false // sort them as they are defined here + initFlagSet(fs, e.cfg, e.DBManager, true) +} + +func (e *Executor) getConfigForCommandLine() (*config.Config, error) { + // We use another pflag.FlagSet here to not set `changed` flag + // on cmd.Flags() options. Otherwise string slice options will be duplicated. + fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError) + + var cfg config.Config + // Don't do `fs.AddFlagSet(cmd.Flags())` because it shares flags representations: + // `changed` variable inside string slice vars will be shared. 
+ // Use another config variable here, not e.cfg, to not + // affect main parsing by this parsing of only config option. + initFlagSet(fs, &cfg, e.DBManager, false) + initVersionFlagSet(fs, &cfg) + + // Parse max options, even force version option: don't want + // to get access to Executor here: it's error-prone to use + // cfg vs e.cfg. + initRootFlagSet(fs, &cfg, true) + + fs.Usage = func() {} // otherwise help text will be printed twice + if err := fs.Parse(os.Args); err != nil { + if err == pflag.ErrHelp { + return nil, err + } + + return nil, fmt.Errorf("can't parse args: %s", err) + } + + return &cfg, nil +} + +func (e *Executor) initRun() { + e.runCmd = &cobra.Command{ + Use: "run", + Short: "Run the linters", + Run: e.executeRun, + PreRun: func(_ *cobra.Command, _ []string) { + if ok := e.acquireFileLock(); !ok { + e.log.Fatalf("Parallel golangci-lint is running") + } + }, + PostRun: func(_ *cobra.Command, _ []string) { + e.releaseFileLock() + }, + } + e.rootCmd.AddCommand(e.runCmd) + + e.runCmd.SetOut(logutils.StdOut) // use custom output to properly color it in Windows terminals + e.runCmd.SetErr(logutils.StdErr) + + e.initRunConfiguration(e.runCmd) +} + +func fixSlicesFlags(fs *pflag.FlagSet) { + // It's a dirty hack to set flag.Changed to true for every string slice flag. + // It's necessary to merge config and command-line slices: otherwise command-line + // flags will always overwrite ones from the config. + fs.VisitAll(func(f *pflag.Flag) { + if f.Value.Type() != "stringSlice" { + return + } + + s, err := fs.GetStringSlice(f.Name) + if err != nil { + return + } + + if s == nil { // assume that every string slice flag has nil as the default + return + } + + var safe []string + for _, v := range s { + // add quotes to escape comma because spf13/pflag use a CSV parser: + // https://github.com/spf13/pflag/blob/85dd5c8bc61cfa382fecd072378089d4e856579d/string_slice.go#L43 + safe = append(safe, `"`+v+`"`) + } + + // calling Set sets Changed to true: next Set calls will append, not overwrite + _ = f.Value.Set(strings.Join(safe, ",")) + }) +} + +func (e *Executor) runAnalysis(ctx context.Context, args []string) ([]result.Issue, error) { + e.cfg.Run.Args = args + + lintersToRun, err := e.EnabledLintersSet.GetOptimizedLinters() + if err != nil { + return nil, err + } + + enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap() + if err != nil { + return nil, err + } + + for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + isEnabled := enabledLintersMap[lc.Name()] != nil + e.reportData.AddLinter(lc.Name(), isEnabled, lc.EnabledByDefault) + } + + lintCtx, err := e.contextLoader.Load(ctx, lintersToRun) + if err != nil { + return nil, errors.Wrap(err, "context loading failed") + } + lintCtx.Log = e.log.Child("linters context") + + runner, err := lint.NewRunner(e.cfg, e.log.Child("runner"), + e.goenv, e.EnabledLintersSet, e.lineCache, e.DBManager, lintCtx.Packages) + if err != nil { + return nil, err + } + + issues, err := runner.Run(ctx, lintersToRun, lintCtx) + if err != nil { + return nil, err + } + + fixer := processors.NewFixer(e.cfg, e.log, e.fileCache) + return fixer.Process(issues), nil +} + +func (e *Executor) setOutputToDevNull() (savedStdout, savedStderr *os.File) { + savedStdout, savedStderr = os.Stdout, os.Stderr + devNull, err := os.Open(os.DevNull) + if err != nil { + e.log.Warnf("Can't open null device %q: %s", os.DevNull, err) + return + } + + os.Stdout, os.Stderr = devNull, devNull + return +} + +func (e *Executor) setExitCodeIfIssuesFound(issues 
[]result.Issue) { + if len(issues) != 0 { + e.exitCode = e.cfg.Run.ExitCodeIfIssuesFound + } +} + +func (e *Executor) runAndPrint(ctx context.Context, args []string) error { + if err := e.goenv.Discover(ctx); err != nil { + e.log.Warnf("Failed to discover go env: %s", err) + } + + if !logutils.HaveDebugTag("linters_output") { + // Don't allow linters and loader to print anything + log.SetOutput(ioutil.Discard) + savedStdout, savedStderr := e.setOutputToDevNull() + defer func() { + os.Stdout, os.Stderr = savedStdout, savedStderr + }() + } + + issues, err := e.runAnalysis(ctx, args) + if err != nil { + return err // XXX: don't loose type + } + + p, err := e.createPrinter() + if err != nil { + return err + } + + e.setExitCodeIfIssuesFound(issues) + + if err = p.Print(ctx, issues); err != nil { + return fmt.Errorf("can't print %d issues: %s", len(issues), err) + } + + e.fileCache.PrintStats(e.log) + + return nil +} + +func (e *Executor) createPrinter() (printers.Printer, error) { + var p printers.Printer + format := e.cfg.Output.Format + switch format { + case config.OutFormatJSON: + p = printers.NewJSON(&e.reportData) + case config.OutFormatColoredLineNumber, config.OutFormatLineNumber: + p = printers.NewText(e.cfg.Output.PrintIssuedLine, + format == config.OutFormatColoredLineNumber, e.cfg.Output.PrintLinterName, + e.log.Child("text_printer")) + case config.OutFormatTab: + p = printers.NewTab(e.cfg.Output.PrintLinterName, e.log.Child("tab_printer")) + case config.OutFormatCheckstyle: + p = printers.NewCheckstyle() + case config.OutFormatCodeClimate: + p = printers.NewCodeClimate() + case config.OutFormatHTML: + p = printers.NewHTML() + case config.OutFormatJunitXML: + p = printers.NewJunitXML() + case config.OutFormatGithubActions: + p = printers.NewGithub() + default: + return nil, fmt.Errorf("unknown output format %s", format) + } + + return p, nil +} + +func (e *Executor) executeRun(_ *cobra.Command, args []string) { + needTrackResources := e.cfg.Run.IsVerbose || e.cfg.Run.PrintResourcesUsage + trackResourcesEndCh := make(chan struct{}) + defer func() { // XXX: this defer must be before ctx.cancel defer + if needTrackResources { // wait until resource tracking finished to print properly + <-trackResourcesEndCh + } + }() + + e.setTimeoutToDeadlineIfOnlyDeadlineIsSet() + ctx, cancel := context.WithTimeout(context.Background(), e.cfg.Run.Timeout) + defer cancel() + + if needTrackResources { + go watchResources(ctx, trackResourcesEndCh, e.log, e.debugf) + } + + if err := e.runAndPrint(ctx, args); err != nil { + e.log.Errorf("Running error: %s", err) + if e.exitCode == exitcodes.Success { + if exitErr, ok := errors.Cause(err).(*exitcodes.ExitError); ok { + e.exitCode = exitErr.Code + } else { + e.exitCode = exitcodes.Failure + } + } + } + + e.setupExitCode(ctx) +} + +// to be removed when deadline is finally decommissioned +func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() { + // nolint:staticcheck + deadlineValue := e.cfg.Run.Deadline + if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout { + e.cfg.Run.Timeout = deadlineValue + } +} + +func (e *Executor) setupExitCode(ctx context.Context) { + if ctx.Err() != nil { + e.exitCode = exitcodes.Timeout + e.log.Errorf("Timeout exceeded: try increasing it by passing --timeout option") + return + } + + if e.exitCode != exitcodes.Success { + return + } + + needFailOnWarnings := (os.Getenv("GL_TEST_RUN") == "1" || os.Getenv("FAIL_ON_WARNINGS") == "1") + if needFailOnWarnings && len(e.reportData.Warnings) != 0 { + e.exitCode = 
exitcodes.WarningInTest + return + } + + if e.reportData.Error != "" { + // it's a case e.g. when typecheck linter couldn't parse and error and just logged it + e.exitCode = exitcodes.ErrorWasLogged + return + } +} + +func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log, debugf logutils.DebugFunc) { + startedAt := time.Now() + debugf("Started tracking time") + + var maxRSSMB, totalRSSMB float64 + var iterationsCount int + + const intervalMS = 100 + ticker := time.NewTicker(intervalMS * time.Millisecond) + defer ticker.Stop() + + logEveryRecord := os.Getenv("GL_MEM_LOG_EVERY") == "1" + const MB = 1024 * 1024 + + track := func() { + var m runtime.MemStats + runtime.ReadMemStats(&m) + + if logEveryRecord { + debugf("Stopping memory tracing iteration, printing ...") + printMemStats(&m, logger) + } + + rssMB := float64(m.Sys) / MB + if rssMB > maxRSSMB { + maxRSSMB = rssMB + } + totalRSSMB += rssMB + iterationsCount++ + } + + for { + track() + + stop := false + select { + case <-ctx.Done(): + stop = true + debugf("Stopped resources tracking") + case <-ticker.C: + } + + if stop { + break + } + } + track() + + avgRSSMB := totalRSSMB / float64(iterationsCount) + + logger.Infof("Memory: %d samples, avg is %.1fMB, max is %.1fMB", + iterationsCount, avgRSSMB, maxRSSMB) + logger.Infof("Execution took %s", time.Since(startedAt)) + close(done) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go new file mode 100644 index 000000000..8b48e515b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go @@ -0,0 +1,60 @@ +package commands + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/golangci/golangci-lint/pkg/config" +) + +type jsonVersion struct { + Version string `json:"version"` + Commit string `json:"commit"` + Date string `json:"date"` +} + +func (e *Executor) initVersionConfiguration(cmd *cobra.Command) { + fs := cmd.Flags() + fs.SortFlags = false // sort them as they are defined here + initVersionFlagSet(fs, e.cfg) +} + +func initVersionFlagSet(fs *pflag.FlagSet, cfg *config.Config) { + // Version config + vc := &cfg.Version + fs.StringVar(&vc.Format, "format", "", wh("The version's format can be: 'short', 'json'")) +} + +func (e *Executor) initVersion() { + versionCmd := &cobra.Command{ + Use: "version", + Short: "Version", + RunE: func(cmd *cobra.Command, _ []string) error { + switch strings.ToLower(e.cfg.Version.Format) { + case "short": + fmt.Println(e.version) + case "json": + ver := jsonVersion{ + Version: e.version, + Commit: e.commit, + Date: e.date, + } + data, err := json.Marshal(&ver) + if err != nil { + return err + } + fmt.Println(string(data)) + default: + fmt.Printf("golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date) + } + return nil + }, + } + + e.rootCmd.AddCommand(versionCmd) + e.initVersionConfiguration(versionCmd) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go new file mode 100644 index 000000000..931ddbbbe --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -0,0 +1,26 @@ +package config + +type Config struct { + Run Run + + Output Output + + LintersSettings LintersSettings `mapstructure:"linters-settings"` + Linters Linters + Issues Issues + Severity Severity + Version Version + + InternalCmdTest bool 
`mapstructure:"internal-cmd-test"` // Option is used only for testing golangci-lint command, don't use it + InternalTest bool // Option is used only for testing golangci-lint code, don't use it +} + +func NewDefault() *Config { + return &Config{ + LintersSettings: defaultLintersSettings, + } +} + +type Version struct { + Format string `mapstructure:"format"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go new file mode 100644 index 000000000..71bf2a90e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -0,0 +1,204 @@ +package config + +import ( + "fmt" + "regexp" +) + +const excludeRuleMinConditionsCount = 2 + +var DefaultExcludePatterns = []ExcludePattern{ + { + ID: "EXC0001", + Pattern: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close" + + "|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked", + Linter: "errcheck", + Why: "Almost all programs ignore errors on these functions and in most cases it's ok", + }, + { + ID: "EXC0002", + Pattern: "(comment on exported (method|function|type|const)|" + + "should have( a package)? comment|comment should be of the form)", + Linter: "golint", + Why: "Annoying issue about not having a comment. The rare codebase has such comments", + }, + { + ID: "EXC0003", + Pattern: "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this", + Linter: "golint", + Why: "False positive when tests are defined in package 'test'", + }, + { + ID: "EXC0004", + Pattern: "(possible misuse of unsafe.Pointer|should have signature)", + Linter: "govet", + Why: "Common false positives", + }, + { + ID: "EXC0005", + Pattern: "ineffective break statement. Did you mean to break out of the outer loop", + Linter: "staticcheck", + Why: "Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore", + }, + { + ID: "EXC0006", + Pattern: "Use of unsafe calls should be audited", + Linter: "gosec", + Why: "Too many false-positives on 'unsafe' usage", + }, + { + ID: "EXC0007", + Pattern: "Subprocess launch(ed with variable|ing should be audited)", + Linter: "gosec", + Why: "Too many false-positives for parametrized shell calls", + }, + { + ID: "EXC0008", + Pattern: "(G104|G307)", + Linter: "gosec", + Why: "Duplicated errcheck checks", + }, + { + ID: "EXC0009", + Pattern: "(Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)", + Linter: "gosec", + Why: "Too many issues in popular repos", + }, + { + ID: "EXC0010", + Pattern: "Potential file inclusion via variable", + Linter: "gosec", + Why: "False positive is triggered by 'src, err := ioutil.ReadFile(filename)'", + }, + { + ID: "EXC0011", + Pattern: "(comment on exported (method|function|type|const)|" + + "should have( a package)? comment|comment should be of the form)", + Linter: "stylecheck", + Why: "Annoying issue about not having a comment. The rare codebase has such comments", + }, + { + ID: "EXC0012", + Pattern: `exported (.+) should have comment( \(or a comment on this block\))? or be unexported`, + Linter: "revive", + Why: "Annoying issue about not having a comment. The rare codebase has such comments", + }, + { + ID: "EXC0013", + Pattern: `package comment should be of the form "(.+)...`, + Linter: "revive", + Why: "Annoying issue about not having a comment. 
The rare codebase has such comments", + }, + { + ID: "EXC0014", + Pattern: `comment on exported (.+) should be of the form "(.+)..."`, + Linter: "revive", + Why: "Annoying issue about not having a comment. The rare codebase has such comments", + }, + { + ID: "EXC0015", + Pattern: `should have a package comment, unless it's in another file for this package`, + Linter: "revive", + Why: "Annoying issue about not having a comment. The rare codebase has such comments", + }, +} + +type Issues struct { + IncludeDefaultExcludes []string `mapstructure:"include"` + ExcludeCaseSensitive bool `mapstructure:"exclude-case-sensitive"` + ExcludePatterns []string `mapstructure:"exclude"` + ExcludeRules []ExcludeRule `mapstructure:"exclude-rules"` + UseDefaultExcludes bool `mapstructure:"exclude-use-default"` + + MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` + MaxSameIssues int `mapstructure:"max-same-issues"` + + DiffFromRevision string `mapstructure:"new-from-rev"` + DiffPatchFilePath string `mapstructure:"new-from-patch"` + Diff bool `mapstructure:"new"` + + NeedFix bool `mapstructure:"fix"` +} + +type ExcludeRule struct { + BaseRule `mapstructure:",squash"` +} + +func (e ExcludeRule) Validate() error { + return e.BaseRule.Validate(excludeRuleMinConditionsCount) +} + +type BaseRule struct { + Linters []string + Path string + Text string + Source string +} + +func (b BaseRule) Validate(minConditionsCount int) error { + if err := validateOptionalRegex(b.Path); err != nil { + return fmt.Errorf("invalid path regex: %v", err) + } + if err := validateOptionalRegex(b.Text); err != nil { + return fmt.Errorf("invalid text regex: %v", err) + } + if err := validateOptionalRegex(b.Source); err != nil { + return fmt.Errorf("invalid source regex: %v", err) + } + nonBlank := 0 + if len(b.Linters) > 0 { + nonBlank++ + } + if b.Path != "" { + nonBlank++ + } + if b.Text != "" { + nonBlank++ + } + if b.Source != "" { + nonBlank++ + } + if nonBlank < minConditionsCount { + return fmt.Errorf("at least %d of (text, source, path, linters) should be set", minConditionsCount) + } + return nil +} + +func validateOptionalRegex(value string) error { + if value == "" { + return nil + } + _, err := regexp.Compile(value) + return err +} + +type ExcludePattern struct { + ID string + Pattern string + Linter string + Why string +} + +func GetDefaultExcludePatternsStrings() []string { + ret := make([]string, len(DefaultExcludePatterns)) + for i, p := range DefaultExcludePatterns { + ret[i] = p.Pattern + } + return ret +} + +func GetExcludePatterns(include []string) []ExcludePattern { + includeMap := make(map[string]bool, len(include)) + for _, inc := range include { + includeMap[inc] = true + } + + var ret []ExcludePattern + for _, p := range DefaultExcludePatterns { + if !includeMap[p.ID] { + ret = append(ret, p) + } + } + + return ret +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go new file mode 100644 index 000000000..ccbdc123a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go @@ -0,0 +1,11 @@ +package config + +type Linters struct { + Enable []string + Disable []string + EnableAll bool `mapstructure:"enable-all"` + DisableAll bool `mapstructure:"disable-all"` + Fast bool + + Presets []string +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go new file mode 100644 index 
000000000..fd5f41318 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go @@ -0,0 +1,486 @@ +package config + +import "github.com/pkg/errors" + +var defaultLintersSettings = LintersSettings{ + Lll: LllSettings{ + LineLength: 120, + TabWidth: 1, + }, + Unparam: UnparamSettings{ + Algo: "cha", + }, + Nakedret: NakedretSettings{ + MaxFuncLines: 30, + }, + Prealloc: PreallocSettings{ + Simple: true, + RangeLoops: true, + ForLoops: false, + }, + Gocritic: GocriticSettings{ + SettingsPerCheck: map[string]GocriticCheckSettings{}, + }, + Godox: GodoxSettings{ + Keywords: []string{}, + }, + Dogsled: DogsledSettings{ + MaxBlankIdentifiers: 2, + }, + Gocognit: GocognitSettings{ + MinComplexity: 30, + }, + WSL: WSLSettings{ + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowAssignAndAnythingCuddle: false, + AllowMultiLineAssignCuddle: true, + AllowCuddleDeclaration: false, + AllowTrailingComment: false, + AllowSeparatedLeadingComment: false, + ForceCuddleErrCheckAndAssign: false, + ForceExclusiveShortDeclarations: false, + ForceCaseTrailingWhitespaceLimit: 0, + }, + NoLintLint: NoLintLintSettings{ + RequireExplanation: false, + AllowLeadingSpace: true, + RequireSpecific: false, + AllowUnused: false, + }, + Testpackage: TestpackageSettings{ + SkipRegexp: `(export|internal)_test\.go`, + }, + Nestif: NestifSettings{ + MinComplexity: 5, + }, + Exhaustive: ExhaustiveSettings{ + CheckGenerated: false, + DefaultSignifiesExhaustive: false, + }, + Gofumpt: GofumptSettings{ + LangVersion: "", + ExtraRules: false, + }, + ErrorLint: ErrorLintSettings{ + Errorf: true, + Asserts: true, + Comparison: true, + }, + Ifshort: IfshortSettings{ + MaxDeclLines: 1, + MaxDeclChars: 30, + }, + Predeclared: PredeclaredSettings{ + Ignore: "", + Qualified: false, + }, + Forbidigo: ForbidigoSettings{ + ExcludeGodocExamples: true, + }, +} + +type LintersSettings struct { + Cyclop Cyclop + Depguard DepGuardSettings + Dogsled DogsledSettings + Dupl DuplSettings + Errcheck ErrcheckSettings + ErrorLint ErrorLintSettings + Exhaustive ExhaustiveSettings + ExhaustiveStruct ExhaustiveStructSettings + Forbidigo ForbidigoSettings + Funlen FunlenSettings + Gci GciSettings + Gocognit GocognitSettings + Goconst GoConstSettings + Gocritic GocriticSettings + Gocyclo GoCycloSettings + Godot GodotSettings + Godox GodoxSettings + Gofmt GoFmtSettings + Gofumpt GofumptSettings + Goheader GoHeaderSettings + Goimports GoImportsSettings + Golint GoLintSettings + Gomnd GoMndSettings + GoModDirectives GoModDirectivesSettings + Gomodguard GoModGuardSettings + Gosec GoSecSettings + Gosimple StaticCheckSettings + Govet GovetSettings + Ifshort IfshortSettings + ImportAs ImportAsSettings + Lll LllSettings + Makezero MakezeroSettings + Maligned MalignedSettings + Misspell MisspellSettings + Nakedret NakedretSettings + Nestif NestifSettings + NoLintLint NoLintLintSettings + Prealloc PreallocSettings + Predeclared PredeclaredSettings + Promlinter PromlinterSettings + Revive ReviveSettings + RowsErrCheck RowsErrCheckSettings + Staticcheck StaticCheckSettings + Structcheck StructCheckSettings + Stylecheck StaticCheckSettings + Tagliatelle TagliatelleSettings + Testpackage TestpackageSettings + Thelper ThelperSettings + Unparam UnparamSettings + Unused StaticCheckSettings + Varcheck VarCheckSettings + Whitespace WhitespaceSettings + Wrapcheck WrapcheckSettings + WSL WSLSettings + + Custom map[string]CustomLinterSettings +} + +type Cyclop struct { + MaxComplexity int `mapstructure:"max-complexity"` + PackageAverage 
float64 `mapstructure:"package-average"` + SkipTests bool `mapstructure:"skip-tests"` +} + +type DepGuardSettings struct { + ListType string `mapstructure:"list-type"` + Packages []string + IncludeGoRoot bool `mapstructure:"include-go-root"` + PackagesWithErrorMessage map[string]string `mapstructure:"packages-with-error-message"` +} + +type DogsledSettings struct { + MaxBlankIdentifiers int `mapstructure:"max-blank-identifiers"` +} + +type DuplSettings struct { + Threshold int +} + +type ErrcheckSettings struct { + CheckTypeAssertions bool `mapstructure:"check-type-assertions"` + CheckAssignToBlank bool `mapstructure:"check-blank"` + Ignore string `mapstructure:"ignore"` + ExcludeFunctions []string `mapstructure:"exclude-functions"` + + // Deprecated: use ExcludeFunctions instead + Exclude string `mapstructure:"exclude"` +} + +type ErrorLintSettings struct { + Errorf bool `mapstructure:"errorf"` + Asserts bool `mapstructure:"asserts"` + Comparison bool `mapstructure:"comparison"` +} + +type ExhaustiveSettings struct { + CheckGenerated bool `mapstructure:"check-generated"` + DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + IgnorePattern string `mapstructure:"ignore-pattern"` +} + +type ExhaustiveStructSettings struct { + StructPatterns []string `mapstructure:"struct-patterns"` +} + +type ForbidigoSettings struct { + Forbid []string `mapstructure:"forbid"` + ExcludeGodocExamples bool `mapstructure:"exclude-godoc-examples"` +} + +type FunlenSettings struct { + Lines int + Statements int +} + +type GciSettings struct { + LocalPrefixes string `mapstructure:"local-prefixes"` +} + +type GocognitSettings struct { + MinComplexity int `mapstructure:"min-complexity"` +} + +type GoConstSettings struct { + IgnoreTests bool `mapstructure:"ignore-tests"` + MatchWithConstants bool `mapstructure:"match-constant"` + MinStringLen int `mapstructure:"min-len"` + MinOccurrencesCount int `mapstructure:"min-occurrences"` + ParseNumbers bool `mapstructure:"numbers"` + NumberMin int `mapstructure:"min"` + NumberMax int `mapstructure:"max"` + IgnoreCalls bool `mapstructure:"ignore-calls"` +} + +type GoCycloSettings struct { + MinComplexity int `mapstructure:"min-complexity"` +} + +type GodotSettings struct { + Scope string `mapstructure:"scope"` + Exclude []string `mapstructure:"exclude"` + Capital bool `mapstructure:"capital"` + + // Deprecated: use `Scope` instead + CheckAll bool `mapstructure:"check-all"` +} + +type GodoxSettings struct { + Keywords []string +} + +type GoFmtSettings struct { + Simplify bool +} + +type GofumptSettings struct { + LangVersion string `mapstructure:"lang-version"` + ExtraRules bool `mapstructure:"extra-rules"` +} + +type GoHeaderSettings struct { + Values map[string]map[string]string `mapstructure:"values"` + Template string `mapstructure:"template"` + TemplatePath string `mapstructure:"template-path"` +} + +type GoImportsSettings struct { + LocalPrefixes string `mapstructure:"local-prefixes"` +} + +type GoLintSettings struct { + MinConfidence float64 `mapstructure:"min-confidence"` +} + +type GoMndSettings struct { + Settings map[string]map[string]interface{} +} + +type GoModDirectivesSettings struct { + ReplaceAllowList []string `mapstructure:"replace-allow-list"` + ReplaceLocal bool `mapstructure:"replace-local"` + ExcludeForbidden bool `mapstructure:"exclude-forbidden"` + RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"` +} + +type GoModGuardSettings struct { + Allowed struct { + Modules []string `mapstructure:"modules"` + 
Domains []string `mapstructure:"domains"` + } `mapstructure:"allowed"` + Blocked struct { + Modules []map[string]struct { + Recommendations []string `mapstructure:"recommendations"` + Reason string `mapstructure:"reason"` + } `mapstructure:"modules"` + Versions []map[string]struct { + Version string `mapstructure:"version"` + Reason string `mapstructure:"reason"` + } `mapstructure:"versions"` + LocalReplaceDirectives bool `mapstructure:"local_replace_directives"` + } `mapstructure:"blocked"` +} + +type GoSecSettings struct { + Includes []string + Excludes []string + Config map[string]interface{} `mapstructure:"config"` +} + +type GovetSettings struct { + CheckShadowing bool `mapstructure:"check-shadowing"` + Settings map[string]map[string]interface{} + + Enable []string + Disable []string + EnableAll bool `mapstructure:"enable-all"` + DisableAll bool `mapstructure:"disable-all"` +} + +func (cfg GovetSettings) Validate() error { + if cfg.EnableAll && cfg.DisableAll { + return errors.New("enable-all and disable-all can't be combined") + } + if cfg.EnableAll && len(cfg.Enable) != 0 { + return errors.New("enable-all and enable can't be combined") + } + if cfg.DisableAll && len(cfg.Disable) != 0 { + return errors.New("disable-all and disable can't be combined") + } + return nil +} + +type IfshortSettings struct { + MaxDeclLines int `mapstructure:"max-decl-lines"` + MaxDeclChars int `mapstructure:"max-decl-chars"` +} + +type ImportAsSettings struct { + Alias []ImportAsAlias + NoUnaliased bool `mapstructure:"no-unaliased"` +} + +type ImportAsAlias struct { + Pkg string + Alias string +} + +type LllSettings struct { + LineLength int `mapstructure:"line-length"` + TabWidth int `mapstructure:"tab-width"` +} + +type MakezeroSettings struct { + Always bool +} + +type MalignedSettings struct { + SuggestNewOrder bool `mapstructure:"suggest-new"` +} + +type MisspellSettings struct { + Locale string + IgnoreWords []string `mapstructure:"ignore-words"` +} + +type NakedretSettings struct { + MaxFuncLines int `mapstructure:"max-func-lines"` +} + +type NestifSettings struct { + MinComplexity int `mapstructure:"min-complexity"` +} + +type NoLintLintSettings struct { + RequireExplanation bool `mapstructure:"require-explanation"` + AllowLeadingSpace bool `mapstructure:"allow-leading-space"` + RequireSpecific bool `mapstructure:"require-specific"` + AllowNoExplanation []string `mapstructure:"allow-no-explanation"` + AllowUnused bool `mapstructure:"allow-unused"` +} + +type PreallocSettings struct { + Simple bool + RangeLoops bool `mapstructure:"range-loops"` + ForLoops bool `mapstructure:"for-loops"` +} + +type PredeclaredSettings struct { + Ignore string `mapstructure:"ignore"` + Qualified bool `mapstructure:"q"` +} + +type PromlinterSettings struct { + Strict bool `mapstructure:"strict"` + DisabledLinters []string `mapstructure:"disabled-linters"` +} + +type ReviveSettings struct { + IgnoreGeneratedHeader bool `mapstructure:"ignore-generated-header"` + Confidence float64 + Severity string + EnableAllRules bool `mapstructure:"enable-all-rules"` + Rules []struct { + Name string + Arguments []interface{} + Severity string + Disabled bool + } + ErrorCode int `mapstructure:"error-code"` + WarningCode int `mapstructure:"warning-code"` + Directives []struct { + Name string + Severity string + } +} + +type RowsErrCheckSettings struct { + Packages []string +} + +type StaticCheckSettings struct { + GoVersion string `mapstructure:"go"` + + Checks []string `mapstructure:"checks"` + Initialisms []string 
`mapstructure:"initialisms"` // only for stylecheck + DotImportWhitelist []string `mapstructure:"dot-import-whitelist"` // only for stylecheck + HTTPStatusCodeWhitelist []string `mapstructure:"http-status-code-whitelist"` // only for stylecheck +} + +func (s *StaticCheckSettings) HasConfiguration() bool { + return len(s.Initialisms) > 0 || len(s.HTTPStatusCodeWhitelist) > 0 || len(s.DotImportWhitelist) > 0 || len(s.Checks) > 0 +} + +type StructCheckSettings struct { + CheckExportedFields bool `mapstructure:"exported-fields"` +} + +type TagliatelleSettings struct { + Case struct { + Rules map[string]string + UseFieldName bool `mapstructure:"use-field-name"` + } +} + +type TestpackageSettings struct { + SkipRegexp string `mapstructure:"skip-regexp"` +} + +type ThelperSettings struct { + Test struct { + First bool `mapstructure:"first"` + Name bool `mapstructure:"name"` + Begin bool `mapstructure:"begin"` + } `mapstructure:"test"` + Benchmark struct { + First bool `mapstructure:"first"` + Name bool `mapstructure:"name"` + Begin bool `mapstructure:"begin"` + } `mapstructure:"benchmark"` + TB struct { + First bool `mapstructure:"first"` + Name bool `mapstructure:"name"` + Begin bool `mapstructure:"begin"` + } `mapstructure:"tb"` +} + +type UnparamSettings struct { + CheckExported bool `mapstructure:"check-exported"` + Algo string +} + +type VarCheckSettings struct { + CheckExportedFields bool `mapstructure:"exported-fields"` +} + +type WhitespaceSettings struct { + MultiIf bool `mapstructure:"multi-if"` + MultiFunc bool `mapstructure:"multi-func"` +} + +type WrapcheckSettings struct { + IgnoreSigs []string `mapstructure:"ignoreSigs"` + IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs"` +} + +type WSLSettings struct { + StrictAppend bool `mapstructure:"strict-append"` + AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` + AllowAssignAndAnythingCuddle bool `mapstructure:"allow-assign-and-anything"` + AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` + AllowTrailingComment bool `mapstructure:"allow-trailing-comment"` + AllowSeparatedLeadingComment bool `mapstructure:"allow-separated-leading-comment"` + ForceCuddleErrCheckAndAssign bool `mapstructure:"force-err-cuddling"` + ForceExclusiveShortDeclarations bool `mapstructure:"force-short-decl-cuddling"` + ForceCaseTrailingWhitespaceLimit int `mapstructure:"force-case-trailing-whitespace"` +} + +type CustomLinterSettings struct { + Path string + Description string + OriginalURL string `mapstructure:"original-url"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go new file mode 100644 index 000000000..34f850758 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go @@ -0,0 +1,365 @@ +package config + +import ( + "fmt" + "sort" + "strings" + + _ "github.com/go-critic/go-critic/checkers" // this import register checkers + "github.com/go-critic/go-critic/framework/linter" + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const gocriticDebugKey = "gocritic" + +var ( + gocriticDebugf = logutils.Debug(gocriticDebugKey) + isGocriticDebug = logutils.HaveDebugTag(gocriticDebugKey) + allGocriticCheckers = linter.GetCheckersInfo() + allGocriticCheckerMap = func() map[string]*linter.CheckerInfo { + checkInfoMap := 
make(map[string]*linter.CheckerInfo) + for _, checkInfo := range allGocriticCheckers { + checkInfoMap[checkInfo.Name] = checkInfo + } + return checkInfoMap + }() +) + +type GocriticCheckSettings map[string]interface{} + +type GocriticSettings struct { + EnabledChecks []string `mapstructure:"enabled-checks"` + DisabledChecks []string `mapstructure:"disabled-checks"` + EnabledTags []string `mapstructure:"enabled-tags"` + DisabledTags []string `mapstructure:"disabled-tags"` + SettingsPerCheck map[string]GocriticCheckSettings `mapstructure:"settings"` + + inferredEnabledChecks map[string]bool +} + +func debugChecksListf(checks []string, format string, args ...interface{}) { + if isGocriticDebug { + prefix := fmt.Sprintf(format, args...) + gocriticDebugf(prefix+" checks (%d): %s", len(checks), sprintStrings(checks)) + } +} + +func stringsSliceToSet(ss []string) map[string]bool { + ret := map[string]bool{} + for _, s := range ss { + ret[s] = true + } + + return ret +} + +func buildGocriticTagToCheckersMap() map[string][]string { + tagToCheckers := map[string][]string{} + for _, checker := range allGocriticCheckers { + for _, tag := range checker.Tags { + tagToCheckers[tag] = append(tagToCheckers[tag], checker.Name) + } + } + return tagToCheckers +} + +func gocriticCheckerTagsDebugf() { + if !isGocriticDebug { + return + } + + tagToCheckers := buildGocriticTagToCheckersMap() + + var allTags []string + for tag := range tagToCheckers { + allTags = append(allTags, tag) + } + sort.Strings(allTags) + + gocriticDebugf("All gocritic existing tags and checks:") + for _, tag := range allTags { + debugChecksListf(tagToCheckers[tag], " tag %q", tag) + } +} + +func (s *GocriticSettings) gocriticDisabledCheckersDebugf() { + if !isGocriticDebug { + return + } + + var disabledCheckers []string + for _, checker := range allGocriticCheckers { + if s.inferredEnabledChecks[strings.ToLower(checker.Name)] { + continue + } + + disabledCheckers = append(disabledCheckers, checker.Name) + } + + if len(disabledCheckers) == 0 { + gocriticDebugf("All checks are enabled") + } else { + debugChecksListf(disabledCheckers, "Final not used") + } +} + +func (s *GocriticSettings) InferEnabledChecks(log logutils.Log) { + gocriticCheckerTagsDebugf() + + enabledByDefaultChecks := getDefaultEnabledGocriticCheckersNames() + debugChecksListf(enabledByDefaultChecks, "Enabled by default") + + disabledByDefaultChecks := getDefaultDisabledGocriticCheckersNames() + debugChecksListf(disabledByDefaultChecks, "Disabled by default") + + var enabledChecks []string + + // EnabledTags + if len(s.EnabledTags) != 0 { + tagToCheckers := buildGocriticTagToCheckersMap() + for _, tag := range s.EnabledTags { + enabledChecks = append(enabledChecks, tagToCheckers[tag]...) + } + debugChecksListf(enabledChecks, "Enabled by config tags %s", sprintStrings(s.EnabledTags)) + } + + if !(len(s.EnabledTags) == 0 && len(s.EnabledChecks) != 0) { + // don't use default checks only if we have no enabled tags and enable some checks manually + enabledChecks = append(enabledChecks, enabledByDefaultChecks...) 
+ } + + // DisabledTags + if len(s.DisabledTags) != 0 { + enabledChecks = filterByDisableTags(enabledChecks, s.DisabledTags, log) + } + + // EnabledChecks + if len(s.EnabledChecks) != 0 { + debugChecksListf(s.EnabledChecks, "Enabled by config") + + alreadyEnabledChecksSet := stringsSliceToSet(enabledChecks) + for _, enabledCheck := range s.EnabledChecks { + if alreadyEnabledChecksSet[enabledCheck] { + log.Warnf("No need to enable check %q: it's already enabled", enabledCheck) + continue + } + enabledChecks = append(enabledChecks, enabledCheck) + } + } + + // DisabledChecks + if len(s.DisabledChecks) != 0 { + debugChecksListf(s.DisabledChecks, "Disabled by config") + + enabledChecksSet := stringsSliceToSet(enabledChecks) + for _, disabledCheck := range s.DisabledChecks { + if !enabledChecksSet[disabledCheck] { + log.Warnf("Gocritic check %q was explicitly disabled via config. However, as this check"+ + "is disabled by default, there is no need to explicitly disable it via config.", disabledCheck) + continue + } + delete(enabledChecksSet, disabledCheck) + } + + enabledChecks = nil + for enabledCheck := range enabledChecksSet { + enabledChecks = append(enabledChecks, enabledCheck) + } + } + + s.inferredEnabledChecks = map[string]bool{} + for _, check := range enabledChecks { + s.inferredEnabledChecks[strings.ToLower(check)] = true + } + + debugChecksListf(enabledChecks, "Final used") + s.gocriticDisabledCheckersDebugf() +} + +func validateStringsUniq(ss []string) error { + set := map[string]bool{} + for _, s := range ss { + _, ok := set[s] + if ok { + return fmt.Errorf("%q occurs multiple times in list", s) + } + set[s] = true + } + + return nil +} + +func intersectStringSlice(s1, s2 []string) []string { + s1Map := make(map[string]struct{}) + for _, s := range s1 { + s1Map[s] = struct{}{} + } + + result := make([]string, 0) + for _, s := range s2 { + if _, exists := s1Map[s]; exists { + result = append(result, s) + } + } + + return result +} + +func (s *GocriticSettings) Validate(log logutils.Log) error { + if len(s.EnabledTags) == 0 { + if len(s.EnabledChecks) != 0 && len(s.DisabledChecks) != 0 { + return errors.New("both enabled and disabled check aren't allowed for gocritic") + } + } else { + if err := validateStringsUniq(s.EnabledTags); err != nil { + return errors.Wrap(err, "validate enabled tags") + } + + tagToCheckers := buildGocriticTagToCheckersMap() + for _, tag := range s.EnabledTags { + if _, ok := tagToCheckers[tag]; !ok { + return fmt.Errorf("gocritic [enabled]tag %q doesn't exist", tag) + } + } + } + + if len(s.DisabledTags) > 0 { + tagToCheckers := buildGocriticTagToCheckersMap() + for _, tag := range s.EnabledTags { + if _, ok := tagToCheckers[tag]; !ok { + return fmt.Errorf("gocritic [disabled]tag %q doesn't exist", tag) + } + } + } + + if err := validateStringsUniq(s.EnabledChecks); err != nil { + return errors.Wrap(err, "validate enabled checks") + } + if err := validateStringsUniq(s.DisabledChecks); err != nil { + return errors.Wrap(err, "validate disabled checks") + } + + if err := s.validateCheckerNames(log); err != nil { + return errors.Wrap(err, "validation failed") + } + + return nil +} + +func (s *GocriticSettings) IsCheckEnabled(name string) bool { + return s.inferredEnabledChecks[strings.ToLower(name)] +} + +func sprintAllowedCheckerNames(allowedNames map[string]bool) string { + var namesSlice []string + for name := range allowedNames { + namesSlice = append(namesSlice, name) + } + return sprintStrings(namesSlice) +} + +func sprintStrings(ss []string) string { + 
sort.Strings(ss) + return fmt.Sprint(ss) +} + +// getAllCheckerNames returns a map containing all checker names supported by gocritic. +func getAllCheckerNames() map[string]bool { + allCheckerNames := map[string]bool{} + for _, checker := range allGocriticCheckers { + allCheckerNames[strings.ToLower(checker.Name)] = true + } + + return allCheckerNames +} + +func isEnabledByDefaultGocriticCheck(info *linter.CheckerInfo) bool { + return !info.HasTag("experimental") && + !info.HasTag("opinionated") && + !info.HasTag("performance") +} + +func getDefaultEnabledGocriticCheckersNames() []string { + var enabled []string + for _, info := range allGocriticCheckers { + enable := isEnabledByDefaultGocriticCheck(info) + if enable { + enabled = append(enabled, info.Name) + } + } + + return enabled +} + +func getDefaultDisabledGocriticCheckersNames() []string { + var disabled []string + for _, info := range allGocriticCheckers { + enable := isEnabledByDefaultGocriticCheck(info) + if !enable { + disabled = append(disabled, info.Name) + } + } + + return disabled +} + +func (s *GocriticSettings) validateCheckerNames(log logutils.Log) error { + allowedNames := getAllCheckerNames() + + for _, name := range s.EnabledChecks { + if !allowedNames[strings.ToLower(name)] { + return fmt.Errorf("enabled checker %s doesn't exist, all existing checkers: %s", + name, sprintAllowedCheckerNames(allowedNames)) + } + } + + for _, name := range s.DisabledChecks { + if !allowedNames[strings.ToLower(name)] { + return fmt.Errorf("disabled checker %s doesn't exist, all existing checkers: %s", + name, sprintAllowedCheckerNames(allowedNames)) + } + } + + for checkName := range s.SettingsPerCheck { + if _, ok := allowedNames[checkName]; !ok { + return fmt.Errorf("invalid setting, checker %s doesn't exist, all existing checkers: %s", + checkName, sprintAllowedCheckerNames(allowedNames)) + } + if !s.IsCheckEnabled(checkName) { + log.Warnf("Gocritic settings were provided for not enabled check %q", checkName) + } + } + + return nil +} + +func (s *GocriticSettings) GetLowercasedParams() map[string]GocriticCheckSettings { + ret := map[string]GocriticCheckSettings{} + for checker, params := range s.SettingsPerCheck { + ret[strings.ToLower(checker)] = params + } + return ret +} + +func filterByDisableTags(enabledChecks, disableTags []string, log logutils.Log) []string { + enabledChecksSet := stringsSliceToSet(enabledChecks) + for _, enabledCheck := range enabledChecks { + checkInfo, checkInfoExists := allGocriticCheckerMap[enabledCheck] + if !checkInfoExists { + log.Warnf("Gocritic check %q was not exists via filtering disabled tags", enabledCheck) + continue + } + hitTags := intersectStringSlice(checkInfo.Tags, disableTags) + if len(hitTags) != 0 { + delete(enabledChecksSet, enabledCheck) + } + debugChecksListf(enabledChecks, "Disabled by config tags %s", sprintStrings(disableTags)) + } + enabledChecks = nil + for enabledCheck := range enabledChecksSet { + enabledChecks = append(enabledChecks, enabledCheck) + } + return enabledChecks +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go new file mode 100644 index 000000000..d67f110f6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -0,0 +1,36 @@ +package config + +const ( + OutFormatJSON = "json" + OutFormatLineNumber = "line-number" + OutFormatColoredLineNumber = "colored-line-number" + OutFormatTab = "tab" + OutFormatCheckstyle = "checkstyle" + 
OutFormatCodeClimate = "code-climate" + OutFormatHTML = "html" + OutFormatJunitXML = "junit-xml" + OutFormatGithubActions = "github-actions" +) + +var OutFormats = []string{ + OutFormatColoredLineNumber, + OutFormatLineNumber, + OutFormatJSON, + OutFormatTab, + OutFormatCheckstyle, + OutFormatCodeClimate, + OutFormatHTML, + OutFormatJunitXML, + OutFormatGithubActions, +} + +type Output struct { + Format string + Color string + PrintIssuedLine bool `mapstructure:"print-issued-lines"` + PrintLinterName bool `mapstructure:"print-linter-name"` + UniqByLine bool `mapstructure:"uniq-by-line"` + SortResults bool `mapstructure:"sort-results"` + PrintWelcomeMessage bool `mapstructure:"print-welcome"` + PathPrefix string `mapstructure:"path-prefix"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go new file mode 100644 index 000000000..6e97277da --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go @@ -0,0 +1,221 @@ +package config + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/mitchellh/go-homedir" + "github.com/spf13/viper" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/sliceutil" +) + +type FileReader struct { + log logutils.Log + cfg *Config + commandLineCfg *Config +} + +func NewFileReader(toCfg, commandLineCfg *Config, log logutils.Log) *FileReader { + return &FileReader{ + log: log, + cfg: toCfg, + commandLineCfg: commandLineCfg, + } +} + +func (r *FileReader) Read() error { + // XXX: hack with double parsing for 2 purposes: + // 1. to access "config" option here. + // 2. to give config less priority than command line. 
+ + configFile, err := r.parseConfigOption() + if err != nil { + if err == errConfigDisabled { + return nil + } + + return fmt.Errorf("can't parse --config option: %s", err) + } + + if configFile != "" { + viper.SetConfigFile(configFile) + } else { + r.setupConfigFileSearch() + } + + return r.parseConfig() +} + +func (r *FileReader) parseConfig() error { + if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + return nil + } + + return fmt.Errorf("can't read viper config: %s", err) + } + + usedConfigFile := viper.ConfigFileUsed() + if usedConfigFile == "" { + return nil + } + + usedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") + if err != nil { + r.log.Warnf("Can't pretty print config file path: %s", err) + } + r.log.Infof("Used config file %s", usedConfigFile) + + if err := viper.Unmarshal(r.cfg); err != nil { + return fmt.Errorf("can't unmarshal config by viper: %s", err) + } + + if err := r.validateConfig(); err != nil { + return fmt.Errorf("can't validate config: %s", err) + } + + if r.cfg.InternalTest { // just for testing purposes: to detect config file usage + fmt.Fprintln(logutils.StdOut, "test") + os.Exit(0) + } + + return nil +} + +func (r *FileReader) validateConfig() error { + c := r.cfg + if len(c.Run.Args) != 0 { + return errors.New("option run.args in config isn't supported now") + } + + if c.Run.CPUProfilePath != "" { + return errors.New("option run.cpuprofilepath in config isn't allowed") + } + + if c.Run.MemProfilePath != "" { + return errors.New("option run.memprofilepath in config isn't allowed") + } + + if c.Run.TracePath != "" { + return errors.New("option run.tracepath in config isn't allowed") + } + + if c.Run.IsVerbose { + return errors.New("can't set run.verbose option with config: only on command-line") + } + for i, rule := range c.Issues.ExcludeRules { + if err := rule.Validate(); err != nil { + return fmt.Errorf("error in exclude rule #%d: %v", i, err) + } + } + if len(c.Severity.Rules) > 0 && c.Severity.Default == "" { + return errors.New("can't set severity rule option: no default severity defined") + } + for i, rule := range c.Severity.Rules { + if err := rule.Validate(); err != nil { + return fmt.Errorf("error in severity rule #%d: %v", i, err) + } + } + if err := c.LintersSettings.Govet.Validate(); err != nil { + return fmt.Errorf("error in govet config: %v", err) + } + return nil +} + +func getFirstPathArg() string { + args := os.Args + + // skip all args ([golangci-lint, run/linters]) before files/dirs list + for len(args) != 0 { + if args[0] == "run" { + args = args[1:] + break + } + + args = args[1:] + } + + // find first file/dir arg + firstArg := "./..." 
+ for _, arg := range args { + if !strings.HasPrefix(arg, "-") { + firstArg = arg + break + } + } + + return firstArg +} + +func (r *FileReader) setupConfigFileSearch() { + firstArg := getFirstPathArg() + absStartPath, err := filepath.Abs(firstArg) + if err != nil { + r.log.Warnf("Can't make abs path for %q: %s", firstArg, err) + absStartPath = filepath.Clean(firstArg) + } + + // start from it + var curDir string + if fsutils.IsDir(absStartPath) { + curDir = absStartPath + } else { + curDir = filepath.Dir(absStartPath) + } + + // find all dirs from it up to the root + configSearchPaths := []string{"./"} + + for { + configSearchPaths = append(configSearchPaths, curDir) + newCurDir := filepath.Dir(curDir) + if curDir == newCurDir || newCurDir == "" { + break + } + curDir = newCurDir + } + + // find home directory for global config + if home, err := homedir.Dir(); err != nil { + r.log.Warnf("Can't get user's home directory: %s", err.Error()) + } else if !sliceutil.Contains(configSearchPaths, home) { + configSearchPaths = append(configSearchPaths, home) + } + + r.log.Infof("Config search paths: %s", configSearchPaths) + viper.SetConfigName(".golangci") + for _, p := range configSearchPaths { + viper.AddConfigPath(p) + } +} + +var errConfigDisabled = errors.New("config is disabled by --no-config") + +func (r *FileReader) parseConfigOption() (string, error) { + cfg := r.commandLineCfg + if cfg == nil { + return "", nil + } + + configFile := cfg.Run.Config + if cfg.Run.NoConfig && configFile != "" { + return "", fmt.Errorf("can't combine option --config and --no-config") + } + + if cfg.Run.NoConfig { + return "", errConfigDisabled + } + + configFile, err := homedir.Expand(configFile) + if err != nil { + return "", fmt.Errorf("failed to expand configuration path") + } + + return configFile, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go new file mode 100644 index 000000000..ff6347945 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -0,0 +1,37 @@ +package config + +import "time" + +type Run struct { + IsVerbose bool `mapstructure:"verbose"` + Silent bool + CPUProfilePath string + MemProfilePath string + TracePath string + Concurrency int + PrintResourcesUsage bool `mapstructure:"print-resources-usage"` + + Config string + NoConfig bool + + Args []string + + BuildTags []string `mapstructure:"build-tags"` + ModulesDownloadMode string `mapstructure:"modules-download-mode"` + + ExitCodeIfIssuesFound int `mapstructure:"issues-exit-code"` + AnalyzeTests bool `mapstructure:"tests"` + + // Deprecated: Deadline exists for historical compatibility + // and should not be used. To set run timeout use Timeout instead. 
+ Deadline time.Duration + Timeout time.Duration + + PrintVersion bool + SkipFiles []string `mapstructure:"skip-files"` + SkipDirs []string `mapstructure:"skip-dirs"` + UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` + + AllowParallelRunners bool `mapstructure:"allow-parallel-runners"` + AllowSerialRunners bool `mapstructure:"allow-serial-runners"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go new file mode 100644 index 000000000..3068a0ed6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/severity.go @@ -0,0 +1,18 @@ +package config + +const severityRuleMinConditionsCount = 1 + +type Severity struct { + Default string `mapstructure:"default-severity"` + CaseSensitive bool `mapstructure:"case-sensitive"` + Rules []SeverityRule `mapstructure:"rules"` +} + +type SeverityRule struct { + BaseRule `mapstructure:",squash"` + Severity string +} + +func (s *SeverityRule) Validate() error { + return s.BaseRule.Validate(severityRuleMinConditionsCount) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go new file mode 100644 index 000000000..536f90361 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go @@ -0,0 +1,34 @@ +package exitcodes + +const ( + Success = 0 + IssuesFound = 1 + WarningInTest = 2 + Failure = 3 + Timeout = 4 + NoGoFiles = 5 + NoConfigFileDetected = 6 + ErrorWasLogged = 7 +) + +type ExitError struct { + Message string + Code int +} + +func (e ExitError) Error() string { + return e.Message +} + +var ( + ErrNoGoFiles = &ExitError{ + Message: "no go files to analyze", + Code: NoGoFiles, + } + ErrFailure = &ExitError{ + Message: "failed to analyze", + Code: Failure, + } +) + +// 1 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go new file mode 100644 index 000000000..2b17a0398 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go @@ -0,0 +1,67 @@ +package fsutils + +import ( + "fmt" + "io/ioutil" + "sync" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type FileCache struct { + files sync.Map +} + +func NewFileCache() *FileCache { + return &FileCache{} +} + +func (fc *FileCache) GetFileBytes(filePath string) ([]byte, error) { + cachedBytes, ok := fc.files.Load(filePath) + if ok { + return cachedBytes.([]byte), nil + } + + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, errors.Wrapf(err, "can't read file %s", filePath) + } + + fc.files.Store(filePath, fileBytes) + return fileBytes, nil +} + +func PrettifyBytesCount(n int64) string { + const ( + Multiplexer = 1024 + KiB = 1 * Multiplexer + MiB = KiB * Multiplexer + GiB = MiB * Multiplexer + ) + + if n >= GiB { + return fmt.Sprintf("%.1fGiB", float64(n)/GiB) + } + if n >= MiB { + return fmt.Sprintf("%.1fMiB", float64(n)/MiB) + } + if n >= KiB { + return fmt.Sprintf("%.1fKiB", float64(n)/KiB) + } + return fmt.Sprintf("%dB", n) +} + +func (fc *FileCache) PrintStats(log logutils.Log) { + var size int64 + var mapLen int + fc.files.Range(func(_, fileBytes interface{}) bool { + mapLen++ + size += int64(len(fileBytes.([]byte))) + + return true + }) + + log.Infof("File cache stats: %d entries of total size %s", mapLen, PrettifyBytesCount(size)) +} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go new file mode 100644 index 000000000..a39c105e4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -0,0 +1,100 @@ +package fsutils + +import ( + "fmt" + "os" + "path/filepath" + "sync" +) + +func IsDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +var cachedWd string +var cachedWdError error +var getWdOnce sync.Once +var useCache = true + +func UseWdCache(use bool) { + useCache = use +} + +func Getwd() (string, error) { + if !useCache { // for tests + return os.Getwd() + } + + getWdOnce.Do(func() { + cachedWd, cachedWdError = os.Getwd() + if cachedWdError != nil { + return + } + + evaledWd, err := EvalSymlinks(cachedWd) + if err != nil { + cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %s", cachedWd, err) + return + } + + cachedWd = evaledWd + }) + + return cachedWd, cachedWdError +} + +var evalSymlinkCache sync.Map + +type evalSymlinkRes struct { + path string + err error +} + +func EvalSymlinks(path string) (string, error) { + r, ok := evalSymlinkCache.Load(path) + if ok { + er := r.(evalSymlinkRes) + return er.path, er.err + } + + var er evalSymlinkRes + er.path, er.err = filepath.EvalSymlinks(path) + evalSymlinkCache.Store(path, er) + + return er.path, er.err +} + +func ShortestRelPath(path, wd string) (string, error) { + if wd == "" { // get it if user don't have cached working dir + var err error + wd, err = Getwd() + if err != nil { + return "", fmt.Errorf("can't get working directory: %s", err) + } + } + + evaledPath, err := EvalSymlinks(path) + if err != nil { + return "", fmt.Errorf("can't eval symlinks for path %s: %s", path, err) + } + path = evaledPath + + // make path absolute and then relative to be able to fix this case: + // we are in /test dir, we want to normalize ../test, and have file file.go in this dir; + // it must have normalized path file.go, not ../test/file.go, + var absPath string + if filepath.IsAbs(path) { + absPath = path + } else { + absPath = filepath.Join(wd, path) + } + + relPath, err := filepath.Rel(wd, absPath) + if err != nil { + return "", fmt.Errorf("can't get relative path for path %s and root %s: %s", + absPath, wd, err) + } + + return relPath, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go new file mode 100644 index 000000000..ab408e7d5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go @@ -0,0 +1,70 @@ +package fsutils + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pkg/errors" +) + +type fileLinesCache [][]byte + +type LineCache struct { + files sync.Map + fileCache *FileCache +} + +func NewLineCache(fc *FileCache) *LineCache { + return &LineCache{ + fileCache: fc, + } +} + +// GetLine returns a index1-th (1-based index) line from the file on filePath +func (lc *LineCache) GetLine(filePath string, index1 int) (string, error) { + if index1 == 0 { // some linters, e.g. 
gosec can do it: it really means first line + index1 = 1 + } + + const index1To0Offset = -1 + rawLine, err := lc.getRawLine(filePath, index1+index1To0Offset) + if err != nil { + return "", err + } + + return string(bytes.Trim(rawLine, "\r")), nil +} + +func (lc *LineCache) getRawLine(filePath string, index0 int) ([]byte, error) { + fc, err := lc.getFileCache(filePath) + if err != nil { + return nil, errors.Wrapf(err, "failed to get file %s lines cache", filePath) + } + + if index0 < 0 { + return nil, fmt.Errorf("invalid file line index0 < 0: %d", index0) + } + + if index0 >= len(fc) { + return nil, fmt.Errorf("invalid file line index0 (%d) >= len(fc) (%d)", index0, len(fc)) + } + + return fc[index0], nil +} + +func (lc *LineCache) getFileCache(filePath string) (fileLinesCache, error) { + loadedFc, ok := lc.files.Load(filePath) + if ok { + return loadedFc.(fileLinesCache), nil + } + + fileBytes, err := lc.fileCache.GetFileBytes(filePath) + if err != nil { + return nil, errors.Wrapf(err, "can't get file %s bytes from cache", filePath) + } + + fc := bytes.Split(fileBytes, []byte("\n")) + lc.files.Store(filePath, fileLinesCache(fc)) + return fc, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go new file mode 100644 index 000000000..1bf8c7b7d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/tdakkota/asciicheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewAsciicheck() *goanalysis.Linter { + return goanalysis.NewLinter( + "asciicheck", + "Simple linter to check that your code does not contain non-ASCII identifiers", + []*analysis.Analyzer{ + asciicheck.NewAnalyzer(), + }, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go new file mode 100644 index 000000000..0e03813d1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/timakin/bodyclose/passes/bodyclose" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewBodyclose() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + bodyclose.Analyzer, + } + + return goanalysis.NewLinter( + "bodyclose", + "checks whether HTTP response body is closed successfully", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go new file mode 100644 index 000000000..6f55b2797 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go @@ -0,0 +1,39 @@ +package golinters + +import ( + "github.com/bkielbasa/cyclop/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +const cyclopName = "cyclop" + +func NewCyclop(settings *config.Cyclop) *goanalysis.Linter { + a := analyzer.NewAnalyzer() + + var cfg map[string]map[string]interface{} + if settings != nil { + d := map[string]interface{}{ + "skipTests": settings.SkipTests, + } + + if settings.MaxComplexity != 0 { + d["maxComplexity"] = 
settings.MaxComplexity + } + + if settings.PackageAverage != 0 { + d["packageAverage"] = settings.PackageAverage + } + + cfg = map[string]map[string]interface{}{a.Name: d} + } + + return goanalysis.NewLinter( + cyclopName, + "checks function and package cyclomatic complexity", + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go new file mode 100644 index 000000000..6ff38909f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go @@ -0,0 +1,52 @@ +package golinters + +import ( + "fmt" + "sync" + + deadcodeAPI "github.com/golangci/go-misc/deadcode" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewDeadcode() *goanalysis.Linter { + const linterName = "deadcode" + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + issues, err := deadcodeAPI.Run(prog) + if err != nil { + return nil, err + } + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("%s is unused", formatCode(i.UnusedIdentName, nil)), + FromLinter: linterName, + }, pass)) + } + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + }, + } + return goanalysis.NewLinter( + linterName, + "Finds unused code", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go new file mode 100644 index 000000000..aa372e956 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go @@ -0,0 +1,112 @@ +package golinters + +import ( + "fmt" + "strings" + "sync" + + "github.com/OpenPeeDeeP/depguard" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" //nolint:staticcheck // require changes in github.com/OpenPeeDeeP/depguard + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func setDepguardListType(dg *depguard.Depguard, lintCtx *linter.Context) error { + listType := lintCtx.Settings().Depguard.ListType + var found bool + dg.ListType, found = depguard.StringToListType[strings.ToLower(listType)] + if !found { + if listType != "" { + return fmt.Errorf("unsure what list type %s is", listType) + } + dg.ListType = depguard.LTBlacklist + } + + return nil +} + +func setupDepguardPackages(dg *depguard.Depguard, lintCtx *linter.Context) { + if dg.ListType == depguard.LTBlacklist { + // if the list type was a blacklist the packages with error messages should + // be included in the blacklist package list + + noMessagePackages := make(map[string]bool) + for _, pkg := range dg.Packages { + noMessagePackages[pkg] = true + } + + for pkg := range lintCtx.Settings().Depguard.PackagesWithErrorMessage { + if _, ok := 
noMessagePackages[pkg]; !ok { + dg.Packages = append(dg.Packages, pkg) + } + } + } +} + +func NewDepguard() *goanalysis.Linter { + const linterName = "depguard" + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Go linter that checks if package imports are in a list of acceptable packages", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + dgSettings := &lintCtx.Settings().Depguard + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + dg := &depguard.Depguard{ + Packages: dgSettings.Packages, + IncludeGoRoot: dgSettings.IncludeGoRoot, + } + if err := setDepguardListType(dg, lintCtx); err != nil { + return nil, err + } + setupDepguardPackages(dg, lintCtx) + + loadConfig := &loader.Config{ + Cwd: "", // fallbacked to os.Getcwd + Build: nil, // fallbacked to build.Default + } + issues, err := dg.Run(loadConfig, prog) + if err != nil { + return nil, err + } + if len(issues) == 0 { + return nil, nil + } + msgSuffix := "is in the blacklist" + if dg.ListType == depguard.LTWhitelist { + msgSuffix = "is not in the whitelist" + } + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + userSuppliedMsgSuffix := dgSettings.PackagesWithErrorMessage[i.PackageName] + if userSuppliedMsgSuffix != "" { + userSuppliedMsgSuffix = ": " + userSuppliedMsgSuffix + } + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: i.Position, + Text: fmt.Sprintf("%s %s%s", formatCode(i.PackageName, lintCtx.Cfg), msgSuffix, userSuppliedMsgSuffix), + FromLinter: linterName, + }, pass)) + } + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go new file mode 100644 index 000000000..8978ff913 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go @@ -0,0 +1,97 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/token" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const dogsledLinterName = "dogsled" + +func NewDogsled() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: dogsledLinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + dogsledLinterName, + "Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var pkgIssues []goanalysis.Issue + for _, f := range pass.Files { + v := returnsVisitor{ + maxBlanks: lintCtx.Settings().Dogsled.MaxBlankIdentifiers, + f: pass.Fset, + } + ast.Walk(&v, f) + for i := range v.issues { + pkgIssues = append(pkgIssues, goanalysis.NewIssue(&v.issues[i], pass)) + } + } + + mu.Lock() + resIssues = append(resIssues, pkgIssues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +type returnsVisitor struct { + f *token.FileSet + maxBlanks int + issues []result.Issue +} + +func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return v + } + if funcDecl.Body == nil { + return v + } + + for _, expr := range funcDecl.Body.List { + assgnStmt, ok := expr.(*ast.AssignStmt) + if !ok { + continue + } + + numBlank := 0 + for _, left := range assgnStmt.Lhs { + ident, ok := left.(*ast.Ident) + if !ok { + continue + } + if ident.Name == "_" { + numBlank++ + } + } + + if numBlank > v.maxBlanks { + v.issues = append(v.issues, result.Issue{ + FromLinter: dogsledLinterName, + Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), + Pos: v.f.Position(assgnStmt.Pos()), + }) + } + } + return v +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go new file mode 100644 index 000000000..ed1c4fcbd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go @@ -0,0 +1,83 @@ +package golinters + +import ( + "fmt" + "go/token" + "sync" + + duplAPI "github.com/golangci/dupl" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const duplLinterName = "dupl" + +func NewDupl() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: duplLinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + duplLinterName, + "Tool for code clone detection", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + issues, err := duplAPI.Run(fileNames, lintCtx.Settings().Dupl.Threshold) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") + if err != nil { + return nil, errors.Wrapf(err, "failed to get shortest rel path for %q", i.To.Filename()) + } + dupl := fmt.Sprintf("%s:%d-%d", toFilename, i.To.LineStart(), i.To.LineEnd()) + text := fmt.Sprintf("%d-%d lines are duplicate of %s", + i.From.LineStart(), i.From.LineEnd(), + formatCode(dupl, lintCtx.Cfg)) + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.From.Filename(), + Line: i.From.LineStart(), + }, + LineRange: &result.Range{ + From: i.From.LineStart(), + To: i.From.LineEnd(), + }, + Text: text, + FromLinter: duplLinterName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go new file mode 100644 index 000000000..9c452af50 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go @@ -0,0 +1,15 @@ +package golinters + +import ( + "github.com/charithe/durationcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewDurationCheck() *goanalysis.Linter { + a := durationcheck.Analyzer + + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go new file mode 100644 index 000000000..2d9a4fc4c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go @@ -0,0 +1,251 @@ +package golinters + +import ( + "bufio" + "fmt" + "os" + "os/user" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/kisielk/errcheck/errcheck" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewErrcheck() *goanalysis.Linter { + const linterName = "errcheck" + + var mu sync.Mutex + var res []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + + return goanalysis.NewLinter( + linterName, + "Errcheck is a program for checking for unchecked errors "+ + "in go programs. These unchecked errors can be critical bugs in some cases", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + // copied from errcheck + checker, err := getChecker(&lintCtx.Settings().Errcheck) + if err != nil { + lintCtx.Log.Errorf("failed to get checker: %v", err) + return + } + + checker.Tags = lintCtx.Cfg.Run.BuildTags + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + errcheckIssues := checker.CheckPackage(pkg).Unique() + if len(errcheckIssues.UncheckedErrors) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, len(errcheckIssues.UncheckedErrors)) + for i, err := range errcheckIssues.UncheckedErrors { + var text string + if err.FuncName != "" { + text = fmt.Sprintf( + "Error return value of %s is not checked", + formatCode(err.SelectorName, lintCtx.Cfg), + ) + } else { + text = "Error return value is not checked" + } + + issues[i] = goanalysis.NewIssue( + &result.Issue{ + FromLinter: linterName, + Text: text, + Pos: err.Pos, + }, + pass, + ) + } + + mu.Lock() + res = append(res, issues...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return res + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +// parseIgnoreConfig was taken from errcheck in order to keep the API identical. 
+// https://github.com/kisielk/errcheck/blob/1787c4bee836470bf45018cfbc783650db3c6501/main.go#L25-L60 +func parseIgnoreConfig(s string) (map[string]*regexp.Regexp, error) { + if s == "" { + return nil, nil + } + + cfg := map[string]*regexp.Regexp{} + + for _, pair := range strings.Split(s, ",") { + colonIndex := strings.Index(pair, ":") + var pkg, re string + if colonIndex == -1 { + pkg = "" + re = pair + } else { + pkg = pair[:colonIndex] + re = pair[colonIndex+1:] + } + regex, err := regexp.Compile(re) + if err != nil { + return nil, err + } + cfg[pkg] = regex + } + + return cfg, nil +} + +func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { + ignoreConfig, err := parseIgnoreConfig(errCfg.Ignore) + if err != nil { + return nil, errors.Wrap(err, "failed to parse 'ignore' directive") + } + + checker := errcheck.Checker{ + Exclusions: errcheck.Exclusions{ + BlankAssignments: !errCfg.CheckAssignToBlank, + TypeAssertions: !errCfg.CheckTypeAssertions, + SymbolRegexpsByPackage: map[string]*regexp.Regexp{}, + Symbols: append([]string{}, errcheck.DefaultExcludedSymbols...), + }, + } + + for pkg, re := range ignoreConfig { + checker.Exclusions.SymbolRegexpsByPackage[pkg] = re + } + + if errCfg.Exclude != "" { + exclude, err := readExcludeFile(errCfg.Exclude) + if err != nil { + return nil, err + } + + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, exclude...) + } + + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errCfg.ExcludeFunctions...) + + return &checker, nil +} + +func getFirstPathArg() string { + args := os.Args + + // skip all args ([golangci-lint, run/linters]) before files/dirs list + for len(args) != 0 { + if args[0] == "run" { + args = args[1:] + break + } + + args = args[1:] + } + + // find first file/dir arg + firstArg := "./..." 
+ for _, arg := range args { + if !strings.HasPrefix(arg, "-") { + firstArg = arg + break + } + } + + return firstArg +} + +func setupConfigFileSearch(name string) []string { + if strings.HasPrefix(name, "~") { + if u, err := user.Current(); err == nil { + name = strings.Replace(name, "~", u.HomeDir, 1) + } + } + + if filepath.IsAbs(name) { + return []string{name} + } + + firstArg := getFirstPathArg() + + absStartPath, err := filepath.Abs(firstArg) + if err != nil { + absStartPath = filepath.Clean(firstArg) + } + + // start from it + var curDir string + if fsutils.IsDir(absStartPath) { + curDir = absStartPath + } else { + curDir = filepath.Dir(absStartPath) + } + + // find all dirs from it up to the root + configSearchPaths := []string{filepath.Join(".", name)} + for { + configSearchPaths = append(configSearchPaths, filepath.Join(curDir, name)) + newCurDir := filepath.Dir(curDir) + if curDir == newCurDir || newCurDir == "" { + break + } + curDir = newCurDir + } + + return configSearchPaths +} + +func readExcludeFile(name string) ([]string, error) { + var err error + var fh *os.File + + for _, path := range setupConfigFileSearch(name) { + if fh, err = os.Open(path); err == nil { + break + } + } + + if fh == nil { + return nil, errors.Wrapf(err, "failed reading exclude file: %s", name) + } + + scanner := bufio.NewScanner(fh) + + var excludes []string + for scanner.Scan() { + excludes = append(excludes, scanner.Text()) + } + + if err := scanner.Err(); err != nil { + return nil, errors.Wrapf(err, "failed scanning file: %s", name) + } + + return excludes, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go new file mode 100644 index 000000000..7ee811347 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/Antonboom/errname/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrName() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + analyzer.New(), + } + + return goanalysis.NewLinter( + "errname", + "Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go new file mode 100644 index 000000000..dd9d90161 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "github.com/polyfloyd/go-errorlint/errorlint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrorLint(cfg *config.ErrorLintSettings) *goanalysis.Linter { + a := errorlint.NewAnalyzer() + + cfgMap := map[string]map[string]interface{}{} + + if cfg != nil { + cfgMap[a.Name] = map[string]interface{}{ + "errorf": cfg.Errorf, + "asserts": cfg.Asserts, + "comparison": cfg.Comparison, + } + } + + return goanalysis.NewLinter( + a.Name, + "errorlint is a linter for that can be used to find code "+ + "that will cause problems with the error wrapping scheme introduced in Go 1.13.", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go new file mode 100644 index 000000000..9acee6a80 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go @@ -0,0 +1,27 @@ +package golinters + +import ( + "github.com/nishanths/exhaustive" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExhaustive(settings *config.ExhaustiveSettings) *goanalysis.Linter { + a := exhaustive.Analyzer + + var cfg map[string]map[string]interface{} + if settings != nil { + cfg = map[string]map[string]interface{}{ + a.Name: { + exhaustive.CheckGeneratedFlag: settings.CheckGenerated, + exhaustive.DefaultSignifiesExhaustiveFlag: settings.DefaultSignifiesExhaustive, + exhaustive.IgnorePatternFlag: settings.IgnorePattern, + }, + } + } + + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go new file mode 100644 index 000000000..6a1dbd71c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "strings" + + "github.com/mbilski/exhaustivestruct/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExhaustiveStruct(settings *config.ExhaustiveStructSettings) *goanalysis.Linter { + a := analyzer.Analyzer + + var cfg map[string]map[string]interface{} + if settings != nil { + cfg = map[string]map[string]interface{}{ + a.Name: { + "struct_patterns": strings.Join(settings.StructPatterns, ","), + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go new file mode 100644 index 000000000..1131c575b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/kyoh86/exportloopref" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExportLoopRef() *goanalysis.Linter { + a := exportloopref.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go new file mode 100644 index 000000000..2fa9d5183 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go @@ -0,0 +1,70 @@ +package golinters + +import ( + "sync" + + "github.com/ashanbrown/forbidigo/forbidigo" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewForbidigo() *goanalysis.Linter { + const linterName = "forbidigo" + var mu sync.Mutex + var resIssues 
[]goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Forbids identifiers", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + s := &lintCtx.Settings().Forbidigo + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var res []goanalysis.Issue + options := []forbidigo.Option{ + forbidigo.OptionExcludeGodocExamples(s.ExcludeGodocExamples), + // disable "//permit" directives so only "//nolint" directives matters within golangci lint + forbidigo.OptionIgnorePermitDirectives(true), + } + forbid, err := forbidigo.NewLinter(s.Forbid, options...) + if err != nil { + return nil, errors.Wrapf(err, "failed to create linter %q", linterName) + } + + for _, file := range pass.Files { + hints, err := forbid.Run(pass.Fset, file) + if err != nil { + return nil, errors.Wrapf(err, "forbidigo linter failed on file %q", file.Name.String()) + } + for _, hint := range hints { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: linterName, + }, pass)) + } + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert.go new file mode 100644 index 000000000..873c833b5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/gostaticanalysis/forcetypeassert" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewForceTypeAssert() *goanalysis.Linter { + a := forcetypeassert.Analyzer + + return goanalysis.NewLinter( + a.Name, + "finds forced type assertions", + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go new file mode 100644 index 000000000..29cb6b7ef --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go @@ -0,0 +1,64 @@ +package golinters + +import ( + "go/token" + "strings" + "sync" + + "github.com/ultraware/funlen" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const funlenLinterName = "funlen" + +func NewFunlen() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: funlenLinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + funlenLinterName, + "Tool for detection of long functions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var issues []funlen.Message + for _, file := range pass.Files { + fileIssues := funlen.Run(file, pass.Fset, lintCtx.Settings().Funlen.Lines, lintCtx.Settings().Funlen.Statements) + issues = append(issues, fileIssues...) 
+ } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, len(issues)) + for k, i := range issues { + res[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: funlenLinterName, + }, pass) + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go new file mode 100644 index 000000000..49effb813 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go @@ -0,0 +1,98 @@ +package golinters + +import ( + "bytes" + "fmt" + "strings" + "sync" + + "github.com/daixiang0/gci/pkg/gci" + "github.com/pkg/errors" + "github.com/shazow/go-diff/difflib" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +const gciName = "gci" + +func NewGci() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + differ := difflib.New() + + analyzer := &analysis.Analyzer{ + Name: gciName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gciName, + "Gci control golang package import order and make it always deterministic.", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + localFlag := lintCtx.Settings().Gci.LocalPrefixes + goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes + if localFlag == "" && goimportsFlag != "" { + localFlag = goimportsFlag + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var issues []goanalysis.Issue + + flagSet := gci.FlagSet{} + if localFlag != "" { + flagSet.LocalFlag = strings.Split(localFlag, ",") + } + + for _, f := range fileNames { + source, result, err := gci.Run(f, &flagSet) + if err != nil { + return nil, err + } + if result == nil { + continue + } + + diff := bytes.Buffer{} + _, err = diff.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) + if err != nil { + return nil, fmt.Errorf("can't write diff header: %v", err) + } + + err = differ.Diff(&diff, bytes.NewReader(source), bytes.NewReader(result)) + if err != nil { + return nil, fmt.Errorf("can't get gci diff output: %v", err) + } + + is, err := extractIssuesFromPatch(diff.String(), lintCtx.Log, lintCtx, gciName) + if err != nil { + return nil, errors.Wrapf(err, "can't extract issues from gci diff output %q", diff.String()) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go new file mode 100644 index 000000000..b702d1660 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go @@ -0,0 +1,36 @@ +package goanalysis + +import ( + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" //nolint:staticcheck // it's an adapter for golang.org/x/tools/go/packages +) + +func MakeFakeLoaderProgram(pass *analysis.Pass) *loader.Program { + prog := &loader.Program{ + Fset: pass.Fset, + Created: []*loader.PackageInfo{ + { + Pkg: pass.Pkg, + Importable: true, // not used + TransitivelyErrorFree: true, // TODO + + Files: pass.Files, + Errors: nil, + Info: *pass.TypesInfo, + }, + }, + AllPackages: map[*types.Package]*loader.PackageInfo{ + pass.Pkg: { + Pkg: pass.Pkg, + Importable: true, + TransitivelyErrorFree: true, + Files: pass.Files, + Errors: nil, + Info: *pass.TypesInfo, + }, + }, + } + return prog +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go new file mode 100644 index 000000000..13b9ccf0a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go @@ -0,0 +1,72 @@ +package goanalysis + +import ( + "fmt" + + "github.com/pkg/errors" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/lint/linter" + libpackages "github.com/golangci/golangci-lint/pkg/packages" + "github.com/golangci/golangci-lint/pkg/result" +) + +type IllTypedError struct { + Pkg *packages.Package +} + +func (e *IllTypedError) Error() string { + return fmt.Sprintf("errors in package: %v", e.Pkg.Errors) +} + +func buildIssuesFromIllTypedError(errs []error, lintCtx *linter.Context) ([]result.Issue, error) { + var issues []result.Issue + uniqReportedIssues := map[string]bool{} + + var other error + + for _, err := range errs { + err := err + + var ill *IllTypedError + if !errors.As(err, &ill) { + if other == nil { + other = err + } + continue + } + + for _, err := range libpackages.ExtractErrors(ill.Pkg) { + i, perr := parseError(err) + if perr != nil { // failed to parse + if uniqReportedIssues[err.Msg] { + continue + } + uniqReportedIssues[err.Msg] = true + lintCtx.Log.Errorf("typechecking error: %s", err.Msg) + } else { + i.Pkg = ill.Pkg // to save to cache later + issues = append(issues, *i) + } + } + } + + if len(issues) == 0 && other != nil { + return nil, other + } + + return issues, nil +} + +func parseError(srcErr packages.Error) (*result.Issue, error) { + pos, err := libpackages.ParseErrorPosition(srcErr.Pos) + if err != nil { + return nil, err + } + + return &result.Issue{ + Pos: *pos, + Text: srcErr.Msg, + FromLinter: "typecheck", + }, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go new file mode 100644 index 000000000..f331a3ab9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go @@ -0,0 +1,31 @@ +package goanalysis + +import ( + "go/token" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Issue 
struct { + result.Issue + Pass *analysis.Pass +} + +func NewIssue(i *result.Issue, pass *analysis.Pass) Issue { + return Issue{ + Issue: *i, + Pass: pass, + } +} + +type EncodingIssue struct { + FromLinter string + Text string + Pos token.Position + LineRange *result.Range + Replacement *result.Replacement + ExpectNoLint bool + ExpectedNoLintLinter string +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go new file mode 100644 index 000000000..ef49e4284 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go @@ -0,0 +1,213 @@ +package goanalysis + +import ( + "context" + "flag" + "fmt" + "strings" + + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const ( + TheOnlyAnalyzerName = "the_only_name" + TheOnlyanalyzerDoc = "the_only_doc" +) + +type LoadMode int + +func (loadMode LoadMode) String() string { + switch loadMode { + case LoadModeNone: + return "none" + case LoadModeSyntax: + return "syntax" + case LoadModeTypesInfo: + return "types info" + case LoadModeWholeProgram: + return "whole program" + } + panic(fmt.Sprintf("unknown load mode %d", loadMode)) +} + +const ( + LoadModeNone LoadMode = iota + LoadModeSyntax + LoadModeTypesInfo + LoadModeWholeProgram +) + +type Linter struct { + name, desc string + analyzers []*analysis.Analyzer + cfg map[string]map[string]interface{} + issuesReporter func(*linter.Context) []Issue + contextSetter func(*linter.Context) + loadMode LoadMode + needUseOriginalPackages bool +} + +func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]interface{}) *Linter { + return &Linter{name: name, desc: desc, analyzers: analyzers, cfg: cfg} +} + +func (lnt *Linter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + if err := lnt.preRun(lintCtx); err != nil { + return nil, err + } + + return runAnalyzers(lnt, lintCtx) +} + +func (lnt *Linter) UseOriginalPackages() { + lnt.needUseOriginalPackages = true +} + +func (lnt *Linter) LoadMode() LoadMode { + return lnt.loadMode +} + +func (lnt *Linter) WithLoadMode(loadMode LoadMode) *Linter { + lnt.loadMode = loadMode + return lnt +} + +func (lnt *Linter) WithIssuesReporter(r func(*linter.Context) []Issue) *Linter { + lnt.issuesReporter = r + return lnt +} + +func (lnt *Linter) WithContextSetter(cs func(*linter.Context)) *Linter { + lnt.contextSetter = cs + return lnt +} + +func (lnt *Linter) Name() string { + return lnt.name +} + +func (lnt *Linter) Desc() string { + return lnt.desc +} + +func (lnt *Linter) allAnalyzerNames() []string { + var ret []string + for _, a := range lnt.analyzers { + ret = append(ret, a.Name) + } + return ret +} + +func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interface{}) error { + for k, v := range cfg { + f := a.Flags.Lookup(k) + if f == nil { + validFlagNames := allFlagNames(&a.Flags) + if len(validFlagNames) == 0 { + return fmt.Errorf("analyzer doesn't have settings") + } + + return fmt.Errorf("analyzer doesn't have setting %q, valid settings: %v", + k, validFlagNames) + } + + if err := f.Value.Set(valueToString(v)); err != nil { + return errors.Wrapf(err, "failed to set analyzer setting %q with value %v", k, v) + } + } + + return nil +} + +func (lnt *Linter) configure() error { + analyzersMap := map[string]*analysis.Analyzer{} + 
for _, a := range lnt.analyzers { + analyzersMap[a.Name] = a + } + + for analyzerName, analyzerSettings := range lnt.cfg { + a := analyzersMap[analyzerName] + if a == nil { + return fmt.Errorf("settings key %q must be valid analyzer name, valid analyzers: %v", + analyzerName, lnt.allAnalyzerNames()) + } + + if err := lnt.configureAnalyzer(a, analyzerSettings); err != nil { + return errors.Wrapf(err, "failed to configure analyzer %s", analyzerName) + } + } + + return nil +} + +func (lnt *Linter) preRun(lintCtx *linter.Context) error { + if err := analysis.Validate(lnt.analyzers); err != nil { + return errors.Wrap(err, "failed to validate analyzers") + } + + if err := lnt.configure(); err != nil { + return errors.Wrap(err, "failed to configure analyzers") + } + + if lnt.contextSetter != nil { + lnt.contextSetter(lintCtx) + } + + return nil +} + +func (lnt *Linter) getName() string { + return lnt.name +} + +func (lnt *Linter) getLinterNameForDiagnostic(*Diagnostic) string { + return lnt.name +} + +func (lnt *Linter) getAnalyzers() []*analysis.Analyzer { + return lnt.analyzers +} + +func (lnt *Linter) useOriginalPackages() bool { + return lnt.needUseOriginalPackages +} + +func (lnt *Linter) reportIssues(lintCtx *linter.Context) []Issue { + if lnt.issuesReporter != nil { + return lnt.issuesReporter(lintCtx) + } + return nil +} + +func (lnt *Linter) getLoadMode() LoadMode { + return lnt.loadMode +} + +func allFlagNames(fs *flag.FlagSet) []string { + var ret []string + fs.VisitAll(func(f *flag.Flag) { + ret = append(ret, f.Name) + }) + return ret +} + +func valueToString(v interface{}) string { + if ss, ok := v.([]string); ok { + return strings.Join(ss, ",") + } + + if is, ok := v.([]interface{}); ok { + var ss []string + for _, i := range is { + ss = append(ss, fmt.Sprint(i)) + } + + return valueToString(ss) + } + + return fmt.Sprint(v) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load/guard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load/guard.go new file mode 100644 index 000000000..ab7775cc8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load/guard.go @@ -0,0 +1,30 @@ +package load + +import ( + "sync" + + "golang.org/x/tools/go/packages" +) + +type Guard struct { + loadMutexes map[*packages.Package]*sync.Mutex + mutex sync.Mutex +} + +func NewGuard() *Guard { + return &Guard{ + loadMutexes: map[*packages.Package]*sync.Mutex{}, + } +} + +func (g *Guard) AddMutexForPkg(pkg *packages.Package) { + g.loadMutexes[pkg] = &sync.Mutex{} +} + +func (g *Guard) MutexForPkg(pkg *packages.Package) *sync.Mutex { + return g.loadMutexes[pkg] +} + +func (g *Guard) Mutex() *sync.Mutex { + return &g.mutex +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go new file mode 100644 index 000000000..5c24d1096 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go @@ -0,0 +1,90 @@ +package goanalysis + +import ( + "context" + + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +type MetaLinter struct { + linters []*Linter + analyzerToLinterName map[*analysis.Analyzer]string +} + +func NewMetaLinter(linters []*Linter) *MetaLinter { + ml := &MetaLinter{linters: linters} + ml.analyzerToLinterName = ml.getAnalyzerToLinterNameMapping() 
+ return ml +} + +func (ml MetaLinter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + for _, l := range ml.linters { + if err := l.preRun(lintCtx); err != nil { + return nil, errors.Wrapf(err, "failed to pre-run %s", l.Name()) + } + } + + return runAnalyzers(ml, lintCtx) +} + +func (ml MetaLinter) Name() string { + return "goanalysis_metalinter" +} + +func (ml MetaLinter) Desc() string { + return "" +} + +func (ml MetaLinter) getLoadMode() LoadMode { + loadMode := LoadModeNone + for _, l := range ml.linters { + if l.loadMode > loadMode { + loadMode = l.loadMode + } + } + return loadMode +} + +func (ml MetaLinter) getAnalyzers() []*analysis.Analyzer { + var allAnalyzers []*analysis.Analyzer + for _, l := range ml.linters { + allAnalyzers = append(allAnalyzers, l.analyzers...) + } + return allAnalyzers +} + +func (ml MetaLinter) getName() string { + return "metalinter" +} + +func (ml MetaLinter) useOriginalPackages() bool { + return false // `unused` can't be run by this metalinter +} + +func (ml MetaLinter) reportIssues(lintCtx *linter.Context) []Issue { + var ret []Issue + for _, lnt := range ml.linters { + if lnt.issuesReporter != nil { + ret = append(ret, lnt.issuesReporter(lintCtx)...) + } + } + return ret +} + +func (ml MetaLinter) getLinterNameForDiagnostic(diag *Diagnostic) string { + return ml.analyzerToLinterName[diag.Analyzer] +} + +func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string { + analyzerToLinterName := map[*analysis.Analyzer]string{} + for _, l := range ml.linters { + for _, a := range l.analyzers { + analyzerToLinterName[a] = l.Name() + } + } + return analyzerToLinterName +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go new file mode 100644 index 000000000..8b460d16b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go @@ -0,0 +1,342 @@ +// checker is a partial copy of https://github.com/golang/tools/blob/master/go/analysis/internal/checker +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goanalysis defines the implementation of the checker commands. +// The same code drives the multi-analysis driver, the single-analysis +// driver that is conventionally provided for convenience along with +// each analysis package, and the test driver. 
+package goanalysis + +import ( + "encoding/gob" + "go/token" + "runtime" + "sort" + "sync" + + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/errorutil" + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +var ( + debugf = logutils.Debug("goanalysis") + + analyzeDebugf = logutils.Debug("goanalysis/analyze") + isMemoryDebug = logutils.HaveDebugTag("goanalysis/memory") + issuesCacheDebugf = logutils.Debug("goanalysis/issues/cache") + + factsDebugf = logutils.Debug("goanalysis/facts") + factsCacheDebugf = logutils.Debug("goanalysis/facts/cache") + factsInheritDebugf = logutils.Debug("goanalysis/facts/inherit") + factsExportDebugf = logutils.Debug("goanalysis/facts") + isFactsExportDebug = logutils.HaveDebugTag("goanalysis/facts/export") +) + +type Diagnostic struct { + analysis.Diagnostic + Analyzer *analysis.Analyzer + Position token.Position + Pkg *packages.Package +} + +type runner struct { + log logutils.Log + prefix string // ensure unique analyzer names + pkgCache *pkgcache.Cache + loadGuard *load.Guard + loadMode LoadMode + passToPkg map[*analysis.Pass]*packages.Package + passToPkgGuard sync.Mutex + sw *timeutils.Stopwatch +} + +func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, + loadMode LoadMode, sw *timeutils.Stopwatch) *runner { + return &runner{ + prefix: prefix, + log: logger, + pkgCache: pkgCache, + loadGuard: loadGuard, + loadMode: loadMode, + passToPkg: map[*analysis.Pass]*packages.Package{}, + sw: sw, + } +} + +// Run loads the packages specified by args using go/packages, +// then applies the specified analyzers to them. +// Analysis flags must already have been set. +// It provides most of the logic for the main functions of both the +// singlechecker and the multi-analysis commands. +// It returns the appropriate exit code. 
+func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, + []error, map[*analysis.Pass]*packages.Package) { + debugf("Analyzing %d packages on load mode %s", len(initialPackages), r.loadMode) + defer r.pkgCache.Trim() + + roots := r.analyze(initialPackages, analyzers) + + diags, errs := extractDiagnostics(roots) + + return diags, errs, r.passToPkg +} + +type actKey struct { + *analysis.Analyzer + *packages.Package +} + +func (r *runner) markAllActions(a *analysis.Analyzer, pkg *packages.Package, markedActions map[actKey]struct{}) { + k := actKey{a, pkg} + if _, ok := markedActions[k]; ok { + return + } + + for _, req := range a.Requires { + r.markAllActions(req, pkg, markedActions) + } + + if len(a.FactTypes) != 0 { + for path := range pkg.Imports { + r.markAllActions(a, pkg.Imports[path], markedActions) + } + } + + markedActions[k] = struct{}{} +} + +func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, + initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator) *action { + k := actKey{a, pkg} + act, ok := actions[k] + if ok { + return act + } + + act = actAlloc.alloc() + act.a = a + act.pkg = pkg + act.r = r + act.isInitialPkg = initialPkgs[pkg] + act.needAnalyzeSource = initialPkgs[pkg] + act.analysisDoneCh = make(chan struct{}) + + depsCount := len(a.Requires) + if len(a.FactTypes) > 0 { + depsCount += len(pkg.Imports) + } + act.deps = make([]*action, 0, depsCount) + + // Add a dependency on each required analyzers. + for _, req := range a.Requires { + act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) + } + + r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc) + + actions[k] = act + + return act +} + +func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *packages.Package, + initialPkgs map[*packages.Package]bool, actions map[actKey]*action, actAlloc *actionAllocator) { + // An analysis that consumes/produces facts + // must run on the package's dependencies too. + if len(a.FactTypes) == 0 { + return + } + + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // for determinism + for _, path := range paths { + dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) + act.deps = append(act.deps, dep) + } + + // Need to register fact types for pkgcache proper gob encoding. + for _, f := range a.FactTypes { + gob.Register(f) + } +} + +//nolint:gocritic +func (r *runner) prepareAnalysis(pkgs []*packages.Package, + analyzers []*analysis.Analyzer) (map[*packages.Package]bool, []*action, []*action) { + // Construct the action graph. + + // Each graph node (action) is one unit of analysis. + // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. + + // This place is memory-intensive: e.g. Istio project has 120k total actions. + // Therefore optimize it carefully. 
+ markedActions := make(map[actKey]struct{}, len(analyzers)*len(pkgs)) + for _, a := range analyzers { + for _, pkg := range pkgs { + r.markAllActions(a, pkg, markedActions) + } + } + totalActionsCount := len(markedActions) + + actions := make(map[actKey]*action, totalActionsCount) + actAlloc := newActionAllocator(totalActionsCount) + + initialPkgs := make(map[*packages.Package]bool, len(pkgs)) + for _, pkg := range pkgs { + initialPkgs[pkg] = true + } + + // Build nodes for initial packages. + roots := make([]*action, 0, len(pkgs)*len(analyzers)) + for _, a := range analyzers { + for _, pkg := range pkgs { + root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc) + root.isroot = true + roots = append(roots, root) + } + } + + allActions := make([]*action, 0, len(actions)) + for _, act := range actions { + allActions = append(allActions, act) + } + + debugf("Built %d actions", len(actions)) + + return initialPkgs, allActions, roots +} + +func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { + initialPkgs, actions, rootActions := r.prepareAnalysis(pkgs, analyzers) + + actionPerPkg := map[*packages.Package][]*action{} + for _, act := range actions { + actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act) + } + + // Fill Imports field. + loadingPackages := map[*packages.Package]*loadingPackage{} + var dfs func(pkg *packages.Package) + dfs = func(pkg *packages.Package) { + if loadingPackages[pkg] != nil { + return + } + + imports := map[string]*loadingPackage{} + for impPath, imp := range pkg.Imports { + dfs(imp) + impLp := loadingPackages[imp] + impLp.dependents++ + imports[impPath] = impLp + } + + loadingPackages[pkg] = &loadingPackage{ + pkg: pkg, + imports: imports, + isInitial: initialPkgs[pkg], + log: r.log, + actions: actionPerPkg[pkg], + loadGuard: r.loadGuard, + dependents: 1, // self dependent + } + } + for _, act := range actions { + dfs(act.pkg) + } + + // Limit memory and IO usage. + gomaxprocs := runtime.GOMAXPROCS(-1) + debugf("Analyzing at most %d packages in parallel", gomaxprocs) + loadSem := make(chan struct{}, gomaxprocs) + + var wg sync.WaitGroup + debugf("There are %d initial and %d total packages", len(initialPkgs), len(loadingPackages)) + for _, lp := range loadingPackages { + if lp.isInitial { + wg.Add(1) + go func(lp *loadingPackage) { + lp.analyzeRecursive(r.loadMode, loadSem) + wg.Done() + }(lp) + } + } + wg.Wait() + + return rootActions +} + +//nolint:nakedret +func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []error) { + extracted := make(map[*action]bool) + var extract func(*action) + var visitAll func(actions []*action) + visitAll = func(actions []*action) { + for _, act := range actions { + if !extracted[act] { + extracted[act] = true + visitAll(act.deps) + extract(act) + } + } + } + + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. + type key struct { + token.Position + *analysis.Analyzer + message string + } + seen := make(map[key]bool) + + extract = func(act *action) { + if act.err != nil { + if pe, ok := act.err.(*errorutil.PanicError); ok { + panic(pe) + } + retErrors = append(retErrors, errors.Wrap(act.err, act.a.Name)) + return + } + + if act.isroot { + for _, diag := range act.diagnostics { + // We don't display a.Name/f.Category + // as most users don't care. 
+ + posn := act.pkg.Fset.Position(diag.Pos) + k := key{posn, act.a, diag.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + retDiag := Diagnostic{ + Diagnostic: diag, + Analyzer: act.a, + Position: posn, + Pkg: act.pkg, + } + retDiags = append(retDiags, retDiag) + } + } + } + visitAll(roots) + return +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go new file mode 100644 index 000000000..96c613e83 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go @@ -0,0 +1,381 @@ +package goanalysis + +import ( + "fmt" + "go/types" + "reflect" + "runtime/debug" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/types/objectpath" + + "github.com/golangci/golangci-lint/internal/errorutil" + "github.com/golangci/golangci-lint/internal/pkgcache" +) + +type actionAllocator struct { + allocatedActions []action + nextFreeIndex int +} + +func newActionAllocator(maxCount int) *actionAllocator { + return &actionAllocator{ + allocatedActions: make([]action, maxCount), + nextFreeIndex: 0, + } +} + +func (actAlloc *actionAllocator) alloc() *action { + if actAlloc.nextFreeIndex == len(actAlloc.allocatedActions) { + panic(fmt.Sprintf("Made too many allocations of actions: %d allowed", len(actAlloc.allocatedActions))) + } + act := &actAlloc.allocatedActions[actAlloc.nextFreeIndex] + actAlloc.nextFreeIndex++ + return act +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). 
+type action struct { + a *analysis.Analyzer + pkg *packages.Package + pass *analysis.Pass + deps []*action + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + result interface{} + diagnostics []analysis.Diagnostic + err error + r *runner + analysisDoneCh chan struct{} + loadCachedFactsDone bool + loadCachedFactsOk bool + isroot bool + isInitialPkg bool + needAnalyzeSource bool +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a, act.pkg) +} + +func (act *action) loadCachedFacts() bool { + if act.loadCachedFactsDone { // can't be set in parallel + return act.loadCachedFactsOk + } + + res := func() bool { + if act.isInitialPkg { + return true // load cached facts only for non-initial packages + } + + if len(act.a.FactTypes) == 0 { + return true // no need to load facts + } + + return act.loadPersistedFacts() + }() + act.loadCachedFactsDone = true + act.loadCachedFactsOk = res + return res +} + +func (act *action) waitUntilDependingAnalyzersWorked() { + for _, dep := range act.deps { + if dep.pkg == act.pkg { + <-dep.analysisDoneCh + } + } +} + +func (act *action) analyzeSafe() { + defer func() { + if p := recover(); p != nil { + act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", + act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) + } + }() + act.r.sw.TrackStage(act.a.Name, func() { + act.analyze() + }) +} + +func (act *action) analyze() { + defer close(act.analysisDoneCh) // unblock actions depending on this action + + if !act.needAnalyzeSource { + return + } + + defer func(now time.Time) { + analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) + }(time.Now()) + + // Report an error if any dependency failures. + var depErrors *multierror.Error + for _, dep := range act.deps { + if dep.err == nil { + continue + } + + depErrors = multierror.Append(depErrors, errors.Cause(dep.err)) + } + if depErrors != nil { + depErrors.ErrorFormat = func(e []error) string { + return fmt.Sprintf("failed prerequisites: %v", e) + } + + act.err = depErrors + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]interface{}) + startedAt := time.Now() + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.a] = dep.result + } else if dep.a == act.a { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt)) + + // Run the analysis. 
+ pass := &analysis.Pass{ + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + OtherFiles: act.pkg.OtherFiles, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + TypesSizes: act.pkg.TypesSizes, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: act.importObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.importPackageFact, + ExportPackageFact: act.exportPackageFact, + AllObjectFacts: act.allObjectFacts, + AllPackageFacts: act.allPackageFacts, + } + act.pass = pass + act.r.passToPkgGuard.Lock() + act.r.passToPkg[pass] = act.pkg + act.r.passToPkgGuard.Unlock() + + if act.pkg.IllTyped { + // It looks like there should be !pass.Analyzer.RunDespiteErrors + // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here + // but it exit before it if packages.Load have failed. + act.err = errors.Wrap(&IllTypedError{Pkg: act.pkg}, "analysis skipped") + } else { + startedAt = time.Now() + act.result, act.err = pass.Analyzer.Run(pass) + analyzedIn := time.Since(startedAt) + if analyzedIn > time.Millisecond*10 { + debugf("%s: run analyzer in %s", act, analyzedIn) + } + } + + // disallow calls after Run + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil + + if err := act.persistFactsToCache(); err != nil { + act.r.log.Warnf("Failed to persist facts to cache: %s", err) + } +} + +// importObjectFact implements Pass.ImportObjectFact. +// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, +// importObjectFact copies the fact value to *ptr. +func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := objectFactKey{obj, act.factType(ptr)} + if v, ok := act.objectFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportObjectFact implements Pass.ExportObjectFact. +func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { + if obj.Pkg() != act.pkg.Types { + act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.a, act.pkg, obj, fact) + } + + key := objectFactKey{obj, act.factType(fact)} + act.objectFacts[key] = fact // clobber any existing entry + if isFactsExportDebug { + objstr := types.ObjectString(obj, (*types.Package).Name) + factsExportDebugf("%s: object %s has fact %s\n", + act.pkg.Fset.Position(obj.Pos()), objstr, fact) + } +} + +func (act *action) allObjectFacts() []analysis.ObjectFact { + out := make([]analysis.ObjectFact, 0, len(act.objectFacts)) + for key, fact := range act.objectFacts { + out = append(out, analysis.ObjectFact{ + Object: key.obj, + Fact: fact, + }) + } + return out +} + +// importPackageFact implements Pass.ImportPackageFact. +// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, +// fact copies the fact value to *ptr. +func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, act.factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. 
+func (act *action) exportPackageFact(fact analysis.Fact) { + key := packageFactKey{act.pass.Pkg, act.factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + factsDebugf("%s: package %s has fact %s\n", + act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) +} + +func (act *action) allPackageFacts() []analysis.PackageFact { + out := make([]analysis.PackageFact, 0, len(act.packageFacts)) + for key, fact := range act.packageFacts { + out = append(out, analysis.PackageFact{ + Package: key.pkg, + Fact: fact, + }) + } + return out +} + +func (act *action) factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + act.r.log.Fatalf("invalid Fact type: got %T, want pointer", t) + } + return t +} + +func (act *action) persistFactsToCache() error { + analyzer := act.a + if len(analyzer.FactTypes) == 0 { + return nil + } + + // Merge new facts into the package and persist them. + var facts []Fact + for key, fact := range act.packageFacts { + if key.pkg != act.pkg.Types { + // The fact is from inherited facts from another package + continue + } + facts = append(facts, Fact{ + Path: "", + Fact: fact, + }) + } + for key, fact := range act.objectFacts { + obj := key.obj + if obj.Pkg() != act.pkg.Types { + // The fact is from inherited facts from another package + continue + } + + path, err := objectpath.For(obj) + if err != nil { + // The object is not globally addressable + continue + } + + facts = append(facts, Fact{ + Path: string(path), + Fact: fact, + }) + } + + factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + + key := fmt.Sprintf("%s/facts", analyzer.Name) + return act.r.pkgCache.Put(act.pkg, pkgcache.HashModeNeedAllDeps, key, facts) +} + +func (act *action) loadPersistedFacts() bool { + var facts []Fact + key := fmt.Sprintf("%s/facts", act.a.Name) + if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil { + if err != pkgcache.ErrMissing { + act.r.log.Warnf("Failed to get persisted facts: %s", err) + } + + factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) + return false + } + + factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + + for _, f := range facts { + if f.Path == "" { // this is a package fact + key := packageFactKey{act.pkg.Types, act.factType(f.Fact)} + act.packageFacts[key] = f.Fact + continue + } + obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path)) + if err != nil { + // Be lenient about these errors. For example, when + // analyzing io/ioutil from source, we may get a fact + // for methods on the devNull type, and objectpath + // will happily create a path for them. However, when + // we later load io/ioutil from export data, the path + // no longer resolves. + // + // If an exported type embeds the unexported type, + // then (part of) the unexported type will become part + // of the type information and our path will resolve + // again. + continue + } + factKey := objectFactKey{obj, act.factType(f.Fact)} + act.objectFacts[factKey] = f.Fact + } + + return true +} + +func (act *action) markDepsForAnalyzingSource() { + // Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing + // this action. + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Analyze source only for horizontal dependencies, e.g. from "buildssa". 
+ dep.needAnalyzeSource = true // can't be set in parallel + } + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_facts.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_facts.go new file mode 100644 index 000000000..1d0fb974e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_facts.go @@ -0,0 +1,125 @@ +package goanalysis + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +type objectFactKey struct { + obj types.Object + typ reflect.Type +} + +type packageFactKey struct { + pkg *types.Package + typ reflect.Type +} + +type Fact struct { + Path string // non-empty only for object facts + Fact analysis.Fact +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *action) { + serialize := false + + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.obj, dep.pkg.Types) { + factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. + if serialize { + var err error + fact, err = codeFact(fact) + if err != nil { + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + } + + factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. + if serialize { + var err error + fact, err = codeFact(fact) + if err != nil { + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + } + + factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. + // This helps detect nondeterministic Gob encoding (e.g. of maps). + var buf2 bytes.Buffer + if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { + return nil, err + } + if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { + return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) + } + + newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) + if err := gob.NewDecoder(&buf).Decode(newFact); err != nil { + return nil, err + } + return newFact, nil +} + +// exportedFrom reports whether obj may be visible to a package that imports pkg. +// This includes not just the exported members of pkg, but also unexported +// constants, types, fields, and methods, perhaps belonging to other packages, +// that find there way into the API. 
+// This is an over-approximation of the more accurate approach used by +// gc export data, which walks the type graph, but it's much simpler. +// +// TODO(adonovan): do more accurate filtering by walking the type graph. +func exportedFrom(obj types.Object, pkg *types.Package) bool { + switch obj := obj.(type) { + case *types.Func: + return obj.Exported() && obj.Pkg() == pkg || + obj.Type().(*types.Signature).Recv() != nil + case *types.Var: + return obj.Exported() && obj.Pkg() == pkg || + obj.IsField() + case *types.TypeName, *types.Const: + return true + } + return false // Nil, Builtin, Label, or PkgName +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go new file mode 100644 index 000000000..9fa396854 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go @@ -0,0 +1,497 @@ +package goanalysis + +import ( + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/types" + "os" + "reflect" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const unsafePkgName = "unsafe" + +type loadingPackage struct { + pkg *packages.Package + imports map[string]*loadingPackage + isInitial bool + log logutils.Log + actions []*action // all actions with this package + loadGuard *load.Guard + dependents int32 // number of depending on it packages + analyzeOnce sync.Once + decUseMutex sync.Mutex +} + +func (lp *loadingPackage) analyzeRecursive(loadMode LoadMode, loadSem chan struct{}) { + lp.analyzeOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + wg.Add(len(lp.imports)) + for _, imp := range lp.imports { + go func(imp *loadingPackage) { + imp.analyzeRecursive(loadMode, loadSem) + wg.Done() + }(imp) + } + wg.Wait() + lp.analyze(loadMode, loadSem) + }) +} + +func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { + loadSem <- struct{}{} + defer func() { + <-loadSem + }() + + // Save memory on unused more fields. + defer lp.decUse(loadMode < LoadModeWholeProgram) + + if err := lp.loadWithFacts(loadMode); err != nil { + werr := errors.Wrapf(err, "failed to load package %s", lp.pkg.Name) + // Don't need to write error to errCh, it will be extracted and reported on another layer. + // Unblock depending actions and propagate error. + for _, act := range lp.actions { + close(act.analysisDoneCh) + act.err = werr + } + return + } + + var actsWg sync.WaitGroup + actsWg.Add(len(lp.actions)) + for _, act := range lp.actions { + go func(act *action) { + defer actsWg.Done() + + act.waitUntilDependingAnalyzersWorked() + + act.analyzeSafe() + }(act) + } + actsWg.Wait() +} + +func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { + pkg := lp.pkg + + // Many packages have few files, much fewer than there + // are CPU cores. Additionally, parsing each individual file is + // very fast. A naive parallel implementation of this loop won't + // be faster, and tends to be slower due to extra scheduling, + // bookkeeping and potentially false sharing of cache lines. 
+ pkg.Syntax = make([]*ast.File, 0, len(pkg.CompiledGoFiles)) + for _, file := range pkg.CompiledGoFiles { + f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments) + if err != nil { + pkg.Errors = append(pkg.Errors, lp.convertError(err)...) + continue + } + pkg.Syntax = append(pkg.Syntax, f) + } + if len(pkg.Errors) != 0 { + pkg.IllTyped = true + return nil + } + + if loadMode == LoadModeSyntax { + return nil + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + pkg.IllTyped = true + + pkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + importer := func(path string) (*types.Package, error) { + if path == unsafePkgName { + return types.Unsafe, nil + } + if path == "C" { + // go/packages doesn't tell us that cgo preprocessing + // failed. When we subsequently try to parse the package, + // we'll encounter the raw C import. + return nil, errors.New("cgo preprocessing failed") + } + imp := pkg.Imports[path] + if imp == nil { + return nil, nil + } + if len(imp.Errors) > 0 { + return nil, imp.Errors[0] + } + return imp.Types, nil + } + tc := &types.Config{ + Importer: importerFunc(importer), + Error: func(err error) { + pkg.Errors = append(pkg.Errors, lp.convertError(err)...) + }, + } + _ = types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax) + // Don't handle error here: errors are adding by tc.Error function. + + illTyped := len(pkg.Errors) != 0 + if !illTyped { + for _, imp := range lp.imports { + if imp.pkg.IllTyped { + illTyped = true + break + } + } + } + pkg.IllTyped = illTyped + return nil +} + +func (lp *loadingPackage) loadFromExportData() error { + pkg := lp.pkg + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. 
+ pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + pkg.IllTyped = true + for path, pkg := range pkg.Imports { + if pkg.Types == nil { + return fmt.Errorf("dependency %q hasn't been loaded yet", path) + } + } + if pkg.ExportFile == "" { + return fmt.Errorf("no export data for %q", pkg.ID) + } + f, err := os.Open(pkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + r, err := gcexportdata.NewReader(f) + if err != nil { + return err + } + + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*packages.Package]bool) // all visited packages + var visit func(pkgs map[string]*packages.Package) + visit = func(pkgs map[string]*packages.Package) { + for _, pkg := range pkgs { + if !seen[pkg] { + seen[pkg] = true + view[pkg.PkgPath] = pkg.Types + visit(pkg.Imports) + } + } + } + visit(pkg.Imports) + tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath) + if err != nil { + return err + } + pkg.Types = tpkg + pkg.IllTyped = false + return nil +} + +func (lp *loadingPackage) loadWithFacts(loadMode LoadMode) error { + pkg := lp.pkg + + if pkg.PkgPath == unsafePkgName { + // Fill in the blanks to avoid surprises. + pkg.Syntax = []*ast.File{} + if loadMode >= LoadModeTypesInfo { + pkg.Types = types.Unsafe + pkg.TypesInfo = new(types.Info) + } + return nil + } + + if pkg.TypesInfo != nil { + // Already loaded package, e.g. because another not go/analysis linter required types for deps. + // Try load cached facts for it. + + for _, act := range lp.actions { + if !act.loadCachedFacts() { + // Cached facts loading failed: analyze later the action from source. + act.needAnalyzeSource = true + factsCacheDebugf("Loading of facts for already loaded %s failed, analyze it from source later", act) + act.markDepsForAnalyzingSource() + } + } + return nil + } + + if lp.isInitial { + // No need to load cached facts: the package will be analyzed from source + // because it's the initial. + return lp.loadFromSource(loadMode) + } + + return lp.loadImportedPackageWithFacts(loadMode) +} + +func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error { + pkg := lp.pkg + + // Load package from export data + if loadMode >= LoadModeTypesInfo { + if err := lp.loadFromExportData(); err != nil { + // We asked Go to give us up to date export data, yet + // we can't load it. There must be something wrong. + // + // Attempt loading from source. This should fail (because + // otherwise there would be export data); we just want to + // get the compile errors. If loading from source succeeds + // we discard the result, anyway. Otherwise we'll fail + // when trying to reload from export data later. + + // Otherwise it panics because uses already existing (from exported data) types. + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + if srcErr := lp.loadFromSource(loadMode); srcErr != nil { + return srcErr + } + // Make sure this package can't be imported successfully + pkg.Errors = append(pkg.Errors, packages.Error{ + Pos: "-", + Msg: fmt.Sprintf("could not load export data: %s", err), + Kind: packages.ParseError, + }) + return errors.Wrap(err, "could not load export data") + } + } + + needLoadFromSource := false + for _, act := range lp.actions { + if act.loadCachedFacts() { + continue + } + + // Cached facts loading failed: analyze later the action from source. 
+ factsCacheDebugf("Loading of facts for %s failed, analyze it from source later", act) + act.needAnalyzeSource = true // can't be set in parallel + needLoadFromSource = true + + act.markDepsForAnalyzingSource() + } + + if needLoadFromSource { + // Cached facts loading failed: analyze later the action from source. To perform + // the analysis we need to load the package from source code. + + // Otherwise it panics because uses already existing (from exported data) types. + if loadMode >= LoadModeTypesInfo { + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + } + return lp.loadFromSource(loadMode) + } + + return nil +} + +func (lp *loadingPackage) decUse(canClearTypes bool) { + lp.decUseMutex.Lock() + defer lp.decUseMutex.Unlock() + + for _, act := range lp.actions { + pass := act.pass + if pass == nil { + continue + } + + pass.Files = nil + pass.TypesInfo = nil + pass.TypesSizes = nil + pass.ResultOf = nil + pass.Pkg = nil + pass.OtherFiles = nil + pass.AllObjectFacts = nil + pass.AllPackageFacts = nil + pass.ImportObjectFact = nil + pass.ExportObjectFact = nil + pass.ImportPackageFact = nil + pass.ExportPackageFact = nil + act.pass = nil + act.deps = nil + if act.result != nil { + if isMemoryDebug { + debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result)) + } + act.result = nil + } + } + + lp.pkg.Syntax = nil + lp.pkg.TypesInfo = nil + lp.pkg.TypesSizes = nil + + // Can't set lp.pkg.Imports to nil because of loadFromExportData.visit. + + dependents := atomic.AddInt32(&lp.dependents, -1) + if dependents != 0 { + return + } + + if canClearTypes { + // canClearTypes is set to true if we can discard type + // information after the package and its dependents have been + // processed. This is the case when no whole program checkers (unused) are + // being run. + lp.pkg.Types = nil + } + lp.pkg = nil + + for _, imp := range lp.imports { + imp.decUse(canClearTypes) + } + lp.imports = nil + + for _, act := range lp.actions { + if !lp.isInitial { + act.pkg = nil + } + act.packageFacts = nil + act.objectFacts = nil + } + lp.actions = nil +} + +func (lp *loadingPackage) convertError(err error) []packages.Error { + var errs []packages.Error + // taken from go/packages + switch err := err.(type) { + case packages.Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, packages.Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: packages.ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, packages.Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: packages.ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, packages.Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: packages.TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, packages.Error{ + Pos: "-", + Msg: err.Error(), + Kind: packages.UnknownError, + }) + + // If you see this error message, please file a bug. 
+ lp.log.Warnf("Internal error: error %q (%T) without position", err, err) + } + return errs +} + +func (lp *loadingPackage) String() string { + return fmt.Sprintf("%s@%s", lp.pkg.PkgPath, lp.pkg.Name) +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +func sizeOfValueTreeBytes(v interface{}) int { + return sizeOfReflectValueTreeBytes(reflect.ValueOf(v), map[uintptr]struct{}{}) +} + +func sizeOfReflectValueTreeBytes(rv reflect.Value, visitedPtrs map[uintptr]struct{}) int { + switch rv.Kind() { + case reflect.Ptr: + ptrSize := int(rv.Type().Size()) + if rv.IsNil() { + return ptrSize + } + ptr := rv.Pointer() + if _, ok := visitedPtrs[ptr]; ok { + return 0 + } + visitedPtrs[ptr] = struct{}{} + return ptrSize + sizeOfReflectValueTreeBytes(rv.Elem(), visitedPtrs) + case reflect.Interface: + if rv.IsNil() { + return 0 + } + return sizeOfReflectValueTreeBytes(rv.Elem(), visitedPtrs) + case reflect.Struct: + ret := 0 + for i := 0; i < rv.NumField(); i++ { + ret += sizeOfReflectValueTreeBytes(rv.Field(i), visitedPtrs) + } + return ret + case reflect.Slice, reflect.Array, reflect.Chan: + return int(rv.Type().Size()) + rv.Cap()*int(rv.Type().Elem().Size()) + case reflect.Map: + ret := 0 + for _, key := range rv.MapKeys() { + mv := rv.MapIndex(key) + ret += sizeOfReflectValueTreeBytes(key, visitedPtrs) + ret += sizeOfReflectValueTreeBytes(mv, visitedPtrs) + } + return ret + case reflect.String: + return rv.Len() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, + reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer: + return int(rv.Type().Size()) + case reflect.Invalid: + return 0 + default: + panic("unknown rv of type " + fmt.Sprint(rv)) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go new file mode 100644 index 000000000..7e4cf902e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go @@ -0,0 +1,269 @@ +package goanalysis + +import ( + "fmt" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +type runAnalyzersConfig interface { + getName() string + getLinterNameForDiagnostic(*Diagnostic) string + getAnalyzers() []*analysis.Analyzer + useOriginalPackages() bool + reportIssues(*linter.Context) []Issue + getLoadMode() LoadMode +} + +func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) { + log := lintCtx.Log.Child("goanalysis") + sw := timeutils.NewStopwatch("analyzers", log) + + const stagesToPrint = 10 + defer sw.PrintTopStages(stagesToPrint) + + runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw) + + pkgs := lintCtx.Packages + if cfg.useOriginalPackages() { + pkgs = lintCtx.OriginalPackages + } + + issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers()) + var pkgsToAnalyze []*packages.Package + for _, pkg := range pkgs { + if 
!pkgsFromCache[pkg] { + pkgsToAnalyze = append(pkgsToAnalyze, pkg) + } + } + + diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze) + + defer func() { + if len(errs) == 0 { + // If we try to save to cache even if we have compilation errors + // we won't see them on repeated runs. + saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers()) + } + }() + + buildAllIssues := func() []result.Issue { + var retIssues []result.Issue + reportedIssues := cfg.reportIssues(lintCtx) + for i := range reportedIssues { + issue := &reportedIssues[i].Issue + if issue.Pkg == nil { + issue.Pkg = passToPkg[reportedIssues[i].Pass] + } + retIssues = append(retIssues, *issue) + } + retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...) + return retIssues + } + + errIssues, err := buildIssuesFromIllTypedError(errs, lintCtx) + if err != nil { + return nil, err + } + + issues = append(issues, errIssues...) + issues = append(issues, buildAllIssues()...) + + return issues, nil +} + +func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue { + var issues []result.Issue + for i := range diags { + diag := &diags[i] + linterName := linterNameBuilder(diag) + + var text string + if diag.Analyzer.Name == linterName { + text = diag.Message + } else { + text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) + } + + issues = append(issues, result.Issue{ + FromLinter: linterName, + Text: text, + Pos: diag.Position, + Pkg: diag.Pkg, + }) + + if len(diag.Related) > 0 { + for _, info := range diag.Related { + issues = append(issues, result.Issue{ + FromLinter: linterName, + Text: fmt.Sprintf("%s(related information): %s", diag.Analyzer.Name, info.Message), + Pos: diag.Pkg.Fset.Position(info.Pos), + Pkg: diag.Pkg, + }) + } + } + } + return issues +} + +func getIssuesCacheKey(analyzers []*analysis.Analyzer) string { + return "lint/result:" + analyzersHashID(analyzers) +} + +func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool, + issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) { + startedAt := time.Now() + perPkgIssues := map[*packages.Package][]result.Issue{} + for ind := range issues { + i := &issues[ind] + perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i) + } + + savedIssuesCount := int32(0) + lintResKey := getIssuesCacheKey(analyzers) + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(allPkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := range pkgCh { + pkgIssues := perPkgIssues[pkg] + encodedIssues := make([]EncodingIssue, 0, len(pkgIssues)) + for ind := range pkgIssues { + i := &pkgIssues[ind] + encodedIssues = append(encodedIssues, EncodingIssue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + ExpectNoLint: i.ExpectNoLint, + ExpectedNoLintLinter: i.ExpectedNoLintLinter, + }) + } + + atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues))) + if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil { + lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err) + } else { + issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues)) + } + } + }() + } + + for _, pkg := range allPkgs { + if pkgsFromCache[pkg] { + continue + } + + pkgCh <- pkg + } 
+ close(pkgCh) + wg.Wait() + + issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt)) +} + +//nolint:gocritic +func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, + analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) { + startedAt := time.Now() + + lintResKey := getIssuesCacheKey(analyzers) + type cacheRes struct { + issues []result.Issue + loadErr error + } + pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs)) + for _, pkg := range pkgs { + pkgToCacheRes[pkg] = &cacheRes{} + } + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(pkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := range pkgCh { + var pkgIssues []EncodingIssue + err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues) + cacheRes := pkgToCacheRes[pkg] + cacheRes.loadErr = err + if err != nil { + continue + } + if len(pkgIssues) == 0 { + continue + } + + issues := make([]result.Issue, 0, len(pkgIssues)) + for _, i := range pkgIssues { + issues = append(issues, result.Issue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + Pkg: pkg, + ExpectNoLint: i.ExpectNoLint, + ExpectedNoLintLinter: i.ExpectedNoLintLinter, + }) + } + cacheRes.issues = issues + } + }() + } + + for _, pkg := range pkgs { + pkgCh <- pkg + } + close(pkgCh) + wg.Wait() + + loadedIssuesCount := 0 + var issues []result.Issue + pkgsFromCache := map[*packages.Package]bool{} + for pkg, cacheRes := range pkgToCacheRes { + if cacheRes.loadErr == nil { + loadedIssuesCount += len(cacheRes.issues) + pkgsFromCache[pkg] = true + issues = append(issues, cacheRes.issues...) + issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues)) + } else { + issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr) + } + } + issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages", + loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs)) + return issues, pkgsFromCache +} + +func analyzersHashID(analyzers []*analysis.Analyzer) string { + names := make([]string, 0, len(analyzers)) + for _, a := range analyzers { + names = append(names, a.Name) + } + + sort.Strings(names) + return strings.Join(names, ",") +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go new file mode 100644 index 000000000..804865cfc --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "4d63.com/gochecknoglobals/checknoglobals" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGochecknoglobals() *goanalysis.Linter { + gochecknoglobals := checknoglobals.Analyzer() + + // gochecknoglobals only lints test files if the `-t` flag is passed so we + // pass the `t` flag as true to the analyzer before running it. This can be + // turned of by using the regular golangci-lint flags such as `--tests` or + // `--skip-files`. 
+ linterConfig := map[string]map[string]interface{}{ + gochecknoglobals.Name: { + "t": true, + }, + } + + return goanalysis.NewLinter( + gochecknoglobals.Name, + gochecknoglobals.Doc, + []*analysis.Analyzer{gochecknoglobals}, + linterConfig, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go new file mode 100644 index 000000000..f9715bda8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go @@ -0,0 +1,73 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/token" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const gochecknoinitsName = "gochecknoinits" + +func NewGochecknoinits() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: gochecknoinitsName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (interface{}, error) { + var res []goanalysis.Issue + for _, file := range pass.Files { + fileIssues := checkFileForInits(file, pass.Fset) + for i := range fileIssues { + res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) + } + } + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + }, + } + return goanalysis.NewLinter( + gochecknoinitsName, + "Checks that no init functions are present in Go code", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue { + var res []result.Issue + for _, decl := range f.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + + name := funcDecl.Name.Name + if name == "init" && funcDecl.Recv.NumFields() == 0 { + res = append(res, result.Issue{ + Pos: fset.Position(funcDecl.Pos()), + Text: fmt.Sprintf("don't use %s function", formatCode(name, nil)), + FromLinter: gochecknoinitsName, + }) + } + } + + return res +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go new file mode 100644 index 000000000..eb42dd149 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go @@ -0,0 +1,68 @@ +package golinters + +import ( + "fmt" + "sort" + "sync" + + "github.com/uudashr/gocognit" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const gocognitName = "gocognit" + +func NewGocognit() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gocognitName, + "Computes and checks the cognitive complexity of functions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var stats []gocognit.Stat + for _, f := range pass.Files { + stats = gocognit.ComplexityStats(f, 
pass.Fset, stats) + } + if len(stats) == 0 { + return nil, nil + } + + sort.SliceStable(stats, func(i, j int) bool { + return stats[i].Complexity > stats[j].Complexity + }) + + res := make([]goanalysis.Issue, 0, len(stats)) + for _, s := range stats { + if s.Complexity <= lintCtx.Settings().Gocognit.MinComplexity { + break // Break as the stats is already sorted from greatest to least + } + + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocognit.MinComplexity), + FromLinter: gocognitName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go new file mode 100644 index 000000000..bdec4e10b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go @@ -0,0 +1,92 @@ +package golinters + +import ( + "fmt" + "sync" + + goconstAPI "github.com/jgautheron/goconst" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const goconstName = "goconst" + +func NewGoconst() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goconstName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + goconstName, + "Finds repeated strings that could be replaced by a constant", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + issues, err := checkConstants(pass, lintCtx) + if err != nil || len(issues) == 0 { + return nil, err + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]goanalysis.Issue, error) { + settings := lintCtx.Settings().Goconst + + cfg := goconstAPI.Config{ + IgnoreTests: settings.IgnoreTests, + MatchWithConstants: settings.MatchWithConstants, + MinStringLength: settings.MinStringLen, + MinOccurrences: settings.MinOccurrencesCount, + ParseNumbers: settings.ParseNumbers, + NumberMin: settings.NumberMin, + NumberMax: settings.NumberMax, + ExcludeTypes: map[goconstAPI.Type]bool{}, + } + + if settings.IgnoreCalls { + cfg.ExcludeTypes[goconstAPI.Call] = true + } + + goconstIssues, err := goconstAPI.Run(pass.Files, pass.Fset, &cfg) + if err != nil { + return nil, err + } + + if len(goconstIssues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(goconstIssues)) + for _, i := range goconstIssues { + textBegin := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, lintCtx.Cfg), i.OccurrencesCount) + var textEnd string + if i.MatchingConst == "" { + textEnd = ", make it a constant" + } else { + textEnd = fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, lintCtx.Cfg)) + } + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: textBegin + textEnd, + FromLinter: goconstName, + }, pass)) + } + + return res, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go new file mode 100644 index 000000000..75eb7d307 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go @@ -0,0 +1,167 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/types" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + + gocriticlinter "github.com/go-critic/go-critic/framework/linter" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const gocriticName = "gocritic" + +func NewGocritic() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + sizes := types.SizesFor("gc", runtime.GOARCH) + + analyzer := &analysis.Analyzer{ + Name: gocriticName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gocriticName, + `Provides many diagnostics that check for bugs, performance and style issues. +Extensible without recompilation through dynamic rules. +Dynamic rules are written declaratively with AST patterns, filters, report message and optional suggestion.`, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + linterCtx := gocriticlinter.NewContext(pass.Fset, sizes) + enabledCheckers, err := buildEnabledCheckers(lintCtx, linterCtx) + if err != nil { + return nil, err + } + + linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) + var res []goanalysis.Issue + pkgIssues := runGocriticOnPackage(linterCtx, enabledCheckers, pass.Files) + for i := range pkgIssues { + res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass)) + } + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func normalizeCheckerInfoParams(info *gocriticlinter.CheckerInfo) gocriticlinter.CheckerParams { + // lowercase info param keys here because golangci-lint's config parser lowercases all strings + ret := gocriticlinter.CheckerParams{} + for k, v := range info.Params { + ret[strings.ToLower(k)] = v + } + + return ret +} + +func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string]config.GocriticCheckSettings) error { + params := allParams[strings.ToLower(info.Name)] + if params == nil { // no config for this checker + return nil + } + + infoParams := normalizeCheckerInfoParams(info) + for k, p := range params { + v, ok := infoParams[k] + if ok { + v.Value = p + continue + } + + // param `k` isn't supported + if len(info.Params) == 0 { + return fmt.Errorf("checker %s config param %s doesn't exist: checker doesn't have params", + info.Name, k) + } + + var supportedKeys []string + for sk := range info.Params { + supportedKeys = append(supportedKeys, sk) + } + sort.Strings(supportedKeys) + + return fmt.Errorf("checker %s config param %s doesn't exist, all existing: %s", + info.Name, k, supportedKeys) + } + + return nil +} + +func buildEnabledCheckers(lintCtx *linter.Context, linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { + s := lintCtx.Settings().Gocritic + allParams := s.GetLowercasedParams() + + var enabledCheckers []*gocriticlinter.Checker + for _, info := range gocriticlinter.GetCheckersInfo() { + if !s.IsCheckEnabled(info.Name) { + continue + } + + if err := configureCheckerInfo(info, allParams); err != nil { + return nil, err + } + + c, err := gocriticlinter.NewChecker(linterCtx, info) + if err != nil { + return nil, err + } + enabledCheckers = append(enabledCheckers, c) + } + + return enabledCheckers, nil +} + +func runGocriticOnPackage(linterCtx *gocriticlinter.Context, checkers []*gocriticlinter.Checker, + files []*ast.File) []result.Issue { + var res []result.Issue + for _, f := range files { + filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) + linterCtx.SetFileInfo(filename, f) + + issues := runGocriticOnFile(linterCtx, f, checkers) + res = append(res, issues...) + } + return res +} + +func runGocriticOnFile(ctx *gocriticlinter.Context, f *ast.File, checkers []*gocriticlinter.Checker) []result.Issue { + var res []result.Issue + + for _, c := range checkers { + // All checkers are expected to use *lint.Context + // as read-only structure, so no copying is required. 
+ for _, warn := range c.Check(f) { + pos := ctx.FileSet.Position(warn.Node.Pos()) + res = append(res, result.Issue{ + Pos: pos, + Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), + FromLinter: gocriticName, + }) + } + } + + return res +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go new file mode 100644 index 000000000..5c61fec72 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go @@ -0,0 +1,61 @@ +package golinters + +import ( + "fmt" + "sync" + + "github.com/fzipp/gocyclo" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const gocycloName = "gocyclo" + +func NewGocyclo() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: gocycloName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gocycloName, + "Computes and checks the cyclomatic complexity of functions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var stats gocyclo.Stats + for _, f := range pass.Files { + stats = gocyclo.AnalyzeASTFile(f, pass.Fset, stats) + } + if len(stats) == 0 { + return nil, nil + } + + stats = stats.SortAndFilter(-1, lintCtx.Settings().Gocyclo.MinComplexity) + + res := make([]goanalysis.Issue, 0, len(stats)) + for _, s := range stats { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocyclo.MinComplexity), + FromLinter: gocycloName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go new file mode 100644 index 000000000..625245890 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go @@ -0,0 +1,84 @@ +package golinters + +import ( + "sync" + + "github.com/tetafro/godot" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const godotName = "godot" + +func NewGodot() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: godotName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + godotName, + "Check if comments end in a period", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + cfg := lintCtx.Cfg.LintersSettings.Godot + settings := godot.Settings{ + Scope: godot.Scope(cfg.Scope), + Exclude: cfg.Exclude, + Period: true, + Capital: cfg.Capital, + } + + // Convert deprecated setting + if cfg.CheckAll { // nolint: staticcheck + settings.Scope = godot.TopLevelScope + } + + if settings.Scope == "" { + settings.Scope = godot.DeclScope + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var issues []godot.Issue + for _, file := range pass.Files { + iss, err := godot.Run(file, pass.Fset, settings) + if err != nil { + return nil, err + } + issues = append(issues, iss...) + } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, len(issues)) + for k, i := range issues { + issue := result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: godotName, + Replacement: &result.Replacement{ + NewLines: []string{i.Replacement}, + }, + } + + res[k] = goanalysis.NewIssue(&issue, pass) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go new file mode 100644 index 000000000..2a4dd9faf --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go @@ -0,0 +1,63 @@ +package golinters + +import ( + "go/token" + "strings" + "sync" + + "github.com/matoous/godox" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const godoxName = "godox" + +func NewGodox() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: godoxName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + godoxName, + "Tool for detection of FIXME, TODO and other comment keywords", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var issues []godox.Message + for _, file := range pass.Files { + issues = append(issues, godox.Run(file, pass.Fset, lintCtx.Settings().Godox.Keywords...)...) + } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, len(issues)) + for k, i := range issues { + res[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: godoxName, + }, pass) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go new file mode 100644 index 000000000..0c10005a0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/Djarvur/go-err113" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGoerr113() *goanalysis.Linter { + return goanalysis.NewLinter( + "goerr113", + "Golang linter to check the errors handling expressions", + []*analysis.Analyzer{ + err113.NewAnalyzer(), + }, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go new file mode 100644 index 000000000..aa340dcf3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go @@ -0,0 +1,72 @@ +package golinters + +import ( + "sync" + + gofmtAPI "github.com/golangci/gofmt/gofmt" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +const gofmtName = "gofmt" + +func NewGofmt() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: gofmtName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gofmtName, + "Gofmt checks whether code was gofmt-ed. By default "+ + "this tool runs with -s option to check for code simplification", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := gofmtAPI.Run(f, lintCtx.Settings().Gofmt.Simplify) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, gofmtName) + if err != nil { + return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go new file mode 100644 index 000000000..39e8092e9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go @@ -0,0 +1,286 @@ +package golinters + +import ( + "bytes" + "fmt" + "go/token" + "strings" + + "github.com/pkg/errors" + diffpkg "github.com/sourcegraph/go-diff/diff" + + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type Change struct { + LineRange result.Range + Replacement result.Replacement +} + +type diffLineType string + +const ( + diffLineAdded diffLineType = "added" + diffLineOriginal diffLineType = "original" + diffLineDeleted diffLineType = "deleted" +) + +type diffLine struct { + originalNumber int // 1-based original line number + typ diffLineType + data string // "+" or "-" stripped line +} + +type hunkChangesParser struct { + // needed because we merge currently added lines with the last original line + lastOriginalLine *diffLine + + // if the first line of diff is an adding we save all additions to replacementLinesToPrepend + replacementLinesToPrepend []string + + log logutils.Log + + lines []diffLine + + ret []Change +} + +func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { + lines := bytes.Split(h.Body, []byte{'\n'}) + currentOriginalLineNumer := int(h.OrigStartLine) + var ret []diffLine + + for i, line := range lines { + dl := diffLine{ + originalNumber: currentOriginalLineNumer, + } + + lineStr := string(line) + + if strings.HasPrefix(lineStr, "-") { + dl.typ = diffLineDeleted + dl.data = strings.TrimPrefix(lineStr, "-") + currentOriginalLineNumer++ + } else if strings.HasPrefix(lineStr, "+") { + dl.typ = diffLineAdded + dl.data = strings.TrimPrefix(lineStr, "+") + } else { + if i == len(lines)-1 && lineStr == "" { + // handle last \n: don't add an empty original line + break + } + + dl.typ = diffLineOriginal + dl.data = strings.TrimPrefix(lineStr, " ") + currentOriginalLineNumer++ + } + + ret = append(ret, dl) + } + + p.lines = ret +} + +func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { + if len(p.replacementLinesToPrepend) == 0 { + p.lastOriginalLine = &line + *i++ + return + } + + // check following added lines for the case: + // + added line 1 + // original line + // + added line 2 + + *i++ + var followingAddedLines []string + for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ { + followingAddedLines = append(followingAddedLines, p.lines[*i].data) + } + + p.ret = append(p.ret, Change{ + LineRange: result.Range{ + From: line.originalNumber, + To: line.originalNumber, + }, + Replacement: result.Replacement{ + NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, followingAddedLines...)...), + }, + }) + p.replacementLinesToPrepend = nil + p.lastOriginalLine = &line +} + +func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { + change := Change{ + LineRange: result.Range{ + From: deletedLines[0].originalNumber, + To: deletedLines[len(deletedLines)-1].originalNumber, + }, + } + + if len(addedLines) != 0 { + //nolint:gocritic + change.Replacement.NewLines = append(p.replacementLinesToPrepend, 
addedLines...) + if len(p.replacementLinesToPrepend) != 0 { + p.replacementLinesToPrepend = nil + } + + p.ret = append(p.ret, change) + return + } + + // delete-only change with possible prepending + if len(p.replacementLinesToPrepend) != 0 { + change.Replacement.NewLines = p.replacementLinesToPrepend + p.replacementLinesToPrepend = nil + } else { + change.Replacement.NeedOnlyDelete = true + } + + p.ret = append(p.ret, change) +} + +func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { + if p.lastOriginalLine == nil { + // the first line is added; the diff looks like: + // 1. + ... + // 2. - ... + // or + // 1. + ... + // 2. ... + + p.replacementLinesToPrepend = addedLines + return + } + + // add-only change merged into the last original line with possible prepending + p.ret = append(p.ret, Change{ + LineRange: result.Range{ + From: p.lastOriginalLine.originalNumber, + To: p.lastOriginalLine.originalNumber, + }, + Replacement: result.Replacement{ + NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...), + }, + }) + p.replacementLinesToPrepend = nil +} + +func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { + p.parseDiffLines(h) + + for i := 0; i < len(p.lines); { + line := p.lines[i] + if line.typ == diffLineOriginal { + p.handleOriginalLine(line, &i) + continue + } + + var deletedLines []diffLine + for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ { + deletedLines = append(deletedLines, p.lines[i]) + } + + var addedLines []string + for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ { + addedLines = append(addedLines, p.lines[i].data) + } + + if len(deletedLines) != 0 { + p.handleDeletedLines(deletedLines, addedLines) + continue + } + + // no deletions, only additions + p.handleAddedOnlyLines(addedLines) + } + + if len(p.replacementLinesToPrepend) != 0 { + p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines) + return nil + } + + return p.ret +} + +func getErrorTextForLinter(lintCtx *linter.Context, linterName string) string { + text := "File is not formatted" + switch linterName { + case gofumptName: + text = "File is not `gofumpt`-ed" + if lintCtx.Settings().Gofumpt.ExtraRules { + text += " with `-extra`" + } + case gofmtName: + text = "File is not `gofmt`-ed" + if lintCtx.Settings().Gofmt.Simplify { + text += " with `-s`" + } + case goimportsName: + text = "File is not `goimports`-ed" + if lintCtx.Settings().Goimports.LocalPrefixes != "" { + text += " with -local " + lintCtx.Settings().Goimports.LocalPrefixes + } + case gciName: + text = "File is not `gci`-ed" + localPrefixes := lintCtx.Settings().Gci.LocalPrefixes + goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes + if localPrefixes == "" && goimportsFlag != "" { + localPrefixes = goimportsFlag + } + + if localPrefixes != "" { + text += " with -local " + localPrefixes + } + } + return text +} + +func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Context, linterName string) ([]result.Issue, error) { + diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) + if err != nil { + return nil, errors.Wrap(err, "can't parse patch") + } + + if len(diffs) == 0 { + return nil, fmt.Errorf("got no diffs from patch parser: %v", diffs) + } + + issues := []result.Issue{} + for _, d := range diffs { + if len(d.Hunks) == 0 { + log.Warnf("Got no hunks in diff %+v", d) + continue + } + + for _, hunk := range d.Hunks { + p := hunkChangesParser{ + log: log, + } + changes := 
p.parse(hunk) + for _, change := range changes { + change := change // fix scope + i := result.Issue{ + FromLinter: linterName, + Pos: token.Position{ + Filename: d.NewName, + Line: change.LineRange.From, + }, + Text: getErrorTextForLinter(lintCtx, linterName), + Replacement: &change.Replacement, + } + if change.LineRange.From != change.LineRange.To { + i.LineRange = &change.LineRange + } + + issues = append(issues, i) + } + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go new file mode 100644 index 000000000..75c088144 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go @@ -0,0 +1,108 @@ +package golinters + +import ( + "bytes" + "fmt" + "io/ioutil" + "sync" + + "github.com/pkg/errors" + "github.com/shazow/go-diff/difflib" + "golang.org/x/tools/go/analysis" + "mvdan.cc/gofumpt/format" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +const gofumptName = "gofumpt" + +func NewGofumpt() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + differ := difflib.New() + + analyzer := &analysis.Analyzer{ + Name: gofumptName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gofumptName, + "Gofumpt checks whether code was gofumpt-ed.", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + settings := lintCtx.Settings().Gofumpt + + options := format.Options{ + LangVersion: getLangVersion(settings), + ExtraRules: settings.ExtraRules, + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var issues []goanalysis.Issue + + for _, f := range fileNames { + input, err := ioutil.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to open file %s: %w", f, err) + } + + output, err := format.Source(input, options) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + if !bytes.Equal(input, output) { + out := bytes.Buffer{} + _, err = out.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + err = differ.Diff(&out, bytes.NewReader(input), bytes.NewReader(output)) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + diff := out.String() + is, err := extractIssuesFromPatch(diff, lintCtx.Log, lintCtx, gofumptName) + if err != nil { + return nil, errors.Wrapf(err, "can't extract issues from gofumpt diff output %q", diff) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func getLangVersion(settings config.GofumptSettings) string { + if settings.LangVersion == "" { + // TODO: defaults to "1.15", in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. 
+ return "1.15" + } + return settings.LangVersion +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go new file mode 100644 index 000000000..2ff587b0d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go @@ -0,0 +1,85 @@ +package golinters + +import ( + "go/token" + "sync" + + goheader "github.com/denis-tingajkin/go-header" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const goHeaderName = "goheader" + +func NewGoHeader() *goanalysis.Linter { + var mu sync.Mutex + var issues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goHeaderName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + goHeaderName, + "Checks is file header matches to pattern", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + cfg := lintCtx.Cfg.LintersSettings.Goheader + c := &goheader.Configuration{ + Values: cfg.Values, + Template: cfg.Template, + TemplatePath: cfg.TemplatePath, + } + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + if c.TemplatePath == "" && c.Template == "" { + // User did not pass template, so then do not run go-header linter + return nil, nil + } + template, err := c.GetTemplate() + if err != nil { + return nil, err + } + values, err := c.GetValues() + if err != nil { + return nil, err + } + a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) + var res []goanalysis.Issue + for _, file := range pass.Files { + path := pass.Fset.Position(file.Pos()).Filename + i := a.Analyze(&goheader.Target{ + File: file, + Path: path, + }) + if i == nil { + continue + } + issue := result.Issue{ + Pos: token.Position{ + Line: i.Location().Line + 1, + Column: i.Location().Position, + Filename: path, + }, + Text: i.Message(), + FromLinter: goHeaderName, + } + res = append(res, goanalysis.NewIssue(&issue, pass)) + } + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + issues = append(issues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go new file mode 100644 index 000000000..b58af1967 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go @@ -0,0 +1,73 @@ +package golinters + +import ( + "sync" + + goimportsAPI "github.com/golangci/gofmt/goimports" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/imports" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +const goimportsName = "goimports" + +func NewGoimports() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goimportsName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + goimportsName, + "In addition to fixing imports, goimports also formats your code in the same style as gofmt.", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + imports.LocalPrefix = lintCtx.Settings().Goimports.LocalPrefixes + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := goimportsAPI.Run(f) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, goimportsName) + if err != nil { + return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go new file mode 100644 index 000000000..3b1b1b66f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go @@ -0,0 +1,78 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "sync" + + lintAPI "github.com/golangci/lint-1" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet, + typesPkg *types.Package, typesInfo *types.Info) ([]result.Issue, error) { + l := new(lintAPI.Linter) + ps, err := l.LintPkg(files, fset, typesPkg, typesInfo) + if err != nil { + return nil, fmt.Errorf("can't lint %d files: %s", len(files), err) + } + + if len(ps) == 0 { + return nil, nil + } + + issues := make([]result.Issue, 0, len(ps)) // This is worst case + for idx := range ps { + if ps[idx].Confidence >= minConfidence { + issues = append(issues, result.Issue{ + Pos: ps[idx].Position, + Text: ps[idx].Text, + FromLinter: golintName, + }) + // TODO: use p.Link and p.Category + } + } + + return issues, nil +} + +const golintName = "golint" + +func NewGolint() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: golintName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + golintName, + "Golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) + if err != nil || len(res) == 0 { + return nil, err + } + + mu.Lock() + for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go new file mode 100644 index 000000000..f7e71b7da --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go @@ -0,0 +1,27 @@ +package golinters + +import ( + mnd "github.com/tommy-muehle/go-mnd/v2" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGoMND(cfg *config.Config) *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + mnd.Analyzer, + } + + var linterCfg map[string]map[string]interface{} + if cfg != nil { + linterCfg = cfg.LintersSettings.Gomnd.Settings + } + + return goanalysis.NewLinter( + "gomnd", + "An analyzer to detect magic numbers.", + analyzers, + linterCfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go new file mode 100644 index 000000000..40d3bf786 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go @@ -0,0 +1,64 @@ +package golinters + +import ( + "sync" + + "github.com/ldez/gomoddirectives" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const goModDirectivesName = "gomoddirectives" + +// NewGoModDirectives returns a new gomoddirectives linter. 
+func NewGoModDirectives(settings *config.GoModDirectivesSettings) *goanalysis.Linter { + var issues []goanalysis.Issue + var once sync.Once + + var opts gomoddirectives.Options + if settings != nil { + opts.ReplaceAllowLocal = settings.ReplaceLocal + opts.ReplaceAllowList = settings.ReplaceAllowList + opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation + opts.ExcludeForbidden = settings.ExcludeForbidden + } + + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + + return goanalysis.NewLinter( + goModDirectivesName, + "Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + once.Do(func() { + results, err := gomoddirectives.Analyze(opts) + if err != nil { + lintCtx.Log.Warnf("running %s failed: %s: "+ + "if you are not using go modules it is suggested to disable this linter", goModDirectivesName, err) + return + } + + for _, p := range results { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + FromLinter: goModDirectivesName, + Pos: p.Start, + Text: p.Reason, + }, pass)) + } + }) + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go new file mode 100644 index 000000000..30ca6cc3d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go @@ -0,0 +1,99 @@ +package golinters + +import ( + "sync" + + "github.com/ryancurrah/gomodguard" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const ( + gomodguardName = "gomodguard" + gomodguardDesc = "Allow and block list linter for direct Go module dependencies. " + + "This is different from depguard where there are different block " + + "types for example version constraints and module recommendations." +) + +// NewGomodguard returns a new Gomodguard linter. 
+func NewGomodguard() *goanalysis.Linter { + var ( + issues []goanalysis.Issue + mu = sync.Mutex{} + analyzer = &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + ) + + return goanalysis.NewLinter( + gomodguardName, + gomodguardDesc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + linterCfg := lintCtx.Cfg.LintersSettings.Gomodguard + + processorCfg := &gomodguard.Configuration{} + processorCfg.Allowed.Modules = linterCfg.Allowed.Modules + processorCfg.Allowed.Domains = linterCfg.Allowed.Domains + processorCfg.Blocked.LocalReplaceDirectives = linterCfg.Blocked.LocalReplaceDirectives + + for n := range linterCfg.Blocked.Modules { + for k, v := range linterCfg.Blocked.Modules[n] { + m := map[string]gomodguard.BlockedModule{k: { + Recommendations: v.Recommendations, + Reason: v.Reason, + }} + processorCfg.Blocked.Modules = append(processorCfg.Blocked.Modules, m) + break + } + } + + for n := range linterCfg.Blocked.Versions { + for k, v := range linterCfg.Blocked.Versions[n] { + m := map[string]gomodguard.BlockedVersion{k: { + Version: v.Version, + Reason: v.Reason, + }} + processorCfg.Blocked.Versions = append(processorCfg.Blocked.Versions, m) + break + } + } + + processor, err := gomodguard.NewProcessor(processorCfg) + if err != nil { + lintCtx.Log.Warnf("running gomodguard failed: %s: if you are not using go modules "+ + "it is suggested to disable this linter", err) + return + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var files []string + + for _, file := range pass.Files { + files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) + } + + gomodguardIssues := processor.ProcessFiles(files) + + mu.Lock() + defer mu.Unlock() + + for _, gomodguardIssue := range gomodguardIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + FromLinter: gomodguardName, + Pos: gomodguardIssue.Position, + Text: gomodguardIssue.Reason, + }, pass)) + } + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go new file mode 100644 index 000000000..c5516dc7f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go @@ -0,0 +1,17 @@ +package golinters + +import ( + "github.com/jirfag/go-printf-func-name/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGoPrintfFuncName() *goanalysis.Linter { + return goanalysis.NewLinter( + "goprintffuncname", + "Checks that printf-like functions are named with `f` at the end", + []*analysis.Analyzer{analyzer.Analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go new file mode 100644 index 000000000..328ba5ccc --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go @@ -0,0 +1,128 @@ +package golinters + +import ( + "fmt" + "go/token" + "io/ioutil" + "log" + "strconv" + "strings" + "sync" + + "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/rules" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + 
"github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const gosecName = "gosec" + +func NewGosec(settings *config.GoSecSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + gasConfig := gosec.NewConfig() + + var filters []rules.RuleFilter + if settings != nil { + filters = gosecRuleFilters(settings.Includes, settings.Excludes) + + for k, v := range settings.Config { + // Uses ToUpper because the parsing of the map's key change the key to lowercase. + // The value is not impacted by that: the case is respected. + gasConfig.Set(strings.ToUpper(k), v) + } + } + + ruleDefinitions := rules.Generate(filters...) + + logger := log.New(ioutil.Discard, "", 0) + + analyzer := &analysis.Analyzer{ + Name: gosecName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + gosecName, + "Inspects source code for security problems", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + gosecAnalyzer := gosec.NewAnalyzer(gasConfig, true, logger) + gosecAnalyzer.LoadRules(ruleDefinitions.Builders()) + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + gosecAnalyzer.Check(pkg) + issues, _, _ := gosecAnalyzer.Report() + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence + var r *result.Range + line, err := strconv.Atoi(i.Line) + if err != nil { + r = &result.Range{} + if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { + lintCtx.Log.Warnf("Can't convert gosec line number %q of %v to int: %s", i.Line, i, err) + continue + } + line = r.From + } + + column, err := strconv.Atoi(i.Col) + if err != nil { + lintCtx.Log.Warnf("Can't convert gosec column number %q of %v to int: %s", i.Col, i, err) + continue + } + + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.File, + Line: line, + Column: column, + }, + Text: text, + LineRange: r, + FromLinter: gosecName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +// based on https://github.com/securego/gosec/blob/569328eade2ccbad4ce2d0f21ee158ab5356a5cf/cmd/gosec/main.go#L170-L188 +func gosecRuleFilters(includes, excludes []string) []rules.RuleFilter { + var filters []rules.RuleFilter + + if len(includes) > 0 { + filters = append(filters, rules.NewRuleFilter(false, includes...)) + } + + if len(excludes) > 0 { + filters = append(filters, rules.NewRuleFilter(true, excludes...)) + } + + return filters +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go new file mode 100644 index 000000000..fa14f1a96 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "honnef.co/go/tools/simple" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGosimple(settings *config.StaticCheckSettings) *goanalysis.Linter { + cfg := staticCheckConfig(settings) + + analyzers := setupStaticCheckAnalyzers(simple.Analyzers, getGoVersion(settings), cfg.Checks) + + return goanalysis.NewLinter( + "gosimple", + "Linter for Go source code that specializes in simplifying a code", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go new file mode 100644 index 000000000..b3860e017 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go @@ -0,0 +1,185 @@ +package golinters + +import ( + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/asmdecl" + "golang.org/x/tools/go/analysis/passes/assign" + "golang.org/x/tools/go/analysis/passes/atomic" + "golang.org/x/tools/go/analysis/passes/atomicalign" + "golang.org/x/tools/go/analysis/passes/bools" + _ "golang.org/x/tools/go/analysis/passes/buildssa" // unused, internal analyzer + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/cgocall" + "golang.org/x/tools/go/analysis/passes/composite" + "golang.org/x/tools/go/analysis/passes/copylock" + _ "golang.org/x/tools/go/analysis/passes/ctrlflow" // unused, internal analyzer + "golang.org/x/tools/go/analysis/passes/deepequalerrors" + "golang.org/x/tools/go/analysis/passes/errorsas" + "golang.org/x/tools/go/analysis/passes/fieldalignment" + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/passes/framepointer" + "golang.org/x/tools/go/analysis/passes/httpresponse" + "golang.org/x/tools/go/analysis/passes/ifaceassert" + _ "golang.org/x/tools/go/analysis/passes/inspect" // unused internal analyzer + "golang.org/x/tools/go/analysis/passes/loopclosure" + "golang.org/x/tools/go/analysis/passes/lostcancel" + "golang.org/x/tools/go/analysis/passes/nilfunc" + "golang.org/x/tools/go/analysis/passes/nilness" + _ "golang.org/x/tools/go/analysis/passes/pkgfact" // unused, internal analyzer + "golang.org/x/tools/go/analysis/passes/printf" + "golang.org/x/tools/go/analysis/passes/reflectvaluecompare" + "golang.org/x/tools/go/analysis/passes/shadow" + "golang.org/x/tools/go/analysis/passes/shift" + "golang.org/x/tools/go/analysis/passes/sigchanyzer" + "golang.org/x/tools/go/analysis/passes/sortslice" + 
"golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/stringintconv" + "golang.org/x/tools/go/analysis/passes/structtag" + "golang.org/x/tools/go/analysis/passes/testinggoroutine" + "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/unmarshal" + "golang.org/x/tools/go/analysis/passes/unreachable" + "golang.org/x/tools/go/analysis/passes/unsafeptr" + "golang.org/x/tools/go/analysis/passes/unusedresult" + "golang.org/x/tools/go/analysis/passes/unusedwrite" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +var ( + allAnalyzers = []*analysis.Analyzer{ + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + atomicalign.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + deepequalerrors.Analyzer, + errorsas.Analyzer, + fieldalignment.Analyzer, + findcall.Analyzer, + framepointer.Analyzer, + httpresponse.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + nilness.Analyzer, + printf.Analyzer, + reflectvaluecompare.Analyzer, + shadow.Analyzer, + shift.Analyzer, + sigchanyzer.Analyzer, + sortslice.Analyzer, + stdmethods.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + testinggoroutine.Analyzer, + tests.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + unusedwrite.Analyzer, + } + + // https://github.com/golang/go/blob/879db69ce2de814bc3203c39b45617ba51cc5366/src/cmd/vet/main.go#L40-L68 + defaultAnalyzers = []*analysis.Analyzer{ + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + errorsas.Analyzer, + framepointer.Analyzer, + httpresponse.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + shift.Analyzer, + sigchanyzer.Analyzer, + stdmethods.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + testinggoroutine.Analyzer, + tests.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + } +) + +func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers []*analysis.Analyzer) bool { + if cfg.EnableAll { + for _, n := range cfg.Disable { + if n == name { + return false + } + } + return true + } + // Raw for loops should be OK on small slice lengths. + for _, n := range cfg.Enable { + if n == name { + return true + } + } + for _, n := range cfg.Disable { + if n == name { + return false + } + } + if cfg.DisableAll { + return false + } + for _, a := range defaultAnalyzers { + if a.Name == name { + return true + } + } + return false +} + +func analyzersFromConfig(cfg *config.GovetSettings) []*analysis.Analyzer { + if cfg == nil { + return defaultAnalyzers + } + + if cfg.CheckShadowing { + // Keeping for backward compatibility. 
+ cfg.Enable = append(cfg.Enable, shadow.Analyzer.Name) + } + + var enabledAnalyzers []*analysis.Analyzer + for _, a := range allAnalyzers { + if isAnalyzerEnabled(a.Name, cfg, defaultAnalyzers) { + enabledAnalyzers = append(enabledAnalyzers, a) + } + } + + return enabledAnalyzers +} + +func NewGovet(cfg *config.GovetSettings) *goanalysis.Linter { + var settings map[string]map[string]interface{} + if cfg != nil { + settings = cfg.Settings + } + return goanalysis.NewLinter( + "govet", + "Vet examines Go source code and reports suspicious constructs, "+ + "such as Printf calls whose arguments do not align with the format string", + analyzersFromConfig(cfg), + settings, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go new file mode 100644 index 000000000..c26f08e40 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go @@ -0,0 +1,28 @@ +package golinters + +import ( + "github.com/esimonov/ifshort/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewIfshort(settings *config.IfshortSettings) *goanalysis.Linter { + var cfg map[string]map[string]interface{} + if settings != nil { + cfg = map[string]map[string]interface{}{ + analyzer.Analyzer.Name: { + "max-decl-lines": settings.MaxDeclLines, + "max-decl-chars": settings.MaxDeclChars, + }, + } + } + + return goanalysis.NewLinter( + "ifshort", + "Checks that your code uses short syntax for if-statements whenever possible", + []*analysis.Analyzer{analyzer.Analyzer}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go new file mode 100644 index 000000000..523aa257b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go @@ -0,0 +1,48 @@ +package golinters + +import ( + "fmt" + "strconv" + + "github.com/julz/importas" // nolint: misspell + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +func NewImportAs(settings *config.ImportAsSettings) *goanalysis.Linter { + analyzer := importas.Analyzer + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + if settings == nil { + return + } + if len(settings.Alias) == 0 { + lintCtx.Log.Infof("importas settings found, but no aliases listed. 
List aliases under alias: key.") // nolint: misspell + } + + err := analyzer.Flags.Set("no-unaliased", strconv.FormatBool(settings.NoUnaliased)) + if err != nil { + lintCtx.Log.Errorf("failed to parse configuration: %v", err) + } + + for _, a := range settings.Alias { + if a.Pkg == "" { + lintCtx.Log.Errorf("invalid configuration, empty package: pkg=%s alias=%s", a.Pkg, a.Alias) + continue + } + + err := analyzer.Flags.Set("alias", fmt.Sprintf("%s:%s", a.Pkg, a.Alias)) + if err != nil { + lintCtx.Log.Errorf("failed to parse configuration: %v", err) + } + } + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go new file mode 100644 index 000000000..c87bb2fa5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go @@ -0,0 +1,17 @@ +package golinters + +import ( + "github.com/gordonklaus/ineffassign/pkg/ineffassign" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewIneffassign() *goanalysis.Linter { + return goanalysis.NewLinter( + "ineffassign", + "Detects when assignments to existing variables are not used", + []*analysis.Analyzer{ineffassign.Analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go new file mode 100644 index 000000000..1edbe894c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go @@ -0,0 +1,67 @@ +package golinters + +import ( + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "mvdan.cc/interfacer/check" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const interfacerName = "interfacer" + +func NewInterfacer() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: interfacerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } + return goanalysis.NewLinter( + interfacerName, + "Linter that suggests narrower interface types", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + ssaPkg := ssa.Pkg + c := &check.Checker{} + prog := goanalysis.MakeFakeLoaderProgram(pass) + c.Program(prog) + c.ProgramSSA(ssaPkg.Prog) + + issues, err := c.Check() + if err != nil { + return nil, err + } + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + pos := pass.Fset.Position(i.Pos()) + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: pos, + Text: i.Message(), + FromLinter: interfacerName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go new file mode 100644 index 000000000..5f26e91dd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go @@ -0,0 +1,124 @@ +package golinters + +import ( + "bufio" + "fmt" + "go/token" + "os" + "strings" + "sync" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { + var res []result.Issue + + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("can't open file %s: %s", filename, err) + } + defer f.Close() + + lineNumber := 1 + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + line = strings.Replace(line, "\t", tabSpaces, -1) + lineLen := utf8.RuneCountInString(line) + if lineLen > maxLineLen { + res = append(res, result.Issue{ + Pos: token.Position{ + Filename: filename, + Line: lineNumber, + }, + Text: fmt.Sprintf("line is %d characters", lineLen), + FromLinter: lllName, + }) + } + lineNumber++ + } + + if err := scanner.Err(); err != nil { + if err == bufio.ErrTooLong && maxLineLen < bufio.MaxScanTokenSize { + // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize + // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize + // we can return this line as a long line instead of returning an error. + // The reason for this change is that this case might happen with autogenerated files + // The go-bindata tool for instance might generate a file with a very long line. + // In this case, as it's a auto generated file, the warning returned by lll will + // be ignored. + // But if we return a linter error here, and this error happens for an autogenerated + // file the error will be discarded (fine), but all the subsequent errors for lll will + // be discarded for other files and we'll miss legit error. 
+ res = append(res, result.Issue{ + Pos: token.Position{ + Filename: filename, + Line: lineNumber, + Column: 1, + }, + Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), + FromLinter: lllName, + }) + } else { + return nil, fmt.Errorf("can't scan file %s: %s", filename, err) + } + } + + return res, nil +} + +const lllName = "lll" + +func NewLLL() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: lllName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + lllName, + "Reports long lines", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var res []goanalysis.Issue + spaces := strings.Repeat(" ", lintCtx.Settings().Lll.TabWidth) + for _, f := range fileNames { + issues, err := getLLLIssuesForFile(f, lintCtx.Settings().Lll.LineLength, spaces) + if err != nil { + return nil, err + } + for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go new file mode 100644 index 000000000..cdde09291 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go @@ -0,0 +1,60 @@ +package golinters + +import ( + "sync" + + "github.com/ashanbrown/makezero/makezero" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const makezeroName = "makezero" + +func NewMakezero() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: makezeroName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + makezeroName, + "Finds slice declarations with non-zero initial length", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + s := &lintCtx.Settings().Makezero + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var res []goanalysis.Issue + linter := makezero.NewLinter(s.Always) + for _, file := range pass.Files { + hints, err := linter.Run(pass.Fset, pass.TypesInfo, file) + if err != nil { + return nil, errors.Wrapf(err, "makezero linter failed on file %q", file.Name.String()) + } + for _, hint := range hints { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: makezeroName, + }, pass)) + } + } + if len(res) == 0 { + return nil, nil + } + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go new file mode 100644 index 000000000..22422b8c6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go @@ -0,0 +1,58 @@ +package golinters + +import ( + "fmt" + "sync" + + malignedAPI "github.com/golangci/maligned" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewMaligned() *goanalysis.Linter { + const linterName = "maligned" + var mu sync.Mutex + var res []goanalysis.Issue + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Tool to detect Go structs that would take less memory if their fields were sorted", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + malignedIssues := malignedAPI.Run(prog) + if len(malignedIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, 0, len(malignedIssues)) + for _, i := range malignedIssues { + text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) + if lintCtx.Settings().Maligned.SuggestNewOrder { + text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, lintCtx.Cfg)) + } + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: text, + FromLinter: linterName, + }, pass)) + } + + mu.Lock() + res = append(res, issues...) + mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return res + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go new file mode 100644 index 000000000..80ecf9bb6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go @@ -0,0 +1,132 @@ +package golinters + +import ( + "fmt" + "go/token" + "strings" + "sync" + + "github.com/golangci/misspell" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func runMisspellOnFile(fileName string, r *misspell.Replacer, lintCtx *linter.Context) ([]result.Issue, error) { + var res []result.Issue + fileContent, err := lintCtx.FileCache.GetFileBytes(fileName) + if err != nil { + return nil, fmt.Errorf("can't get file %s contents: %s", fileName, err) + } + + // use r.Replace, not r.ReplaceGo because r.ReplaceGo doesn't find + // issues inside strings: it searches only inside comments. r.Replace + // searches all words: it treats input as a plain text. A standalone misspell + // tool uses r.Replace by default. 
+ _, diffs := r.Replace(string(fileContent)) + for _, diff := range diffs { + text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) + pos := token.Position{ + Filename: fileName, + Line: diff.Line, + Column: diff.Column + 1, + } + replacement := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: diff.Column, + Length: len(diff.Original), + NewString: diff.Corrected, + }, + } + + res = append(res, result.Issue{ + Pos: pos, + Text: text, + FromLinter: misspellName, + Replacement: replacement, + }) + } + + return res, nil +} + +const misspellName = "misspell" + +func NewMisspell() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + var ruleErr error + + analyzer := &analysis.Analyzer{ + Name: misspellName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + misspellName, + "Finds commonly misspelled English words in comments", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + r := misspell.Replacer{ + Replacements: misspell.DictMain, + } + + // Figure out regional variations + settings := lintCtx.Settings().Misspell + locale := settings.Locale + switch strings.ToUpper(locale) { + case "": + // nothing + case "US": + r.AddRuleList(misspell.DictAmerican) + case "UK", "GB": + r.AddRuleList(misspell.DictBritish) + case "NZ", "AU", "CA": + ruleErr = fmt.Errorf("unknown locale: %q", locale) + } + + if ruleErr == nil { + if len(settings.IgnoreWords) != 0 { + r.RemoveRule(settings.IgnoreWords) + } + + r.Compile() + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + if ruleErr != nil { + return nil, ruleErr + } + + var fileNames []string + for _, f := range pass.Files { + pos := pass.Fset.PositionFor(f.Pos(), false) + fileNames = append(fileNames, pos.Filename) + } + + var res []goanalysis.Issue + for _, f := range fileNames { + issues, err := runMisspellOnFile(f, &r, lintCtx) + if err != nil { + return nil, err + } + for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } + } + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go new file mode 100644 index 000000000..86735a51a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go @@ -0,0 +1,123 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/token" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +type nakedretVisitor struct { + maxLength int + f *token.FileSet + issues []result.Issue +} + +func (v *nakedretVisitor) processFuncDecl(funcDecl *ast.FuncDecl) { + file := v.f.File(funcDecl.Pos()) + functionLineLength := file.Position(funcDecl.End()).Line - file.Position(funcDecl.Pos()).Line + + // Scan the body for usage of the named returns + for _, stmt := range funcDecl.Body.List { + s, ok := stmt.(*ast.ReturnStmt) + if !ok { + continue + } + + if len(s.Results) != 0 { + continue + } + + file := v.f.File(s.Pos()) + if file == nil || functionLineLength <= v.maxLength { + continue + } + if funcDecl.Name == nil { + continue + } + + v.issues = append(v.issues, result.Issue{ + FromLinter: nakedretName, + Text: fmt.Sprintf("naked return in func `%s` with %d lines of code", + funcDecl.Name.Name, functionLineLength), + Pos: v.f.Position(s.Pos()), + }) + } +} + +func (v *nakedretVisitor) Visit(node ast.Node) ast.Visitor { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return v + } + + var namedReturns []*ast.Ident + + // We've found a function + if funcDecl.Type != nil && funcDecl.Type.Results != nil { + for _, field := range funcDecl.Type.Results.List { + for _, ident := range field.Names { + if ident != nil { + namedReturns = append(namedReturns, ident) + } + } + } + } + + if len(namedReturns) == 0 || funcDecl.Body == nil { + return v + } + + v.processFuncDecl(funcDecl) + return v +} + +const nakedretName = "nakedret" + +func NewNakedret() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: nakedretName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + nakedretName, + "Finds naked returns in functions greater than a specified function length", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var res []goanalysis.Issue + for _, file := range pass.Files { + v := nakedretVisitor{ + maxLength: lintCtx.Settings().Nakedret.MaxFuncLines, + f: pass.Fset, + } + ast.Walk(&v, file) + for i := range v.issues { + res = append(res, goanalysis.NewIssue(&v.issues[i], pass)) + } + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go new file mode 100644 index 000000000..0998a8ce2 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go @@ -0,0 +1,65 @@ +package golinters + +import ( + "sort" + "sync" + + "github.com/nakabonne/nestif" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const nestifName = "nestif" + +func NewNestif() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + nestifName, + "Reports deeply nested if statements", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + checker := &nestif.Checker{ + MinComplexity: lintCtx.Settings().Nestif.MinComplexity, + } + var issues []nestif.Issue + for _, f := range pass.Files { + issues = append(issues, checker.Check(f, pass.Fset)...) + } + if len(issues) == 0 { + return nil, nil + } + + sort.SliceStable(issues, func(i, j int) bool { + return issues[i].Complexity > issues[j].Complexity + }) + + res := make([]goanalysis.Issue, 0, len(issues)) + for _, i := range issues { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: nestifName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go new file mode 100644 index 000000000..d8a9a613e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go @@ -0,0 +1,18 @@ +package golinters + +import ( + "github.com/gostaticanalysis/nilerr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNilErr() *goanalysis.Linter { + a := nilerr.Analyzer + return goanalysis.NewLinter( + a.Name, + "Finds the code that returns nil even if it checks that the error is not nil.", + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go new file mode 100644 index 000000000..3b661c64c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/ssgreg/nlreturn/v2/pkg/nlreturn" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNLReturn() *goanalysis.Linter { + return goanalysis.NewLinter( + "nlreturn", + "nlreturn checks for a new line before return and branch statements to increase code clarity", + []*analysis.Analyzer{ + nlreturn.NewAnalyzer(), + }, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go new file mode 100644 index 000000000..b5c4a4be2 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/sonatard/noctx" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoctx() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + noctx.Analyzer, + } + + return goanalysis.NewLinter( + "noctx", + "noctx finds sending http request without context.Context", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go new file mode 100644 index 000000000..889cff864 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go @@ -0,0 +1,93 @@ +package golinters + +import ( + "fmt" + "go/ast" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/nolintlint" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const NolintlintName = "nolintlint" + +func NewNoLintLint() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: NolintlintName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + NolintlintName, + "Reports ill-formed or insufficient nolint directives", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, 
error) { + var needs nolintlint.Needs + settings := lintCtx.Settings().NoLintLint + if settings.RequireExplanation { + needs |= nolintlint.NeedsExplanation + } + if !settings.AllowLeadingSpace { + needs |= nolintlint.NeedsMachineOnly + } + if settings.RequireSpecific { + needs |= nolintlint.NeedsSpecific + } + if !settings.AllowUnused { + needs |= nolintlint.NeedsUnused + } + + lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + if err != nil { + return nil, err + } + + nodes := make([]ast.Node, 0, len(pass.Files)) + for _, n := range pass.Files { + nodes = append(nodes, n) + } + issues, err := lnt.Run(pass.Fset, nodes...) + if err != nil { + return nil, fmt.Errorf("linter failed to run: %s", err) + } + var res []goanalysis.Issue + for _, i := range issues { + expectNoLint := false + var expectedNolintLinter string + if ii, ok := i.(nolintlint.UnusedCandidate); ok { + expectedNolintLinter = ii.ExpectedLinter + expectNoLint = true + } + issue := &result.Issue{ + FromLinter: NolintlintName, + Text: i.Details(), + Pos: i.Position(), + ExpectNoLint: expectNoLint, + ExpectedNoLintLinter: expectedNolintLinter, + Replacement: i.Replacement(), + } + res = append(res, goanalysis.NewIssue(issue, pass)) + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md new file mode 100644 index 000000000..3d440d5a5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md @@ -0,0 +1,31 @@ +# nolintlint + +nolintlint is a Go static analysis tool to find ill-formed or insufficiently explained `// nolint` directives for golangci +(or any other linter, using th ) + +## Purpose + +To ensure that lint exceptions have explanations. Consider the case below: + +```Go +import "crypto/md5" //nolint + +func hash(data []byte) []byte { + return md5.New().Sum(data) //nolint +} +``` + +In the above case, nolint directives are present but the user has no idea why this is being done or which linter +is being suppressed (in this case, gosec recommends against use of md5). `nolintlint` can require that the code provide an explanation, which might look as follows: + +```Go +import "crypto/md5" //nolint:gosec // this is not used in a secure application + +func hash(data []byte) []byte { + return md5.New().Sum(data) //nolint:gosec // this result is not used in a secure application +} +``` + +`nolintlint` can also identify cases where you may have written `// nolint`. Finally `nolintlint`, can also enforce that you +use the machine-readable nolint directive format `//nolint` and that you mention what linter is being suppressed, as shown above when we write `//nolint:gosec`. 
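+
+The following is a minimal, hypothetical sketch (not part of upstream nolintlint) of how the `Linter` defined in this package might be driven directly; golangci-lint itself wires it up through `NewNoLintLint`. The standalone `main` wrapper and the `example.go` file name are placeholders, and the import path is assumed from this vendor tree.
+
+```Go
+package main
+
+import (
+	"fmt"
+	"go/parser"
+	"go/token"
+	"log"
+
+	"github.com/golangci/golangci-lint/pkg/golinters/nolintlint"
+)
+
+func main() {
+	fset := token.NewFileSet()
+	// Comments must be parsed, otherwise there are no //nolint directives to inspect.
+	file, err := parser.ParseFile(fset, "example.go", nil, parser.ParseComments)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Require machine-readable, linter-specific and explained directives.
+	lnt, err := nolintlint.NewLinter(nolintlint.NeedsAll, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	issues, err := lnt.Run(fset, file)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, issue := range issues {
+		fmt.Println(issue) // e.g. directive `//nolint` should mention specific linter ...
+	}
+}
+```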
+ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go new file mode 100644 index 000000000..4466cab41 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -0,0 +1,305 @@ +// Package nolintlint provides a linter to ensure that all //nolint directives are followed by explanations +package nolintlint + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + "strings" + "unicode" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type BaseIssue struct { + fullDirective string + directiveWithOptionalLeadingSpace string + position token.Position + replacement *result.Replacement +} + +//nolint:gocritic // TODO must be change in the future. +func (b BaseIssue) Position() token.Position { + return b.position +} + +//nolint:gocritic // TODO must be change in the future. +func (b BaseIssue) Replacement() *result.Replacement { + return b.replacement +} + +type ExtraLeadingSpace struct { + BaseIssue +} + +//nolint:gocritic // TODO must be change in the future. +func (i ExtraLeadingSpace) Details() string { + return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) +} + +//nolint:gocritic // TODO must be change in the future. +func (i ExtraLeadingSpace) String() string { return toString(i) } + +type NotMachine struct { + BaseIssue +} + +//nolint:gocritic // TODO must be change in the future. +func (i NotMachine) Details() string { + expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) + return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", + i.fullDirective, expected) +} + +//nolint:gocritic // TODO must be change in the future. +func (i NotMachine) String() string { return toString(i) } + +type NotSpecific struct { + BaseIssue +} + +//nolint:gocritic // TODO must be change in the future. +func (i NotSpecific) Details() string { + return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", + i.fullDirective, i.directiveWithOptionalLeadingSpace) +} + +//nolint:gocritic // TODO must be change in the future. +func (i NotSpecific) String() string { return toString(i) } + +type ParseError struct { + BaseIssue +} + +//nolint:gocritic // TODO must be change in the future. +func (i ParseError) Details() string { + return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", + i.fullDirective, + i.directiveWithOptionalLeadingSpace) +} + +//nolint:gocritic // TODO must be change in the future. +func (i ParseError) String() string { return toString(i) } + +type NoExplanation struct { + BaseIssue + fullDirectiveWithoutExplanation string +} + +//nolint:gocritic // TODO must be change in the future. +func (i NoExplanation) Details() string { + return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", + i.fullDirective, i.fullDirectiveWithoutExplanation) +} + +//nolint:gocritic // TODO must be change in the future. +func (i NoExplanation) String() string { return toString(i) } + +type UnusedCandidate struct { + BaseIssue + ExpectedLinter string +} + +//nolint:gocritic // TODO must be change in the future. 
+func (i UnusedCandidate) Details() string { + details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) + if i.ExpectedLinter != "" { + details += fmt.Sprintf(" for linter %q", i.ExpectedLinter) + } + return details +} + +//nolint:gocritic // TODO must be change in the future. +func (i UnusedCandidate) String() string { return toString(i) } + +func toString(i Issue) string { + return fmt.Sprintf("%s at %s", i.Details(), i.Position()) +} + +type Issue interface { + Details() string + Position() token.Position + String() string + Replacement() *result.Replacement +} + +type Needs uint + +const ( + NeedsMachineOnly Needs = 1 << iota + NeedsSpecific + NeedsExplanation + NeedsUnused + NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation +) + +var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`) + +// matches a complete nolint directive +var fullDirectivePattern = regexp.MustCompile(`^//\s*nolint(?::(\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*))?\s*(//.*)?\s*\n?$`) + +type Linter struct { + needs Needs // indicates which linter checks to perform + excludeByLinter map[string]bool +} + +// NewLinter creates a linter that enforces that the provided directives fulfill the provided requirements +func NewLinter(needs Needs, excludes []string) (*Linter, error) { + excludeByName := make(map[string]bool) + for _, e := range excludes { + excludeByName[e] = true + } + + return &Linter{ + needs: needs, + excludeByLinter: excludeByName, + }, nil +} + +var leadingSpacePattern = regexp.MustCompile(`^//(\s*)`) +var trailingBlankExplanation = regexp.MustCompile(`\s*(//\s*)?$`) + +//nolint:funlen,gocyclo +func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { + var issues []Issue + + for _, node := range nodes { + file, ok := node.(*ast.File) + if !ok { + continue + } + + for _, c := range file.Comments { + for _, comment := range c.List { + if !commentPattern.MatchString(comment.Text) { + continue + } + + // check for a space between the "//" and the directive + leadingSpaceMatches := leadingSpacePattern.FindStringSubmatch(comment.Text) + + var leadingSpace string + if len(leadingSpaceMatches) > 0 { + leadingSpace = leadingSpaceMatches[1] + } + + directiveWithOptionalLeadingSpace := comment.Text + if len(leadingSpace) > 0 { + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + directiveWithOptionalLeadingSpace = "// " + strings.TrimSpace(split[1]) + } + + pos := fset.Position(comment.Pos()) + end := fset.Position(comment.End()) + + base := BaseIssue{ + fullDirective: comment.Text, + directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace, + position: pos, + } + + // check for, report and eliminate leading spaces so we can check for other issues + if len(leadingSpace) > 0 { + removeWhitespace := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: pos.Column + 1, + Length: len(leadingSpace), + NewString: "", + }, + } + if (l.needs & NeedsMachineOnly) != 0 { + issue := NotMachine{BaseIssue: base} + issue.BaseIssue.replacement = removeWhitespace + issues = append(issues, issue) + } else if len(leadingSpace) > 1 { + issue := ExtraLeadingSpace{BaseIssue: base} + issue.BaseIssue.replacement = removeWhitespace + issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended + issues = append(issues, issue) + } + } + + fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text) + if len(fullMatches) == 0 { + issues = append(issues, ParseError{BaseIssue: base}) + continue + 
} + + lintersText, explanation := fullMatches[1], fullMatches[2] + var linters []string + if len(lintersText) > 0 { + lls := strings.Split(lintersText, ",") + linters = make([]string, 0, len(lls)) + rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") + for i, ll := range lls { + rangeEnd := rangeStart + len(ll) + if i < len(lls)-1 { + rangeEnd++ // include trailing comma + } + trimmedLinterName := strings.TrimSpace(ll) + if trimmedLinterName != "" { + linters = append(linters, trimmedLinterName) + } + rangeStart = rangeEnd + } + } + + if (l.needs & NeedsSpecific) != 0 { + if len(linters) == 0 { + issues = append(issues, NotSpecific{BaseIssue: base}) + } + } + + // when detecting unused directives, we send all the directives through and filter them out in the nolint processor + if (l.needs & NeedsUnused) != 0 { + removeNolintCompletely := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: pos.Column - 1, + Length: end.Column - pos.Column, + NewString: "", + }, + } + + if len(linters) == 0 { + issue := UnusedCandidate{BaseIssue: base} + issue.replacement = removeNolintCompletely + issues = append(issues, issue) + } else { + for _, linter := range linters { + issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter} + // only offer replacement if there is a single linter + // because of issues around commas and the possibility of all + // linters being removed + if len(linters) == 1 { + issue.replacement = removeNolintCompletely + } + issues = append(issues, issue) + } + } + } + + if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { + needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation + // otherwise, check if we are excluding all of the mentioned linters + for _, ll := range linters { + if !l.excludeByLinter[ll] { // if a linter does require explanation + needsExplanation = true + break + } + } + + if needsExplanation { + fullDirectiveWithoutExplanation := trailingBlankExplanation.ReplaceAllString(comment.Text, "") + issues = append(issues, NoExplanation{ + BaseIssue: base, + fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation, + }) + } + } + } + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go new file mode 100644 index 000000000..3b784baf5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/kunwardeep/paralleltest/pkg/paralleltest" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewParallelTest() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + paralleltest.NewAnalyzer(), + } + + return goanalysis.NewLinter( + "paralleltest", + "paralleltest detects missing usage of t.Parallel() method in your Go test", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go new file mode 100644 index 000000000..3d06cf147 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go @@ -0,0 +1,57 @@ +package golinters + +import ( + "fmt" + "sync" + + "github.com/alexkohler/prealloc/pkg" + "golang.org/x/tools/go/analysis" + + 
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const preallocName = "prealloc" + +func NewPrealloc() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: preallocName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + preallocName, + "Finds slice declarations that could potentially be preallocated", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + s := &lintCtx.Settings().Prealloc + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var res []goanalysis.Issue + hints := pkg.Check(pass.Files, s.Simple, s.RangeLoops, s.ForLoops) + for _, hint := range hints { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(hint.Pos), + Text: fmt.Sprintf("Consider preallocating %s", formatCode(hint.DeclaredSliceName, lintCtx.Cfg)), + FromLinter: preallocName, + }, pass)) + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go new file mode 100644 index 000000000..caccd4823 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go @@ -0,0 +1,26 @@ +package golinters + +import ( + "github.com/nishanths/predeclared/passes/predeclared" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewPredeclared(settings *config.PredeclaredSettings) *goanalysis.Linter { + a := predeclared.Analyzer + + var cfg map[string]map[string]interface{} + if settings != nil { + cfg = map[string]map[string]interface{}{ + a.Name: { + predeclared.IgnoreFlag: settings.Ignore, + predeclared.QualifiedFlag: settings.Qualified, + }, + } + } + + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). 
+ WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go new file mode 100644 index 000000000..4fba3d274 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go @@ -0,0 +1,63 @@ +package golinters + +import ( + "fmt" + "sync" + + "github.com/yeya24/promlinter" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewPromlinter() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + const linterName = "promlinter" + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Check Prometheus metrics naming via promlint", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + strict := lintCtx.Cfg.LintersSettings.Promlinter.Strict + disabledLinters := lintCtx.Cfg.LintersSettings.Promlinter.DisabledLinters + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + issues := promlinter.RunLint(pass.Fset, pass.Files, promlinter.Setting{ + Strict: strict, + DisabledLintFuncs: disabledLinters, + }) + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, len(issues)) + for k, i := range issues { + issue := result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("Metric: %s Error: %s", i.Metric, i.Text), + FromLinter: linterName, + } + + res[k] = goanalysis.NewIssue(&issue, pass) + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go new file mode 100644 index 000000000..590332e66 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go @@ -0,0 +1,295 @@ +package golinters + +import ( + "bytes" + "encoding/json" + "fmt" + "go/token" + "io/ioutil" + "reflect" + + "github.com/BurntSushi/toml" + "github.com/mgechev/dots" + reviveConfig "github.com/mgechev/revive/config" + "github.com/mgechev/revive/lint" + "github.com/mgechev/revive/rule" + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +const reviveName = "revive" + +var reviveDebugf = logutils.Debug("revive") + +// jsonObject defines a JSON object of a failure +type jsonObject struct { + Severity lint.Severity + lint.Failure `json:",inline"` +} + +// NewRevive returns a new Revive linter. +func NewRevive(cfg *config.ReviveSettings) *goanalysis.Linter { + var issues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + + return goanalysis.NewLinter( + reviveName, + "Fast, configurable, extensible, flexible, and beautiful linter for Go. 
Drop-in replacement of golint.", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var files []string + + for _, file := range pass.Files { + files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) + } + + conf, err := getReviveConfig(cfg) + if err != nil { + return nil, err + } + + formatter, err := reviveConfig.GetFormatter("json") + if err != nil { + return nil, err + } + + revive := lint.New(ioutil.ReadFile) + + lintingRules, err := reviveConfig.GetLintingRules(conf) + if err != nil { + return nil, err + } + + packages, err := dots.ResolvePackages(files, []string{}) + if err != nil { + return nil, err + } + + failures, err := revive.Lint(packages, lintingRules, *conf) + if err != nil { + return nil, err + } + + formatChan := make(chan lint.Failure) + exitChan := make(chan bool) + + var output string + go func() { + output, err = formatter.Format(formatChan, *conf) + if err != nil { + lintCtx.Log.Errorf("Format error: %v", err) + } + exitChan <- true + }() + + for f := range failures { + if f.Confidence < conf.Confidence { + continue + } + + formatChan <- f + } + + close(formatChan) + <-exitChan + + var results []jsonObject + err = json.Unmarshal([]byte(output), &results) + if err != nil { + return nil, err + } + + for i := range results { + issues = append(issues, reviveToIssue(pass, &results[i])) + } + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { + lineRangeTo := object.Position.End.Line + if object.RuleName == (&rule.ExportedRule{}).Name() { + lineRangeTo = object.Position.Start.Line + } + + return goanalysis.NewIssue(&result.Issue{ + Severity: string(object.Severity), + Text: fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure), + Pos: token.Position{ + Filename: object.Position.Start.Filename, + Line: object.Position.Start.Line, + Offset: object.Position.Start.Offset, + Column: object.Position.Start.Column, + }, + LineRange: &result.Range{ + From: object.Position.Start.Line, + To: lineRangeTo, + }, + FromLinter: reviveName, + }, pass) +} + +// This function mimics the GetConfig function of revive. +// This allow to get default values and right types. 
+// https://github.com/golangci/golangci-lint/issues/1745 +// https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L155 +func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { + conf := defaultConfig() + + if !reflect.DeepEqual(cfg, &config.ReviveSettings{}) { + rawRoot := createConfigMap(cfg) + buf := bytes.NewBuffer(nil) + + err := toml.NewEncoder(buf).Encode(rawRoot) + if err != nil { + return nil, errors.Wrap(err, "failed to encode configuration") + } + + conf = &lint.Config{} + _, err = toml.DecodeReader(buf, conf) + if err != nil { + return nil, errors.Wrap(err, "failed to decode configuration") + } + } + + normalizeConfig(conf) + + reviveDebugf("revive configuration: %#v", conf) + + return conf, nil +} + +func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { + rawRoot := map[string]interface{}{ + "ignoreGeneratedHeader": cfg.IgnoreGeneratedHeader, + "confidence": cfg.Confidence, + "severity": cfg.Severity, + "errorCode": cfg.ErrorCode, + "warningCode": cfg.WarningCode, + "enableAllRules": cfg.EnableAllRules, + } + + rawDirectives := map[string]map[string]interface{}{} + for _, directive := range cfg.Directives { + rawDirectives[directive.Name] = map[string]interface{}{ + "severity": directive.Severity, + } + } + + if len(rawDirectives) > 0 { + rawRoot["directive"] = rawDirectives + } + + rawRules := map[string]map[string]interface{}{} + for _, s := range cfg.Rules { + rawRules[s.Name] = map[string]interface{}{ + "severity": s.Severity, + "arguments": safeTomlSlice(s.Arguments), + "disabled": s.Disabled, + } + } + + if len(rawRules) > 0 { + rawRoot["rule"] = rawRules + } + + return rawRoot +} + +func safeTomlSlice(r []interface{}) []interface{} { + if len(r) == 0 { + return nil + } + + if _, ok := r[0].(map[interface{}]interface{}); !ok { + return r + } + + var typed []interface{} + for _, elt := range r { + item := map[string]interface{}{} + for k, v := range elt.(map[interface{}]interface{}) { + item[k.(string)] = v + } + + typed = append(typed, item) + } + + return typed +} + +// This element is not exported by revive, so we need copy the code. +// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L15 +var defaultRules = []lint.Rule{ + &rule.VarDeclarationsRule{}, + &rule.PackageCommentsRule{}, + &rule.DotImportsRule{}, + &rule.BlankImportsRule{}, + &rule.ExportedRule{}, + &rule.VarNamingRule{}, + &rule.IndentErrorFlowRule{}, + &rule.RangeRule{}, + &rule.ErrorfRule{}, + &rule.ErrorNamingRule{}, + &rule.ErrorStringsRule{}, + &rule.ReceiverNamingRule{}, + &rule.IncrementDecrementRule{}, + &rule.ErrorReturnRule{}, + &rule.UnexportedReturnRule{}, + &rule.TimeNamingRule{}, + &rule.ContextKeysType{}, + &rule.ContextAsArgumentRule{}, +} + +// This element is not exported by revive, so we need copy the code. +// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L133 +func normalizeConfig(cfg *lint.Config) { + if cfg.Confidence == 0 { + cfg.Confidence = 0.8 + } + severity := cfg.Severity + if severity != "" { + for k, v := range cfg.Rules { + if v.Severity == "" { + v.Severity = severity + } + cfg.Rules[k] = v + } + for k, v := range cfg.Directives { + if v.Severity == "" { + v.Severity = severity + } + cfg.Directives[k] = v + } + } +} + +// This element is not exported by revive, so we need copy the code. 
+// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L182 +func defaultConfig() *lint.Config { + defaultConfig := lint.Config{ + Confidence: 0.0, + Severity: lint.SeverityWarning, + Rules: map[string]lint.RuleConfig{}, + } + for _, r := range defaultRules { + defaultConfig.Rules[r.Name()] = lint.RuleConfig{} + } + return &defaultConfig +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go new file mode 100644 index 000000000..d4c89d382 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go @@ -0,0 +1,23 @@ +package golinters + +import ( + "github.com/jingyugao/rowserrcheck/passes/rowserr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +func NewRowsErrCheck() *goanalysis.Linter { + analyzer := rowserr.NewAnalyzer() + return goanalysis.NewLinter( + "rowserrcheck", + "checks whether Err of rows is checked successfully", + []*analysis.Analyzer{analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo). + WithContextSetter(func(lintCtx *linter.Context) { + pkgs := lintCtx.Settings().RowsErrCheck.Packages + analyzer.Run = rowserr.NewRun(pkgs...) + }) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go new file mode 100644 index 000000000..ba3921e19 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go @@ -0,0 +1,177 @@ +package golinters + +import ( + "fmt" + "go/ast" + "go/token" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const scopelintName = "scopelint" + +func NewScopelint() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: scopelintName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + scopelintName, + "Scopelint checks for unpinned variables in go programs", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var res []result.Issue + for _, file := range pass.Files { + n := Node{ + fset: pass.Fset, + DangerObjects: map[*ast.Object]int{}, + UnsafeObjects: map[*ast.Object]int{}, + SkipFuncs: map[*ast.FuncLit]int{}, + issues: &res, + } + ast.Walk(&n, file) + } + + if len(res) == 0 { + return nil, nil + } + + mu.Lock() + for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +// The code below is copy-pasted from https://github.com/kyoh86/scopelint 92cbe2cc9276abda0e309f52cc9e309d407f174e + +// Node represents a Node being linted. +type Node struct { + fset *token.FileSet + DangerObjects map[*ast.Object]int + UnsafeObjects map[*ast.Object]int + SkipFuncs map[*ast.FuncLit]int + issues *[]result.Issue +} + +// Visit method is invoked for each node encountered by Walk. 
+// If the result visitor w is not nil, Walk visits each of the children +// of node with the visitor w, followed by a call of w.Visit(nil). +//nolint:gocyclo,gocritic +func (f *Node) Visit(node ast.Node) ast.Visitor { + switch typedNode := node.(type) { + case *ast.ForStmt: + switch init := typedNode.Init.(type) { + case *ast.AssignStmt: + for _, lh := range init.Lhs { + switch tlh := lh.(type) { + case *ast.Ident: + f.UnsafeObjects[tlh.Obj] = 0 + } + } + } + + case *ast.RangeStmt: + // Memory variables declared in range statement + switch k := typedNode.Key.(type) { + case *ast.Ident: + f.UnsafeObjects[k.Obj] = 0 + } + switch v := typedNode.Value.(type) { + case *ast.Ident: + f.UnsafeObjects[v.Obj] = 0 + } + + case *ast.UnaryExpr: + if typedNode.Op == token.AND { + switch ident := typedNode.X.(type) { + case *ast.Ident: + if _, unsafe := f.UnsafeObjects[ident.Obj]; unsafe { + f.errorf(ident, "Using a reference for the variable on range scope %s", formatCode(ident.Name, nil)) + } + } + } + + case *ast.Ident: + if _, obj := f.DangerObjects[typedNode.Obj]; obj { + // It is the naked variable in scope of range statement. + f.errorf(node, "Using the variable on range scope %s in function literal", formatCode(typedNode.Name, nil)) + break + } + + case *ast.CallExpr: + // Ignore func literals that'll be called immediately. + switch funcLit := typedNode.Fun.(type) { + case *ast.FuncLit: + f.SkipFuncs[funcLit] = 0 + } + + case *ast.FuncLit: + if _, skip := f.SkipFuncs[typedNode]; !skip { + dangers := map[*ast.Object]int{} + for d := range f.DangerObjects { + dangers[d] = 0 + } + for u := range f.UnsafeObjects { + dangers[u] = 0 + f.UnsafeObjects[u]++ + } + return &Node{ + fset: f.fset, + DangerObjects: dangers, + UnsafeObjects: f.UnsafeObjects, + SkipFuncs: f.SkipFuncs, + issues: f.issues, + } + } + + case *ast.ReturnStmt: + unsafe := map[*ast.Object]int{} + for u := range f.UnsafeObjects { + if f.UnsafeObjects[u] == 0 { + continue + } + unsafe[u] = f.UnsafeObjects[u] + } + return &Node{ + fset: f.fset, + DangerObjects: f.DangerObjects, + UnsafeObjects: unsafe, + SkipFuncs: f.SkipFuncs, + issues: f.issues, + } + } + return f +} + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +//nolint:interfacer +func (f *Node) errorf(n ast.Node, format string, args ...interface{}) { + pos := f.fset.Position(n.Pos()) + f.errorAtf(pos, format, args...) 
+} + +func (f *Node) errorAtf(pos token.Position, format string, args ...interface{}) { + *f.issues = append(*f.issues, result.Issue{ + Pos: pos, + Text: fmt.Sprintf(format, args...), + FromLinter: scopelintName, + }) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go new file mode 100644 index 000000000..48ca246e7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/ryanrolds/sqlclosecheck/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewSQLCloseCheck() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + analyzer.NewAnalyzer(), + } + + return goanalysis.NewLinter( + "sqlclosecheck", + "Checks that sql.Rows and sql.Stmt are closed.", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go new file mode 100644 index 000000000..2226eabb4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "honnef.co/go/tools/staticcheck" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewStaticcheck(settings *config.StaticCheckSettings) *goanalysis.Linter { + cfg := staticCheckConfig(settings) + + analyzers := setupStaticCheckAnalyzers(staticcheck.Analyzers, getGoVersion(settings), cfg.Checks) + + return goanalysis.NewLinter( + "staticcheck", + "Staticcheck is a go vet on steroids, applying a ton of static analysis checks", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go new file mode 100644 index 000000000..dc6360d7e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go @@ -0,0 +1,168 @@ +package golinters + +import ( + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/analysis/lint" + scconfig "honnef.co/go/tools/config" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +var debugf = logutils.Debug("megacheck") + +func getGoVersion(settings *config.StaticCheckSettings) string { + var goVersion string + if settings != nil { + goVersion = settings.GoVersion + } + + if goVersion != "" { + return goVersion + } + + // TODO: uses "1.13" for backward compatibility, but in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. 
+ return "1.13" +} + +func setupStaticCheckAnalyzers(src []*lint.Analyzer, goVersion string, checks []string) []*analysis.Analyzer { + var names []string + for _, a := range src { + names = append(names, a.Analyzer.Name) + } + + filter := filterAnalyzerNames(names, checks) + + var ret []*analysis.Analyzer + for _, a := range src { + if filter[a.Analyzer.Name] { + setAnalyzerGoVersion(a.Analyzer, goVersion) + ret = append(ret, a.Analyzer) + } + } + + return ret +} + +func setAnalyzerGoVersion(a *analysis.Analyzer, goVersion string) { + if v := a.Flags.Lookup("go"); v != nil { + if err := v.Value.Set(goVersion); err != nil { + debugf("Failed to set go version: %s", err) + } + } +} + +func staticCheckConfig(settings *config.StaticCheckSettings) *scconfig.Config { + var cfg *scconfig.Config + + if settings == nil || !settings.HasConfiguration() { + return &scconfig.Config{ + Checks: []string{"*"}, // override for compatibility reason. Must drop in the next major version. + Initialisms: scconfig.DefaultConfig.Initialisms, + DotImportWhitelist: scconfig.DefaultConfig.DotImportWhitelist, + HTTPStatusCodeWhitelist: scconfig.DefaultConfig.HTTPStatusCodeWhitelist, + } + } + + cfg = &scconfig.Config{ + Checks: settings.Checks, + Initialisms: settings.Initialisms, + DotImportWhitelist: settings.DotImportWhitelist, + HTTPStatusCodeWhitelist: settings.HTTPStatusCodeWhitelist, + } + + if len(cfg.Checks) == 0 { + cfg.Checks = append(cfg.Checks, "*") // override for compatibility reason. Must drop in the next major version. + } + + if len(cfg.Initialisms) == 0 { + cfg.Initialisms = append(cfg.Initialisms, scconfig.DefaultConfig.Initialisms...) + } + + if len(cfg.DotImportWhitelist) == 0 { + cfg.DotImportWhitelist = append(cfg.DotImportWhitelist, scconfig.DefaultConfig.DotImportWhitelist...) + } + + if len(cfg.HTTPStatusCodeWhitelist) == 0 { + cfg.HTTPStatusCodeWhitelist = append(cfg.HTTPStatusCodeWhitelist, scconfig.DefaultConfig.HTTPStatusCodeWhitelist...) + } + + cfg.Checks = normalizeList(cfg.Checks) + cfg.Initialisms = normalizeList(cfg.Initialisms) + cfg.DotImportWhitelist = normalizeList(cfg.DotImportWhitelist) + cfg.HTTPStatusCodeWhitelist = normalizeList(cfg.HTTPStatusCodeWhitelist) + + return cfg +} + +// https://github.com/dominikh/go-tools/blob/9bf17c0388a65710524ba04c2d821469e639fdc2/lintcmd/lint.go#L437-L477 +// nolint // Keep the original source code. 
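+// filterAnalyzerNames resolves the configured check patterns against the known
+// analyzer names: "*" or "all" matches every analyzer, a trailing "*" acts as a
+// category or prefix glob (e.g. "S*" matches S1000 but not SA1000, while "S1*"
+// matches by prefix), a leading "-" turns the pattern into an exclusion, and
+// anything else is taken as a literal check name. The returned map marks each
+// matched name as enabled (true) or excluded (false).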
+func filterAnalyzerNames(analyzers []string, checks []string) map[string]bool { + allowedChecks := map[string]bool{} + + for _, check := range checks { + b := true + if len(check) > 1 && check[0] == '-' { + b = false + check = check[1:] + } + + if check == "*" || check == "all" { + // Match all + for _, c := range analyzers { + allowedChecks[c] = b + } + } else if strings.HasSuffix(check, "*") { + // Glob + prefix := check[:len(check)-1] + isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1 + + for _, a := range analyzers { + idx := strings.IndexFunc(a, func(r rune) bool { return unicode.IsNumber(r) }) + if isCat { + // Glob is S*, which should match S1000 but not SA1000 + cat := a[:idx] + if prefix == cat { + allowedChecks[a] = b + } + } else { + // Glob is S1* + if strings.HasPrefix(a, prefix) { + allowedChecks[a] = b + } + } + } + } else { + // Literal check name + allowedChecks[check] = b + } + } + return allowedChecks +} + +// https://github.com/dominikh/go-tools/blob/9bf17c0388a65710524ba04c2d821469e639fdc2/config/config.go#L95-L116 +func normalizeList(list []string) []string { + if len(list) > 1 { + nlist := make([]string, 0, len(list)) + nlist = append(nlist, list[0]) + for i, el := range list[1:] { + if el != list[i] { + nlist = append(nlist, el) + } + } + list = nlist + } + + for _, el := range list { + if el == "inherit" { + // This should never happen, because the default config + // should not use "inherit" + panic(`unresolved "inherit"`) + } + } + + return list +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go new file mode 100644 index 000000000..7c16f8ec3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go @@ -0,0 +1,55 @@ +package golinters // nolint:dupl + +import ( + "fmt" + "sync" + + structcheckAPI "github.com/golangci/check/cmd/structcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewStructcheck() *goanalysis.Linter { + const linterName = "structcheck" + var mu sync.Mutex + var res []goanalysis.Issue + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Finds unused struct fields", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + checkExported := lintCtx.Settings().Structcheck.CheckExportedFields + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + structcheckIssues := structcheckAPI.Run(prog, checkExported) + if len(structcheckIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, 0, len(structcheckIssues)) + for _, i := range structcheckIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, lintCtx.Cfg)), + FromLinter: linterName, + }, pass)) + } + + mu.Lock() + res = append(res, issues...) 
+ mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return res + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go new file mode 100644 index 000000000..899f6ff58 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "golang.org/x/tools/go/analysis" + scconfig "honnef.co/go/tools/config" + "honnef.co/go/tools/stylecheck" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewStylecheck(settings *config.StaticCheckSettings) *goanalysis.Linter { + cfg := staticCheckConfig(settings) + + // `scconfig.Analyzer` is a singleton, then it's not possible to have more than one instance for all staticcheck "sub-linters". + // When we will merge the 4 "sub-linters", the problem will disappear: https://github.com/golangci/golangci-lint/issues/357 + // Currently only stylecheck analyzer has a configuration in staticcheck. + scconfig.Analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + return cfg, nil + } + + analyzers := setupStaticCheckAnalyzers(stylecheck.Analyzers, getGoVersion(settings), cfg.Checks) + + return goanalysis.NewLinter( + "stylecheck", + "Stylecheck is a replacement for golint", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go new file mode 100644 index 000000000..5f58fc1d3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/ldez/tagliatelle" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTagliatelle(settings *config.TagliatelleSettings) *goanalysis.Linter { + cfg := tagliatelle.Config{ + Rules: map[string]string{ + "json": "camel", + "yaml": "camel", + }, + } + + if settings != nil { + for k, v := range settings.Case.Rules { + cfg.Rules[k] = v + } + cfg.UseFieldName = settings.Case.UseFieldName + } + + a := tagliatelle.New(cfg) + + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). + WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go new file mode 100644 index 000000000..1248e78fd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go @@ -0,0 +1,23 @@ +package golinters + +import ( + "github.com/maratori/testpackage/pkg/testpackage" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTestpackage(cfg *config.TestpackageSettings) *goanalysis.Linter { + var a = testpackage.NewAnalyzer() + var settings map[string]map[string]interface{} + if cfg != nil { + settings = map[string]map[string]interface{}{ + a.Name: { + testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + }, + } + } + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). 
+ WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go new file mode 100644 index 000000000..1d92f2fbf --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go @@ -0,0 +1,61 @@ +package golinters + +import ( + "strings" + + "github.com/kulti/thelper/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewThelper(cfg *config.ThelperSettings) *goanalysis.Linter { + a := analyzer.NewAnalyzer() + + cfgMap := map[string]map[string]interface{}{} + if cfg != nil { + var opts []string + + if cfg.Test.Name { + opts = append(opts, "t_name") + } + if cfg.Test.Begin { + opts = append(opts, "t_begin") + } + if cfg.Test.First { + opts = append(opts, "t_first") + } + + if cfg.Benchmark.Name { + opts = append(opts, "b_name") + } + if cfg.Benchmark.Begin { + opts = append(opts, "b_begin") + } + if cfg.Benchmark.First { + opts = append(opts, "b_first") + } + + if cfg.TB.Name { + opts = append(opts, "tb_name") + } + if cfg.TB.Begin { + opts = append(opts, "tb_begin") + } + if cfg.TB.First { + opts = append(opts, "tb_first") + } + + cfgMap[a.Name] = map[string]interface{}{ + "checks": strings.Join(opts, ","), + } + } + + return goanalysis.NewLinter( + "thelper", + "thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go new file mode 100644 index 000000000..a4b96eb73 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/moricho/tparallel" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTparallel() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + tparallel.Analyzer, + } + + return goanalysis.NewLinter( + "tparallel", + "tparallel detects inappropriate usage of t.Parallel() method in your Go test codes", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go new file mode 100644 index 000000000..24f4339fb --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go @@ -0,0 +1,28 @@ +package golinters + +import ( + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTypecheck() *goanalysis.Linter { + const linterName = "typecheck" + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (interface{}, error) { + return nil, nil + }, + } + + linter := goanalysis.NewLinter( + linterName, + "Like the front-end of a Go compiler, parses and type-checks Go code", + []*analysis.Analyzer{analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) + + return linter +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go new file mode 100644 index 000000000..456f6836c --- 
/dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go @@ -0,0 +1,53 @@ +package golinters + +import ( + "sync" + + unconvertAPI "github.com/golangci/unconvert" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewUnconvert() *goanalysis.Linter { + const linterName = "unconvert" + var mu sync.Mutex + var res []goanalysis.Issue + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Remove unnecessary type conversions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + positions := unconvertAPI.Run(prog) + if len(positions) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, 0, len(positions)) + for _, pos := range positions { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pos, + Text: "unnecessary conversion", + FromLinter: linterName, + }, pass)) + } + + mu.Lock() + res = append(res, issues...) + mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return res + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go new file mode 100644 index 000000000..33dd55c9b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go @@ -0,0 +1,76 @@ +package golinters + +import ( + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/packages" + "mvdan.cc/unparam/check" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewUnparam() *goanalysis.Linter { + const linterName = "unparam" + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } + return goanalysis.NewLinter( + linterName, + "Reports unused function parameters", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + us := &lintCtx.Settings().Unparam + if us.Algo != "cha" { + lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") + } + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + ssaPkg := ssa.Pkg + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + c := &check.Checker{} + c.CheckExportedFuncs(us.CheckExported) + c.Packages([]*packages.Package{pkg}) + c.ProgramSSA(ssaPkg.Prog) + + unparamIssues, err := c.Check() + if err != nil { + return nil, err + } + + var res []goanalysis.Issue + for _, i := range unparamIssues { + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(i.Pos()), + Text: i.Message(), + FromLinter: linterName, + }, pass)) + } + + mu.Lock() + resIssues = append(resIssues, res...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go new file mode 100644 index 000000000..cfdf1f2ca --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go @@ -0,0 +1,69 @@ +package golinters + +import ( + "fmt" + "sync" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/unused" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +type UnusedSettings struct { + GoVersion string +} + +func NewUnused(settings *config.StaticCheckSettings) *goanalysis.Linter { + const name = "unused" + + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: name, + Doc: unused.Analyzer.Analyzer.Doc, + Requires: unused.Analyzer.Analyzer.Requires, + Run: func(pass *analysis.Pass) (interface{}, error) { + res, err := unused.Analyzer.Analyzer.Run(pass) + if err != nil { + return nil, err + } + + sr := unused.Serialize(pass, res.(unused.Result), pass.Fset) + + var issues []goanalysis.Issue + for _, object := range sr.Unused { + issue := goanalysis.NewIssue(&result.Issue{ + FromLinter: name, + Text: fmt.Sprintf("%s %s is unused", object.Kind, object.Name), + Pos: object.Position, + }, pass) + + issues = append(issues, issue) + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, + } + + setAnalyzerGoVersion(analyzer, getGoVersion(settings)) + + lnt := goanalysis.NewLinter( + name, + "Checks Go code for unused constants, variables, functions and types", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(lintCtx *linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) + + return lnt +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go new file mode 100644 index 000000000..1940f30e3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go @@ -0,0 +1,24 @@ +package golinters + +import ( + "fmt" + "strings" + + "github.com/golangci/golangci-lint/pkg/config" +) + +func formatCode(code string, _ *config.Config) string { + if strings.Contains(code, "`") { + return code // TODO: properly escape or remove + } + + return fmt.Sprintf("`%s`", code) +} + +func formatCodeBlock(code string, _ *config.Config) string { + if strings.Contains(code, "`") { + return code // TODO: properly escape or remove + } + + return fmt.Sprintf("```\n%s\n```", code) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go new file mode 100644 index 000000000..dcf2e7de8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go @@ -0,0 +1,55 @@ +package golinters // nolint:dupl + +import ( + "fmt" + "sync" + + varcheckAPI "github.com/golangci/check/cmd/varcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewVarcheck() 
*goanalysis.Linter { + const linterName = "varcheck" + var mu sync.Mutex + var res []goanalysis.Issue + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Finds unused global variables and constants", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + checkExported := lintCtx.Settings().Varcheck.CheckExportedFields + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + varcheckIssues := varcheckAPI.Run(prog, checkExported) + if len(varcheckIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, 0, len(varcheckIssues)) + for _, i := range varcheckIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, lintCtx.Cfg)), + FromLinter: linterName, + }, pass)) + } + + mu.Lock() + res = append(res, issues...) + mu.Unlock() + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return res + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go new file mode 100644 index 000000000..d359fb019 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go @@ -0,0 +1,21 @@ +package golinters + +import ( + "github.com/sanposhiho/wastedassign/v2" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewWastedAssign() *goanalysis.Linter { + analyzers := []*analysis.Analyzer{ + wastedassign.Analyzer, + } + + return goanalysis.NewLinter( + "wastedassign", + "wastedassign finds wasted assignment statements.", + analyzers, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go new file mode 100644 index 000000000..d475465a2 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go @@ -0,0 +1,85 @@ +package golinters + +import ( + "go/token" + "sync" + + "github.com/pkg/errors" + "github.com/ultraware/whitespace" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewWhitespace() *goanalysis.Linter { + const linterName = "whitespace" + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + return goanalysis.NewLinter( + linterName, + "Tool for detection of leading and trailing whitespace", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + cfg := lintCtx.Cfg.LintersSettings.Whitespace + settings := whitespace.Settings{MultiIf: cfg.MultiIf, MultiFunc: cfg.MultiFunc} + + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var issues []whitespace.Message + for _, file := range pass.Files { + issues = append(issues, whitespace.Run(file, pass.Fset, settings)...) 
+ } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, len(issues)) + for k, i := range issues { + issue := result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + LineRange: &result.Range{From: i.Pos.Line, To: i.Pos.Line}, + Text: i.Message, + FromLinter: linterName, + Replacement: &result.Replacement{}, + } + + bracketLine, err := lintCtx.LineCache.GetLine(issue.Pos.Filename, issue.Pos.Line) + if err != nil { + return nil, errors.Wrapf(err, "failed to get line %s:%d", issue.Pos.Filename, issue.Pos.Line) + } + + switch i.Type { + case whitespace.MessageTypeLeading: + issue.LineRange.To++ // cover two lines by the issue: opening bracket "{" (issue.Pos.Line) and following empty line + case whitespace.MessageTypeTrailing: + issue.LineRange.From-- // cover two lines by the issue: closing bracket "}" (issue.Pos.Line) and preceding empty line + issue.Pos.Line-- // set in sync with LineRange.From to not break fixer and other code features + case whitespace.MessageTypeAddAfter: + bracketLine += "\n" + } + issue.Replacement.NewLines = []string{bracketLine} + + res[k] = goanalysis.NewIssue(&issue, pass) + } + + mu.Lock() + resIssues = append(resIssues, res...) + mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go new file mode 100644 index 000000000..5eaf085d7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "github.com/tomarrell/wrapcheck/v2/wrapcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +const wrapcheckName = "wrapcheck" + +func NewWrapcheck(settings *config.WrapcheckSettings) *goanalysis.Linter { + cfg := wrapcheck.NewDefaultConfig() + if settings != nil { + if len(settings.IgnoreSigs) != 0 { + cfg.IgnoreSigs = settings.IgnoreSigs + } + if len(settings.IgnorePackageGlobs) != 0 { + cfg.IgnorePackageGlobs = settings.IgnorePackageGlobs + } + } + + a := wrapcheck.NewAnalyzer(cfg) + + return goanalysis.NewLinter( + wrapcheckName, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go new file mode 100644 index 000000000..29d00faea --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go @@ -0,0 +1,83 @@ +package golinters + +import ( + "sync" + + "github.com/bombsimon/wsl/v3" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const ( + name = "wsl" +) + +// NewWSL returns a new WSL linter. 
+func NewWSL() *goanalysis.Linter { + var ( + issues []goanalysis.Issue + mu = sync.Mutex{} + analyzer = &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + } + ) + + return goanalysis.NewLinter( + name, + "Whitespace Linter - Forces you to use empty lines!", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + var ( + files = []string{} + linterCfg = lintCtx.Cfg.LintersSettings.WSL + processorCfg = wsl.Configuration{ + StrictAppend: linterCfg.StrictAppend, + AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, + AllowAssignAndAnythingCuddle: linterCfg.AllowAssignAndAnythingCuddle, + AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, + AllowCuddleDeclaration: linterCfg.AllowCuddleDeclaration, + AllowTrailingComment: linterCfg.AllowTrailingComment, + AllowSeparatedLeadingComment: linterCfg.AllowSeparatedLeadingComment, + ForceCuddleErrCheckAndAssign: linterCfg.ForceCuddleErrCheckAndAssign, + ForceCaseTrailingWhitespaceLimit: linterCfg.ForceCaseTrailingWhitespaceLimit, + ForceExclusiveShortDeclarations: linterCfg.ForceExclusiveShortDeclarations, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + ErrorVariableNames: []string{"err"}, + } + ) + + for _, file := range pass.Files { + files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) + } + + wslErrors, _ := wsl.NewProcessorWithConfig(processorCfg). + ProcessFiles(files) + + if len(wslErrors) == 0 { + return nil, nil + } + + mu.Lock() + defer mu.Unlock() + + for _, err := range wslErrors { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + FromLinter: name, + Pos: err.Position, + Text: err.Reason, + }, pass)) + } + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go new file mode 100644 index 000000000..1c05b9805 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go @@ -0,0 +1,61 @@ +package goutil + +import ( + "context" + "encoding/json" + "os" + "os/exec" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type EnvKey string + +const ( + EnvGoCache EnvKey = "GOCACHE" + EnvGoRoot EnvKey = "GOROOT" +) + +type Env struct { + vars map[string]string + log logutils.Log + debugf logutils.DebugFunc +} + +func NewEnv(log logutils.Log) *Env { + return &Env{ + vars: map[string]string{}, + log: log, + debugf: logutils.Debug("env"), + } +} + +func (e *Env) Discover(ctx context.Context) error { + startedAt := time.Now() + args := []string{"env", "-json"} + args = append(args, string(EnvGoCache), string(EnvGoRoot)) + out, err := exec.CommandContext(ctx, "go", args...).Output() + if err != nil { + return errors.Wrap(err, "failed to run 'go env'") + } + + if err = json.Unmarshal(out, &e.vars); err != nil { + return errors.Wrapf(err, "failed to parse 'go %s' json", strings.Join(args, " ")) + } + + e.debugf("Read go env for %s: %#v", time.Since(startedAt), e.vars) + return nil +} + +func (e Env) Get(k EnvKey) string { + envValue := os.Getenv(string(k)) + if envValue != "" { + return envValue + } + + return e.vars[string(k)] +} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go new file mode 100644 index 000000000..2372a011e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -0,0 +1,127 @@ +package linter + +import ( + "golang.org/x/tools/go/packages" +) + +const ( + PresetBugs = "bugs" // Related to bugs detection. + PresetComment = "comment" // Related to comments analysis. + PresetComplexity = "complexity" // Related to code complexity analysis. + PresetError = "error" // Related to error handling analysis. + PresetFormatting = "format" // Related to code formatting. + PresetImport = "import" // Related to imports analysis. + PresetMetaLinter = "metalinter" // Related to linter that contains multiple rules or multiple linters. + PresetModule = "module" // Related to Go modules analysis. + PresetPerformance = "performance" // Related to performance. + PresetSQL = "sql" // Related to SQL. + PresetStyle = "style" // Related to coding style. + PresetTest = "test" // Related to the analysis of the code of the tests. + PresetUnused = "unused" // Related to the detection of unused code. +) + +// LastLinter nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives. +const LastLinter = "nolintlint" + +type Deprecation struct { + Since string + Message string + Replacement string +} + +type Config struct { + Linter Linter + EnabledByDefault bool + + LoadMode packages.LoadMode + + InPresets []string + AlternativeNames []string + + OriginalURL string // URL of original (not forked) repo, needed for autogenerated README + CanAutoFix bool + IsSlow bool + DoesChangeTypes bool + + Since string + Deprecation *Deprecation +} + +func (lc *Config) ConsiderSlow() *Config { + lc.IsSlow = true + return lc +} + +func (lc *Config) IsSlowLinter() bool { + return lc.IsSlow +} + +func (lc *Config) WithLoadFiles() *Config { + lc.LoadMode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles + return lc +} + +func (lc *Config) WithLoadForGoAnalysis() *Config { + lc = lc.WithLoadFiles() + lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedTypesSizes + lc.IsSlow = true + return lc +} + +func (lc *Config) WithPresets(presets ...string) *Config { + lc.InPresets = presets + return lc +} + +func (lc *Config) WithURL(url string) *Config { + lc.OriginalURL = url + return lc +} + +func (lc *Config) WithAlternativeNames(names ...string) *Config { + lc.AlternativeNames = names + return lc +} + +func (lc *Config) WithAutoFix() *Config { + lc.CanAutoFix = true + return lc +} + +func (lc *Config) WithChangeTypes() *Config { + lc.DoesChangeTypes = true + return lc +} + +func (lc *Config) WithSince(version string) *Config { + lc.Since = version + return lc +} + +func (lc *Config) Deprecated(message, version, replacement string) *Config { + lc.Deprecation = &Deprecation{ + Since: version, + Message: message, + Replacement: replacement, + } + return lc +} + +func (lc *Config) IsDeprecated() bool { + return lc.Deprecation != nil +} + +func (lc *Config) AllNames() []string { + return append([]string{lc.Name()}, lc.AlternativeNames...) 
+} + +func (lc *Config) Name() string { + return lc.Linter.Name() +} + +func NewConfig(linter Linter) *Config { + lc := &Config{ + Linter: linter, + } + return lc.WithLoadFiles() +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go new file mode 100644 index 000000000..a9f9d7d7f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go @@ -0,0 +1,49 @@ +package linter + +import ( + "go/ast" + + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type Context struct { + // Packages are deduplicated (test and normal packages) packages + Packages []*packages.Package + + // OriginalPackages aren't deduplicated: they contain both normal and test + // version for each of packages + OriginalPackages []*packages.Package + + Cfg *config.Config + FileCache *fsutils.FileCache + LineCache *fsutils.LineCache + Log logutils.Log + + PkgCache *pkgcache.Cache + LoadGuard *load.Guard +} + +func (c *Context) Settings() *config.LintersSettings { + return &c.Cfg.LintersSettings +} + +func (c *Context) ClearTypesInPackages() { + for _, p := range c.Packages { + clearTypes(p) + } + for _, p := range c.OriginalPackages { + clearTypes(p) + } +} + +func clearTypes(p *packages.Package) { + p.Types = nil + p.TypesInfo = nil + p.Syntax = []*ast.File{} +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go new file mode 100644 index 000000000..cfe9ec020 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go @@ -0,0 +1,13 @@ +package linter + +import ( + "context" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Linter interface { + Run(ctx context.Context, lintCtx *Context) ([]result.Issue, error) + Name() string + Desc() string +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go new file mode 100644 index 000000000..9814aa857 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go @@ -0,0 +1,207 @@ +package lintersdb + +import ( + "os" + "sort" + "strings" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type EnabledSet struct { + m *Manager + v *Validator + log logutils.Log + cfg *config.Config + debugf logutils.DebugFunc +} + +func NewEnabledSet(m *Manager, v *Validator, log logutils.Log, cfg *config.Config) *EnabledSet { + return &EnabledSet{ + m: m, + v: v, + log: log, + cfg: cfg, + debugf: logutils.Debug("enabled_linters"), + } +} + +func (es EnabledSet) build(lcfg *config.Linters, enabledByDefaultLinters []*linter.Config) map[string]*linter.Config { + es.debugf("Linters config: %#v", lcfg) + resultLintersSet := map[string]*linter.Config{} + switch { + case len(lcfg.Presets) != 0: + break // imply --disable-all + case lcfg.EnableAll: + resultLintersSet = linterConfigsToMap(es.m.GetAllSupportedLinterConfigs()) + case lcfg.DisableAll: + break + default: + 
resultLintersSet = linterConfigsToMap(enabledByDefaultLinters) + } + + // --presets can only add linters to default set + for _, p := range lcfg.Presets { + for _, lc := range es.m.GetAllLinterConfigsForPreset(p) { + lc := lc + resultLintersSet[lc.Name()] = lc + } + } + + // --fast removes slow linters from current set. + // It should be after --presets to be able to run only fast linters in preset. + // It should be before --enable and --disable to be able to enable or disable specific linter. + if lcfg.Fast { + for name, lc := range resultLintersSet { + if lc.IsSlowLinter() { + delete(resultLintersSet, name) + } + } + } + + for _, name := range lcfg.Enable { + for _, lc := range es.m.GetLinterConfigs(name) { + // it's important to use lc.Name() nor name because name can be alias + resultLintersSet[lc.Name()] = lc + } + } + + for _, name := range lcfg.Disable { + for _, lc := range es.m.GetLinterConfigs(name) { + // it's important to use lc.Name() nor name because name can be alias + delete(resultLintersSet, lc.Name()) + } + } + + return resultLintersSet +} + +func (es EnabledSet) GetEnabledLintersMap() (map[string]*linter.Config, error) { + if err := es.v.validateEnabledDisabledLintersConfig(&es.cfg.Linters); err != nil { + return nil, err + } + + enabledLinters := es.build(&es.cfg.Linters, es.m.GetAllEnabledByDefaultLinters()) + if os.Getenv("GL_TEST_RUN") == "1" { + es.verbosePrintLintersStatus(enabledLinters) + } + return enabledLinters, nil +} + +// GetOptimizedLinters returns enabled linters after optimization (merging) of multiple linters +// into a fewer number of linters. E.g. some go/analysis linters can be optimized into +// one metalinter for data reuse and speed up. +func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) { + if err := es.v.validateEnabledDisabledLintersConfig(&es.cfg.Linters); err != nil { + return nil, err + } + + resultLintersSet := es.build(&es.cfg.Linters, es.m.GetAllEnabledByDefaultLinters()) + es.verbosePrintLintersStatus(resultLintersSet) + es.combineGoAnalysisLinters(resultLintersSet) + + var resultLinters []*linter.Config + for _, lc := range resultLintersSet { + resultLinters = append(resultLinters, lc) + } + + // Make order of execution of linters (go/analysis metalinter and unused) stable. + sort.Slice(resultLinters, func(i, j int) bool { + a, b := resultLinters[i], resultLinters[j] + + if b.Name() == linter.LastLinter { + return true + } + + if a.Name() == linter.LastLinter { + return false + } + + if a.DoesChangeTypes != b.DoesChangeTypes { + return b.DoesChangeTypes // move type-changing linters to the end to optimize speed + } + return strings.Compare(a.Name(), b.Name()) < 0 + }) + + return resultLinters, nil +} + +func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) { + var goanalysisLinters []*goanalysis.Linter + goanalysisPresets := map[string]bool{} + for _, linter := range linters { + lnt, ok := linter.Linter.(*goanalysis.Linter) + if !ok { + continue + } + if lnt.LoadMode() == goanalysis.LoadModeWholeProgram { + // It's ineffective by CPU and memory to run whole-program and incremental analyzers at once. 
+ continue + } + goanalysisLinters = append(goanalysisLinters, lnt) + for _, p := range linter.InPresets { + goanalysisPresets[p] = true + } + } + + if len(goanalysisLinters) <= 1 { + es.debugf("Didn't combine go/analysis linters: got only %d linters", len(goanalysisLinters)) + return + } + + for _, lnt := range goanalysisLinters { + delete(linters, lnt.Name()) + } + + // Make order of execution of go/analysis analyzers stable. + sort.Slice(goanalysisLinters, func(i, j int) bool { + a, b := goanalysisLinters[i], goanalysisLinters[j] + + if b.Name() == linter.LastLinter { + return true + } + + if a.Name() == linter.LastLinter { + return false + } + + return strings.Compare(a.Name(), b.Name()) <= 0 + }) + + ml := goanalysis.NewMetaLinter(goanalysisLinters) + + var presets []string + for p := range goanalysisPresets { + presets = append(presets, p) + } + + mlConfig := &linter.Config{ + Linter: ml, + EnabledByDefault: false, + InPresets: presets, + AlternativeNames: nil, + OriginalURL: "", + } + + mlConfig = mlConfig.WithLoadForGoAnalysis() + + linters[ml.Name()] = mlConfig + es.debugf("Combined %d go/analysis linters into one metalinter", len(goanalysisLinters)) +} + +func (es EnabledSet) verbosePrintLintersStatus(lcs map[string]*linter.Config) { + var linterNames []string + for _, lc := range lcs { + linterNames = append(linterNames, lc.Name()) + } + sort.StringSlice(linterNames).Sort() + es.log.Infof("Active %d linters: %s", len(linterNames), linterNames) + + if len(es.cfg.Linters.Presets) != 0 { + sort.StringSlice(es.cfg.Linters.Presets).Sort() + es.log.Infof("Active presets: %s", es.cfg.Linters.Presets) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go new file mode 100644 index 000000000..a69a6ec38 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -0,0 +1,618 @@ +package lintersdb + +import ( + "fmt" + "path/filepath" + "plugin" + + "github.com/spf13/viper" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/report" +) + +type Manager struct { + nameToLCs map[string][]*linter.Config + cfg *config.Config + log logutils.Log +} + +func NewManager(cfg *config.Config, log logutils.Log) *Manager { + m := &Manager{cfg: cfg, log: log} + nameToLCs := make(map[string][]*linter.Config) + for _, lc := range m.GetAllSupportedLinterConfigs() { + for _, name := range lc.AllNames() { + nameToLCs[name] = append(nameToLCs[name], lc) + } + } + + m.nameToLCs = nameToLCs + return m +} + +func (m *Manager) WithCustomLinters() *Manager { + if m.log == nil { + m.log = report.NewLogWrapper(logutils.NewStderrLog(""), &report.Data{}) + } + if m.cfg != nil { + for name, settings := range m.cfg.LintersSettings.Custom { + lc, err := m.loadCustomLinterConfig(name, settings) + + if err != nil { + m.log.Errorf("Unable to load custom analyzer %s:%s, %v", + name, + settings.Path, + err) + } else { + m.nameToLCs[name] = append(m.nameToLCs[name], lc) + } + } + } + return m +} + +func (Manager) AllPresets() []string { + return []string{ + linter.PresetBugs, + linter.PresetComment, + linter.PresetComplexity, + linter.PresetError, + linter.PresetFormatting, + linter.PresetImport, + 
linter.PresetMetaLinter, + linter.PresetModule, + linter.PresetPerformance, + linter.PresetSQL, + linter.PresetStyle, + linter.PresetTest, + linter.PresetUnused, + } +} + +func (m Manager) allPresetsSet() map[string]bool { + ret := map[string]bool{} + for _, p := range m.AllPresets() { + ret[p] = true + } + return ret +} + +func (m Manager) GetLinterConfigs(name string) []*linter.Config { + return m.nameToLCs[name] +} + +func enableLinterConfigs(lcs []*linter.Config, isEnabled func(lc *linter.Config) bool) []*linter.Config { + var ret []*linter.Config + for _, lc := range lcs { + lc := lc + lc.EnabledByDefault = isEnabled(lc) + ret = append(ret, lc) + } + + return ret +} + +//nolint:funlen +func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config { + var govetCfg *config.GovetSettings + var testpackageCfg *config.TestpackageSettings + var exhaustiveCfg *config.ExhaustiveSettings + var exhaustiveStructCfg *config.ExhaustiveStructSettings + var errorlintCfg *config.ErrorLintSettings + var thelperCfg *config.ThelperSettings + var predeclaredCfg *config.PredeclaredSettings + var ifshortCfg *config.IfshortSettings + var reviveCfg *config.ReviveSettings + var cyclopCfg *config.Cyclop + var importAsCfg *config.ImportAsSettings + var goModDirectivesCfg *config.GoModDirectivesSettings + var tagliatelleCfg *config.TagliatelleSettings + var gosecCfg *config.GoSecSettings + var gosimpleCfg *config.StaticCheckSettings + var staticcheckCfg *config.StaticCheckSettings + var stylecheckCfg *config.StaticCheckSettings + var unusedCfg *config.StaticCheckSettings + var wrapcheckCfg *config.WrapcheckSettings + + if m.cfg != nil { + govetCfg = &m.cfg.LintersSettings.Govet + testpackageCfg = &m.cfg.LintersSettings.Testpackage + exhaustiveCfg = &m.cfg.LintersSettings.Exhaustive + exhaustiveStructCfg = &m.cfg.LintersSettings.ExhaustiveStruct + errorlintCfg = &m.cfg.LintersSettings.ErrorLint + thelperCfg = &m.cfg.LintersSettings.Thelper + predeclaredCfg = &m.cfg.LintersSettings.Predeclared + ifshortCfg = &m.cfg.LintersSettings.Ifshort + reviveCfg = &m.cfg.LintersSettings.Revive + cyclopCfg = &m.cfg.LintersSettings.Cyclop + importAsCfg = &m.cfg.LintersSettings.ImportAs + goModDirectivesCfg = &m.cfg.LintersSettings.GoModDirectives + tagliatelleCfg = &m.cfg.LintersSettings.Tagliatelle + gosecCfg = &m.cfg.LintersSettings.Gosec + gosimpleCfg = &m.cfg.LintersSettings.Gosimple + staticcheckCfg = &m.cfg.LintersSettings.Staticcheck + stylecheckCfg = &m.cfg.LintersSettings.Stylecheck + unusedCfg = &m.cfg.LintersSettings.Unused + wrapcheckCfg = &m.cfg.LintersSettings.Wrapcheck + } + + const megacheckName = "megacheck" + + lcs := []*linter.Config{ + linter.NewConfig(golinters.NewGovet(govetCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames("vet", "vetshadow"). + WithURL("https://golang.org/cmd/vet/"), + linter.NewConfig(golinters.NewBodyclose()). + WithSince("v1.18.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance, linter.PresetBugs). + WithURL("https://github.com/timakin/bodyclose"), + linter.NewConfig(golinters.NewNoctx()). + WithSince("v1.28.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance, linter.PresetBugs). + WithURL("https://github.com/sonatard/noctx"), + linter.NewConfig(golinters.NewErrcheck()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetError). 
+ WithURL("https://github.com/kisielk/errcheck"), + linter.NewConfig(golinters.NewGolint()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/golang/lint"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), + linter.NewConfig(golinters.NewRowsErrCheck()). + WithSince("v1.23.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetSQL). + WithURL("https://github.com/jingyugao/rowserrcheck"), + + linter.NewConfig(golinters.NewStaticcheck(staticcheckCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames(megacheckName). + WithURL("https://staticcheck.io/"), + linter.NewConfig(golinters.NewUnused(unusedCfg)). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithAlternativeNames(megacheckName). + ConsiderSlow(). + WithChangeTypes(). + WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), + linter.NewConfig(golinters.NewGosimple(gosimpleCfg)). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithAlternativeNames(megacheckName). + WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), + + linter.NewConfig(golinters.NewStylecheck(stylecheckCfg)). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), + linter.NewConfig(golinters.NewGosec(gosecCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/securego/gosec"). + WithAlternativeNames("gas"), + linter.NewConfig(golinters.NewStructcheck()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"), + linter.NewConfig(golinters.NewVarcheck()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"), + linter.NewConfig(golinters.NewInterfacer()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mvdan/interfacer"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", ""), + linter.NewConfig(golinters.NewUnconvert()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mdempsky/unconvert"), + linter.NewConfig(golinters.NewIneffassign()). + WithSince("v1.0.0"). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/gordonklaus/ineffassign"), + linter.NewConfig(golinters.NewDupl()). + WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mibk/dupl"), + linter.NewConfig(golinters.NewGoconst()). + WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jgautheron/goconst"), + linter.NewConfig(golinters.NewDeadcode()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"), + linter.NewConfig(golinters.NewGocyclo()). + WithSince("v1.0.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/fzipp/gocyclo"), + linter.NewConfig(golinters.NewCyclop(cyclopCfg)). + WithSince("v1.37.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetComplexity). 
+ WithURL("https://github.com/bkielbasa/cyclop"), + linter.NewConfig(golinters.NewGocognit()). + WithSince("v1.20.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/uudashr/gocognit"), + linter.NewConfig(golinters.NewTypecheck()). + WithSince("v1.3.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL(""), + linter.NewConfig(golinters.NewAsciicheck()). + WithSince("v1.26.0"). + WithPresets(linter.PresetBugs, linter.PresetStyle). + WithURL("https://github.com/tdakkota/asciicheck"), + + linter.NewConfig(golinters.NewGofmt()). + WithSince("v1.0.0"). + WithPresets(linter.PresetFormatting). + WithAutoFix(). + WithURL("https://golang.org/cmd/gofmt/"), + linter.NewConfig(golinters.NewGofumpt()). + WithSince("v1.28.0"). + WithPresets(linter.PresetFormatting). + WithAutoFix(). + WithURL("https://github.com/mvdan/gofumpt"), + linter.NewConfig(golinters.NewGoimports()). + WithSince("v1.20.0"). + WithPresets(linter.PresetFormatting, linter.PresetImport). + WithAutoFix(). + WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"), + linter.NewConfig(golinters.NewGoHeader()). + WithSince("v1.28.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/denis-tingajkin/go-header"), + linter.NewConfig(golinters.NewGci()). + WithSince("v1.30.0"). + WithPresets(linter.PresetFormatting, linter.PresetImport). + WithAutoFix(). + WithURL("https://github.com/daixiang0/gci"), + linter.NewConfig(golinters.NewMaligned()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/mdempsky/maligned"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), + linter.NewConfig(golinters.NewDepguard()). + WithSince("v1.4.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/OpenPeeDeeP/depguard"), + linter.NewConfig(golinters.NewMisspell()). + WithSince("v1.8.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/client9/misspell"), + linter.NewConfig(golinters.NewLLL()). + WithSince("v1.8.0"). + WithPresets(linter.PresetStyle), + linter.NewConfig(golinters.NewUnparam()). + WithSince("v1.9.0"). + WithPresets(linter.PresetUnused). + WithLoadForGoAnalysis(). + WithURL("https://github.com/mvdan/unparam"), + linter.NewConfig(golinters.NewDogsled()). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/alexkohler/dogsled"), + linter.NewConfig(golinters.NewNakedret()). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/alexkohler/nakedret"), + linter.NewConfig(golinters.NewPrealloc()). + WithSince("v1.19.0"). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/alexkohler/prealloc"), + linter.NewConfig(golinters.NewScopelint()). + WithSince("v1.12.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/kyoh86/scopelint"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.39.0", "exportloopref"), + linter.NewConfig(golinters.NewGocritic()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + WithLoadForGoAnalysis(). + WithURL("https://github.com/go-critic/go-critic"), + linter.NewConfig(golinters.NewGochecknoinits()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle). 
+ WithURL("https://github.com/leighmcculloch/gochecknoinits"), + linter.NewConfig(golinters.NewGochecknoglobals()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/leighmcculloch/gochecknoglobals"), + linter.NewConfig(golinters.NewGodox()). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithURL("https://github.com/matoous/godox"), + linter.NewConfig(golinters.NewFunlen()). + WithSince("v1.18.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/ultraware/funlen"), + linter.NewConfig(golinters.NewWhitespace()). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle). + WithAutoFix(). + WithURL("https://github.com/ultraware/whitespace"), + linter.NewConfig(golinters.NewWSL()). + WithSince("v1.20.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/bombsimon/wsl"), + linter.NewConfig(golinters.NewGoPrintfFuncName()). + WithSince("v1.23.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jirfag/go-printf-func-name"), + linter.NewConfig(golinters.NewGoMND(m.cfg)). + WithSince("v1.22.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/tommy-muehle/go-mnd"), + linter.NewConfig(golinters.NewGoerr113()). + WithSince("v1.26.0"). + WithPresets(linter.PresetStyle, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Djarvur/go-err113"), + linter.NewConfig(golinters.NewGomodguard()). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/ryancurrah/gomodguard"), + linter.NewConfig(golinters.NewGodot()). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/tetafro/godot"), + linter.NewConfig(golinters.NewTestpackage(testpackageCfg)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/maratori/testpackage"), + linter.NewConfig(golinters.NewNestif()). + WithSince("v1.25.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/nakabonne/nestif"), + linter.NewConfig(golinters.NewExportLoopRef()). + WithSince("v1.28.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/kyoh86/exportloopref"), + linter.NewConfig(golinters.NewExhaustive(exhaustiveCfg)). + WithSince(" v1.28.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/nishanths/exhaustive"), + linter.NewConfig(golinters.NewSQLCloseCheck()). + WithSince("v1.28.0"). + WithPresets(linter.PresetBugs, linter.PresetSQL). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ryanrolds/sqlclosecheck"), + linter.NewConfig(golinters.NewNLReturn()). + WithSince("v1.30.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ssgreg/nlreturn"), + linter.NewConfig(golinters.NewWrapcheck(wrapcheckCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/tomarrell/wrapcheck"), + linter.NewConfig(golinters.NewThelper(thelperCfg)). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/kulti/thelper"), + linter.NewConfig(golinters.NewTparallel()). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). 
+ WithURL("https://github.com/moricho/tparallel"), + linter.NewConfig(golinters.NewExhaustiveStruct(exhaustiveStructCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). + WithURL("https://github.com/mbilski/exhaustivestruct"), + linter.NewConfig(golinters.NewErrorLint(errorlintCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetBugs, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/polyfloyd/go-errorlint"), + linter.NewConfig(golinters.NewParallelTest()). + WithSince("v1.33.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/kunwardeep/paralleltest"), + linter.NewConfig(golinters.NewMakezero()). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ashanbrown/makezero"), + linter.NewConfig(golinters.NewForbidigo()). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ashanbrown/forbidigo"), + linter.NewConfig(golinters.NewIfshort(ifshortCfg)). + WithSince("v1.36.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/esimonov/ifshort"), + linter.NewConfig(golinters.NewPredeclared(predeclaredCfg)). + WithSince("v1.35.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/nishanths/predeclared"), + linter.NewConfig(golinters.NewRevive(reviveCfg)). + WithSince("v1.37.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + ConsiderSlow(). + WithURL("https://github.com/mgechev/revive"), + linter.NewConfig(golinters.NewDurationCheck()). + WithSince("v1.37.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/charithe/durationcheck"), + linter.NewConfig(golinters.NewWastedAssign()). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/sanposhiho/wastedassign"), + linter.NewConfig(golinters.NewImportAs(importAsCfg)). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/julz/importas"), + linter.NewConfig(golinters.NewNilErr()). + WithSince("v1.38.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/gostaticanalysis/nilerr"), + linter.NewConfig(golinters.NewForceTypeAssert()). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/gostaticanalysis/forcetypeassert"), + linter.NewConfig(golinters.NewGoModDirectives(goModDirectivesCfg)). + WithSince("v1.39.0"). + WithPresets(linter.PresetStyle, linter.PresetModule). + WithURL("https://github.com/ldez/gomoddirectives"), + linter.NewConfig(golinters.NewPromlinter()). + WithSince("v1.40.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/yeya24/promlinter"), + linter.NewConfig(golinters.NewTagliatelle(tagliatelleCfg)). + WithSince("v1.40.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ldez/tagliatelle"), + linter.NewConfig(golinters.NewErrName()). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Antonboom/errname"). + WithSince("v1.42.0"), + + // nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives + linter.NewConfig(golinters.NewNoLintLint()). + WithSince("v1.26.0"). + WithPresets(linter.PresetStyle). 
+ WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"), + } + + enabledByDefault := map[string]bool{ + golinters.NewGovet(nil).Name(): true, + golinters.NewErrcheck().Name(): true, + golinters.NewStaticcheck(staticcheckCfg).Name(): true, + golinters.NewUnused(unusedCfg).Name(): true, + golinters.NewGosimple(gosimpleCfg).Name(): true, + golinters.NewStructcheck().Name(): true, + golinters.NewVarcheck().Name(): true, + golinters.NewIneffassign().Name(): true, + golinters.NewDeadcode().Name(): true, + golinters.NewTypecheck().Name(): true, + } + return enableLinterConfigs(lcs, func(lc *linter.Config) bool { + return enabledByDefault[lc.Name()] + }) +} + +func (m Manager) GetAllEnabledByDefaultLinters() []*linter.Config { + var ret []*linter.Config + for _, lc := range m.GetAllSupportedLinterConfigs() { + if lc.EnabledByDefault { + ret = append(ret, lc) + } + } + + return ret +} + +func linterConfigsToMap(lcs []*linter.Config) map[string]*linter.Config { + ret := map[string]*linter.Config{} + for _, lc := range lcs { + lc := lc // local copy + ret[lc.Name()] = lc + } + + return ret +} + +func (m Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config { + var ret []*linter.Config + for _, lc := range m.GetAllSupportedLinterConfigs() { + for _, ip := range lc.InPresets { + if p == ip { + ret = append(ret, lc) + break + } + } + } + + return ret +} + +func (m Manager) loadCustomLinterConfig(name string, settings config.CustomLinterSettings) (*linter.Config, error) { + analyzer, err := m.getAnalyzerPlugin(settings.Path) + if err != nil { + return nil, err + } + m.log.Infof("Loaded %s: %s", settings.Path, name) + customLinter := goanalysis.NewLinter( + name, + settings.Description, + analyzer.GetAnalyzers(), + nil).WithLoadMode(goanalysis.LoadModeTypesInfo) + linterConfig := linter.NewConfig(customLinter) + linterConfig.EnabledByDefault = true + linterConfig.IsSlow = false + linterConfig.WithURL(settings.OriginalURL) + return linterConfig, nil +} + +type AnalyzerPlugin interface { + GetAnalyzers() []*analysis.Analyzer +} + +func (m Manager) getAnalyzerPlugin(path string) (AnalyzerPlugin, error) { + if !filepath.IsAbs(path) { + // resolve non-absolute paths relative to config file's directory + configFilePath := viper.ConfigFileUsed() + absConfigFilePath, err := filepath.Abs(configFilePath) + if err != nil { + return nil, fmt.Errorf("could not get absolute representation of config file path %q: %v", configFilePath, err) + } + path = filepath.Join(filepath.Dir(absConfigFilePath), path) + } + + plug, err := plugin.Open(path) + if err != nil { + return nil, err + } + + symbol, err := plug.Lookup("AnalyzerPlugin") + if err != nil { + return nil, err + } + + analyzerPlugin, ok := symbol.(AnalyzerPlugin) + if !ok { + return nil, fmt.Errorf("plugin %s does not abide by 'AnalyzerPlugin' interface", path) + } + + return analyzerPlugin, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go new file mode 100644 index 000000000..47c128930 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go @@ -0,0 +1,107 @@ +package lintersdb + +import ( + "fmt" + "strings" + + "github.com/golangci/golangci-lint/pkg/config" +) + +type Validator struct { + m *Manager +} + +func NewValidator(m *Manager) *Validator { + return &Validator{ + m: m, + } +} + +func (v Validator) validateLintersNames(cfg *config.Linters) error { + 
allNames := append([]string{}, cfg.Enable...) + allNames = append(allNames, cfg.Disable...) + + unknownNames := []string{} + + for _, name := range allNames { + if v.m.GetLinterConfigs(name) == nil { + unknownNames = append(unknownNames, name) + } + } + + if len(unknownNames) > 0 { + return fmt.Errorf("unknown linters: '%v', run 'golangci-lint help linters' to see the list of supported linters", + strings.Join(unknownNames, ",")) + } + + return nil +} + +func (v Validator) validatePresets(cfg *config.Linters) error { + allPresets := v.m.allPresetsSet() + for _, p := range cfg.Presets { + if !allPresets[p] { + return fmt.Errorf("no such preset %q: only next presets exist: (%s)", + p, strings.Join(v.m.AllPresets(), "|")) + } + } + + if len(cfg.Presets) != 0 && cfg.EnableAll { + return fmt.Errorf("--presets is incompatible with --enable-all") + } + + return nil +} + +func (v Validator) validateAllDisableEnableOptions(cfg *config.Linters) error { + if cfg.EnableAll && cfg.DisableAll { + return fmt.Errorf("--enable-all and --disable-all options must not be combined") + } + + if cfg.DisableAll { + if len(cfg.Enable) == 0 && len(cfg.Presets) == 0 { + return fmt.Errorf("all linters were disabled, but no one linter was enabled: must enable at least one") + } + + if len(cfg.Disable) != 0 { + return fmt.Errorf("can't combine options --disable-all and --disable %s", cfg.Disable[0]) + } + } + + if cfg.EnableAll && len(cfg.Enable) != 0 && !cfg.Fast { + return fmt.Errorf("can't combine options --enable-all and --enable %s", cfg.Enable[0]) + } + + return nil +} + +func (v Validator) validateDisabledAndEnabledAtOneMoment(cfg *config.Linters) error { + enabledLintersSet := map[string]bool{} + for _, name := range cfg.Enable { + enabledLintersSet[name] = true + } + + for _, name := range cfg.Disable { + if enabledLintersSet[name] { + return fmt.Errorf("linter %q can't be disabled and enabled at one moment", name) + } + } + + return nil +} + +func (v Validator) validateEnabledDisabledLintersConfig(cfg *config.Linters) error { + validators := []func(cfg *config.Linters) error{ + v.validateLintersNames, + v.validatePresets, + v.validateAllDisableEnableOptions, + v.validateDisabledAndEnabledAtOneMoment, + } + for _, v := range validators { + if err := v(cfg); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go new file mode 100644 index 000000000..69852afb9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go @@ -0,0 +1,315 @@ +package lint + +import ( + "context" + "fmt" + "go/build" + "go/token" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/pkg/errors" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis/load" + "github.com/golangci/golangci-lint/pkg/goutil" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type ContextLoader struct { + cfg *config.Config + log logutils.Log + debugf logutils.DebugFunc + goenv *goutil.Env + pkgTestIDRe *regexp.Regexp + lineCache *fsutils.LineCache + fileCache *fsutils.FileCache + pkgCache *pkgcache.Cache + loadGuard *load.Guard +} + +func NewContextLoader(cfg *config.Config, log 
logutils.Log, goenv *goutil.Env, + lineCache *fsutils.LineCache, fileCache *fsutils.FileCache, pkgCache *pkgcache.Cache, loadGuard *load.Guard) *ContextLoader { + return &ContextLoader{ + cfg: cfg, + log: log, + debugf: logutils.Debug("loader"), + goenv: goenv, + pkgTestIDRe: regexp.MustCompile(`^(.*) \[(.*)\.test\]`), + lineCache: lineCache, + fileCache: fileCache, + pkgCache: pkgCache, + loadGuard: loadGuard, + } +} + +func (cl *ContextLoader) prepareBuildContext() { + // Set GOROOT to have working cross-compilation: cross-compiled binaries + // have invalid GOROOT. XXX: can't use runtime.GOROOT(). + goroot := cl.goenv.Get(goutil.EnvGoRoot) + if goroot == "" { + return + } + + os.Setenv("GOROOT", goroot) + build.Default.GOROOT = goroot + build.Default.BuildTags = cl.cfg.Run.BuildTags +} + +func (cl *ContextLoader) findLoadMode(linters []*linter.Config) packages.LoadMode { + loadMode := packages.LoadMode(0) + for _, lc := range linters { + loadMode |= lc.LoadMode + } + + return loadMode +} + +func (cl *ContextLoader) buildArgs() []string { + args := cl.cfg.Run.Args + if len(args) == 0 { + return []string{"./..."} + } + + var retArgs []string + for _, arg := range args { + if strings.HasPrefix(arg, ".") || filepath.IsAbs(arg) { + retArgs = append(retArgs, arg) + } else { + // go/packages doesn't work well if we don't have prefix ./ for local packages + retArgs = append(retArgs, fmt.Sprintf(".%c%s", filepath.Separator, arg)) + } + } + + return retArgs +} + +func (cl *ContextLoader) makeBuildFlags() ([]string, error) { + var buildFlags []string + + if len(cl.cfg.Run.BuildTags) != 0 { + // go help build + buildFlags = append(buildFlags, "-tags", strings.Join(cl.cfg.Run.BuildTags, " ")) + cl.log.Infof("Using build tags: %v", cl.cfg.Run.BuildTags) + } + + mod := cl.cfg.Run.ModulesDownloadMode + if mod != "" { + // go help modules + allowedMods := []string{"mod", "readonly", "vendor"} + var ok bool + for _, am := range allowedMods { + if am == mod { + ok = true + break + } + } + if !ok { + return nil, fmt.Errorf("invalid modules download path %s, only (%s) allowed", mod, strings.Join(allowedMods, "|")) + } + + buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", cl.cfg.Run.ModulesDownloadMode)) + } + + return buildFlags, nil +} + +func stringifyLoadMode(mode packages.LoadMode) string { + m := map[packages.LoadMode]string{ + packages.NeedCompiledGoFiles: "compiled_files", + packages.NeedDeps: "deps", + packages.NeedExportsFile: "exports_file", + packages.NeedFiles: "files", + packages.NeedImports: "imports", + packages.NeedName: "name", + packages.NeedSyntax: "syntax", + packages.NeedTypes: "types", + packages.NeedTypesInfo: "types_info", + packages.NeedTypesSizes: "types_sizes", + } + + var flags []string + for flag, flagStr := range m { + if mode&flag != 0 { + flags = append(flags, flagStr) + } + } + + return fmt.Sprintf("%d (%s)", mode, strings.Join(flags, "|")) +} + +func (cl *ContextLoader) debugPrintLoadedPackages(pkgs []*packages.Package) { + cl.debugf("loaded %d pkgs", len(pkgs)) + for i, pkg := range pkgs { + var syntaxFiles []string + for _, sf := range pkg.Syntax { + syntaxFiles = append(syntaxFiles, pkg.Fset.Position(sf.Pos()).Filename) + } + cl.debugf("Loaded pkg #%d: ID=%s GoFiles=%s CompiledGoFiles=%s Syntax=%s", + i, pkg.ID, pkg.GoFiles, pkg.CompiledGoFiles, syntaxFiles) + } +} + +func (cl *ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error { + for _, pkg := range pkgs { + for _, err := range pkg.Errors { + if strings.Contains(err.Msg, "no Go files") { + 
return errors.Wrapf(exitcodes.ErrNoGoFiles, "package %s", pkg.PkgPath) + } + if strings.Contains(err.Msg, "cannot find package") { + // when analyzing not existing directory + return errors.Wrap(exitcodes.ErrFailure, err.Msg) + } + } + } + + return nil +} + +func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.LoadMode) ([]*packages.Package, error) { + defer func(startedAt time.Time) { + cl.log.Infof("Go packages loading at mode %s took %s", stringifyLoadMode(loadMode), time.Since(startedAt)) + }(time.Now()) + + cl.prepareBuildContext() + + buildFlags, err := cl.makeBuildFlags() + if err != nil { + return nil, errors.Wrap(err, "failed to make build flags for go list") + } + + conf := &packages.Config{ + Mode: loadMode, + Tests: cl.cfg.Run.AnalyzeTests, + Context: ctx, + BuildFlags: buildFlags, + Logf: cl.debugf, + //TODO: use fset, parsefile, overlay + } + + args := cl.buildArgs() + cl.debugf("Built loader args are %s", args) + pkgs, err := packages.Load(conf, args...) + if err != nil { + return nil, errors.Wrap(err, "failed to load with go/packages") + } + + // Currently, go/packages doesn't guarantee that error will be returned + // if context was canceled. See + // https://github.com/golang/tools/commit/c5cec6710e927457c3c29d6c156415e8539a5111#r39261855 + if ctx.Err() != nil { + return nil, errors.Wrap(ctx.Err(), "timed out to load packages") + } + + if loadMode&packages.NeedSyntax == 0 { + // Needed e.g. for go/analysis loading. + fset := token.NewFileSet() + packages.Visit(pkgs, nil, func(pkg *packages.Package) { + pkg.Fset = fset + cl.loadGuard.AddMutexForPkg(pkg) + }) + } + + cl.debugPrintLoadedPackages(pkgs) + + if err := cl.parseLoadedPackagesErrors(pkgs); err != nil { + return nil, err + } + + return cl.filterTestMainPackages(pkgs), nil +} + +func (cl *ContextLoader) tryParseTestPackage(pkg *packages.Package) (name string, isTest bool) { + matches := cl.pkgTestIDRe.FindStringSubmatch(pkg.ID) + if matches == nil { + return "", false + } + + return matches[1], true +} + +func (cl *ContextLoader) filterTestMainPackages(pkgs []*packages.Package) []*packages.Package { + var retPkgs []*packages.Package + for _, pkg := range pkgs { + if pkg.Name == "main" && strings.HasSuffix(pkg.PkgPath, ".test") { + // it's an implicit testmain package + cl.debugf("skip pkg ID=%s", pkg.ID) + continue + } + + retPkgs = append(retPkgs, pkg) + } + + return retPkgs +} + +func (cl *ContextLoader) filterDuplicatePackages(pkgs []*packages.Package) []*packages.Package { + packagesWithTests := map[string]bool{} + for _, pkg := range pkgs { + name, isTest := cl.tryParseTestPackage(pkg) + if !isTest { + continue + } + packagesWithTests[name] = true + } + + cl.debugf("package with tests: %#v", packagesWithTests) + + var retPkgs []*packages.Package + for _, pkg := range pkgs { + _, isTest := cl.tryParseTestPackage(pkg) + if !isTest && packagesWithTests[pkg.PkgPath] { + // If tests loading is enabled, + // for package with files a.go and a_test.go go/packages loads two packages: + // 1. ID=".../a" GoFiles=[a.go] + // 2. ID=".../a [.../a.test]" GoFiles=[a.go a_test.go] + // We need only the second package, otherwise we can get warnings about unused variables/fields/functions + // in a.go if they are used only in a_test.go. 
+ cl.debugf("skip pkg ID=%s because we load it with test package", pkg.ID) + continue + } + + retPkgs = append(retPkgs, pkg) + } + + return retPkgs +} + +func (cl *ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*linter.Context, error) { + loadMode := cl.findLoadMode(linters) + pkgs, err := cl.loadPackages(ctx, loadMode) + if err != nil { + return nil, errors.Wrap(err, "failed to load packages") + } + + deduplicatedPkgs := cl.filterDuplicatePackages(pkgs) + + if len(deduplicatedPkgs) == 0 { + return nil, exitcodes.ErrNoGoFiles + } + + ret := &linter.Context{ + Packages: deduplicatedPkgs, + + // At least `unused` linters works properly only on original (not deduplicated) packages, + // see https://github.com/golangci/golangci-lint/pull/585. + OriginalPackages: pkgs, + + Cfg: cl.cfg, + Log: cl.log, + FileCache: cl.fileCache, + LineCache: cl.lineCache, + PkgCache: cl.pkgCache, + LoadGuard: cl.loadGuard, + } + + return ret, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go new file mode 100644 index 000000000..8882b9300 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -0,0 +1,329 @@ +package lint + +import ( + "context" + "fmt" + "runtime/debug" + "strings" + + "github.com/pkg/errors" + gopackages "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/internal/errorutil" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/goutil" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/packages" + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/golangci-lint/pkg/result/processors" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +type Runner struct { + Processors []processors.Processor + Log logutils.Log +} + +func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lintersdb.EnabledSet, + lineCache *fsutils.LineCache, dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) { + skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles) + if err != nil { + return nil, err + } + + skipDirs := cfg.Run.SkipDirs + if cfg.Run.UseDefaultSkipDirs { + skipDirs = append(skipDirs, packages.StdExcludeDirRegexps...) + } + skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child("skip dirs"), cfg.Run.Args) + if err != nil { + return nil, err + } + + enabledLinters, err := es.GetEnabledLintersMap() + if err != nil { + return nil, errors.Wrap(err, "failed to get enabled linters") + } + + // print deprecated messages + if !cfg.InternalCmdTest { + for name, lc := range enabledLinters { + if !lc.IsDeprecated() { + continue + } + + var extra string + if lc.Deprecation.Replacement != "" { + extra = fmt.Sprintf(" Replaced by %s.", lc.Deprecation.Replacement) + } + + log.Warnf("The linter '%s' is deprecated (since %s) due to: %s %s", name, lc.Deprecation.Since, lc.Deprecation.Message, extra) + } + } + + return &Runner{ + Processors: []processors.Processor{ + processors.NewCgo(goenv), + + // Must go after Cgo. + processors.NewFilenameUnadjuster(pkgs, log.Child("filename_unadjuster")), + + // Must be before diff, nolint and exclude autogenerated processor at least. 
+ processors.NewPathPrettifier(), + skipFilesProcessor, + skipDirsProcessor, // must be after path prettifier + + processors.NewAutogeneratedExclude(), + + // Must be before exclude because users see already marked output and configure excluding by it. + processors.NewIdentifierMarker(), + + getExcludeProcessor(&cfg.Issues), + getExcludeRulesProcessor(&cfg.Issues, log, lineCache), + processors.NewNolint(log.Child("nolint"), dbManager, enabledLinters), + + processors.NewUniqByLine(cfg), + processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath), + processors.NewMaxPerFileFromLinter(cfg), + processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child("max_same_issues"), cfg), + processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg), + processors.NewSourceCode(lineCache, log.Child("source_code")), + processors.NewPathShortener(), + getSeverityRulesProcessor(&cfg.Severity, log, lineCache), + processors.NewPathPrefixer(cfg.Output.PathPrefix), + processors.NewSortResults(cfg), + }, + Log: log, + }, nil +} + +func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, + lc *linter.Config) (ret []result.Issue, err error) { + defer func() { + if panicData := recover(); panicData != nil { + if pe, ok := panicData.(*errorutil.PanicError); ok { + err = fmt.Errorf("%s: %w", lc.Name(), pe) + + // Don't print stacktrace from goroutines twice + r.Log.Errorf("Panic: %s: %s", pe, pe.Stack()) + } else { + err = fmt.Errorf("panic occurred: %s", panicData) + r.Log.Errorf("Panic stack trace: %s", debug.Stack()) + } + } + }() + + issues, err := lc.Linter.Run(ctx, lintCtx) + + if lc.DoesChangeTypes { + // Packages in lintCtx might be dirty due to the last analysis, + // which affects the next analysis. + // To avoid this issue, we clear type information from the packages. + // See https://github.com/golangci/golangci-lint/pull/944. + // Currently DoesChangeTypes is true only for `unused`.
+ lintCtx.ClearTypesInPackages() + } + + if err != nil { + return nil, err + } + + for i := range issues { + if issues[i].FromLinter == "" { + issues[i].FromLinter = lc.Name() + } + } + + return issues, nil +} + +type processorStat struct { + inCount int + outCount int +} + +func (r Runner) processLintResults(inIssues []result.Issue) []result.Issue { + sw := timeutils.NewStopwatch("processing", r.Log) + + var issuesBefore, issuesAfter int + statPerProcessor := map[string]processorStat{} + + var outIssues []result.Issue + if len(inIssues) != 0 { + issuesBefore += len(inIssues) + outIssues = r.processIssues(inIssues, sw, statPerProcessor) + issuesAfter += len(outIssues) + } + + // finalize processors: logging, clearing, no heavy work here + + for _, p := range r.Processors { + p := p + sw.TrackStage(p.Name(), func() { + p.Finish() + }) + } + + if issuesBefore != issuesAfter { + r.Log.Infof("Issues before processing: %d, after processing: %d", issuesBefore, issuesAfter) + } + r.printPerProcessorStat(statPerProcessor) + sw.PrintStages() + + return outIssues +} + +func (r Runner) printPerProcessorStat(stat map[string]processorStat) { + parts := make([]string, 0, len(stat)) + for name, ps := range stat { + if ps.inCount != 0 { + parts = append(parts, fmt.Sprintf("%s: %d/%d", name, ps.outCount, ps.inCount)) + } + } + if len(parts) != 0 { + r.Log.Infof("Processors filtering stat (out/in): %s", strings.Join(parts, ", ")) + } +} + +func (r Runner) Run(ctx context.Context, linters []*linter.Config, lintCtx *linter.Context) ([]result.Issue, error) { + sw := timeutils.NewStopwatch("linters", r.Log) + defer sw.Print() + + var issues []result.Issue + for _, lc := range linters { + lc := lc + sw.TrackStage(lc.Name(), func() { + linterIssues, err := r.runLinterSafe(ctx, lintCtx, lc) + if err != nil { + r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err) + return + } + issues = append(issues, linterIssues...) 
+ }) + } + + return r.processLintResults(issues), nil +} + +func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, statPerProcessor map[string]processorStat) []result.Issue { + for _, p := range r.Processors { + var newIssues []result.Issue + var err error + p := p + sw.TrackStage(p.Name(), func() { + newIssues, err = p.Process(issues) + }) + + if err != nil { + r.Log.Warnf("Can't process result by %s processor: %s", p.Name(), err) + } else { + stat := statPerProcessor[p.Name()] + stat.inCount += len(issues) + stat.outCount += len(newIssues) + statPerProcessor[p.Name()] = stat + issues = newIssues + } + + if issues == nil { + issues = []result.Issue{} + } + } + + return issues +} + +func getExcludeProcessor(cfg *config.Issues) processors.Processor { + var excludeTotalPattern string + + if len(cfg.ExcludePatterns) != 0 { + excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(cfg.ExcludePatterns, "|")) + } + + var excludeProcessor processors.Processor + if cfg.ExcludeCaseSensitive { + excludeProcessor = processors.NewExcludeCaseSensitive(excludeTotalPattern) + } else { + excludeProcessor = processors.NewExclude(excludeTotalPattern) + } + + return excludeProcessor +} + +func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { + var excludeRules []processors.ExcludeRule + for _, r := range cfg.ExcludeRules { + excludeRules = append(excludeRules, processors.ExcludeRule{ + BaseRule: processors.BaseRule{ + Text: r.Text, + Source: r.Source, + Path: r.Path, + Linters: r.Linters, + }, + }) + } + + if cfg.UseDefaultExcludes { + for _, r := range config.GetExcludePatterns(cfg.IncludeDefaultExcludes) { + excludeRules = append(excludeRules, processors.ExcludeRule{ + BaseRule: processors.BaseRule{ + Text: r.Pattern, + Linters: []string{r.Linter}, + }, + }) + } + } + + var excludeRulesProcessor processors.Processor + if cfg.ExcludeCaseSensitive { + excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive( + excludeRules, + lineCache, + log.Child("exclude_rules"), + ) + } else { + excludeRulesProcessor = processors.NewExcludeRules( + excludeRules, + lineCache, + log.Child("exclude_rules"), + ) + } + + return excludeRulesProcessor +} + +func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { + var severityRules []processors.SeverityRule + for _, r := range cfg.Rules { + severityRules = append(severityRules, processors.SeverityRule{ + Severity: r.Severity, + BaseRule: processors.BaseRule{ + Text: r.Text, + Source: r.Source, + Path: r.Path, + Linters: r.Linters, + }, + }) + } + + var severityRulesProcessor processors.Processor + if cfg.CaseSensitive { + severityRulesProcessor = processors.NewSeverityRulesCaseSensitive( + cfg.Default, + severityRules, + lineCache, + log.Child("severity_rules"), + ) + } else { + severityRulesProcessor = processors.NewSeverityRules( + cfg.Default, + severityRules, + lineCache, + log.Child("severity_rules"), + ) + } + + return severityRulesProcessor +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go new file mode 100644 index 000000000..b955417a8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go @@ -0,0 +1,31 @@ +package logutils + +type Log interface { + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Warnf(format 
string, args ...interface{}) + Infof(format string, args ...interface{}) + + Child(name string) Log + SetLevel(level LogLevel) +} + +type LogLevel int + +const ( + // Debug messages, written to debug logs only via logutils.Debug. + LogLevelDebug LogLevel = 0 + + // Information messages: don't write too many messages, + // only useful ones; they are shown when running with -v. + LogLevelInfo LogLevel = 1 + + // Hidden errors: non-critical errors; work can continue and there is no need to fail the whole program, + // but tests will crash if any warning occurs. + LogLevelWarn LogLevel = 2 + + // Errors that are not hidden from the user: the whole program is failing; usually + // error logging happens in 1-2 places, e.g. in the "main" function. + LogLevelError LogLevel = 3 +) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go new file mode 100644 index 000000000..93c9873d9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go @@ -0,0 +1,49 @@ +package logutils + +import ( + "os" + "strings" +) + +func getEnabledDebugs() map[string]bool { + ret := map[string]bool{} + debugVar := os.Getenv("GL_DEBUG") + if debugVar == "" { + return ret + } + + for _, tag := range strings.Split(debugVar, ",") { + ret[tag] = true + } + + return ret +} + +var enabledDebugs = getEnabledDebugs() + +type DebugFunc func(format string, args ...interface{}) + +func nopDebugf(format string, args ...interface{}) {} + +func Debug(tag string) DebugFunc { + if !enabledDebugs[tag] { + return nopDebugf + } + + logger := NewStderrLog(tag) + logger.SetLevel(LogLevelDebug) + + return func(format string, args ...interface{}) { + logger.Debugf(format, args...) + } +} + +func HaveDebugTag(tag string) bool { + return enabledDebugs[tag] +} + +func SetupVerboseLog(log Log, isVerbose bool) { + if isVerbose { + log.SetLevel(LogLevelInfo) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go new file mode 100644 index 000000000..e897ce1ed --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go @@ -0,0 +1,47 @@ +package logutils + +import ( + "github.com/stretchr/testify/mock" +) + +type MockLog struct { + mock.Mock +} + +func NewMockLog() *MockLog { + return &MockLog{} +} + +func (m *MockLog) Fatalf(format string, args ...interface{}) { + mArgs := []interface{}{format} + m.Called(append(mArgs, args...)...) +} + +func (m *MockLog) Panicf(format string, args ...interface{}) { + mArgs := []interface{}{format} + m.Called(append(mArgs, args...)...) +} + +func (m *MockLog) Errorf(format string, args ...interface{}) { + mArgs := []interface{}{format} + m.Called(append(mArgs, args...)...) +} + +func (m *MockLog) Warnf(format string, args ...interface{}) { + mArgs := []interface{}{format} + m.Called(append(mArgs, args...)...) +} + +func (m *MockLog) Infof(format string, args ...interface{}) { + mArgs := []interface{}{format} + m.Called(append(mArgs, args...)...)
+} + +func (m *MockLog) Child(name string) Log { + m.Called(name) + return m +} + +func (m *MockLog) SetLevel(level LogLevel) { + m.Called(level) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go new file mode 100644 index 000000000..67c70dc8f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/out.go @@ -0,0 +1,9 @@ +package logutils + +import ( + "github.com/fatih/color" + colorable "github.com/mattn/go-colorable" +) + +var StdOut = color.Output // https://github.com/golangci/golangci-lint/issues/14 +var StdErr = colorable.NewColorableStderr() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go new file mode 100644 index 000000000..b4697ee4c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go @@ -0,0 +1,121 @@ +package logutils + +import ( + "fmt" + "os" + "time" + + "github.com/sirupsen/logrus" //nolint:depguard + + "github.com/golangci/golangci-lint/pkg/exitcodes" +) + +type StderrLog struct { + name string + logger *logrus.Logger + level LogLevel +} + +var _ Log = NewStderrLog("") + +func NewStderrLog(name string) *StderrLog { + sl := &StderrLog{ + name: name, + logger: logrus.New(), + level: LogLevelWarn, + } + + switch os.Getenv("LOG_LEVEL") { + case "error", "err": + sl.logger.SetLevel(logrus.ErrorLevel) + case "warning", "warn": + sl.logger.SetLevel(logrus.WarnLevel) + case "info": + sl.logger.SetLevel(logrus.InfoLevel) + default: + sl.logger.SetLevel(logrus.DebugLevel) + } + + sl.logger.Out = StdErr + formatter := &logrus.TextFormatter{ + DisableTimestamp: true, // `INFO[0007] msg` -> `INFO msg` + } + if os.Getenv("LOG_TIMESTAMP") == "1" { + formatter.DisableTimestamp = false + formatter.FullTimestamp = true + formatter.TimestampFormat = time.StampMilli + } + sl.logger.Formatter = formatter + + return sl +} + +func (sl StderrLog) prefix() string { + prefix := "" + if sl.name != "" { + prefix = fmt.Sprintf("[%s] ", sl.name) + } + + return prefix +} + +func (sl StderrLog) Fatalf(format string, args ...interface{}) { + sl.logger.Errorf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) + os.Exit(exitcodes.Failure) +} + +func (sl StderrLog) Panicf(format string, args ...interface{}) { + v := fmt.Sprintf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) + panic(v) +} + +func (sl StderrLog) Errorf(format string, args ...interface{}) { + if sl.level > LogLevelError { + return + } + + sl.logger.Errorf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) + // don't call exitIfTest() because the idea is to + // crash on hidden errors (warnings); but Errorf MUST NOT be + // called on hidden errors, see log levels comments. 
+} + +func (sl StderrLog) Warnf(format string, args ...interface{}) { + if sl.level > LogLevelWarn { + return + } + + sl.logger.Warnf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) +} + +func (sl StderrLog) Infof(format string, args ...interface{}) { + if sl.level > LogLevelInfo { + return + } + + sl.logger.Infof("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) +} + +func (sl StderrLog) Debugf(format string, args ...interface{}) { + if sl.level > LogLevelDebug { + return + } + + sl.logger.Debugf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) +} + +func (sl StderrLog) Child(name string) Log { + prefix := "" + if sl.name != "" { + prefix = sl.name + "/" + } + + child := sl + child.name = prefix + name + + return &child +} + +func (sl *StderrLog) SetLevel(level LogLevel) { + sl.level = level +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go new file mode 100644 index 000000000..c620573b9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go @@ -0,0 +1,39 @@ +package packages + +import ( + "fmt" + "go/token" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +//nolint:gomnd +func ParseErrorPosition(pos string) (*token.Position, error) { + // expected format: file:line(:column) + parts := strings.Split(pos, ":") + if len(parts) == 1 { + return nil, errors.New("no colons") + } + + file := parts[0] + line, err := strconv.Atoi(parts[1]) + if err != nil { + return nil, fmt.Errorf("can't parse line number %q: %s", parts[1], err) + } + + var column int + if len(parts) == 3 { // has column + column, err = strconv.Atoi(parts[2]) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse column from %q", parts[2]) + } + } + + return &token.Position{ + Filename: file, + Line: line, + Column: column, + }, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go new file mode 100644 index 000000000..cdd327f5d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/skip.go @@ -0,0 +1,25 @@ +package packages + +import ( + "fmt" + "path/filepath" + "regexp" +) + +func pathElemReImpl(e string, sep rune) string { + escapedSep := regexp.QuoteMeta(string(sep)) // needed for windows sep '\\' + return fmt.Sprintf(`(^|%s)%s($|%s)`, escapedSep, e, escapedSep) +} + +func pathElemRe(e string) string { + return pathElemReImpl(e, filepath.Separator) +} + +var StdExcludeDirRegexps = []string{ + pathElemRe("vendor"), + pathElemRe("third_party"), + pathElemRe("testdata"), + pathElemRe("examples"), + pathElemRe("Godeps"), + pathElemRe("builtin"), +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go new file mode 100644 index 000000000..e4268897f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go @@ -0,0 +1,102 @@ +package packages + +import ( + "fmt" + "regexp" + "strings" + + "golang.org/x/tools/go/packages" +) + +// reFile matches a line that starts with a file path and position.
+// ex: `/example/main.go:11:17: foobar` +var reFile = regexp.MustCompile(`^.+\.go:\d+:\d+: .+`) + +func ExtractErrors(pkg *packages.Package) []packages.Error { + errors := extractErrorsImpl(pkg, map[*packages.Package]bool{}) + if len(errors) == 0 { + return errors + } + + seenErrors := map[string]bool{} + var uniqErrors []packages.Error + for _, err := range errors { + msg := stackCrusher(err.Error()) + if seenErrors[msg] { + continue + } + + if msg != err.Error() { + continue + } + + seenErrors[msg] = true + + uniqErrors = append(uniqErrors, err) + } + + if len(pkg.GoFiles) != 0 { + // errors were extracted from deps and have at least one file in package + for i := range uniqErrors { + if _, parseErr := ParseErrorPosition(uniqErrors[i].Pos); parseErr == nil { + continue + } + + // change pos to local file to properly process it by processors (properly read line etc) + uniqErrors[i].Msg = fmt.Sprintf("%s: %s", uniqErrors[i].Pos, uniqErrors[i].Msg) + uniqErrors[i].Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0]) + } + + // some errors like "code in directory expects import" don't have Pos, set it here + for i := range uniqErrors { + err := &uniqErrors[i] + if err.Pos == "" { + err.Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0]) + } + } + } + + return uniqErrors +} + +func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package]bool) []packages.Error { + if seenPackages[pkg] { + return nil + } + seenPackages[pkg] = true + + if !pkg.IllTyped { // otherwise it may take hours to traverse all deps many times + return nil + } + + if len(pkg.Errors) > 0 { + return pkg.Errors + } + + var errors []packages.Error + for _, iPkg := range pkg.Imports { + iPkgErrors := extractErrorsImpl(iPkg, seenPackages) + if iPkgErrors != nil { + errors = append(errors, iPkgErrors...) 
+ } + } + + return errors +} + +func stackCrusher(msg string) string { + index := strings.Index(msg, "(") + lastIndex := strings.LastIndex(msg, ")") + + if index == -1 || index == len(msg)-1 || lastIndex == -1 || lastIndex != len(msg)-1 { + return msg + } + + frag := msg[index+1 : lastIndex] + + if !reFile.MatchString(frag) { + return msg + } + + return stackCrusher(frag) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go new file mode 100644 index 000000000..c5b948a98 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -0,0 +1,87 @@ +package printers + +import ( + "context" + "encoding/xml" + "fmt" + + "github.com/go-xmlfmt/xmlfmt" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type checkstyleOutput struct { + XMLName xml.Name `xml:"checkstyle"` + Version string `xml:"version,attr"` + Files []*checkstyleFile `xml:"file"` +} + +type checkstyleFile struct { + Name string `xml:"name,attr"` + Errors []*checkstyleError `xml:"error"` +} + +type checkstyleError struct { + Column int `xml:"column,attr"` + Line int `xml:"line,attr"` + Message string `xml:"message,attr"` + Severity string `xml:"severity,attr"` + Source string `xml:"source,attr"` +} + +const defaultCheckstyleSeverity = "error" + +type Checkstyle struct{} + +func NewCheckstyle() *Checkstyle { + return &Checkstyle{} +} + +func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { + out := checkstyleOutput{ + Version: "5.0", + } + + files := map[string]*checkstyleFile{} + + for i := range issues { + issue := &issues[i] + file, ok := files[issue.FilePath()] + if !ok { + file = &checkstyleFile{ + Name: issue.FilePath(), + } + + files[issue.FilePath()] = file + } + + severity := defaultCheckstyleSeverity + if issue.Severity != "" { + severity = issue.Severity + } + + newError := &checkstyleError{ + Column: issue.Column(), + Line: issue.Line(), + Message: issue.Text, + Source: issue.FromLinter, + Severity: severity, + } + + file.Errors = append(file.Errors, newError) + } + + out.Files = make([]*checkstyleFile, 0, len(files)) + for _, file := range files { + out.Files = append(out.Files, file) + } + + data, err := xml.Marshal(&out) + if err != nil { + return err + } + + fmt.Fprintf(logutils.StdOut, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " ")) + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go new file mode 100644 index 000000000..35a22ce99 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -0,0 +1,57 @@ +package printers + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +// CodeClimateIssue is a subset of the Code Climate spec - https://github.com/codeclimate/spec/blob/master/SPEC.md#data-types +// It is just enough to support GitLab CI Code Quality - https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html +type CodeClimateIssue struct { + Description string `json:"description"` + Severity string `json:"severity,omitempty"` + Fingerprint string `json:"fingerprint"` + Location struct { + Path string `json:"path"` + Lines struct { + Begin int `json:"begin"` + } `json:"lines"` + } `json:"location"` +} + +type CodeClimate struct { +} 
+ +func NewCodeClimate() *CodeClimate { + return &CodeClimate{} +} + +func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { + codeClimateIssues := []CodeClimateIssue{} + for i := range issues { + issue := &issues[i] + codeClimateIssue := CodeClimateIssue{} + codeClimateIssue.Description = issue.Description() + codeClimateIssue.Location.Path = issue.Pos.Filename + codeClimateIssue.Location.Lines.Begin = issue.Pos.Line + codeClimateIssue.Fingerprint = issue.Fingerprint() + + if issue.Severity != "" { + codeClimateIssue.Severity = issue.Severity + } + + codeClimateIssues = append(codeClimateIssues, codeClimateIssue) + } + + outputJSON, err := json.Marshal(codeClimateIssues) + if err != nil { + return err + } + + fmt.Fprint(logutils.StdOut, string(outputJSON)) + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go new file mode 100644 index 000000000..4ebc26685 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go @@ -0,0 +1,46 @@ +package printers + +import ( + "context" + "fmt" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type github struct { +} + +const defaultGithubSeverity = "error" + +// NewGithub output format outputs issues according to Github actions format: +// https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message +func NewGithub() Printer { + return &github{} +} + +// print each line as: ::error file=app.js,line=10,col=15::Something went wrong +func formatIssueAsGithub(issue *result.Issue) string { + severity := defaultGithubSeverity + if issue.Severity != "" { + severity = issue.Severity + } + + ret := fmt.Sprintf("::%s file=%s,line=%d", severity, issue.FilePath(), issue.Line()) + if issue.Pos.Column != 0 { + ret += fmt.Sprintf(",col=%d", issue.Pos.Column) + } + + ret += fmt.Sprintf("::%s (%s)", issue.Text, issue.FromLinter) + return ret +} + +func (g *github) Print(_ context.Context, issues []result.Issue) error { + for ind := range issues { + _, err := fmt.Fprintln(logutils.StdOut, formatIssueAsGithub(&issues[ind])) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go new file mode 100644 index 000000000..65ab753bd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go @@ -0,0 +1,155 @@ +package printers + +import ( + "context" + "fmt" + "html/template" + "strings" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +const templateContent = ` + + + + golangci-lint + + + + + + + + + + +
+
+
+
+
+ + + +` + +type htmlIssue struct { + Title string + Pos string + Linter string + Code string +} + +type HTML struct{} + +func NewHTML() *HTML { + return &HTML{} +} + +func (h HTML) Print(_ context.Context, issues []result.Issue) error { + var htmlIssues []htmlIssue + + for i := range issues { + pos := fmt.Sprintf("%s:%d", issues[i].FilePath(), issues[i].Line()) + if issues[i].Pos.Column != 0 { + pos += fmt.Sprintf(":%d", issues[i].Pos.Column) + } + + htmlIssues = append(htmlIssues, htmlIssue{ + Title: strings.TrimSpace(issues[i].Text), + Pos: pos, + Linter: issues[i].FromLinter, + Code: strings.Join(issues[i].SourceLines, "\n"), + }) + } + + t, err := template.New("golangci-lint").Parse(templateContent) + if err != nil { + return err + } + + return t.Execute(logutils.StdOut, struct{ Issues []htmlIssue }{Issues: htmlIssues}) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go new file mode 100644 index 000000000..6ffa996fb --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go @@ -0,0 +1,41 @@ +package printers + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/report" + "github.com/golangci/golangci-lint/pkg/result" +) + +type JSON struct { + rd *report.Data +} + +func NewJSON(rd *report.Data) *JSON { + return &JSON{ + rd: rd, + } +} + +type JSONResult struct { + Issues []result.Issue + Report *report.Data +} + +func (p JSON) Print(ctx context.Context, issues []result.Issue) error { + res := JSONResult{ + Issues: issues, + Report: p.rd, + } + + outputJSON, err := json.Marshal(res) + if err != nil { + return err + } + + fmt.Fprint(logutils.StdOut, string(outputJSON)) + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go new file mode 100644 index 000000000..9277cd66f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -0,0 +1,79 @@ +package printers + +import ( + "context" + "encoding/xml" + "strings" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type testSuitesXML struct { + XMLName xml.Name `xml:"testsuites"` + TestSuites []testSuiteXML +} + +type testSuiteXML struct { + XMLName xml.Name `xml:"testsuite"` + Suite string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Errors int `xml:"errors,attr"` + Failures int `xml:"failures,attr"` + TestCases []testCaseXML `xml:"testcase"` +} + +type testCaseXML struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + Failure failureXML `xml:"failure"` +} + +type failureXML struct { + Message string `xml:"message,attr"` + Content string `xml:",cdata"` +} + +type JunitXML struct { +} + +func NewJunitXML() *JunitXML { + return &JunitXML{} +} + +func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { + suites := make(map[string]testSuiteXML) // use a map to group by file + + for ind := range issues { + i := &issues[ind] + suiteName := i.FilePath() + testSuite := suites[suiteName] + testSuite.Suite = i.FilePath() + testSuite.Tests++ + testSuite.Failures++ + + tc := testCaseXML{ + Name: i.FromLinter, + ClassName: i.Pos.String(), + Failure: failureXML{ + Message: i.Text, + Content: strings.Join(i.SourceLines, "\n"), + }, + } + + testSuite.TestCases = append(testSuite.TestCases, tc) + 
suites[suiteName] = testSuite + } + + var res testSuitesXML + for _, val := range suites { + res.TestSuites = append(res.TestSuites, val) + } + + enc := xml.NewEncoder(logutils.StdOut) + enc.Indent("", " ") + if err := enc.Encode(res); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go new file mode 100644 index 000000000..bfafb88e2 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go @@ -0,0 +1,11 @@ +package printers + +import ( + "context" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Printer interface { + Print(ctx context.Context, issues []result.Issue) error +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go new file mode 100644 index 000000000..d3cdce673 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -0,0 +1,58 @@ +package printers + +import ( + "context" + "fmt" + "io" + "text/tabwriter" + + "github.com/fatih/color" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type Tab struct { + printLinterName bool + log logutils.Log +} + +func NewTab(printLinterName bool, log logutils.Log) *Tab { + return &Tab{ + printLinterName: printLinterName, + log: log, + } +} + +func (p Tab) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { + c := color.New(ca) + return c.Sprintf(format, args...) +} + +func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { + w := tabwriter.NewWriter(logutils.StdOut, 0, 0, 2, ' ', 0) + + for i := range issues { + p.printIssue(&issues[i], w) + } + + if err := w.Flush(); err != nil { + p.log.Warnf("Can't flush tab writer: %s", err) + } + + return nil +} + +func (p Tab) printIssue(i *result.Issue, w io.Writer) { + text := p.SprintfColored(color.FgRed, "%s", i.Text) + if p.printLinterName { + text = fmt.Sprintf("%s\t%s", i.FromLinter, text) + } + + pos := p.SprintfColored(color.Bold, "%s:%d", i.FilePath(), i.Line()) + if i.Pos.Column != 0 { + pos += fmt.Sprintf(":%d", i.Pos.Column) + } + + fmt.Fprintf(w, "%s\t%s\n", pos, text) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go new file mode 100644 index 000000000..181452888 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go @@ -0,0 +1,91 @@ +package printers + +import ( + "context" + "fmt" + "strings" + + "github.com/fatih/color" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type Text struct { + printIssuedLine bool + useColors bool + printLinterName bool + + log logutils.Log +} + +func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log) *Text { + return &Text{ + printIssuedLine: printIssuedLine, + useColors: useColors, + printLinterName: printLinterName, + log: log, + } +} + +func (p Text) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { + if !p.useColors { + return fmt.Sprintf(format, args...) + } + + c := color.New(ca) + return c.Sprintf(format, args...) 
+} + +func (p *Text) Print(ctx context.Context, issues []result.Issue) error { + for i := range issues { + p.printIssue(&issues[i]) + + if !p.printIssuedLine { + continue + } + + p.printSourceCode(&issues[i]) + p.printUnderLinePointer(&issues[i]) + } + + return nil +} + +func (p Text) printIssue(i *result.Issue) { + text := p.SprintfColored(color.FgRed, "%s", strings.TrimSpace(i.Text)) + if p.printLinterName { + text += fmt.Sprintf(" (%s)", i.FromLinter) + } + pos := p.SprintfColored(color.Bold, "%s:%d", i.FilePath(), i.Line()) + if i.Pos.Column != 0 { + pos += fmt.Sprintf(":%d", i.Pos.Column) + } + fmt.Fprintf(logutils.StdOut, "%s: %s\n", pos, text) +} + +func (p Text) printSourceCode(i *result.Issue) { + for _, line := range i.SourceLines { + fmt.Fprintln(logutils.StdOut, line) + } +} + +func (p Text) printUnderLinePointer(i *result.Issue) { + // if column == 0 it means column is unknown (e.g. for gosec) + if len(i.SourceLines) != 1 || i.Pos.Column == 0 { + return + } + + col0 := i.Pos.Column - 1 + line := i.SourceLines[0] + prefixRunes := make([]rune, 0, len(line)) + for j := 0; j < len(line) && j < col0; j++ { + if line[j] == '\t' { + prefixRunes = append(prefixRunes, '\t') + } else { + prefixRunes = append(prefixRunes, ' ') + } + } + + fmt.Fprintf(logutils.StdOut, "%s%s\n", string(prefixRunes), p.SprintfColored(color.FgYellow, "^")) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/report/data.go b/vendor/github.com/golangci/golangci-lint/pkg/report/data.go new file mode 100644 index 000000000..f083fa9f5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/report/data.go @@ -0,0 +1,26 @@ +package report + +type Warning struct { + Tag string `json:",omitempty"` + Text string +} + +type LinterData struct { + Name string + Enabled bool `json:",omitempty"` + EnabledByDefault bool `json:",omitempty"` +} + +type Data struct { + Warnings []Warning `json:",omitempty"` + Linters []LinterData `json:",omitempty"` + Error string `json:",omitempty"` +} + +func (d *Data) AddLinter(name string, enabled, enabledByDefault bool) { + d.Linters = append(d.Linters, LinterData{ + Name: name, + Enabled: enabled, + EnabledByDefault: enabledByDefault, + }) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/report/log.go b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go new file mode 100644 index 000000000..45ab6cae8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go @@ -0,0 +1,64 @@ +package report + +import ( + "fmt" + "strings" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type LogWrapper struct { + rd *Data + tags []string + origLog logutils.Log +} + +func NewLogWrapper(log logutils.Log, reportData *Data) *LogWrapper { + return &LogWrapper{ + rd: reportData, + origLog: log, + } +} + +func (lw LogWrapper) Fatalf(format string, args ...interface{}) { + lw.origLog.Fatalf(format, args...) +} + +func (lw LogWrapper) Panicf(format string, args ...interface{}) { + lw.origLog.Panicf(format, args...) +} + +func (lw LogWrapper) Errorf(format string, args ...interface{}) { + lw.origLog.Errorf(format, args...) + lw.rd.Error = fmt.Sprintf(format, args...) +} + +func (lw LogWrapper) Warnf(format string, args ...interface{}) { + lw.origLog.Warnf(format, args...) + w := Warning{ + Tag: strings.Join(lw.tags, "/"), + Text: fmt.Sprintf(format, args...), + } + + lw.rd.Warnings = append(lw.rd.Warnings, w) +} + +func (lw LogWrapper) Infof(format string, args ...interface{}) { + lw.origLog.Infof(format, args...) 
+} + +func (lw LogWrapper) Child(name string) logutils.Log { + c := lw + c.origLog = lw.origLog.Child(name) + c.tags = append([]string{}, lw.tags...) + c.tags = append(c.tags, name) + return c +} + +func (lw LogWrapper) SetLevel(level logutils.LogLevel) { + lw.origLog.SetLevel(level) +} + +func (lw LogWrapper) GoString() string { + return fmt.Sprintf("lw: %+v, orig log: %#v", lw, lw.origLog) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go new file mode 100644 index 000000000..707a2b17c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -0,0 +1,98 @@ +package result + +import ( + "crypto/md5" //nolint:gosec + "fmt" + "go/token" + + "golang.org/x/tools/go/packages" +) + +type Range struct { + From, To int +} + +type Replacement struct { + NeedOnlyDelete bool // need to delete all lines of the issue without replacement with new lines + NewLines []string // if NeedDelete is false it's the replacement lines + Inline *InlineFix +} + +type InlineFix struct { + StartCol int // zero-based + Length int // length of chunk to be replaced + NewString string +} + +type Issue struct { + FromLinter string + Text string + + Severity string + + // Source lines of a code with the issue to show + SourceLines []string + + // If we know how to fix the issue we can provide replacement lines + Replacement *Replacement + + // Pkg is needed for proper caching of linting results + Pkg *packages.Package `json:"-"` + + LineRange *Range `json:",omitempty"` + + Pos token.Position + + // HunkPos is used only when golangci-lint is run over a diff + HunkPos int `json:",omitempty"` + + // If we are expecting a nolint (because this is from nolintlint), record the expected linter + ExpectNoLint bool + ExpectedNoLintLinter string +} + +func (i *Issue) FilePath() string { + return i.Pos.Filename +} + +func (i *Issue) Line() int { + return i.Pos.Line +} + +func (i *Issue) Column() int { + return i.Pos.Column +} + +func (i *Issue) GetLineRange() Range { + if i.LineRange == nil { + return Range{ + From: i.Line(), + To: i.Line(), + } + } + + if i.LineRange.From == 0 { + return Range{ + From: i.Line(), + To: i.Line(), + } + } + + return *i.LineRange +} + +func (i *Issue) Description() string { + return fmt.Sprintf("%s: %s", i.FromLinter, i.Text) +} + +func (i *Issue) Fingerprint() string { + firstLine := "" + if len(i.SourceLines) > 0 { + firstLine = i.SourceLines[0] + } + + hash := md5.New() //nolint:gosec + _, _ = hash.Write([]byte(fmt.Sprintf("%s%s%s", i.Pos.Filename, i.Text, firstLine))) + + return fmt.Sprintf("%X", hash.Sum(nil)) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go new file mode 100644 index 000000000..57388f64f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go @@ -0,0 +1,134 @@ +package processors + +import ( + "fmt" + "go/parser" + "go/token" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var autogenDebugf = logutils.Debug("autogen_exclude") + +type ageFileSummary struct { + isGenerated bool +} + +type ageFileSummaryCache map[string]*ageFileSummary + +type AutogeneratedExclude struct { + fileSummaryCache ageFileSummaryCache +} + +func NewAutogeneratedExclude() 
*AutogeneratedExclude { + return &AutogeneratedExclude{ + fileSummaryCache: ageFileSummaryCache{}, + } +} + +var _ Processor = &AutogeneratedExclude{} + +func (p AutogeneratedExclude) Name() string { + return "autogenerated_exclude" +} + +func (p *AutogeneratedExclude) Process(issues []result.Issue) ([]result.Issue, error) { + return filterIssuesErr(issues, p.shouldPassIssue) +} + +func isSpecialAutogeneratedFile(filePath string) bool { + fileName := filepath.Base(filePath) + // fake files or generation definitions to which //line points to for generated files + return filepath.Ext(fileName) != ".go" +} + +func (p *AutogeneratedExclude) shouldPassIssue(i *result.Issue) (bool, error) { + if i.FromLinter == "typecheck" { + // don't hide typechecking errors in generated files: users expect to see why the project isn't compiling + return true, nil + } + + if filepath.Base(i.FilePath()) == "go.mod" { + return true, nil + } + + if isSpecialAutogeneratedFile(i.FilePath()) { + return false, nil + } + + fs, err := p.getOrCreateFileSummary(i) + if err != nil { + return false, err + } + + // don't report issues for autogenerated files + return !fs.isGenerated, nil +} + +// isGenerated reports whether the source file is generated code. +// Using a bit laxer rules than https://golang.org/s/generatedcode to +// match more generated code. See #48 and #72. +func isGeneratedFileByComment(doc string) bool { + const ( + genCodeGenerated = "code generated" + genDoNotEdit = "do not edit" + genAutoFile = "autogenerated file" // easyjson + ) + + markers := []string{genCodeGenerated, genDoNotEdit, genAutoFile} + doc = strings.ToLower(doc) + for _, marker := range markers { + if strings.Contains(doc, marker) { + autogenDebugf("doc contains marker %q: file is generated", marker) + return true + } + } + + autogenDebugf("doc of len %d doesn't contain any of markers: %s", len(doc), markers) + return false +} + +func (p *AutogeneratedExclude) getOrCreateFileSummary(i *result.Issue) (*ageFileSummary, error) { + fs := p.fileSummaryCache[i.FilePath()] + if fs != nil { + return fs, nil + } + + fs = &ageFileSummary{} + p.fileSummaryCache[i.FilePath()] = fs + + if i.FilePath() == "" { + return nil, fmt.Errorf("no file path for issue") + } + + doc, err := getDoc(i.FilePath()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get doc of file %s", i.FilePath()) + } + + fs.isGenerated = isGeneratedFileByComment(doc) + autogenDebugf("file %q is generated: %t", i.FilePath(), fs.isGenerated) + return fs, nil +} + +func getDoc(filePath string) (string, error) { + fset := token.NewFileSet() + syntax, err := parser.ParseFile(fset, filePath, nil, parser.PackageClauseOnly|parser.ParseComments) + if err != nil { + return "", errors.Wrap(err, "failed to parse file") + } + + var docLines []string + for _, c := range syntax.Comments { + docLines = append(docLines, strings.TrimSpace(c.Text())) + } + + return strings.Join(docLines, "\n"), nil +} + +func (p AutogeneratedExclude) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go new file mode 100644 index 000000000..b6ce4f215 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go @@ -0,0 +1,69 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type BaseRule struct { 
+ Text string + Source string + Path string + Linters []string +} + +type baseRule struct { + text *regexp.Regexp + source *regexp.Regexp + path *regexp.Regexp + linters []string +} + +func (r *baseRule) isEmpty() bool { + return r.text == nil && r.source == nil && r.path == nil && len(r.linters) == 0 +} + +func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { + if r.isEmpty() { + return false + } + if r.text != nil && !r.text.MatchString(issue.Text) { + return false + } + if r.path != nil && !r.path.MatchString(issue.FilePath()) { + return false + } + if len(r.linters) != 0 && !r.matchLinter(issue) { + return false + } + + // the most heavyweight checking last + if r.source != nil && !r.matchSource(issue, lineCache, log) { + return false + } + + return true +} + +func (r *baseRule) matchLinter(issue *result.Issue) bool { + for _, linter := range r.linters { + if linter == issue.FromLinter { + return true + } + } + + return false +} + +func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { // nolint:interfacer + sourceLine, errSourceLine := lineCache.GetLine(issue.FilePath(), issue.Line()) + if errSourceLine != nil { + log.Warnf("Failed to get line %s:%d from line cache: %s", issue.FilePath(), issue.Line(), errSourceLine) + return false // can't properly match + } + + return r.source.MatchString(sourceLine) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go new file mode 100644 index 000000000..c8793871a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go @@ -0,0 +1,58 @@ +package processors + +import ( + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/goutil" + "github.com/golangci/golangci-lint/pkg/result" +) + +type Cgo struct { + goCacheDir string +} + +var _ Processor = Cgo{} + +func NewCgo(goenv *goutil.Env) *Cgo { + return &Cgo{ + goCacheDir: goenv.Get(goutil.EnvGoCache), + } +} + +func (p Cgo) Name() string { + return "cgo" +} + +func (p Cgo) Process(issues []result.Issue) ([]result.Issue, error) { + return filterIssuesErr(issues, func(i *result.Issue) (bool, error) { + // some linters (.e.g gosec, deadcode) return incorrect filepaths for cgo issues, + // also cgo files have strange issues looking like false positives. 
+ + // cache dir contains all preprocessed files including cgo files + + issueFilePath := i.FilePath() + if !filepath.IsAbs(i.FilePath()) { + absPath, err := filepath.Abs(i.FilePath()) + if err != nil { + return false, errors.Wrapf(err, "failed to build abs path for %q", i.FilePath()) + } + issueFilePath = absPath + } + + if p.goCacheDir != "" && strings.HasPrefix(issueFilePath, p.goCacheDir) { + return false, nil + } + + if filepath.Base(i.FilePath()) == "_cgo_gotypes.go" { + // skip cgo warning for go1.10 + return false, nil + } + + return true, nil + }) +} + +func (Cgo) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go new file mode 100644 index 000000000..fc4aba4b9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go @@ -0,0 +1,74 @@ +package processors + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/golangci/revgrep" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Diff struct { + onlyNew bool + fromRev string + patchFilePath string + patch string +} + +var _ Processor = Diff{} + +func NewDiff(onlyNew bool, fromRev, patchFilePath string) *Diff { + return &Diff{ + onlyNew: onlyNew, + fromRev: fromRev, + patchFilePath: patchFilePath, + patch: os.Getenv("GOLANGCI_DIFF_PROCESSOR_PATCH"), + } +} + +func (p Diff) Name() string { + return "diff" +} + +func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { + if !p.onlyNew && p.fromRev == "" && p.patchFilePath == "" && p.patch == "" { // no need to work + return issues, nil + } + + var patchReader io.Reader + if p.patchFilePath != "" { + patch, err := ioutil.ReadFile(p.patchFilePath) + if err != nil { + return nil, fmt.Errorf("can't read from patch file %s: %s", p.patchFilePath, err) + } + patchReader = bytes.NewReader(patch) + } else if p.patch != "" { + patchReader = strings.NewReader(p.patch) + } + + c := revgrep.Checker{ + Patch: patchReader, + RevisionFrom: p.fromRev, + } + if err := c.Prepare(); err != nil { + return nil, fmt.Errorf("can't prepare diff by revgrep: %s", err) + } + + return transformIssues(issues, func(i *result.Issue) *result.Issue { + hunkPos, isNew := c.IsNewIssue(i) + if !isNew { + return nil + } + + newI := *i + newI.HunkPos = hunkPos + return &newI + }), nil +} + +func (Diff) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go new file mode 100644 index 000000000..92959a328 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go @@ -0,0 +1,59 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Exclude struct { + pattern *regexp.Regexp +} + +var _ Processor = Exclude{} + +func NewExclude(pattern string) *Exclude { + var patternRe *regexp.Regexp + if pattern != "" { + patternRe = regexp.MustCompile("(?i)" + pattern) + } + return &Exclude{ + pattern: patternRe, + } +} + +func (p Exclude) Name() string { + return "exclude" +} + +func (p Exclude) Process(issues []result.Issue) ([]result.Issue, error) { + if p.pattern == nil { + return issues, nil + } + + return filterIssues(issues, func(i *result.Issue) bool { + return !p.pattern.MatchString(i.Text) + }), nil +} + +func (p Exclude) Finish() {} + +type ExcludeCaseSensitive struct { + *Exclude +} + +var _ Processor = ExcludeCaseSensitive{} + 
+func NewExcludeCaseSensitive(pattern string) *ExcludeCaseSensitive { + var patternRe *regexp.Regexp + if pattern != "" { + patternRe = regexp.MustCompile(pattern) + } + return &ExcludeCaseSensitive{ + &Exclude{pattern: patternRe}, + } +} + +func (p ExcludeCaseSensitive) Name() string { + return "exclude-case-sensitive" +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go new file mode 100644 index 000000000..d4d6569f4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go @@ -0,0 +1,90 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type excludeRule struct { + baseRule +} + +type ExcludeRule struct { + BaseRule +} + +type ExcludeRules struct { + rules []excludeRule + lineCache *fsutils.LineCache + log logutils.Log +} + +func NewExcludeRules(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRules { + r := &ExcludeRules{ + lineCache: lineCache, + log: log, + } + r.rules = createRules(rules, "(?i)") + + return r +} + +func createRules(rules []ExcludeRule, prefix string) []excludeRule { + parsedRules := make([]excludeRule, 0, len(rules)) + for _, rule := range rules { + parsedRule := excludeRule{} + parsedRule.linters = rule.Linters + if rule.Text != "" { + parsedRule.text = regexp.MustCompile(prefix + rule.Text) + } + if rule.Source != "" { + parsedRule.source = regexp.MustCompile(prefix + rule.Source) + } + if rule.Path != "" { + parsedRule.path = regexp.MustCompile(rule.Path) + } + parsedRules = append(parsedRules, parsedRule) + } + return parsedRules +} + +func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.rules) == 0 { + return issues, nil + } + return filterIssues(issues, func(i *result.Issue) bool { + for _, rule := range p.rules { + rule := rule + if rule.match(i, p.lineCache, p.log) { + return false + } + } + return true + }), nil +} + +func (ExcludeRules) Name() string { return "exclude-rules" } +func (ExcludeRules) Finish() {} + +var _ Processor = ExcludeRules{} + +type ExcludeRulesCaseSensitive struct { + *ExcludeRules +} + +func NewExcludeRulesCaseSensitive(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRulesCaseSensitive { + r := &ExcludeRules{ + lineCache: lineCache, + log: log, + } + r.rules = createRules(rules, "") + + return &ExcludeRulesCaseSensitive{r} +} + +func (ExcludeRulesCaseSensitive) Name() string { return "exclude-rules-case-sensitive" } + +var _ Processor = ExcludeCaseSensitive{} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go new file mode 100644 index 000000000..96540245b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go @@ -0,0 +1,131 @@ +package processors + +import ( + "go/parser" + "go/token" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type posMapper func(pos token.Position) token.Position + +type adjustMap struct { + sync.Mutex + m map[string]posMapper +} + +// FilenameUnadjuster is needed because a lot of 
linters use fset.Position(f.Pos()) +// to get filename. And they return adjusted filename (e.g. *.qtpl) for an issue. We need +// restore real .go filename to properly output it, parse it, etc. +type FilenameUnadjuster struct { + m map[string]posMapper // map from adjusted filename to position mapper: adjusted -> unadjusted position + log logutils.Log + loggedUnadjustments map[string]bool +} + +var _ Processor = &FilenameUnadjuster{} + +func processUnadjusterPkg(m *adjustMap, pkg *packages.Package, log logutils.Log) { + fset := token.NewFileSet() // it's more memory efficient to not store all in one fset + + for _, filename := range pkg.CompiledGoFiles { + // It's important to call func here to run GC + processUnadjusterFile(filename, m, log, fset) + } +} + +func processUnadjusterFile(filename string, m *adjustMap, log logutils.Log, fset *token.FileSet) { + syntax, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) + if err != nil { + // Error will be reported by typecheck + return + } + + adjustedFilename := fset.PositionFor(syntax.Pos(), true).Filename + if adjustedFilename == "" { + return + } + + unadjustedFilename := fset.PositionFor(syntax.Pos(), false).Filename + if unadjustedFilename == "" || unadjustedFilename == adjustedFilename { + return + } + + if !strings.HasSuffix(unadjustedFilename, ".go") { + return // file.go -> /caches/cgo-xxx + } + + m.Lock() + defer m.Unlock() + m.m[adjustedFilename] = func(adjustedPos token.Position) token.Position { + tokenFile := fset.File(syntax.Pos()) + if tokenFile == nil { + log.Warnf("Failed to get token file for %s", adjustedFilename) + return adjustedPos + } + return fset.PositionFor(tokenFile.Pos(adjustedPos.Offset), false) + } +} + +func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *FilenameUnadjuster { + m := adjustMap{m: map[string]posMapper{}} + + startedAt := time.Now() + var wg sync.WaitGroup + wg.Add(len(pkgs)) + for _, pkg := range pkgs { + go func(pkg *packages.Package) { + // It's important to call func here to run GC + processUnadjusterPkg(&m, pkg, log) + wg.Done() + }(pkg) + } + wg.Wait() + log.Infof("Pre-built %d adjustments in %s", len(m.m), time.Since(startedAt)) + + return &FilenameUnadjuster{ + m: m.m, + log: log, + loggedUnadjustments: map[string]bool{}, + } +} + +func (p FilenameUnadjuster) Name() string { + return "filename_unadjuster" +} + +func (p *FilenameUnadjuster) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(i *result.Issue) *result.Issue { + issueFilePath := i.FilePath() + if !filepath.IsAbs(i.FilePath()) { + absPath, err := filepath.Abs(i.FilePath()) + if err != nil { + p.log.Warnf("failed to build abs path for %q: %s", i.FilePath(), err) + return i + } + issueFilePath = absPath + } + + mapper := p.m[issueFilePath] + if mapper == nil { + return i + } + + newI := *i + newI.Pos = mapper(i.Pos) + if !p.loggedUnadjustments[i.Pos.Filename] { + p.log.Infof("Unadjusted from %v to %v", i.Pos, newI.Pos) + p.loggedUnadjustments[i.Pos.Filename] = true + } + return &newI + }), nil +} + +func (FilenameUnadjuster) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go new file mode 100644 index 000000000..17f519e32 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -0,0 +1,248 @@ +package processors + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + 
"github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/golangci-lint/pkg/timeutils" +) + +type Fixer struct { + cfg *config.Config + log logutils.Log + fileCache *fsutils.FileCache + sw *timeutils.Stopwatch +} + +func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer { + return &Fixer{ + cfg: cfg, + log: log, + fileCache: fileCache, + sw: timeutils.NewStopwatch("fixer", log), + } +} + +func (f Fixer) printStat() { + f.sw.PrintStages() +} + +func (f Fixer) Process(issues []result.Issue) []result.Issue { + if !f.cfg.Issues.NeedFix { + return issues + } + + outIssues := make([]result.Issue, 0, len(issues)) + issuesToFixPerFile := map[string][]result.Issue{} + for i := range issues { + issue := &issues[i] + if issue.Replacement == nil { + outIssues = append(outIssues, *issue) + continue + } + + issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) + } + + for file, issuesToFix := range issuesToFixPerFile { + var err error + f.sw.TrackStage("all", func() { + err = f.fixIssuesInFile(file, issuesToFix) + }) + if err != nil { + f.log.Errorf("Failed to fix issues in file %s: %s", file, err) + + // show issues only if can't fix them + outIssues = append(outIssues, issuesToFix...) + } + } + + f.printStat() + return outIssues +} + +func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { + // TODO: don't read the whole file into memory: read line by line; + // can't just use bufio.scanner: it has a line length limit + origFileData, err := f.fileCache.GetFileBytes(filePath) + if err != nil { + return errors.Wrapf(err, "failed to get file bytes for %s", filePath) + } + origFileLines := bytes.Split(origFileData, []byte("\n")) + + tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) + tmpOutFile, err := os.Create(tmpFileName) + if err != nil { + return errors.Wrapf(err, "failed to make file %s", tmpFileName) + } + + // merge multiple issues per line into one issue + issuesPerLine := map[int][]result.Issue{} + for i := range issues { + issue := &issues[i] + issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) + } + + issues = issues[:0] // reuse the same memory + for line, lineIssues := range issuesPerLine { + if mergedIssue := f.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil { + issues = append(issues, *mergedIssue) + } + } + + issues = f.findNotIntersectingIssues(issues) + + if err = f.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { + tmpOutFile.Close() + os.Remove(tmpOutFile.Name()) + return err + } + + tmpOutFile.Close() + if err = os.Rename(tmpOutFile.Name(), filePath); err != nil { + os.Remove(tmpOutFile.Name()) + return errors.Wrapf(err, "failed to rename %s -> %s", tmpOutFile.Name(), filePath) + } + + return nil +} + +func (f Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue { + origLine := origFileLines[lineNum-1] // lineNum is 1-based + + if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil { + return &lineIssues[0] + } + + // check issues first + for ind := range lineIssues { + i := &lineIssues[ind] + if i.LineRange != nil { + f.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) + 
return &lineIssues[0] + } + + r := i.Replacement + if r.Inline == nil || len(r.NewLines) != 0 || r.NeedOnlyDelete { + f.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues) + return &lineIssues[0] + } + + if r.Inline.StartCol < 0 || r.Inline.Length <= 0 || r.Inline.StartCol+r.Inline.Length > len(origLine) { + f.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, i, r.Inline) + return nil + } + } + + return f.applyInlineFixes(lineIssues, origLine, lineNum) +} + +func (f Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue { + sort.Slice(lineIssues, func(i, j int) bool { + return lineIssues[i].Replacement.Inline.StartCol < lineIssues[j].Replacement.Inline.StartCol + }) + + var newLineBuf bytes.Buffer + newLineBuf.Grow(len(origLine)) + + //nolint:misspell + // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" + + curOrigLinePos := 0 + for i := range lineIssues { + fix := lineIssues[i].Replacement.Inline + if fix.StartCol < curOrigLinePos { + f.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) + return nil + } + + if curOrigLinePos != fix.StartCol { + newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol]) + } + newLineBuf.WriteString(fix.NewString) + curOrigLinePos = fix.StartCol + fix.Length + } + if curOrigLinePos != len(origLine) { + newLineBuf.Write(origLine[curOrigLinePos:]) + } + + mergedIssue := lineIssues[0] // use text from the first issue (it's not really used) + mergedIssue.Replacement = &result.Replacement{ + NewLines: []string{newLineBuf.String()}, + } + return &mergedIssue +} + +func (f Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { + sort.SliceStable(issues, func(i, j int) bool { + a, b := issues[i], issues[j] + return a.Line() < b.Line() + }) + + var ret []result.Issue + var currentEnd int + for i := range issues { + issue := &issues[i] + rng := issue.GetLineRange() + if rng.From <= currentEnd { + f.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) + continue // skip intersecting issue + } + f.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) + ret = append(ret, *issue) + currentEnd = rng.To + } + + return ret +} + +func (f Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error { + // issues aren't intersecting + + nextIssueIndex := 0 + for i := 0; i < len(origFileLines); i++ { + var outLine string + var nextIssue *result.Issue + if nextIssueIndex != len(issues) { + nextIssue = &issues[nextIssueIndex] + } + + origFileLineNumber := i + 1 + if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From { + outLine = string(origFileLines[i]) + } else { + nextIssueIndex++ + rng := nextIssue.GetLineRange() + if rng.From > rng.To { + // Maybe better decision is to skip such issues, re-evaluate if regressed. 
+ f.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)", + rng.From, rng.To, nextIssue.FromLinter, + ) + } + i += rng.To - rng.From + if nextIssue.Replacement.NeedOnlyDelete { + continue + } + outLine = strings.Join(nextIssue.Replacement.NewLines, "\n") + } + + if i < len(origFileLines)-1 { + outLine += "\n" + } + if _, err := tmpOutFile.WriteString(outLine); err != nil { + return errors.Wrap(err, "failed to write output line") + } + } + + return nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go new file mode 100644 index 000000000..5cc4e56ba --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go @@ -0,0 +1,125 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type replacePattern struct { + re string + repl string +} + +type replaceRegexp struct { + re *regexp.Regexp + repl string +} + +var replacePatterns = []replacePattern{ + // unparam + {`^(\S+) - (\S+) is unused$`, "`${1}` - `${2}` is unused"}, + {`^(\S+) - (\S+) always receives (\S+) \((.*)\)$`, "`${1}` - `${2}` always receives `${3}` (`${4}`)"}, + {`^(\S+) - (\S+) always receives (.*)$`, "`${1}` - `${2}` always receives `${3}`"}, + {`^(\S+) - result (\S+) is always (\S+)`, "`${1}` - result `${2}` is always `${3}`"}, + + // interfacer + {`^(\S+) can be (\S+)$`, "`${1}` can be `${2}`"}, + + // govet + {`^printf: (\S+) arg list ends with redundant newline$`, "printf: `${1}` arg list ends with redundant newline"}, + {`^composites: (\S+) composite literal uses unkeyed fields$`, "composites: `${1}` composite literal uses unkeyed fields"}, + + // gosec + {`^(\S+): Blacklisted import (\S+): weak cryptographic primitive$`, + "${1}: Blacklisted import `${2}`: weak cryptographic primitive"}, + {`^TLS InsecureSkipVerify set true.$`, "TLS `InsecureSkipVerify` set true."}, + + // gosimple + {`should replace loop with (.*)$`, "should replace loop with `${1}`"}, + {`should use a simple channel send/receive instead of select with a single case`, + "should use a simple channel send/receive instead of `select` with a single case"}, + {`should omit comparison to bool constant, can be simplified to (.+)$`, + "should omit comparison to bool constant, can be simplified to `${1}`"}, + {`should write (.+) instead of (.+)$`, "should write `${1}` instead of `${2}`"}, + {`redundant return statement$`, "redundant `return` statement"}, + {`should replace this if statement with an unconditional strings.TrimPrefix`, + "should replace this `if` statement with an unconditional `strings.TrimPrefix`"}, + + // staticcheck + {`this value of (\S+) is never used$`, "this value of `${1}` is never used"}, + {`should use time.Since instead of time.Now\(\).Sub$`, + "should use `time.Since` instead of `time.Now().Sub`"}, + {`should check returned error before deferring response.Close\(\)$`, + "should check returned error before deferring `response.Close()`"}, + {`no value of type uint is less than 0$`, "no value of type `uint` is less than `0`"}, + + // unused + {`(func|const|field|type|var) (\S+) is unused$`, "${1} `${2}` is unused"}, + + // typecheck + {`^unknown field (\S+) in struct literal$`, "unknown field `${1}` in struct literal"}, + {`^invalid operation: (\S+) \(variable of type (\S+)\) has no field or method (\S+)$`, + "invalid operation: `${1}` (variable of type `${2}`) has 
no field or method `${3}`"}, + {`^undeclared name: (\S+)$`, "undeclared name: `${1}`"}, + {`^cannot use addr \(variable of type (\S+)\) as (\S+) value in argument to (\S+)$`, + "cannot use addr (variable of type `${1}`) as `${2}` value in argument to `${3}`"}, + {`^other declaration of (\S+)$`, "other declaration of `${1}`"}, + {`^(\S+) redeclared in this block$`, "`${1}` redeclared in this block"}, + + // golint + {`^exported (type|method|function|var|const) (\S+) should have comment or be unexported$`, + "exported ${1} `${2}` should have comment or be unexported"}, + {`^comment on exported (type|method|function|var|const) (\S+) should be of the form "(\S+) ..."$`, + "comment on exported ${1} `${2}` should be of the form `${3} ...`"}, + {`^should replace (.+) with (.+)$`, "should replace `${1}` with `${2}`"}, + {`^if block ends with a return statement, so drop this else and outdent its block$`, + "`if` block ends with a `return` statement, so drop this `else` and outdent its block"}, + {`^(struct field|var|range var|const|type|(?:func|method|interface method) (?:parameter|result)) (\S+) should be (\S+)$`, + "${1} `${2}` should be `${3}`"}, + {`^don't use underscores in Go names; var (\S+) should be (\S+)$`, + "don't use underscores in Go names; var `${1}` should be `${2}`"}, +} + +type IdentifierMarker struct { + replaceRegexps []replaceRegexp +} + +func NewIdentifierMarker() *IdentifierMarker { + var replaceRegexps []replaceRegexp + for _, p := range replacePatterns { + r := replaceRegexp{ + re: regexp.MustCompile(p.re), + repl: p.repl, + } + replaceRegexps = append(replaceRegexps, r) + } + + return &IdentifierMarker{ + replaceRegexps: replaceRegexps, + } +} + +func (im IdentifierMarker) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(i *result.Issue) *result.Issue { + iCopy := *i + iCopy.Text = im.markIdentifiers(iCopy.Text) + return &iCopy + }), nil +} + +func (im IdentifierMarker) markIdentifiers(s string) string { + for _, rr := range im.replaceRegexps { + rs := rr.re.ReplaceAllString(s, rr.repl) + if rs != s { + return rs + } + } + + return s +} + +func (im IdentifierMarker) Name() string { + return "identifier_marker" +} +func (im IdentifierMarker) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go new file mode 100644 index 000000000..c58666c56 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -0,0 +1,54 @@ +package processors + +import ( + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type MaxFromLinter struct { + lc linterToCountMap + limit int + log logutils.Log + cfg *config.Config +} + +var _ Processor = &MaxFromLinter{} + +func NewMaxFromLinter(limit int, log logutils.Log, cfg *config.Config) *MaxFromLinter { + return &MaxFromLinter{ + lc: linterToCountMap{}, + limit: limit, + log: log, + cfg: cfg, + } +} + +func (p MaxFromLinter) Name() string { + return "max_from_linter" +} + +func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { + if p.limit <= 0 { // no limit + return issues, nil + } + + return filterIssues(issues, func(i *result.Issue) bool { + if i.Replacement != nil && p.cfg.Issues.NeedFix { + // we need to fix all issues at once => we need to return all of them + return true + } + + 
p.lc[i.FromLinter]++ // always inc for stat + return p.lc[i.FromLinter] <= p.limit + }), nil +} + +func (p MaxFromLinter) Finish() { + walkStringToIntMapSortedByValue(p.lc, func(linter string, count int) { + if count > p.limit { + p.log.Infof("%d/%d issues from linter %s were hidden, use --max-issues-per-linter", + count-p.limit, count, linter) + } + }) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go new file mode 100644 index 000000000..e36446c9f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go @@ -0,0 +1,59 @@ +package processors + +import ( + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/result" +) + +type linterToCountMap map[string]int +type fileToLinterToCountMap map[string]linterToCountMap + +type MaxPerFileFromLinter struct { + flc fileToLinterToCountMap + maxPerFileFromLinterConfig map[string]int +} + +var _ Processor = &MaxPerFileFromLinter{} + +func NewMaxPerFileFromLinter(cfg *config.Config) *MaxPerFileFromLinter { + maxPerFileFromLinterConfig := map[string]int{} + + if !cfg.Issues.NeedFix { + // if we don't fix we do this limiting to not annoy user; + // otherwise we need to fix all issues in the file at once + maxPerFileFromLinterConfig["gofmt"] = 1 + maxPerFileFromLinterConfig["goimports"] = 1 + } + + return &MaxPerFileFromLinter{ + flc: fileToLinterToCountMap{}, + maxPerFileFromLinterConfig: maxPerFileFromLinterConfig, + } +} + +func (p MaxPerFileFromLinter) Name() string { + return "max_per_file_from_linter" +} + +func (p *MaxPerFileFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { + return filterIssues(issues, func(i *result.Issue) bool { + limit := p.maxPerFileFromLinterConfig[i.FromLinter] + if limit == 0 { + return true + } + + lm := p.flc[i.FilePath()] + if lm == nil { + p.flc[i.FilePath()] = linterToCountMap{} + } + count := p.flc[i.FilePath()][i.FromLinter] + if count >= limit { + return false + } + + p.flc[i.FilePath()][i.FromLinter]++ + return true + }), nil +} + +func (p MaxPerFileFromLinter) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go new file mode 100644 index 000000000..84fdf0c05 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -0,0 +1,81 @@ +package processors + +import ( + "sort" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type textToCountMap map[string]int + +type MaxSameIssues struct { + tc textToCountMap + limit int + log logutils.Log + cfg *config.Config +} + +var _ Processor = &MaxSameIssues{} + +func NewMaxSameIssues(limit int, log logutils.Log, cfg *config.Config) *MaxSameIssues { + return &MaxSameIssues{ + tc: textToCountMap{}, + limit: limit, + log: log, + cfg: cfg, + } +} + +func (MaxSameIssues) Name() string { + return "max_same_issues" +} + +func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { + if p.limit <= 0 { // no limit + return issues, nil + } + + return filterIssues(issues, func(i *result.Issue) bool { + if i.Replacement != nil && p.cfg.Issues.NeedFix { + // we need to fix all issues at once => we need to return all of them + return 
true + } + + p.tc[i.Text]++ // always inc for stat + return p.tc[i.Text] <= p.limit + }), nil +} + +func (p MaxSameIssues) Finish() { + walkStringToIntMapSortedByValue(p.tc, func(text string, count int) { + if count > p.limit { + p.log.Infof("%d/%d issues with text %q were hidden, use --max-same-issues", + count-p.limit, count, text) + } + }) +} + +type kv struct { + Key string + Value int +} + +func walkStringToIntMapSortedByValue(m map[string]int, walk func(k string, v int)) { + var ss []kv + for k, v := range m { + ss = append(ss, kv{ + Key: k, + Value: v, + }) + } + + sort.Slice(ss, func(i, j int) bool { + return ss[i].Value > ss[j].Value + }) + + for _, kv := range ss { + walk(kv.Key, kv.Value) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go new file mode 100644 index 000000000..0788a7160 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go @@ -0,0 +1,309 @@ +package processors + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "regexp" + "sort" + "strings" + + "github.com/golangci/golangci-lint/pkg/golinters" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/lint/lintersdb" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var nolintDebugf = logutils.Debug("nolint") + +type ignoredRange struct { + linters []string + matchedIssueFromLinter map[string]bool + result.Range + col int + originalRange *ignoredRange // pre-expanded range (used to match nolintlint issues) +} + +func (i *ignoredRange) doesMatch(issue *result.Issue) bool { + if issue.Line() < i.From || issue.Line() > i.To { + return false + } + + // only allow selective nolinting of nolintlint + nolintFoundForLinter := len(i.linters) == 0 && issue.FromLinter != golinters.NolintlintName + + for _, linterName := range i.linters { + if linterName == issue.FromLinter { + nolintFoundForLinter = true + break + } + } + + if nolintFoundForLinter { + return true + } + + // handle possible unused nolint directives + // nolintlint generates potential issues for every nolint directive and they are filtered out here + if issue.FromLinter == golinters.NolintlintName && issue.ExpectNoLint { + if issue.ExpectedNoLintLinter != "" { + return i.matchedIssueFromLinter[issue.ExpectedNoLintLinter] + } + return len(i.matchedIssueFromLinter) > 0 + } + + return false +} + +type fileData struct { + ignoredRanges []ignoredRange +} + +type filesCache map[string]*fileData + +type Nolint struct { + cache filesCache + dbManager *lintersdb.Manager + enabledLinters map[string]*linter.Config + log logutils.Log + + unknownLintersSet map[string]bool +} + +func NewNolint(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters map[string]*linter.Config) *Nolint { + return &Nolint{ + cache: filesCache{}, + dbManager: dbManager, + enabledLinters: enabledLinters, + log: log, + unknownLintersSet: map[string]bool{}, + } +} + +var _ Processor = &Nolint{} + +func (p Nolint) Name() string { + return "nolint" +} + +func (p *Nolint) Process(issues []result.Issue) ([]result.Issue, error) { + // put nolintlint issues last because we process other issues first to determine which nolint directives are unused + sort.Stable(sortWithNolintlintLast(issues)) + return filterIssuesErr(issues, p.shouldPassIssue) +} + +func (p *Nolint) getOrCreateFileData(i *result.Issue) (*fileData, error) { + fd := 
p.cache[i.FilePath()] + if fd != nil { + return fd, nil + } + + fd = &fileData{} + p.cache[i.FilePath()] = fd + + if i.FilePath() == "" { + return nil, fmt.Errorf("no file path for issue") + } + + // TODO: migrate this parsing to go/analysis facts + // or cache them somehow per file. + + // Don't use cached AST because they consume a lot of memory on large projects. + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, i.FilePath(), nil, parser.ParseComments) + if err != nil { + // Don't report error because it's already must be reporter by typecheck or go/analysis. + return fd, nil + } + + fd.ignoredRanges = p.buildIgnoredRangesForFile(f, fset, i.FilePath()) + nolintDebugf("file %s: built nolint ranges are %+v", i.FilePath(), fd.ignoredRanges) + return fd, nil +} + +func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, filePath string) []ignoredRange { + inlineRanges := p.extractFileCommentsInlineRanges(fset, f.Comments...) + nolintDebugf("file %s: inline nolint ranges are %+v", filePath, inlineRanges) + + if len(inlineRanges) == 0 { + return nil + } + + e := rangeExpander{ + fset: fset, + inlineRanges: inlineRanges, + } + + ast.Walk(&e, f) + + // TODO: merge all ranges: there are repeated ranges + allRanges := append([]ignoredRange{}, inlineRanges...) + allRanges = append(allRanges, e.expandedRanges...) + + return allRanges +} + +func (p *Nolint) shouldPassIssue(i *result.Issue) (bool, error) { + nolintDebugf("got issue: %v", *i) + if i.FromLinter == golinters.NolintlintName && i.ExpectNoLint && i.ExpectedNoLintLinter != "" { + // don't expect disabled linters to cover their nolint statements + nolintDebugf("enabled linters: %v", p.enabledLinters) + if p.enabledLinters[i.ExpectedNoLintLinter] == nil { + return false, nil + } + nolintDebugf("checking that lint issue was used for %s: %v", i.ExpectedNoLintLinter, i) + } + + fd, err := p.getOrCreateFileData(i) + if err != nil { + return false, err + } + + for _, ir := range fd.ignoredRanges { + if ir.doesMatch(i) { + nolintDebugf("found ignored range for issue %v: %v", i, ir) + ir.matchedIssueFromLinter[i.FromLinter] = true + if ir.originalRange != nil { + ir.originalRange.matchedIssueFromLinter[i.FromLinter] = true + } + return false, nil + } + } + + return true, nil +} + +type rangeExpander struct { + fset *token.FileSet + inlineRanges []ignoredRange + expandedRanges []ignoredRange +} + +func (e *rangeExpander) Visit(node ast.Node) ast.Visitor { + if node == nil { + return e + } + + nodeStartPos := e.fset.Position(node.Pos()) + nodeStartLine := nodeStartPos.Line + nodeEndLine := e.fset.Position(node.End()).Line + + var foundRange *ignoredRange + for _, r := range e.inlineRanges { + if r.To == nodeStartLine-1 && nodeStartPos.Column == r.col { + r := r + foundRange = &r + break + } + } + if foundRange == nil { + return e + } + + expandedRange := *foundRange + // store the original unexpanded range for matching nolintlint issues + if expandedRange.originalRange == nil { + expandedRange.originalRange = foundRange + } + if expandedRange.To < nodeEndLine { + expandedRange.To = nodeEndLine + } + + nolintDebugf("found range is %v for node %#v [%d;%d], expanded range is %v", + *foundRange, node, nodeStartLine, nodeEndLine, expandedRange) + e.expandedRanges = append(e.expandedRanges, expandedRange) + + return e +} + +func (p *Nolint) extractFileCommentsInlineRanges(fset *token.FileSet, comments ...*ast.CommentGroup) []ignoredRange { + var ret []ignoredRange + for _, g := range comments { + for _, c := range g.List { 
+ ir := p.extractInlineRangeFromComment(c.Text, g, fset) + if ir != nil { + ret = append(ret, *ir) + } + } + } + + return ret +} + +func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *token.FileSet) *ignoredRange { + text = strings.TrimLeft(text, "/ ") + if ok, _ := regexp.MatchString(`^nolint( |:|$)`, text); !ok { + return nil + } + + buildRange := func(linters []string) *ignoredRange { + pos := fset.Position(g.Pos()) + return &ignoredRange{ + Range: result.Range{ + From: pos.Line, + To: fset.Position(g.End()).Line, + }, + col: pos.Column, + linters: linters, + matchedIssueFromLinter: make(map[string]bool), + } + } + + if !strings.HasPrefix(text, "nolint:") { + return buildRange(nil) // ignore all linters + } + + // ignore specific linters + var linters []string + text = strings.Split(text, "//")[0] // allow another comment after this comment + linterItems := strings.Split(strings.TrimPrefix(text, "nolint:"), ",") + for _, linter := range linterItems { + linterName := strings.ToLower(strings.TrimSpace(linter)) + + lcs := p.dbManager.GetLinterConfigs(linterName) + if lcs == nil { + p.unknownLintersSet[linterName] = true + linters = append(linters, linterName) + nolintDebugf("unknown linter %s on line %d", linterName, fset.Position(g.Pos()).Line) + continue + } + + for _, lc := range lcs { + linters = append(linters, lc.Name()) // normalize name to work with aliases + } + } + + nolintDebugf("%d: linters are %s", fset.Position(g.Pos()).Line, linters) + return buildRange(linters) +} + +func (p Nolint) Finish() { + if len(p.unknownLintersSet) == 0 { + return + } + + unknownLinters := []string{} + for name := range p.unknownLintersSet { + unknownLinters = append(unknownLinters, name) + } + sort.Strings(unknownLinters) + + p.log.Warnf("Found unknown linters in //nolint directives: %s", strings.Join(unknownLinters, ", ")) +} + +// put nolintlint last +type sortWithNolintlintLast []result.Issue + +func (issues sortWithNolintlintLast) Len() int { + return len(issues) +} + +func (issues sortWithNolintlintLast) Less(i, j int) bool { + return issues[i].FromLinter != golinters.NolintlintName && issues[j].FromLinter == golinters.NolintlintName +} + +func (issues sortWithNolintlintLast) Swap(i, j int) { + issues[j], issues[i] = issues[i], issues[j] +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go new file mode 100644 index 000000000..5ce940b39 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go @@ -0,0 +1,37 @@ +package processors + +import ( + "path" + + "github.com/golangci/golangci-lint/pkg/result" +) + +// PathPrefixer adds a customizable prefix to every output path +type PathPrefixer struct { + prefix string +} + +var _ Processor = new(PathPrefixer) + +// NewPathPrefixer returns a new path prefixer for the provided string +func NewPathPrefixer(prefix string) *PathPrefixer { + return &PathPrefixer{prefix: prefix} +} + +// Name returns the name of this processor +func (*PathPrefixer) Name() string { + return "path_prefixer" +} + +// Process adds the prefix to each path +func (p *PathPrefixer) Process(issues []result.Issue) ([]result.Issue, error) { + if p.prefix != "" { + for i := range issues { + issues[i].Pos.Filename = path.Join(p.prefix, issues[i].Pos.Filename) + } + } + return issues, nil +} + +// Finish is implemented to satisfy the Processor interface +func (*PathPrefixer) Finish() {} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go new file mode 100644 index 000000000..3a140999c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go @@ -0,0 +1,48 @@ +package processors + +import ( + "fmt" + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type PathPrettifier struct { + root string +} + +var _ Processor = PathPrettifier{} + +func NewPathPrettifier() *PathPrettifier { + root, err := fsutils.Getwd() + if err != nil { + panic(fmt.Sprintf("Can't get working dir: %s", err)) + } + return &PathPrettifier{ + root: root, + } +} + +func (p PathPrettifier) Name() string { + return "path_prettifier" +} + +func (p PathPrettifier) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(i *result.Issue) *result.Issue { + if !filepath.IsAbs(i.FilePath()) { + return i + } + + rel, err := fsutils.ShortestRelPath(i.FilePath(), "") + if err != nil { + return i + } + + newI := i + newI.Pos.Filename = rel + return newI + }), nil +} + +func (p PathPrettifier) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go new file mode 100644 index 000000000..484f7f1f1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go @@ -0,0 +1,40 @@ +package processors + +import ( + "fmt" + "strings" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type PathShortener struct { + wd string +} + +var _ Processor = PathShortener{} + +func NewPathShortener() *PathShortener { + wd, err := fsutils.Getwd() + if err != nil { + panic(fmt.Sprintf("Can't get working dir: %s", err)) + } + return &PathShortener{ + wd: wd, + } +} + +func (p PathShortener) Name() string { + return "path_shortener" +} + +func (p PathShortener) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(i *result.Issue) *result.Issue { + newI := i + newI.Text = strings.Replace(newI.Text, p.wd+"/", "", -1) + newI.Text = strings.Replace(newI.Text, p.wd, "", -1) + return newI + }), nil +} + +func (p PathShortener) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/processor.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/processor.go new file mode 100644 index 000000000..1a7a40434 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/processor.go @@ -0,0 +1,11 @@ +package processors + +import ( + "github.com/golangci/golangci-lint/pkg/result" +) + +type Processor interface { + Process(issues []result.Issue) ([]result.Issue, error) + Name() string + Finish() +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go new file mode 100644 index 000000000..7c9a4c1d6 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go @@ -0,0 +1,103 @@ +package processors + +import ( + "regexp" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type severityRule struct { + 
baseRule + severity string +} + +type SeverityRule struct { + BaseRule + Severity string +} + +type SeverityRules struct { + defaultSeverity string + rules []severityRule + lineCache *fsutils.LineCache + log logutils.Log +} + +func NewSeverityRules(defaultSeverity string, rules []SeverityRule, lineCache *fsutils.LineCache, log logutils.Log) *SeverityRules { + r := &SeverityRules{ + lineCache: lineCache, + log: log, + defaultSeverity: defaultSeverity, + } + r.rules = createSeverityRules(rules, "(?i)") + + return r +} + +func createSeverityRules(rules []SeverityRule, prefix string) []severityRule { + parsedRules := make([]severityRule, 0, len(rules)) + for _, rule := range rules { + parsedRule := severityRule{} + parsedRule.linters = rule.Linters + parsedRule.severity = rule.Severity + if rule.Text != "" { + parsedRule.text = regexp.MustCompile(prefix + rule.Text) + } + if rule.Source != "" { + parsedRule.source = regexp.MustCompile(prefix + rule.Source) + } + if rule.Path != "" { + parsedRule.path = regexp.MustCompile(rule.Path) + } + parsedRules = append(parsedRules, parsedRule) + } + return parsedRules +} + +func (p SeverityRules) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.rules) == 0 && p.defaultSeverity == "" { + return issues, nil + } + return transformIssues(issues, func(i *result.Issue) *result.Issue { + for _, rule := range p.rules { + rule := rule + + ruleSeverity := p.defaultSeverity + if rule.severity != "" { + ruleSeverity = rule.severity + } + + if rule.match(i, p.lineCache, p.log) { + i.Severity = ruleSeverity + return i + } + } + i.Severity = p.defaultSeverity + return i + }), nil +} + +func (SeverityRules) Name() string { return "severity-rules" } +func (SeverityRules) Finish() {} + +var _ Processor = SeverityRules{} + +type SeverityRulesCaseSensitive struct { + *SeverityRules +} + +func NewSeverityRulesCaseSensitive(defaultSeverity string, rules []SeverityRule, + lineCache *fsutils.LineCache, log logutils.Log) *SeverityRulesCaseSensitive { + r := &SeverityRules{ + lineCache: lineCache, + log: log, + defaultSeverity: defaultSeverity, + } + r.rules = createSeverityRules(rules, "") + + return &SeverityRulesCaseSensitive{r} +} + +func (SeverityRulesCaseSensitive) Name() string { return "severity-rules-case-sensitive" } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go new file mode 100644 index 000000000..d657c5a04 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go @@ -0,0 +1,143 @@ +package processors + +import ( + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type skipStat struct { + pattern string + count int +} + +type SkipDirs struct { + patterns []*regexp.Regexp + log logutils.Log + skippedDirs map[string]*skipStat + absArgsDirs []string + skippedDirsCache map[string]bool +} + +var _ Processor = (*SkipDirs)(nil) + +const goFileSuffix = ".go" + +func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDirs, error) { + var patternsRe []*regexp.Regexp + for _, p := range patterns { + p = normalizePathInRegex(p) + patternRe, err := regexp.Compile(p) + if err != nil { + return nil, errors.Wrapf(err, "can't compile regexp %q", p) + } + patternsRe = append(patternsRe, patternRe) + } + + if len(runArgs) == 0 { + runArgs = append(runArgs, "./...") + 
} + var absArgsDirs []string + for _, arg := range runArgs { + base := filepath.Base(arg) + if base == "..." || strings.HasSuffix(base, goFileSuffix) { + arg = filepath.Dir(arg) + } + + absArg, err := filepath.Abs(arg) + if err != nil { + return nil, errors.Wrapf(err, "failed to abs-ify arg %q", arg) + } + absArgsDirs = append(absArgsDirs, absArg) + } + + return &SkipDirs{ + patterns: patternsRe, + log: log, + skippedDirs: map[string]*skipStat{}, + absArgsDirs: absArgsDirs, + skippedDirsCache: map[string]bool{}, + }, nil +} + +func (p *SkipDirs) Name() string { + return "skip_dirs" +} + +func (p *SkipDirs) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.patterns) == 0 { + return issues, nil + } + + return filterIssues(issues, p.shouldPassIssue), nil +} + +func (p *SkipDirs) shouldPassIssue(i *result.Issue) bool { + if filepath.IsAbs(i.FilePath()) { + if !isSpecialAutogeneratedFile(i.FilePath()) { + p.log.Warnf("Got abs path %s in skip dirs processor, it should be relative", i.FilePath()) + } + return true + } + + issueRelDir := filepath.Dir(i.FilePath()) + + if toPass, ok := p.skippedDirsCache[issueRelDir]; ok { + if !toPass { + p.skippedDirs[issueRelDir].count++ + } + return toPass + } + + issueAbsDir, err := filepath.Abs(issueRelDir) + if err != nil { + p.log.Warnf("Can't abs-ify path %q: %s", issueRelDir, err) + return true + } + + toPass := p.shouldPassIssueDirs(issueRelDir, issueAbsDir) + p.skippedDirsCache[issueRelDir] = toPass + return toPass +} + +func (p *SkipDirs) shouldPassIssueDirs(issueRelDir, issueAbsDir string) bool { + for _, absArgDir := range p.absArgsDirs { + if absArgDir == issueAbsDir { + // we must not skip issues if they are from explicitly set dirs + // even if they match skip patterns + return true + } + } + + // We use issueRelDir for matching: it's the relative to the current + // work dir path of directory of source file with the issue. It can lead + // to unexpected behavior if we're analyzing files out of current work dir. + // The alternative solution is to find relative to args path, but it has + // disadvantages (https://github.com/golangci/golangci-lint/pull/313). 
+ + for _, pattern := range p.patterns { + if pattern.MatchString(issueRelDir) { + ps := pattern.String() + if p.skippedDirs[issueRelDir] == nil { + p.skippedDirs[issueRelDir] = &skipStat{ + pattern: ps, + } + } + p.skippedDirs[issueRelDir].count++ + return false + } + } + + return true +} + +func (p *SkipDirs) Finish() { + for dir, stat := range p.skippedDirs { + p.log.Infof("Skipped %d issues from dir %s by pattern %s", stat.count, dir, stat.pattern) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go new file mode 100644 index 000000000..1e2ca7aeb --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go @@ -0,0 +1,52 @@ +package processors + +import ( + "fmt" + "regexp" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type SkipFiles struct { + patterns []*regexp.Regexp +} + +var _ Processor = (*SkipFiles)(nil) + +func NewSkipFiles(patterns []string) (*SkipFiles, error) { + var patternsRe []*regexp.Regexp + for _, p := range patterns { + p = normalizePathInRegex(p) + patternRe, err := regexp.Compile(p) + if err != nil { + return nil, fmt.Errorf("can't compile regexp %q: %s", p, err) + } + patternsRe = append(patternsRe, patternRe) + } + + return &SkipFiles{ + patterns: patternsRe, + }, nil +} + +func (p SkipFiles) Name() string { + return "skip_files" +} + +func (p SkipFiles) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.patterns) == 0 { + return issues, nil + } + + return filterIssues(issues, func(i *result.Issue) bool { + for _, p := range p.patterns { + if p.MatchString(i.FilePath()) { + return false + } + } + + return true + }), nil +} + +func (p SkipFiles) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go new file mode 100644 index 000000000..e726c3adf --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go @@ -0,0 +1,173 @@ +package processors + +import ( + "sort" + "strings" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/result" +) + +// Base propose of this functionality to sort results (issues) +// produced by various linters by analyzing code. We achieving this +// by sorting results.Issues using processor step, and chain based +// rules that can compare different properties of the Issues struct. + +var _ Processor = (*SortResults)(nil) + +type SortResults struct { + cmp comparator + cfg *config.Config +} + +func NewSortResults(cfg *config.Config) *SortResults { + // For sorting we are comparing (in next order): file names, line numbers, + // position, and finally - giving up. + return &SortResults{ + cmp: ByName{ + next: ByLine{ + next: ByColumn{}, + }, + }, + cfg: cfg, + } +} + +// Process is performing sorting of the result issues. 
+func (sr SortResults) Process(issues []result.Issue) ([]result.Issue, error) { + if !sr.cfg.Output.SortResults { + return issues, nil + } + + sort.Slice(issues, func(i, j int) bool { + return sr.cmp.Compare(&issues[i], &issues[j]) == Less + }) + + return issues, nil +} + +func (sr SortResults) Name() string { return "sort_results" } +func (sr SortResults) Finish() {} + +type compareResult int + +const ( + Less compareResult = iota - 1 + Equal + Greater + None +) + +func (c compareResult) isNeutral() bool { + // return true if compare result is incomparable or equal. + return c == None || c == Equal +} + +//nolint:exhaustive +func (c compareResult) String() string { + switch c { + case Less: + return "Less" + case Equal: + return "Equal" + case Greater: + return "Greater" + } + + return "None" +} + +// comparator describe how to implement compare for two "issues" lexicographically +type comparator interface { + Compare(a, b *result.Issue) compareResult + Next() comparator +} + +var ( + _ comparator = (*ByName)(nil) + _ comparator = (*ByLine)(nil) + _ comparator = (*ByColumn)(nil) +) + +type ByName struct{ next comparator } + +//nolint:golint +func (cmp ByName) Next() comparator { return cmp.next } + +//nolint:golint +func (cmp ByName) Compare(a, b *result.Issue) compareResult { + var res compareResult + + if res = compareResult(strings.Compare(a.FilePath(), b.FilePath())); !res.isNeutral() { + return res + } + + if next := cmp.Next(); next != nil { + return next.Compare(a, b) + } + + return res +} + +type ByLine struct{ next comparator } + +//nolint:golint +func (cmp ByLine) Next() comparator { return cmp.next } + +//nolint:golint +func (cmp ByLine) Compare(a, b *result.Issue) compareResult { + var res compareResult + + if res = numericCompare(a.Line(), b.Line()); !res.isNeutral() { + return res + } + + if next := cmp.Next(); next != nil { + return next.Compare(a, b) + } + + return res +} + +type ByColumn struct{ next comparator } + +//nolint:golint +func (cmp ByColumn) Next() comparator { return cmp.next } + +//nolint:golint +func (cmp ByColumn) Compare(a, b *result.Issue) compareResult { + var res compareResult + + if res = numericCompare(a.Column(), b.Column()); !res.isNeutral() { + return res + } + + if next := cmp.Next(); next != nil { + return next.Compare(a, b) + } + + return res +} + +func numericCompare(a, b int) compareResult { + var ( + isValuesInvalid = a < 0 || b < 0 + isZeroValuesBoth = a == 0 && b == 0 + isEqual = a == b + isZeroValueInA = b > 0 && a == 0 + isZeroValueInB = a > 0 && b == 0 + ) + + switch { + case isZeroValuesBoth || isEqual: + return Equal + case isValuesInvalid || isZeroValueInA || isZeroValueInB: + return None + case a > b: + return Greater + case a < b: + return Less + } + + return Equal +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go new file mode 100644 index 000000000..cfd73cb98 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go @@ -0,0 +1,47 @@ +package processors + +import ( + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type SourceCode struct { + lineCache *fsutils.LineCache + log logutils.Log +} + +var _ Processor = SourceCode{} + +func NewSourceCode(lc *fsutils.LineCache, log logutils.Log) *SourceCode { + return &SourceCode{ + lineCache: lc, + log: log, + } +} + +func (p 
SourceCode) Name() string { + return "source_code" +} + +func (p SourceCode) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(i *result.Issue) *result.Issue { + newI := *i + + lineRange := i.GetLineRange() + for lineNumber := lineRange.From; lineNumber <= lineRange.To; lineNumber++ { + line, err := p.lineCache.GetLine(i.FilePath(), lineNumber) + if err != nil { + p.log.Warnf("Failed to get line %d for file %s: %s", + lineNumber, i.FilePath(), err) + return i + } + + newI.SourceLines = append(newI.SourceLines, line) + } + + return &newI + }), nil +} + +func (p SourceCode) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go new file mode 100644 index 000000000..17167dde5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -0,0 +1,58 @@ +package processors + +import ( + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/result" +) + +type lineToCount map[int]int +type fileToLineToCount map[string]lineToCount + +type UniqByLine struct { + flc fileToLineToCount + cfg *config.Config +} + +func NewUniqByLine(cfg *config.Config) *UniqByLine { + return &UniqByLine{ + flc: fileToLineToCount{}, + cfg: cfg, + } +} + +var _ Processor = &UniqByLine{} + +func (p UniqByLine) Name() string { + return "uniq_by_line" +} + +func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { + if !p.cfg.Output.UniqByLine { + return issues, nil + } + + return filterIssues(issues, func(i *result.Issue) bool { + if i.Replacement != nil && p.cfg.Issues.NeedFix { + // if issue will be auto-fixed we shouldn't collapse issues: + // e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them. 
+ return true + } + + lc := p.flc[i.FilePath()] + if lc == nil { + lc = lineToCount{} + p.flc[i.FilePath()] = lc + } + + const limit = 1 + count := lc[i.Line()] + if count == limit { + return false + } + + lc[i.Line()]++ + return true + }), nil +} + +func (p UniqByLine) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go new file mode 100644 index 000000000..7108fd3b3 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go @@ -0,0 +1,62 @@ +package processors + +import ( + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + + "github.com/golangci/golangci-lint/pkg/result" +) + +func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []result.Issue { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + if filter(&issues[i]) { + retIssues = append(retIssues, issues[i]) + } + } + + return retIssues +} + +func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, error)) ([]result.Issue, error) { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + ok, err := filter(&issues[i]) + if err != nil { + return nil, errors.Wrapf(err, "can't filter issue %#v", issues[i]) + } + + if ok { + retIssues = append(retIssues, issues[i]) + } + } + + return retIssues, nil +} + +func transformIssues(issues []result.Issue, transform func(i *result.Issue) *result.Issue) []result.Issue { + retIssues := make([]result.Issue, 0, len(issues)) + for i := range issues { + newI := transform(&issues[i]) + if newI != nil { + retIssues = append(retIssues, *newI) + } + } + + return retIssues +} + +var separatorToReplace = regexp.QuoteMeta(string(filepath.Separator)) + +func normalizePathInRegex(path string) string { + if filepath.Separator == '/' { + return path + } + + // This replacing should be safe because "/" are disallowed in Windows + // https://docs.microsoft.com/ru-ru/windows/win32/fileio/naming-a-file + return strings.ReplaceAll(path, "/", separatorToReplace) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go new file mode 100644 index 000000000..cb89e34e0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go @@ -0,0 +1,17 @@ +package sliceutil + +// IndexOf get the index of the given value in the given string slice, +// or -1 if not found. +func IndexOf(slice []string, value string) int { + for i, v := range slice { + if v == value { + return i + } + } + return -1 +} + +// Contains check if a string slice contains a value. 
+func Contains(slice []string, value string) bool { + return IndexOf(slice, value) != -1 +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go new file mode 100644 index 000000000..9628bd80f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go @@ -0,0 +1,116 @@ +package timeutils + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +const noStagesText = "no stages" + +type Stopwatch struct { + name string + startedAt time.Time + stages map[string]time.Duration + log logutils.Log + + sync.Mutex +} + +func NewStopwatch(name string, log logutils.Log) *Stopwatch { + return &Stopwatch{ + name: name, + startedAt: time.Now(), + stages: map[string]time.Duration{}, + log: log, + } +} + +type stageDuration struct { + name string + d time.Duration +} + +func (s *Stopwatch) stageDurationsSorted() []stageDuration { + stageDurations := []stageDuration{} + for n, d := range s.stages { + stageDurations = append(stageDurations, stageDuration{ + name: n, + d: d, + }) + } + sort.Slice(stageDurations, func(i, j int) bool { + return stageDurations[i].d > stageDurations[j].d + }) + return stageDurations +} + +func (s *Stopwatch) sprintStages() string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + + stagesStrings := []string{} + for _, s := range stageDurations { + stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) + } + + return fmt.Sprintf("stages: %s", strings.Join(stagesStrings, ", ")) +} + +func (s *Stopwatch) sprintTopStages(n int) string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + + stagesStrings := []string{} + for i := 0; i < len(stageDurations) && i < n; i++ { + s := stageDurations[i] + stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) + } + + return fmt.Sprintf("top %d stages: %s", n, strings.Join(stagesStrings, ", ")) +} + +func (s *Stopwatch) Print() { + p := fmt.Sprintf("%s took %s", s.name, time.Since(s.startedAt)) + if len(s.stages) == 0 { + s.log.Infof("%s", p) + return + } + + s.log.Infof("%s with %s", p, s.sprintStages()) +} + +func (s *Stopwatch) PrintStages() { + var stagesDuration time.Duration + for _, s := range s.stages { + stagesDuration += s + } + s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintStages()) +} + +func (s *Stopwatch) PrintTopStages(n int) { + var stagesDuration time.Duration + for _, s := range s.stages { + stagesDuration += s + } + s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintTopStages(n)) +} + +func (s *Stopwatch) TrackStage(name string, f func()) { + startedAt := time.Now() + f() + + s.Lock() + s.stages[name] += time.Since(startedAt) + s.Unlock() +} diff --git a/vendor/github.com/golangci/lint-1/.travis.yml b/vendor/github.com/golangci/lint-1/.travis.yml new file mode 100644 index 000000000..bc2f4b311 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.11.x + - master + +go_import_path: github.com/golangci/lint-1 + +install: + - go get -t -v ./... + +script: + - go test -v -race ./... 
+ +matrix: + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/github.com/golangci/lint-1/CONTRIBUTING.md b/vendor/github.com/golangci/lint-1/CONTRIBUTING.md new file mode 100644 index 000000000..2e39a1c67 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to Golint + +## Before filing an issue: + +### Are you having trouble building golint? + +Check you have the latest version of its dependencies. Run +``` +go get -u github.com/golangci/lint-1/golint +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/github.com/golangci/lint-1/LICENSE b/vendor/github.com/golangci/lint-1/LICENSE new file mode 100644 index 000000000..65d761bc9 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golangci/lint-1/README.md b/vendor/github.com/golangci/lint-1/README.md new file mode 100644 index 000000000..2de6ee835 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/README.md @@ -0,0 +1,88 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires a +[supported release of Go](https://golang.org/doc/devel/release.html#policy). + + go get -u github.com/golangci/lint-1/golint + +To find out where `golint` was installed you can run `go list -f {{.Target}} github.com/golangci/lint-1/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. 
Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". +In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. + +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. + +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/github.com/golangci/lint-1/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). 
diff --git a/vendor/github.com/golangci/lint-1/go.mod b/vendor/github.com/golangci/lint-1/go.mod new file mode 100644 index 000000000..fafbd340b --- /dev/null +++ b/vendor/github.com/golangci/lint-1/go.mod @@ -0,0 +1,3 @@ +module github.com/golangci/lint-1 + +require golang.org/x/tools v0.0.0-20190311212946-11955173bddd diff --git a/vendor/github.com/golangci/lint-1/go.sum b/vendor/github.com/golangci/lint-1/go.sum new file mode 100644 index 000000000..7d0e2e618 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/go.sum @@ -0,0 +1,6 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/golangci/lint-1/lint.go b/vendor/github.com/golangci/lint-1/lint.go new file mode 100644 index 000000000..886c85bf0 --- /dev/null +++ b/vendor/github.com/golangci/lint-1/lint.go @@ -0,0 +1,1655 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. +package lint // import "github.com/golangci/lint-1" + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. +type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. 
+func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintPkg(files []*ast.File, fset *token.FileSet, typesPkg *types.Package, typesInfo *types.Info) ([]Problem, error) { + pkg := &pkg{ + fset: fset, + files: make(map[string]*file), + typesPkg: typesPkg, + typesInfo: typesInfo, + } + var pkgName string + for _, f := range files { + // use PositionFor, not Position because of //line directives: + // this filename will be used for source lines extraction. + filename := fset.PositionFor(f.Pos(), false).Filename + if filename == "" { + return nil, fmt.Errorf("no file name for file %+v", f) + } + + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + + // TODO: reuse golangci-lint lines cache + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("can't read file %s: %s", filename, err) + } + + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. + main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. 
+type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintElses() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. + if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. +func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. 
+ pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. 
+ if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +func (f *file) isCgo() bool { + if f.src == nil { + return false + } + newLinePos := bytes.Index(f.src, []byte("\n")) + if newLinePos < 0 { + return false + } + firstLine := string(f.src[:newLinePos]) + + // files using cgo have implicitly added comment "Created by cgo - DO NOT EDIT" for go <= 1.10 + // and "Code generated by cmd/cgo" for go >= 1.11 + return strings.Contains(firstLine, "Created by cgo") || strings.Contains(firstLine, "Code generated by cmd/cgo") +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() || f.isCgo() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. 
+ return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isInTopLevel(f *ast.File, ident *ast.Ident) bool { + path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) + for _, f := range path { + switch f.(type) { + case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: + continue + } + return false + } + return true +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + capCount := 0 + for _, c := range id.Name { + if 'A' <= c && c <= 'Z' { + capCount++ + } + } + if capCount >= 2 { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + } + if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := 
"func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. + if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). +func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. + ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if ignore[ifStmt] { + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement? 
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. +func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + if !f.imports("errors") { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
+func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. +func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". 
+func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. +// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + if isIdent(ret[len(ret)-1].Type, "error") { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. 
+func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix. 
+func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function return whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node. 
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// imports returns true if the current file imports the specified package path. +func (f *file) imports(importPath string) bool { + all := astutil.Imports(f.fset, f.f) + for _, p := range all { + for _, i := range p { + uq, err := strconv.Unquote(i.Path.Value) + if err == nil && importPath == uq { + return true + } + } + } + return false +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/vendor/github.com/golangci/maligned/LICENSE b/vendor/github.com/golangci/maligned/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/golangci/maligned/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/golangci/maligned/README b/vendor/github.com/golangci/maligned/README new file mode 100644 index 000000000..4e57f6eab --- /dev/null +++ b/vendor/github.com/golangci/maligned/README @@ -0,0 +1,7 @@ +Install: + + go get github.com/mdempsky/maligned + +Usage: + + maligned cmd/compile/internal/gc cmd/link/internal/ld diff --git a/vendor/github.com/golangci/maligned/maligned.go b/vendor/github.com/golangci/maligned/maligned.go new file mode 100644 index 000000000..c2492b2ff --- /dev/null +++ b/vendor/github.com/golangci/maligned/maligned.go @@ -0,0 +1,253 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package maligned + +import ( + "fmt" + "go/ast" + "go/build" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/loader" +) + +var fset = token.NewFileSet() + +type Issue struct { + OldSize, NewSize int + NewStructDef string + Pos token.Position +} + +func Run(prog *loader.Program) []Issue { + flagVerbose := true + fset = prog.Fset + + var issues []Issue + + for _, pkg := range prog.InitialPackages() { + for _, file := range pkg.Files { + ast.Inspect(file, func(node ast.Node) bool { + if s, ok := node.(*ast.StructType); ok { + i := malign(node.Pos(), pkg.Types[s].Type.(*types.Struct), flagVerbose) + if i != nil { + issues = append(issues, *i) + } + } + return true + }) + } + } + + return issues +} + +func malign(pos token.Pos, str *types.Struct, verbose bool) *Issue { + wordSize := int64(8) + maxAlign := int64(8) + switch build.Default.GOARCH { + case "386", "arm": + wordSize, maxAlign = 4, 4 + case "amd64p32": + wordSize = 4 + } + + s := gcSizes{wordSize, maxAlign} + sz := s.Sizeof(str) + opt, fields := optimalSize(str, &s, verbose) + if sz == opt { + return nil + } + + newStructDefParts := []string{"struct{"} + + var w int + for _, f := range fields { + if n := len(f.Name()); n > w { + w = n + } + } + spaces := strings.Repeat(" ", w) + for _, f := range fields { + line := fmt.Sprintf("\t%s%s\t%s,", f.Name(), spaces[len(f.Name()):], f.Type().String()) + newStructDefParts = append(newStructDefParts, line) + } + newStructDefParts = append(newStructDefParts, "}") + + return &Issue{ + OldSize: int(sz), + NewSize: int(opt), + NewStructDef: strings.Join(newStructDefParts, "\n"), + Pos: fset.Position(pos), + } +} + +func optimalSize(str *types.Struct, sizes *gcSizes, stable bool) (int64, []*types.Var) { + nf := str.NumFields() + fields := make([]*types.Var, nf) + alignofs := make([]int64, nf) + sizeofs := make([]int64, nf) + for i := 0; i < nf; i++ { + fields[i] = str.Field(i) + ft := fields[i].Type() + alignofs[i] = sizes.Alignof(ft) + sizeofs[i] = sizes.Sizeof(ft) + } + if stable { // Stable keeps as much of the order as possible, but slower + sort.Stable(&byAlignAndSize{fields, alignofs, sizeofs}) + } else { + sort.Sort(&byAlignAndSize{fields, alignofs, sizeofs}) + } + return sizes.Sizeof(types.NewStruct(fields, nil)), fields +} + +type byAlignAndSize struct { + fields []*types.Var + alignofs []int64 + sizeofs []int64 +} + +func (s *byAlignAndSize) Len() int { return len(s.fields) } +func (s *byAlignAndSize) Swap(i, j int) { + s.fields[i], s.fields[j] = s.fields[j], s.fields[i] + s.alignofs[i], s.alignofs[j] = s.alignofs[j], s.alignofs[i] + s.sizeofs[i], s.sizeofs[j] = s.sizeofs[j], s.sizeofs[i] +} + +func (s *byAlignAndSize) Less(i, j int) bool { + // Place zero sized objects before non-zero sized objects. 
+ if s.sizeofs[i] == 0 && s.sizeofs[j] != 0 { + return true + } + if s.sizeofs[j] == 0 && s.sizeofs[i] != 0 { + return false + } + + // Next, place more tightly aligned objects before less tightly aligned objects. + if s.alignofs[i] != s.alignofs[j] { + return s.alignofs[i] > s.alignofs[j] + } + + // Lastly, order by size. + if s.sizeofs[i] != s.sizeofs[j] { + return s.sizeofs[i] > s.sizeofs[j] + } + + return false +} + +// Code below based on go/types.StdSizes. + +type gcSizes struct { + WordSize int64 + MaxAlign int64 +} + +func (s *gcSizes) Alignof(T types.Type) int64 { + // NOTE: On amd64, complex64 is 8 byte aligned, + // even though float32 is only 4 byte aligned. + + // For arrays and structs, alignment is defined in terms + // of alignment of the elements and fields, respectively. + switch t := T.Underlying().(type) { + case *types.Array: + // spec: "For a variable x of array type: unsafe.Alignof(x) + // is the same as unsafe.Alignof(x[0]), but at least 1." + return s.Alignof(t.Elem()) + case *types.Struct: + // spec: "For a variable x of struct type: unsafe.Alignof(x) + // is the largest of the values unsafe.Alignof(x.f) for each + // field f of x, but at least 1." + max := int64(1) + for i, nf := 0, t.NumFields(); i < nf; i++ { + if a := s.Alignof(t.Field(i).Type()); a > max { + max = a + } + } + return max + } + a := s.Sizeof(T) // may be 0 + // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." + if a < 1 { + return 1 + } + if a > s.MaxAlign { + return s.MaxAlign + } + return a +} + +var basicSizes = [...]byte{ + types.Bool: 1, + types.Int8: 1, + types.Int16: 2, + types.Int32: 4, + types.Int64: 8, + types.Uint8: 1, + types.Uint16: 2, + types.Uint32: 4, + types.Uint64: 8, + types.Float32: 4, + types.Float64: 8, + types.Complex64: 8, + types.Complex128: 16, +} + +func (s *gcSizes) Sizeof(T types.Type) int64 { + switch t := T.Underlying().(type) { + case *types.Basic: + k := t.Kind() + if int(k) < len(basicSizes) { + if s := basicSizes[k]; s > 0 { + return int64(s) + } + } + if k == types.String { + return s.WordSize * 2 + } + case *types.Array: + n := t.Len() + if n == 0 { + return 0 + } + a := s.Alignof(t.Elem()) + z := s.Sizeof(t.Elem()) + return align(z, a)*(n-1) + z + case *types.Slice: + return s.WordSize * 3 + case *types.Struct: + nf := t.NumFields() + if nf == 0 { + return 0 + } + + var o int64 + max := int64(1) + for i := 0; i < nf; i++ { + ft := t.Field(i).Type() + a, sz := s.Alignof(ft), s.Sizeof(ft) + if a > max { + max = a + } + if i == nf-1 && sz == 0 && o != 0 { + sz = 1 + } + o = align(o, a) + sz + } + return align(o, max) + case *types.Interface: + return s.WordSize * 2 + } + return s.WordSize // catch-all +} + +// align returns the smallest y >= x such that y % a == 0. 
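+// For example, align(13, 8) returns 16 and align(16, 8) returns 16.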
+func align(x, a int64) int64 { + y := x + a - 1 + return y - y%a +} diff --git a/vendor/github.com/golangci/misspell/.gitignore b/vendor/github.com/golangci/misspell/.gitignore new file mode 100644 index 000000000..b1b707e32 --- /dev/null +++ b/vendor/github.com/golangci/misspell/.gitignore @@ -0,0 +1,34 @@ +dist/ +bin/ +vendor/ + +# editor turds +*~ +*.gz +*.bz2 +*.csv + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/golangci/misspell/.travis.yml b/vendor/github.com/golangci/misspell/.travis.yml new file mode 100644 index 000000000..e63e6c2bd --- /dev/null +++ b/vendor/github.com/golangci/misspell/.travis.yml @@ -0,0 +1,20 @@ +sudo: required +dist: trusty +group: edge +language: go +go: + - "1.10" +git: + depth: 1 + +script: + - ./scripts/travis.sh + +# calls goreleaser when a new tag is pushed +deploy: +- provider: script + skip_cleanup: true + script: curl -sL http://git.io/goreleaser | bash + on: + tags: true + condition: $TRAVIS_OS_NAME = linux diff --git a/vendor/github.com/golangci/misspell/Dockerfile b/vendor/github.com/golangci/misspell/Dockerfile new file mode 100644 index 000000000..b8ea37b4c --- /dev/null +++ b/vendor/github.com/golangci/misspell/Dockerfile @@ -0,0 +1,37 @@ +FROM golang:1.10.0-alpine + +# cache buster +RUN echo 4 + +# git is needed for "go get" below +RUN apk add --no-cache git make + +# these are my standard testing / linting tools +RUN /bin/true \ + && go get -u github.com/golang/dep/cmd/dep \ + && go get -u github.com/alecthomas/gometalinter \ + && gometalinter --install \ + && rm -rf /go/src /go/pkg +# +# * SCOWL word list +# +# Downloads +# http://wordlist.aspell.net/dicts/ +# --> http://app.aspell.net/create +# + +# use en_US large size +# use regular size for others +ENV SOURCE_US_BIG http://app.aspell.net/create?max_size=70&spelling=US&max_variant=2&diacritic=both&special=hacker&special=roman-numerals&download=wordlist&encoding=utf-8&format=inline + +# should be able tell difference between English variations using this +ENV SOURCE_US http://app.aspell.net/create?max_size=60&spelling=US&max_variant=1&diacritic=both&download=wordlist&encoding=utf-8&format=inline +ENV SOURCE_GB_ISE http://app.aspell.net/create?max_size=60&spelling=GBs&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline +ENV SOURCE_GB_IZE http://app.aspell.net/create?max_size=60&spelling=GBz&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline +ENV SOURCE_CA http://app.aspell.net/create?max_size=60&spelling=CA&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline + +RUN /bin/true \ + && mkdir /scowl-wl \ + && wget -O /scowl-wl/words-US-60.txt ${SOURCE_US} \ + && wget -O /scowl-wl/words-GB-ise-60.txt ${SOURCE_GB_ISE} + diff --git a/vendor/github.com/golangci/misspell/Gopkg.lock b/vendor/github.com/golangci/misspell/Gopkg.lock new file mode 100644 index 000000000..90ed45115 --- /dev/null +++ b/vendor/github.com/golangci/misspell/Gopkg.lock @@ -0,0 +1,24 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/gobwas/glob" + packages = [ + ".", + "compiler", + "match", + "syntax", + "syntax/ast", + "syntax/lexer", + "util/runes", + "util/strings" + ] + revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" + version = "v0.2.3" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "087ea4c49358ea8258ad9edfe514cd5ce9975c889c258e5ec7b5d2b720aae113" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/golangci/misspell/Gopkg.toml b/vendor/github.com/golangci/misspell/Gopkg.toml new file mode 100644 index 000000000..e9b8e6a45 --- /dev/null +++ b/vendor/github.com/golangci/misspell/Gopkg.toml @@ -0,0 +1,34 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/gobwas/glob" + version = "0.2.3" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/golangci/misspell/LICENSE b/vendor/github.com/golangci/misspell/LICENSE new file mode 100644 index 000000000..423e1f9e0 --- /dev/null +++ b/vendor/github.com/golangci/misspell/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/golangci/misspell/Makefile b/vendor/github.com/golangci/misspell/Makefile new file mode 100644 index 000000000..862ab77b0 --- /dev/null +++ b/vendor/github.com/golangci/misspell/Makefile @@ -0,0 +1,74 @@ +CONTAINER=nickg/misspell + +install: ## install misspell into GOPATH/bin + go install ./cmd/misspell + +build: hooks ## build and lint misspell + ./scripts/build.sh + +test: ## run all tests + go test . + +# real publishing is done only by travis +publish: ## test goreleaser + ./scripts/goreleaser-dryrun.sh + +# the grep in line 2 is to remove misspellings in the spelling dictionary +# that trigger false positives!! 
+falsepositives: /scowl-wl + cat /scowl-wl/words-US-60.txt | \ + grep -i -v -E "payed|Tyre|Euclidian|nonoccurence|dependancy|reenforced|accidently|surprize|dependance|idealogy|binominal|causalities|conquerer|withing|casette|analyse|analogue|dialogue|paralyse|catalogue|archaeolog|clarinettist|catalyses|cancell|chisell|ageing|cataloguing" | \ + misspell -debug -error + cat /scowl-wl/words-GB-ise-60.txt | \ + grep -v -E "payed|nonoccurence|withing" | \ + misspell -locale=UK -debug -error +# cat /scowl-wl/words-GB-ize-60.txt | \ +# grep -v -E "withing" | \ +# misspell -debug -error +# cat /scowl-wl/words-CA-60.txt | \ +# grep -v -E "withing" | \ +# misspell -debug -error + +bench: ## run benchmarks + go test -bench '.*' + +clean: ## clean up time + rm -rf dist/ bin/ + go clean ./... + git gc --aggressive + +ci: ## run test like travis-ci does, requires docker + docker run --rm \ + -v $(PWD):/go/src/github.com/client9/misspell \ + -w /go/src/github.com/client9/misspell \ + ${CONTAINER} \ + make build falsepositives + +docker-build: ## build a docker test image + docker build -t ${CONTAINER} . + +docker-pull: ## pull latest test image + docker pull ${CONTAINER} + +docker-console: ## log into the test image + docker run --rm -it \ + -v $(PWD):/go/src/github.com/client9/misspell \ + -w /go/src/github.com/client9/misspell \ + ${CONTAINER} sh + +.git/hooks/pre-commit: scripts/pre-commit.sh + cp -f scripts/pre-commit.sh .git/hooks/pre-commit +.git/hooks/commit-msg: scripts/commit-msg.sh + cp -f scripts/commit-msg.sh .git/hooks/commit-msg +hooks: .git/hooks/pre-commit .git/hooks/commit-msg ## install git precommit hooks + +.PHONY: help ci console docker-build bench + +# https://www.client9.com/self-documenting-makefiles/ +help: + @awk -F ':|##' '/^[^\t].+?:.*?##/ {\ + printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF \ + }' $(MAKEFILE_LIST) +.DEFAULT_GOAL=help +.PHONY=help + diff --git a/vendor/github.com/golangci/misspell/README.md b/vendor/github.com/golangci/misspell/README.md new file mode 100644 index 000000000..5b68af04d --- /dev/null +++ b/vendor/github.com/golangci/misspell/README.md @@ -0,0 +1,424 @@ +[![Build Status](https://travis-ci.org/client9/misspell.svg?branch=master)](https://travis-ci.org/client9/misspell) [![Go Report Card](https://goreportcard.com/badge/github.com/client9/misspell)](https://goreportcard.com/report/github.com/client9/misspell) [![GoDoc](https://godoc.org/github.com/client9/misspell?status.svg)](https://godoc.org/github.com/client9/misspell) [![Coverage](http://gocover.io/_badge/github.com/client9/misspell)](http://gocover.io/github.com/client9/misspell) [![license](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](https://raw.githubusercontent.com/client9/misspell/master/LICENSE) + +Correct commonly misspelled English words... quickly. + +### Install + + +If you just want a binary and to start using `misspell`: + +``` +curl -L -o ./install-misspell.sh https://git.io/misspell +sh ./install-misspell.sh +``` + + +Both will install as `./bin/misspell`. You can adjust the download location using the `-b` flag. File a ticket if you want another platform supported. + + +If you use [Go](https://golang.org/), the best way to run `misspell` is by using [gometalinter](#gometalinter). 
Otherwise, install `misspell` the old-fashioned way: + +``` +go get -u github.com/client9/misspell/cmd/misspell +``` + +and misspell will be in your `GOPATH` + + +Also if you like to live dangerously, one could do + +```bash +curl -L https://git.io/misspell | bash +``` + +### Usage + + +```bash +$ misspell all.html your.txt important.md files.go +your.txt:42:10 found "langauge" a misspelling of "language" + +# ^ file, line, column +``` + +``` +$ misspell -help +Usage of misspell: + -debug + Debug matching, very slow + -error + Exit with 2 if misspelling found + -f string + 'csv', 'sqlite3' or custom Golang template for output + -i string + ignore the following corrections, comma separated + -j int + Number of workers, 0 = number of CPUs + -legal + Show legal information and exit + -locale string + Correct spellings using locale perferances for US or UK. Default is to use a neutral variety of English. Setting locale to US will correct the British spelling of 'colour' to 'color' + -o string + output file or [stderr|stdout|] (default "stdout") + -q Do not emit misspelling output + -source string + Source mode: auto=guess, go=golang source, text=plain or markdown-like text (default "auto") + -w Overwrite file with corrections (default is just to display) +``` + +## FAQ + +* [Automatic Corrections](#correct) +* [Converting UK spellings to US](#locale) +* [Using pipes and stdin](#stdin) +* [Golang special support](#golang) +* [gometalinter support](#gometalinter) +* [CSV Output](#csv) +* [Using SQLite3](#sqlite) +* [Changing output format](#output) +* [Checking a folder recursively](#recursive) +* [Performance](#performance) +* [Known Issues](#issues) +* [Debugging](#debug) +* [False Negatives and missing words](#missing) +* [Origin of Word Lists](#words) +* [Software License](#license) +* [Problem statement](#problem) +* [Other spelling correctors](#others) +* [Other ideas](#otherideas) + + +### How can I make the corrections automatically? + +Just add the `-w` flag! + +``` +$ misspell -w all.html your.txt important.md files.go +your.txt:9:21:corrected "langauge" to "language" + +# ^ File is rewritten only if a misspelling is found +``` + + +### How do I convert British spellings to American (or vice-versa)? + +Add the `-locale US` flag! + +```bash +$ misspell -locale US important.txt +important.txt:10:20 found "colour" a misspelling of "color" +``` + +Add the `-locale UK` flag! + +```bash +$ echo "My favorite color is blue" | misspell -locale UK +stdin:1:3:found "favorite color" a misspelling of "favourite colour" +``` + +Help is appreciated as I'm neither British nor an +expert in the English language. + + +### How do you check an entire folder recursively? + +Just list a directory you'd like to check + +```bash +misspell . +misspell aDirectory anotherDirectory aFile +``` + +You can also run misspell recursively using the following shell tricks: + +```bash +misspell directory/**/* +``` + +or + +```bash +find . -type f | xargs misspell +``` + +You can select a type of file as well. The following examples selects all `.txt` files that are *not* in the `vendor` directory: + +```bash +find . -type f -name '*.txt' | grep -v vendor/ | xargs misspell -error +``` + + +### Can I use pipes or `stdin` for input? + +Yes! 
+ +Print messages to `stderr` only: + +```bash +$ echo "zeebra" | misspell +stdin:1:0:found "zeebra" a misspelling of "zebra" +``` + +Print messages to `stderr`, and corrected text to `stdout`: + +```bash +$ echo "zeebra" | misspell -w +stdin:1:0:corrected "zeebra" to "zebra" +zebra +``` + +Only print the corrected text to `stdout`: + +```bash +$ echo "zeebra" | misspell -w -q +zebra +``` + + +### Are there special rules for golang source files? + +Yes! If the file ends in `.go`, then misspell will only check spelling in +comments. + +If you want to force a file to be checked as a golang source, use `-source=go` +on the command line. Conversely, you can check a golang source as if it were +pure text by using `-source=text`. You might want to do this since many +variable names have misspellings in them! + +### Can I check only-comments in other other programming languages? + +I'm told the using `-source=go` works well for ruby, javascript, java, c and +c++. + +It doesn't work well for python and bash. + + +### Does this work with gometalinter? + +[gometalinter](https://github.com/alecthomas/gometalinter) runs +multiple golang linters. Starting on [2016-06-12](https://github.com/alecthomas/gometalinter/pull/134) +gometalinter supports `misspell` natively but it is disabled by default. + +```bash +# update your copy of gometalinter +go get -u github.com/alecthomas/gometalinter + +# install updates and misspell +gometalinter --install --update +``` + +To use, just enable `misspell` + +``` +gometalinter --enable misspell ./... +``` + +Note that gometalinter only checks golang files, and uses the default options +of `misspell` + +You may wish to run this on your plaintext (.txt) and/or markdown files too. + + + +### How Can I Get CSV Output? + +Using `-f csv`, the output is standard comma-seprated values with headers in the first row. + +``` +misspell -f csv * +file,line,column,typo,corrected +"README.md",9,22,langauge,language +"README.md",47,25,langauge,language +``` + + +### How can I export to SQLite3? + +Using `-f sqlite`, the output is a [sqlite3](https://www.sqlite.org/index.html) dump-file. + +```bash +$ misspell -f sqlite * > /tmp/misspell.sql +$ cat /tmp/misspell.sql + +PRAGMA foreign_keys=OFF; +BEGIN TRANSACTION; +CREATE TABLE misspell( + "file" TEXT, + "line" INTEGER,i + "column" INTEGER,i + "typo" TEXT, + "corrected" TEXT +); +INSERT INTO misspell VALUES("install.txt",202,31,"immediatly","immediately"); +# etc... +COMMIT; +``` + +```bash +$ sqlite3 -init /tmp/misspell.sql :memory: 'select count(*) from misspell' +1 +``` + +With some tricks you can directly pipe output to sqlite3 by using `-init /dev/stdin`: + +``` +misspell -f sqlite * | sqlite3 -init /dev/stdin -column -cmd '.width 60 15' ':memory' \ + 'select substr(file,35),typo,count(*) as count from misspell group by file, typo order by count desc;' +``` + + +### How can I ignore rules? + +Using the `-i "comma,separated,rules"` flag you can specify corrections to ignore. + +For example, if you were to run `misspell -w -error -source=text` against document that contains the string `Guy Finkelshteyn Braswell`, misspell would change the text to `Guy Finkelstheyn Bras well`. You can then +determine the rules to ignore by reverting the change and running the with the `-debug` flag. You can then see +that the corrections were `htey -> they` and `aswell -> as well`. To ignore these two rules, you add `-i "htey,aswell"` to +your command. With debug mode on, you can see it print the corrections, but it will no longer make them. 
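+
+The same ignore behaviour is available when embedding misspell as a Go
+library. Below is a minimal, hypothetical sketch (the import path follows the
+vendored fork, and the sample text assumes "langauge" is a rule in the bundled
+dictionary, as the examples above show):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/golangci/misspell"
+)
+
+func main() {
+	// Equivalent of `misspell -i "langauge"`: drop the rule, then recompile.
+	r := misspell.New()
+	r.RemoveRule([]string{"langauge"})
+	r.Compile() // removing rules invalidates the compiled engine
+
+	fixed, diffs := r.Replace("this langauge stays as-is")
+	fmt.Println(fixed, len(diffs)) // unchanged text, 0 corrections
+}
+```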
+ + +### How can I change the output format? + +Using the `-f template` flag you can pass in a +[golang text template](https://golang.org/pkg/text/template/) to format the output. + +One can use `printf "%q" VALUE` to safely quote a value. + +The default template is compatible with [gometalinter](https://github.com/alecthomas/gometalinter) +``` +{{ .Filename }}:{{ .Line }}:{{ .Column }}:corrected {{ printf "%q" .Original }} to "{{ printf "%q" .Corrected }}" +``` + +To just print probable misspellings: + +``` +-f '{{ .Original }}' +``` + + +### What problem does this solve? + +This corrects commonly misspelled English words in computer source +code, and other text-based formats (`.txt`, `.md`, etc). + +It is designed to run quickly so it can be +used as a [pre-commit hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) +with minimal burden on the developer. + +It does not work with binary formats (e.g. Word, etc). + +It is not a complete spell-checking program nor a grammar checker. + + +### What are other misspelling correctors and what's wrong with them? + +Some other misspelling correctors: + +* https://github.com/vlajos/misspell_fixer +* https://github.com/lyda/misspell-check +* https://github.com/lucasdemarchi/codespell + +They all work but had problems that prevented me from using them at scale: + +* slow, all of the above check one misspelling at a time (i.e. linear) using regexps +* not MIT/Apache2 licensed (or equivalent) +* have dependencies that don't work for me (python3, bash, linux sed, etc) +* don't understand American vs. British English and sometimes makes unwelcome "corrections" + +That said, they might be perfect for you and many have more features +than this project! + + +### How fast is it? + +Misspell is easily 100x to 1000x faster than other spelling correctors. You +should be able to check and correct 1000 files in under 250ms. + +This uses the mighty power of golang's +[strings.Replacer](https://golang.org/pkg/strings/#Replacer) which is +a implementation or variation of the +[Aho–Corasick algorithm](https://en.wikipedia.org/wiki/Aho–Corasick_algorithm). +This makes multiple substring matches *simultaneously*. + +In addition this uses multiple CPU cores to work on multiple files. + + +### What problems does it have? + +Unlike the other projects, this doesn't know what a "word" is. There may be +more false positives and false negatives due to this. On the other hand, it +sometimes catches things others don't. + +Either way, please file bugs and we'll fix them! + +Since it operates in parallel to make corrections, it can be non-obvious to +determine exactly what word was corrected. + + +### It's making mistakes. How can I debug? + +Run using `-debug` flag on the file you want. It should then print what word +it is trying to correct. Then [file a +bug](https://github.com/client9/misspell/issues) describing the problem. +Thanks! + + +### Why is it making mistakes or missing items in golang files? + +The matching function is *case-sensitive*, so variable names that are multiple +worlds either in all-upper or all-lower case sometimes can cause false +positives. For instance a variable named `bodyreader` could trigger a false +positive since `yrea` is in the middle that could be corrected to `year`. +Other problems happen if the variable name uses a English contraction that +should use an apostrophe. 
The best way of fixing this is to use the +[Effective Go naming +conventions](https://golang.org/doc/effective_go.html#mixed-caps) and use +[camelCase](https://en.wikipedia.org/wiki/CamelCase) for variable names. You +can check your code using [golint](https://github.com/golang/lint) + + +### What license is this? + +The main code is [MIT](https://github.com/client9/misspell/blob/master/LICENSE). + +Misspell also makes uses of the Golang standard library and contains a modified version of Golang's [strings.Replacer](https://golang.org/pkg/strings/#Replacer) +which are covered under a [BSD License](https://github.com/golang/go/blob/master/LICENSE). Type `misspell -legal` for more details or see [legal.go](https://github.com/client9/misspell/blob/master/legal.go) + + +### Where do the word lists come from? + +It started with a word list from +[Wikipedia](https://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/For_machines). +Unfortunately, this list had to be highly edited as many of the words are +obsolete or based from mistakes on mechanical typewriters (I'm guessing). + +Additional words were added based on actually mistakes seen in +the wild (meaning self-generated). + +Variations of UK and US spellings are based on many sources including: + +* http://www.tysto.com/uk-us-spelling-list.html (with heavy editing, many are incorrect) +* http://www.oxforddictionaries.com/us/words/american-and-british-spelling-american (excellent site but incomplete) +* Diffing US and UK [scowl dictionaries](http://wordlist.aspell.net) + +American English is more accepting of spelling variations than is British +English, so "what is American or not" is subject to opinion. Corrections and help welcome. + + +### What are some other enhancements that could be done? + +Here's some ideas for enhancements: + +*Capitalization of proper nouns* could be done (e.g. weekday and month names, country names, language names) + +*Opinionated US spellings* US English has a number of words with alternate +spellings. Think [adviser vs. +advisor](http://grammarist.com/spelling/adviser-advisor/). While "advisor" is not wrong, the opinionated US +locale would correct "advisor" to "adviser". + +*Versioning* Some type of versioning is needed so reporting mistakes and errors is easier. + +*Feedback* Mistakes would be sent to some server for agregation and feedback review. + +*Contractions and Apostrophes* This would optionally correct "isnt" to +"isn't", etc. diff --git a/vendor/github.com/golangci/misspell/RELEASE-HOWTO.md b/vendor/github.com/golangci/misspell/RELEASE-HOWTO.md new file mode 100644 index 000000000..55b52d962 --- /dev/null +++ b/vendor/github.com/golangci/misspell/RELEASE-HOWTO.md @@ -0,0 +1,38 @@ +# Release HOWTO + +since I forget. + + +1. Review existing tags and pick new release number + + ```sh + git tag + ``` + +2. Tag locally + + ```sh + git tag -a v0.1.0 -m "First release" + ``` + + If things get screwed up, delete the tag with + + ```sh + git tag -d v0.1.0 + ``` + +3. Test goreleaser + + TODO: how to install goreleaser + + ```sh + ./scripts/goreleaser-dryrun.sh + ``` + +4. Push + + ```bash + git push origin v0.1.0 + ``` + +5. Verify release and edit notes. 
See https://github.com/client9/misspell/releases diff --git a/vendor/github.com/golangci/misspell/ascii.go b/vendor/github.com/golangci/misspell/ascii.go new file mode 100644 index 000000000..1430718d6 --- /dev/null +++ b/vendor/github.com/golangci/misspell/ascii.go @@ -0,0 +1,62 @@ +package misspell + +// ByteToUpper converts an ascii byte to upper cases +// Uses a branchless algorithm +func ByteToUpper(x byte) byte { + b := byte(0x80) | x + c := b - byte(0x61) + d := ^(b - byte(0x7b)) + e := (c & d) & (^x & 0x7f) + return x - (e >> 2) +} + +// ByteToLower converts an ascii byte to lower case +// uses a branchless algorithm +func ByteToLower(eax byte) byte { + ebx := eax&byte(0x7f) + byte(0x25) + ebx = ebx&byte(0x7f) + byte(0x1a) + ebx = ((ebx & ^eax) >> 2) & byte(0x20) + return eax + ebx +} + +// ByteEqualFold does ascii compare, case insensitive +func ByteEqualFold(a, b byte) bool { + return a == b || ByteToLower(a) == ByteToLower(b) +} + +// StringEqualFold ASCII case-insensitive comparison +// golang toUpper/toLower for both bytes and strings +// appears to be Unicode based which is super slow +// based from https://codereview.appspot.com/5180044/patch/14007/21002 +func StringEqualFold(s1, s2 string) bool { + if len(s1) != len(s2) { + return false + } + for i := 0; i < len(s1); i++ { + c1 := s1[i] + c2 := s2[i] + // c1 & c2 + if c1 != c2 { + c1 |= 'a' - 'A' + c2 |= 'a' - 'A' + if c1 != c2 || c1 < 'a' || c1 > 'z' { + return false + } + } + } + return true +} + +// StringHasPrefixFold is similar to strings.HasPrefix but comparison +// is done ignoring ASCII case. +// / +func StringHasPrefixFold(s1, s2 string) bool { + // prefix is bigger than input --> false + if len(s1) < len(s2) { + return false + } + if len(s1) == len(s2) { + return StringEqualFold(s1, s2) + } + return StringEqualFold(s1[:len(s2)], s2) +} diff --git a/vendor/github.com/golangci/misspell/case.go b/vendor/github.com/golangci/misspell/case.go new file mode 100644 index 000000000..2ea3850df --- /dev/null +++ b/vendor/github.com/golangci/misspell/case.go @@ -0,0 +1,59 @@ +package misspell + +import ( + "strings" +) + +// WordCase is an enum of various word casing styles +type WordCase int + +// Various WordCase types.. 
likely to be not correct +const ( + CaseUnknown WordCase = iota + CaseLower + CaseUpper + CaseTitle +) + +// CaseStyle returns what case style a word is in +func CaseStyle(word string) WordCase { + upperCount := 0 + lowerCount := 0 + + // this iterates over RUNES not BYTES + for i := 0; i < len(word); i++ { + ch := word[i] + switch { + case ch >= 'a' && ch <= 'z': + lowerCount++ + case ch >= 'A' && ch <= 'Z': + upperCount++ + } + } + + switch { + case upperCount != 0 && lowerCount == 0: + return CaseUpper + case upperCount == 0 && lowerCount != 0: + return CaseLower + case upperCount == 1 && lowerCount > 0 && word[0] >= 'A' && word[0] <= 'Z': + return CaseTitle + } + return CaseUnknown +} + +// CaseVariations returns +// If AllUpper or First-Letter-Only is upcased: add the all upper case version +// If AllLower, add the original, the title and upcase forms +// If Mixed, return the original, and the all upcase form +// +func CaseVariations(word string, style WordCase) []string { + switch style { + case CaseLower: + return []string{word, strings.ToUpper(word[0:1]) + word[1:], strings.ToUpper(word)} + case CaseUpper: + return []string{strings.ToUpper(word)} + default: + return []string{word, strings.ToUpper(word)} + } +} diff --git a/vendor/github.com/golangci/misspell/goreleaser.yml b/vendor/github.com/golangci/misspell/goreleaser.yml new file mode 100644 index 000000000..560cb3810 --- /dev/null +++ b/vendor/github.com/golangci/misspell/goreleaser.yml @@ -0,0 +1,38 @@ +# goreleaser.yml +# https://github.com/goreleaser/goreleaser + +project_name: misspell + +builds: + - + main: cmd/misspell/main.go + binary: misspell + ldflags: -s -w -X main.version={{.Version}} + goos: + - darwin + - linux + - windows + goarch: + - amd64 + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: windows + goarch: 386 + +archive: + name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + replacements: + amd64: 64bit + 386: 32bit + darwin: mac + files: + - none* + +checksum: + name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt" + +snapshot: + name_template: "SNAPSHOT-{{.Commit}}" diff --git a/vendor/github.com/golangci/misspell/install-misspell.sh b/vendor/github.com/golangci/misspell/install-misspell.sh new file mode 100644 index 000000000..e24a84a20 --- /dev/null +++ b/vendor/github.com/golangci/misspell/install-misspell.sh @@ -0,0 +1,362 @@ +#!/bin/sh +set -e +# Code generated by godownloader. DO NOT EDIT. 
+# + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -ge "$_logp" ] +} +log_debug() { + log_priority 7 && echoerr "$(log_prefix)" "DEBUG" "$@" +} +log_info() { + log_priority 6 && echoerr "$(log_prefix)" "INFO" "$@" +} +log_err() { + log_priority 3 && echoerr "$(log_prefix)" "ERR" "$@" +} +log_crit() { + log_priority 2 && echoerr "$(log_prefix)" "CRIT" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + msys_nt) os="windows" ;; + esac + echo "$os" +} +uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="arm5" ;; + armv6*) arch="arm6" ;; + armv7*) arch="arm7" ;; + esac + echo ${arch} +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar -xzf "${tarball}" ;; + *.tar) tar -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +mktmpdir() { + test -z "$TMPDIR" && TMPDIR="$(mktemp -d)" + mkdir -p "${TMPDIR}" + echo "${TMPDIR}" +} +http_download() { + local_file=$1 + source_url=$2 + header=$3 + headerflag='' + destflag='' + if is_command curl; then + cmd='curl --fail -sSL' + destflag='-o' + headerflag='-H' + elif is_command wget; then + cmd='wget -q' + destflag='-O' + headerflag='--header' + else + log_crit "http_download unable to find wget or curl" + return 1 + fi + if [ -z "$header" ]; then + $cmd $destflag "$local_file" "$source_url" + else + $cmd $headerflag "$header" $destflag "$local_file" "$source_url" + fi +} +github_api() { + local_file=$1 + source_url=$2 + header="" + case "$source_url" in + https://api.github.com*) + test -z "$GITHUB_TOKEN" || header="Authorization: token $GITHUB_TOKEN" + ;; + esac + http_download "$local_file" "$source_url" "$header" +} +github_last_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_download "-" "$giturl" "Accept:application/json") + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f a + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null < 50000 { + fin, err := os.Open(filename) + if err != nil { + return "", fmt.Errorf("Unable to open large file %q: %s", filename, err) + } + defer fin.Close() + buf := make([]byte, 512) + _, err = io.ReadFull(fin, buf) + if err != nil { + return "", fmt.Errorf("Unable to read 512 bytes from %q: %s", filename, err) + } + if !isTextFile(buf) { + return "", nil + } + + // set so we don't double check this file + isText = true + } + + // read in whole file + raw, err := ioutil.ReadFile(filename) + if err != nil { + return "", fmt.Errorf("Unable to read all %q: %s", filename, err) + } + + if !isText && !isTextFile(raw) { + return "", nil + } + return string(raw), nil +} diff --git a/vendor/github.com/golangci/misspell/notwords.go b/vendor/github.com/golangci/misspell/notwords.go new file mode 100644 index 
000000000..06d0d5a5a --- /dev/null +++ b/vendor/github.com/golangci/misspell/notwords.go @@ -0,0 +1,85 @@ +package misspell + +import ( + "bytes" + "regexp" + "strings" +) + +var ( + reEmail = regexp.MustCompile(`[a-zA-Z0-9_.%+-]+@[a-zA-Z0-9-.]+\.[a-zA-Z]{2,6}[^a-zA-Z]`) + reHost = regexp.MustCompile(`[a-zA-Z0-9-.]+\.[a-zA-Z]+`) + reBackslash = regexp.MustCompile(`\\[a-z]`) +) + +// RemovePath attempts to strip away embedded file system paths, e.g. +// /foo/bar or /static/myimg.png +// +// TODO: windows style +// +func RemovePath(s string) string { + out := bytes.Buffer{} + var idx int + for len(s) > 0 { + if idx = strings.IndexByte(s, '/'); idx == -1 { + out.WriteString(s) + break + } + + if idx > 0 { + idx-- + } + + var chclass string + switch s[idx] { + case '/', ' ', '\n', '\t', '\r': + chclass = " \n\r\t" + case '[': + chclass = "]\n" + case '(': + chclass = ")\n" + default: + out.WriteString(s[:idx+2]) + s = s[idx+2:] + continue + } + + endx := strings.IndexAny(s[idx+1:], chclass) + if endx != -1 { + out.WriteString(s[:idx+1]) + out.Write(bytes.Repeat([]byte{' '}, endx)) + s = s[idx+endx+1:] + } else { + out.WriteString(s) + break + } + } + return out.String() +} + +// replaceWithBlanks returns a string with the same number of spaces as the input +func replaceWithBlanks(s string) string { + return strings.Repeat(" ", len(s)) +} + +// RemoveEmail remove email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz" +func RemoveEmail(s string) string { + return reEmail.ReplaceAllStringFunc(s, replaceWithBlanks) +} + +// RemoveHost removes host-like strings "foobar.com" "abc123.fo1231.biz" +func RemoveHost(s string) string { + return reHost.ReplaceAllStringFunc(s, replaceWithBlanks) +} + +// RemoveBackslashEscapes removes characters that are preceeded by a backslash +// commonly found in printf format stringd "\nto" +func removeBackslashEscapes(s string) string { + return reBackslash.ReplaceAllStringFunc(s, replaceWithBlanks) +} + +// RemoveNotWords blanks out all the not words +func RemoveNotWords(s string) string { + // do most selective/specific first + return removeBackslashEscapes(RemoveHost(RemoveEmail(RemovePath(StripURL(s))))) +} diff --git a/vendor/github.com/golangci/misspell/replace.go b/vendor/github.com/golangci/misspell/replace.go new file mode 100644 index 000000000..a99bbcc58 --- /dev/null +++ b/vendor/github.com/golangci/misspell/replace.go @@ -0,0 +1,246 @@ +package misspell + +import ( + "bufio" + "bytes" + "io" + "regexp" + "strings" + "text/scanner" +) + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +func inArray(haystack []string, needle string) bool { + for _, word := range haystack { + if needle == word { + return true + } + } + return false +} + +var wordRegexp = regexp.MustCompile(`[a-zA-Z0-9']+`) + +// Diff is datastructure showing what changed in a single line +type Diff struct { + Filename string + FullLine string + Line int + Column int + Original string + Corrected string +} + +// Replacer is the main struct for spelling correction +type Replacer struct { + Replacements []string + Debug bool + engine *StringReplacer + corrected map[string]string +} + +// New creates a new default Replacer using the main rule list +func New() *Replacer { + r := Replacer{ + Replacements: DictMain, + } + r.Compile() + return &r +} + +// RemoveRule deletes existings rules. 
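+// Removing rules clears the compiled engine; Compile must be called again
+// before the next Replace.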
+// TODO: make inplace to save memory +func (r *Replacer) RemoveRule(ignore []string) { + newwords := make([]string, 0, len(r.Replacements)) + for i := 0; i < len(r.Replacements); i += 2 { + if inArray(ignore, r.Replacements[i]) { + continue + } + newwords = append(newwords, r.Replacements[i:i+2]...) + } + r.engine = nil + r.Replacements = newwords +} + +// AddRuleList appends new rules. +// Input is in the same form as Strings.Replacer: [ old1, new1, old2, new2, ....] +// Note: does not check for duplictes +func (r *Replacer) AddRuleList(additions []string) { + r.engine = nil + r.Replacements = append(r.Replacements, additions...) +} + +// Compile compiles the rules. Required before using the Replace functions +func (r *Replacer) Compile() { + + r.corrected = make(map[string]string, len(r.Replacements)/2) + for i := 0; i < len(r.Replacements); i += 2 { + r.corrected[r.Replacements[i]] = r.Replacements[i+1] + } + r.engine = NewStringReplacer(r.Replacements...) +} + +/* +line1 and line2 are different +extract words from each line1 + +replace word -> newword +if word == new-word + continue +if new-word in list of replacements + continue +new word not original, and not in list of replacements + some substring got mixed up. UNdo +*/ +func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(Diff)) { + first := 0 + redacted := RemoveNotWords(s) + + idx := wordRegexp.FindAllStringIndex(redacted, -1) + for _, ab := range idx { + word := s[ab[0]:ab[1]] + newword := r.engine.Replace(word) + if newword == word { + // no replacement done + continue + } + + // ignore camelCase words + // https://github.com/client9/misspell/issues/113 + if CaseStyle(word) == CaseUnknown { + continue + } + + if StringEqualFold(r.corrected[strings.ToLower(word)], newword) { + // word got corrected into something we know + io.WriteString(buf, s[first:ab[0]]) + io.WriteString(buf, newword) + first = ab[1] + next(Diff{ + FullLine: s, + Line: lineNum, + Original: word, + Corrected: newword, + Column: ab[0], + }) + continue + } + // Word got corrected into something unknown. Ignore it + } + io.WriteString(buf, s[first:]) +} + +// ReplaceGo is a specialized routine for correcting Golang source +// files. Currently only checks comments, not identifiers for +// spelling. 
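+// String and raw-string literals are scanned as single tokens, so comment-like
+// text inside them is left untouched.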
+func (r *Replacer) ReplaceGo(input string) (string, []Diff) { + var s scanner.Scanner + s.Init(strings.NewReader(input)) + s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments + lastPos := 0 + output := "" +Loop: + for { + switch s.Scan() { + case scanner.Comment: + origComment := s.TokenText() + newComment := r.engine.Replace(origComment) + + if origComment != newComment { + // s.Pos().Offset is the end of the current token + // subtract len(origComment) to get the start of the token + offset := s.Pos().Offset + output = output + input[lastPos:offset-len(origComment)] + newComment + lastPos = offset + } + case scanner.EOF: + break Loop + } + } + + if lastPos == 0 { + // no changes, no copies + return input, nil + } + if lastPos < len(input) { + output = output + input[lastPos:] + } + diffs := make([]Diff, 0, 8) + buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) + // faster that making a bytes.Buffer and bufio.ReadString + outlines := strings.SplitAfter(output, "\n") + inlines := strings.SplitAfter(input, "\n") + for i := 0; i < len(inlines); i++ { + if inlines[i] == outlines[i] { + buf.WriteString(outlines[i]) + continue + } + r.recheckLine(inlines[i], i+1, buf, func(d Diff) { + diffs = append(diffs, d) + }) + } + + return buf.String(), diffs + +} + +// Replace is corrects misspellings in input, returning corrected version +// along with a list of diffs. +func (r *Replacer) Replace(input string) (string, []Diff) { + output := r.engine.Replace(input) + if input == output { + return input, nil + } + diffs := make([]Diff, 0, 8) + buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) + // faster that making a bytes.Buffer and bufio.ReadString + outlines := strings.SplitAfter(output, "\n") + inlines := strings.SplitAfter(input, "\n") + for i := 0; i < len(inlines); i++ { + if inlines[i] == outlines[i] { + buf.WriteString(outlines[i]) + continue + } + r.recheckLine(inlines[i], i+1, buf, func(d Diff) { + diffs = append(diffs, d) + }) + } + + return buf.String(), diffs +} + +// ReplaceReader applies spelling corrections to a reader stream. Diffs are +// emitted through a callback. +func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) error { + var ( + err error + line string + lineNum int + ) + reader := bufio.NewReader(raw) + for err == nil { + lineNum++ + line, err = reader.ReadString('\n') + + // if it's EOF, then line has the last line + // don't like the check of err here and + // in for loop + if err != nil && err != io.EOF { + return err + } + // easily 5x faster than regexp+map + if line == r.engine.Replace(line) { + io.WriteString(w, line) + continue + } + // but it can be inaccurate, so we need to double check + r.recheckLine(line, lineNum, w, next) + } + return nil +} diff --git a/vendor/github.com/golangci/misspell/stringreplacer.go b/vendor/github.com/golangci/misspell/stringreplacer.go new file mode 100644 index 000000000..3151eceb7 --- /dev/null +++ b/vendor/github.com/golangci/misspell/stringreplacer.go @@ -0,0 +1,336 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package misspell + +import ( + "io" + // "log" + "strings" +) + +// StringReplacer replaces a list of strings with replacements. +// It is safe for concurrent use by multiple goroutines. 
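+// It is a modified copy of the standard library's strings.Replacer that
+// matches case-insensitively and adjusts replacements to the case style of
+// the matched text.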
+type StringReplacer struct { + r replacer +} + +// replacer is the interface that a replacement algorithm needs to implement. +type replacer interface { + Replace(s string) string + WriteString(w io.Writer, s string) (n int, err error) +} + +// NewStringReplacer returns a new Replacer from a list of old, new string pairs. +// Replacements are performed in order, without overlapping matches. +func NewStringReplacer(oldnew ...string) *StringReplacer { + if len(oldnew)%2 == 1 { + panic("strings.NewReplacer: odd argument count") + } + + return &StringReplacer{r: makeGenericReplacer(oldnew)} +} + +// Replace returns a copy of s with all replacements performed. +func (r *StringReplacer) Replace(s string) string { + return r.r.Replace(s) +} + +// WriteString writes s to w with all replacements performed. +func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { + return r.r.WriteString(w, s) +} + +// trieNode is a node in a lookup trie for prioritized key/value pairs. Keys +// and values may be empty. For example, the trie containing keys "ax", "ay", +// "bcbc", "x" and "xy" could have eight nodes: +// +// n0 - +// n1 a- +// n2 .x+ +// n3 .y+ +// n4 b- +// n5 .cbc+ +// n6 x+ +// n7 .y+ +// +// n0 is the root node, and its children are n1, n4 and n6; n1's children are +// n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked +// with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7 +// (marked with a trailing "+") are complete keys. +type trieNode struct { + // value is the value of the trie node's key/value pair. It is empty if + // this node is not a complete key. + value string + // priority is the priority (higher is more important) of the trie node's + // key/value pair; keys are not necessarily matched shortest- or longest- + // first. Priority is positive if this node is a complete key, and zero + // otherwise. In the example above, positive/zero priorities are marked + // with a trailing "+" or "-". + priority int + + // A trie node may have zero, one or more child nodes: + // * if the remaining fields are zero, there are no children. + // * if prefix and next are non-zero, there is one child in next. + // * if table is non-zero, it defines all the children. + // + // Prefixes are preferred over tables when there is one child, but the + // root node always uses a table for lookup efficiency. + + // prefix is the difference in keys between this trie node and the next. + // In the example above, node n4 has prefix "cbc" and n4's next node is n5. + // Node n5 has no children and so has zero prefix, next and table fields. + prefix string + next *trieNode + + // table is a lookup table indexed by the next byte in the key, after + // remapping that byte through genericReplacer.mapping to create a dense + // index. In the example above, the keys only use 'a', 'b', 'c', 'x' and + // 'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and + // genericReplacer.tableSize will be 5. Node n0's table will be + // []*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped + // 'a', 'b' and 'x'. + table []*trieNode +} + +func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { + if key == "" { + if t.priority == 0 { + t.value = val + t.priority = priority + } + return + } + + if t.prefix != "" { + // Need to split the prefix among multiple nodes. 
+ var n int // length of the longest common prefix + for ; n < len(t.prefix) && n < len(key); n++ { + if t.prefix[n] != key[n] { + break + } + } + if n == len(t.prefix) { + t.next.add(key[n:], val, priority, r) + } else if n == 0 { + // First byte differs, start a new lookup table here. Looking up + // what is currently t.prefix[0] will lead to prefixNode, and + // looking up key[0] will lead to keyNode. + var prefixNode *trieNode + if len(t.prefix) == 1 { + prefixNode = t.next + } else { + prefixNode = &trieNode{ + prefix: t.prefix[1:], + next: t.next, + } + } + keyNode := new(trieNode) + t.table = make([]*trieNode, r.tableSize) + t.table[r.mapping[t.prefix[0]]] = prefixNode + t.table[r.mapping[key[0]]] = keyNode + t.prefix = "" + t.next = nil + keyNode.add(key[1:], val, priority, r) + } else { + // Insert new node after the common section of the prefix. + next := &trieNode{ + prefix: t.prefix[n:], + next: t.next, + } + t.prefix = t.prefix[:n] + t.next = next + next.add(key[n:], val, priority, r) + } + } else if t.table != nil { + // Insert into existing table. + m := r.mapping[key[0]] + if t.table[m] == nil { + t.table[m] = new(trieNode) + } + t.table[m].add(key[1:], val, priority, r) + } else { + t.prefix = key + t.next = new(trieNode) + t.next.add("", val, priority, r) + } +} + +func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { + // Iterate down the trie to the end, and grab the value and keylen with + // the highest priority. + bestPriority := 0 + node := &r.root + n := 0 + for node != nil { + if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { + bestPriority = node.priority + val = node.value + keylen = n + found = true + } + + if s == "" { + break + } + if node.table != nil { + index := r.mapping[ByteToLower(s[0])] + if int(index) == r.tableSize { + break + } + node = node.table[index] + s = s[1:] + n++ + } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { + n += len(node.prefix) + s = s[len(node.prefix):] + node = node.next + } else { + break + } + } + return +} + +// genericReplacer is the fully generic algorithm. +// It's used as a fallback when nothing faster can be used. +type genericReplacer struct { + root trieNode + // tableSize is the size of a trie node's lookup table. It is the number + // of unique key bytes. + tableSize int + // mapping maps from key bytes to a dense index for trieNode.table. + mapping [256]byte +} + +func makeGenericReplacer(oldnew []string) *genericReplacer { + r := new(genericReplacer) + // Find each byte used, then assign them each an index. + for i := 0; i < len(oldnew); i += 2 { + key := strings.ToLower(oldnew[i]) + for j := 0; j < len(key); j++ { + r.mapping[key[j]] = 1 + } + } + + for _, b := range r.mapping { + r.tableSize += int(b) + } + + var index byte + for i, b := range r.mapping { + if b == 0 { + r.mapping[i] = byte(r.tableSize) + } else { + r.mapping[i] = index + index++ + } + } + // Ensure root node uses a lookup table (for performance). + r.root.table = make([]*trieNode, r.tableSize) + + for i := 0; i < len(oldnew); i += 2 { + r.root.add(strings.ToLower(oldnew[i]), oldnew[i+1], len(oldnew)-i, r) + } + return r +} + +type appendSliceWriter []byte + +// Write writes to the buffer to satisfy io.Writer. +func (w *appendSliceWriter) Write(p []byte) (int, error) { + *w = append(*w, p...) + return len(p), nil +} + +// WriteString writes to the buffer without string->[]byte->string allocations. 
+func (w *appendSliceWriter) WriteString(s string) (int, error) { + *w = append(*w, s...) + return len(s), nil +} + +type stringWriterIface interface { + WriteString(string) (int, error) +} + +type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (int, error) { + return w.w.Write([]byte(s)) +} + +func getStringWriter(w io.Writer) stringWriterIface { + sw, ok := w.(stringWriterIface) + if !ok { + sw = stringWriter{w} + } + return sw +} + +func (r *genericReplacer) Replace(s string) string { + buf := make(appendSliceWriter, 0, len(s)) + r.WriteString(&buf, s) + return string(buf) +} + +func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) { + sw := getStringWriter(w) + var last, wn int + var prevMatchEmpty bool + for i := 0; i <= len(s); { + // Fast path: s[i] is not a prefix of any pattern. + if i != len(s) && r.root.priority == 0 { + index := int(r.mapping[ByteToLower(s[i])]) + if index == r.tableSize || r.root.table[index] == nil { + i++ + continue + } + } + + // Ignore the empty match iff the previous loop found the empty match. + val, keylen, match := r.lookup(s[i:], prevMatchEmpty) + prevMatchEmpty = match && keylen == 0 + if match { + orig := s[i : i+keylen] + switch CaseStyle(orig) { + case CaseUnknown: + // pretend we didn't match + // i++ + // continue + case CaseUpper: + val = strings.ToUpper(val) + case CaseLower: + val = strings.ToLower(val) + case CaseTitle: + if len(val) < 2 { + val = strings.ToUpper(val) + } else { + val = strings.ToUpper(val[:1]) + strings.ToLower(val[1:]) + } + } + wn, err = sw.WriteString(s[last:i]) + n += wn + if err != nil { + return + } + //log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) + wn, err = sw.WriteString(val) + n += wn + if err != nil { + return + } + i += keylen + last = i + continue + } + i++ + } + if last != len(s) { + wn, err = sw.WriteString(s[last:]) + n += wn + } + return +} diff --git a/vendor/github.com/golangci/misspell/stringreplacer_test.gox b/vendor/github.com/golangci/misspell/stringreplacer_test.gox new file mode 100644 index 000000000..70da997f6 --- /dev/null +++ b/vendor/github.com/golangci/misspell/stringreplacer_test.gox @@ -0,0 +1,421 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package misspell_test + +import ( + "bytes" + "fmt" + "strings" + "testing" + + . "github.com/client9/misspell" +) + +var htmlEscaper = NewStringReplacer( + "&", "&", + "<", "<", + ">", ">", + `"`, """, + "'", "'", +) + +var htmlUnescaper = NewStringReplacer( + "&", "&", + "<", "<", + ">", ">", + """, `"`, + "'", "'", +) + +// The http package's old HTML escaping function. +func oldHTMLEscape(s string) string { + s = strings.Replace(s, "&", "&", -1) + s = strings.Replace(s, "<", "<", -1) + s = strings.Replace(s, ">", ">", -1) + s = strings.Replace(s, `"`, """, -1) + s = strings.Replace(s, "'", "'", -1) + return s +} + +var capitalLetters = NewStringReplacer("a", "A", "b", "B") + +// TestReplacer tests the replacer implementations. +func TestReplacer(t *testing.T) { + type testCase struct { + r *StringReplacer + in, out string + } + var testCases []testCase + + // str converts 0xff to "\xff". This isn't just string(b) since that converts to UTF-8. + str := func(b byte) string { + return string([]byte{b}) + } + var s []string + + // inc maps "\x00"->"\x01", ..., "a"->"b", "b"->"c", ..., "\xff"->"\x00". 
+ for i := 0; i < 256; i++ { + s = append(s, str(byte(i)), str(byte(i+1))) + } + inc := NewStringReplacer(s...) + + // Test cases with 1-byte old strings, 1-byte new strings. + testCases = append(testCases, + testCase{capitalLetters, "brad", "BrAd"}, + testCase{capitalLetters, strings.Repeat("a", (32<<10)+123), strings.Repeat("A", (32<<10)+123)}, + testCase{capitalLetters, "", ""}, + + testCase{inc, "brad", "csbe"}, + testCase{inc, "\x00\xff", "\x01\x00"}, + testCase{inc, "", ""}, + + testCase{NewStringReplacer("a", "1", "a", "2"), "brad", "br1d"}, + ) + + // repeat maps "a"->"a", "b"->"bb", "c"->"ccc", ... + s = nil + for i := 0; i < 256; i++ { + n := i + 1 - 'a' + if n < 1 { + n = 1 + } + s = append(s, str(byte(i)), strings.Repeat(str(byte(i)), n)) + } + repeat := NewStringReplacer(s...) + + // Test cases with 1-byte old strings, variable length new strings. + testCases = append(testCases, + testCase{htmlEscaper, "No changes", "No changes"}, + testCase{htmlEscaper, "I <3 escaping & stuff", "I <3 escaping & stuff"}, + testCase{htmlEscaper, "&&&", "&&&"}, + testCase{htmlEscaper, "", ""}, + + testCase{repeat, "brad", "bbrrrrrrrrrrrrrrrrrradddd"}, + testCase{repeat, "abba", "abbbba"}, + testCase{repeat, "", ""}, + + testCase{NewStringReplacer("a", "11", "a", "22"), "brad", "br11d"}, + ) + + // The remaining test cases have variable length old strings. + + testCases = append(testCases, + testCase{htmlUnescaper, "&amp;", "&"}, + testCase{htmlUnescaper, "<b>HTML's neat</b>", "HTML's neat"}, + testCase{htmlUnescaper, "", ""}, + + testCase{NewStringReplacer("a", "1", "a", "2", "xxx", "xxx"), "brad", "br1d"}, + + testCase{NewStringReplacer("a", "1", "aa", "2", "aaa", "3"), "aaaa", "1111"}, + + testCase{NewStringReplacer("aaa", "3", "aa", "2", "a", "1"), "aaaa", "31"}, + ) + + // gen1 has multiple old strings of variable length. There is no + // overall non-empty common prefix, but some pairwise common prefixes. + gen1 := NewStringReplacer( + "aaa", "3[aaa]", + "aa", "2[aa]", + "a", "1[a]", + "i", "i", + "longerst", "most long", + "longer", "medium", + "long", "short", + "xx", "xx", + "x", "X", + "X", "Y", + "Y", "Z", + ) + testCases = append(testCases, + testCase{gen1, "fooaaabar", "foo3[aaa]b1[a]r"}, + testCase{gen1, "long, longerst, longer", "short, most long, medium"}, + testCase{gen1, "xxxxx", "xxxxX"}, + testCase{gen1, "XiX", "YiY"}, + testCase{gen1, "", ""}, + ) + + // gen2 has multiple old strings with no pairwise common prefix. + gen2 := NewStringReplacer( + "roses", "red", + "violets", "blue", + "sugar", "sweet", + ) + testCases = append(testCases, + testCase{gen2, "roses are red, violets are blue...", "red are red, blue are blue..."}, + testCase{gen2, "", ""}, + ) + + // gen3 has multiple old strings with an overall common prefix. + gen3 := NewStringReplacer( + "abracadabra", "poof", + "abracadabrakazam", "splat", + "abraham", "lincoln", + "abrasion", "scrape", + "abraham", "isaac", + ) + testCases = append(testCases, + testCase{gen3, "abracadabrakazam abraham", "poofkazam lincoln"}, + testCase{gen3, "abrasion abracad", "scrape abracad"}, + testCase{gen3, "abba abram abrasive", "abba abram abrasive"}, + testCase{gen3, "", ""}, + ) + + // foo{1,2,3,4} have multiple old strings with an overall common prefix + // and 1- or 2- byte extensions from the common prefix. 
+	foo1 := NewStringReplacer(
+		"foo1", "A",
+		"foo2", "B",
+		"foo3", "C",
+	)
+	foo2 := NewStringReplacer(
+		"foo1", "A",
+		"foo2", "B",
+		"foo31", "C",
+		"foo32", "D",
+	)
+	foo3 := NewStringReplacer(
+		"foo11", "A",
+		"foo12", "B",
+		"foo31", "C",
+		"foo32", "D",
+	)
+	foo4 := NewStringReplacer(
+		"foo12", "B",
+		"foo32", "D",
+	)
+	testCases = append(testCases,
+		testCase{foo1, "fofoofoo12foo32oo", "fofooA2C2oo"},
+		testCase{foo1, "", ""},
+
+		testCase{foo2, "fofoofoo12foo32oo", "fofooA2Doo"},
+		testCase{foo2, "", ""},
+
+		testCase{foo3, "fofoofoo12foo32oo", "fofooBDoo"},
+		testCase{foo3, "", ""},
+
+		testCase{foo4, "fofoofoo12foo32oo", "fofooBDoo"},
+		testCase{foo4, "", ""},
+	)
+
+	// genAll maps "\x00\x01\x02...\xfe\xff" to "[all]", amongst other things.
+	allBytes := make([]byte, 256)
+	for i := range allBytes {
+		allBytes[i] = byte(i)
+	}
+	allString := string(allBytes)
+	genAll := NewStringReplacer(
+		allString, "[all]",
+		"\xff", "[ff]",
+		"\x00", "[00]",
+	)
+	testCases = append(testCases,
+		testCase{genAll, allString, "[all]"},
+		testCase{genAll, "a\xff" + allString + "\x00", "a[ff][all][00]"},
+		testCase{genAll, "", ""},
+	)
+
+	// Test cases with empty old strings.
+
+	blankToX1 := NewStringReplacer("", "X")
+	blankToX2 := NewStringReplacer("", "X", "", "")
+	blankHighPriority := NewStringReplacer("", "X", "o", "O")
+	blankLowPriority := NewStringReplacer("o", "O", "", "X")
+	blankNoOp1 := NewStringReplacer("", "")
+	blankNoOp2 := NewStringReplacer("", "", "", "A")
+	blankFoo := NewStringReplacer("", "X", "foobar", "R", "foobaz", "Z")
+	testCases = append(testCases,
+		testCase{blankToX1, "foo", "XfXoXoX"},
+		testCase{blankToX1, "", "X"},
+
+		testCase{blankToX2, "foo", "XfXoXoX"},
+		testCase{blankToX2, "", "X"},
+
+		testCase{blankHighPriority, "oo", "XOXOX"},
+		testCase{blankHighPriority, "ii", "XiXiX"},
+		testCase{blankHighPriority, "oiio", "XOXiXiXOX"},
+		testCase{blankHighPriority, "iooi", "XiXOXOXiX"},
+		testCase{blankHighPriority, "", "X"},
+
+		testCase{blankLowPriority, "oo", "OOX"},
+		testCase{blankLowPriority, "ii", "XiXiX"},
+		testCase{blankLowPriority, "oiio", "OXiXiOX"},
+		testCase{blankLowPriority, "iooi", "XiOOXiX"},
+		testCase{blankLowPriority, "", "X"},
+
+		testCase{blankNoOp1, "foo", "foo"},
+		testCase{blankNoOp1, "", ""},
+
+		testCase{blankNoOp2, "foo", "foo"},
+		testCase{blankNoOp2, "", ""},
+
+		testCase{blankFoo, "foobarfoobaz", "XRXZX"},
+		testCase{blankFoo, "foobar-foobaz", "XRX-XZX"},
+		testCase{blankFoo, "", "X"},
+	)
+
+	// single string replacer
+
+	abcMatcher := NewStringReplacer("abc", "[match]")
+
+	testCases = append(testCases,
+		testCase{abcMatcher, "", ""},
+		testCase{abcMatcher, "ab", "ab"},
+		testCase{abcMatcher, "abc", "[match]"},
+		testCase{abcMatcher, "abcd", "[match]d"},
+		testCase{abcMatcher, "cabcabcdabca", "c[match][match]d[match]a"},
+	)
+
+	// Issue 6659 cases (more single string replacer)
+
+	noHello := NewStringReplacer("Hello", "")
+	testCases = append(testCases,
+		testCase{noHello, "Hello", ""},
+		testCase{noHello, "Hellox", "x"},
+		testCase{noHello, "xHello", "x"},
+		testCase{noHello, "xHellox", "xx"},
+	)
+
+	// No-arg test cases.
+
+	nop := NewStringReplacer()
+	testCases = append(testCases,
+		testCase{nop, "abc", "abc"},
+		testCase{nop, "", ""},
+	)
+
+	// Run the test cases.
+
+	for i, tc := range testCases {
+		if s := tc.r.Replace(tc.in); s != tc.out {
+			t.Errorf("%d. strings.Replace(%q) = %q, want %q", i, tc.in, s, tc.out)
+		}
+		var buf bytes.Buffer
+		n, err := tc.r.WriteString(&buf, tc.in)
+		if err != nil {
+			t.Errorf("%d. WriteString: %v", i, err)
+			continue
+		}
+		got := buf.String()
+		if got != tc.out {
+			t.Errorf("%d. WriteString(%q) wrote %q, want %q", i, tc.in, got, tc.out)
+			continue
+		}
+		if n != len(tc.out) {
+			t.Errorf("%d. WriteString(%q) wrote correct string but reported %d bytes; want %d (%q)",
+				i, tc.in, n, len(tc.out), tc.out)
+		}
+	}
+}
+
+type errWriter struct{}
+
+func (errWriter) Write(p []byte) (n int, err error) {
+	return 0, fmt.Errorf("unwritable")
+}
+
+func BenchmarkGenericNoMatch(b *testing.B) {
+	str := strings.Repeat("A", 100) + strings.Repeat("B", 100)
+	generic := NewStringReplacer("a", "A", "b", "B", "12", "123") // varying lengths forces generic
+	for i := 0; i < b.N; i++ {
+		generic.Replace(str)
+	}
+}
+
+func BenchmarkGenericMatch1(b *testing.B) {
+	str := strings.Repeat("a", 100) + strings.Repeat("b", 100)
+	generic := NewStringReplacer("a", "A", "b", "B", "12", "123")
+	for i := 0; i < b.N; i++ {
+		generic.Replace(str)
+	}
+}
+
+func BenchmarkGenericMatch2(b *testing.B) {
+	str := strings.Repeat("It&apos;s &lt;b&gt;HTML&lt;/b&gt;!", 100)
+	for i := 0; i < b.N; i++ {
+		htmlUnescaper.Replace(str)
+	}
+}
+
+func benchmarkSingleString(b *testing.B, pattern, text string) {
+	r := NewStringReplacer(pattern, "[match]")
+	b.SetBytes(int64(len(text)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		r.Replace(text)
+	}
+}
+
+func BenchmarkSingleMaxSkipping(b *testing.B) {
+	benchmarkSingleString(b, strings.Repeat("b", 25), strings.Repeat("a", 10000))
+}
+
+func BenchmarkSingleLongSuffixFail(b *testing.B) {
+	benchmarkSingleString(b, "b"+strings.Repeat("a", 500), strings.Repeat("a", 1002))
+}
+
+func BenchmarkSingleMatch(b *testing.B) {
+	benchmarkSingleString(b, "abcdef", strings.Repeat("abcdefghijklmno", 1000))
+}
+
+func BenchmarkByteByteNoMatch(b *testing.B) {
+	str := strings.Repeat("A", 100) + strings.Repeat("B", 100)
+	for i := 0; i < b.N; i++ {
+		capitalLetters.Replace(str)
+	}
+}
+
+func BenchmarkByteByteMatch(b *testing.B) {
+	str := strings.Repeat("a", 100) + strings.Repeat("b", 100)
+	for i := 0; i < b.N; i++ {
+		capitalLetters.Replace(str)
+	}
+}
+
+func BenchmarkByteStringMatch(b *testing.B) {
+	str := "<" + strings.Repeat("a", 99) + strings.Repeat("b", 99) + ">"
+	for i := 0; i < b.N; i++ {
+		htmlEscaper.Replace(str)
+	}
+}
+
+func BenchmarkHTMLEscapeNew(b *testing.B) {
+	str := "I <3 to escape HTML & other text too."
+	for i := 0; i < b.N; i++ {
+		htmlEscaper.Replace(str)
+	}
+}
+
+func BenchmarkHTMLEscapeOld(b *testing.B) {
+	str := "I <3 to escape HTML & other text too."
+	for i := 0; i < b.N; i++ {
+		oldHTMLEscape(str)
+	}
+}
+
+func BenchmarkByteStringReplacerWriteString(b *testing.B) {
+	str := strings.Repeat("I <3 to escape HTML & other text too.", 100)
+	buf := new(bytes.Buffer)
+	for i := 0; i < b.N; i++ {
+		htmlEscaper.WriteString(buf, str)
+		buf.Reset()
+	}
+}
+
+func BenchmarkByteReplacerWriteString(b *testing.B) {
+	str := strings.Repeat("abcdefghijklmnopqrstuvwxyz", 100)
+	buf := new(bytes.Buffer)
+	for i := 0; i < b.N; i++ {
+		capitalLetters.WriteString(buf, str)
+		buf.Reset()
+	}
+}
+
+// BenchmarkByteByteReplaces compares byteByteImpl against multiple Replaces.
+func BenchmarkByteByteReplaces(b *testing.B) { + str := strings.Repeat("a", 100) + strings.Repeat("b", 100) + for i := 0; i < b.N; i++ { + strings.Replace(strings.Replace(str, "a", "A", -1), "b", "B", -1) + } +} diff --git a/vendor/github.com/golangci/misspell/url.go b/vendor/github.com/golangci/misspell/url.go new file mode 100644 index 000000000..1a259f5f9 --- /dev/null +++ b/vendor/github.com/golangci/misspell/url.go @@ -0,0 +1,17 @@ +package misspell + +import ( + "regexp" +) + +// Regexp for URL https://mathiasbynens.be/demo/url-regex +// +// original @imme_emosol (54 chars) has trouble with dashes in hostname +// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS +var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?\.#]+\.?)+(/[^\s]*)?`) + +// StripURL attemps to replace URLs with blank spaces, e.g. +// "xxx http://foo.com/ yyy -> "xxx yyyy" +func StripURL(s string) string { + return reURL.ReplaceAllStringFunc(s, replaceWithBlanks) +} diff --git a/vendor/github.com/golangci/misspell/words.go b/vendor/github.com/golangci/misspell/words.go new file mode 100644 index 000000000..1603d87e6 --- /dev/null +++ b/vendor/github.com/golangci/misspell/words.go @@ -0,0 +1,31194 @@ +package misspell + +// Code generated automatically. DO NOT EDIT. + +// DictMain is the main rule set, not including locale-specific spellings +var DictMain = []string{ + "differentiatiations", "differentiations", + "disproportionaltely", "disproportionately", + "oversimplificiation", "oversimplification", + "transcendentational", "transcendental", + "anthromorphization", "anthropomorphization", + "disporportionately", "disproportionately", + "dispraportionately", "disproportionately", + "disproportianately", "disproportionately", + "disproportionatley", "disproportionately", + "disproprotionately", "disproportionately", + "fundamentalistisch", "fundamentalists", + "fundamentalistiska", "fundamentalists", + "fundamentalistiske", "fundamentalists", + "fundamentalistiskt", "fundamentalists", + "histocompatability", "histocompatibility", + "microtransacations", "microtransactions", + "microtransacciones", "microtransactions", + "microtransactional", "microtransactions", + "microtransactioned", "microtransactions", + "misunderstandingly", "misunderstandings", + "oversemplification", "oversimplification", + "oversimplifacation", "oversimplification", + "oversimplificaiton", "oversimplification", + "oversimplificating", "oversimplification", + "oversimplyfication", "oversimplification", + "cardiovasculaires", "cardiovascular", + "certificationkits", "certifications", + "counterporductive", "counterproductive", + "coutnerproductive", "counterproductive", + "disporportionatly", "disproportionately", + "disproportiantely", "disproportionately", + "disproportionatly", "disproportionately", + "disproportionnate", "disproportionate", + "disrepresentation", "misrepresentation", + "fundamentalistisk", "fundamentalists", + "incompatabilities", "incompatibilities", + "inconsequentional", "inconsequential", + "indistinguishible", "indistinguishable", + "indistingusihable", "indistinguishable", + "indistinquishable", "indistinguishable", + "indistuingishable", "indistinguishable", + "instatutionalized", "institutionalized", + "institucionalized", "institutionalized", + "institutionilized", "institutionalized", + "instutitionalized", "institutionalized", + "instututionalized", "institutionalized", + "interchangeablely", "interchangeably", + "interchangeablity", "interchangeably", + "intercontinential", 
"intercontinental", + "micortransactions", "microtransactions", + "microstansactions", "microtransactions", + "microtramsactions", "microtransactions", + "microtranasctions", "microtransactions", + "microtransacitons", "microtransactions", + "microtransacrions", "microtransactions", + "microtransactioms", "microtransactions", + "microtransactiosn", "microtransactions", + "microtranscations", "microtransactions", + "microtrasnactions", "microtransactions", + "mircotransactions", "microtransactions", + "misinterpretating", "misinterpreting", + "misrepresantation", "misrepresentation", + "misrepresentaiton", "misrepresentation", + "misrepresentating", "misrepresenting", + "misunderstantings", "misunderstandings", + "mocrotransactions", "microtransactions", + "oversimplifaction", "oversimplification", + "oversimplificaton", "oversimplification", + "oversimplifiction", "oversimplification", + "responsibillities", "responsibilities", + "unconstitutionnal", "unconstitutional", + "accomplishements", "accomplishments", + "admininistrative", "administrative", + "antidepresssants", "antidepressants", + "architechturally", "architecturally", + "cardiovasculaire", "cardiovascular", + "charactarization", "characterization", + "characterazation", "characterization", + "characterisitics", "characteristics", + "characteristsics", "characteristic", + "characterizarion", "characterization", + "charecterization", "characterization", + "charicterization", "characterization", + "circumstantional", "circumstantial", + "conversationable", "conversational", + "counterprodutive", "counterproductive", + "demonstrationens", "demonstrations", + "deterministische", "deterministic", + "differenciations", "differentiation", + "differentiantion", "differentiation", + "differentiatiors", "differentiation", + "differentitation", "differentiation", + "disperportionate", "disproportionate", + "disporportionate", "disproportionate", + "dispraportionate", "disproportionate", + "disproportianate", "disproportionate", + "disproportionaly", "disproportionately", + "disproprotionate", "disproportionate", + "electromagnectic", "electromagnetic", + "enviornmentalist", "environmentalist", + "environmentality", "environmentally", + "extraordinairily", "extraordinarily", + "extraordinarilly", "extraordinary", + "extraterrestials", "extraterrestrials", + "fundamentalismos", "fundamentalists", + "fundamentalismus", "fundamentalists", + "fundamentalistas", "fundamentalists", + "fundamentalisten", "fundamentalists", + "fundamentalister", "fundamentalists", + "imcomprehensible", "incomprehensible", + "immunosupressant", "immunosuppressant", + "imperfectionists", "imperfections", + "implementaciones", "implementations", + "implementationen", "implementations", + "implementationer", "implementations", + "inappropriatelly", "inappropriately", + "incompatablities", "incompatibilities", + "incompatiblities", "incompatibilities", + "incomprehencible", "incomprehensible", + "incomprehendible", "incomprehensible", + "incomprehenisble", "incomprehensible", + "incomprehensable", "incomprehensible", + "incomprehinsible", "incomprehensible", + "incomprihensible", "incomprehensible", + "inconprehensible", "incomprehensible", + "inconsistentcies", "inconsistencies", + "inconstitutional", "unconstitutional", + "incrompehensible", "incomprehensible", + "indistinguisable", "indistinguishable", + "institutionlized", "institutionalized", + "intellectualiser", "intellectuals", + "intellectualisme", "intellectuals", + "interchangeabley", "interchangeably", + 
"internationnally", "internationally", + "interpretaciones", "interpretations", + "interpretationen", "interpretations", + "manoeuverability", "maneuverability", + "massachusettians", "massachusetts", + "microtransacions", "microtransactions", + "microtransacting", "microtransactions", + "microtransactios", "microtransactions", + "microtransactons", "microtransactions", + "microtransations", "microtransactions", + "microtranscation", "microtransactions", + "mircotransaction", "microtransactions", + "miscommunciation", "miscommunication", + "miscommunicaiton", "miscommunication", + "miscomunnication", "miscommunication", + "miscummunication", "miscommunication", + "misinterpretated", "misinterpreted", + "misinterpretions", "misinterpreting", + "misinterpretting", "misinterpreting", + "misproportionate", "disproportionate", + "misrepresenation", "misrepresentation", + "misrepresentaion", "misrepresentation", + "misrepresentated", "misrepresented", + "misrepresentatie", "misrepresentation", + "misrepresentativ", "misrepresentation", + "misubderstanding", "misunderstandings", + "misudnerstanding", "misunderstandings", + "misundarstanding", "misunderstandings", + "misunderatanding", "misunderstandings", + "misunderdtanding", "misunderstandings", + "misundersatnding", "misunderstandings", + "misundersranding", "misunderstandings", + "misunderstadings", "misunderstandings", + "misunderstadning", "misunderstandings", + "misunderstamding", "misunderstandings", + "misunderstandigs", "misunderstandings", + "misunderstandimg", "misunderstandings", + "misunderstandind", "misunderstandings", + "misunderstanging", "misunderstandings", + "misunderstanidng", "misunderstandings", + "misunderstanings", "misunderstandings", + "misunderstansing", "misunderstandings", + "misunderstanting", "misunderstandings", + "misunderstending", "misunderstandings", + "misunderstnading", "misunderstandings", + "misunderstsnding", "misunderstandings", + "misunderstunding", "misunderstandings", + "misundertsanding", "misunderstandings", + "misundrestanding", "misunderstandings", + "misunterstanding", "misunderstandings", + "nationalistische", "nationalistic", + "nationalististic", "nationalistic", + "neconstitutional", "unconstitutional", + "notwhithstanding", "notwithstanding", + "objectificiation", "objectification", + "organisationnels", "organisations", + "perpendiculaires", "perpendicular", + "phillosophically", "philosophically", + "preinitalization", "preinitialization", + "prescriptionists", "prescriptions", + "procrastinarting", "procrastinating", + "procrastinationg", "procrastinating", + "procrastinazione", "procrastination", + "professionalisim", "professionalism", + "professionalisme", "professionals", + "professionallism", "professionalism", + "professionnalism", "professionalism", + "programattically", "programmatically", + "proportionallity", "proportionally", + "reaponsibilities", "responsibilities", + "reinitalizations", "reinitializations", + "representaciones", "representations", + "representationen", "representations", + "representationer", "representations", + "repsonsibilities", "responsibilities", + "responcibilities", "responsibilities", + "responisbilities", "responsibilities", + "responsabilities", "responsibilities", + "responsebilities", "responsibilities", + "straightforeward", "straightforward", + "surrepetitiously", "surreptitiously", + "technologicially", "technologically", + "unconditionnally", "unconditionally", + "unconfortability", "discomfort", + "unconstititional", "unconstitutional", + 
"uncontrollablely", "uncontrollably", + "underestimateing", "underestimating", + "understandablely", "understandably", + "unintentionnally", "unintentionally", + "unsubstantianted", "unsubstantiated", + "unsubstantiative", "unsubstantiated", + "acclimitization", "acclimatization", + "accomplishemnts", "accomplishments", + "accountabillity", "accountability", + "acknolwedgement", "acknowledgement", + "acknoweldgement", "acknowledgement", + "acknowldegement", "acknowledgement", + "acknowlegdement", "acknowledgement", + "administratieve", "administrative", + "administratiors", "administrators", + "administrativne", "administrative", + "aforementionned", "aforementioned", + "anitdepressants", "antidepressants", + "antidepressents", "antidepressants", + "archetecturally", "architecturally", + "associationthis", "associations", + "authobiographic", "autobiographic", + "awknowledgement", "acknowledgement", + "bureaucratische", "bureaucratic", + "cardiovascualar", "cardiovascular", + "carnagie-mellon", "carnegie-mellon", + "carnigie-mellon", "carnegie-mellon", + "celebrationists", "celebrations", + "charactaristics", "characteristics", + "characterisitcs", "characteristics", + "characterisitic", "characteristic", + "characterizaton", "characterization", + "charactersistic", "characteristic", + "charactersitics", "characteristics", + "charactoristics", "characteristics", + "charecteristics", "characteristics", + "comfrontational", "confrontational", + "commuinications", "communications", + "compatabilities", "compatibilities", + "complimentarity", "complimentary", + "compositionwise", "compositions", + "confidenciality", "confidential", + "confidentuality", "confidential", + "confrentational", "confrontational", + "confrontacional", "confrontational", + "conglaturations", "congratulations", + "congradulations", "congratulations", + "congragulations", "congratulations", + "congratualtions", "congratulations", + "congraturations", "congratulations", + "consequentually", "consequently", + "constitutionnal", "constitutional", + "deinitalization", "deinitialization", + "denominationals", "denominations", + "destinationhash", "destinations", + "deterministisch", "deterministic", + "developmentwise", "developments", + "differantiation", "differentiation", + "differenciation", "differentiation", + "differientation", "differentiation", + "discriminatoire", "discriminate", + "discriminatorie", "discriminate", + "disproportiante", "disproportionate", + "disproportinate", "disproportionate", + "elecrtomagnetic", "electromagnetic", + "electormagnetic", "electromagnetic", + "electromagentic", "electromagnetic", + "electromagnatic", "electromagnetic", + "electromangetic", "electromagnetic", + "electromegnetic", "electromagnetic", + "electronagnetic", "electromagnetic", + "enivronmentally", "environmentally", + "entrepreneurers", "entrepreneurs", + "enviornmentally", "environmentally", + "enviromentalist", "environmentalist", + "environemntally", "environmentally", + "envrionmentally", "environmentally", + "evolutionarilly", "evolutionary", + "experementation", "experimentation", + "experimantation", "experimentation", + "experimentacion", "experimentation", + "experimentating", "experimentation", + "experimenterade", "experimented", + "experimintation", "experimentation", + "expirementation", "experimentation", + "extraodrinarily", "extraordinarily", + "extraordinairly", "extraordinarily", + "extraordinarely", "extraordinarily", + "extraordinaryly", "extraordinarily", + "extraterrestial", "extraterrestrial", + 
"extroardinarily", "extraordinarily", + "fondamentalists", "fundamentalists", + "fundamendalists", "fundamentalists", + "fundamentalisme", "fundamentals", + "fundamentalismo", "fundamentals", + "fundamentalista", "fundamentals", + "fundamentalisti", "fundamentals", + "fundamnetalists", "fundamentalists", + "fundemantalists", "fundamentalists", + "fundimentalists", "fundamentalists", + "fundumentalists", "fundamentalists", + "gongratulations", "congratulations", + "grammaticallity", "grammatically", + "gundamentalists", "fundamentalists", + "idiosynchracies", "idiosyncrasies", + "implementaitons", "implementations", + "implimentations", "implementations", + "inapporpriately", "inappropriately", + "inappropraitely", "inappropriately", + "inappropriatley", "inappropriately", + "incompatability", "incompatibility", + "incompetentence", "incompetence", + "incomprehensibe", "incomprehensible", + "incomprehesible", "incomprehensible", + "inconcequential", "inconsequential", + "inconcistencies", "inconsistencies", + "inconditionally", "unconditionally", + "inconsecuential", "inconsequential", + "inconsequantial", "inconsequential", + "inconsequencial", "inconsequential", + "inconsequentual", "inconsequential", + "inconsiquential", "inconsequential", + "inconsistancies", "inconsistencies", + "inconsistencias", "inconsistencies", + "inconsistensies", "inconsistencies", + "inconsistenties", "inconsistencies", + "independentisme", "independents", + "independentiste", "independents", + "independentness", "independents", + "inexperiencable", "inexperience", + "inplementations", "implementations", + "instantaneoulsy", "instantaneous", + "institutionella", "institutional", + "institutionnels", "institutions", + "instutionalized", "institutionalized", + "insubstantiated", "unsubstantiated", + "interchangabley", "interchangeably", + "interchangebale", "interchangeable", + "intercontinetal", "intercontinental", + "interpertations", "interpretations", + "interpratations", "interpretations", + "interpritations", "interpretations", + "intersectionals", "intersections", + "intrepretations", "interpretations", + "investigationes", "investigations", + "journalistische", "journalistic", + "libertarianisim", "libertarianism", + "libertarianisme", "libertarians", + "libertarianismo", "libertarians", + "libertarianists", "libertarians", + "libertariansism", "libertarianism", + "manisfestations", "manifestations", + "manouverability", "maneuverability", + "manufacturerers", "manufacturers", + "marshmallowiest", "marshmallows", + "marshmallowness", "marshmallows", + "microtransacton", "microtransactions", + "mininterpreting", "misinterpreting", + "miscommuniation", "miscommunication", + "miscommunicatie", "miscommunication", + "miscommuniction", "miscommunication", + "misinterperting", "misinterpreting", + "misinterprating", "misinterpreting", + "misinterprented", "misinterpret", + "misinterprested", "misinterpret", + "misinterpretion", "misinterpreting", + "misinterpretted", "misinterpreted", + "misinterpriting", "misinterpreting", + "misintrepreting", "misinterpreting", + "misrepresention", "misrepresenting", + "misunderstading", "misunderstanding", + "misunderstandig", "misunderstandings", + "misunderstandng", "misunderstandings", + "misunderstaning", "misunderstanding", + "multicultralism", "multiculturalism", + "multinationella", "multinational", + "nationalistisch", "nationalists", + "nationalistisen", "nationalists", + "nationalistiska", "nationalists", + "nationalistiske", "nationalists", + "nationalistiskt", 
"nationalists", + "nationalistista", "nationalists", + "objectificaiton", "objectification", + "objectivication", "objectification", + "organisationens", "organisations", + "organisationers", "organisations", + "overestimateing", "overestimating", + "paychologically", "psychologically", + "performancetest", "performances", + "performancewise", "performances", + "perpendiculaire", "perpendicular", + "pharamceuticals", "pharmaceutical", + "pharmacueticals", "pharmaceutical", + "philoshopically", "philosophically", + "philosohpically", "philosophically", + "philosophycally", "philosophically", + "phsycologically", "psychologically", + "phychologically", "psychologically", + "phylosophically", "philosophically", + "physcologically", "psychologically", + "precrastination", "procrastination", + "prefessionalism", "professionalism", + "premonasterians", "premonstratensians", + "procastrinating", "procrastinating", + "procastrination", "procrastination", + "procrascinating", "procrastinating", + "procrastenating", "procrastinating", + "procrastiantion", "procrastination", + "procrastibating", "procrastinating", + "procrastibation", "procrastination", + "procrastonating", "procrastinating", + "procrestinating", "procrastinating", + "procrestination", "procrastination", + "professionalsim", "professionalism", + "prograstination", "procrastination", + "progressionists", "progressions", + "progressionwise", "progressions", + "prokrastination", "procrastination", + "proportionallly", "proportionally", + "proscratination", "procrastination", + "pscyhologically", "psychologically", + "pshycologically", "psychologically", + "psichologically", "psychologically", + "psychedelicious", "psychedelics", + "psychedelicness", "psychedelics", + "psycholigically", "psychologically", + "psychopathische", "psychopathic", + "pyschologically", "psychologically", + "racionalization", "rationalization", + "rationalizaiton", "rationalization", + "rationalizating", "rationalization", + "reccomendations", "recommendations", + "recommandations", "recommendations", + "recommondations", "recommendations", + "reinitalization", "reinitialization", + "repersentations", "representations", + "represantations", "representations", + "represantatives", "representatives", + "representatieve", "representative", + "representativas", "representatives", + "representetives", "representatives", + "representitives", "representatives", + "responibilities", "responsibilities", + "responsibilites", "responsibilities", + "responsibilitys", "responsibilities", + "responsibillity", "responsibility", + "responsibilties", "responsibilities", + "responsiblities", "responsibilities", + "ridiculoussness", "ridiculousness", + "saskatchewinian", "saskatchewan", + "satisfactorally", "satisfactory", + "satisfactorilly", "satisfactory", + "schizophreniiic", "schizophrenic", + "sensationalisim", "sensationalism", + "spreadsheeticus", "spreadsheets", + "starightforward", "straightforward", + "straigthforward", "straightforward", + "striaghtforward", "straightforward", + "sustainabillity", "sustainability", + "technoligically", "technologically", + "troubelshooting", "troubleshooting", + "troublehsooting", "troubleshooting", + "troubleshotting", "troubleshooting", + "trustworthyness", "trustworthiness", + "ubsubstantiated", "unsubstantiated", + "unappropriately", "inappropriately", + "uncomfortablely", "uncomfortably", + "uncomfortablity", "uncomfortably", + "unconditionable", "unconditional", + "unconstituional", "unconstitutional", + "uncontitutional", 
"unconstitutional", + "uncontrollabley", "uncontrollably", + "uncontrollablly", "uncontrollably", + "unconventionnal", "unconventional", + "underastimating", "underestimating", + "underestemating", "underestimating", + "understandabley", "understandably", + "unintensionally", "unintentionally", + "unprofessionnal", "unprofessional", + "unresponsivness", "unresponsive", + "unsibstantiated", "unsubstantiated", + "unsubstanciated", "unsubstantiated", + "unsubstansiated", "unsubstantiated", + "unsusbtantiated", "unsubstantiated", + "untranslateable", "untranslatable", + "vulernabilities", "vulnerabilities", + "vulnarabilities", "vulnerabilities", + "vulnurabilities", "vulnerabilities", + "vunlerabilities", "vulnerabilities", + "vurnerabilities", "vulnerabilities", + "accomplishemnt", "accomplishment", + "accomplishents", "accomplishes", + "acconplishment", "accomplishment", + "acknowledgeing", "acknowledging", + "acknowledgemnt", "acknowledgement", + "acomplishments", "accomplishments", + "administartion", "administration", + "administartors", "administrators", + "administraters", "administrators", + "administratief", "administrative", + "administratiei", "administrative", + "administratior", "administrator", + "administrativo", "administration", + "adminsitration", "administration", + "adminsitrative", "administrative", + "adminsitrators", "administrators", + "affectionatley", "affectionate", + "aforememtioned", "aforementioned", + "aforementioend", "aforementioned", + "alternativelly", "alternatively", + "amministrative", "administrative", + "anitdepressant", "antidepressants", + "approproximate", "approximate", + "approximatelly", "approximately", + "archeaologists", "archeologists", + "architechtures", "architectures", + "architectureal", "architectural", + "architecturial", "architectural", + "assassintation", "assassination", + "authenitcation", "authentication", + "authenticaiton", "authentication", + "authobiography", "autobiography", + "breakthroughts", "breakthroughs", + "bureaucratisch", "bureaucratic", + "calssification", "classification", + "capatilization", "capitalization", + "capitalizacion", "capitalization", + "capitalizaiton", "capitalization", + "capitalizating", "capitalization", + "capitilazation", "capitalization", + "capitolization", "capitalization", + "captialization", "capitalization", + "cardiocascular", "cardiovascular", + "cardiovascualr", "cardiovascular", + "cardiovasuclar", "cardiovascular", + "caridovascular", "cardiovascular", + "cessationalism", "sensationalism", + "cessationalist", "sensationalist", + "charactaristic", "characteristic", + "characterisics", "characteristics", + "characterisitc", "characteristics", + "characteristcs", "characteristics", + "characteritics", "characteristic", + "charactersitic", "characteristics", + "charasteristic", "characteristics", + "charecteristic", "characteristic", + "cheeseburguers", "cheeseburgers", + "cinematagraphy", "cinematography", + "cinematagrophy", "cinematography", + "cinematograhpy", "cinematography", + "cinematogrophy", "cinematography", + "cinematogrpahy", "cinematography", + "cinemetography", "cinematography", + "cinimatography", "cinematography", + "circumstansial", "circumstantial", + "circumstantual", "circumstantial", + "circumstential", "circumstantial", + "circunstantial", "circumstantial", + "classificaiton", "classification", + "coincedentally", "coincidentally", + "coinsidentally", "coincidentally", + "commemmorating", "commemorating", + "communciations", "communications", + "compatablities", 
"compatibilities", + "compatibillity", "compatibility", + "compatiblities", "compatibilities", + "competitioners", "competitions", + "comphrehensive", "comprehensive", + "computationnal", "computational", + "concatentation", "concatenation", + "conciderations", "considerations", + "condescenscion", "condescension", + "condradictions", "contradictions", + "configuartions", "configurations", + "confugurations", "configurations", + "conglaturation", "congratulations", + "congratulatons", "congratulations", + "conicidentally", "coincidentally", + "conifgurations", "configurations", + "conscioussness", "consciousness", + "consentrations", "concentrations", + "consiciousness", "consciousness", + "considerablely", "considerably", + "considerstions", "considerations", + "constititional", "constitutional", + "constitucional", "constitutional", + "contamporaries", "contemporaries", + "contemporaneus", "contemporaneous", + "contraceptivos", "contraceptives", + "contradicitons", "contradictions", + "contradictiong", "contradicting", + "contriceptives", "contraceptives", + "controceptives", "contraceptives", + "controdictions", "contradictions", + "conversacional", "conversational", + "converstaional", "conversational", + "correpsondence", "correspondence", + "correspondants", "correspondents", + "correspondense", "correspondence", + "correspondente", "correspondence", + "corrispondants", "correspondents", + "corrispondence", "correspondence", + "corrospondence", "correspondence", + "costumizations", "customization", + "councidentally", "coincidentally", + "crystalisation", "crystallisation", + "curcumstantial", "circumstantial", + "demenstrations", "demonstrations", + "deminstrations", "demonstrations", + "demonstartions", "demonstrations", + "demonstrativno", "demonstrations", + "demonstrativos", "demonstrations", + "demosntrations", "demonstrations", + "desintegration", "disintegration", + "deterioriating", "deteriorating", + "determinisitic", "deterministic", + "differentiaton", "differentiation", + "disatisfaction", "dissatisfaction", + "discrimanatory", "discriminatory", + "discriminacion", "discrimination", + "discriminitory", "discriminatory", + "disillusionned", "disillusioned", + "diskrimination", "discrimination", + "disproportiate", "disproportionate", + "distingiushing", "distinguishing", + "distingquished", "distinguished", + "distingusihing", "distinguishing", + "distinquishing", "distinguishing", + "distuingishing", "distinguishing", + "dysfunctionnal", "dysfunctional", + "eldistribution", "redistribution", + "electromagnetc", "electromagnetic", + "electromagntic", "electromagnetic", + "endoctrination", "indoctrination", + "enthusiastisch", "enthusiastic", + "entrepreneuers", "entrepreneurs", + "entrepreneures", "entrepreneurs", + "enviormentally", "environmentally", + "enviromentally", "environmentally", + "environmentals", "environments", + "environmentaly", "environmentally", + "experimentaion", "experimentation", + "experimentella", "experimental", + "extraordinairy", "extraordinary", + "extraordinarly", "extraordinary", + "extrordinarily", "extraordinarily", + "fondamentalist", "fundamentalist", + "foreshadowning", "foreshadowing", + "functionallity", "functionality", + "fundamendalist", "fundamentalist", + "fundamentalits", "fundamentalists", + "fundamnetalist", "fundamentalist", + "fundemantalist", "fundamentalist", + "fundimentalist", "fundamentalist", + "fundumentalist", "fundamentalist", + "generalizacion", "generalization", + "generalizating", "generalization", + 
"generelization", "generalization", + "geographacilly", "geographically", + "geographycally", "geographically", + "geogrpahically", "geographically", + "geopraphically", "geographically", + "goegraphically", "geographically", + "grandchilderen", "grandchildren", + "gravitationnal", "gravitational", + "groubdbreaking", "groundbreaking", + "groudnbreaking", "groundbreaking", + "hallcuinations", "hallucination", + "hallicunations", "hallucinations", + "hallucenations", "hallucinations", + "halluciantions", "hallucinations", + "hallucinaitons", "hallucination", + "hallunications", "hallucinations", + "hallusinations", "hallucinations", + "halluzinations", "hallucinations", + "hellucinations", "hallucinations", + "heterosexuella", "heterosexual", + "hipothetically", "hypothetically", + "homosexuallity", "homosexuality", + "hullucinations", "hallucinations", + "hyopthetically", "hypothetically", + "hypathetically", "hypothetically", + "hypethetically", "hypothetically", + "hypotehtically", "hypothetically", + "hypotethically", "hypothetically", + "identificacion", "identification", + "identificaiton", "identification", + "identificativo", "identification", + "identifikation", "identification", + "imlpementation", "implementations", + "impelmentation", "implementations", + "impersonationg", "impersonating", + "implementacion", "implementation", + "implementaiton", "implementation", + "implementating", "implementation", + "implementatino", "implementations", + "implemetnation", "implementations", + "implimentation", "implementation", + "impossibillity", "impossibility", + "inadvertantely", "inadvertently", + "inappropriatly", "inappropriately", + "inapproprietly", "inappropriately", + "incompatablity", "incompatibility", + "incompatiblity", "incompatibility", + "inconsequental", "inconsequential", + "inconsistentcy", "inconsistency", + "incontrollably", "uncontrollably", + "inconventional", "unconventional", + "inconvienenced", "inconvenience", + "indestrictible", "indestructible", + "indestructuble", "indestructible", + "indetification", "identification", + "indistructible", "indestructible", + "individuallity", "individuality", + "indocrtination", "indoctrination", + "indoctrication", "indoctrination", + "indoktrination", "indoctrination", + "industiralized", "industrialized", + "industrailized", "industrialized", + "industrualized", "industrialized", + "industructible", "indestructible", + "inexplicablely", "inexplicably", + "infrastracture", "infrastructure", + "infrastructuur", "infrastructure", + "infrastrucutre", "infrastructure", + "infrastrukture", "infrastructure", + "infrastrutture", "infrastructure", + "infrasturcture", "infrastructure", + "initalisations", "initialisations", + "initalizations", "initializations", + "inplementation", "implementation", + "inspirationnal", "inspirational", + "instinctivelly", "instinctively", + "institutionale", "institutionalized", + "institutionals", "institutions", + "institutionnal", "institutional", + "intellectualis", "intellectuals", + "intellectualls", "intellectuals", + "intellecutally", "intellectually", + "intercepticons", "interceptions", + "interchangable", "interchangeable", + "interchangably", "interchangeably", + "interchangeble", "interchangeable", + "interchangebly", "interchangeably", + "interlectually", "intellectually", + "internationaal", "international", + "internationaly", "internationally", + "internationnal", "international", + "interpersonnal", "interpersonal", + "interpertation", "interpretation", + "interpratation", 
"interpretation", + "interpretacion", "interpretation", + "interpretaiton", "interpretations", + "interpretating", "interpretation", + "interpritation", "interpretation", + "interstellaire", "interstellar", + "intillectually", "intellectually", + "intrepretation", "interpretation", + "invesitgations", "investigations", + "investiagtions", "investigations", + "investigatiors", "investigations", + "investigativos", "investigations", + "investigstions", "investigations", + "irrationallity", "irrationally", + "irresponsibile", "irresponsible", + "journalistisch", "journalistic", + "justificativos", "justifications", + "koncentrations", "concentrations", + "liberatrianism", "libertarianism", + "libertarainism", "libertarianism", + "libertariansim", "libertarianism", + "libertarinaism", "libertarianism", + "libertaryanism", "libertarianism", + "libertatianism", "libertarianism", + "liberterianism", "libertarianism", + "libretarianism", "libertarianism", + "manufactureers", "manufactures", + "manufactureras", "manufactures", + "manufacturered", "manufactured", + "manufactureres", "manufacturers", + "manufactureros", "manufactures", + "massachusettes", "massachusetts", + "massachussetts", "massachusetts", + "mataphorically", "metaphorically", + "mathameticians", "mathematicians", + "mathemagically", "mathematically", + "mathematitians", "mathematicians", + "mathemetically", "mathematically", + "mathemeticians", "mathematicians", + "mathimatically", "mathematically", + "mediterainnean", "mediterranean", + "mediterrannean", "mediterranean", + "metaphotically", "metaphorically", + "metephorically", "metaphorically", + "methaporically", "metaphorically", + "metiphorically", "metaphorically", + "metophorically", "metaphorically", + "metropolitaine", "metropolitan", + "misconseptions", "misconceptions", + "misinterperted", "misinterpreted", + "misintrepreted", "misinterpreted", + "mulitnationals", "multinational", + "mulitplication", "multiplication", + "multiplicacion", "multiplication", + "multiplicaiton", "multiplication", + "multiplicativo", "multiplication", + "multiplikation", "multiplication", + "mutlinationals", "multinational", + "mutliplication", "multiplication", + "nationalisitic", "nationalistic", + "nationalistics", "nationalists", + "nationalisties", "nationalists", + "nationalistisk", "nationalists", + "neighbourhoood", "neighbourhood", + "nieghbourhoods", "neighbourhood", + "northereastern", "northeastern", + "objectificaton", "objectification", + "opthalmologist", "ophthalmologist", + "organizacional", "organizational", + "organizaitonal", "organizational", + "organziational", "organizational", + "orginazational", "organizational", + "overestemating", "overestimating", + "overextimating", "overestimating", + "overhwelmingly", "overwhelmingly", + "overhwlemingly", "overwhelmingly", + "overpolulation", "overpopulation", + "overpopluation", "overpopulation", + "oversetimating", "overestimating", + "overshadowered", "overshadowed", + "overwhemlingly", "overwhelmingly", + "overwhlemingly", "overwhelmingly", + "paliamentarian", "parliamentarian", + "parliamentiary", "parliamentary", + "performancepcs", "performances", + "personalitites", "personalities", + "pharamceutical", "pharmaceutical", + "pharmaceudical", "pharmaceutical", + "pharmacuetical", "pharmaceutical", + "pharmaseutical", "pharmaceutical", + "pharmeceutical", "pharmaceutical", + "philosophicaly", "philosophically", + "phramaceutical", "pharmaceutical", + "playthroughers", "playthroughs", + "porportionally", "proportionally", + 
"practitionners", "practitioners", + "predeterminded", "predetermined", + "predominantely", "predominantly", + "predominantley", "predominantly", + "preinitalizing", "preinitializing", + "prerequisities", "prerequisite", + "procrastinatin", "procrastination", + "procrastinaton", "procrastination", + "professionials", "professionalism", + "professionnals", "professionals", + "profitabillity", "profitability", + "progressivelly", "progressively", + "progressivisme", "progressives", + "pronounciation", "pronunciation", + "proportianally", "proportionally", + "proportionalty", "proportionally", + "proportionella", "proportionally", + "proprotionally", "proportionally", + "protruberances", "protuberances", + "pseudononymous", "pseudonymous", + "psychologicaly", "psychologically", + "qaulifications", "qualification", + "qualifiactions", "qualification", + "qualificaitons", "qualifications", + "quarterbackers", "quarterbacks", + "rationalizaton", "rationalization", + "reaponsibility", "responsibility", + "recommandation", "recommendation", + "recommedations", "recommendations", + "recommondation", "recommendation", + "reconnaissence", "reconnaissance", + "reconstruccion", "reconstruction", + "reconsturction", "reconstruction", + "redistirbution", "redistribution", + "redistribucion", "redistribution", + "redistributivo", "redistribution", + "redistrubition", "redistribution", + "refridgeration", "refrigeration", + "rehabilitacion", "rehabilitation", + "rehabilitaiton", "rehabilitation", + "reinforcemnets", "reinforcements", + "rekommendation", "recommendation", + "rektifications", "certifications", + "reniforcements", "reinforcements", + "repersentation", "representation", + "represantation", "representation", + "represantative", "representative", + "representacion", "representation", + "representaiton", "representations", + "representatief", "representative", + "representating", "representation", + "representativo", "representation", + "representetive", "representative", + "representitive", "representative", + "representstion", "representations", + "representstive", "representatives", + "represetnation", "representations", + "represnetation", "representations", + "reprezentative", "representative", + "repsonsibility", "responsibility", + "resistribution", "redistribution", + "responcibility", "responsibility", + "responisbility", "responsibility", + "responnsibilty", "responsibility", + "responsability", "responsibility", + "responsibilies", "responsibilities", + "responsibities", "responsibilities", + "restaraunteurs", "restaurateurs", + "retroactivelly", "retroactively", + "revolutionairy", "revolutionary", + "revolutionnary", "revolutionary", + "ridicilousness", "ridiculousness", + "ridicoulusness", "ridiculousness", + "rienforcements", "reinforcements", + "righteoussness", "righteousness", + "satisfactoraly", "satisfactory", + "satisfactority", "satisfactorily", + "sceintifically", "scientifically", + "schizophrentic", "schizophrenic", + "screenwrighter", "screenwriter", + "sensacionalism", "sensationalism", + "sensacionalist", "sensationalist", + "sensasionalism", "sensationalism", + "sensasionalist", "sensationalist", + "sensationality", "sensationalist", + "sensationalizm", "sensationalism", + "sensationalsim", "sensationalism", + "sensationilism", "sensationalism", + "sensationilist", "sensationalist", + "sensationslism", "sensationalism", + "sensetionalism", "sensationalism", + "sensibilisiert", "sensibilities", + "sentationalism", "sensationalism", + "sentationalist", "sensationalist", 
+ "senzationalism", "sensationalism", + "senzationalist", "sensationalist", + "sepcifications", "specification", + "simaltaneously", "simultaneously", + "simeltaneously", "simultaneously", + "similtaneously", "simultaneously", + "simlutaneously", "simultaneously", + "simplificacion", "simplification", + "simplificaiton", "simplification", + "simplificating", "simplification", + "simulatenously", "simultaneously", + "simulatneously", "simultaneously", + "simultaenously", "simultaneously", + "simultainously", "simultaneously", + "simultaneoulsy", "simultaneously", + "simultaniously", "simultaneously", + "simulteanously", "simultaneously", + "sistematically", "systematically", + "slaugterhouses", "slaughterhouses", + "specailization", "specialization", + "specialication", "specialization", + "specializaiton", "specialization", + "specificaitons", "specification", + "speciliazation", "specialization", + "spectacularely", "spectacularly", + "spectacularily", "spectacularly", + "spesifications", "specifications", + "spezialisation", "specialization", + "sportsmansship", "sportsmanship", + "spreadsheeters", "spreadsheets", + "straightforwad", "straightforward", + "subconcsiously", "subconsciously", + "subconsicously", "subconsciously", + "subsconciously", "subconsciously", + "sunconsciously", "subconsciously", + "superintendant", "superintendent", + "suppliementing", "supplementing", + "surrepetitious", "surreptitious", + "survivabililty", "survivability", + "survivabillity", "survivability", + "sustainabiltiy", "sustainability", + "syncronization", "synchronization", + "systemetically", "systematically", + "systimatically", "systematically", + "technologicaly", "technologically", + "thermodinamics", "thermodynamics", + "thermodyanmics", "thermodynamics", + "thermodymamics", "thermodynamics", + "thermodymanics", "thermodynamics", + "thermodynamcis", "thermodynamics", + "thermodynanics", "thermodynamics", + "thermodynmaics", "thermodynamics", + "thernodynamics", "thermodynamics", + "theromdynamics", "thermodynamics", + "transformacion", "transformation", + "transfromation", "transformation", + "transitionable", "transitional", + "transitionning", "transitioning", + "transofrmation", "transformation", + "trasnformation", "transformation", + "trasnportation", "transportation", + "unbelievablely", "unbelievably", + "unchallengable", "unchallengeable", + "uncomfortabley", "uncomfortably", + "uncomfortablly", "uncomfortably", + "unconciousness", "unconsciousness", + "unconditionaly", "unconditionally", + "unconditionnal", "unconditional", + "unconsciouslly", "unconsciously", + "uncontrallable", "uncontrollable", + "uncontrallably", "uncontrollably", + "uncontrolablly", "uncontrollably", + "unconvectional", "unconventional", + "unconvencional", "unconventional", + "unconvensional", "unconventional", + "unconventianal", "unconventional", + "underastimated", "underestimated", + "underestamated", "underestimated", + "underestemated", "underestimated", + "underestimeted", "underestimated", + "undersetimated", "underestimated", + "understandebly", "understandably", + "understandible", "understandable", + "understandibly", "understandably", + "undestructible", "indestructible", + "unforetunately", "unfortunately", + "unfortunatelly", "unfortunately", + "unfourtunately", "unfortunately", + "uninitalizable", "uninitializable", + "unintelligient", "unintelligent", + "unintentionaly", "unintentionally", + "unintentionnal", "unintentional", + "unmanouverable", "unmaneuverable", + "unneccessarily", "unnecessarily", 
+ "unnecessarilly", "unnecessarily", + "unprecendented", "unprecedented", + "unprofessionel", "unprofessional", + "unreasonablely", "unreasonably", + "unsubstantiaed", "unsubstantiated", + "unsurprizingly", "unsurprisingly", + "vizualisations", "visualization", + "vulnerabilites", "vulnerabilities", + "vulnerabillity", "vulnerability", + "vulnerablility", "vulnerability", + "wholeheartadly", "wholeheartedly", + "wholeheartidly", "wholeheartedly", + "abbrievations", "abbreviation", + "accelleration", "acceleration", + "accomadations", "accommodations", + "accommadating", "accommodating", + "accommadation", "accommodation", + "accommidation", "accommodation", + "accomodations", "accommodations", + "accomondating", "accommodating", + "accomondation", "accommodation", + "accomplishent", "accomplishment", + "accountabilty", "accountability", + "accredidation", "accreditation", + "acknolwedging", "acknowledging", + "acknowlegding", "acknowledging", + "acomplishment", "accomplishment", + "acquaintaince", "acquaintance", + "acquaintences", "acquaintances", + "acquaintinces", "acquaintances", + "acquanitances", "acquaintance", + "acquantainces", "acquaintances", + "acquantiances", "acquaintances", + "acquiantances", "acquaintances", + "acquiantences", "acquaintances", + "adminastrator", "administrator", + "administartor", "administrator", + "administraion", "administration", + "administraron", "administrator", + "administrater", "administrator", + "administratio", "administrator", + "administraton", "administration", + "adminsitrator", "administrator", + "adminstration", "administration", + "adminstrative", "administrative", + "admissability", "admissibility", + "adnimistrator", "administrators", + "adverticement", "advertisement", + "advertisiment", "advertisement", + "advertisments", "advertisements", + "advirtisement", "advertisement", + "aestethically", "aesthetically", + "aesthatically", "aesthetically", + "aesthitically", "aesthetically", + "affectionnate", "affectionate", + "aforementiond", "aforementioned", + "agriculturual", "agricultural", + "agrumentative", "argumentative", + "alterantively", "alternatively", + "alternativets", "alternatives", + "alternativley", "alternatively", + "alternitavely", "alternatively", + "alternitively", "alternatively", + "aninteresting", "uninteresting", + "annoucnements", "announcements", + "antagonisitic", "antagonistic", + "anthropolgist", "anthropologist", + "apporpriately", "appropriately", + "apporpriation", "appropriation", + "apporximately", "approximately", + "appreciateing", "appreciating", + "appreciateive", "appreciative", + "appreciationg", "appreciating", + "appropirately", "appropriately", + "appropiration", "appropriation", + "appropraitely", "appropriately", + "appropreation", "appropriation", + "appropriatley", "appropriately", + "appropropiate", "appropriate", + "approrpiation", "appropriation", + "approxamately", "approximately", + "approxiamtely", "approximately", + "approximatley", "approximately", + "approximitely", "approximately", + "aqcuaintances", "acquaintances", + "aqquaintances", "acquaintances", + "archaelogical", "archaeological", + "archaelogists", "archaeologists", + "archeaologist", "archeologist", + "archetectural", "architectural", + "architechture", "architecture", + "architechural", "architectural", + "architectrual", "architectural", + "architecutral", "architectural", + "argumentitive", "argumentative", + "arugmentative", "argumentative", + "asethetically", "aesthetically", + "assasinations", "assassinations", + 
"audomoderator", "automoderator", + "australianess", "australians", + "authenticaion", "authentication", + "authenticaton", "authentication", + "autherization", "authorization", + "authoratitive", "authoritative", + "authoritatian", "authoritarian", + "authoritation", "authorization", + "authorititive", "authoritative", + "authoritorian", "authoritarian", + "authorotative", "authoritative", + "authroization", "authorization", + "automoderador", "automoderator", + "automoderater", "automoderator", + "automodorator", "automoderator", + "automoterator", "automoderator", + "autoritharian", "authoritarian", + "availabillity", "availability", + "awknowledging", "acknowledging", + "billingualism", "bilingualism", + "billionairres", "billionaire", + "borderlanders", "borderlands", + "breadtfeeding", "breastfeeding", + "breastfeading", "breastfeeding", + "breatsfeeding", "breastfeeding", + "broadacasting", "broadcasting", + "bureaucractic", "bureaucratic", + "bureaucratics", "bureaucrats", + "bureaucratius", "bureaucrats", + "californiaman", "californian", + "calrification", "clarification", + "capitalizaton", "capitalization", + "carbohdyrates", "carbohydrates", + "carbohidrates", "carbohydrates", + "carbohyrdates", "carbohydrates", + "carboyhdrates", "carbohydrates", + "carthographer", "cartographer", + "catagorically", "categorically", + "catastrophies", "catastrophe", + "catastrophize", "catastrophe", + "catigorically", "categorically", + "catterpillars", "caterpillars", + "celebrationis", "celebrations", + "ceritfication", "certifications", + "certificaiton", "certification", + "championchips", "championship", + "championshiop", "championships", + "championsship", "championships", + "chanpionships", "championships", + "charactarized", "characterized", + "characterisic", "characteristic", + "characteristc", "characteristics", + "characterists", "characteristics", + "charicterized", "characterized", + "charismatisch", "charismatic", + "checkpointusa", "checkpoints", + "cheeseburgare", "cheeseburger", + "cheeseburgler", "cheeseburger", + "cheeseburguer", "cheeseburger", + "cheezeburgers", "cheeseburgers", + "chornological", "chronological", + "chronoligical", "chronological", + "chronologicly", "chronological", + "cinematograhy", "cinematography", + "cinematograpy", "cinematography", + "circomference", "circumference", + "circumcission", "circumcision", + "circumferance", "circumference", + "circumsicions", "circumcision", + "circumstanial", "circumstantial", + "circumstantal", "circumstantial", + "circumstnaces", "circumstance", + "circunference", "circumference", + "circunstances", "circumstances", + "cirucmference", "circumference", + "cirucmstances", "circumstances", + "civilications", "civilizations", + "civilizaitons", "civilizations", + "clarificaiton", "clarification", + "clasification", "clarification", + "clerification", "clarification", + "coincidentaly", "coincidentally", + "coincidential", "coincidental", + "colaborations", "collaborations", + "collabaration", "collaboration", + "collaberation", "collaboration", + "collaberative", "collaborative", + "collaboratore", "collaborate", + "collectioners", "collections", + "collectivelly", "collectively", + "collobaration", "collaboration", + "combatibility", "compatibility", + "comeptitively", "competitively", + "comfortablely", "comfortably", + "comfortablity", "comfortably", + "comfrontation", "confrontation", + "commemerative", "commemorative", + "commericially", "commercially", + "commerorative", "commemorative", + "comminication", 
"communication", + "comminucation", "communications", + "commissionees", "commissions", + "commissionned", "commissioned", + "commissionner", "commissioner", + "commmemorated", "commemorated", + "commuications", "communications", + "commuincation", "communications", + "communciation", "communication", + "communiaction", "communications", + "communicaiton", "communication", + "communicatoin", "communications", + "communicatons", "communications", + "compadibility", "compatibility", + "comparativley", "comparatively", + "comparetively", "comparatively", + "comparitavely", "comparatively", + "comparitively", "comparatively", + "compatability", "compatibility", + "compatibiltiy", "compatibility", + "compeditively", "competitively", + "compensantion", "compensation", + "compensationg", "compensating", + "comperatively", "comparatively", + "comperhension", "comprehension", + "competatively", "competitively", + "competitavely", "competitively", + "competitevely", "competitively", + "competitivley", "competitively", + "competiveness", "competitiveness", + "compilcations", "complication", + "compitability", "compatibility", + "complciations", "complication", + "complecations", "complications", + "compliactions", "complication", + "complicaitons", "complication", + "complilations", "complications", + "complimentery", "complimentary", + "complimentoni", "complimenting", + "complimentory", "complimentary", + "comprehention", "comprehension", + "computacional", "computational", + "comtamination", "contamination", + "comtemplating", "contemplating", + "concatination", "contamination", + "conceivablely", "conceivably", + "concencration", "concentration", + "concenrtation", "concentrations", + "concentartion", "concentrations", + "concentracion", "concentration", + "concentraited", "concentrated", + "concentraiton", "concentrations", + "concentratons", "concentrations", + "concervatives", "conservatives", + "concideration", "consideration", + "concioussness", "consciousness", + "concnetration", "concentrations", + "concsiousness", "consciousness", + "condascending", "condescending", + "condescencion", "condescension", + "condescendion", "condescension", + "condescensing", "condescension", + "condiscending", "condescending", + "conditionning", "conditioning", + "condradicting", "contradicting", + "condradiction", "contradiction", + "condradictory", "contradictory", + "conecntration", "concentrations", + "conenctration", "concentrations", + "confidentally", "confidentially", + "configrations", "configurations", + "configruation", "configurations", + "configuartion", "configuration", + "configuracion", "configuration", + "configuraiton", "configuration", + "configuratoin", "configurations", + "configureable", "configurable", + "confrentation", "confrontation", + "confrontacion", "confrontation", + "confrontating", "confrontation", + "confrontativo", "confrontation", + "congratualted", "congratulate", + "conifguration", "configurations", + "conisderation", "considerations", + "connecticunts", "connecticut", + "connectivitiy", "connectivity", + "conpassionate", "compassionate", + "conplications", "complications", + "conplimentary", "complimentary", + "conplimenting", "complimenting", + "conprehension", "comprehension", + "consdieration", "considerations", + "consenquently", "consequently", + "consentrating", "concentrating", + "consentration", "concentration", + "consequencies", "consequence", + "consequentely", "consequently", + "consequeseces", "consequences", + "conservatisim", "conservatism", + 
"conservativsm", "conservatism", + "conservitives", "conservatives", + "consicousness", "consciousness", + "considerabely", "considerable", + "considerabile", "considerable", + "considerabley", "considerably", + "considerablly", "considerably", + "consideracion", "consideration", + "consideratoin", "considerations", + "considerstion", "considerations", + "considertaion", "considerations", + "consituencies", "constituencies", + "consitutional", "constitutional", + "constallation", "constellation", + "constarnation", "consternation", + "constillation", "constellation", + "constituintes", "constituents", + "constituional", "constitutional", + "constitutents", "constitutes", + "constitutinal", "constitutional", + "constructicon", "construction", + "constructieve", "constructive", + "constructiong", "constructing", + "consttruction", "construction", + "contaminacion", "contamination", + "contaminanted", "contaminated", + "contanimation", "contamination", + "contenplating", "contemplating", + "contimplating", "contemplating", + "contraceptivo", "contraception", + "contradiccion", "contradiction", + "contradicitng", "contradicting", + "contradiciton", "contradiction", + "contradictary", "contradictory", + "contradictons", "contradicts", + "contraticting", "contradicting", + "contravercial", "controversial", + "contraversial", "controversial", + "contreception", "contraception", + "contreversial", "controversial", + "contributeurs", "contributes", + "contributiors", "contributors", + "contriception", "contraception", + "contridictory", "contradictory", + "contritutions", "contributions", + "contriversial", "controversial", + "controception", "contraception", + "controdicting", "contradicting", + "controdiction", "contradiction", + "controvercial", "controversial", + "controverisal", "controversial", + "controversary", "controversy", + "controversity", "controversy", + "controvertial", "controversial", + "contstruction", "construction", + "conventionnal", "conventional", + "converastions", "conservation", + "conversationa", "conservation", + "conversationg", "conservation", + "conversationy", "conservation", + "conversatiosn", "conservation", + "conversatives", "conservatives", + "converstaions", "conversations", + "convorsations", "conversations", + "cooresponding", "corresponding", + "coorperations", "corporations", + "correctionals", "corrections", + "correpsonding", "corresponding", + "correspondant", "correspondent", + "correspondece", "correspondence", + "corresponders", "corresponds", + "corresponsing", "corresponding", + "corrispondant", "correspondent", + "corrisponding", "corresponding", + "corrosponding", "corresponding", + "costomization", "customization", + "costumization", "customization", + "counterfeight", "counterfeit", + "creationistas", "creationists", + "cricumference", "circumference", + "cringeworthey", "cringeworthy", + "cringeworthly", "cringeworthy", + "crytopgraphic", "cryptographic", + "curcumference", "circumference", + "curcumstances", "circumstances", + "custumization", "customization", + "cuztomization", "customization", + "decentraliced", "decentralized", + "decentrilized", "decentralized", + "decomissioned", "decommissioned", + "decompositing", "decomposing", + "definitivelly", "definitively", + "deinitalizing", "deinitializing", + "demenstration", "demonstration", + "democraticaly", "democratically", + "democraticlly", "democratically", + "demoninations", "denominations", + "demonstarting", "demonstrating", + "demonstartion", "demonstration", + "demonstraiton", 
"demonstrations", + "demonstratbly", "demonstrably", + "demonstraties", "demonstrate", + "demonstrativo", "demonstration", + "demosntrating", "demonstrating", + "demosntration", "demonstrations", + "denomenations", "denominations", + "denomonations", "denominations", + "deomnstration", "demonstrations", + "dermatalogist", "dermatologist", + "dermatolagist", "dermatologist", + "dermatoligist", "dermatologist", + "dermatologyst", "dermatologist", + "dermetologist", "dermatologist", + "dermitologist", "dermatologist", + "derpatologist", "dermatologist", + "desentralized", "decentralized", + "desillusioned", "disillusioned", + "desintegrated", "disintegrated", + "desinterested", "disinterested", + "determenation", "determination", + "determinacion", "determination", + "determinining", "determining", + "determinisitc", "deterministic", + "determinsitic", "deterministic", + "detmatologist", "dermatologist", + "developmently", "developmental", + "dezentralized", "decentralized", + "differantiate", "differentiate", + "differenciate", "differentiate", + "differintiate", "differentiate", + "diffirentiate", "differentiate", + "disadvandages", "disadvantaged", + "disadvantadge", "disadvantaged", + "disadvanteged", "disadvantaged", + "disadvanteges", "disadvantages", + "disadvatanges", "disadvantages", + "disadventaged", "disadvantaged", + "disadventages", "disadvantages", + "disallusioned", "disillusioned", + "disappearence", "disappearance", + "disappearnace", "disappearance", + "disappearring", "disappearing", + "disatvantaged", "disadvantaged", + "disatvantages", "disadvantages", + "disciplinairy", "disciplinary", + "disciplinerad", "disciplined", + "discipliniary", "disciplinary", + "disconnecters", "disconnects", + "discontinuted", "discontinued", + "discrimianted", "discriminated", + "discriminante", "discriminate", + "discriminatie", "discriminate", + "discriminatin", "discrimination", + "disillisioned", "disillusioned", + "disillutioned", "disillusioned", + "disingenuious", "disingenuous", + "disollusioned", "disillusioned", + "disrecpectful", "disrespectful", + "disrecpecting", "disrespecting", + "disrepsectful", "disrespectful", + "disrepsecting", "disrespecting", + "disresepctful", "disrespectful", + "disresepcting", "disrespecting", + "disrespection", "disrespecting", + "disrespekting", "disrespecting", + "disrispectful", "disrespectful", + "disrispecting", "disrespecting", + "dissagreement", "disagreement", + "dissapearance", "disappearance", + "dissapointted", "dissapointed", + "dissappointed", "disappointed", + "dissobediance", "disobedience", + "dissobedience", "disobedience", + "distingishing", "distinguishing", + "distinguising", "distinguishing", + "distinquished", "distinguished", + "distirbutions", "distributions", + "distiungished", "distinguished", + "distribustion", "distributions", + "distributiors", "distributors", + "distributivos", "distributions", + "distrobutions", "distributions", + "distrubitions", "distributions", + "distuingished", "distinguished", + "documantaries", "documentaries", + "documenatries", "documentaries", + "documentacion", "documentation", + "documentaires", "documentaries", + "documentaiton", "documentation", + "documentarios", "documentaries", + "documentaties", "documentaries", + "documentating", "documentation", + "documenteries", "documentaries", + "documentories", "documentaries", + "drammatically", "grammatically", + "dsyfunctional", "dysfunctional", + "dumbfoundeads", "dumbfounded", + "dusfunctional", "dysfunctional", + "dustification", 
"justification", + "dysfonctional", "dysfunctional", + "dysfucntional", "dysfunctional", + "dysfuncitonal", "dysfunctional", + "dysfunktional", "dysfunctional", + "easthetically", "aesthetically", + "effectiviness", "effectiveness", + "effictiveness", "effectiveness", + "effortlessely", "effortlessly", + "effortlessley", "effortlessly", + "embarrasement", "embarrassment", + "embarrasments", "embarrassment", + "embarressment", "embarrassment", + "emberrassment", "embarrassment", + "encarceration", "incarceration", + "encorporating", "incorporating", + "encyclopeadia", "encyclopedia", + "encyclopeadic", "encyclopedia", + "encyclopeedia", "encyclopedia", + "encycolpedias", "encyclopedia", + "endoctrinated", "indoctrinated", + "enlightenting", "enlightening", + "enlightnement", "enlightenment", + "enligthenment", "enlightenment", + "enteratinment", "entertainment", + "enterpreneurs", "entrepreneurs", + "enterprenuers", "entrepreneurs", + "enterpreuners", "entrepreneurs", + "entertianment", "entertainment", + "enthusiasists", "enthusiasts", + "enthusiastics", "enthusiasts", + "entrepraneurs", "entrepreneurs", + "entreprenaurs", "entrepreneurs", + "entrepreneuer", "entrepreneurs", + "entreprenours", "entrepreneurs", + "entreprenuers", "entrepreneurs", + "entreprenures", "entrepreneurs", + "entrepreuners", "entrepreneurs", + "entretainment", "entertainment", + "enviornmental", "environmental", + "environemntal", "environmental", + "environmently", "environmental", + "envolutionary", "evolutionary", + "envrionmental", "environmental", + "estabilshment", "establishments", + "establishemnt", "establishments", + "establishmnet", "establishments", + "establsihment", "establishments", + "estbalishment", "establishments", + "ethnocentricm", "ethnocentrism", + "evolutionairy", "evolutionary", + "evolutionarly", "evolutionary", + "evolutionnary", "evolutionary", + "exaggeratting", "exaggerating", + "excpetionally", "exceptionally", + "executioneers", "executioner", + "existentiella", "existential", + "expectionally", "exceptionally", + "experementing", "experimenting", + "experienceing", "experiencing", + "experimentais", "experiments", + "experimention", "experimenting", + "experimentors", "experiments", + "expirementing", "experimenting", + "expodentially", "exponentially", + "exponantially", "exponentially", + "exponencially", "exponentially", + "exponentiella", "exponential", + "extraodrinary", "extraordinary", + "extraordianry", "extraordinary", + "extraordinair", "extraordinary", + "extraordinaly", "extraordinary", + "extraoridnary", "extraordinary", + "extremeophile", "extremophile", + "extroardinary", "extraordinary", + "familiarizate", "familiarize", + "fantasitcally", "fantastically", + "fantasmically", "fantastically", + "fantistically", "fantastically", + "faptastically", "fantastically", + "figurativeley", "figuratively", + "figurativelly", "figuratively", + "frankenstiens", "frankenstein", + "frankenstined", "frankenstein", + "frankenstiner", "frankenstein", + "frankenstines", "frankenstein", + "friendzoneado", "friendzoned", + "fucntionality", "functionality", + "funcitonality", "functionality", + "functionailty", "functionality", + "fundamentalis", "fundamentals", + "fundamnetally", "fundamentally", + "fundementally", "fundamentally", + "fundimentally", "fundamentally", + "gamifications", "ramifications", + "generalizaing", "generalizing", + "generalizaton", "generalization", + "generationals", "generations", + "generationens", "generations", + "generationers", "generations", + 
"generationnal", "generational", + "geographicaly", "geographically", + "geographicial", "geographical", + "geometricians", "geometers", + "goreshadowing", "foreshadowing", + "governmential", "governmental", + "gradification", "gratification", + "grammarically", "grammatically", + "grandchildern", "grandchildren", + "gratificacion", "gratification", + "gratificaiton", "gratification", + "grativational", "gravitational", + "gravitacional", "gravitational", + "gravitaitonal", "gravitational", + "hallcuination", "hallucination", + "hallicunation", "hallucination", + "hallucenation", "hallucination", + "halluciantion", "hallucinations", + "hallukination", "hallucination", + "hallunication", "hallucination", + "hallusination", "hallucination", + "halluzination", "hallucination", + "heiroglyphics", "hieroglyphics", + "hellucination", "hallucination", + "highlightning", "highlighting", + "homesexuality", "homosexuality", + "homosexualtiy", "homosexuality", + "homosexulaity", "homosexuality", + "horizontallly", "horizontally", + "hullucination", "hallucination", + "hypocriticial", "hypocritical", + "hypotheticaly", "hypothetically", + "hystericallly", "hysterically", + "identificaton", "identification", + "ideoligically", "ideologically", + "ideosyncratic", "idiosyncratic", + "idiologically", "ideologically", + "illistrations", "illustrations", + "illustartions", "illustrations", + "imperfactions", "imperfections", + "impersinating", "impersonating", + "implementaion", "implementation", + "implementatin", "implementations", + "implimenation", "implementation", + "imprefections", "imperfections", + "impresonating", "impersonating", + "inaccessibile", "inaccessible", + "inadventently", "inadvertently", + "inadverdently", "inadvertently", + "inadvertantly", "inadvertently", + "inadvertendly", "inadvertently", + "inapporpriate", "inappropriate", + "inappropirate", "inappropriate", + "inappropraite", "inappropriate", + "inaproppriate", "inappropriate", + "incarcaration", "incarceration", + "incarciration", "incarceration", + "incarseration", "incarceration", + "incerceration", "incarceration", + "incidentially", "incidentally", + "incomfortable", "uncomfortable", + "incomfortably", "uncomfortably", + "incompatabile", "incompatible", + "incompatiable", "incompatible", + "incompatibile", "incompatible", + "inconciderate", "inconsiderate", + "inconcistency", "inconsistency", + "inconditional", "unconditional", + "inconsciously", "unconsciously", + "inconsiderant", "inconsiderate", + "inconsistance", "inconsistency", + "inconsistancy", "inconsistency", + "inconsistenly", "inconsistency", + "inconsistensy", "inconsistency", + "inconsistenty", "inconsistency", + "inconveinence", "inconvenience", + "inconveniance", "inconvenience", + "inconveniente", "inconvenience", + "inconvienence", "inconvenience", + "incoroporated", "incorporated", + "incorparating", "incorporating", + "incorperating", "incorporating", + "incorperation", "incorporation", + "incorruptable", "incorruptible", + "incramentally", "incrementally", + "incrementarla", "incremental", + "incrementarlo", "incremental", + "indavertently", "inadvertently", + "indefinitelly", "indefinitely", + "independantes", "independents", + "independantly", "independently", + "independendet", "independent", + "independendly", "independently", + "indepentently", "independently", + "indespensable", "indispensable", + "indespensible", "indispensable", + "indestructble", "indestructible", + "indestructibe", "indestructible", + "indictrinated", "indoctrinated", + 
"indipendently", "independently", + "indispensible", "indispensable", + "indivuduality", "individuality", + "indocrtinated", "indoctrinated", + "indocternated", "indoctrinated", + "indoctornated", "indoctrinated", + "indoctrinatie", "indoctrinated", + "indoctrinatin", "indoctrination", + "indoctronated", "indoctrinated", + "industrialied", "industrialized", + "industrialzed", "industrialized", + "inexeprienced", "inexperience", + "inexpeirenced", "inexperience", + "inexpereinced", "inexperienced", + "inexperianced", "inexperienced", + "inexperiecned", "inexperience", + "inexperineced", "inexperience", + "inexpierenced", "inexperienced", + "inexplicabley", "inexplicably", + "inexplicablly", "inexplicably", + "infilitration", "infiltration", + "infrastructre", "infrastructure", + "infrastrucure", "infrastructure", + "inintelligent", "unintelligent", + "ininteresting", "uninteresting", + "initalisation", "initialisation", + "initalization", "initialization", + "inperfections", "imperfections", + "inpersonating", "impersonating", + "inpossibility", "impossibility", + "inpredictable", "unpredictable", + "inresponsible", "irresponsible", + "insectiverous", "insectivorous", + "insecuritites", "insecurities", + "insiginficant", "insignificant", + "insiginifcant", "insignificant", + "insignificent", "insignificant", + "insignificunt", "insignificant", + "insignifigant", "insignificant", + "insiprational", "inspirational", + "insperational", "inspirational", + "inspiritional", "inspirational", + "inspriational", "inspirational", + "instantaenous", "instantaneous", + "instantanious", "instantaneous", + "instanteneous", "instantaneous", + "instantenious", "instantaneous", + "instincitvely", "instinctively", + "instinctivley", "instinctively", + "instititional", "institutional", + "institutionel", "institutional", + "insturmentals", "instrumental", + "instutitional", "institutional", + "insustainable", "unsustainable", + "intelelctuals", "intellectuals", + "intellectualy", "intellectually", + "intellectuels", "intellectuals", + "intellecutals", "intellectuals", + "intellegently", "intelligently", + "intelluctuals", "intellectuals", + "intepretation", "interpretation", + "intereactions", "intersections", + "interesctions", "intersections", + "interlectuals", "intellectuals", + "intermittient", "intermittent", + "intermittment", "intermittent", + "internacional", "international", + "interpersonel", "interpersonal", + "interpresonal", "interpersonal", + "interpretaion", "interpretation", + "interpretarea", "interpreter", + "interpretarem", "interpreter", + "interpretares", "interpreter", + "interpretarse", "interpreter", + "interpretarte", "interpreter", + "interpretatin", "interpretations", + "interpreteert", "interpreter", + "interragation", "interrogation", + "interregation", "interrogation", + "interrigation", "interrogation", + "interrogacion", "interrogation", + "interrogativo", "interrogation", + "intertainment", "entertainment", + "intillectuals", "intellectuals", + "intraspection", "introspection", + "intrensically", "intrinsically", + "intriniscally", "intrinsically", + "intrinsecally", "intrinsically", + "intrisincally", "intrinsically", + "intristically", "intrinsically", + "introductiory", "introductory", + "introspeccion", "introspection", + "introspectivo", "introspection", + "introspektion", "introspection", + "invertibrates", "invertebrates", + "invesitgation", "investigation", + "invesitgative", "investigative", + "invesitgators", "investigators", + "investagators", "investigators", + 
"investegating", "investigating", + "investegators", "investigators", + "investiagtion", "investigation", + "investiagtive", "investigative", + "investigacion", "investigation", + "investigaiton", "investigations", + "investigaters", "investigators", + "investigativo", "investigation", + "investigatons", "investigations", + "investigsting", "investigating", + "investigstion", "investigations", + "investogators", "investigators", + "invisibillity", "invisibility", + "involuntarely", "involuntary", + "involuntarity", "involuntary", + "invulnerabile", "invulnerable", + "irrationallly", "irrationally", + "irresponcible", "irresponsible", + "irresponisble", "irresponsible", + "irresponsable", "irresponsible", + "irresponsbile", "irresponsible", + "irreversiable", "irreversible", + "irreversibelt", "irreversible", + "irreversibile", "irreversible", + "irrisponsible", "irresponsible", + "jacksonvillle", "jacksonville", + "journalisitic", "journalistic", + "journalistens", "journalists", + "journalisters", "journalists", + "journalistisk", "journalists", + "jsutification", "justifications", + "jurisdicitons", "jurisdictions", + "jurisidctions", "jurisdictions", + "juristictions", "jurisdictions", + "jursidictions", "jurisdictions", + "jusitfication", "justifications", + "justifiaction", "justifications", + "justificacion", "justification", + "justificaiton", "justification", + "justificativo", "justification", + "justificatons", "justifications", + "justificstion", "justifications", + "justiifcation", "justifications", + "karbohydrates", "carbohydrates", + "knoweldgeable", "knowledgeable", + "knowledegable", "knowledgeable", + "knowledgebale", "knowledgable", + "knowlegdeable", "knowledgeable", + "kollaboration", "collaboration", + "koncentration", "concentration", + "konfiguration", "configuration", + "konfrontation", "confrontation", + "konservatives", "conservatives", + "konstellation", "constellation", + "kontamination", "contamination", + "legitimatelly", "legitimately", + "libertariaism", "libertarianism", + "libertariansm", "libertarianism", + "libitarianisn", "libertarianism", + "lighthearthed", "lighthearted", + "mainfestation", "manifestation", + "manafacturers", "manufacturers", + "manafacturing", "manufacturing", + "manafestation", "manifestation", + "manefestation", "manifestation", + "manfuacturers", "manufactures", + "manifacturers", "manufacturers", + "manifacturing", "manufacturing", + "manifastation", "manifestation", + "manifestacion", "manifestation", + "manifestating", "manifestation", + "manifistation", "manifestation", + "manipulationg", "manipulating", + "manufacterers", "manufacturers", + "manufactering", "manufacturing", + "manufacterurs", "manufactures", + "manufactorers", "manufacturers", + "manufactoring", "manufacturing", + "manufactuered", "manufactured", + "manufactuerer", "manufacturer", + "manufactueres", "manufactures", + "manufacturedd", "manufactured", + "manufactureds", "manufactures", + "manufacturerd", "manufactured", + "manufacturier", "manufacturer", + "manufacturors", "manufacturers", + "manufactuters", "manufactures", + "manufacutrers", "manufactures", + "manufcaturers", "manufactures", + "marshmalllows", "marshmallows", + "massachsuetts", "massachusetts", + "massachucetts", "massachusetts", + "massachuestts", "massachusetts", + "massachusents", "massachusetts", + "massachusites", "massachusetts", + "massachussets", "massachusetts", + "massechusetts", "massachusetts", + "masturbateing", "masturbating", + "materialisimo", "materialism", + "mathamatician", 
"mathematician", + "mathametician", "mathematician", + "mathematicals", "mathematics", + "mathematicaly", "mathematically", + "mathematicans", "mathematics", + "mathematicion", "mathematician", + "mathematitian", "mathematician", + "mathemetician", "mathematician", + "mathmatically", "mathematically", + "mathmaticians", "mathematicians", + "mechanicallly", "mechanically", + "medeterranean", "mediterranean", + "meditarrenean", "mediterranean", + "meditereanean", "mediterranean", + "membranaphone", "membranophone", + "metamorphysis", "metamorphosis", + "metaphoricaly", "metaphorically", + "metaphoricial", "metaphorical", + "metaphysicals", "metaphysics", + "metaphysicans", "metaphysics", + "methamatician", "mathematician", + "methematician", "mathematician", + "metropolitain", "metropolitan", + "metropolitcan", "metropolitan", + "metropolitian", "metropolitan", + "millionairres", "millionaire", + "minneapolites", "minneapolis", + "misanderstood", "misunderstood", + "miscellanious", "miscellaneous", + "misconcpetion", "misconceptions", + "misconecption", "misconceptions", + "misinterperet", "misinterpret", + "misinterprate", "misinterpret", + "misinterprent", "misinterpret", + "misinterprted", "misinterpret", + "misogynisitic", "misogynistic", + "misrepreseted", "misrepresented", + "misunterstood", "misunderstood", + "modificaitons", "modifications", + "motivationals", "motivations", + "motivationnal", "motivational", + "mulitnational", "multinational", + "multimational", "multinational", + "multiplicaton", "multiplication", + "muncipalities", "municipalities", + "munnicipality", "municipality", + "mutlinational", "multinational", + "nacionalistic", "nationalistic", + "narcissisitic", "narcissistic", + "narcississtic", "narcissistic", + "natioanlistic", "nationalistic", + "nationalisitc", "nationalistic", + "nationalistes", "nationalists", + "nationalsitic", "nationalistic", + "neigbhourhood", "neighbourhood", + "neighboorhoud", "neighbourhood", + "neighborehood", "neighbourhood", + "neighborhoood", "neighborhoods", + "neighbourbood", "neighbourhood", + "neighbourgood", "neighbourhood", + "neighbourhoud", "neighbourhood", + "neighourhoods", "neighborhoods", + "nieghborhoods", "neighborhoods", + "nieghbourhood", "neighbourhood", + "noncombatents", "noncombatants", + "noninitalized", "noninitialized", + "northwestener", "northwestern", + "notificaitons", "notifications", + "occassionally", "occasionally", + "operationable", "operational", + "oppertunities", "opportunities", + "opprotunities", "opportunities", + "oppurtunities", "opportunities", + "opthamologist", "ophthalmologist", + "organistaions", "organisations", + "organizatinal", "organizational", + "organizativos", "organizations", + "organsiations", "organisations", + "organziations", "organizations", + "orginasations", "organisations", + "orginazations", "organizations", + "overpopulaton", "overpopulation", + "overreactiong", "overreacting", + "overshaddowed", "overshadowed", + "overwheliming", "overwhelming", + "overwhelmigly", "overwhelmingly", + "overwhelmingy", "overwhelmingly", + "overwhelminly", "overwhelmingly", + "palestininans", "palestinians", + "paraphraseing", "paraphrasing", + "paraphrashing", "paraphrasing", + "parilamentary", "parliamentary", + "parlaimentary", "parliamentary", + "parliamantary", "parliamentary", + "parliamentery", "parliamentary", + "parliamnetary", "parliamentary", + "parliementary", "parliamentary", + "particiaption", "participation", + "participacion", "participation", + "participantes", 
"participants", + "participativo", "participation", + "particularely", "particularly", + "particularily", "particularly", + "particularlly", "particularly", + "partizipation", "participation", + "passionatelly", "passionately", + "paychiatrists", "psychiatrists", + "paychologists", "psychologists", + "pennsylvainia", "pennsylvania", + "pennsylvanica", "pennsylvania", + "pennsylvannia", "pennsylvania", + "perdominantly", "predominantly", + "perpandicular", "perpendicular", + "perpendicualr", "perpendicular", + "perpenticular", "perpendicular", + "perpetuationg", "perpetuating", + "perpindicular", "perpendicular", + "personalitits", "personalities", + "pessimistisch", "pessimistic", + "pharmaceutial", "pharmaceutical", + "philisophical", "philosophical", + "philosiphical", "philosophical", + "philosohpical", "philosophical", + "philosophycal", "philosophically", + "philospohical", "philosophical", + "phisiological", "physiological", + "photagraphers", "photographers", + "photographics", "photographs", + "photographied", "photographed", + "photographier", "photographer", + "photograpphed", "photographed", + "photogrophers", "photographers", + "photogrpahers", "photographers", + "photoshoppade", "photoshopped", + "photoshoppped", "photoshopped", + "phsyiological", "physiological", + "phychiatrists", "psychiatrists", + "phychological", "psychological", + "phychologists", "psychologists", + "phylosophical", "philosophical", + "physciatrists", "psychiatrists", + "physcological", "psychological", + "physcologists", "psychologists", + "physioligical", "physiological", + "planeswlakers", "planeswalker", + "plansewalkers", "planeswalker", + "playthroughts", "playthroughs", + "polysaccaride", "polysaccharide", + "practicallity", "practically", + "practicioners", "practitioners", + "practisioners", "practitioners", + "practitioneer", "practitioners", + "practitionner", "practitioner", + "pratictioners", "practitioners", + "preconceieved", "preconceived", + "predecessores", "predecessors", + "predetermiend", "predetermined", + "predetirmined", "predetermined", + "preditermined", "predetermined", + "predomenantly", "predominantly", + "predominently", "predominantly", + "pregressively", "progressively", + "preinitalized", "preinitialized", + "preinitalizes", "preinitializes", + "preliferation", "proliferation", + "prependicular", "perpendicular", + "preposterious", "preposterous", + "prerequisties", "prerequisite", + "prerequistite", "prerequisite", + "prescribtions", "prescriptions", + "presumptuious", "presumptuous", + "pretedermined", "predetermined", + "problematisch", "problematic", + "proclaimation", "proclamation", + "prodominantly", "predominantly", + "professionnal", "professional", + "profitiablity", "profitability", + "profitibality", "profitability", + "progressivily", "progressively", + "progressivley", "progressively", + "prononciation", "pronunciation", + "pronouciation", "pronunciation", + "pronunciacion", "pronunciation", + "pronunciating", "pronunciation", + "pronuncuation", "pronunciation", + "pronunication", "pronunciation", + "pronuntiation", "pronunciation", + "propabilities", "probabilities", + "proportionaly", "proportionally", + "proportionnal", "proportional", + "proseletyzing", "proselytizing", + "protagonistas", "protagonists", + "protagonistes", "protagonists", + "protestantisk", "protestants", + "protruberance", "protuberance", + "provocativley", "provocative", + "pscyhiatrists", "psychiatrists", + "pscyhological", "psychological", + "pscyhologists", "psychologists", + 
"pshycological", "psychological", + "pshycologists", "psychologists", + "psichological", "psychological", + "psychaitrists", "psychiatrists", + "psychedellics", "psychedelics", + "psychiatrisch", "psychiatric", + "psycholigical", "psychological", + "psycholigists", "psychologists", + "psychologycal", "psychologically", + "psychologysts", "psychologists", + "psychyatrists", "psychiatrists", + "psysiological", "physiological", + "purpendicular", "perpendicular", + "pyschiatrists", "psychiatrists", + "pyschological", "psychological", + "pyschologists", "psychologists", + "qaulification", "qualification", + "qualifiaction", "qualification", + "qualificaiton", "qualifications", + "qualificatons", "qualifications", + "qualifikation", "qualification", + "questionalble", "questionable", + "quinessential", "quintessential", + "ramificaitons", "ramifications", + "realisitcally", "realistically", + "realtionships", "relationships", + "reccommending", "recommending", + "receptionnist", "receptionist", + "receptionsist", "receptionist", + "reconaissance", "reconnaissance", + "reconcilation", "reconciliation", + "reconnaisance", "reconnaissance", + "reconstrucion", "reconstruction", + "recreationnal", "recreational", + "rectangulaire", "rectangular", + "redistribuito", "redistribution", + "redistributin", "redistribution", + "reencarnation", "reincarnation", + "refridgerator", "refrigerator", + "rehabilitaion", "rehabilitation", + "rehabilitatin", "rehabilitation", + "rehabilitaton", "rehabilitation", + "reincarantion", "reincarnation", + "reincatnation", "reincarnation", + "reinforcemens", "reinforcements", + "reinforcemnts", "reinforcements", + "reinitalising", "reinitialising", + "reinitalizing", "reinitializing", + "reinkarnation", "reincarnation", + "reinstallling", "reinstalling", + "reintarnation", "reincarnation", + "relationshits", "relationships", + "relationsship", "relationships", + "relatiopnship", "relationship", + "relentlessely", "relentlessly", + "relentlessley", "relentlessly", + "relinqushment", "relinquishment", + "remifications", "ramifications", + "reprehenisble", "reprehensible", + "reprehensable", "reprehensible", + "reprehinsible", "reprehensible", + "represenation", "representation", + "represensible", "reprehensible", + "representaion", "representation", + "representatie", "representatives", + "representatin", "representations", + "representerad", "represented", + "representitve", "representative", + "representives", "representatives", + "repricussions", "repercussions", + "reprihensible", "reprehensible", + "resolutionary", "revolutionary", + "respectivelly", "respectively", + "responsibiliy", "responsibility", + "responsibilty", "responsibility", + "responsiblity", "responsibility", + "respositories", "repositories", + "resssurecting", "resurrecting", + "ressurrection", "resurrection", + "restaraunteur", "restaurateur", + "retoractively", "retroactively", + "retroactivily", "retroactively", + "retroactivley", "retroactively", + "retrocatively", "retroactively", + "revelutionary", "revolutionary", + "revolutionair", "revolutionary", + "revolutionens", "revolutions", + "revolutioners", "revolutions", + "revoultionary", "revolutionary", + "ridiculouness", "ridiculousness", + "rienforcement", "reinforcements", + "righetousness", "righteousness", + "rightiousness", "righteousness", + "rigtheousness", "righteousness", + "rollarcoaster", "rollercoaster", + "rollercaoster", "rollercoaster", + "rollercoaters", "rollercoaster", + "rollercoatser", "rollercoaster", + "rollerocaster", 
"rollercoaster", + "rollertoaster", "rollercoaster", + "rollorcoaster", "rollercoaster", + "sacrastically", "sarcastically", + "sarcasitcally", "sarcastically", + "satisfactorly", "satisfactory", + "scandianvians", "scandinavian", + "scateboarding", "skateboarding", + "schisophrenic", "schizophrenic", + "schiziphrenic", "schizophrenic", + "schizophernia", "schizophrenia", + "schizophernic", "schizophrenic", + "schizophrania", "schizophrenia", + "schizoprhenia", "schizophrenia", + "schizoprhenic", "schizophrenic", + "schozophrenia", "schizophrenia", + "schozophrenic", "schizophrenic", + "schyzophrenia", "schizophrenia", + "schyzophrenic", "schizophrenic", + "schziophrenia", "schizophrenia", + "schziophrenic", "schizophrenic", + "scientificaly", "scientifically", + "scientificlly", "scientifically", + "segementation", "segmentation", + "sensationable", "sensational", + "sensationails", "sensationalism", + "sensationaism", "sensationalism", + "sensationalim", "sensationalism", + "sensationella", "sensational", + "shcizophrenic", "schizophrenic", + "significanlty", "significantly", + "significently", "significantly", + "signifigantly", "significantly", + "simultaneosly", "simultaneously", + "simultaneuous", "simultaneous", + "simultanously", "simultaneously", + "singificantly", "significantly", + "skatebaording", "skateboarding", + "skateborading", "skateboarding", + "socioecenomic", "socioeconomic", + "socioecomonic", "socioeconomic", + "socioeconimic", "socioeconomic", + "sohpisticated", "sophisticated", + "sophisitcated", "sophisticated", + "sophistacated", "sophisticated", + "sophistocated", "sophisticated", + "sophosticated", "sophisticated", + "spacification", "specification", + "specializaton", "specialization", + "specificaiton", "specifications", + "specificatons", "specifications", + "specifikation", "specification", + "spectaculaire", "spectacular", + "spectaculalry", "spectacularly", + "spectatularly", "spectacularly", + "spesification", "specification", + "spirituallity", "spiritually", + "sponatenously", "spontaneously", + "spontaenously", "spontaneously", + "spontainously", "spontaneously", + "spontaneoulsy", "spontaneously", + "spontaneuosly", "spontaneously", + "spontaniously", "spontaneously", + "spontanuously", "spontaneously", + "sponteanously", "spontaneously", + "sponteneously", "spontaneously", + "sporstmanship", "sportsmanship", + "sportmansship", "sportsmanship", + "sportsmamship", "sportsmanship", + "sportsmenship", "sportsmanship", + "sprotsmanship", "sportsmanship", + "stakeboarding", "skateboarding", + "startegically", "strategically", + "statisitcally", "statistically", + "statistacally", "statistically", + "stereotipical", "stereotypical", + "stereotpyical", "stereotypical", + "stereotypcial", "stereotypical", + "stereotypeing", "stereotyping", + "stereotypying", "stereotyping", + "steriotypical", "stereotypical", + "steroetypical", "stereotypical", + "steryotypical", "stereotypical", + "storytellling", "storytelling", + "stragegically", "strategically", + "stragetically", "strategically", + "straightenend", "straightened", + "straitforward", "straightforward", + "stratagically", "strategically", + "stratigically", "strategically", + "strawberrries", "strawberries", + "stregnthening", "strengthening", + "strenghtening", "strengthening", + "strengthining", "strengthening", + "stretegically", "strategically", + "subcatagories", "subcategories", + "subconsciosly", "subconsciously", + "subconsciouly", "subconsciously", + "subconsiously", "subconsciously", + 
"subjectivelly", "subjectively", + "subscribtions", "subscriptions", + "substancially", "substantially", + "substanitally", "substantially", + "substansially", "substantially", + "substantiable", "substantial", + "substantually", "substantially", + "substitutents", "substitutes", + "successfullly", "successfully", + "supermarkedet", "supermarket", + "supermarkerts", "supermarkets", + "superpowereds", "superpowers", + "supersticious", "superstitious", + "superstisious", "superstitious", + "superstitiosi", "superstitious", + "superstitiuos", "superstitious", + "superstituous", "superstitious", + "suphisticated", "sophisticated", + "supscriptions", "subscriptions", + "surreptiously", "surreptitiously", + "survavibility", "survivability", + "survibability", "survivability", + "survivabiltiy", "survivability", + "survivalibity", "survivability", + "survivavility", "survivability", + "survivebility", "survivability", + "susbtantially", "substantially", + "sustainabilty", "sustainability", + "synchornously", "synchronously", + "systematicaly", "systematically", + "systematiclly", "systematically", + "techmological", "technological", + "technicallity", "technically", + "technoligical", "technological", + "technologicly", "technological", + "techonlogical", "technological", + "telaportation", "teleportation", + "teleportating", "teleportation", + "teleprotation", "teleportation", + "teliportation", "teleportation", + "teloportation", "teleportation", + "territoriella", "territorial", + "theoratically", "theoretically", + "theoritically", "theoretically", + "therapeutisch", "therapeutic", + "thereotically", "theoretically", + "thermodynaics", "thermodynamics", + "thermodynamcs", "thermodynamics", + "theroetically", "theoretically", + "thoeretically", "theoretically", + "tranistioning", "transitioning", + "transcendance", "transcendence", + "transcribtion", "transcription", + "transcripcion", "transcription", + "transferrring", "transferring", + "transformarea", "transformer", + "transformarem", "transformer", + "transformarse", "transformers", + "transformaton", "transformation", + "transformered", "transformed", + "transgengered", "transgendered", + "transisioning", "transitioning", + "transitionals", "transitions", + "transitionnal", "transitional", + "transitionned", "transitioned", + "transkription", "transcription", + "translyvanian", "transylvania", + "transmisisons", "transmissions", + "transmissable", "transmissible", + "transmisssion", "transmissions", + "transparantie", "transparent", + "transparentcy", "transparency", + "transplantees", "transplants", + "transporation", "transportation", + "transportaion", "transportation", + "transportarme", "transporter", + "transportarse", "transporter", + "transportarte", "transporter", + "transporteurs", "transporter", + "transsexuella", "transsexual", + "transylvannia", "transylvania", + "trasngendered", "transgendered", + "troubleshooot", "troubleshoot", + "udnerestimate", "underestimated", + "umcomfortable", "uncomfortable", + "umcomfortably", "uncomfortably", + "umpredictable", "unpredictable", + "unappropriate", "inappropriate", + "unbelievabley", "unbelievably", + "unbelievablly", "unbelievably", + "uncertaintity", "uncertainty", + "uncomfertable", "uncomfortable", + "uncomfertably", "uncomfortably", + "uncomfortabel", "uncomfortably", + "uncomforyable", "uncomfortably", + "uncomfrotable", "uncomfortable", + "uncomfrotably", "uncomfortably", + "uncomftorable", "uncomfortable", + "uncomftorably", "uncomfortably", + "unconcsiously", 
"unconsciously", + "unconfortable", "uncomfortable", + "unconfortably", "uncomfortably", + "unconscioulsy", "unconsciously", + "unconsicously", "unconsciously", + "unconsiderate", "inconsiderate", + "uncontrollabe", "uncontrollable", + "uncontrollaby", "uncontrollably", + "unconventinal", "unconventional", + "uncounciously", "unconsciously", + "uncousciously", "unconsciously", + "underastimate", "underestimate", + "underesitmate", "underestimated", + "underestamate", "underestimate", + "underestemate", "underestimate", + "underestiamte", "underestimated", + "undergratuate", "undergraduate", + "underhwelming", "underwhelming", + "underhwleming", "underwhelming", + "underminining", "undermining", + "underpowererd", "underpowered", + "undersetimate", "underestimate", + "understandble", "understandable", + "understandbly", "understandably", + "underwealming", "underwhelming", + "underwhemling", "underwhelming", + "underwhleming", "underwhelming", + "undoctrinated", "indoctrinated", + "unexpectadely", "unexpectedly", + "unfomfortable", "uncomfortable", + "unforgiveable", "unforgivable", + "unfortuantely", "unfortunately", + "unfortunantly", "unfortunately", + "unfortunatley", "unfortunately", + "unfortuneatly", "unfortunately", + "unfortunetely", "unfortunately", + "unilaterallly", "unilaterally", + "uninstallling", "uninstalling", + "unintellegent", "unintelligent", + "unintelligant", "unintelligent", + "unintensional", "unintentional", + "uninteristing", "uninteresting", + "universitites", "universities", + "unnecassarily", "unnecessarily", + "unneccesarily", "unnecessarily", + "unnecessairly", "unnecessarily", + "unnecessarely", "unnecessarily", + "unnecessarity", "unnecessarily", + "unnecesserily", "unnecessarily", + "unnecissarily", "unnecessarily", + "unnessecarily", "unnecessarily", + "unoperational", "nonoperational", + "unprecendeted", "unprecedented", + "unprecidented", "unprecedented", + "unpredecented", "unprecedented", + "unpredicatble", "unpredictable", + "unpredictible", "unpredictable", + "unpresedented", "unprecedented", + "unpridictable", "unpredictable", + "unprofessinal", "unprofessional", + "unrealistisch", "unrealistic", + "unreasonabley", "unreasonably", + "unreasonablly", "unreasonably", + "unrestrictred", "unrestricted", + "unsistainable", "unsustainable", + "unsubscribade", "unsubscribed", + "unsubscribbed", "unsubscribe", + "unsuccesfully", "unsuccessfully", + "unsuccessfull", "unsuccessful", + "unsucessfully", "unsuccessfully", + "unsuprisingly", "unsurprisingly", + "unsuprizingly", "unsurprisingly", + "unsustainible", "unsustainable", + "unsustianable", "unsustainable", + "vertification", "certification", + "villification", "vilification", + "virualization", "visualization", + "visualizacion", "visualization", + "visualizaiton", "visualization", + "visualizating", "visualization", + "vitualization", "visualization", + "vizualization", "visualization", + "volounteering", "volunteering", + "vulberability", "vulnerability", + "vulernability", "vulnerability", + "vulnarability", "vulnerability", + "vulnerabiltiy", "vulnerability", + "vulnurability", "vulnerability", + "vunlerability", "vulnerability", + "vurnerability", "vulnerability", + "weightlfiting", "weightlifting", + "weightlifitng", "weightlifting", + "weightligting", "weightlifting", + "weigthlifting", "weightlifting", + "wholeheartdly", "wholeheartedly", + "wholeheartedy", "wholeheartedly", + "wholeheartely", "wholeheartedly", + "wieghtlifting", "weightlifting", + "abberivation", "abbreviation", + 
"abberviation", "abbreviation", + "abbreivation", "abbreviation", + "abbreveation", "abbreviation", + "abbrievation", "abbreviation", + "abortificant", "abortifacient", + "abrreviation", "abbreviation", + "academcially", "academically", + "accedentally", "accidentally", + "accelarating", "accelerating", + "accelaration", "acceleration", + "acceleartion", "acceleration", + "acceleraptor", "accelerator", + "accelorating", "accelerating", + "accessibilty", "accessibility", + "accidentlaly", "accidently", + "accomadating", "accommodating", + "accomadation", "accommodation", + "accomodating", "accommodating", + "accomodation", "accommodation", + "accrediation", "accreditation", + "acculumation", "accumulation", + "accumalation", "accumulation", + "accumilation", "accumulation", + "acedemically", "academically", + "acheivements", "achievements", + "acknolwedged", "acknowledged", + "acknolwedges", "acknowledges", + "acknoweldged", "acknowledged", + "acknoweldges", "acknowledges", + "acknowiedged", "acknowledged", + "acknowladges", "acknowledges", + "acknowldeged", "acknowledged", + "acknowledget", "acknowledgement", + "acknowleding", "acknowledging", + "acknowlegded", "acknowledged", + "acknowlegdes", "acknowledges", + "ackumulation", "accumulation", + "acquaintaces", "acquaintances", + "acquaintence", "acquaintance", + "acquantaince", "acquaintance", + "acquantiance", "acquaintances", + "acquiantance", "acquaintances", + "acquiantence", "acquaintance", + "adknowledged", "acknowledged", + "adknowledges", "acknowledges", + "administored", "administer", + "adminsitered", "administered", + "adminstrator", "administrator", + "advantagious", "advantageous", + "advantegeous", "advantageous", + "adventageous", "advantageous", + "adventureous", "adventures", + "adventureres", "adventures", + "adventurious", "adventurous", + "adventuruous", "adventurous", + "advertisiers", "advertisers", + "advertisment", "advertisement", + "advertisters", "advertisers", + "advertisting", "advertising", + "aestheticaly", "aesthetically", + "aestheticlly", "aesthetically", + "afficianados", "aficionados", + "afficionados", "aficionados", + "afghanisthan", "afghanistan", + "afterhtought", "afterthought", + "afterthougth", "afterthought", + "aggressivley", "aggressively", + "agircultural", "agricultural", + "agknowledged", "acknowledged", + "agnosticisim", "agnosticism", + "agracultural", "agricultural", + "agriculteral", "agricultural", + "agriculteurs", "agriculture", + "agricultrual", "agricultural", + "agriculutral", "agricultural", + "agrigultural", "agricultural", + "agrocultural", "agricultural", + "allegiancies", "allegiance", + "alterantives", "alternatives", + "alternatevly", "alternately", + "alternatiely", "alternately", + "alternatieve", "alternative", + "alternativly", "alternatively", + "alternativos", "alternatives", + "alternatvely", "alternately", + "alternitives", "alternatives", + "altruistisch", "altruistic", + "amendmenters", "amendments", + "amohetamines", "amphetamines", + "ampehtamines", "amphetamines", + "ampethamines", "amphetamines", + "amphatamines", "amphetamines", + "amphedamines", "amphetamines", + "amphetamenes", "amphetamines", + "amphetemines", "amphetamines", + "amphetimines", "amphetamines", + "amphetmaines", "amphetamines", + "anecdotallly", "anecdotally", + "annhiliation", "annihilation", + "annihalition", "annihilation", + "annihilatron", "annihilation", + "annihliation", "annihilation", + "annilihation", "annihilation", + "anniversairy", "anniversary", + "anniversarry", "anniversary", 
+ "anniversiary", "anniversary", + "annoucenment", "announcements", + "annoucnement", "announcement", + "announcemnet", "announcements", + "announcemnts", "announcements", + "anphetamines", "amphetamines", + "ansalisation", "nasalisation", + "ansalization", "nasalization", + "antaganistic", "antagonistic", + "antagonisitc", "antagonistic", + "antagonostic", "antagonist", + "antibioticos", "antibiotics", + "anticiaption", "anticipation", + "anticipacion", "anticipation", + "antisipation", "anticipation", + "antogonistic", "antagonistic", + "antrhopology", "anthropology", + "antrophology", "anthropology", + "apllications", "applications", + "apocalypitic", "apocalyptic", + "apologistics", "apologists", + "apologizeing", "apologizing", + "apostrophied", "apostrophe", + "apostrophies", "apostrophe", + "apperciation", "appreciation", + "applicaitons", "applications", + "appoitnments", "appointments", + "apporachable", "approachable", + "appraochable", "approachable", + "appreceating", "appreciating", + "appreciaters", "appreciates", + "appreciatied", "appreciative", + "appreicating", "appreciating", + "appreication", "appreciation", + "appretiation", "appreciation", + "appropriatin", "appropriation", + "appropriatly", "appropriately", + "appropriaton", "appropriation", + "approprietly", "appropriately", + "approstraphe", "apostrophe", + "approxiately", "approximately", + "approximatly", "approximately", + "approximetly", "approximately", + "aproximately", "approximately", + "aqcuaintance", "acquaintance", + "aqquaintance", "acquaintance", + "arbitrariliy", "arbitrarily", + "arbitrarilly", "arbitrarily", + "archetecture", "architecture", + "architechure", "architecture", + "architectual", "architectural", + "architectuur", "architecture", + "architecutre", "architecture", + "architexture", "architecture", + "arcitechture", "architecture", + "areodynamics", "aerodynamics", + "argicultural", "agricultural", + "argumentatie", "argumentative", + "arithmetisch", "arithmetic", + "armageddomon", "armageddon", + "arrengements", "arrangements", + "articifially", "artificially", + "artificailly", "artificially", + "artificiella", "artificial", + "artificually", "artificially", + "artifiically", "artificially", + "assasination", "assassination", + "assassinatin", "assassination", + "assissinated", "assassinated", + "associationg", "associating", + "assoications", "associations", + "assosiations", "associations", + "assosication", "assassination", + "assotiations", "associations", + "assymetrical", "asymmetrical", + "asthetically", "aesthetically", + "astranomical", "astronomical", + "astromonical", "astronomical", + "astronautlis", "astronauts", + "astronimical", "astronomical", + "astronomicly", "astronomical", + "athleticisim", "athleticism", + "atmosphereic", "atmospheric", + "audiobookmrs", "audiobooks", + "auhtenticate", "authenticate", + "australianas", "australians", + "australianos", "australians", + "authentisity", "authenticity", + "authorithies", "authorities", + "authoritiers", "authorities", + "authorizaton", "authorization", + "authrorities", "authorities", + "autochtonous", "autochthonous", + "autocorrrect", "autocorrect", + "automobilies", "automobile", + "automodertor", "automoderator", + "automonomous", "autonomous", + "auxilliaries", "auxiliaries", + "avaliability", "availability", + "avialability", "availability", + "awknowledged", "acknowledged", + "awknowledges", "acknowledges", + "awkwardsness", "awkwardness", + "babysittting", "babysitting", + "beaurocratic", "bureaucratic", + 
"beautifullly", "beautifully", + "belligerante", "belligerent", + "beuraucratic", "bureaucratic", + "billionairre", "billionaire", + "billionaries", "billionaires", + "billioniares", "billionaires", + "bioligically", "biologically", + "birmingharam", "birmingham", + "bittersweeet", "bittersweet", + "blamethrower", "flamethrower", + "blueberrries", "blueberries", + "blueprintcss", "blueprints", + "boardcasting", "broadcasting", + "bobybuilding", "bodybuilding", + "bodybuidling", "bodybuilding", + "bodybuilidng", "bodybuilding", + "bodybuliding", "bodybuilding", + "bodydbuilder", "bodybuilder", + "bombardement", "bombardment", + "boradcasting", "broadcasting", + "botivational", "motivational", + "brainwahsing", "brainwashing", + "brakethrough", "breakthrough", + "braodcasting", "broadcasting", + "brazilianese", "brazilians", + "brazilianess", "brazilians", + "breakthorugh", "breakthrough", + "breaktrhough", "breakthrough", + "breastfeedig", "breastfeeding", + "breastfeeing", "breastfeeding", + "breasttaking", "breathtaking", + "brianwashing", "brainwashing", + "broadcastors", "broadcasts", + "brotherhoood", "brotherhood", + "buearucratic", "bureaucratic", + "bueraucratic", "bureaucratic", + "bulletprooof", "bulletproof", + "bureaocratic", "bureaucratic", + "bureaucracie", "bureaucratic", + "bureaucracts", "bureaucrats", + "bureaucrates", "bureaucrats", + "bureuacratic", "bureaucratic", + "businessemen", "businessmen", + "cababilities", "capabilities", + "caclulations", "calculations", + "calcluations", "calculation", + "calcualtions", "calculations", + "calculationg", "calculating", + "calculatoare", "calculator", + "californains", "californian", + "californican", "californian", + "californinan", "californian", + "caluclations", "calculations", + "camouflagued", "camouflage", + "canceltation", "cancellation", + "cannibalisim", "cannibalism", + "canniballism", "cannibalism", + "cannotations", "connotations", + "capitalistes", "capitalists", + "caracterized", "characterized", + "carbohydrats", "carbohydrates", + "carbohyrdate", "carbohydrates", + "caricaturale", "caricature", + "caricaturile", "caricature", + "caricaturise", "caricature", + "caricaturize", "caricature", + "catastraphic", "catastrophic", + "catastrohpic", "catastrophic", + "catastrophie", "catastrophe", + "categoricaly", "categorically", + "categoriezed", "categorized", + "catergorized", "categorized", + "caterpillers", "caterpillars", + "catestrophic", "catastrophic", + "catholicisim", "catholicism", + "catholocisim", "catholicism", + "catistrophic", "catastrophic", + "catostraphic", "catastrophic", + "catostrophic", "catastrophic", + "catterpilars", "caterpillars", + "catterpillar", "caterpillar", + "celebratings", "celebrations", + "celebritites", "celebrities", + "celibrations", "celebrations", + "cententenial", "centennial", + "cercumstance", "circumstance", + "cerification", "verification", + "certificiate", "certificate", + "challengeing", "challenging", + "chamiponship", "championships", + "champinoship", "championships", + "championchip", "championship", + "championsihp", "championships", + "championsips", "championships", + "champiosnhip", "championships", + "champoinship", "championship", + "chanpionship", "championship", + "charactarize", "characterize", + "charaterized", "characterized", + "charismastic", "charismatic", + "cheerlearder", "cheerleader", + "cheerleeders", "cheerleaders", + "cheeseberger", "cheeseburger", + "cheeseborger", "cheeseburger", + "cheesebruger", "cheeseburgers", + "cheeseburges", 
"cheeseburgers", + "cheeseburgie", "cheeseburger", + "cheezeburger", "cheeseburger", + "chirstianity", "christianity", + "chocolateers", "chocolates", + "chrisitanity", "christianity", + "christainity", "christianity", + "christiantiy", "christianity", + "christinaity", "christianity", + "chromosomers", "chromosomes", + "chronologial", "chronological", + "chrsitianity", "christianity", + "cilivization", "civilizations", + "circulatiing", "circulating", + "circulationg", "circulating", + "circumcisied", "circumcised", + "circumcition", "circumcision", + "circumsicion", "circumcision", + "circumsision", "circumcision", + "circumsition", "circumcision", + "circumsizion", "circumcision", + "circumstanes", "circumstance", + "circumstanta", "circumstantial", + "circumstante", "circumstance", + "circuncision", "circumcision", + "circunstance", "circumstance", + "civiliaztion", "civilizations", + "civilizacion", "civilization", + "civilizaiton", "civilization", + "civilizatoin", "civilizations", + "civilizatons", "civilizations", + "civilziation", "civilizations", + "civizilation", "civilizations", + "claculations", "calculations", + "classificato", "classification", + "cockroachers", "cockroaches", + "coefficienct", "coefficient", + "coencidental", "coincidental", + "coincedental", "coincidental", + "coincidencal", "coincidental", + "coincidentia", "coincidental", + "coindidental", "coincidental", + "coinsidental", "coincidental", + "cointerpoint", "counterpoint", + "collaberator", "collaborate", + "collaboratie", "collaborate", + "collaboratin", "collaboration", + "collectivily", "collectively", + "collectivley", "collectively", + "colonialisim", "colonialism", + "colonizacion", "colonization", + "colonizators", "colonizers", + "colonozation", "colonization", + "combanations", "combinations", + "combonations", "combinations", + "comdemnation", "condemnation", + "comemmorates", "commemorates", + "comemoretion", "commemoration", + "comeptitions", "competitions", + "comfirmation", "confirmation", + "comfortabley", "comfortably", + "comfortablly", "comfortably", + "comissioning", "commissioning", + "commandemnts", "commandment", + "commandmants", "commandments", + "commandmends", "commandments", + "commemmorate", "commemorate", + "commendments", "commandments", + "commenteries", "commenters", + "commenwealth", "commonwealth", + "commerciales", "commercials", + "commerically", "commercially", + "comminicated", "communicated", + "commishioned", "commissioned", + "commishioner", "commissioner", + "commisioning", "commissioning", + "commissionar", "commissioner", + "commissionor", "commissioner", + "committments", "commitments", + "commoditites", "commodities", + "commomwealth", "commonwealth", + "commonhealth", "commonwealth", + "commonweatlh", "commonwealth", + "commonwelath", "commonwealth", + "communciated", "communicated", + "communiation", "communication", + "communicatie", "communicate", + "communicatin", "communications", + "communicaton", "communication", + "communitites", "communities", + "compansating", "compensating", + "compansation", "compensation", + "comparativly", "comparatively", + "comparisions", "comparisons", + "comparission", "comparisons", + "comparissons", "comparisons", + "compatablity", "compatibility", + "compatibiliy", "compatibility", + "compatibilty", "compatibility", + "compatiblity", "compatibility", + "compensacion", "compensation", + "compensative", "compensate", + "compesitions", "compositions", + "competetions", "competitions", + "competitevly", "competitively", + 
"competitiion", "competition", + "competitiors", "competitors", + "competitivly", "competitively", + "competitivos", "competitions", + "compinsating", "compensating", + "compinsation", "compensation", + "complainging", "complaining", + "completetion", "completion", + "compliations", "compilation", + "complicacion", "complication", + "complicatied", "complicate", + "complicaties", "complicate", + "complicatred", "complicate", + "complicatted", "complicate", + "complilation", "complication", + "complimation", "complication", + "complimenary", "complimentary", + "complimentje", "complimented", + "complimentry", "complimentary", + "complination", "complication", + "complitation", "complication", + "composistion", "compositions", + "compramising", "compromising", + "compremising", "compromising", + "compresssion", "compression", + "compromissen", "compromise", + "compromisses", "compromises", + "compromizing", "compromising", + "compromosing", "compromising", + "comptability", "compatibility", + "compulsivley", "compulsive", + "compulsorary", "compulsory", + "computarized", "computerized", + "comrpomising", "compromising", + "comtaminated", "contaminated", + "comtemporary", "contemporary", + "conbinations", "combinations", + "concatinated", "contaminated", + "conceivabley", "conceivably", + "concellation", "cancellation", + "concentraded", "concentrated", + "concentraing", "concentrating", + "concentraion", "concentration", + "concentrarte", "concentrate", + "concentratie", "concentrate", + "concentratin", "concentration", + "concequences", "consequences", + "concequently", "consequently", + "concersation", "conservation", + "concervation", "conservation", + "concervatism", "conservatism", + "concervative", "conservative", + "conciderable", "considerable", + "conciderably", "considerably", + "conciousness", "consciousness", + "conclusiones", "conclusions", + "conclusivley", "conclusive", + "condamnation", "condemnation", + "condemantion", "condemnation", + "condenmation", "condemnation", + "condescening", "condescending", + "condescenion", "condescension", + "conditionnal", "conditional", + "conditionned", "conditioned", + "conditionner", "conditioner", + "condmenation", "condemnation", + "condolencies", "condolences", + "condolensces", "condolences", + "condomnation", "condemnation", + "condradicted", "contradicted", + "conenctivity", "connectivity", + "confedential", "confidential", + "confederancy", "confederacy", + "confederatie", "confederate", + "confermation", "confirmation", + "confersation", "conservation", + "confessionis", "confessions", + "confidencial", "confidential", + "confidentail", "confidential", + "confidentaly", "confidently", + "confidentely", "confidently", + "confidentiel", "confidential", + "configuratin", "configurations", + "configuraton", "configuration", + "confirmacion", "confirmation", + "confrimation", "confirmation", + "confrontaion", "confrontation", + "congegration", "congregation", + "congergation", "congregation", + "congradulate", "congratulate", + "congragation", "congregation", + "congragulate", "congratulate", + "congratualte", "congratulate", + "congregacion", "congregation", + "congresional", "congressional", + "congresssman", "congressman", + "congresssmen", "congressmen", + "congretation", "congregation", + "congrigation", "congregation", + "conicidental", "coincidental", + "connatations", "connotations", + "connecticuit", "connecticut", + "connectivety", "connectivity", + "connetations", "connotations", + "connitations", "connotations", + 
"connonations", "connotations", + "conolization", "colonization", + "conpensating", "compensating", + "conpensation", "compensation", + "conpetitions", "competitions", + "conplimented", "complimented", + "conpromising", "compromising", + "consciouness", "consciousness", + "consciouslly", "consciously", + "consectutive", "consecutive", + "consecuences", "consequences", + "consecuentes", "consequences", + "consecuently", "consequently", + "consensuarlo", "consensual", + "consentrated", "concentrated", + "consentrates", "concentrates", + "conseqeunces", "consequence", + "consequenses", "consequences", + "consequental", "consequently", + "consequneces", "consequence", + "conservacion", "conservation", + "conservaties", "conservatives", + "conservativo", "conservation", + "conservativs", "conservatism", + "conservitave", "conservatives", + "conservitism", "conservatism", + "conservitive", "conservative", + "considerarle", "considerable", + "considerarte", "considerate", + "consideraste", "considerate", + "consideratie", "considerate", + "consideratin", "considerations", + "consideribly", "considerably", + "consilidated", "consolidated", + "consipracies", "conspiracies", + "consiquently", "consequently", + "consistantly", "consistently", + "consistencey", "consistency", + "consistentcy", "consistently", + "consitutents", "constituents", + "consoldiated", "consolidated", + "consolitated", "consolidate", + "consolodated", "consolidated", + "consoltation", "consultation", + "conspericies", "conspiracies", + "conspiracize", "conspiracies", + "conspiriator", "conspirator", + "conspiricies", "conspiracies", + "conspriacies", "conspiracies", + "consqeuences", "consequence", + "constinually", "continually", + "constitition", "constitution", + "constituante", "constituents", + "constituants", "constituents", + "constituates", "constitutes", + "constitucion", "constitution", + "constituient", "constitute", + "constituinte", "constituents", + "constitutiei", "constitute", + "constitutues", "constitute", + "constiutents", "constituents", + "constracting", "constructing", + "constraction", "construction", + "constrainsts", "constraints", + "construccion", "construction", + "construciton", "construction", + "constructeds", "constructs", + "constructief", "constructive", + "constructies", "constructs", + "constructifs", "constructs", + "constructiin", "constructing", + "constructivo", "construction", + "consturction", "construction", + "consultating", "consultation", + "consumerisim", "consumerism", + "contaiminate", "contaminate", + "contaminatie", "contaminated", + "contaminaton", "contamination", + "contaminents", "containment", + "contamporary", "contemporary", + "contanimated", "contaminated", + "contaniments", "containment", + "contemperary", "contemporary", + "contemporany", "contemporary", + "continentais", "continents", + "continential", "continental", + "contineously", "continuously", + "continiously", "continuously", + "continuacion", "continuation", + "continuating", "continuation", + "continuativo", "continuation", + "continuining", "continuing", + "contirbution", "contribution", + "contirbutors", "contributors", + "contiunation", "continuation", + "contrabution", "contribution", + "contraceptie", "contraceptives", + "contradicing", "contradicting", + "contradicion", "contradiction", + "contradicory", "contradictory", + "contradictie", "contradicted", + "contradictin", "contradiction", + "contradicton", "contradiction", + "contraticted", "contradicted", + "contribucion", "contribution", + 
"contribuitor", "contributor", + "contributers", "contributors", + "contributivo", "contribution", + "contributons", "contributors", + "contrictions", "contractions", + "contridicted", "contradicted", + "controlleras", "controllers", + "controlllers", "controllers", + "controverial", "controversial", + "controveries", "controversies", + "controversal", "controversial", + "controversey", "controversy", + "contructions", "contractions", + "conveinently", "conveniently", + "convencional", "conventional", + "conveniantly", "conveniently", + "converastion", "conversations", + "converdation", "conservation", + "conversacion", "conversation", + "conversaiton", "conversations", + "conversatino", "conservation", + "conversatism", "conservatism", + "conversatoin", "conversations", + "conversiones", "conversions", + "converstaion", "conversation", + "convertables", "convertibles", + "convertiable", "convertible", + "convertibile", "convertible", + "convervation", "conservation", + "convervatism", "conservatism", + "converzation", "conservation", + "convesration", "conservation", + "convienently", "conveniently", + "convorsation", "conversation", + "convseration", "conservation", + "coordenation", "coordination", + "coordiantion", "coordination", + "coordinacion", "coordination", + "coordinaters", "coordinates", + "coordinatior", "coordinator", + "coordinatore", "coordinate", + "coordonation", "coordination", + "cooridnation", "coordination", + "coorperation", "cooperation", + "coprorations", "corporations", + "corinthianos", "corinthians", + "corinthinans", "corinthians", + "corparations", "corporations", + "corperations", "corporations", + "corporativos", "corporations", + "corproations", "corporations", + "corrdination", "coordination", + "correponding", "corresponding", + "correposding", "corresponding", + "correspondes", "corresponds", + "correspondig", "corresponding", + "corresponing", "corresponding", + "corrisponded", "corresponded", + "costomizable", "customizable", + "costumizable", "customizable", + "councidental", "coincidental", + "counsellling", "counselling", + "counterfiets", "counterfeit", + "counterfited", "counterfeit", + "counterracts", "counterparts", + "countertraps", "counterparts", + "countrywides", "countryside", + "coutnerparts", "counterparts", + "coutnerpoint", "counterpoint", + "covnersation", "conservation", + "crankenstein", "frankenstein", + "creationisim", "creationism", + "creationnism", "creationism", + "creationnist", "creationist", + "creationsism", "creationism", + "creationsist", "creationist", + "creationsits", "creationists", + "credibillity", "credibility", + "crigneworthy", "cringeworthy", + "cringewhorty", "cringeworthy", + "cringeworhty", "cringeworthy", + "cringewrothy", "cringeworthy", + "cringyworthy", "cringeworthy", + "criticallity", "critically", + "criticiszing", "criticising", + "croporations", "corporations", + "crucifiction", "crucifixion", + "cuestionable", "questionable", + "culiminating", "culminating", + "cumulatative", "cumulative", + "cuntaminated", "contaminated", + "curcumcision", "circumcision", + "curcumstance", "circumstance", + "custamizable", "customizable", + "custimizable", "customizable", + "customizaton", "customization", + "customizeble", "customizable", + "customizible", "customizable", + "custumizable", "customizable", + "cuztomizable", "customizable", + "dabilitating", "debilitating", + "dangerousely", "dangerously", + "decensitized", "desensitized", + "deceptionist", "receptionist", + "declareation", "declaration", + 
"decomposeion", "decomposition", + "decomposited", "decomposed", + "decscription", "description", + "deffensively", "defensively", + "deficiancies", "deficiencies", + "deficiencias", "deficiencies", + "deficiensies", "deficiencies", + "definatively", "definitively", + "defininitely", "definitively", + "definitavely", "definitively", + "definitevely", "definitively", + "definitifely", "definitively", + "definitinely", "definitively", + "definititely", "definitively", + "definitivley", "definitively", + "deinitalized", "deinitialized", + "deinitalizes", "deinitializes", + "delibaretely", "deliberately", + "deliberatley", "deliberately", + "delibirately", "deliberately", + "delibitating", "debilitating", + "deliverately", "deliberately", + "delusionally", "delusively", + "demesticated", "domesticated", + "democracries", "democracies", + "democraphics", "demographics", + "democratisch", "democratic", + "demograhpics", "demographics", + "demogrpahics", "demographics", + "demonination", "denominations", + "demonstarted", "demonstrated", + "demonstartes", "demonstrates", + "demonstrabil", "demonstrably", + "demonstraion", "demonstration", + "demonstraits", "demonstrates", + "demonstrants", "demonstrates", + "demonstratie", "demonstrate", + "demonstratin", "demonstration", + "demonstrerat", "demonstrate", + "demosntrably", "demonstrably", + "demosntrated", "demonstrated", + "demosntrates", "demonstrates", + "demostration", "demonstration", + "denomenation", "denomination", + "denominacion", "denomination", + "denominatior", "denominator", + "denominatons", "denominations", + "denomonation", "denomination", + "deomgraphics", "demographics", + "depencencies", "dependencies", + "dependancies", "dependencies", + "dependencias", "dependencies", + "dependenices", "dependencies", + "dependensies", "dependencies", + "deperecation", "deprecation", + "deplacements", "replacements", + "deregualtion", "deregulation", + "deregulaiton", "deregulation", + "derugulation", "deregulation", + "describtions", "descriptions", + "descriminant", "discriminant", + "descriptivos", "descriptions", + "desctiptions", "descriptions", + "desctruction", "destruction", + "desencitized", "desensitized", + "desensatized", "desensitized", + "desensitived", "desensitized", + "desentisized", "desensitized", + "desentitized", "desensitized", + "desentizised", "desensitized", + "desginations", "destinations", + "desgustingly", "disgustingly", + "desitnations", "destinations", + "despectively", "respectively", + "despensaries", "dispensaries", + "desperatedly", "desperately", + "desperatelly", "desperately", + "desqualified", "disqualified", + "desregarding", "disregarding", + "dessertation", "dissertation", + "destiantions", "destinations", + "destinctions", "destinations", + "destractions", "distractions", + "destributors", "distributors", + "determinanti", "determination", + "determinaton", "determination", + "determinging", "determining", + "determinisic", "deterministic", + "determinisim", "determinism", + "deterministc", "deterministic", + "determinitic", "deterministic", + "detrimential", "detrimental", + "developement", "development", + "developmenet", "developments", + "develpoments", "developments", + "devolopement", "development", + "devolopments", "developments", + "diasspointed", "dissapointed", + "dicitonaries", "dictionaries", + "dictadorship", "dictatorship", + "dictarorship", "dictatorship", + "dictatorshop", "dictatorship", + "dictionaires", "dictionaries", + "didsapointed", "dissapointed", + "differencial", 
"differential", + "differencies", "differences", + "differentate", "differentiate", + "differnetial", "differential", + "difficulites", "difficulties", + "difficutlies", "difficulties", + "diffuculties", "difficulties", + "dimensionals", "dimensions", + "dimensionnal", "dimensional", + "dimensionsal", "dimensional", + "diplomatisch", "diplomatic", + "directionnal", "directional", + "disaapointed", "dissapointed", + "disadvandage", "disadvantaged", + "disadvantged", "disadvantaged", + "disadvantges", "disadvantages", + "disadvatange", "disadvantage", + "disadventage", "disadvantage", + "disagremeent", "disagreements", + "disapointing", "disappointing", + "disappearnce", "disappearance", + "disappearred", "disappeared", + "disapperaing", "disappearing", + "disaspointed", "dissapointed", + "disastisfied", "dissatisfied", + "disatissfied", "dissatisfied", + "disatvantage", "disadvantage", + "discertation", "dissertation", + "disciniplary", "disciplinary", + "disciplanary", "disciplinary", + "disciplenary", "disciplinary", + "disciplinare", "discipline", + "disciplinera", "disciplinary", + "disciplinery", "disciplinary", + "disclipinary", "disciplinary", + "disconencted", "disconnected", + "disconnectes", "disconnects", + "disconnectme", "disconnected", + "disconnectus", "disconnects", + "discontiuned", "discontinued", + "discountined", "discontinued", + "discreditied", "discredited", + "discreditted", "discredited", + "discriminare", "discriminate", + "discriminted", "discriminated", + "disctinction", "distinction", + "disctinctive", "distinctive", + "disctintions", "distinctions", + "discualified", "disqualified", + "discustingly", "disgustingly", + "disemination", "dissemination", + "disenchanged", "disenchanted", + "disengenuous", "disingenuous", + "disenginuous", "disingenuous", + "disensitized", "desensitized", + "disgareement", "disagreements", + "disgruntaled", "disgruntled", + "disgrunteled", "disgruntled", + "disguntingly", "disgustingly", + "disingeneous", "disingenuous", + "disingenious", "disingenuous", + "disinteresed", "disinterested", + "disintereted", "disinterested", + "dismantleing", "dismantling", + "disobediance", "disobedience", + "disobeidence", "disobedience", + "dispalcement", "displacement", + "dispapointed", "dissapointed", + "dispencaries", "dispensaries", + "dispensaires", "dispensaries", + "dispensarios", "dispensaries", + "dispensiries", "dispensaries", + "dispensories", "dispensaries", + "disqaulified", "disqualified", + "disqualifyed", "disqualified", + "disqustingly", "disgustingly", + "disrecpected", "disrespected", + "disrepsected", "disrespected", + "disresepcted", "disrespected", + "disrespecful", "disrespectful", + "disrespecing", "disrespecting", + "disrespectul", "disrespectful", + "disrespekted", "disrespected", + "disrtibution", "distributions", + "dissapearing", "disappearing", + "dissapionted", "dissapointed", + "dissapoimted", "dissapointed", + "dissapoitned", "dissapointed", + "dissaponited", "dissapointed", + "dissapoonted", "dissapointed", + "dissapounted", "dissapointed", + "dissappinted", "dissapointed", + "dissapponted", "dissapointed", + "dissastified", "dissatisfied", + "dissatisifed", "dissatisfied", + "dissatsified", "dissatisfied", + "dissepointed", "dissapointed", + "dissipointed", "dissapointed", + "dissobediant", "disobedient", + "dissobedient", "disobedient", + "dissopointed", "dissapointed", + "disspaointed", "dissapointed", + "dissppointed", "dissapointed", + "dissspointed", "dissapointed", + "distinations", "distinctions", + 
"distincitons", "distinctions", + "distingished", "distinguished", + "distingishes", "distinguishes", + "distinguised", "distinguished", + "distirbuting", "distributing", + "distirbution", "distribution", + "distrabution", "distribution", + "distribitors", "distributors", + "distribtuion", "distributions", + "distribucion", "distribution", + "distribuited", "distributed", + "distribuiton", "distributions", + "distribuitor", "distributor", + "distribusion", "distributions", + "distributino", "distributions", + "distributior", "distributor", + "distributons", "distributors", + "distributore", "distribute", + "distriubtion", "distributions", + "distrobution", "distribution", + "distrubances", "disturbance", + "distrubiting", "distributing", + "distrubition", "distribution", + "distrubitors", "distributors", + "distrubution", "distribution", + "distrubutors", "distributors", + "distructions", "distractions", + "distustingly", "disgustingly", + "ditactorship", "dictatorship", + "documenation", "documentation", + "documentaion", "documentation", + "documentaire", "documentaries", + "documentarse", "documentaries", + "documentarsi", "documentaries", + "domesitcated", "domesticated", + "domisticated", "domesticated", + "donesticated", "domesticated", + "donwloadable", "downloadable", + "dossapointed", "dissapointed", + "downlaodable", "downloadable", + "downloadbale", "downloadable", + "downloadeble", "downloadable", + "drankenstein", "frankenstein", + "dublications", "publications", + "dusgustingly", "disgustingly", + "dynamicallly", "dynamically", + "dyregulation", "deregulation", + "earthquackes", "earthquakes", + "earthquakers", "earthquakes", + "econimically", "economically", + "economisesti", "economists", + "educationnal", "educational", + "effectionate", "affectionate", + "effectivelly", "effectively", + "effectivenss", "effectiveness", + "efficienctly", "efficiency", + "effordlessly", "effortlessly", + "ejacualtions", "ejaculation", + "electorlytes", "electrolytes", + "electricrain", "electrician", + "electrictian", "electrician", + "electrobytes", "electrolytes", + "electrocytes", "electrolytes", + "electrolites", "electrolytes", + "electroltyes", "electrolytes", + "electronicas", "electronics", + "electronicos", "electronics", + "electroyltes", "electrolytes", + "elektrolytes", "electrolytes", + "eloctrolytes", "electrolytes", + "embarassment", "embarrassment", + "embarasssing", "embarassing", + "embarrasment", "embarrassment", + "embarressing", "embarrassing", + "embarrissing", "embarrassing", + "emberrassing", "embarrassing", + "emphetamines", "amphetamines", + "emprisonment", "imprisonment", + "encarcerated", "incarcerated", + "enceclopedia", "encyclopedia", + "enchancement", "enhancement", + "enchancments", "enchantments", + "enchantmants", "enchantments", + "enchentments", "enchantments", + "enciclopedia", "encyclopedia", + "enclycopedia", "encyclopedia", + "encorporated", "incorporated", + "encourageing", "encouraging", + "encyclapedia", "encyclopedia", + "encyclepedia", "encyclopedia", + "encyclopadia", "encyclopedia", + "encyclopeida", "encyclopedia", + "encyclopidia", "encyclopedia", + "encycolpedia", "encyclopedia", + "encyklopedia", "encyclopedia", + "encylcopedia", "encyclopedia", + "encyplopedia", "encyclopedia", + "endoresments", "endorsement", + "enemployment", "unemployment", + "enfringement", "infringement", + "enlightended", "enlightened", + "enlightenend", "enlightened", + "enlightented", "enlightened", + "enlightining", "enlightening", + "enligthening", 
"enlightening", + "entaglements", "entanglements", + "entartaining", "entertaining", + "enterpreneur", "entrepreneurs", + "enterprenuer", "entrepreneur", + "entertainted", "entertained", + "enthusiaists", "enthusiasts", + "enthusuastic", "enthusiastic", + "entoxication", "intoxication", + "entrepeneurs", "entrepreneurs", + "entreperneur", "entrepreneurs", + "entreprenaur", "entrepreneur", + "entrepreners", "entrepreneurs", + "entrepreneus", "entrepreneurs", + "entreprenour", "entrepreneur", + "entreprenure", "entrepreneurs", + "entreprenurs", "entrepreneurs", + "entrepreuner", "entrepreneurs", + "entretaining", "entertaining", + "enviormental", "environmental", + "enviornments", "environments", + "enviromental", "environmental", + "environemnts", "environments", + "environmentl", "environmentally", + "environmetal", "environmental", + "envrionments", "environments", + "errorneously", "erroneously", + "establishmet", "establishments", + "evelutionary", "evolutionary", + "exagerrating", "exaggerating", + "exaggarating", "exaggerating", + "exaggaration", "exaggeration", + "exaggeratted", "exaggerated", + "exaggurating", "exaggerating", + "exagguration", "exaggeration", + "exceptionaly", "exceptionally", + "exceptionnal", "exceptional", + "exclusiveity", "exclusivity", + "exclusivelly", "exclusively", + "exclusivitiy", "exclusivity", + "excorciating", "excruciating", + "excrusiating", "excruciating", + "excurciating", "excruciating", + "exectuioners", "executioner", + "executioneer", "executioner", + "executionees", "executions", + "executioness", "executions", + "executionier", "executioner", + "executionner", "executioner", + "exeggerating", "exaggerating", + "exeggeration", "exaggeration", + "expeditonary", "expeditionary", + "expendatures", "expenditures", + "expendetures", "expenditures", + "expentitures", "expenditures", + "experamental", "experimental", + "expereincing", "experiencing", + "experemental", "experimental", + "experiancing", "experiencing", + "experiemntal", "experimental", + "experiemnted", "experimented", + "experimantal", "experimental", + "experimentan", "experimentation", + "experimentes", "experiments", + "experimentle", "experimented", + "experimentos", "experiments", + "experimentul", "experimental", + "expidentures", "expenditures", + "expierencing", "experiencing", + "expiremental", "experimental", + "expiremented", "experimented", + "explaination", "explanation", + "explenations", "explanations", + "expliotation", "exploitation", + "exploitaiton", "exploitation", + "exploitating", "exploitation", + "exploititive", "exploitative", + "explortation", "exploitation", + "explotiation", "exploitation", + "explotiative", "exploitative", + "expolitation", "exploitation", + "expolitative", "exploitative", + "exponentialy", "exponentially", + "expropiation", "expropriation", + "extensivelly", "extensively", + "extradiction", "extradition", + "extraordiary", "extraordinary", + "extraordinay", "extraordinary", + "extrapolerat", "extrapolate", + "extrapoloate", "extrapolate", + "extremistisk", "extremists", + "extrordinary", "extraordinary", + "extruciating", "excruciating", + "facilitatile", "facilitate", + "fahrenheight", "fahrenheit", + "falmethrower", "flamethrower", + "familiarlize", "familiarize", + "fanslaughter", "manslaughter", + "fantasticaly", "fantastically", + "fantasticlly", "fantastically", + "fashionalble", "fashionable", + "fermantation", "fermentation", + "fermentacion", "fermentation", + "fermentaiton", "fermentation", + "fermentating", "fermentation", + 
"fermintation", "fermentation", + "fictionaries", "dictionaries", + "figuartively", "figuratively", + "figuratevely", "figuratively", + "figurativley", "figuratively", + "figuretively", "figuratively", + "figuritively", "figuratively", + "fingerpoints", "fingerprints", + "firefigthers", "firefighters", + "flamethorwer", "flamethrower", + "flametrhower", "flamethrower", + "flanethrower", "flamethrower", + "flexibillity", "flexibility", + "flourishment", "flourishing", + "fluctiations", "fluctuations", + "flucutations", "fluctuations", + "fluxtuations", "fluctuations", + "forgivenness", "forgiveness", + "fortunatelly", "fortunately", + "framethrower", "flamethrower", + "frankenstain", "frankenstein", + "frankensteen", "frankenstein", + "frankenstine", "frankenstein", + "frankinstein", "frankenstein", + "frementation", "fermentation", + "friendzonded", "friendzoned", + "friendzonned", "friendzoned", + "friendzowned", "friendzoned", + "fringeworthy", "cringeworthy", + "fronkenstein", "frankenstein", + "fruitsations", "frustrations", + "frustrastion", "frustrations", + "fucntionally", "functionally", + "funcitonally", "functionally", + "functionable", "functional", + "functionaliy", "functionally", + "functionalty", "functionality", + "functionlity", "functionality", + "functionning", "functioning", + "fundamentais", "fundamentals", + "fundamentalt", "fundamentalist", + "fundamentaly", "fundamentally", + "fundemantals", "fundamentals", + "fundementals", "fundamentals", + "fundimentals", "fundamentals", + "furstrations", "frustrations", + "futuristisch", "futuristic", + "fwankenstein", "frankenstein", + "geneological", "genealogical", + "generacional", "generational", + "generalizare", "generalize", + "generalizate", "generalize", + "generelizing", "generalizing", + "geograhpical", "geographical", + "geographicly", "geographical", + "geographisch", "geographic", + "geogrpahical", "geographical", + "goegraphical", "geographical", + "governemntal", "governmental", + "governmently", "governmental", + "grammaticaal", "grammatical", + "grammaticaly", "grammatically", + "grandchilden", "grandchildren", + "grandchilder", "grandchildren", + "grandchilren", "grandchildren", + "grassrooters", "grassroots", + "gringeworthy", "cringeworthy", + "guantanameow", "guantanamo", + "guantanamero", "guantanamo", + "hallucinatin", "hallucinations", + "hallucinaton", "hallucination", + "handwritting", "handwriting", + "harrassments", "harassments", + "headqaurters", "headquarters", + "headquatered", "headquartered", + "healthercare", "healthcare", + "heavywieghts", "heavyweight", + "helicopteros", "helicopters", + "hererosexual", "heterosexual", + "heretosexual", "heterosexual", + "heteresexual", "heterosexual", + "hetreosexual", "heterosexual", + "highligthing", "highlighting", + "hipocritical", "hypocritical", + "hipothetical", "hypothetical", + "histarically", "historically", + "histerically", "historically", + "historicians", "historians", + "homogeneized", "homogenized", + "homogenenous", "homogeneous", + "homosexuales", "homosexuals", + "homosexualiy", "homosexuality", + "homosexualls", "homosexuals", + "homosexualty", "homosexuality", + "homosexuella", "homosexual", + "hopsitalized", "hospitalized", + "horisontally", "horizontally", + "horizantally", "horizontally", + "horiztonally", "horizontally", + "horozontally", "horizontally", + "hospitallity", "hospitality", + "hospitilized", "hospitalized", + "hospitolized", "hospitalized", + "hosptialized", "hospitalized", + "humanitarien", "humanitarian", + 
"humanitarion", "humanitarian", + "humanitatian", "humanitarian", + "humaniterian", "humanitarian", + "humantiarian", "humanitarian", + "huminatarian", "humanitarian", + "hurricanefps", "hurricanes", + "hyopthetical", "hypothetical", + "hypathetical", "hypothetical", + "hypertrophey", "hypertrophy", + "hypethetical", "hypothetical", + "hypocrticial", "hypocritical", + "hypocrytical", "hypocritical", + "hypotehtical", "hypothetical", + "hypotethical", "hypothetical", + "hypotherical", "hypothetical", + "hypotheticly", "hypothetical", + "hystarically", "hysterically", + "hystorically", "hysterically", + "idealistisch", "idealistic", + "identificato", "identification", + "identifierad", "identified", + "identifieras", "identifies", + "identifyable", "identifiable", + "ideologicaly", "ideologically", + "idiosyncracy", "idiosyncrasy", + "illegetimate", "illegitimate", + "illegitamate", "illegitimate", + "illegitamite", "illegitimate", + "illegitemate", "illegitimate", + "illegitimite", "illegitimate", + "illigetimate", "illegitimate", + "illigitemate", "illegitimate", + "illistration", "illustration", + "illsutration", "illustrations", + "illustartion", "illustration", + "illustraitor", "illustrator", + "illustraties", "illustrate", + "illustratior", "illustrator", + "imcompatible", "incompatible", + "imcompetence", "incompetence", + "imexperience", "inexperience", + "immediatelly", "immediately", + "immortallity", "immortality", + "imperialfist", "imperialist", + "imperialisim", "imperialism", + "imperialstic", "imperialist", + "implamenting", "implementing", + "implausibile", "implausible", + "implecations", "implications", + "implementase", "implements", + "implementasi", "implements", + "implementato", "implementation", + "implentation", "implementation", + "implimenting", "implementing", + "imporvements", "improvements", + "impossibilty", "impossibility", + "impossiblely", "impossibly", + "impossiblity", "impossibly", + "impovershied", "impoverished", + "impoversihed", "impoverished", + "imprefection", "imperfections", + "improsonment", "imprisonment", + "improviserad", "improvised", + "imrpovements", "improvements", + "imtimidating", "intimidating", + "imtimidation", "intimidation", + "inaccesibles", "inaccessible", + "inaccessable", "inaccessible", + "inaccessbile", "inaccessible", + "inaccurasies", "inaccuracies", + "inaccuraties", "inaccuracies", + "inaccuricies", "inaccuracies", + "inacuraccies", "inaccuracies", + "inadvertenly", "inadvertently", + "inappropiate", "inappropriate", + "inapproprate", "inappropriate", + "inappropriae", "inappropriately", + "inappropriet", "inappropriately", + "inattractive", "unattractive", + "inbelievable", "unbelievable", + "incarcelated", "incarcerated", + "incarcirated", "incarcerated", + "incarserated", "incarcerated", + "incedentally", "incidentally", + "incentiveise", "incentives", + "incestigator", "investigator", + "incomaptible", "incompatible", + "incomparible", "incompatible", + "incompatable", "incompatible", + "incompatibil", "incompatible", + "incompetance", "incompetence", + "incompetente", "incompetence", + "incompitable", "incompatible", + "incomptetent", "incompetent", + "inconcistent", "inconsistent", + "inconsistant", "inconsistent", + "inconsistecy", "inconsistency", + "inconsisteny", "inconsistency", + "inconveinent", "inconvenient", + "inconveniant", "inconvenient", + "inconveniece", "inconvenience", + "inconvenince", "inconvenience", + "inconvienent", "inconvenient", + "incorparated", "incorporated", + "incorperated", 
"incorporated", + "incorportaed", "incorporated", + "incorportate", "incorporate", + "incrediblely", "incredibly", + "incrementers", "increments", + "incremential", "incremental", + "indefinately", "indefinitely", + "indefineable", "undefinable", + "indefinetely", "indefinitely", + "indefinitive", "indefinite", + "indefinitley", "indefinitely", + "indefintiely", "indefinitely", + "indepedantly", "independently", + "indepencence", "independence", + "independance", "independence", + "independante", "independents", + "independenet", "independents", + "independenly", "independently", + "independense", "independents", + "independente", "independence", + "independetly", "independently", + "indepentents", "independents", + "indetifiable", "identifiable", + "indianaoplis", "indianapolis", + "indianopolis", "indianapolis", + "indicentally", "incidentally", + "indifferance", "indifference", + "indifferente", "indifference", + "indiffernece", "indifference", + "indimidating", "intimidating", + "indimidation", "intimidation", + "indipendence", "independence", + "indisputible", "indisputable", + "indisputibly", "indisputably", + "individuales", "individuals", + "individualty", "individuality", + "individuella", "individual", + "indiviudally", "individually", + "indivudually", "individually", + "indpendently", "independently", + "indroduction", "introduction", + "indroductory", "introductory", + "industriella", "industrial", + "industrijske", "industries", + "inefficienct", "inefficient", + "inefficienty", "inefficiently", + "inevitablely", "inevitably", + "inevitablity", "inevitably", + "inevititably", "inevitably", + "inexblicably", "inexplicably", + "inexpectedly", "unexpectedly", + "inexpereince", "inexperience", + "inexperiance", "inexperience", + "inexperieced", "inexperienced", + "inexperiened", "inexperienced", + "inexperiente", "inexperience", + "inexpierence", "inexperienced", + "inexplicabil", "inexplicably", + "inexplicibly", "inexplicably", + "infalability", "infallibility", + "infilitrated", "infiltrated", + "infiltraitor", "infiltrator", + "infiltratior", "infiltrator", + "infiltratred", "infiltrate", + "influenceing", "influencing", + "infogrpahics", "infographic", + "inforgivable", "unforgivable", + "infrantryman", "infantryman", + "infridgement", "infringement", + "infrignement", "infringement", + "ingestigator", "investigator", + "ingredientes", "ingredients", + "ingreediants", "ingredients", + "ininterested", "uninterested", + "initalizable", "initializable", + "inkompatible", "incompatible", + "inkompetence", "incompetence", + "inkonsistent", "inconsistent", + "inlightening", "enlightening", + "innersection", "intersection", + "innerstellar", "interstellar", + "inpenetrable", "impenetrable", + "inplementing", "implementing", + "inplications", "implications", + "inpoverished", "impoverished", + "inprisonment", "imprisonment", + "inproductive", "unproductive", + "inprovements", "improvements", + "inresponsive", "unresponsive", + "insentivised", "insensitive", + "insentivises", "insensitive", + "insignifiant", "insignificant", + "insignificat", "insignificant", + "insinuationg", "insinuating", + "instabillity", "instability", + "instalaltion", "installations", + "installatons", "installations", + "installatron", "installation", + "instantaneos", "instantaneous", + "instantaneus", "instantaneous", + "instantanous", "instantaneous", + "instinctivly", "instinctively", + "institutuion", "institution", + "instramental", "instrumental", + "instrcutions", "instruction", + "instrucitons", 
"instruction", + "instructiosn", "instruction", + "instructores", "instructors", + "instrumentos", "instruments", + "instrumentul", "instrumental", + "insturmental", "instrumental", + "instutitions", "institutions", + "insuccessful", "unsuccessful", + "insufficiant", "insufficient", + "insuffucient", "insufficient", + "insuspecting", "unsuspecting", + "intaxication", "intoxication", + "intelelctual", "intellectuals", + "intellectals", "intellectuals", + "intellectaul", "intellectuals", + "intellectuel", "intellectual", + "intellecutal", "intellectual", + "intelligance", "intelligence", + "intelligenly", "intelligently", + "intelligente", "intelligence", + "intelligenty", "intelligently", + "intelligient", "intelligent", + "intenational", "international", + "intentionnal", "intentional", + "intepretator", "interpretor", + "interatellar", "interstellar", + "interational", "international", + "intercection", "interception", + "intercepcion", "interception", + "interceptons", "interceptions", + "intereaction", "intersection", + "interections", "interactions", + "interersting", "interpreting", + "interesction", "intersection", + "interestigly", "interestingly", + "interestinly", "interestingly", + "interferance", "interference", + "interfereing", "interfering", + "interferisce", "interferes", + "interferisse", "interferes", + "interferring", "interfering", + "intergration", "integration", + "interlectual", "intellectual", + "intermediare", "intermediate", + "intermediete", "intermediate", + "intermettent", "intermittent", + "intermideate", "intermediate", + "intermidiate", "intermediate", + "internatinal", "international", + "internationl", "international", + "internations", "interactions", + "internediate", "intermediate", + "internelized", "internalized", + "internilized", "internalized", + "interperters", "interpreter", + "interperting", "interpreting", + "interprating", "interpreting", + "interpretare", "interpreter", + "interpretato", "interpretation", + "interpreteer", "interpreter", + "interpretier", "interpreter", + "interpretion", "interpreting", + "interpretter", "interpreter", + "interpriting", "interpreting", + "interraccial", "interracial", + "interractial", "interracial", + "interrogatin", "interrogation", + "interrumping", "interrupting", + "interrupteds", "interrupts", + "interruptors", "interrupts", + "interseccion", "intersection", + "interseciton", "intersections", + "interseption", "interception", + "intersetllar", "interstellar", + "interstallar", "interstellar", + "interstaller", "interstellar", + "intersteller", "interstellar", + "interstellor", "interstellar", + "intertaining", "entertaining", + "intertwinded", "intertwined", + "intertwinned", "intertwined", + "interveiwing", "interviewing", + "intervencion", "intervention", + "interveneing", "intervening", + "intervension", "intervention", + "interviening", "interviewing", + "intidimation", "intimidation", + "intillectual", "intellectual", + "intimidacion", "intimidation", + "intimidative", "intimidate", + "intimitading", "intimidating", + "intimitating", "intimidating", + "intimitation", "intimidation", + "intorduction", "introduction", + "intorductory", "introductory", + "intoxicacion", "intoxication", + "intoxination", "intoxication", + "intrepreting", "interpreting", + "intrinsicaly", "intrinsically", + "introdiction", "introduction", + "introduccion", "introduction", + "introduceras", "introduces", + "introduceres", "introduces", + "introduciton", "introduction", + "introductary", "introductory", + "introducting", 
"introduction", + "introductury", "introductory", + "introduktion", "introduction", + "introspectin", "introspection", + "intruduction", "introduction", + "intruductory", "introductory", + "intsrumental", "instrumental", + "intuitivelly", "intuitively", + "inturrupting", "interrupting", + "invervention", "intervention", + "investagated", "investigated", + "investagator", "investigator", + "investegated", "investigated", + "investegator", "investigator", + "investigaron", "investigator", + "investigater", "investigator", + "investigatie", "investigative", + "investigatin", "investigation", + "investigatio", "investigator", + "investigaton", "investigation", + "investingate", "investigate", + "investogator", "investigator", + "invicibility", "invisibility", + "invididually", "individually", + "invisibiltiy", "invisibility", + "invisilibity", "invisibility", + "invisivility", "invisibility", + "invlunerable", "invulnerable", + "involnerable", "invulnerable", + "involuntairy", "involuntary", + "involuntarly", "involuntary", + "invonvenient", "inconvenient", + "invulenrable", "invulnerable", + "invulernable", "invulnerable", + "invulnarable", "invulnerable", + "invulnerbale", "invulnerable", + "invulnurable", "invulnerable", + "invulverable", "invulnerable", + "invunlerable", "invulnerable", + "invurnerable", "invulnerable", + "irrationably", "irrationally", + "irrationatly", "irrationally", + "irrationella", "irrational", + "irreplacable", "irreplaceable", + "irresistable", "irresistible", + "irresistably", "irresistibly", + "irrespecitve", "irrespective", + "irresponsble", "irresponsible", + "irresponsibe", "irresponsible", + "irreverisble", "irreversible", + "irreversebly", "irreversible", + "irreversibel", "irreversible", + "irrevirsible", "irreversible", + "irrispective", "irrespective", + "irriversible", "irreversible", + "isdefinitely", "indefinitely", + "isntallation", "installation", + "isntrumental", "instrumental", + "jackonsville", "jacksonville", + "jounralistic", "journalistic", + "jouranlistic", "journalistic", + "journalisitc", "journalistic", + "journalistes", "journalists", + "judgementals", "judgements", + "juggernaunts", "juggernaut", + "juridisction", "jurisdictions", + "jurisdiccion", "jurisdiction", + "jurisdiciton", "jurisdiction", + "jurisdiktion", "jurisdiction", + "jurisfiction", "jurisdiction", + "jurisidction", "jurisdiction", + "juristiction", "jurisdiction", + "jursidiction", "jurisdiction", + "jusridiction", "jurisdiction", + "justificatin", "justifications", + "katastrophic", "catastrophic", + "kidnergarten", "kindergarten", + "kindergarden", "kindergarten", + "kingergarten", "kindergarten", + "kintergarten", "kindergarten", + "knolwedgable", "knowledgable", + "knoweldgable", "knowledgable", + "knowladgable", "knowledgable", + "knowldegable", "knowledgable", + "knowldgeable", "knowledgable", + "knowleagable", "knowledgable", + "knowledagble", "knowledgable", + "knowledeable", "knowledgable", + "knowledgabel", "knowledgable", + "knowledgeble", "knowledgeable", + "knowledgebly", "knowledgable", + "knowledgible", "knowledgable", + "knowlegdable", "knowledgable", + "knowlegeable", "knowledgeable", + "knwoledgable", "knowledgable", + "kolonization", "colonization", + "kombinations", "combinations", + "kommissioner", "commissioner", + "kompensation", "compensation", + "konfidential", "confidential", + "konfirmation", "confirmation", + "kongregation", "congregation", + "konservatism", "conservatism", + "konservative", "conservative", + "konsultation", "consultation", + 
"konversation", "conversation", + "koordination", "coordination", + "krankenstein", "frankenstein", + "leaglization", "legalization", + "legalizacion", "legalization", + "legalizaiton", "legalization", + "legendariske", "legendaries", + "legimitately", "legitimately", + "legislatiors", "legislators", + "legistration", "registration", + "legitamately", "legitimately", + "legitamitely", "legitimately", + "legitemately", "legitimately", + "legitimatley", "legitimately", + "legitimitely", "legitimately", + "liberatrians", "libertarians", + "libertarains", "libertarians", + "libertariens", "libertarians", + "libertaryans", "libertarians", + "libertatians", "libertarians", + "liberterians", "libertarians", + "libretarians", "libertarians", + "lighthearded", "lighthearted", + "linguisticas", "linguistics", + "linguisticos", "linguistics", + "linguistisch", "linguistics", + "litllefinger", "littlefinger", + "littelfinger", "littlefinger", + "litterfinger", "littlefinger", + "littiefinger", "littlefinger", + "littlefigner", "littlefinger", + "littlefinder", "littlefinger", + "littlepinger", "littlefinger", + "lnowledgable", "knowledgable", + "longitudonal", "longitudinal", + "madturbating", "masturbating", + "madturbation", "masturbation", + "magnificient", "magnificent", + "maintainance", "maintenance", + "maintainence", "maintenance", + "maintenaince", "maintenance", + "malfucntions", "malfunction", + "manafactured", "manufactured", + "manafacturer", "manufacturer", + "manafactures", "manufactures", + "manifactured", "manufactured", + "manifacturer", "manufacturer", + "manifactures", "manufactures", + "manifestaion", "manifestation", + "manifestanti", "manifestation", + "manipluating", "manipulating", + "manipluation", "manipulation", + "manipualting", "manipulating", + "manipualtion", "manipulation", + "manipualtive", "manipulative", + "manipulacion", "manipulation", + "manipulitive", "manipulative", + "maniuplating", "manipulating", + "maniuplation", "manipulation", + "maniuplative", "manipulative", + "manouverable", "maneuverable", + "mansalughter", "manslaughter", + "manslaugther", "manslaughter", + "mansluaghter", "manslaughter", + "manufactered", "manufactured", + "manufacterer", "manufacturer", + "manufacteres", "manufactures", + "manufacteurs", "manufactures", + "manufactored", "manufactured", + "manufactorer", "manufacturer", + "manufactores", "manufactures", + "manufactuers", "manufacturers", + "manufactuing", "manufacturing", + "manufacturas", "manufactures", + "manufacturor", "manufacturer", + "manufactuter", "manufacture", + "manufacuters", "manufactures", + "manufacutred", "manufacture", + "manufacutres", "manufactures", + "manufaturing", "manufacturing", + "manupilating", "manipulating", + "manupulating", "manipulating", + "manupulation", "manipulation", + "manupulative", "manipulative", + "marchmallows", "marshmallows", + "marganilized", "marginalized", + "margenalized", "marginalized", + "marginilized", "marginalized", + "marhsmallows", "marshmallows", + "marshamllows", "marshmallows", + "marshmallons", "marshmallows", + "masoginistic", "misogynistic", + "masogynistic", "misogynistic", + "massachusets", "massachusetts", + "massachustts", "massachusetts", + "masterbation", "masturbation", + "masterpeices", "masterpiece", + "mastrubating", "masturbating", + "mastrubation", "masturbation", + "mastubration", "masturbation", + "masturabting", "masturbating", + "masturabtion", "masturbation", + "masturbacion", "masturbation", + "masturbaited", "masturbated", + "masturbathon", 
"masturbation", + "masturbsting", "masturbating", + "masturdating", "masturbating", + "mastutbation", "masturbation", + "mataphorical", "metaphorical", + "mataphysical", "metaphysical", + "matchmakeing", "matchmaking", + "mathemathics", "mathematics", + "mathematican", "mathematician", + "mathematicas", "mathematics", + "mathematicks", "mathematics", + "mathematicly", "mathematical", + "mathematisch", "mathematics", + "mathemetical", "mathematical", + "matheticians", "mathematicians", + "mathimatical", "mathematical", + "mathmatician", "mathematician", + "mecahnically", "mechanically", + "mechancially", "mechanically", + "meditaciones", "medications", + "mediteranean", "mediterranean", + "mediterraean", "mediterranean", + "mediterranen", "mediterranean", + "memerization", "memorization", + "memorizacion", "memorization", + "memorozation", "memorization", + "metalurgical", "metallurgical", + "metaphisical", "metaphysical", + "metaphoricly", "metaphorical", + "metaphsyical", "metaphysical", + "metaphyiscal", "metaphysical", + "metaphyscial", "metaphysical", + "metaphysisch", "metaphysics", + "metephorical", "metaphorical", + "metephysical", "metaphysical", + "meterologist", "meteorologist", + "meterosexual", "heterosexual", + "methaporical", "metaphorical", + "methematical", "mathematical", + "metiphorical", "metaphorical", + "metophorical", "metaphorical", + "metorpolitan", "metropolitan", + "metrololitan", "metropolitan", + "metropilitan", "metropolitan", + "metroploitan", "metropolitan", + "metropolians", "metropolis", + "metropoliten", "metropolitan", + "metropolitin", "metropolitan", + "metropoliton", "metropolitan", + "microcentres", "microcenter", + "microphonies", "microphones", + "microscophic", "microscopic", + "microscopice", "microscope", + "microscoptic", "microscopic", + "midfieldiers", "midfielders", + "millenialism", "millennialism", + "millionairre", "millionaire", + "millionaries", "millionaires", + "millioniares", "millionaires", + "minimalisitc", "minimalist", + "minimalisity", "minimalist", + "mininterpret", "misinterpret", + "minipulating", "manipulating", + "minipulation", "manipulation", + "minipulative", "manipulative", + "miracilously", "miraculously", + "miracurously", "miraculous", + "miscarraiges", "miscarriage", + "miscelaneous", "miscellaneous", + "miscellanous", "miscellaneous", + "mischievious", "mischievous", + "misdameanors", "misdemeanors", + "misdeamenors", "misdemeanor", + "misfourtunes", "misfortunes", + "misgoynistic", "misogynistic", + "misinterpert", "misinterpret", + "misinterpred", "misinterpreted", + "misinterprit", "misinterpreting", + "misinterpted", "misinterpret", + "misintrepret", "misinterpret", + "misisonaries", "missionaries", + "misoganistic", "misogynistic", + "misogenistic", "misogynistic", + "misoginystic", "misogynistic", + "misognyistic", "misogynistic", + "misogonistic", "misogynistic", + "misogynisitc", "misogynistic", + "misogynsitic", "misogynistic", + "misogynystic", "misogynistic", + "missionaires", "missionaries", + "mississipppi", "mississippi", + "misspellling", "misspelling", + "misteriously", "mysteriously", + "misundersood", "misunderstood", + "misunderstod", "misunderstood", + "misygonistic", "misogynistic", + "modificacion", "modification", + "modificaiton", "modification", + "modificatons", "modifications", + "modifikation", "modification", + "modivational", "motivational", + "moisterizing", "moisturizing", + "moistorizing", "moisturizing", + "moisutrizing", "moisturizing", + "momentarilly", "momentarily", + 
"monolithisch", "monolithic", + "mositurizing", "moisturizing", + "motherbaords", "motherboards", + "motherborads", "motherboards", + "motivacional", "motivational", + "motovational", "motivational", + "mousturizing", "moisturizing", + "muktitasking", "multitasking", + "mulittasking", "multitasking", + "multinatinal", "multinational", + "multitaksing", "multitasking", + "munipulative", "manipulative", + "mutlitasking", "multitasking", + "mysoganistic", "misogynistic", + "mysogenistic", "misogynistic", + "mysogonistic", "misogynistic", + "mysterioulsy", "mysteriously", + "nacionalists", "nationalists", + "narcisisstic", "narcissistic", + "narcissictic", "narcissistic", + "narcissisism", "narcissism", + "narcissisist", "narcissist", + "narcissisitc", "narcissist", + "narcississts", "narcissist", + "narssicistic", "narcissistic", + "natioanlists", "nationalists", + "nationalisic", "nationalistic", + "nationalisim", "nationalism", + "nationalistc", "nationalistic", + "nationalites", "nationalist", + "nationalitic", "nationalistic", + "nationalitys", "nationalist", + "nationallity", "nationally", + "nationalsits", "nationalists", + "nationalties", "nationalist", + "nazionalists", "nationalists", + "neccessarily", "necessarily", + "neccessities", "necessities", + "necessarilly", "necessarily", + "necessitites", "necessities", + "neckbearders", "neckbeards", + "neckbeardese", "neckbeards", + "neckbeardest", "neckbeards", + "neckbeardies", "neckbeards", + "neckbeardius", "neckbeards", + "negociations", "negotiations", + "negoitations", "negotiations", + "negotiatians", "negotiations", + "negotiatiing", "negotiating", + "negotiationg", "negotiating", + "negotiatiors", "negotiations", + "neigbhorhood", "neighborhoods", + "neigbourhood", "neighbourhood", + "neighboorhod", "neighbourhood", + "neighborhing", "neighboring", + "neighborhods", "neighborhoods", + "neighbourghs", "neighbours", + "neighbourhod", "neighbourhood", + "neighbourood", "neighbourhood", + "neighbrohood", "neighborhoods", + "neighourhood", "neighborhood", + "neoroscience", "neuroscience", + "neruological", "neurological", + "neruoscience", "neuroscience", + "netropolitan", "metropolitan", + "neuorscience", "neuroscience", + "neuralogical", "neurological", + "neuroligical", "neurological", + "neurosceince", "neuroscience", + "neuroscienze", "neuroscience", + "neurosicence", "neuroscience", + "neverhteless", "nevertheless", + "nieghborhood", "neighborhood", + "norhtwestern", "northwestern", + "nothingsness", "nothingness", + "noticeablely", "noticeably", + "notificacion", "notification", + "notificaiton", "notification", + "notificatons", "notifications", + "nuerological", "neurological", + "nueroscience", "neuroscience", + "nutritionnal", "nutritional", + "obersvations", "observations", + "objectivelly", "objectively", + "objectiviser", "objectives", + "objectivitiy", "objectivity", + "obversations", "observations", + "ocassionally", "occasionally", + "occaisonally", "occasionally", + "occasioanlly", "occasionally", + "occassionaly", "occasionally", + "occationally", "occasionally", + "occurrencies", "occurrences", + "offensivelly", "offensively", + "ogranisation", "organisation", + "omniverously", "omnivorously", + "operationnal", "operational", + "opportuniste", "opportunities", + "opportunites", "opportunities", + "oppositition", "opposition", + "opthalmology", "ophthalmology", + "optimistisch", "optimistic", + "optimizacion", "optimization", + "optimizating", "optimization", + "optimziation", "optimization", + "optmizations", 
"optimizations", + "oragnisation", "organisation", + "orchastrated", "orchestrated", + "orchestarted", "orchestrated", + "orchestraded", "orchestrated", + "orchistrated", "orchestrated", + "orgainsation", "organisation", + "orgainzation", "organizations", + "organisaiton", "organisation", + "organisatons", "organisations", + "organistaion", "organisation", + "organizacion", "organization", + "organizaiton", "organization", + "organizativo", "organization", + "organizatons", "organizations", + "organsiation", "organisation", + "organziation", "organization", + "orginasation", "organisation", + "orginazation", "organization", + "orgnaisation", "organisations", + "originallity", "originality", + "outraegously", "outrageously", + "outrageoulsy", "outrageously", + "outragesouly", "outrageously", + "outrageuosly", "outrageously", + "outragiously", "outrageously", + "outsourceing", "outsourcing", + "overbearring", "overbearing", + "overblocking", "overclocking", + "overclcoking", "overclocking", + "overclicking", "overclocking", + "overcloaking", "overclocking", + "overclockign", "overclocking", + "overclokcing", "overclocking", + "overhearting", "overreacting", + "overheathing", "overheating", + "overhtinking", "overthinking", + "overhwelming", "overwhelming", + "overlappping", "overlapping", + "overlcocking", "overclocking", + "overreaktion", "overreaction", + "overwealming", "overwhelming", + "overwhelemed", "overwhelmed", + "overwhemling", "overwhelming", + "overwhleming", "overwhelming", + "owerpowering", "overpowering", + "painkilllers", "painkillers", + "palastinians", "palestinians", + "palesitnians", "palestinians", + "palestenians", "palestinians", + "palestinains", "palestinians", + "palestiniens", "palestinians", + "palestininan", "palestinian", + "palestininas", "palestinians", + "palistinians", "palestinians", + "palythroughs", "playthroughs", + "parapharsing", "paraphrasing", + "paraphenalia", "paraphernalia", + "paraphrashed", "paraphrase", + "paraphrazing", "paraphrasing", + "paraprashing", "paraphrasing", + "paraprhasing", "paraphrasing", + "parenthesees", "parentheses", + "parenthesies", "parenthesis", + "parliamentry", "parliamentary", + "partecipants", "participants", + "partecipated", "participated", + "parternships", "partnership", + "particapated", "participated", + "particiapnts", "participant", + "particiapted", "participated", + "participante", "participate", + "participaste", "participants", + "participatie", "participated", + "participatin", "participation", + "participatns", "participant", + "participaton", "participant", + "participents", "participants", + "particualrly", "particularly", + "particulalry", "particularly", + "particullary", "particularly", + "passionatley", "passionately", + "pathalogical", "pathological", + "pathelogical", "pathological", + "patholigical", "pathological", + "paychedelics", "psychedelics", + "paychiatrist", "psychiatrist", + "paychologist", "psychologist", + "paychopathic", "psychopathic", + "penetratiing", "penetrating", + "penisylvania", "pennsylvania", + "pennsilvania", "pennsylvania", + "pennslyvania", "pennsylvania", + "pennsylvaina", "pennsylvania", + "pennsyvlania", "pennsylvania", + "pennyslvania", "pennsylvania", + "penssylvania", "pennsylvania", + "pentsylvania", "pennsylvania", + "percentagens", "percentages", + "perferential", "preferential", + "performantes", "performances", + "performences", "performances", + "perfromances", "performances", + "peridoically", "periodically", + "peripathetic", "peripatetic", + 
"periphereals", "peripherals", + "peripherials", "peripherals", + "permanantely", "permanently", + "permanentely", "permanently", + "permissiable", "permissible", + "peroidically", "periodically", + "perpatrators", "perpetrators", + "perpatuating", "perpetuating", + "perpertators", "perpetrators", + "perpertrated", "perpetrated", + "perpetraitor", "perpetrator", + "perpetraters", "perpetrators", + "perpetuaters", "perpetuates", + "perpitrators", "perpetrators", + "perposefully", "purposefully", + "perposterous", "preposterous", + "perpretators", "perpetrators", + "perpsectives", "perspectives", + "perputrators", "perpetrators", + "perputuating", "perpetuating", + "persepctives", "perspectives", + "perservation", "preservation", + "perseverence", "perseverance", + "personalites", "personalities", + "personallity", "personally", + "personilized", "personalized", + "perspecitves", "perspectives", + "perspectivas", "perspectives", + "persumptuous", "presumptuous", + "perticularly", "particularly", + "pertubations", "perturbations", + "pessimisitic", "pessimistic", + "pessimisstic", "pessimistic", + "phenomenonal", "phenomenal", + "phenomenonly", "phenomenally", + "phenomonenon", "phenomenon", + "phialdelphia", "philadelphia", + "philadalphia", "philadelphia", + "philadelhpia", "philadelphia", + "philadeplhia", "philadelphia", + "philadlephia", "philadelphia", + "philedalphia", "philadelphia", + "philedelphia", "philadelphia", + "philidalphia", "philadelphia", + "philippinnes", "philippines", + "philippinoes", "philippines", + "philisophers", "philosophers", + "philisophies", "philosophies", + "phillippines", "philippines", + "philosiphers", "philosophers", + "philosiphies", "philosophies", + "philosohpers", "philosopher", + "philosohpies", "philosophies", + "philosophiae", "philosophies", + "philosophics", "philosophies", + "philosophios", "philosophies", + "philospohers", "philosophers", + "philospohies", "philosophies", + "photagrapher", "photographer", + "photochopped", "photoshopped", + "photograhper", "photographer", + "photograpers", "photographers", + "photographes", "photographs", + "photographyi", "photographic", + "photogropher", "photographer", + "photogrpahed", "photographed", + "photogrpaher", "photographer", + "photoshipped", "photoshopped", + "photoshooped", "photoshopped", + "photoshoppad", "photoshopped", + "phychedelics", "psychedelics", + "phychiatrist", "psychiatrist", + "phychologist", "psychologist", + "phychopathic", "psychopathic", + "physcedelics", "psychedelics", + "physciatrist", "psychiatrist", + "physcologist", "psychologist", + "physcopathic", "psychopathic", + "physicallity", "physically", + "physiologial", "physiological", + "pilgrimmages", "pilgrimages", + "pitchforkers", "pitchforks", + "pkaythroughs", "playthroughs", + "plabeswalker", "planeswalker", + "plaestinians", "palestinians", + "planeswaller", "planeswalker", + "planeswlaker", "planeswalker", + "planetwalker", "planeswalker", + "plansewalker", "planeswalker", + "plauthroughs", "playthroughs", + "playhtroughs", "playthroughs", + "playtgroughs", "playthroughs", + "playthorughs", "playthroughs", + "playthourghs", "playthroughs", + "playthrougth", "playthroughs", + "playthrouhgs", "playthroughs", + "playthtoughs", "playthroughs", + "playtrhoughs", "playthroughs", + "populationes", "populations", + "pornograpghy", "pornography", + "porportional", "proportional", + "portabillity", "portability", + "portagonists", "protagonists", + "positionning", "positioning", + "positivitely", "positivity", + 
"possessivize", "possessive", + "possibillity", "possibility", + "possiblility", "possibility", + "possiblities", "possibilities", + "powerfisting", "powerlifting", + "powerlfiting", "powerlifting", + "powerlifitng", "powerlifting", + "powerlisting", "powerlifting", + "powetlifting", "powerlifting", + "powrrlifting", "powerlifting", + "practicioner", "practitioner", + "practisioner", "practitioner", + "pratictioner", "practitioners", + "precedessors", "predecessors", + "preconveived", "preconceived", + "predacessors", "predecessors", + "predeccesors", "predecessor", + "predecesores", "predecessor", + "predescesors", "predecessors", + "predessecors", "predecessors", + "predetermind", "predetermined", + "predicessors", "predecessors", + "predocessors", "predecessors", + "predomiantly", "predominately", + "predominanty", "predominantly", + "predominatly", "predominantly", + "preferantial", "preferential", + "preferentail", "preferential", + "preformances", "performances", + "preinitalize", "preinitialize", + "preliminarly", "preliminary", + "prematurelly", "prematurely", + "premillenial", "premillennial", + "preocupation", "preoccupation", + "preperations", "preparations", + "prepetrators", "perpetrators", + "prepetuating", "perpetuating", + "prepostorous", "preposterous", + "preposturous", "preposterous", + "prerequisets", "prerequisite", + "prescirption", "prescriptions", + "prescribtion", "prescription", + "prescripcion", "prescription", + "prescriptons", "prescriptions", + "prescritpion", "prescriptions", + "presedential", "presidential", + "presentacion", "presentation", + "presentaiton", "presentations", + "preservacion", "preservation", + "preservating", "preservation", + "preservativo", "preservation", + "presidencial", "presidential", + "presidenital", "presidential", + "presidentail", "presidential", + "presnetation", "presentations", + "presonalized", "personalized", + "prespectives", "perspectives", + "presrciption", "prescriptions", + "presumpteous", "presumptuous", + "presumputous", "presumptuous", + "prevantative", "preventative", + "preventation", "presentation", + "preventetive", "preventative", + "preventitive", "preventative", + "prezidential", "presidential", + "principlaity", "principality", + "probabiliste", "probabilities", + "probabilites", "probabilities", + "probabillity", "probability", + "probablistic", "probabilistic", + "proclomation", "proclamation", + "proconceived", "preconceived", + "profesisonal", "professionals", + "professiinal", "professionalism", + "professioanl", "professionals", + "professiomal", "professionalism", + "professionel", "professional", + "professionsl", "professionalism", + "professoinal", "professionals", + "professonial", "professionals", + "proffesional", "professional", + "proficientcy", "proficiency", + "profissional", "professional", + "profitabiliy", "profitability", + "profitabilty", "profitability", + "profressions", "progressions", + "progatonists", "protagonists", + "programmeurs", "programmer", + "progressieve", "progressive", + "progressioin", "progressions", + "progressiong", "progressing", + "progressisme", "progresses", + "progressiste", "progresses", + "progressivas", "progressives", + "progressivey", "progressively", + "progressivly", "progressively", + "progressivsm", "progressives", + "progresssing", "progressing", + "progresssion", "progressions", + "progresssive", "progressives", + "prohibitting", "prohibiting", + "projecticles", "projectiles", + "proletariaat", "proletariat", + "proletariant", "proletariat", + 
"proletaricat", "proletariat", + "prominantely", "prominently", + "promiscuious", "promiscuous", + "promisculous", "promiscuous", + "promotionnal", "promotional", + "pronounceing", "pronouncing", + "pronunciaton", "pronunciation", + "propertional", "proportional", + "propesterous", "preposterous", + "proportianal", "proportional", + "proportionel", "proportional", + "proposterous", "preposterous", + "proprotional", "proportional", + "prostetution", "prostitution", + "prostitition", "prostitution", + "prostitucion", "prostitution", + "prostituiton", "prostitution", + "prostitutiei", "prostitute", + "protaganists", "protagonists", + "protaginists", "protagonists", + "protagnoists", "protagonists", + "protestantes", "protestants", + "protoganists", "protagonists", + "prouncements", "pronouncements", + "pruposefully", "purposefully", + "pscyhologist", "psychologist", + "pscyhopathic", "psychopathic", + "pshyciatrist", "psychiatrist", + "pshycologist", "psychologist", + "pshycopathic", "psychopathic", + "psichologist", "psychologist", + "psychaitrist", "psychiatrist", + "psychedellic", "psychedelic", + "psychedilics", "psychedelics", + "psychemedics", "psychedelics", + "psychiatirst", "psychiatrists", + "psychiatrics", "psychiatrist", + "psychiatrict", "psychiatrist", + "psychiatrits", "psychiatrists", + "psychistrist", "psychiatrist", + "psychodelics", "psychedelics", + "psycholigist", "psychologist", + "psychologial", "psychological", + "psychologits", "psychologists", + "psychologyst", "psychologist", + "psychopathes", "psychopaths", + "psychyatrist", "psychiatrist", + "puplications", "publications", + "puritannical", "puritanical", + "purpetrators", "perpetrators", + "purpetuating", "perpetuating", + "purpusefully", "purposefully", + "pyschedelics", "psychedelics", + "pyschiatrist", "psychiatrist", + "pyschologist", "psychologist", + "pyschopathic", "psychopathic", + "qualificaton", "qualification", + "qualifierais", "qualifiers", + "qualtitative", "quantitative", + "quantatitive", "quantitative", + "quantititive", "quantitative", + "quarterblack", "quarterback", + "quesitonable", "questionable", + "questionalbe", "questionable", + "questionning", "questioning", + "questionsign", "questioning", + "radioactieve", "radioactive", + "rationallity", "rationally", + "reactionairy", "reactionary", + "reactionnary", "reactionary", + "realisticaly", "realistically", + "realisticlly", "realistically", + "reasonablely", "reasonably", + "recallection", "recollection", + "reccomending", "recommending", + "reccommended", "recommended", + "recepcionist", "receptionist", + "receptionest", "receptionist", + "recgonizable", "recognizable", + "reciporcated", "reciprocate", + "reciprociate", "reciprocate", + "reciprocrate", "reciprocate", + "recognizible", "recognizable", + "recolleciton", "recollection", + "recommanding", "recommending", + "recommendeds", "recommends", + "recommendors", "recommends", + "recommeneded", "recommended", + "recommenting", "recommending", + "recongizable", "recognizable", + "recontructed", "reconstructed", + "recpetionist", "receptionist", + "recreacional", "recreational", + "recriational", "recreational", + "referenceing", "referencing", + "refirgerator", "refrigerator", + "refriderator", "refrigerator", + "refrigarator", "refrigerator", + "refrigerador", "refrigerator", + "refrigerater", "refrigerator", + "refrigirator", "refrigerator", + "regenaration", "regeneration", + "regeneracion", "regeneration", + "regestration", "registration", + "registartion", "registration", + 
"registrating", "registration", + "regrigerator", "refrigerator", + "regulatorias", "regulators", + "regulatories", "regulators", + "regulatorios", "regulators", + "reicarnation", "reincarnation", + "reinforcemnt", "reinforcement", + "reinitalised", "reinitialised", + "reinitalises", "reinitialises", + "reinitalized", "reinitialized", + "reinitalizes", "reinitializes", + "reinstallled", "reinstalled", + "reisntalling", "reinstalling", + "relaitonship", "relationships", + "relatinoship", "relationships", + "reliabillity", "reliability", + "reluctanctly", "reluctantly", + "remarkablely", "remarkably", + "rememberance", "remembrance", + "reminiscient", "reminiscent", + "renaissaince", "renaissance", + "renegeration", "regeneration", + "reorganision", "reorganisation", + "repalcements", "replacements", + "repersenting", "representing", + "reporduction", "reproduction", + "reporductive", "reproductive", + "reprecussion", "repercussions", + "representate", "representative", + "represention", "representing", + "representive", "representative", + "reproducable", "reproducible", + "reproduccion", "reproduction", + "reproduciton", "reproduction", + "reproducting", "reproduction", + "reproductivo", "reproduction", + "reproduktion", "reproduction", + "repsectfully", "respectfully", + "repsectively", "respectively", + "republicanas", "republicans", + "republicanos", "republicans", + "republicants", "republicans", + "republicians", "republicans", + "requerimento", "requirement", + "requeriments", "requirements", + "requierments", "requirements", + "requriements", "requirements", + "resembelance", "resemblance", + "reseptionist", "receptionist", + "reserrection", "resurrection", + "resintalling", "reinstalling", + "resistancies", "resistances", + "resistencias", "resistances", + "respecitvely", "respectively", + "respectabile", "respectable", + "respectivily", "respectively", + "respectivley", "respectively", + "respectuflly", "respectfully", + "respiratiory", "respiratory", + "responsabile", "responsible", + "responsaveis", "responsive", + "responsbilty", "responsibly", + "responsibile", "responsible", + "responsibily", "responsibility", + "responsibley", "responsibly", + "responsibliy", "responsibly", + "responsiblty", "responsibly", + "ressemblance", "resemblance", + "ressemblence", "resemblance", + "ressurection", "resurrection", + "restaurantes", "restaurants", + "restauration", "restoration", + "restauraunts", "restaurants", + "restirctions", "restrictions", + "restrainting", "restraining", + "restrcitions", "restriction", + "restricitons", "restrictions", + "resurreccion", "resurrection", + "resurrektion", "resurrection", + "retalitation", "retaliation", + "retributioon", "retribution", + "retroactivly", "retroactively", + "revolutionay", "revolutionary", + "revolutionos", "revolutions", + "rezurrection", "resurrection", + "rictatorship", "dictatorship", + "ridicilously", "ridiculously", + "ridicoulusly", "ridiculously", + "righteouness", "righteousness", + "rockerfeller", "rockefeller", + "rollercoaser", "rollercoaster", + "rollercoater", "rollercoaster", + "romanitcally", "romantically", + "roundabounts", "roundabout", + "rudimentatry", "rudimentary", + "rysurrection", "resurrection", + "sacksonville", "jacksonville", + "sacreligious", "sacrilegious", + "sacrificeing", "sacrificing", + "saksatchewan", "saskatchewan", + "salughtering", "slaughtering", + "sanctionning", "sanctioning", + "sarcasticaly", "sarcastically", + "sarcasticlly", "sarcastically", + "sascatchewan", "saskatchewan", + 
"saskatcehwan", "saskatchewan", + "saskatchawan", "saskatchewan", + "saskatechwan", "saskatchewan", + "sasketchawan", "saskatchewan", + "sasketchewan", "saskatchewan", + "sasktachewan", "saskatchewan", + "satasfaction", "satisfaction", + "satasfactory", "satisfactory", + "satisfaccion", "satisfaction", + "satisfacting", "satisfaction", + "satisfcation", "satisfaction", + "satisfiction", "satisfaction", + "satistactory", "satisfactory", + "satsifaction", "satisfaction", + "satsifactory", "satisfactory", + "scandanivian", "scandinavian", + "scandenavian", "scandinavian", + "scandianvian", "scandinavian", + "scandinacian", "scandinavian", + "scandinaivan", "scandinavia", + "scandinavica", "scandinavian", + "scandinavien", "scandinavian", + "scandinavion", "scandinavian", + "scandivanian", "scandinavian", + "scandonavian", "scandinavian", + "schizophrena", "schizophrenia", + "scholarhsips", "scholarships", + "scholerships", "scholarships", + "scholorships", "scholarships", + "scnadinavian", "scandinavian", + "screenshoots", "screenshot", + "sensationail", "sensational", + "sensationnal", "sensational", + "sensibilites", "sensibilities", + "sensitivitiy", "sensitivity", + "sentimentals", "sentiments", + "sertificates", "certificates", + "serveillance", "surveillance", + "seskatchewan", "saskatchewan", + "shakesperean", "shakespeare", + "shamelessely", "shamelessly", + "shamelessley", "shamelessly", + "shampionship", "championship", + "shardholders", "shareholders", + "shenanigains", "shenanigans", + "shenanigangs", "shenanigans", + "shenaniganns", "shenanigans", + "shenanighans", "shenanigans", + "shopkeeepers", "shopkeepers", + "showboarding", "snowboarding", + "siginificant", "significant", + "significanly", "significantly", + "significante", "significance", + "significanty", "significantly", + "significatly", "significantly", + "signleplayer", "singleplayer", + "simaltaneous", "simultaneous", + "simeltaneous", "simultaneous", + "similaraties", "similarities", + "similiarites", "similarities", + "similiarties", "similarities", + "similiraties", "similarities", + "similtaneous", "simultaneous", + "simliarities", "similarities", + "simlutaneous", "simultaneous", + "simpathizers", "sympathizers", + "simplistisch", "simplistic", + "simulatenous", "simultaneous", + "simulatneous", "simultaneous", + "simultaenous", "simultaneous", + "simultaneuos", "simultaneous", + "simultanious", "simultaneous", + "simulteneous", "simultaneous", + "singelplayer", "singleplayer", + "singlepalyer", "singleplayer", + "sinlgeplayer", "singleplayer", + "situationals", "situations", + "situationnal", "situational", + "skandinavian", "scandinavian", + "skateboaring", "skateboarding", + "skrawberries", "strawberries", + "slaugthering", "slaughtering", + "sloughtering", "slaughtering", + "sluaghtering", "slaughtering", + "snowballling", "snowballing", + "snowbaording", "snowboarding", + "socialistisk", "socialists", + "socialogical", "sociological", + "socioeconimc", "socioeconomic", + "socioeconmic", "socioeconomic", + "socioligical", "sociological", + "sociopolical", "sociological", + "somethingest", "somethings", + "sophisticaed", "sophisticated", + "sophisticted", "sophisticated", + "southamption", "southampton", + "southernerns", "southerners", + "sovereighnty", "sovereignty", + "sovereignety", "sovereignty", + "sovereignity", "sovereignty", + "specialistes", "specialists", + "specializare", "specialize", + "specializate", "specialize", + "specializeds", "specializes", + "specializied", "specialize", + 
"speciallized", "specialised", + "specifcation", "specification", + "spectacuarly", "spectacular", + "spectaculair", "spectacular", + "spectaculary", "spectacularly", + "spectacullar", "spectacularly", + "specualtions", "speculation", + "spermatozoan", "spermatozoon", + "spesifically", "specifically", + "spirituallly", "spiritually", + "spirtiuality", "spirituality", + "spirutuality", "spirituality", + "spontaneosly", "spontaneously", + "spontaneouly", "spontaneously", + "spreadhseets", "spreadsheets", + "spreadsheats", "spreadsheets", + "spreadsheeds", "spreadsheets", + "spreadsheeet", "spreadsheets", + "standartized", "standardized", + "standerdized", "standardized", + "stardardized", "standardized", + "starightened", "straightened", + "starwberries", "strawberries", + "statisticaly", "statistically", + "stereotpying", "stereotyping", + "stereotypers", "stereotypes", + "stereotypian", "stereotyping", + "steriotyping", "stereotyping", + "steroetyping", "stereotyping", + "steryotyping", "stereotyping", + "straigntened", "straightened", + "straigthened", "straightened", + "strategicaly", "strategically", + "strategiclly", "strategically", + "strawburries", "strawberries", + "streemlining", "streamlining", + "streightened", "straightened", + "strenghening", "strengthening", + "strenghtened", "strengthened", + "strengtheing", "strengthening", + "stroytelling", "storytelling", + "subconcsious", "subconscious", + "subconsicous", "subconscious", + "subcouncious", "subconscious", + "subcsription", "subscriptions", + "subesquently", "subsequently", + "subjectivety", "subjectively", + "subjectivily", "subjectively", + "subjectivley", "subjectively", + "subjudgation", "subjugation", + "subredditors", "subreddits", + "subscirption", "subscriptions", + "subsconcious", "subconscious", + "subscribbers", "subscribers", + "subscribbing", "subscribing", + "subscribirse", "subscriber", + "subscribtion", "subscription", + "subscriptons", "subscriptions", + "subscritpion", "subscriptions", + "subscrpition", "subscriptions", + "subsiquently", "subsequently", + "subsrciption", "subscriptions", + "subsricption", "subscriptions", + "substantialy", "substantially", + "substantitve", "substantive", + "substitition", "substitution", + "substituters", "substitutes", + "substitutivo", "substitution", + "substitutues", "substitutes", + "substracting", "subtracting", + "substraction", "subtraction", + "subterranian", "subterranean", + "succsessfull", "successful", + "sunconscious", "subconscious", + "supermarkeds", "supermarkets", + "supermarkers", "supermarkets", + "supermarkert", "supermarkets", + "supermarkten", "supermarket", + "supermarktes", "supermarkets", + "supernarkets", "supermarkets", + "supernatrual", "supernatural", + "supersticion", "superstition", + "superstision", "superstition", + "superstitios", "superstitious", + "superstitous", "superstitious", + "supervisiors", "supervisors", + "supervisoras", "supervisors", + "supervisores", "supervisors", + "supllemental", "supplemental", + "supplamental", "supplemental", + "supplamented", "supplemented", + "supplimental", "supplemental", + "suppresssion", "suppression", + "supscription", "subscription", + "supsiciously", "suspiciously", + "surprizingly", "surprisingly", + "surrenderred", "surrendered", + "surrundering", "surrendering", + "survaillance", "surveillance", + "survaillence", "surveillance", + "survallience", "surveillance", + "surveillence", "surveillance", + "survelliance", "surveillance", + "surviellance", "surveillance", + "survivabiity", 
"survivability", + "survivabiliy", "survivability", + "survivabilty", "survivability", + "susceptiable", "susceptible", + "susceptibile", "susceptible", + "suspeciously", "suspiciously", + "suspicioulsy", "suspiciously", + "suspiciuosly", "suspiciously", + "suspisiously", "suspiciously", + "sustainabily", "sustainability", + "symapthizers", "sympathizers", + "symetrically", "symmetrically", + "symmetricaly", "symmetrically", + "sympathethic", "sympathetic", + "sympathsizer", "sympathizers", + "sympathyzers", "sympathizers", + "sympethizers", "sympathizers", + "symphatizers", "sympathizers", + "sympithizers", "sympathizers", + "syncronously", "synchronously", + "sysmatically", "systematically", + "systematisch", "systematic", + "tablespooons", "tablespoon", + "tacticallity", "tactically", + "tangencially", "tangentially", + "tangenitally", "tangentially", + "tangientally", "tangentially", + "teamfighters", "teamfights", + "teansylvania", "transylvania", + "techanically", "mechanically", + "techincality", "technicality", + "technologial", "technological", + "telelevision", "television", + "teleportaion", "teleportation", + "teleportaton", "teleportation", + "temepratures", "temperatures", + "temparatures", "temperatures", + "temperaturas", "temperatures", + "temporarilly", "temporarily", + "tempreatures", "temperatures", + "tempuratures", "temperatures", + "tengentially", "tangentially", + "termendously", "tremendously", + "territorrial", "territorial", + "territorries", "territories", + "testasterone", "testosterone", + "testestorone", "testosterone", + "thanskgiving", "thanksgiving", + "theologicial", "theological", + "theoreticaly", "theoretically", + "thermomenter", "thermometer", + "thermomether", "thermometer", + "thumbnailers", "thumbnails", + "thunderboldt", "thunderbolt", + "tindergarten", "kindergarten", + "torubleshoot", "troubleshoot", + "totalitarion", "totalitarian", + "totalitatian", "totalitarian", + "touchscreeen", "touchscreen", + "traditionaly", "traditionally", + "traditionnal", "traditional", + "tradtionally", "traditionally", + "tramendously", "tremendously", + "tramsformers", "transformers", + "tramsforming", "transforming", + "tranditional", "transitional", + "tranistional", "transitional", + "tranistioned", "transitioned", + "tranlsations", "translations", + "tranmsission", "transmissions", + "transaltions", "translations", + "transaprency", "transparency", + "transational", "transitional", + "transcations", "transactions", + "transcendant", "transcendent", + "transcripton", "transcription", + "transcriptus", "transcripts", + "transesxuals", "transsexuals", + "transfarmers", "transformers", + "transfarring", "transferring", + "transferrred", "transferred", + "transformare", "transformers", + "transformase", "transforms", + "transformees", "transforms", + "transforners", "transformers", + "transfromers", "transformers", + "transfroming", "transforming", + "transgenderd", "transgendered", + "transgendred", "transgendered", + "transgenered", "transgender", + "transicional", "transitional", + "transilvania", "transylvania", + "transimssion", "transmissions", + "transisioned", "transitioned", + "translastion", "translations", + "translateing", "translating", + "translationg", "translating", + "translucient", "translucent", + "translyvania", "transylvania", + "transmisions", "transmission", + "transmisison", "transmission", + "transmissons", "transmissions", + "transmitirte", "transmitter", + "transmittted", "transmitted", + "transmorfers", "transformer", + "transofrmers", 
"transformers", + "transofrming", "transforming", + "transparancy", "transparency", + "transparenty", "transparency", + "transparrent", "transparent", + "transperancy", "transparency", + "transperency", "transparency", + "transplantes", "transplants", + "transporteur", "transporter", + "transportion", "transporting", + "transpotting", "transporting", + "transsmision", "transmissions", + "transylmania", "transylvania", + "transylvanai", "transylvania", + "trasnferring", "transferring", + "trasnformers", "transformers", + "trasnforming", "transforming", + "trasnmission", "transmissions", + "trasnparency", "transparency", + "trasnporting", "transporting", + "trememdously", "tremendously", + "tremendoulsy", "tremendously", + "tremondously", "tremendously", + "troubelshoot", "troubleshoot", + "troublehsoot", "troubleshoot", + "trumendously", "tremendously", + "trustworthly", "trustworthy", + "ubsubscribed", "unsubscribed", + "udnerpowered", "underpowered", + "umbelievable", "unbelievable", + "umemployment", "unemployment", + "unaccaptable", "unacceptable", + "unacceptible", "unacceptable", + "unaccpetable", "unacceptable", + "unacompanied", "unaccompanied", + "unappealling", "unappealing", + "unattractice", "unattractive", + "unautherized", "unauthorized", + "unauthroized", "unauthorized", + "unbeleivable", "unbelievable", + "unbeleivably", "unbelievably", + "unbeliavable", "unbelievable", + "unbeliavably", "unbelievably", + "unbeliebable", "unbelievable", + "unbelieveble", "unbelievable", + "unbelievibly", "unbelievably", + "unbeliveable", "unbelievable", + "unbeliveably", "unbelievably", + "unbelizeable", "unbelievable", + "unbolievable", "unbelievable", + "uncertainity", "uncertainty", + "uncertaintly", "uncertainty", + "uncompatible", "incompatible", + "unconditinal", "unconditional", + "unconsciosly", "unconsciously", + "unconsciouly", "unconsciously", + "unconsistent", "inconsistent", + "unconvenient", "inconvenient", + "unconvential", "unconventional", + "undecideable", "undecidable", + "undefinitely", "indefinitely", + "undeniablely", "undeniably", + "undergradate", "undergraduate", + "undergradute", "undergraduate", + "underminding", "undermining", + "undermineing", "undermining", + "undermineras", "undermines", + "undermineres", "undermines", + "underminging", "undermining", + "underminning", "undermining", + "undertakeing", "undertaking", + "underwhelimg", "underwhelming", + "underwheling", "underwhelming", + "undesireable", "undesirable", + "undoubtedbly", "undoubtedly", + "unemployemnt", "unemployment", + "unemplyoment", "unemployment", + "unempolyment", "unemployment", + "unenployment", "unemployment", + "unequalities", "inequalities", + "unexpectadly", "unexpectedly", + "unexpectetly", "unexpectedly", + "unexpectidly", "unexpectedly", + "unexperience", "inexperience", + "unexpextedly", "unexpectedly", + "unexplicably", "inexplicably", + "unforgetable", "unforgettable", + "unforgiveble", "unforgivable", + "unforgivible", "unforgivable", + "unfortunatly", "unfortunately", + "unfortunetly", "unfortunately", + "unilatreally", "unilaterally", + "uniliterally", "unilaterally", + "unimpresssed", "unimpressed", + "uninitalised", "uninitialised", + "uninitalized", "uninitialized", + "uninstallimg", "uninstalling", + "uninstallled", "uninstalled", + "unintentinal", "unintentional", + "uninteresing", "uninteresting", + "uninterneted", "uninterested", + "uninterruped", "uninterrupted", + "uninterupted", "uninterrupted", + "unisntalling", "uninstalling", + "unitesstates", "unitedstates", + 
"univerisites", "universities", + "univeristies", "universities", + "universitets", "universities", + "unliaterally", "unilaterally", + "unneccessary", "unnecessary", + "unnecesarily", "unnecessarily", + "unnecessairy", "unnecessarily", + "unnecessarly", "unnecessarily", + "unnistalling", "uninstalling", + "unpredictabe", "unpredictable", + "unpreductive", "unproductive", + "unproduktive", "unproductive", + "unrealisitic", "unrealistic", + "unreaponsive", "unresponsive", + "unreasonalby", "unreasonably", + "unrepsonsive", "unresponsive", + "unresponcive", "unresponsive", + "unresponisve", "unresponsive", + "unresponsibe", "unresponsive", + "unrestircted", "unrestricted", + "unrestrcited", "unrestricted", + "unristricted", "unrestricted", + "unseccessful", "unsuccessful", + "unsespecting", "unsuspecting", + "unsibscribed", "unsubscribed", + "unsoliciated", "unsolicited", + "unsolicitied", "unsolicited", + "unsubscirbed", "unsubscribed", + "unsubscrible", "unsubscribed", + "unsubscrided", "unsubscribed", + "unsubscriped", "unsubscribed", + "unsubscrubed", "unsubscribed", + "unsubsrcibed", "unsubscribed", + "unsucessfull", "unsuccessful", + "unsunscribed", "unsubscribed", + "unsurprizing", "unsurprising", + "unsusbcribed", "unsubscribed", + "unsustainble", "unsustainable", + "unvelievable", "unbelievable", + "unvelievably", "unbelievably", + "unviersities", "universities", + "unvulnerable", "invulnerable", + "varification", "verification", + "vegetarianas", "vegetarians", + "vegetarianos", "vegetarians", + "verficiation", "verification", + "verificacion", "verification", + "verificaiton", "verification", + "verifikation", "verification", + "vernaculaire", "vernacular", + "versatillity", "versatility", + "verticallity", "vertically", + "videogamemes", "videogames", + "visualizaton", "visualization", + "vocabularily", "vocabulary", + "vocabularity", "vocabulary", + "volonteering", "volunteering", + "volounteered", "volunteered", + "voluntarilly", "voluntarily", + "volunterring", "volunteering", + "vulnerabilty", "vulnerability", + "weightlifing", "weightlifting", + "withdrawalls", "withdrawals", + "withdrawling", "withdrawing", + "withdrawning", "withdrawing", + "wonderfullly", "wonderfully", + "worshippping", "worshipping", + "xenophobical", "xenophobia", + "abandenment", "abandonment", + "abandomnent", "abandonment", + "abandonding", "abandoning", + "abandonnent", "abandonment", + "abandonning", "abandoning", + "abbreviatin", "abbreviation", + "abbreviaton", "abbreviation", + "abdominable", "abdominal", + "abomanation", "abomination", + "abominacion", "abomination", + "abomonation", "abomination", + "abonimation", "abomination", + "aboriginial", "aboriginal", + "aborigional", "aboriginal", + "abreviation", "abbreviation", + "abritrarily", "arbitrarily", + "abritration", "arbitration", + "absolutelly", "absolutely", + "absolutelys", "absolutes", + "absolutisme", "absolutes", + "absolutiste", "absolutes", + "abstraccion", "abstraction", + "abstraktion", "abstraction", + "abstruction", "abstraction", + "abundancies", "abundances", + "academicaly", "academically", + "academicese", "academics", + "accelarated", "accelerated", + "accelarator", "accelerator", + "accelerater", "accelerator", + "acceleratie", "accelerate", + "acceleratio", "accelerator", + "acceleraton", "acceleration", + "accelorated", "accelerated", + "accelorator", "accelerator", + "acceptabelt", "acceptable", + "accesseries", "accessories", + "accessibile", "accessible", + "accessibily", "accessibility", + "accessoires", 
"accessories", + "accidantely", "accidently", + "accidentaly", "accidentally", + "accidentely", "accidently", + "accidential", "accidental", + "accidentily", "accidently", + "accidentlay", "accidently", + "accidentley", "accidently", + "accidentlly", "accidently", + "accomadated", "accommodated", + "accomadates", "accommodates", + "accommadate", "accommodate", + "accommidate", "accommodate", + "accomodated", "accommodated", + "accomodates", "accommodates", + "accomondate", "accommodate", + "accompained", "accompanied", + "accompanyed", "accompanied", + "accompianed", "accompanied", + "accompinied", "accompanied", + "accomplises", "accomplishes", + "accomplishs", "accomplishes", + "accomponied", "accompanied", + "accountatns", "accountants", + "accountents", "accountants", + "accquainted", "acquainted", + "accrediated", "accredited", + "accreditied", "accredited", + "accreditted", "accredited", + "acculumated", "accumulated", + "accumalated", "accumulated", + "accumelated", "accumulated", + "accumilated", "accumulated", + "accumulatin", "accumulation", + "accumulaton", "accumulation", + "accuratelly", "accurately", + "accustommed", "accustomed", + "acheivement", "achievement", + "acheivments", "achievements", + "achievemint", "achievement", + "achievemnts", "achievements", + "achievments", "achievements", + "achivements", "achievements", + "acknolwedge", "acknowledge", + "acknoweldge", "acknowledge", + "acknowleded", "acknowledged", + "acknowlegde", "acknowledge", + "acknowleged", "acknowledge", + "acknowleges", "acknowledges", + "acknwoledge", "acknowledges", + "acomplished", "accomplished", + "acopalyptic", "apocalyptic", + "acquaintace", "acquaintance", + "acquisation", "acquisition", + "activateing", "activating", + "activationg", "activating", + "activistion", "activision", + "additinally", "additionally", + "additionaly", "additionally", + "additonally", "additionally", + "adequatedly", "adequately", + "adjectiveus", "adjectives", + "administerd", "administered", + "administrar", "administrator", + "administren", "administer", + "administrer", "administer", + "administres", "administer", + "administrez", "administer", + "adminstered", "administered", + "adminstrate", "administrate", + "admittadely", "admittedly", + "adolencence", "adolescence", + "adolescance", "adolescence", + "adolescense", "adolescence", + "advantadges", "advantages", + "advantageos", "advantageous", + "advantageus", "advantageous", + "advantagous", "advantageous", + "adventerous", "adventures", + "adventourus", "adventurous", + "adversiting", "advertising", + "advertisors", "advertisers", + "advertisted", "advertised", + "aesthethics", "aesthetics", + "afficionado", "aficionado", + "affiliction", "affiliation", + "affirmitave", "affirmative", + "affirmitive", "affirmative", + "affixiation", "affiliation", + "affrimative", "affirmative", + "afgahnistan", "afghanistan", + "afganhistan", "afghanistan", + "afghanastan", "afghanistan", + "afghansitan", "afghanistan", + "afhganistan", "afghanistan", + "afternarket", "aftermarket", + "afterthougt", "afterthought", + "aggaravates", "aggravates", + "aggragating", "aggravating", + "aggregatore", "aggregate", + "aggressivly", "aggressively", + "aggresssion", "aggression", + "aggrovating", "aggravating", + "agnostacism", "agnosticism", + "agnostisicm", "agnosticism", + "agnostisism", "agnosticism", + "agnostocism", "agnosticism", + "agnsoticism", "agnosticism", + "agonsticism", "agnosticism", + "agressively", "aggressively", + "agressivley", "agressive", + "agressivnes", 
"agressive", + "agricolture", "agriculture", + "agriculteur", "agriculture", + "agricultral", "agricultural", + "agricultual", "agricultural", + "agricutlure", "agriculture", + "ahtleticism", "athleticism", + "alcoholicas", "alcoholics", + "alcoholicos", "alcoholics", + "alcoholisim", "alcoholism", + "algorithems", "algorithm", + "algorithims", "algorithm", + "algorithmes", "algorithms", + "algorithmns", "algorithms", + "algorithmus", "algorithms", + "algorithyms", "algorithm", + "algorythims", "algorithms", + "alientating", "alienating", + "alleigances", "allegiance", + "alltogether", "altogether", + "alterantive", "alternative", + "alternatley", "alternately", + "alternitive", "alternative", + "altheticism", "athleticism", + "altnerately", "alternately", + "altruisitic", "altruistic", + "altruistric", "altruistic", + "amalgomated", "amalgamated", + "ambulancier", "ambulance", + "amerliorate", "ameliorate", + "ammendments", "amendments", + "ampehtamine", "amphetamine", + "ampethamine", "amphetamine", + "amphetamies", "amphetamines", + "amphetamins", "amphetamines", + "amphetemine", "amphetamine", + "amphetimine", "amphetamine", + "amphetmaine", "amphetamines", + "analyticals", "analytics", + "anarchistes", "anarchists", + "ancedotally", "anecdotally", + "androgenous", "androgynous", + "anecdatally", "anecdotally", + "anecdotelly", "anecdotally", + "anecodtally", "anecdotally", + "anectodally", "anecdotally", + "anectotally", "anecdotally", + "anedoctally", "anecdotally", + "angosticism", "agnosticism", + "anihilation", "annihilation", + "anitbiotics", "antibiotics", + "annihalated", "annihilated", + "annihilaton", "annihilation", + "annihilited", "annihilated", + "annihliated", "annihilated", + "annilihated", "annihilated", + "anniversery", "anniversary", + "annonymouse", "anonymous", + "announceing", "announcing", + "announcemet", "announcements", + "announcemnt", "announcement", + "announcents", "announces", + "annoymously", "anonymously", + "anonamously", "anonymously", + "anonimously", "anonymously", + "anonmyously", "anonymously", + "anonomously", "anonymously", + "anonymousny", "anonymously", + "anouncement", "announcement", + "antagonisic", "antagonistic", + "antagonistc", "antagonistic", + "antagonstic", "antagonist", + "anthropolgy", "anthropology", + "anthropoloy", "anthropology", + "antibiodics", "antibiotics", + "antibioitcs", "antibiotic", + "antibioitic", "antibiotic", + "antibitoics", "antibiotics", + "antiboitics", "antibiotics", + "anticapated", "anticipated", + "anticiapted", "anticipated", + "anticipatin", "anticipation", + "antiobitics", "antibiotic", + "antiquaited", "antiquated", + "antisipated", "anticipated", + "apacolyptic", "apocalyptic", + "apocaliptic", "apocalyptic", + "apocalpytic", "apocalyptic", + "apocalytpic", "apocalyptic", + "apolagizing", "apologizing", + "apolegetics", "apologetics", + "apologistas", "apologists", + "apologistes", "apologists", + "apostrophie", "apostrophe", + "apparantely", "apparently", + "appareances", "appearances", + "apparentely", "apparently", + "appartments", "apartments", + "appeareance", "appearance", + "appearences", "appearances", + "apperciated", "appreciated", + "apperciates", "appreciates", + "appereances", "appearances", + "applicabile", "applicable", + "applicaiton", "application", + "applicatins", "applicants", + "applicatons", "applications", + "appoitnment", "appointments", + "apporaching", "approaching", + "apporpriate", "appropriate", + "apporximate", "approximate", + "appraoching", "approaching", + 
"apprearance", "appearance", + "apprecaited", "appreciated", + "apprecaites", "appreciates", + "appreciaite", "appreciative", + "appreciatie", "appreciative", + "appreciatin", "appreciation", + "appreciaton", "appreciation", + "appreciatve", "appreciative", + "appreicated", "appreciated", + "appreicates", "appreciates", + "apprentince", "apprentice", + "appriciated", "appreciated", + "appriciates", "appreciates", + "apprieciate", "appreciate", + "appropirate", "appropriate", + "appropraite", "appropriate", + "appropriato", "appropriation", + "approxamate", "approximate", + "approxiamte", "approximate", + "approxmiate", "approximate", + "aprehensive", "apprehensive", + "apsirations", "aspirations", + "aqcuisition", "acquisition", + "aquaintance", "acquaintance", + "aquiantance", "acquaintance", + "arbitrairly", "arbitrarily", + "arbitralily", "arbitrarily", + "arbitrarely", "arbitrarily", + "arbitrarion", "arbitration", + "arbitratily", "arbitrarily", + "arbritarily", "arbitrarily", + "arbritation", "arbitration", + "arcaheology", "archaeology", + "archaoelogy", "archeology", + "archeaology", "archaeology", + "archimedian", "archimedean", + "architechts", "architect", + "architectes", "architects", + "architecure", "architecture", + "argiculture", "agriculture", + "argumentate", "argumentative", + "aribtrarily", "arbitrarily", + "aribtration", "arbitration", + "arithmentic", "arithmetic", + "arithmethic", "arithmetic", + "arithmetric", "arithmetic", + "armagedddon", "armageddon", + "armageddeon", "armageddon", + "arrangments", "arrangements", + "arrengement", "arrangement", + "articluated", "articulated", + "articualted", "articulated", + "artifically", "artificially", + "artificialy", "artificially", + "aspergerers", "aspergers", + "asphyxation", "asphyxiation", + "aspriations", "aspirations", + "assasinated", "assassinated", + "assasinates", "assassinates", + "assassiante", "assassinate", + "assassinare", "assassinate", + "assassinatd", "assassinated", + "assassinato", "assassination", + "assassinats", "assassins", + "assassinted", "assassinated", + "assembleing", "assembling", + "assemblying", "assembling", + "assertation", "assertion", + "assignemnts", "assignments", + "assimialted", "assimilate", + "assimilatie", "assimilate", + "assimilerat", "assimilate", + "assimiliate", "assimilate", + "assimliated", "assimilate", + "assingments", "assignments", + "assistantes", "assistants", + "assocaition", "associations", + "associaiton", "associations", + "associaties", "associates", + "associatons", "associations", + "assoication", "association", + "assosiating", "associating", + "assosiation", "association", + "assoziation", "association", + "assumptious", "assumptions", + "astonashing", "astonishing", + "astonoshing", "astonishing", + "astronaught", "astronaut", + "astronaunts", "astronaut", + "astronautas", "astronauts", + "astronautes", "astronauts", + "asychronous", "asynchronous", + "asyncronous", "asynchronous", + "atatchments", "attachments", + "atheistisch", "atheistic", + "athelticism", "athleticism", + "athletecism", "athleticism", + "athleticsim", "athleticism", + "athletisicm", "athleticism", + "athletisism", "athleticism", + "atmopsheric", "atmospheric", + "atmoshperic", "atmospheric", + "atmosoheric", "atmospheric", + "atomspheric", "atmospheric", + "atrocitites", "atrocities", + "attachemnts", "attachments", + "attackerasu", "attackers", + "attackerats", "attackers", + "attactments", "attachments", + "attributred", "attributed", + "attributted", "attribute", + "attrocities", 
"atrocities", + "atttributes", "attributes", + "audiobookas", "audiobooks", + "audioboooks", "audiobook", + "auotcorrect", "autocorrect", + "austrailans", "australians", + "austrailian", "australian", + "australiaan", "australians", + "australiams", "australians", + "australiens", "australians", + "australlian", "australian", + "authenticiy", "authenticity", + "authenticor", "authenticator", + "authenticty", "authenticity", + "authorative", "authoritative", + "authoritate", "authoritative", + "authoroties", "authorities", + "autoatttack", "autoattack", + "autocoreect", "autocorrect", + "autocorrekt", "autocorrect", + "autocorrent", "autocorrect", + "autocorrext", "autocorrect", + "autoctonous", "autochthonous", + "autokorrect", "autocorrect", + "automaticly", "automatically", + "automatonic", "automation", + "automoblies", "automobile", + "auxillaries", "auxiliaries", + "availabiliy", "availability", + "availabilty", "availability", + "availablity", "availability", + "awesoneness", "awesomeness", + "babysittter", "babysitter", + "backbacking", "backpacking", + "backgorunds", "backgrounds", + "backhacking", "backpacking", + "backjacking", "backpacking", + "backtacking", "backpacking", + "bangaldeshi", "bangladesh", + "bangladesch", "bangladesh", + "barceloneta", "barcelona", + "bargainning", "bargaining", + "battelfield", "battlefield", + "battelfront", "battlefront", + "battelships", "battleship", + "battlefeild", "battlefield", + "battlefiend", "battlefield", + "battlefiled", "battlefield", + "battlefornt", "battlefront", + "battlehsips", "battleship", + "beastiality", "bestiality", + "beaurocracy", "bureaucracy", + "beautyfully", "beautifully", + "behaviorial", "behavioral", + "belittleing", "belittling", + "belittlling", "belittling", + "belligerant", "belligerent", + "belligirent", "belligerent", + "bellweather", "bellwether", + "benefitical", "beneficial", + "bestiallity", "bestiality", + "beuatifully", "beautifully", + "beuraucracy", "bureaucracy", + "beuraucrats", "bureaucrats", + "billegerent", "belligerent", + "billionairs", "billionaires", + "billionarie", "billionaire", + "billioniare", "billionaire", + "biologicaly", "biologically", + "birthdayers", "birthdays", + "birthdaymas", "birthdays", + "bittersweat", "bittersweet", + "bitterwseet", "bittersweet", + "blackberrry", "blackberry", + "blacksmitch", "blacksmith", + "bloodboorne", "bloodborne", + "bluebarries", "blueberries", + "blueburries", "blueberries", + "blueprients", "blueprints", + "bodybuildig", "bodybuilding", + "bodybuildng", "bodybuilding", + "bodybuiling", "bodybuilding", + "bombardeada", "bombarded", + "bombardeado", "bombarded", + "bombarderad", "bombarded", + "bordelrands", "borderlands", + "bordlerands", "borderlands", + "bortherhood", "brotherhood", + "bourgeousie", "bourgeois", + "boycottting", "boycotting", + "bracelettes", "bracelets", + "brainwahsed", "brainwashed", + "brainwasing", "brainwashing", + "braziliians", "brazilians", + "breakthough", "breakthrough", + "breakthrouh", "breakthrough", + "breathtakng", "breathtaking", + "brianwashed", "brainwashed", + "brillaintly", "brilliantly", + "broadcasing", "broadcasting", + "broadcastes", "broadcasts", + "broderlands", "borderlands", + "brotherwood", "brotherhood", + "buddhistisk", "buddhists", + "buearucrats", "bureaucrats", + "bueraucracy", "bureaucracy", + "bueraucrats", "bureaucrats", + "buisnessman", "businessman", + "buisnessmen", "businessmen", + "bullerproof", "bulletproof", + "bulletbroof", "bulletproof", + "bulletproff", "bulletproof", + 
"bulletprrof", "bulletproof", + "bullitproof", "bulletproof", + "bureacuracy", "bureaucracy", + "bureaocracy", "bureaucracy", + "bureaocrats", "bureaucrats", + "bureaucraps", "bureaucrats", + "bureaucrash", "bureaucrats", + "bureaucrasy", "bureaucrats", + "bureaucrazy", "bureaucracy", + "bureuacracy", "bureaucracy", + "bureuacrats", "bureaucrats", + "burueacrats", "bureaucrats", + "businessnes", "businessmen", + "busniessmen", "businessmen", + "butterfiles", "butterflies", + "butterfleye", "butterfly", + "butterflyes", "butterflies", + "butterfries", "butterflies", + "butterlfies", "butterflies", + "caclulating", "calculating", + "caclulation", "calculation", + "caclulators", "calculators", + "cailbration", "calibration", + "calbiration", "calibration", + "calcualting", "calculating", + "calcualtion", "calculations", + "calcualtors", "calculators", + "calculaters", "calculators", + "calculatios", "calculators", + "calculatons", "calculations", + "calibartion", "calibration", + "calibraiton", "calibration", + "califorinan", "californian", + "californain", "californian", + "californica", "california", + "californien", "californian", + "californiia", "californian", + "californina", "californian", + "californnia", "californian", + "califronian", "californian", + "caluclating", "calculating", + "caluclation", "calculation", + "caluclators", "calculators", + "caluculated", "calculated", + "caluiflower", "cauliflower", + "camouflague", "camouflage", + "camouflauge", "camouflage", + "campagining", "campaigning", + "campainging", "campaigning", + "canadianese", "canadians", + "cannabilism", "cannibalism", + "cannabolism", "cannibalism", + "canniablism", "cannibalism", + "cannibalizm", "cannibalism", + "cannibaljim", "cannibalism", + "cannibalsim", "cannibalism", + "cannibilism", "cannibalism", + "cannobalism", "cannibalism", + "cannotation", "connotation", + "capabilites", "capabilities", + "capabilitiy", "capability", + "capabillity", "capability", + "capacitaron", "capacitor", + "capacitores", "capacitors", + "capatilists", "capitalists", + "capatilized", "capitalized", + "caperbility", "capability", + "capitalisim", "capitalism", + "capitilists", "capitalists", + "capitilized", "capitalized", + "capitolists", "capitalists", + "capitolized", "capitalized", + "captialists", "capitalists", + "captialized", "capitalized", + "cariactures", "caricature", + "carniverous", "carnivorous", + "castatrophe", "catastrophe", + "catagorized", "categorized", + "catapillars", "caterpillars", + "catapillers", "caterpillars", + "catasthrope", "catastrophe", + "catastraphe", "catastrophe", + "catastrohpe", "catastrophe", + "catastropic", "catastrophic", + "categroized", "categorized", + "catepillars", "caterpillars", + "catergorize", "categorize", + "caterogized", "categorized", + "caterpilars", "caterpillars", + "caterpiller", "caterpillar", + "catholacism", "catholicism", + "catholicsim", "catholicism", + "catholisicm", "catholicism", + "catholisism", "catholicism", + "catholizism", "catholicism", + "catholocism", "catholicism", + "catogerized", "categorized", + "catterpilar", "caterpillar", + "cauilflower", "cauliflower", + "caulfilower", "cauliflower", + "celebartion", "celebrations", + "celebirties", "celebrities", + "celebracion", "celebration", + "celebrasion", "celebrations", + "celebratons", "celebrations", + "centipeddle", "centipede", + "cerimonious", "ceremonious", + "certaintity", "certainty", + "certificaat", "certificate", + "certificare", "certificate", + "certificato", "certification", + 
"certificats", "certificates", + "challanging", "challenging", + "challeneged", "challenged", + "challeneger", "challenger", + "challeneges", "challenges", + "chameleooon", "chameleon", + "championshp", "championship", + "championsip", "championship", + "chancellour", "chancellor", + "charachters", "characters", + "charasmatic", "charismatic", + "charimastic", "charismatic", + "charsimatic", "charismatic", + "cheerleadra", "cheerleader", + "cheerleards", "cheerleaders", + "cheerleeder", "cheerleader", + "cheesebuger", "cheeseburger", + "cheeseburgs", "cheeseburgers", + "chihuahuita", "chihuahua", + "childrenmrs", "childrens", + "chloesterol", "cholesterol", + "cholesteral", "cholesterol", + "cholestoral", "cholesterol", + "cholestorol", "cholesterol", + "cholosterol", "cholesterol", + "chormosomes", "chromosomes", + "christianty", "christianity", + "chromasomes", "chromosomes", + "chromesomes", "chromosomes", + "chromisomes", "chromosomes", + "chromosones", "chromosomes", + "chromossome", "chromosomes", + "chromozomes", "chromosomes", + "chronicales", "chronicles", + "chronichles", "chronicles", + "cicrulating", "circulating", + "cincinnasti", "cincinnati", + "cincinnatti", "cincinnati", + "cincinnnati", "cincinnati", + "circimcised", "circumcised", + "circluating", "circulating", + "circualtion", "circulation", + "circulacion", "circulation", + "circumcison", "circumcision", + "circumsiced", "circumcised", + "circumsised", "circumcised", + "circumstace", "circumstance", + "circumvrent", "circumvent", + "circuncised", "circumcised", + "cirticising", "criticising", + "ciruclating", "circulating", + "ciruclation", "circulation", + "citicenship", "citizenship", + "citisenship", "citizenship", + "citizinship", "citizenship", + "civilizatin", "civilizations", + "civilizaton", "civilization", + "claculators", "calculators", + "classifides", "classified", + "cleanilness", "cleanliness", + "cleanleness", "cleanliness", + "cleanlyness", "cleanliness", + "cleansiness", "cleanliness", + "cliffbanger", "cliffhanger", + "cliffhander", "cliffhanger", + "cliffhangar", "cliffhanger", + "clifthanger", "cliffhanger", + "cockaroches", "cockroaches", + "cockraoches", "cockroaches", + "cockroackes", "cockroaches", + "cocktailers", "cocktails", + "coefficeint", "coefficient", + "coefficiant", "coefficient", + "coincedince", "coincidence", + "coincidance", "coincidence", + "coincidense", "coincidence", + "coincidente", "coincidence", + "coincidince", "coincidence", + "coinsidence", "coincidence", + "collabarate", "collaborate", + "collaberate", "collaborate", + "collaborant", "collaborate", + "collaborare", "collaborate", + "collaborato", "collaboration", + "collapseing", "collapsing", + "collaterial", "collateral", + "collectieve", "collective", + "collectivly", "collectively", + "collectivos", "collections", + "collobarate", "collaborate", + "colloborate", "collaborate", + "colonializm", "colonialism", + "colonialsim", "colonialism", + "colonianism", "colonialism", + "colonizaton", "colonization", + "comaprisons", "comparisons", + "combiantion", "combinations", + "combinacion", "combination", + "combinaison", "combinations", + "combinaiton", "combinations", + "combinatino", "combinations", + "combinatins", "combinations", + "combinatios", "combinations", + "combinining", "combining", + "combonation", "combination", + "comediantes", "comedians", + "comeptition", "competition", + "comeptitive", "competitive", + "comeptitors", "competitors", + "comfertable", "comfortable", + "comfertably", "comfortably", + 
"comfortabel", "comfortably", + "comfortabil", "comfortably", + "comfrotable", "comfortable", + "comftorable", "comfortable", + "comftorably", "comfortably", + "comisioning", "commissioning", + "comissioned", "commissioned", + "comissioner", "commissioner", + "commandered", "commanded", + "commandmant", "commandment", + "commantator", "commentator", + "commendment", "commandment", + "commentarea", "commenter", + "commentaren", "commenter", + "commentater", "commentator", + "commenteers", "commenter", + "commentries", "commenters", + "commercialy", "commercially", + "commericals", "commercials", + "commericial", "commercial", + "comminicate", "communicate", + "comminucate", "communicate", + "commisioned", "commissioned", + "commisioner", "commissioner", + "commisssion", "commissions", + "committment", "commitment", + "commodoties", "commodities", + "commomplace", "commonplace", + "commonspace", "commonplace", + "commonweath", "commonwealth", + "commonwelth", "commonwealth", + "commuincate", "communicated", + "communciate", "communicate", + "communicted", "communicated", + "communistas", "communists", + "communistes", "communists", + "compability", "compatibility", + "compalation", "compilation", + "compansated", "compensated", + "comparabile", "comparable", + "comparasion", "comparison", + "comparasons", "comparisons", + "comparement", "compartment", + "comparetive", "comparative", + "comparision", "comparison", + "comparisson", "comparisons", + "comparitave", "comparative", + "comparitive", "comparative", + "comparsions", "comparisons", + "compassione", "compassionate", + "compasssion", "compassion", + "compatabile", "compatible", + "compatative", "comparative", + "compatiable", "compatible", + "compatibile", "compatible", + "compatibily", "compatibility", + "compeditive", "competitive", + "compeditors", "competitors", + "compeitions", "competitions", + "compeittion", "competitions", + "compelation", "compilation", + "compensante", "compensate", + "compensatie", "compensate", + "compensatin", "compensation", + "compenstate", "compensate", + "comperative", "comparative", + "compesition", "composition", + "competation", "computation", + "competative", "competitive", + "competators", "competitors", + "competetion", "competition", + "competetors", "competitors", + "competiters", "competitors", + "competiting", "competition", + "competitior", "competitor", + "competitivo", "competition", + "competitoin", "competitions", + "competitons", "competitors", + "competution", "computation", + "compilacion", "compilation", + "compilcated", "complicate", + "compination", "compilation", + "compinsated", "compensated", + "compitation", "computation", + "compitetion", "competitions", + "complacient", "complacent", + "complciated", "complicate", + "compleation", "compilation", + "complecated", "complicated", + "completaste", "completes", + "completeing", "completing", + "completeion", "completion", + "completelly", "completely", + "completelyl", "completely", + "completelys", "completes", + "completenes", "completes", + "complexitiy", "complexity", + "compliacted", "complicate", + "compliation", "compilation", + "complicarte", "complicate", + "complicatie", "complicit", + "complicatii", "complicit", + "complicatin", "complicit", + "complictaed", "complicate", + "complimente", "complement", + "complimenty", "complimentary", + "complusions", "compulsion", + "compolation", "compilation", + "componenets", "components", + "componentes", "components", + "composicion", "composition", + "composiiton", 
"compositions", + "composision", "compositions", + "compositied", "composite", + "composities", "composite", + "compositoin", "compositions", + "compositons", "compositions", + "compositore", "composite", + "compostiion", "compositions", + "compotition", "composition", + "compramised", "compromised", + "compramises", "compromises", + "compremised", "compromised", + "compremises", "compromises", + "comprension", "compression", + "compresores", "compressor", + "compresssed", "compressed", + "compresssor", "compressor", + "comprimised", "compromised", + "comprimises", "compromises", + "compromessi", "compromises", + "compromisng", "compromising", + "compromisse", "compromises", + "compromisso", "compromises", + "compromized", "compromised", + "compulstion", "compulsion", + "compunation", "computation", + "computacion", "computation", + "computating", "computation", + "computition", "computation", + "conceivibly", "conceivably", + "concencrate", "concentrate", + "concentrace", "concentrate", + "concentrade", "concentrated", + "concentrait", "concentrate", + "concentrant", "concentrate", + "concentrare", "concentrate", + "concentrato", "concentration", + "concertmate", "concentrate", + "conceviable", "conceivable", + "conceviably", "conceivably", + "concidering", "considering", + "conciveable", "conceivable", + "conciveably", "conceivably", + "conclsuions", "concussions", + "concludendo", "concluded", + "conclussion", "conclusions", + "conclussive", "conclusive", + "conclutions", "conclusions", + "concsiously", "consciously", + "conculsions", "conclusions", + "concusssion", "concussions", + "condeferacy", "confederacy", + "condicional", "conditional", + "condidtions", "conditions", + "conditionar", "conditioner", + "conditionel", "conditional", + "condolances", "condolences", + "condolenses", "condolences", + "condolonces", "condolences", + "conductiong", "conducting", + "condulences", "condolences", + "conenctions", "connections", + "conescutive", "consecutive", + "confedaracy", "confederacy", + "confedarate", "confederate", + "confederecy", "confederacy", + "conferances", "conferences", + "conferedate", "confederate", + "confererate", "confederate", + "confescated", "confiscated", + "confesssion", "confessions", + "confidantly", "confidently", + "configurare", "configure", + "configurate", "configure", + "configurato", "configuration", + "confilcting", "conflicting", + "confisgated", "confiscated", + "conflciting", "conflicting", + "confortable", "comfortable", + "confrontato", "confrontation", + "confussions", "confessions", + "congrassman", "congressman", + "congratuate", "congratulate", + "conicidence", "coincidence", + "conjonction", "conjunction", + "conjucntion", "conjunction", + "conjuncting", "conjunction", + "conlcusions", "conclusions", + "connatation", "connotation", + "connecitcut", "connecticut", + "connecticon", "connection", + "connectiong", "connecting", + "connectivty", "connectivity", + "connetation", "connotation", + "connonation", "connotation", + "connotacion", "connotation", + "conontation", "connotation", + "conotations", "connotations", + "conquerring", "conquering", + "consdidered", "considered", + "consectuive", "consecutive", + "consecuence", "consequence", + "conseguence", "consequence", + "conselation", "consolation", + "consentrate", "concentrate", + "consequenes", "consequence", + "consequense", "consequences", + "consequente", "consequence", + "consequenty", "consequently", + "consequtive", "consecutive", + "conservanti", "conservation", + "conservatie", 
"conservatives", + "conservaton", "conservation", + "consficated", "confiscated", + "considerabe", "considerate", + "considerais", "considers", + "considerant", "considerate", + "considerato", "consideration", + "considerble", "considerable", + "considerbly", "considerably", + "considereis", "considers", + "consilation", "consolation", + "consilidate", "consolidate", + "consistance", "consistency", + "consistenly", "consistently", + "consistensy", "consistency", + "consistenty", "consistently", + "consitution", "constitution", + "conslutants", "consultant", + "consolacion", "consolation", + "consoldiate", "consolidate", + "consolidare", "consolidate", + "consolodate", "consolidate", + "consomation", "consolation", + "conspiraces", "conspiracies", + "conspiracys", "conspiracies", + "conspirancy", "conspiracy", + "constantins", "constants", + "constantivs", "constants", + "constarints", "constraint", + "constituant", "constituent", + "constituion", "constitution", + "constituite", "constitute", + "constitutie", "constitutes", + "constrating", "constraint", + "constriants", "constraints", + "construcing", "constructing", + "construcion", "construction", + "construcive", "constructive", + "constructie", "constructive", + "constructos", "constructs", + "constructur", "constructor", + "constructus", "constructs", + "constuction", "construction", + "consturcted", "constructed", + "consuelling", "counselling", + "consulation", "consolation", + "consultaion", "consultation", + "consultanti", "consultation", + "consumation", "consumption", + "consumbales", "consumables", + "consumersim", "consumerism", + "consumibles", "consumables", + "contagiosum", "contagious", + "containered", "contained", + "containmemt", "containment", + "containters", "containers", + "containting", "containing", + "contaminato", "contamination", + "contaminent", "containment", + "contaminted", "contaminated", + "contancting", "contracting", + "contanimate", "contaminated", + "contemplare", "contemplate", + "contempoary", "contemporary", + "contemporay", "contemporary", + "contencious", "contentious", + "contenental", "continental", + "contengency", "contingency", + "contenintal", "continental", + "contenplate", "contemplate", + "contensious", "contentious", + "contentants", "contestants", + "contentuous", "contentious", + "contestaste", "contestants", + "contestents", "contestants", + "contianment", "containment", + "contientous", "contentious", + "contimplate", "contemplate", + "continenets", "continents", + "continentes", "continents", + "continentul", "continental", + "contingancy", "contingency", + "contingient", "contingent", + "contingincy", "contingency", + "continously", "continuously", + "continuarla", "continual", + "continuarlo", "continual", + "continuasse", "continues", + "continueing", "continuing", + "continuemos", "continues", + "continueous", "continuous", + "continuious", "continuous", + "continuning", "continuing", + "continunity", "continuity", + "continuosly", "continuously", + "continuting", "continuing", + "continutity", "continuity", + "continuuing", "continuing", + "continuuity", "continuity", + "contirbuted", "contributed", + "contiunally", "continually", + "contraccion", "contraction", + "contraddice", "contradicted", + "contradices", "contradicts", + "contradtion", "contraction", + "contraversy", "controversy", + "contreversy", "controversy", + "contribuent", "contribute", + "contribuito", "contribution", + "contributer", "contributor", + "contributie", "contribute", + "contributin", 
"contribution", + "contributos", "contributors", + "contribuyes", "contributes", + "contricting", "contracting", + "contriction", "contraction", + "contridicts", "contradicts", + "contriversy", "controversy", + "controleurs", "controllers", + "controllore", "controllers", + "controvercy", "controversy", + "controversa", "controversial", + "contrubutes", "contributes", + "contructing", "contracting", + "contruction", "construction", + "contructors", "contractors", + "conveinence", "convenience", + "conveneince", "convenience", + "conveniance", "convenience", + "conveniente", "convenience", + "convenietly", "conveniently", + "conventinal", "conventional", + "converitble", "convertible", + "conversaion", "conversion", + "conversatin", "conversations", + "converseley", "conversely", + "converstion", "conversion", + "convertirea", "converter", + "convertirle", "convertible", + "convertirme", "converter", + "convertirte", "converter", + "convicitons", "convictions", + "convienence", "convenience", + "convienient", "convenient", + "convinceing", "convincing", + "convincente", "convenient", + "convincersi", "convinces", + "convirtible", "convertible", + "cooperacion", "cooperation", + "cooperativo", "cooperation", + "cooporation", "cooperation", + "cooporative", "cooperative", + "coordenated", "coordinated", + "coordenates", "coordinates", + "coordianted", "coordinated", + "coordiantes", "coordinates", + "coordiantor", "coordinator", + "coordinador", "coordinator", + "coordinants", "coordinates", + "coordinater", "coordinator", + "coordinaton", "coordination", + "coordonated", "coordinated", + "coordonates", "coordinates", + "coordonator", "coordinator", + "cooridnated", "coordinated", + "cooridnates", "coordinates", + "cooridnator", "coordinator", + "copenhaagen", "copenhagen", + "copenhaegen", "copenhagen", + "copenhaguen", "copenhagen", + "copenhangen", "copenhagen", + "copmetitors", "competitors", + "coproration", "corporation", + "copyrigthed", "copyrighted", + "corinthains", "corinthians", + "corintheans", "corinthians", + "corinthiens", "corinthians", + "corinthinas", "corinthians", + "cornithians", "corinthians", + "corparation", "corporation", + "corperation", "corporation", + "corporacion", "corporation", + "corporativo", "corporation", + "corralation", "correlation", + "correctings", "corrections", + "correctivos", "corrections", + "correktions", "corrections", + "correktness", "correctness", + "correlacion", "correlation", + "correlaties", "correlates", + "corrilation", "correlation", + "corrisponds", "corresponds", + "corrolation", "correlation", + "corrosponds", "corresponds", + "costitution", "constitution", + "councellors", "councillors", + "counrtyside", "countryside", + "counsilling", "counselling", + "countercoat", "counteract", + "counteredit", "counterfeit", + "counterfact", "counteract", + "counterfait", "counterfeit", + "counterfest", "counterfeit", + "counterfiet", "counterfeit", + "counterpaly", "counterplay", + "counterpary", "counterplay", + "counterpath", "counterpart", + "counterpats", "counterparts", + "counterpont", "counterpoint", + "counterract", "counterpart", + "counterside", "countryside", + "countertrap", "counterpart", + "countriside", "countryside", + "countrycide", "countryside", + "countrywise", "countryside", + "courthourse", "courthouse", + "coutnerfeit", "counterfeit", + "coutnerpart", "counterpart", + "coutnerplay", "counterplay", + "creacionism", "creationism", + "creationkit", "creationist", + "creationsim", "creationism", + "creationsit", 
"creationist", + "creationsts", "creationists", + "creativelly", "creatively", + "credencials", "credentials", + "credentails", "credentials", + "credentaisl", "credentials", + "credientals", "credentials", + "credintials", "credentials", + "cricitising", "criticising", + "criculating", "circulating", + "cringeworhy", "cringeworthy", + "cringeworty", "cringeworthy", + "cringewothy", "cringeworthy", + "criticicing", "criticising", + "criticisied", "criticise", + "criticisims", "criticisms", + "criticisize", "criticise", + "criticiszed", "criticise", + "critisicing", "criticizing", + "critisising", "criticising", + "critizicing", "criticizing", + "critizising", "criticizing", + "critizizing", "criticizing", + "crockodiles", "crocodiles", + "crocodiller", "crocodile", + "crocodilule", "crocodile", + "croporation", "corporation", + "crossfiters", "crossfire", + "cultivative", "cultivate", + "curricullum", "curriculum", + "customizabe", "customizable", + "customizble", "customizable", + "dangeroulsy", "dangerously", + "dardenelles", "dardanelles", + "deadlifters", "deadlifts", + "dealershits", "dealerships", + "deceptivley", "deceptive", + "declaracion", "declaration", + "decleration", "declaration", + "declinining", "declining", + "decloration", "declaration", + "decoartions", "decoration", + "decomposits", "decomposes", + "decoratieve", "decorative", + "decorativos", "decorations", + "decotations", "decorations", + "decsendants", "descendants", + "deductiable", "deductible", + "defenderlas", "defenders", + "defenderlos", "defenders", + "defendernos", "defenders", + "defenesless", "defenseless", + "defenisvely", "defensively", + "defensivley", "defensively", + "deficiencey", "deficiency", + "deficienies", "deficiencies", + "deficientcy", "deficiency", + "definantley", "definately", + "definatedly", "definately", + "definateley", "definately", + "definatelly", "definately", + "definatelty", "definately", + "definatetly", "definately", + "definations", "definitions", + "definatlely", "definately", + "definetally", "definately", + "definetlely", "definetly", + "definitaley", "definately", + "definitelly", "definitely", + "definitevly", "definitively", + "definitiely", "definitively", + "definitieve", "definitive", + "definitiley", "definitively", + "definitivly", "definitively", + "definitivno", "definition", + "definitivos", "definitions", + "definitlely", "definitly", + "definitlety", "definitly", + "deflecticon", "deflection", + "degenererat", "degenerate", + "degradacion", "degradation", + "degradating", "degradation", + "degragation", "degradation", + "degridation", "degradation", + "dehyrdation", "dehydration", + "deinitalize", "deinitialize", + "delaerships", "dealerships", + "delapidated", "dilapidated", + "delcaration", "declaration", + "delearships", "dealerships", + "delevopment", "development", + "deliberante", "deliberate", + "deliberatly", "deliberately", + "deliberetly", "deliberately", + "delightlful", "delightful", + "deliverying", "delivering", + "delusionnal", "delusional", + "deminsional", "dimensional", + "democarcies", "democracies", + "democracize", "democracies", + "democractic", "democratic", + "democraphic", "demographic", + "democrasies", "democracies", + "democrazies", "democracies", + "democrocies", "democracies", + "demograhpic", "demographic", + "demographis", "demographics", + "demograpics", "demographics", + "demogrpahic", "demographic", + "demoninator", "denominator", + "demonstarte", "demonstrate", + "demonstates", "demonstrates", + "demonstraby", 
"demonstrably", + "demonstrant", "demonstrate", + "demonstrats", "demonstrates", + "demosntrate", "demonstrate", + "denegrating", "denigrating", + "denomenator", "denominator", + "denominador", "denominator", + "denominaron", "denominator", + "denominater", "denominator", + "denominaton", "denomination", + "denomitator", "denominator", + "denomonator", "denominator", + "denonimator", "denominator", + "deocrations", "decorations", + "deomcracies", "democracies", + "deparmental", "departmental", + "depedencies", "dependencies", + "dependancey", "dependency", + "dependencey", "dependency", + "dependencie", "dependence", + "dependenies", "dependencies", + "deplorabile", "deplorable", + "depressieve", "depressive", + "depresssion", "depression", + "deprevation", "deprivation", + "deprication", "deprivation", + "deprivating", "deprivation", + "deprivition", "deprivation", + "deprovation", "deprivation", + "depserately", "desperately", + "depseration", "desperation", + "deregulatin", "deregulation", + "derivativos", "derivatives", + "derivitaves", "derivatives", + "derivitives", "derivatives", + "derpivation", "deprivation", + "derviatives", "derivatives", + "descandants", "descendants", + "descendands", "descendants", + "descendends", "descended", + "descendenta", "descendants", + "descentants", "descendants", + "descirption", "descriptions", + "descprition", "descriptions", + "describiste", "describes", + "describtion", "description", + "descripcion", "description", + "descripiton", "descriptions", + "descripters", "descriptors", + "descriptoin", "descriptions", + "descriptons", "descriptions", + "descritpion", "descriptions", + "descrpition", "descriptions", + "desensitied", "desensitized", + "desensitzed", "desensitized", + "desentisize", "desensitized", + "desgination", "designation", + "designacion", "designation", + "designstion", "designation", + "desinations", "destinations", + "desingation", "designation", + "desitnation", "destination", + "desoriented", "disoriented", + "desparately", "desperately", + "desparation", "desperation", + "desperating", "desperation", + "desperatley", "desperately", + "despirately", "desperately", + "despiration", "desperation", + "destablized", "destabilized", + "destiantion", "destinations", + "destinaiton", "destinations", + "destinatons", "destinations", + "destinction", "destination", + "destraction", "destruction", + "destruccion", "destruction", + "destruciton", "destruction", + "destructivo", "destruction", + "destruktion", "destruction", + "destruktive", "destructive", + "deteoriated", "deteriorated", + "determanism", "determinism", + "determening", "determining", + "determenism", "determinism", + "determinare", "determine", + "determinato", "determination", + "determinded", "determine", + "determinsim", "determinism", + "detramental", "detrimental", + "detremental", "detrimental", + "detrimentul", "detrimental", + "detuschland", "deutschland", + "deustchland", "deutschland", + "deutchsland", "deutschland", + "deutcshland", "deutschland", + "deutschalnd", "deutschland", + "deutshcland", "deutschland", + "develepmont", "developments", + "develompent", "developments", + "developemnt", "developments", + "developmant", "developmental", + "developmetn", "developments", + "developmnet", "developments", + "developpers", "developers", + "develpoment", "developments", + "deveolpment", "developments", + "deveploment", "developments", + "devestating", "devastating", + "devistating", "devastating", + "deyhdration", "dehydration", + "diagnositcs", "diagnostic", 
+ "diagnositic", "diagnostic", + "diagonstics", "diagnostic", + "dictatorhip", "dictatorship", + "dictionaire", "dictionaries", + "dictionairy", "dictionary", + "dictionarys", "dictionaries", + "dictionnary", "dictionary", + "differances", "differences", + "differantly", "differently", + "differental", "differential", + "differentes", "differences", + "differneces", "differences", + "differnetly", "differently", + "difficulity", "difficulty", + "difficultes", "difficulties", + "dificulties", "difficulties", + "dimensiones", "dimensions", + "dimentional", "dimensional", + "dimesnional", "dimensional", + "diminisheds", "diminishes", + "diminsihing", "diminishing", + "diminuitive", "diminutive", + "diminushing", "diminishing", + "dinosaurios", "dinosaurs", + "direccional", "directional", + "direcitonal", "directional", + "directorguy", "directory", + "directorios", "directors", + "direktional", "directional", + "disadvantge", "disadvantage", + "disagreemet", "disagreements", + "disagreemtn", "disagreements", + "disapperead", "disappeared", + "disapporval", "disapproval", + "disapprovel", "disapproval", + "disasterous", "disastrous", + "disastreous", "disastrous", + "disastrious", "disastrous", + "disastruous", "disastrous", + "disatisfied", "dissatisfied", + "disciplened", "disciplined", + "disciplinas", "disciplines", + "disciplince", "disciplines", + "disclipined", "disciplined", + "disclipines", "disciplines", + "discogrophy", "discography", + "discogrpahy", "discography", + "disconencts", "disconnects", + "disconneted", "disconnected", + "disconnnect", "disconnect", + "discontined", "discontinued", + "discontiued", "discontinued", + "discrapency", "discrepancy", + "discretited", "discredited", + "discrimante", "discriminate", + "discrimiate", "discriminate", + "discussiong", "discussing", + "discusssion", "discussions", + "disgraseful", "disgraceful", + "disgrateful", "disgraceful", + "disgrunteld", "disgruntled", + "disgustigly", "disgustingly", + "disgustingy", "disgustingly", + "disgustinly", "disgustingly", + "disicplined", "disciplined", + "disicplines", "disciplines", + "disingenuos", "disingenuous", + "dismanlting", "dismantling", + "dismantaled", "dismantled", + "dismanteled", "dismantled", + "disobediant", "disobedient", + "disocgraphy", "discography", + "disparingly", "disparagingly", + "dispensaire", "dispensaries", + "dispensarie", "dispenser", + "dispensiary", "dispensary", + "displacemnt", "displacement", + "disposicion", "disposition", + "disputandem", "disputandum", + "disqualifed", "disqualified", + "disregaring", "disregarding", + "dissapeared", "disappeared", + "dissapoined", "dissapointed", + "dissapointd", "dissapointed", + "dissapoited", "dissapointed", + "dissappears", "disappears", + "dissatisfed", "dissatisfied", + "disscusions", "discussions", + "dissertaion", "dissertation", + "dissipatore", "dissipate", + "distatesful", "distasteful", + "distatseful", "distasteful", + "disterbance", "disturbance", + "disticntion", "distinctions", + "distinciton", "distinction", + "distincitve", "distinctive", + "distinctily", "distinctly", + "distingiush", "distinguish", + "distinguise", "distinguished", + "distinktion", "distinction", + "distinquish", "distinguish", + "distirbance", "disturbance", + "distirbuted", "distribute", + "distirbutor", "distributor", + "distraccion", "distraction", + "distractons", "distracts", + "distraktion", "distraction", + "distribitor", "distributor", + "distribuent", "distribute", + "distribuite", "distribute", + "distribuito", "distribution", 
+ "distributie", "distributed", + "distributin", "distribution", + "distributio", "distributor", + "distrobuted", "distributed", + "distrubance", "disturbance", + "distrubited", "distributed", + "distrubitor", "distributor", + "distrubuted", "distributed", + "distrubutor", "distributor", + "distructive", "destructive", + "distuingish", "distinguish", + "distunguish", "distinguish", + "disturbante", "disturbance", + "disturbence", "disturbance", + "disucssions", "discussions", + "divisionals", "divisions", + "doccumented", "documented", + "documantary", "documentary", + "documenatry", "documentary", + "documentare", "documentaries", + "documentato", "documentation", + "documentery", "documentary", + "documentory", "documentary", + "domesticted", "domesticated", + "dominateurs", "dominates", + "dominationg", "dominating", + "donwloading", "downloading", + "doublellift", "doublelift", + "downlaoding", "downloading", + "downloadbel", "downloadable", + "downloadbig", "downloading", + "downloadble", "downloadable", + "downvoteers", "downvoters", + "downvoteing", "downvoting", + "downvoteres", "downvoters", + "downvoteros", "downvoters", + "downvoteurs", "downvoters", + "downvotters", "downvoters", + "downvotting", "downvoting", + "dramaticaly", "dramatically", + "dramaticlly", "dramatically", + "drasitcally", "drastically", + "dsyfunction", "dysfunction", + "duetschland", "deutschland", + "durabillity", "durability", + "dyanmically", "dynamically", + "dymanically", "dynamically", + "dysfonction", "dysfunction", + "dysfucntion", "dysfunction", + "dysfunciton", "dysfunction", + "dysfunktion", "dysfunction", + "earhtquakes", "earthquakes", + "earthqaukes", "earthquakes", + "earthquacks", "earthquakes", + "economicaly", "economically", + "economiclly", "economically", + "economisiti", "economist", + "economistes", "economists", + "educacional", "educational", + "effeciently", "efficiently", + "effecitvely", "effectively", + "effectivley", "effectively", + "efficeintly", "efficiently", + "efficiantly", "efficiently", + "efficientcy", "efficiently", + "effortlesly", "effortlessly", + "effortlessy", "effortlessly", + "egaletarian", "egalitarian", + "egalitatian", "egalitarian", + "egaliterian", "egalitarian", + "egostitical", "egotistical", + "egotastical", "egotistical", + "egotestical", "egotistical", + "egotisitcal", "egotistical", + "egotisticle", "egotistical", + "egotystical", "egotistical", + "ehtnicities", "ethnicities", + "ejacluation", "ejaculation", + "ejacualtion", "ejaculation", + "electoratul", "electoral", + "electornics", "electronics", + "electricain", "electrician", + "electricial", "electrical", + "electricien", "electrician", + "electricion", "electrician", + "electricman", "electrician", + "electrisity", "electricity", + "electritian", "electrician", + "electrocity", "electricity", + "electrolyes", "electrolytes", + "electrolyts", "electrolytes", + "electroncis", "electrons", + "electroylte", "electrolytes", + "elementrary", "elementary", + "eleminating", "eliminating", + "elimanation", "elimination", + "eliminacion", "elimination", + "elimintates", "eliminates", + "ellipitcals", "elliptical", + "eloquentely", "eloquently", + "emabrassing", "embarassing", + "embaraasing", "embarassing", + "embarasaing", "embarassing", + "embarassign", "embarassing", + "embarassimg", "embarassing", + "embarassing", "embarrassing", + "embarissing", "embarassing", + "embarrasing", "embarrassing", + "embarressed", "embarrassed", + "embarrssing", "embarassing", + "emergancies", "emergencies", + 
"emergencias", "emergencies", + "emergenices", "emergencies", + "emmediately", "immediately", + "emmisarries", "emissaries", + "emotionella", "emotionally", + "empahsizing", "emphasizing", + "empathethic", "empathetic", + "emphacizing", "emphasizing", + "emphatising", "emphasizing", + "emphatizing", "emphasizing", + "emphazising", "emphasizing", + "emphesizing", "emphasizing", + "empiracally", "empirically", + "empirialism", "imperialism", + "empirialist", "imperialist", + "enchamtment", "enchantment", + "enchancment", "enchantment", + "enchanement", "enchantment", + "enchanthing", "enchanting", + "enchantmant", "enchantment", + "enchantmens", "enchantments", + "enchantmets", "enchantments", + "encomapsses", "encompasses", + "encompasess", "encompasses", + "encompesses", "encompasses", + "encounteres", "encounters", + "encoutnered", "encountered", + "encryptiion", "encryption", + "encyclopdia", "encyclopedia", + "encylopedia", "encyclopedia", + "endagnering", "endangering", + "endandering", "endangering", + "endorcement", "endorsement", + "endoresment", "endorsement", + "engagaments", "engagements", + "engeneering", "engineering", + "enginerring", "engineering", + "enginnering", "engineering", + "enlargments", "enlargements", + "enligthened", "enlightened", + "enourmously", "enormously", + "enterpirses", "enterprises", + "enterprices", "enterprises", + "enterprishe", "enterprises", + "entertainig", "entertaining", + "entertwined", "entertained", + "enthicities", "ethnicities", + "enthisiasts", "enthusiasts", + "enthuasists", "enthusiasts", + "enthuisasts", "enthusiasts", + "enthusaists", "enthusiasts", + "enthusiants", "enthusiast", + "enthusiasic", "enthusiastic", + "enthusiasim", "enthusiasm", + "enthusiasum", "enthusiasm", + "enthusiatic", "enthusiastic", + "enthusiests", "enthusiasts", + "enthusigasm", "enthusiasm", + "enthusisast", "enthusiasts", + "entrepeneur", "entrepreneur", + "entreperure", "entrepreneur", + "entrepeuner", "entrepreneur", + "entreprener", "entrepreneurs", + "entreprenur", "entrepreneur", + "entretained", "entertained", + "envinroment", "environments", + "enviorments", "environments", + "enviornment", "environment", + "envirnoment", "environment", + "enviroments", "environments", + "enviromnent", "environments", + "environemnt", "environment", + "environmnet", "environments", + "envrionment", "environment", + "equilavents", "equivalents", + "equilbirium", "equilibrium", + "equilevants", "equivalents", + "equilibirum", "equilibrium", + "equilibriam", "equilibrium", + "equilibruim", "equilibrium", + "equivalance", "equivalence", + "equivalants", "equivalents", + "equivalenet", "equivalents", + "equivallent", "equivalent", + "equivelance", "equivalence", + "equivelants", "equivalents", + "equivelents", "equivalents", + "equivilants", "equivalents", + "equivilence", "equivalence", + "equivilents", "equivalents", + "equivlalent", "equivalent", + "equivlanets", "equivalents", + "equivolence", "equivalence", + "equivolents", "equivalents", + "essencially", "essentially", + "essentailly", "essentially", + "essentialls", "essentials", + "essentually", "essentially", + "establising", "establishing", + "ethicallity", "ethically", + "ethincities", "ethnicities", + "ethniticies", "ethnicities", + "europeaners", "europeans", + "europeaness", "europeans", + "evaluatiing", "evaluating", + "evaluationg", "evaluating", + "evangalical", "evangelical", + "evangelikal", "evangelical", + "evengalical", "evangelical", + "evenhtually", "eventually", + "everyonehas", "everyones", + 
"everyonelse", "everyones", + "evidentally", "evidently", + "exacarbated", "exacerbated", + "exacberated", "exacerbated", + "exagerating", "exaggerating", + "exagerrated", "exaggerated", + "exagerrates", "exaggerates", + "exaggarated", "exaggerated", + "exaggareted", "exaggerate", + "exaggeratin", "exaggeration", + "exaggerrate", "exaggerate", + "exaggurated", "exaggerated", + "exarcebated", "exacerbated", + "excalmation", "exclamation", + "excepcional", "exceptional", + "exceptionel", "exceptional", + "excessivley", "excessively", + "exceutioner", "executioner", + "exchanching", "exchanging", + "exclamacion", "exclamation", + "exclamating", "exclamation", + "exclamativo", "exclamation", + "exclemation", "exclamation", + "exclimation", "exclamation", + "exclucivity", "exclusivity", + "exclusivety", "exclusivity", + "exclusivily", "exclusivity", + "exclusivley", "exclusively", + "excpetional", "exceptional", + "exculsively", "exclusively", + "exculsivity", "exclusivity", + "execitioner", "executioner", + "execptional", "exceptional", + "exectuables", "executable", + "exectuioner", "executioner", + "executionar", "executioner", + "executionor", "executioner", + "exerciseing", "exercising", + "exeuctioner", "executioner", + "existantial", "existential", + "existencial", "existential", + "existensial", "existential", + "existentiel", "existential", + "exlcamation", "exclamation", + "exlcusively", "exclusively", + "exlcusivity", "exclusivity", + "exoskelaton", "exoskeleton", + "expansiones", "expansions", + "expectantcy", "expectancy", + "expectating", "expectation", + "expectional", "exceptional", + "expendature", "expenditure", + "expendeture", "expenditure", + "expentiture", "expenditure", + "expereinced", "experienced", + "expereinces", "experiences", + "experements", "experiments", + "experianced", "experienced", + "experiances", "experiences", + "experiemnts", "experiments", + "experiening", "experiencing", + "experimetal", "experimental", + "experimeted", "experimented", + "experssions", "expressions", + "expiditions", "expeditions", + "expierenced", "experienced", + "expierences", "experiences", + "expirements", "experiments", + "explainging", "explaining", + "explaintory", "explanatory", + "explanaiton", "explanations", + "explanetary", "explanatory", + "explanetory", "explanatory", + "explanitary", "explanatory", + "explanotory", "explanatory", + "explenation", "explanation", + "explenatory", "explanatory", + "explicitely", "explicitly", + "explicitily", "explicitly", + "explination", "explanation", + "explinatory", "explanatory", + "exploitaion", "exploitation", + "exploitatie", "exploitative", + "explonation", "exploration", + "exploracion", "exploration", + "explorating", "exploration", + "explorerers", "explorers", + "explosiones", "explosions", + "explotacion", "exploration", + "expodential", "exponential", + "exponantial", "exponential", + "exponencial", "exponential", + "exponentiel", "exponential", + "expresscoin", "expression", + "expressivos", "expressions", + "expresssive", "expressive", + "expressview", "expressive", + "exprimental", "experimental", + "expropiated", "expropriated", + "extensiones", "extensions", + "extensivley", "extensively", + "extragavant", "extravagant", + "extrapalate", "extrapolate", + "extraploate", "extrapolate", + "extrapolant", "extrapolate", + "extrapolare", "extrapolate", + "extrapolite", "extrapolate", + "extrapulate", "extrapolate", + "extravagent", "extravagant", + "extravagina", "extravagant", + "extravegant", "extravagant", + 
"extravigant", "extravagant", + "extravogant", "extravagant", + "extremistas", "extremists", + "extremistes", "extremists", + "extropolate", "extrapolate", + "fabircation", "fabrication", + "fabricacion", "fabrication", + "fabrikation", "fabrication", + "facilitarte", "facilitate", + "facilitiate", "facilitate", + "facillitate", "facilitate", + "facisnation", "fascination", + "facsination", "fascination", + "factuallity", "factually", + "familairity", "familiarity", + "familairize", "familiarize", + "familiaries", "familiarize", + "familierize", "familiarize", + "fanatsizing", "fantasizing", + "fanficitons", "fanfiction", + "fantacising", "fantasizing", + "fantacizing", "fantasizing", + "fantasazing", "fantasizing", + "fantasiaing", "fantasizing", + "fantasyzing", "fantasizing", + "fantazising", "fantasizing", + "fascinacion", "fascination", + "fascinatinf", "fascination", + "fascisation", "fascination", + "fascization", "fascination", + "fashionalbe", "fashionable", + "fashoinable", "fashionable", + "fatalitites", "fatalities", + "favoritisme", "favorites", + "favoutrable", "favourable", + "felxibility", "flexibility", + "feministers", "feminists", + "feministisk", "feminists", + "fermentaion", "fermentation", + "fermenterad", "fermented", + "fertilizier", "fertilizer", + "fertizilers", "fertilizer", + "festivalens", "festivals", + "fignernails", "fingernails", + "fignerprint", "fingerprint", + "figurativly", "figuratively", + "finanically", "financially", + "finantially", "financially", + "fingerpints", "fingertips", + "fingerpoint", "fingerprint", + "fingertrips", "fingertips", + "firefighers", "firefighters", + "firefigther", "firefighters", + "firendzoned", "friendzoned", + "firghtening", "frightening", + "flatterende", "flattered", + "flawlessely", "flawlessly", + "flawlessley", "flawlessly", + "flexibiltiy", "flexibility", + "flourescent", "fluorescent", + "fluctuaties", "fluctuate", + "fluctuative", "fluctuate", + "flutteryshy", "fluttershy", + "forcefullly", "forcefully", + "foreseaable", "foreseeable", + "foresseable", "foreseeable", + "forgettting", "forgetting", + "forgiviness", "forgiveness", + "formallized", "formalized", + "formattting", "formatting", + "formidabble", "formidable", + "formidabelt", "formidable", + "formidabile", "formidable", + "fortitudine", "fortitude", + "fortuantely", "fortunately", + "fortunantly", "fortunately", + "fortunatley", "fortunately", + "fortunetely", "fortunately", + "franchieses", "franchises", + "frankensite", "frankenstein", + "frankensten", "frankenstein", + "fransiscans", "franciscans", + "freindships", "friendships", + "freindzoned", "friendzoned", + "frequenices", "frequencies", + "frequensies", "frequencies", + "frequenties", "frequencies", + "frequentily", "frequently", + "frequenzies", "frequencies", + "friendboned", "friendzoned", + "friendlines", "friendlies", + "friendzonie", "friendzoned", + "frientships", "friendships", + "frientzoned", "friendzoned", + "frightenend", "frightened", + "frightining", "frightening", + "frigthening", "frightening", + "frinedzoned", "friendzoned", + "frontlinies", "frontline", + "frontlinjen", "frontline", + "frustartion", "frustrations", + "frustracion", "frustration", + "frustraited", "frustrated", + "frustrantes", "frustrates", + "frustrasion", "frustrations", + "frustrasted", "frustrates", + "frustraties", "frustrates", + "fucntioning", "functioning", + "fulfillling", "fulfilling", + "fulfullment", "fulfillment", + "fullfilment", "fulfillment", + "fullscreeen", "fullscreen", + "funcitoning", 
"functioning", + "functionaly", "functionally", + "functionnal", "functional", + "fundamentas", "fundamentals", + "fundamently", "fundamental", + "fundametals", "fundamentals", + "fundamnetal", "fundamentals", + "fundemantal", "fundamental", + "fundemental", "fundamental", + "fundimental", "fundamental", + "furhtermore", "furthermore", + "furstration", "frustration", + "furthremore", "furthermore", + "furthurmore", "furthermore", + "futurisitic", "futuristic", + "gangsterest", "gangsters", + "gangsterous", "gangsters", + "gauntlettes", "gauntlets", + "geneologies", "genealogies", + "generalizng", "generalizing", + "generatting", "generating", + "genitaliban", "genitalia", + "gentlemanne", "gentlemen", + "girlfirends", "girlfriends", + "girlfreinds", "girlfriends", + "girlfrients", "girlfriends", + "glorifierad", "glorified", + "glorifindel", "glorified", + "goosebumbps", "goosebumps", + "govenrments", "governments", + "govermental", "governmental", + "governemnts", "governments", + "governmanet", "governmental", + "governmeant", "governmental", + "govormental", "governmental", + "gracefullly", "gracefully", + "grahpically", "graphically", + "grammarical", "grammatical", + "grammaticly", "grammatical", + "grammitical", "grammatical", + "graphcially", "graphically", + "grassrooots", "grassroots", + "gratuitious", "gratuitous", + "gratuituous", "gratuitous", + "gravitatiei", "gravitate", + "grilfriends", "girlfriends", + "grpahically", "graphically", + "guaranteeds", "guarantees", + "guerrillera", "guerrilla", + "gunslingner", "gunslinger", + "hamburgaren", "hamburger", + "hamburgeres", "hamburgers", + "hamburglers", "hamburgers", + "hamburguers", "hamburgers", + "handlebards", "handlebars", + "handrwiting", "handwriting", + "handycapped", "handicapped", + "hanidcapped", "handicapped", + "harassement", "harassment", + "harrasments", "harassments", + "harrassment", "harassment", + "harvestgain", "harvesting", + "headquartes", "headquarters", + "headquaters", "headquarters", + "hearhtstone", "hearthstone", + "heartborken", "heartbroken", + "heartbraker", "heartbreak", + "heartbrakes", "heartbreak", + "heartsthone", "hearthstone", + "heaviweight", "heavyweight", + "heavyweigth", "heavyweight", + "heavywieght", "heavyweight", + "helicoptors", "helicopters", + "helicotpers", "helicopters", + "helicpoters", "helicopters", + "helictopers", "helicopters", + "helikopters", "helicopters", + "hemipsheres", "hemisphere", + "hemishperes", "hemisphere", + "herathstone", "hearthstone", + "heterosexal", "heterosexual", + "hexidecimal", "hexadecimal", + "hierachical", "hierarchical", + "hierarcical", "hierarchical", + "highlighing", "highlighting", + "highschoool", "highschool", + "hipopotamus", "hippopotamus", + "historicaly", "historically", + "historicans", "historians", + "historietas", "histories", + "historinhas", "historians", + "homecomeing", "homecoming", + "homecomming", "homecoming", + "homelesness", "homelessness", + "homelessess", "homelessness", + "homeowneris", "homeowners", + "homoegenous", "homogeneous", + "homogeneize", "homogenize", + "homogenious", "homogeneous", + "homogenuous", "homogeneous", + "homophoboes", "homophobe", + "homosexuais", "homosexuals", + "homosexuels", "homosexuals", + "hopelessely", "hopelessly", + "hopelessley", "hopelessly", + "hopsitality", "hospitality", + "horizonatal", "horizontal", + "horizontaal", "horizontal", + "horizontaly", "horizontally", + "horrendeous", "horrendous", + "horrendious", "horrendous", + "horrenduous", "horrendous", + "hospitalzed", 
"hospitalized", + "hospotality", "hospitality", + "househoulds", "households", + "humanitarna", "humanitarian", + "humanitites", "humanities", + "humilitaing", "humiliating", + "humilitaion", "humiliation", + "humillating", "humiliating", + "humillation", "humiliation", + "hurricaines", "hurricanes", + "hurricances", "hurricanes", + "hurricanger", "hurricane", + "hyperbollic", "hyperbolic", + "hyperbrophy", "hypertrophy", + "hyperthropy", "hypertrophy", + "hypertorphy", "hypertrophy", + "hypertrohpy", "hypertrophy", + "hypocritcal", "hypocritical", + "hypocritial", "hypocritical", + "hypocrities", "hypocrite", + "hypothesees", "hypotheses", + "hypothesies", "hypothesis", + "hystericaly", "hysterically", + "hystericlly", "hysterically", + "iconclastic", "iconoclastic", + "idealisitic", "idealistic", + "identifible", "identifiable", + "identitites", "identities", + "identitties", "identities", + "ideologiers", "ideologies", + "ideologisen", "ideologies", + "ideologiset", "ideologies", + "ideologiske", "ideologies", + "illegallity", "illegally", + "illegitamte", "illegitimate", + "illegitmate", "illegitimate", + "illsutrator", "illustrator", + "illuminanti", "illuminati", + "illuminarti", "illuminati", + "illuminatti", "illuminati", + "illuminauti", "illuminati", + "illuminiati", "illuminati", + "illuminista", "illuminati", + "illumintati", "illuminati", + "illustarted", "illustrated", + "illustartor", "illustrator", + "illustraded", "illustrated", + "illustraion", "illustration", + "illustrater", "illustrator", + "illustratie", "illustrate", + "illustratin", "illustrations", + "illustraton", "illustration", + "imaganative", "imaginative", + "imaganitive", "imaginative", + "imaginacion", "imagination", + "imaginatiei", "imaginative", + "imaginating", "imagination", + "imaginativo", "imagination", + "imaginitave", "imaginative", + "imbalanaced", "imbalanced", + "imbalanaces", "imbalances", + "imbalancers", "imbalances", + "immatureity", "immaturity", + "immedeately", "immediately", + "immediantly", "immediately", + "immediatley", "immediately", + "immedietely", "immediately", + "immideately", "immediately", + "immidiately", "immediately", + "immigraiton", "immigration", + "immigrantes", "immigrants", + "immoratlity", "immortality", + "immortailty", "immortality", + "immortalisy", "immortals", + "impeccabile", "impeccable", + "imperailist", "imperialist", + "imperealist", "imperialist", + "imperialims", "imperialism", + "imperialsim", "imperialism", + "imperiarist", "imperialist", + "imperically", "empirically", + "imperislist", "imperialist", + "implausable", "implausible", + "implausbile", "implausible", + "implementas", "implements", + "implementes", "implements", + "implementig", "implementing", + "implementos", "implements", + "implicacion", "implication", + "implicatons", "implications", + "implicitely", "implicitly", + "implicitily", "implicitly", + "implikation", "implication", + "implimented", "implemented", + "importantce", "importance", + "importently", "importantly", + "imporvement", "improvement", + "impossibile", "impossible", + "impossibily", "impossibly", + "impossibley", "impossibly", + "impossiblly", "impossibly", + "impoverised", "impoverished", + "impracticle", "impractical", + "impressario", "impresario", + "impresssion", "impressions", + "imprisonent", "imprisonment", + "imprisonned", "imprisoned", + "improbabile", "improbable", + "improtantly", "importantly", + "improvemnts", "improvements", + "improvished", "improvised", + "improvision", "improvisation", + "improvments", 
"improvements", + "impulsivley", "impulsive", + "imrpovement", "improvement", + "inaccessble", "inaccessible", + "inaccuraces", "inaccuracies", + "inaccurrate", "inaccurate", + "inadvertant", "inadvertent", + "inaguration", "inauguration", + "inahbitants", "inhabitants", + "incarantion", "incarnation", + "incarcerato", "incarceration", + "incarnacion", "incarnation", + "incentivare", "incentive", + "incentivate", "incentive", + "incentivice", "incentive", + "incentivies", "incentives", + "incidencies", "incidence", + "incidentaly", "incidentally", + "incidential", "incidental", + "inclanation", "inclination", + "inclenation", "inclination", + "inclinacion", "inclination", + "inclinaison", "inclination", + "incognition", "incognito", + "incoherrent", "incoherent", + "incompatble", "incompatible", + "incompatent", "incompetent", + "incompetant", "incompetent", + "incompitent", "incompetent", + "incompotent", "incompetent", + "incomptable", "incompatible", + "inconsisent", "inconsistent", + "inconveniet", "inconvenient", + "incoroprate", "incorporate", + "incorparate", "incorporate", + "incorperate", "incorporate", + "incorporare", "incorporate", + "incorported", "incorporated", + "incorprates", "incorporates", + "incorproate", "incorporated", + "incramental", "incremental", + "increadible", "incredible", + "incrediable", "incredible", + "incrediably", "incredibly", + "incredibile", "incredible", + "incredibily", "incredibly", + "incredibley", "incredibly", + "incrememnts", "increments", + "incremenets", "increments", + "incrementas", "increments", + "incremently", "incremental", + "incrementos", "increments", + "incrimental", "incremental", + "inctroduced", "introduced", + "indefinetly", "indefinitely", + "indefininte", "indefinite", + "indefinitly", "indefinitely", + "indepdenent", "independents", + "indepedence", "independence", + "indepednent", "independents", + "independant", "independent", + "independece", "independence", + "independens", "independents", + "independetn", "independents", + "independets", "independents", + "independnet", "independents", + "indepentend", "independents", + "indepentent", "independent", + "indianapols", "indianapolis", + "indicateurs", "indicates", + "indicatiors", "indicators", + "indictement", "indictment", + "indifferant", "indifferent", + "indiffernce", "indifference", + "indigeneous", "indigenous", + "indigenious", "indigenous", + "indigenuous", "indigenous", + "indigineous", "indigenous", + "indipendent", "independent", + "indirectely", "indirectly", + "individiual", "individual", + "individuais", "individuals", + "individualy", "individually", + "individuati", "individuality", + "individuels", "individuals", + "indivuduals", "individuals", + "industriels", "industries", + "ineffecitve", "ineffective", + "ineffektive", "ineffective", + "inefficeint", "inefficient", + "inefficiant", "inefficient", + "ineffictive", "ineffective", + "ineffizient", "inefficient", + "inequallity", "inequality", + "inevitabile", "inevitable", + "inevitabily", "inevitably", + "inevitabley", "inevitably", + "inevitablly", "inevitably", + "inexpencive", "inexpensive", + "inexpenisve", "inexpensive", + "inexperiece", "inexperience", + "inexperince", "inexperience", + "inexplicaby", "inexplicably", + "infallibale", "infallible", + "infallibile", "infallible", + "infectation", "infestation", + "inferioirty", "inferiority", + "infestating", "infestation", + "infilitrate", "infiltrate", + "infiltartor", "infiltrator", + "infiltraron", "infiltrator", + "infiltrarte", "infiltrate", + 
"infiltrater", "infiltrator", + "infiltratie", "infiltrate", + "infiltrerat", "infiltrate", + "infinitelly", "infinitely", + "infintrator", "infiltrator", + "inflamation", "inflammation", + "inflatabale", "inflatable", + "inflitrator", "infiltrator", + "influancing", "influencing", + "influencial", "influential", + "influencian", "influencing", + "influenting", "influencing", + "influentual", "influential", + "influincing", "influencing", + "infograhpic", "infographic", + "infograpgic", "infographic", + "infogrpahic", "infographic", + "informacion", "information", + "informatice", "informative", + "informatief", "informative", + "informatiei", "informative", + "informatike", "informative", + "informativo", "information", + "informitive", "informative", + "infrigement", "infringement", + "infringeing", "infringing", + "infromation", "information", + "infromative", "informative", + "infulential", "influential", + "ingerdients", "ingredients", + "ingrediants", "ingredients", + "ingreidents", "ingredient", + "ingriedents", "ingredient", + "inhabitents", "inhabitants", + "inheirtance", "inheritance", + "inheratance", "inheritance", + "inheretance", "inheritance", + "inheritence", "inheritance", + "inhertiance", "inheritance", + "initaitives", "initiatives", + "initalisers", "initialisers", + "initalising", "initialising", + "initalizers", "initializers", + "initalizing", "initializing", + "initiaitive", "initiative", + "inititiaves", "initiatives", + "innocenters", "innocents", + "innocentius", "innocents", + "innoculated", "inoculated", + "inpsiration", "inspiration", + "inquisicion", "inquisition", + "inquisistor", "inquisitor", + "inquisiting", "inquisition", + "inquisitior", "inquisitor", + "inquisitivo", "inquisition", + "inquizition", "inquisition", + "insecurites", "insecurities", + "insensative", "insensitive", + "insensetive", "insensitive", + "insentitive", "insensitive", + "insepctions", "inspections", + "inseperable", "inseparable", + "insipration", "inspiration", + "insitutions", "institutions", + "insparation", "inspiration", + "inspecticon", "inspection", + "inspectoras", "inspectors", + "insperation", "inspiration", + "inspiracion", "inspiration", + "inspirating", "inspiration", + "inspriation", "inspiration", + "instalation", "installation", + "instalement", "installment", + "installatin", "installations", + "installeert", "installer", + "installemnt", "installment", + "installling", "installing", + "installmant", "installment", + "instanciate", "instantiate", + "instantaneu", "instantaneous", + "institucion", "institution", + "institutiei", "institute", + "instituttet", "institute", + "instraments", "instruments", + "instruccion", "instruction", + "instruciton", "instruction", + "instructers", "instructors", + "instructior", "instructor", + "instructios", "instructors", + "instructivo", "instruction", + "instructons", "instructors", + "instruktion", "instruction", + "instrumenal", "instrumental", + "instrumetal", "instrumental", + "insturction", "instruction", + "insturctors", "instructors", + "insturments", "instruments", + "instutition", "institution", + "instutution", "institution", + "insufficent", "insufficient", + "insuinating", "insinuating", + "insuniating", "insinuating", + "insurgencey", "insurgency", + "intangiable", "intangible", + "intangibile", "intangible", + "inteferring", "interfering", + "integracion", "integration", + "integratron", "integration", + "integrering", "interfering", + "intelectual", "intellectual", + "inteligence", "intelligence", + 
"intellectul", "intellectuals", + "intellectus", "intellectuals", + "intellecual", "intellectual", + "intellegent", "intelligent", + "intelligant", "intelligent", + "intencional", "intentional", + "intentionly", "intentional", + "interaccion", "interaction", + "interactice", "interactive", + "interacties", "interacts", + "interactifs", "interacts", + "interactins", "interacts", + "interactios", "interacts", + "interactivo", "interaction", + "interactons", "interacts", + "interaktion", "interaction", + "interaktive", "interactive", + "interasting", "interacting", + "intercation", "integration", + "interceptin", "interception", + "intercoarse", "intercourse", + "intercource", "intercourse", + "interecting", "interacting", + "interection", "interaction", + "interelated", "interrelated", + "interersted", "interpreted", + "interesring", "interfering", + "interessted", "interested", + "interferece", "interference", + "interferens", "interferes", + "interferire", "interfere", + "interfernce", "interference", + "interferred", "interfere", + "interferres", "interferes", + "intergation", "integration", + "intergrated", "integrated", + "intermedate", "intermediate", + "intermedite", "intermediate", + "intermitent", "intermittent", + "internation", "international", + "interneters", "internets", + "internetese", "internets", + "internetest", "internets", + "interneting", "interesting", + "internetors", "internets", + "internettes", "internets", + "interperted", "interpreted", + "interperter", "interpreter", + "interprered", "interpreter", + "interpretor", "interpreter", + "interratial", "interracial", + "interresing", "interfering", + "interrogato", "interrogation", + "interrputed", "interrupted", + "interruping", "interrupting", + "interruptes", "interrupts", + "interruptis", "interrupts", + "intersecton", "intersection", + "interstelar", "interstellar", + "intertained", "intertwined", + "intertvined", "intertwined", + "intertwyned", "intertwined", + "intervalles", "intervals", + "intervation", "integration", + "interveiwed", "interviewed", + "interveiwer", "interviewer", + "intervenion", "intervening", + "intervenire", "intervene", + "interventie", "intervene", + "intervewing", "intervening", + "interviened", "interviewed", + "interviewes", "interviews", + "interviewie", "interviewer", + "intervining", "intervening", + "interwebers", "interwebs", + "interwiever", "interviewer", + "intestinces", "intestines", + "inticracies", "intricacies", + "intimadated", "intimidated", + "intimidades", "intimidated", + "intimidante", "intimidate", + "intimidatie", "intimidated", + "intimidatin", "intimidation", + "intimidaton", "intimidation", + "intimidiate", "intimidate", + "intiminated", "intimidated", + "intimitaded", "intimidated", + "intimitated", "intimidated", + "intiutively", "intuitively", + "intoleranse", "intolerance", + "intolerante", "intolerance", + "intolerence", "intolerance", + "intolernace", "intolerance", + "intolorance", "intolerance", + "intolorence", "intolerance", + "intorducing", "introducing", + "intorverted", "introverted", + "intoxicatin", "intoxication", + "intoxicaton", "intoxication", + "intoxinated", "intoxicated", + "intoxocated", "intoxicated", + "intracacies", "intricacies", + "intracicies", "intricacies", + "intraverted", "introverted", + "intrecacies", "intricacies", + "intrepreted", "interpreted", + "intrepreter", "interpreter", + "intrerupted", "interrupted", + "intricasies", "intricacies", + "intricicies", "intricacies", + "intrigueing", "intriguing", + "intrinsisch", 
"intrinsic", + "introducion", "introduction", + "introducted", "introduced", + "introductie", "introduce", + "introvertie", "introverted", + "introvertis", "introverts", + "intruducing", "introducing", + "intrumental", "instrumental", + "intuatively", "intuitively", + "intuitevely", "intuitively", + "intuitivley", "intuitively", + "intuituvely", "intuitively", + "inutitively", "intuitively", + "invaldiates", "invalidates", + "invalidades", "invalidates", + "invalidante", "invalidate", + "invariabley", "invariably", + "invariablly", "invariably", + "inventiones", "inventions", + "invesitgate", "investigate", + "investagate", "investigate", + "investiagte", "investigate", + "investigare", "investigate", + "invincibile", "invincible", + "invincinble", "invincible", + "invisibiity", "invisibility", + "invisibiliy", "invisibility", + "invokations", "invocations", + "involantary", "involuntary", + "involentary", "involuntary", + "involintary", "involuntary", + "involontary", "involuntary", + "involunatry", "involuntary", + "invulnerabe", "invulnerable", + "invulnerble", "invulnerable", + "iresistable", "irresistible", + "iresistably", "irresistibly", + "iresistible", "irresistible", + "iresistibly", "irresistibly", + "irrationaly", "irrationally", + "irrationnal", "irrational", + "islamisists", "islamists", + "islamisters", "islamists", + "islamistisk", "islamists", + "isntruments", "instruments", + "jacksonvile", "jacksonville", + "jailbroaken", "jailbroken", + "jailbrocken", "jailbroken", + "jounralists", "journalists", + "jouranlists", "journalists", + "journalisim", "journalism", + "journalistc", "journalistic", + "journolists", "journalists", + "judegmental", "judgemental", + "judgamental", "judgemental", + "judgementle", "judgemental", + "judgementsl", "judgemental", + "judgenental", "judgemental", + "jugdemental", "judgemental", + "juggernaugt", "juggernaut", + "juggernault", "juggernaut", + "juggernaunt", "juggernaut", + "justifyable", "justifiable", + "kidnappning", "kidnapping", + "kidnappping", "kidnapping", + "kilometeres", "kilometers", + "kindergaten", "kindergarten", + "knowledgabe", "knowledgable", + "knowledgble", "knowledgable", + "kryptoninte", "kryptonite", + "lacklusture", "lackluster", + "laughablely", "laughably", + "legalizaing", "legalizing", + "legalizaton", "legalization", + "legalizeing", "legalizing", + "legenadries", "legendaries", + "legendaires", "legendaries", + "legendarios", "legendaries", + "legendarisk", "legendaries", + "legendaryes", "legendaries", + "legenderies", "legendaries", + "legilsation", "legislation", + "legislacion", "legislation", + "legislativo", "legislation", + "legistation", "legislation", + "legistative", "legislative", + "legistators", "legislators", + "legitematly", "legitimately", + "legitimancy", "legitimacy", + "legitimatcy", "legitimacy", + "legitimatly", "legitimately", + "legitimetly", "legitimately", + "legnedaries", "legendaries", + "lengedaries", "legendaries", + "liberalisim", "liberalism", + "liberatrian", "libertarians", + "libertairan", "libertarians", + "libertarain", "libertarian", + "libertarias", "libertarians", + "libertarien", "libertarian", + "libertaryan", "libertarian", + "libertatian", "libertarian", + "liberterian", "libertarian", + "libguistics", "linguistics", + "libretarian", "libertarian", + "lieutennant", "lieutenant", + "lieutentant", "lieutenant", + "lightenning", "lightening", + "lightenting", "lightening", + "lightheared", "lighthearted", + "lightheated", "lighthearted", + "lightweigth", "lightweight", + 
"lightwieght", "lightweight", + "lightwright", "lightweight", + "ligthweight", "lightweight", + "limitaitons", "limitation", + "linguisitcs", "linguistics", + "linguisitic", "linguistic", + "lingusitics", "linguistics", + "lithuaninan", "lithuania", + "littlefiger", "littlefinger", + "littlefiner", "littlefinger", + "lockscreeen", "lockscreen", + "longevitity", "longevity", + "lotharingen", "lothringen", + "louisvillle", "louisville", + "maginficent", "magnificent", + "magneficent", "magnificent", + "magnicifent", "magnificent", + "magnifacent", "magnificent", + "magnifecent", "magnificent", + "magnificant", "magnificent", + "magnitudine", "magnitude", + "maintainted", "maintained", + "maintanance", "maintenance", + "maintanence", "maintenance", + "maintenence", "maintenance", + "maintianing", "maintaining", + "maintinaing", "maintaining", + "maintinance", "maintenance", + "maintinence", "maintenance", + "malfonction", "malfunction", + "malfucntion", "malfunction", + "malfunciton", "malfunction", + "malfuncting", "malfunction", + "malfunktion", "malfunction", + "malpractise", "malpractice", + "malpractive", "malpractice", + "maneouvring", "manoeuvring", + "manifestado", "manifesto", + "manifestano", "manifesto", + "manifestato", "manifesto", + "manifestion", "manifesto", + "manifestior", "manifesto", + "manifestons", "manifests", + "manifestors", "manifests", + "manipluated", "manipulated", + "manipualted", "manipulated", + "manipulatie", "manipulative", + "manipulatin", "manipulation", + "manipulaton", "manipulation", + "maniuplated", "manipulated", + "mannerisims", "mannerisms", + "manslaugher", "manslaughter", + "manslaugter", "manslaughter", + "manufacters", "manufactures", + "manufacteur", "manufactures", + "manufactued", "manufactured", + "manufactuer", "manufacture", + "manufacturs", "manufactures", + "manufacuter", "manufacture", + "manufacutre", "manufactures", + "manufatured", "manufactured", + "manupilated", "manipulated", + "marganilize", "marginalized", + "marhsmallow", "marshmallow", + "marijuannas", "marijuana", + "markerplace", "marketplace", + "marketpalce", "marketplace", + "marshamllow", "marshmallow", + "marshmalows", "marshmallows", + "masculanity", "masculinity", + "masculenity", "masculinity", + "masrhmallow", "marshmallow", + "mastermined", "mastermind", + "masterpeace", "masterpiece", + "masterpeice", "masterpiece", + "mastrubated", "masturbated", + "mastrubates", "masturbate", + "masturabted", "masturbated", + "masturbaing", "masturbating", + "masturbarte", "masturbate", + "masturbathe", "masturbated", + "masturbatie", "masturbated", + "masturbatin", "masturbation", + "masturbaton", "masturbation", + "masturbsted", "masturbated", + "masturpiece", "masterpiece", + "masuclinity", "masculinity", + "matchamking", "matchmaking", + "materalists", "materialist", + "materialsim", "materialism", + "mathamatics", "mathematics", + "mathcmaking", "matchmaking", + "mathemagics", "mathematics", + "mathemetics", "mathematics", + "mathimatics", "mathematics", + "matieralism", "materialism", + "maybelleine", "maybelline", + "maybelliene", "maybelline", + "maybellinne", "maybelline", + "maybellline", "maybelline", + "mdifielders", "midfielders", + "meatballers", "meatballs", + "mecernaries", "mercenaries", + "mechanicaly", "mechanically", + "mechanichal", "mechanical", + "mechaniclly", "mechanically", + "mechanicsms", "mechanisms", + "mechanisims", "mechanism", + "mechanismus", "mechanisms", + "medicaitons", "medications", + "medicineras", "medicines", + "meditatiing", 
"meditating", + "meditationg", "meditating", + "mentionning", "mentioning", + "mercanaries", "mercenaries", + "mercaneries", "mercenaries", + "mercenaires", "mercenaries", + "mercenarias", "mercenaries", + "mercenarios", "mercenaries", + "merceneries", "mercenaries", + "merchandice", "merchandise", + "merchandies", "merchandise", + "merchanidse", "merchandise", + "merchanters", "merchants", + "merchendise", "merchandise", + "merchindise", "merchandise", + "mercinaries", "mercenaries", + "mercineries", "mercenaries", + "metabolisim", "metabolism", + "metabolitic", "metabolic", + "metaphisics", "metaphysics", + "metaphorial", "metaphorical", + "metaphorics", "metaphors", + "metaphsyics", "metaphysics", + "metaphyiscs", "metaphysics", + "methodoligy", "methodology", + "metholodogy", "methodology", + "metropolian", "metropolitan", + "metropolies", "metropolis", + "metropollis", "metropolis", + "metropolois", "metropolis", + "micorcenter", "microcenter", + "micorphones", "microphones", + "microcender", "microcenter", + "microcentre", "microcenter", + "microcentro", "microcenter", + "microhpones", "microphones", + "microscrope", "microscope", + "microwavees", "microwaves", + "microwavers", "microwaves", + "midfeilders", "midfielders", + "midfiedlers", "midfielders", + "midfileders", "midfielders", + "midifelders", "midfielders", + "millienaire", "millionaire", + "millionairs", "millionaires", + "millionarie", "millionaire", + "millioniare", "millionaire", + "mindlessely", "mindlessly", + "mindlessley", "mindlessly", + "minimalstic", "minimalist", + "ministerens", "ministers", + "ministerios", "ministers", + "minneaoplis", "minneapolis", + "minneaplois", "minneapolis", + "minniapolis", "minneapolis", + "miraculaous", "miraculous", + "miraculosly", "miraculously", + "miraculousy", "miraculously", + "mircocenter", "microcenter", + "mircophones", "microphones", + "mircoscopic", "microscopic", + "miscairrage", "miscarriage", + "miscarraige", "miscarriage", + "miscarridge", "miscarriage", + "miscarriege", "miscarriage", + "mischeivous", "mischievous", + "mischevious", "mischievous", + "misdameanor", "misdemeanor", + "misdeamenor", "misdemeanor", + "misdemeaner", "misdemeanor", + "misdemenaor", "misdemeanor", + "misdemenors", "misdemeanors", + "misdimeanor", "misdemeanor", + "misdomeanor", "misdemeanor", + "miserablely", "miserably", + "misfortunte", "misfortune", + "misimformed", "misinformed", + "misinterept", "misinterpret", + "misinterpet", "misinterpret", + "misoginysts", "misogynist", + "misognyists", "misogynist", + "misogyinsts", "misogynist", + "misogynisic", "misogynistic", + "misogynistc", "misogynistic", + "misogynstic", "misogynist", + "missionaire", "missionaries", + "missionairy", "missionary", + "missionares", "missionaries", + "missionaris", "missionaries", + "missionarry", "missionary", + "missionnary", "missionary", + "mississipis", "mississippi", + "misspeeling", "misspelling", + "misspellled", "misspelled", + "mistakengly", "mistakenly", + "mistakently", "mistakenly", + "moderatedly", "moderately", + "moderateurs", "moderates", + "moderatorin", "moderation", + "modificaton", "modification", + "moisterizer", "moisturizer", + "moistruizer", "moisturizer", + "moisturizng", "moisturizing", + "moisturizor", "moisturizer", + "moistutizer", "moisturizer", + "moisutrizer", "moisturizer", + "moleculaire", "molecular", + "molestating", "molestation", + "moleststion", "molestation", + "momemtarily", "momentarily", + "momentairly", "momentarily", + "momentaraly", "momentarily", + 
"momentarely", "momentarily", + "momenterily", "momentarily", + "monestaries", "monasteries", + "monitoreada", "monitored", + "monitoreado", "monitored", + "monogameous", "monogamous", + "monolitihic", "monolithic", + "monopollies", "monopolies", + "monstorsity", "monstrosity", + "monstrasity", "monstrosity", + "monstrisity", "monstrosity", + "monstrocity", "monstrosity", + "monstrosoty", "monstrosity", + "monstrostiy", "monstrosity", + "monumentaal", "monumental", + "monumentais", "monuments", + "monumentals", "monuments", + "monumentous", "monuments", + "mositurizer", "moisturizer", + "mosntrosity", "monstrosity", + "motehrboard", "motherboard", + "mothebroard", "motherboards", + "motherbaord", "motherboard", + "motherboads", "motherboards", + "motherboars", "motherboards", + "motherborad", "motherboard", + "motherbords", "motherboards", + "motherobard", "motherboards", + "mothreboard", "motherboards", + "motivatinal", "motivational", + "motorcicles", "motorcycles", + "motorcylces", "motorcycles", + "mouthpeices", "mouthpiece", + "mulitplayer", "multiplayer", + "mulitplying", "multiplying", + "multipalyer", "multiplayer", + "multiplater", "multiplayer", + "multiplebgs", "multiples", + "multipleies", "multiples", + "multitaskng", "multitasking", + "multitudine", "multitude", + "multiverese", "multiverse", + "multyplayer", "multiplayer", + "multyplying", "multiplying", + "muncipality", "municipality", + "murdererous", "murderers", + "musicallity", "musically", + "mutliplayer", "multiplayer", + "mutliplying", "multiplying", + "mysterieuse", "mysteries", + "mysteriosly", "mysteriously", + "mysteriouly", "mysteriously", + "mysteriousy", "mysteriously", + "napoleonian", "napoleonic", + "narcisissim", "narcissism", + "narcisissts", "narcissist", + "narcisscism", "narcissism", + "narcisscist", "narcissist", + "narcissisim", "narcissism", + "narcississm", "narcissism", + "narcississt", "narcissist", + "narcissistc", "narcissistic", + "narcissitic", "narcissistic", + "narcisssism", "narcissism", + "narcisssist", "narcissist", + "narcissstic", "narcissist", + "natioanlist", "nationalist", + "nationailty", "nationality", + "nationalesl", "nationals", + "nationalisn", "nationals", + "nationalite", "nationalist", + "nationalits", "nationalist", + "nationalizm", "nationalism", + "nationalsim", "nationalism", + "neccesarily", "necessarily", + "necessairly", "necessarily", + "necessaties", "necessities", + "necesseraly", "necessarily", + "necesserily", "necessarily", + "necessiates", "necessities", + "necessitive", "necessities", + "neckbeardos", "neckbeards", + "neckbeardus", "neckbeards", + "necormancer", "necromancer", + "necromamcer", "necromancer", + "necromanser", "necromancer", + "necromencer", "necromancer", + "needlessley", "needlessly", + "negativeity", "negativity", + "negativelly", "negatively", + "negativitiy", "negativity", + "negiotating", "negotiating", + "negligiable", "negligible", + "negociating", "negotiating", + "negociation", "negotiation", + "negoitating", "negotiating", + "negoitation", "negotiation", + "negotiatied", "negotiate", + "negotiative", "negotiate", + "negotiatons", "negotiations", + "neigborhood", "neighborhood", + "neigbouring", "neighbouring", + "neighborhod", "neighborhood", + "neighbourgs", "neighbours", + "neighouring", "neighboring", + "nercomancer", "necromancer", + "nessasarily", "necessarily", + "neurologial", "neurological", + "neurosciene", "neuroscience", + "neutrallity", "neutrality", + "neverthelss", "nevertheless", + "neverthless", "nevertheless", + 
"newspapaers", "newspapers", + "newspappers", "newspapers", + "nieghboring", "neighboring", + "nightmarket", "nightmare", + "nonsencical", "nonsensical", + "nonsenscial", "nonsensical", + "nonsensicle", "nonsensical", + "normallized", "normalized", + "northwesten", "northwestern", + "nostalgisch", "nostalgic", + "noteworthly", "noteworthy", + "noticeabley", "noticeably", + "notificaton", "notification", + "notoriuosly", "notoriously", + "numericable", "numerical", + "nurtitional", "nutritional", + "nutricional", "nutritional", + "nutrutional", "nutritional", + "obamination", "abomination", + "obersvation", "observation", + "obilterated", "obliterated", + "objectivety", "objectivity", + "objectivify", "objectivity", + "objectivily", "objectivity", + "objectivley", "objectively", + "obliberated", "obliterated", + "obliderated", "obliterated", + "obligerated", "obliterated", + "oblitarated", "obliterated", + "obliteraded", "obliterated", + "obliterared", "obliterated", + "oblitirated", "obliterated", + "oblitorated", "obliterated", + "obliverated", "obliterated", + "observacion", "observation", + "observaiton", "observant", + "observasion", "observations", + "observating", "observation", + "observerats", "observers", + "obsessivley", "obsessive", + "obstruccion", "obstruction", + "obstruktion", "obstruction", + "obsturction", "obstruction", + "obversation", "observation", + "ocasionally", "occasionally", + "ocassionaly", "occasionally", + "occasionals", "occasions", + "occasionaly", "occasionally", + "occasionnal", "occasional", + "occassional", "occasional", + "occassioned", "occasioned", + "occurrances", "occurrences", + "offensivley", "offensively", + "offesnively", "offensively", + "officiallly", "officially", + "olbiterated", "obliterated", + "omniscienct", "omniscient", + "operacional", "operational", + "operasional", "operational", + "operationel", "operational", + "oppresssing", "oppressing", + "oppresssion", "oppression", + "opprotunity", "opportunity", + "optimisitic", "optimistic", + "optimizaton", "optimization", + "optmization", "optimization", + "orchestraed", "orchestrated", + "orchestrial", "orchestra", + "oreintation", "orientation", + "organisaton", "organisation", + "organiserad", "organised", + "organistion", "organisation", + "organizarea", "organizer", + "organizarem", "organizer", + "organizarme", "organizer", + "organizarte", "organizer", + "organiztion", "organization", + "oridinarily", "ordinarily", + "orientacion", "orientation", + "originially", "originally", + "originnally", "originally", + "origniality", "originality", + "ostensiably", "ostensibly", + "ostensibily", "ostensibly", + "outclasssed", "outclassed", + "outnunbered", "outnumbered", + "outperfroms", "outperform", + "outpreforms", "outperform", + "outrageosly", "outrageously", + "outrageouly", "outrageously", + "outragerous", "outrageous", + "outskirters", "outskirts", + "outsorucing", "outsourcing", + "outsourcade", "outsourced", + "outsoursing", "outsourcing", + "overbraking", "overbearing", + "overcapping", "overlapping", + "overcharing", "overarching", + "overclcoked", "overclocked", + "overclicked", "overclocked", + "overcloaked", "overclocked", + "overclocing", "overclocking", + "overclockig", "overclocking", + "overclocled", "overclocked", + "overcomeing", "overcoming", + "overcomming", "overcoming", + "overeaching", "overarching", + "overfapping", "overlapping", + "overheading", "overheating", + "overhooking", "overlooking", + "overhwelmed", "overwhelmed", + "overkapping", "overlapping", + 
"overklocked", "overclocked", + "overlapsing", "overlapping", + "overlcocked", "overclocked", + "overlcoking", "overlooking", + "overlooming", "overlooking", + "overloooked", "overlooked", + "overlordess", "overlords", + "overmapping", "overlapping", + "overpooling", "overlooking", + "overpovered", "overpowered", + "overpoweing", "overpowering", + "overreacing", "overreacting", + "overreactin", "overreaction", + "overreacton", "overreaction", + "overshaddow", "overshadowed", + "overshadowd", "overshadowed", + "overtapping", "overlapping", + "overthining", "overthinking", + "overthinkig", "overthinking", + "overvlocked", "overclocked", + "overwealmed", "overwhelmed", + "overwelming", "overwhelming", + "overwhelemd", "overwhelmed", + "overwhelimg", "overwhelm", + "overwheling", "overwhelming", + "overwhemled", "overwhelmed", + "overwhlemed", "overwhelmed", + "overwritted", "overwrite", + "pakistanais", "pakistani", + "pakistanezi", "pakistani", + "palceholder", "placeholder", + "palesitnian", "palestinians", + "palestenian", "palestinian", + "palestinain", "palestinians", + "palestinans", "palestinians", + "palestinier", "palestine", + "palistinian", "palestinian", + "palythrough", "playthrough", + "papanicalou", "papanicolaou", + "parachutage", "parachute", + "paragraphes", "paragraphs", + "paramedicks", "paramedics", + "paramedicos", "paramedics", + "parameteres", "parameters", + "paranthesis", "parenthesis", + "parapharsed", "paraphrase", + "paraprhased", "paraphrase", + "parasitisme", "parasites", + "parenthasis", "parenthesis", + "parenthesys", "parentheses", + "parenthises", "parenthesis", + "parenthisis", "parenthesis", + "parliamenty", "parliamentary", + "parntership", "partnership", + "parrallelly", "parallelly", + "partecipant", "participant", + "partecipate", "participate", + "parternship", "partnership", + "partiarchal", "patriarchal", + "particapate", "participate", + "particiapte", "participate", + "participait", "participant", + "participans", "participants", + "participare", "participate", + "participatd", "participant", + "participati", "participant", + "participats", "participant", + "participent", "participant", + "particpiate", "participated", + "particually", "particularly", + "particulaly", "particularly", + "particulary", "particularly", + "partnetship", "partnership", + "partonizing", "patronizing", + "passionatly", "passionately", + "passionetly", "passionately", + "passionnate", "passionate", + "passporters", "passports", + "pathologial", "pathological", + "patriarchia", "patriarchal", + "patriarcial", "patriarchal", + "patriarical", "patriarchal", + "patriotisch", "patriotic", + "patriotisim", "patriotism", + "patriottism", "patriotism", + "patronozing", "patronizing", + "peacefullly", "peacefully", + "pedestirans", "pedestrians", + "pedestrains", "pedestrians", + "pedophilies", "pedophile", + "pedophilles", "pedophile", + "penetracion", "penetration", + "penetrading", "penetrating", + "penetrarion", "penetration", + "penninsular", "peninsular", + "pennsylvnia", "pennsylvania", + "pepperocini", "pepperoni", + "percantages", "percentages", + "percautions", "precautions", + "percentille", "percentile", + "percpetions", "perceptions", + "percusssion", "percussion", + "perdicament", "predicament", + "perdictable", "predictable", + "perdictions", "predictions", + "perephirals", "peripherals", + "pereptually", "perpetually", + "perferences", "preferences", + "perfomrance", "performances", + "perforamnce", "performances", + "performaces", "performances", + "performacne", 
"performances", + "performanes", "performances", + "performanse", "performances", + "performence", "performance", + "performnace", "performances", + "perfromance", "performance", + "perhiperals", "peripherals", + "perihperals", "peripherals", + "periodicaly", "periodically", + "periperhals", "peripherals", + "periphereal", "peripheral", + "peripherial", "peripheral", + "periphirals", "peripherals", + "periphreals", "peripherals", + "periphrials", "peripherals", + "perjorative", "pejorative", + "perliminary", "preliminary", + "permamently", "permanently", + "permanantly", "permanently", + "permaturely", "prematurely", + "permenantly", "permanently", + "permenently", "permanently", + "perminantly", "permanently", + "perminently", "permanently", + "permisisons", "permissions", + "permissable", "permissible", + "permisssion", "permissions", + "pernamently", "permanently", + "perosnality", "personality", + "perparation", "preparation", + "perpatrated", "perpetrated", + "perpatrator", "perpetrator", + "perpatuated", "perpetuated", + "perpatuates", "perpetuates", + "perpertated", "perpetuated", + "perpertator", "perpetrators", + "perpetraded", "perpetrated", + "perpetrador", "perpetrator", + "perpetraron", "perpetrator", + "perpetrater", "perpetrator", + "perpetuaded", "perpetuated", + "perpetutate", "perpetuate", + "perpetuties", "perpetuates", + "perpitrated", "perpetrated", + "perpitrator", "perpetrator", + "perpretated", "perpetrated", + "perpretator", "perpetrators", + "perpsective", "perspective", + "perputrator", "perpetrator", + "perputually", "perpetually", + "perputuated", "perpetuated", + "perputuates", "perpetuates", + "perrogative", "prerogative", + "persceptive", "perspectives", + "persectuion", "persecution", + "persecucion", "persecution", + "persecusion", "persecution", + "persecutted", "persecuted", + "persepctive", "perspective", + "persicution", "persecution", + "persistance", "persistence", + "persistante", "persistent", + "persistense", "persistence", + "persistente", "persistence", + "personhoood", "personhood", + "perspecitve", "perspective", + "perspectief", "perspective", + "perspektive", "perspective", + "persuassion", "persuasion", + "persuassive", "persuasive", + "persucution", "persecution", + "persumption", "presumption", + "pertubation", "perturbation", + "pessimestic", "pessimistic", + "pharamcists", "pharmacist", + "phenomenona", "phenomena", + "philadelpha", "philadelphia", + "philadelpia", "philadelphia", + "philiphines", "philippines", + "philippenes", "philippines", + "philippenis", "philippines", + "philippides", "philippines", + "philippinas", "philippines", + "philippinos", "philippines", + "philisopher", "philosopher", + "phillipines", "philippines", + "philosipher", "philosopher", + "philosopers", "philosophers", + "philosophae", "philosopher", + "philosophia", "philosophical", + "philosopies", "philosophies", + "philosphies", "philosophies", + "philospoher", "philosopher", + "photograhed", "photographed", + "photograher", "photographer", + "photograhic", "photographic", + "photograhpy", "photography", + "photograped", "photographed", + "photograper", "photographer", + "photograpgh", "photographs", + "photograpic", "photographic", + "photogrpahs", "photographs", + "photogrpahy", "photography", + "physcedelic", "psychedelic", + "physciatric", "psychiatric", + "physcopaths", "psychopaths", + "piankillers", "painkillers", + "pilgrimmage", "pilgrimage", + "pitchforcks", "pitchforks", + "pitchforkes", "pitchforks", + "plaestinian", "palestinian", + 
"plagiariasm", "plagiarism", + "planeswaker", "planeswalker", + "planeswaler", "planeswalker", + "planeswalkr", "planeswalker", + "platfromers", "platformer", + "playhtrough", "playthrough", + "playthorugh", "playthrough", + "playthourgh", "playthrough", + "playthroguh", "playthroughs", + "playthrougs", "playthroughs", + "playthrouhg", "playthroughs", + "playthtough", "playthrough", + "playtrhough", "playthrough", + "ploretariat", "proletariat", + "policitally", "politically", + "policitians", "politicians", + "politicains", "politicians", + "politicanti", "politician", + "politiciens", "politicians", + "politiicans", "politician", + "polititians", "politicians", + "polyphonyic", "polyphonic", + "pomegranite", "pomegranate", + "popluations", "populations", + "poportional", "proportional", + "popoulation", "population", + "porjectiles", "projectiles", + "porletariat", "proletariat", + "pornagraphy", "pornography", + "pornograghy", "pornography", + "pornograhpy", "pornography", + "pornograpgy", "pornography", + "pornogrophy", "pornography", + "pornogrpahy", "pornography", + "porportions", "proportions", + "portestants", "protestants", + "portuguease", "portuguese", + "portuguesse", "portuguese", + "positionial", "positional", + "positionnal", "positional", + "positionned", "positioned", + "positiveity", "positivity", + "positiviely", "positively", + "positivisme", "positives", + "positivisty", "positivity", + "positivitey", "positivity", + "positivitiy", "positivity", + "possesseurs", "possesses", + "possesssion", "possessions", + "possestions", "possessions", + "possiblilty", "possibility", + "potencially", "potentially", + "potentailly", "potentially", + "powerhourse", "powerhouse", + "powerlifing", "powerlifting", + "powerliftng", "powerlifting", + "pracitcally", "practically", + "practicarlo", "practical", + "practioners", "practitioners", + "practitions", "practitioners", + "pragmatisch", "pragmatic", + "precausions", "precautions", + "precedessor", "predecessor", + "precendence", "precedence", + "precentages", "percentages", + "preconceved", "preconceived", + "preconcieve", "preconceived", + "precuations", "precautions", + "predacessor", "predecessor", + "predecesser", "predecessor", + "predections", "predictions", + "predescesor", "predecessors", + "predesessor", "predecessors", + "predesposed", "predisposed", + "predessecor", "predecessor", + "predicatble", "predictable", + "predicement", "predicament", + "predicessor", "predecessor", + "prediciment", "predicament", + "predicitons", "predictions", + "predictible", "predictable", + "predictious", "predictions", + "predictment", "predicament", + "predisposte", "predisposed", + "predocessor", "predecessor", + "preferabbly", "preferably", + "preferabely", "preferable", + "preferabley", "preferably", + "preferablly", "preferably", + "preferances", "preferences", + "preferenser", "preferences", + "preferental", "preferential", + "preferentes", "preferences", + "preferrably", "preferably", + "preferrring", "preferring", + "preformance", "performance", + "pregnanices", "pregnancies", + "pregnencies", "pregnancies", + "pregorative", "prerogative", + "preipherals", "peripherals", + "prejudicies", "prejudice", + "preleminary", "preliminary", + "prelimanary", "preliminary", + "prelimenary", "preliminary", + "premanently", "permanently", + "prematuraly", "prematurely", + "prematurily", "prematurely", + "prematurley", "prematurely", + "premilinary", "preliminary", + "premissible", "permissible", + "premissions", "permissions", + "preorderded", 
"preordered", + "preorderers", "preorders", + "preparacion", "preparation", + "preperation", "preparation", + "prepetrated", "perpetrated", + "prepetrator", "perpetrator", + "prepetually", "perpetually", + "prepetuated", "perpetuated", + "prepetuates", "perpetuates", + "preporation", "preparation", + "preposterus", "preposterous", + "prerequesit", "prerequisite", + "prerequiste", "prerequisite", + "prerequites", "prerequisite", + "prerogitive", "prerogative", + "prerogotive", "prerogative", + "prescripton", "prescription", + "presecution", "persecution", + "presedintia", "presidential", + "presentaion", "presentation", + "presentatin", "presentations", + "preservaton", "preservation", + "preservered", "preserved", + "presidencey", "presidency", + "presidental", "presidential", + "presidentcy", "presidency", + "presistence", "persistence", + "presitgious", "prestigious", + "presitigous", "prestigious", + "presomption", "presumption", + "prespective", "perspective", + "pressureing", "pressuring", + "prestegious", "prestigious", + "prestigeous", "prestigious", + "prestigieus", "prestigious", + "prestigiosa", "prestigious", + "prestigiose", "prestigious", + "prestigiosi", "prestigious", + "prestigioso", "prestigious", + "prestiguous", "prestigious", + "presumabely", "presumably", + "presumabley", "presumably", + "presumptous", "presumptuous", + "presumptuos", "presumptuous", + "pretencious", "pretentious", + "pretendendo", "pretended", + "pretensious", "pretentious", + "pretentieus", "pretentious", + "prevailaing", "prevailing", + "prevailling", "prevailing", + "preventitve", "preventative", + "preventivno", "prevention", + "primatively", "primitively", + "princessses", "princesses", + "principales", "principles", + "principalis", "principals", + "principielt", "principle", + "privatizied", "privatized", + "priveledges", "privileges", + "privelleges", "privileges", + "privilegeds", "privileges", + "privilegied", "privileged", + "privilegien", "privilege", + "privilegier", "privilege", + "privilegies", "privilege", + "proactivley", "proactive", + "probabilaty", "probability", + "probabilite", "probabilities", + "probalibity", "probability", + "probelmatic", "problematic", + "problamatic", "problematic", + "problimatic", "problematic", + "problomatic", "problematic", + "proccedings", "proceedings", + "proccessing", "processing", + "proceddings", "proceedings", + "procedureal", "procedural", + "procedurial", "procedural", + "procedurile", "procedure", + "processesor", "processors", + "processeurs", "processes", + "processsors", "processors", + "procrastion", "procreation", + "procriation", "procreation", + "prodcutions", "productions", + "prodictions", "productions", + "producerats", "producers", + "producitons", "productions", + "productioin", "productions", + "productivos", "productions", + "productivty", "productivity", + "produktions", "productions", + "professinal", "professional", + "professionl", "professionals", + "professoras", "professors", + "professores", "professors", + "professorin", "profession", + "professsion", "professions", + "proficiancy", "proficiency", + "proficienct", "proficient", + "proficienty", "proficiency", + "proficinecy", "proficiency", + "profitabile", "profitable", + "progerssion", "progressions", + "progerssive", "progressives", + "programable", "programmable", + "programmare", "programmer", + "programmars", "programmers", + "programmate", "programme", + "programmets", "programmers", + "programmeur", "programmer", + "programmier", "programmer", + "programmmed", 
"programme", + "programmmer", "programme", + "progresison", "progressions", + "progressers", "progresses", + "progressief", "progressive", + "progressino", "progressions", + "progressivo", "progression", + "progressoin", "progressions", + "progressvie", "progressives", + "prohabition", "prohibition", + "prohibation", "prohibition", + "prohibicion", "prohibition", + "prohibiteds", "prohibits", + "prohibitied", "prohibited", + "prohibitifs", "prohibits", + "prohibitivo", "prohibition", + "prohibitons", "prohibits", + "prohibitted", "prohibited", + "projecticle", "projectile", + "projectives", "projectiles", + "projectlies", "projectiles", + "prolateriat", "proletariat", + "proletariet", "proletariat", + "proletariot", "proletariat", + "proletaryat", "proletariat", + "proleteriat", "proletariat", + "prolitariat", "proletariat", + "prologomena", "prolegomena", + "promenantly", "prominently", + "promenently", "prominently", + "prometheius", "prometheus", + "prometheous", "prometheus", + "promethesus", "prometheus", + "prometheyus", "prometheus", + "promimently", "prominently", + "prominantly", "prominently", + "prominately", "prominently", + "promiscious", "promiscuous", + "promocional", "promotional", + "promsicuous", "promiscuous", + "pronography", "pornography", + "pronoucning", "pronouncing", + "pronounched", "pronounced", + "pronunciato", "pronunciation", + "propaganada", "propaganda", + "properitary", "proprietary", + "propertiary", "proprietary", + "propertions", "proportions", + "prophechies", "prophecies", + "propiertary", "proprietary", + "propogation", "propagation", + "proponenets", "proponents", + "proponentes", "proponents", + "proporition", "proposition", + "proportians", "proportions", + "proportinal", "proportional", + "proposicion", "proposition", + "propositivo", "proposition", + "propostions", "proportions", + "propreitary", "proprietary", + "propriatary", "proprietary", + "propriatery", "proprietary", + "propriatory", "proprietary", + "proprietery", "proprietary", + "proprietory", "proprietary", + "propriotary", "proprietary", + "proprotions", "proportions", + "propsective", "prospective", + "propulstion", "propulsion", + "prosectuion", "prosecution", + "prosectuors", "prosecutors", + "prosecuters", "prosecutors", + "prosicution", "prosecution", + "prosocution", "prosecution", + "prosperious", "prosperous", + "prospertity", "prosperity", + "prospettive", "prospective", + "prostethics", "prosthetic", + "prosthethic", "prosthetic", + "prostitites", "prostitutes", + "prostitiute", "prostitute", + "prostituate", "prostitute", + "prostitudes", "prostitutes", + "prostituees", "prostitutes", + "prostituion", "prostitution", + "prostitures", "prostitutes", + "prostitutas", "prostitutes", + "prostitutie", "prostitute", + "prostitutin", "prostitution", + "prostitutke", "prostitutes", + "prostituton", "prostitution", + "prostitutos", "prostitutes", + "protability", "portability", + "protaganist", "protagonist", + "protaginist", "protagonist", + "protagnoist", "protagonist", + "protagoinst", "protagonists", + "protagonits", "protagonists", + "protagonsit", "protagonists", + "protectings", "protections", + "protectoras", "protectors", + "protectores", "protectors", + "protectrons", "protections", + "protelariat", "proletariat", + "protestents", "protestants", + "protistants", "protestants", + "protoganist", "protagonist", + "protogonist", "protagonist", + "protostants", "protestants", + "protototype", "prototype", + "provacative", "provocative", + "provacotive", "provocative", + 
"provicative", "provocative", + "providencie", "providence", + "provinciaal", "provincial", + "provinicial", "provincial", + "provisiones", "provisions", + "provoactive", "provocative", + "provocatief", "provocative", + "provocitive", "provocative", + "provocotive", "provocative", + "provokative", "provocative", + "pscyhedelic", "psychedelic", + "pscyhiatric", "psychiatric", + "pscyhopaths", "psychopaths", + "pshyciatric", "psychiatric", + "pshycopaths", "psychopaths", + "psychaitric", "psychiatric", + "psychedilic", "psychedelic", + "psychedleic", "psychedelics", + "psychiatist", "psychiatrist", + "psychidelic", "psychedelic", + "psychodelic", "psychedelic", + "psychopants", "psychopaths", + "psychopatch", "psychopath", + "psychopatic", "psychopathic", + "psychotisch", "psychotic", + "psychriatic", "psychiatric", + "publikation", "publication", + "punctiation", "punctuation", + "puncutation", "punctuation", + "punshiments", "punishments", + "punsihments", "punishments", + "purchaseing", "purchasing", + "purchashing", "purchasing", + "purposefuly", "purposefully", + "pyschedelic", "psychedelic", + "pyschiatric", "psychiatric", + "pyschopaths", "psychopaths", + "qaurterback", "quarterback", + "qualificato", "qualification", + "qualifieres", "qualifiers", + "quantitaive", "quantitative", + "quantitatve", "quantitative", + "quantitites", "quantities", + "quantitties", "quantities", + "quarantaine", "quarantine", + "quarantenni", "quarantine", + "quartercask", "quarterbacks", + "quesitoning", "questioning", + "questionned", "questioned", + "questonable", "questionable", + "radiaoctive", "radioactive", + "radioactice", "radioactive", + "radioactief", "radioactive", + "radioaktive", "radioactive", + "radiocative", "radioactive", + "raidoactive", "radioactive", + "reaccurring", "recurring", + "reactionair", "reactionary", + "realibility", "reliability", + "realistisch", "realistic", + "reaserchers", "researchers", + "reaserching", "researching", + "reasonabley", "reasonably", + "reasonablly", "reasonably", + "reassureing", "reassuring", + "reassurring", "reassuring", + "rebuildling", "rebuilding", + "rebuplicans", "republicans", + "reccomended", "recommended", + "receptionst", "receptionist", + "recgonition", "recognition", + "recgonizing", "recognizing", + "rechargable", "rechargeable", + "recipientes", "recipients", + "reciporcate", "reciprocate", + "recipricate", "reciprocate", + "reciprocant", "reciprocate", + "reciprocite", "reciprocate", + "recivership", "receivership", + "reclutantly", "reluctantly", + "recognicing", "recognizing", + "recognision", "recognition", + "recomending", "recommending", + "recommandes", "recommends", + "recommendes", "recommends", + "recommented", "recommended", + "reconcilled", "reconcile", + "recongition", "recognition", + "recongizing", "recognizing", + "reconsidder", "reconsider", + "recrational", "recreational", + "recrutiment", "recruitment", + "rectangluar", "rectangular", + "rectangualr", "rectangular", + "rectengular", "rectangular", + "recuritment", "recruitment", + "redundantcy", "redundancy", + "reevalulate", "reevaluate", + "reevalutate", "reevaluate", + "reevaulated", "reevaluate", + "refelctions", "reflections", + "referancing", "referencing", + "refereneced", "referenced", + "refereneces", "references", + "referincing", "referencing", + "referrences", "references", + "reflectivos", "reflections", + "refreshener", "refresher", + "refrubished", "refurbished", + "refubrished", "refurbished", + "refurbushed", "refurbished", + "regeneratin", 
"regeneration", + "regeneraton", "regeneration", + "registerdns", "registers", + "registeries", "registers", + "registerred", "registered", + "registraion", "registration", + "regocnition", "recognition", + "regresssion", "regression", + "regresssive", "regressive", + "regualtions", "regulations", + "regulationg", "regulating", + "regulatiors", "regulators", + "reinassance", "renaissance", + "reinforcemt", "reinforcement", + "reinfornced", "reinforced", + "reinitalise", "reinitialise", + "reinitalize", "reinitialize", + "reinstaling", "reinstalling", + "reinstallng", "reinstalling", + "reisntalled", "reinstalled", + "relaibility", "reliability", + "relatiation", "retaliation", + "relationshp", "relationships", + "relativiser", "relatives", + "relativisme", "relatives", + "relativitiy", "relativity", + "relativitly", "relativity", + "relcutantly", "reluctantly", + "relentlesly", "relentlessly", + "relentlessy", "relentlessly", + "relevations", "revelations", + "relfections", "reflections", + "religeously", "religiously", + "religionens", "religions", + "religioners", "religions", + "relpacement", "replacement", + "reluctently", "reluctantly", + "remarkabley", "remarkably", + "remarkablly", "remarkably", + "remasterred", "remastered", + "remembrence", "remembrance", + "reminescent", "reminiscent", + "reminicient", "reminiscent", + "reminiscant", "reminiscent", + "reminiscint", "reminiscent", + "reminscient", "reminiscent", + "reminsicent", "reminiscent", + "renaiisance", "renaissance", + "renaiscance", "renaissance", + "renaissanse", "renaissance", + "renaissence", "renaissance", + "renassaince", "renaissance", + "renassiance", "renaissance", + "reniassance", "renaissance", + "rennovating", "renovating", + "rennovation", "renovation", + "repalcement", "replacement", + "repbulicans", "republicans", + "repeateadly", "repeatedly", + "repectively", "respectively", + "repersented", "represented", + "replacemnet", "replacements", + "replacemnts", "replacements", + "repleacable", "replaceable", + "repositiory", "repository", + "representas", "represents", + "representes", "represents", + "represssion", "repression", + "reproducion", "reproduction", + "reproducive", "reproductive", + "repsectable", "respectable", + "repsonsible", "responsible", + "repsonsibly", "responsibly", + "republcians", "republicans", + "republician", "republican", + "republicons", "republicans", + "repuglicans", "republicans", + "requeriment", "requirement", + "requierment", "requirements", + "resemblence", "resemblance", + "resemblense", "resembles", + "reserachers", "researchers", + "reseraching", "researching", + "resgination", "resignation", + "residencial", "residential", + "residentail", "residential", + "residentual", "residential", + "resignacion", "resignation", + "resignating", "resignation", + "resignement", "resignment", + "resignition", "resignation", + "resintalled", "reinstalled", + "resistansen", "resistances", + "resistanses", "resistances", + "resistences", "resistances", + "resistnaces", "resistances", + "resoltuions", "resolutions", + "resotration", "restoration", + "resoultions", "resolutions", + "respecatble", "respectable", + "respectabil", "respectable", + "respectfuly", "respectfully", + "respectible", "respectable", + "respectivly", "respectively", + "respectuful", "respectful", + "respektable", "respectable", + "resperatory", "respiratory", + "resperitory", "respiratory", + "respiritory", "respiratory", + "respitatory", "respiratory", + "responcible", "responsible", + "responcibly", "responsibly", + 
"respondendo", "responded", + "responisble", "responsible", + "responisbly", "responsibly", + "responsable", "responsible", + "responsably", "responsibly", + "responsbile", "responsible", + "responsbily", "responsibly", + "responsibel", "responsibly", + "responsibil", "responsibly", + "responsivle", "responsive", + "resporatory", "respiratory", + "respository", "repository", + "respriatory", "respiratory", + "ressembling", "resembling", + "ressurected", "resurrected", + "restaraunts", "restaurants", + "restaruants", "restaurants", + "restauraunt", "restaurant", + "restaurents", "restaurants", + "resteraunts", "restaurants", + "restirction", "restriction", + "restorarion", "restoration", + "restorating", "restoration", + "restrainted", "restrained", + "restrective", "restrictive", + "restriccion", "restriction", + "restricitng", "restricting", + "restriciton", "restrictions", + "restricitve", "restrictive", + "restricteds", "restricts", + "restricters", "restricts", + "restrictied", "restrictive", + "restrictifs", "restricts", + "restrictins", "restricts", + "restrictios", "restricts", + "restrictivo", "restriction", + "restrictons", "restricts", + "restriktion", "restriction", + "restriktive", "restrictive", + "restrittive", "restrictive", + "restructing", "restricting", + "restruction", "restriction", + "restuarants", "restaurants", + "resturaunts", "restaurants", + "resurecting", "resurrecting", + "resurrecion", "resurrection", + "retailation", "retaliation", + "retalitated", "retaliated", + "retardathon", "retardation", + "retardating", "retardation", + "retardatron", "retardation", + "retartation", "retardation", + "retirbution", "retribution", + "retrebution", "retribution", + "retribucion", "retribution", + "retribuiton", "retribution", + "retributivo", "retribution", + "retribvtion", "retribution", + "retrobution", "retribution", + "retrubution", "retribution", + "revealtions", "revelations", + "revelaitons", "revelations", + "revolations", "revolutions", + "revoultions", "revolutions", + "ridiculious", "ridiculous", + "ridiculosly", "ridiculously", + "ridiculouly", "ridiculously", + "ridiculousy", "ridiculously", + "rightfullly", "rightfully", + "rolepalying", "roleplaying", + "romanticaly", "romantically", + "roundabaout", "roundabout", + "roundabount", "roundabout", + "rudimentery", "rudimentary", + "rudimentory", "rudimentary", + "ruidmentary", "rudimentary", + "sacrifacing", "sacrificing", + "sacrificare", "sacrifice", + "sacrificied", "sacrifice", + "sacrificies", "sacrifice", + "sacrifieced", "sacrificed", + "sacrifising", "sacrificing", + "sacrifizing", "sacrificing", + "salughtered", "slaughtered", + "sanctionned", "sanctioned", + "sarcastisch", "sarcastic", + "saskatchewn", "saskatchewan", + "saskatchwan", "saskatchewan", + "satisfacion", "satisfaction", + "satisfacory", "satisfactory", + "scandanavia", "scandinavia", + "scandanivia", "scandinavian", + "scandenavia", "scandinavia", + "scandianvia", "scandinavian", + "scandimania", "scandinavia", + "scandinaiva", "scandinavian", + "scandinavan", "scandinavian", + "scandivania", "scandinavian", + "scandonavia", "scandinavia", + "scarificing", "sacrificing", + "scheduleing", "scheduling", + "schedulling", "scheduling", + "schoalrship", "scholarships", + "scholarhips", "scholarship", + "scholarstic", "scholastic", + "scholership", "scholarship", + "scholorship", "scholarship", + "scientiests", "scientists", + "scnadinavia", "scandinavia", + "scrambleing", "scrambling", + "screenshoot", "screenshot", + "seamlessley", 
"seamlessly", + "sedentarity", "sedentary", + "seflishness", "selfishness", + "segergation", "segregation", + "segragation", "segregation", + "segregacion", "segregation", + "segretation", "segregation", + "segrigation", "segregation", + "selectivley", "selectively", + "selfeshness", "selfishness", + "senitmental", "sentimental", + "sensacional", "sensational", + "sensasional", "sensational", + "sensationel", "sensational", + "sensetional", "sensational", + "sensitivety", "sensitivity", + "sentamental", "sentimental", + "sentemental", "sentimental", + "sentenceing", "sentencing", + "sentimentos", "sentiments", + "sentimentul", "sentimental", + "separatedly", "separately", + "separatelly", "separately", + "separatisme", "separates", + "separatiste", "separates", + "sepculating", "speculating", + "serivceable", "serviceable", + "serviciable", "serviceable", + "settelement", "settlement", + "settelments", "settlements", + "settlemetns", "settlements", + "sexualizied", "sexualized", + "shakeapeare", "shakespeare", + "shakepseare", "shakespeare", + "shakesphere", "shakespeare", + "shanenigans", "shenanigans", + "shareholdes", "shareholders", + "sharpeneing", "sharpening", + "sharpenning", "sharpening", + "shatterling", "shattering", + "shatterring", "shattering", + "sheakspeare", "shakespeare", + "shenadigans", "shenanigans", + "shenanagans", "shenanigans", + "shenanagins", "shenanigans", + "shenanegans", "shenanigans", + "shenanegins", "shenanigans", + "shenangians", "shenanigans", + "shenanigens", "shenanigans", + "shenanigins", "shenanigans", + "shenenigans", "shenanigans", + "sheninigans", "shenanigans", + "shennaigans", "shenanigans", + "shortenning", "shortening", + "shortenting", "shortening", + "signficiant", "significant", + "signifantly", "significantly", + "significane", "significance", + "significato", "significant", + "signifigant", "significant", + "signifikant", "significant", + "signitories", "signatories", + "signularity", "singularity", + "similarites", "similarities", + "similarlity", "similarity", + "similiarity", "similarity", + "simluations", "simulations", + "simplefying", "simplifying", + "simplicitly", "simplicity", + "simplifiing", "simplifying", + "simplisitic", "simplistic", + "simplyifing", "simplifying", + "simualtions", "simulations", + "simulatious", "simulations", + "simultaneos", "simultaneous", + "simultaneus", "simultaneous", + "simultanous", "simultaneous", + "singluarity", "singularity", + "singualrity", "singularity", + "singulairty", "singularity", + "singularily", "singularity", + "sitautional", "situational", + "situacional", "situational", + "situationly", "situational", + "siutational", "situational", + "skatebaords", "skateboard", + "skateboader", "skateboard", + "skepticisim", "skepticism", + "skillshoots", "skillshots", + "skillshosts", "skillshots", + "slaugthered", "slaughtered", + "slefishness", "selfishness", + "sluaghtered", "slaughtered", + "smarthpones", "smartphones", + "snowboaring", "snowboarding", + "snowbolling", "snowballing", + "snowfalling", "snowballing", + "socailizing", "socializing", + "socialicing", "socializing", + "socialistes", "socialists", + "socialistos", "socialists", + "socializare", "socialize", + "sociapathic", "sociopathic", + "sociologial", "sociological", + "sociopathes", "sociopaths", + "sociopathis", "sociopaths", + "sociophatic", "sociopathic", + "solidariety", "solidarity", + "somethingis", "somethings", + "sorrounding", "surrounding", + "soundtrakcs", "soundtracks", + "southamtpon", "southampton", + 
"southanpton", "southampton", + "southapmton", "southampton", + "southernese", "southerners", + "southerness", "southerners", + "southernest", "southerners", + "southernors", "southerners", + "southmapton", "southampton", + "southtampon", "southampton", + "soveregnity", "sovereignty", + "sovereighty", "sovereignty", + "sovereingty", "sovereignty", + "sovereinity", "sovereignty", + "soveriegnty", "sovereignty", + "soveriengty", "sovereignty", + "soverignity", "sovereignty", + "specailists", "specialists", + "specailized", "specialized", + "specailizes", "specializes", + "specatcular", "spectacular", + "specialiced", "specialized", + "specialices", "specializes", + "specialites", "specializes", + "speciallist", "specialist", + "speciallity", "specially", + "speciallize", "specialize", + "specialzied", "specialized", + "specifcally", "specifically", + "specificaly", "specifically", + "specificato", "specification", + "specificies", "specifics", + "specifiying", "specifying", + "specilaized", "specialize", + "speciliazed", "specialize", + "spectatores", "spectators", + "spectatular", "spectacular", + "spectauclar", "spectacular", + "spectaulars", "spectaculars", + "spectecular", "spectacular", + "specualting", "speculating", + "specualtion", "speculation", + "specualtive", "speculative", + "specularite", "speculative", + "speculaties", "speculative", + "spiritualiy", "spiritually", + "spiritualty", "spirituality", + "spirituella", "spiritually", + "spirtiually", "spiritually", + "spirutually", "spiritually", + "spitirually", "spiritually", + "sponatenous", "spontaneous", + "sponatneous", "spontaneous", + "sponsership", "sponsorship", + "sponsorhips", "sponsorship", + "sponsorhsip", "sponsorship", + "sponsorshop", "sponsorship", + "spontaenous", "spontaneous", + "spontainous", "spontaneous", + "spontaneuos", "spontaneous", + "spontanious", "spontaneous", + "sponteanous", "spontaneous", + "sponteneous", "spontaneous", + "spreadhseet", "spreadsheet", + "spreadsheat", "spreadsheet", + "spreadshets", "spreadsheets", + "spreedsheet", "spreadsheet", + "springfeild", "springfield", + "springfiled", "springfield", + "sprinklered", "sprinkled", + "squirrelies", "squirrels", + "squirrelius", "squirrels", + "stabilizare", "stabilize", + "stabilizied", "stabilize", + "stabilizier", "stabilize", + "stabilizies", "stabilize", + "staggerring", "staggering", + "staggerwing", "staggering", + "stationairy", "stationary", + "stationerad", "stationed", + "stationnary", "stationary", + "statisitcal", "statistical", + "statisticly", "statistical", + "statistisch", "statistics", + "statsitical", "statistical", + "stereotpyes", "stereotypes", + "stereotying", "stereotyping", + "steriotypes", "stereotypes", + "steroetypes", "stereotypes", + "steryotypes", "stereotypes", + "stimluating", "stimulating", + "stimualting", "stimulating", + "stimualtion", "stimulation", + "stimulantes", "stimulants", + "stockpilled", "stockpile", + "stormfrount", "stormfront", + "storyteling", "storytelling", + "straightden", "straightened", + "straightend", "straightened", + "straightmen", "straighten", + "straightned", "straightened", + "straightner", "straighten", + "strangeshit", "strangest", + "strategisch", "strategic", + "strategiske", "strategies", + "strawberies", "strawberries", + "strawberrry", "strawberry", + "strawbrerry", "strawberry", + "strenghened", "strengthened", + "strenghtend", "strengthen", + "strenghtens", "strengthen", + "strengtened", "strengthened", + "structurels", "structures", + "strugglebus", "struggles", + 
"struggleing", "struggling", + "stubborness", "stubbornness", + "stutterring", "stuttering", + "subcatagory", "subcategory", + "subconscius", "subconscious", + "subconscous", "subconscious", + "subisdizing", "subsidizing", + "subjectivly", "subjectively", + "submergered", "submerged", + "submisisons", "submissions", + "subredddits", "subreddits", + "subscirbers", "subscribers", + "subscribbed", "subscribe", + "subscribber", "subscriber", + "subscriping", "subscribing", + "subscriptin", "subscriptions", + "subscripton", "subscription", + "subsequenty", "subsequently", + "subsidiezed", "subsidized", + "subsidizied", "subsidized", + "subsidizies", "subsidize", + "subsiziding", "subsidizing", + "subsquently", "subsequently", + "subsrcibers", "subscribers", + "substancial", "substantial", + "substansial", "substantial", + "substansive", "substantive", + "substantied", "substantive", + "substanties", "substantive", + "substential", "substantial", + "substitiute", "substitute", + "substituded", "substituted", + "substitudes", "substitutes", + "substituion", "substitution", + "substitures", "substitutes", + "substitutie", "substitutes", + "substitutos", "substitutes", + "substitutue", "substitutes", + "substracted", "subtracted", + "suburburban", "suburban", + "succesfully", "successfully", + "successeurs", "successes", + "successfull", "successful", + "successfuly", "successfully", + "successsion", "succession", + "successully", "successfully", + "succsesfull", "successfully", + "sucessfully", "successfully", + "sucseptible", "susceptible", + "sufficently", "sufficiently", + "suggestieve", "suggestive", + "sumbissions", "submissions", + "sunglassses", "sunglasses", + "superceeded", "superseded", + "superficiel", "superficial", + "superfulous", "superfluous", + "superhereos", "superhero", + "superifical", "superficial", + "superiorest", "superiors", + "supermacist", "supremacist", + "supermakert", "supermarkets", + "supermakret", "supermarkets", + "supermakter", "supermarkets", + "supermarkts", "supermarkets", + "supermaster", "supermarkets", + "supernatual", "supernatural", + "supersition", "supervision", + "superstiton", "superstition", + "supervisers", "supervisors", + "supervisior", "supervisor", + "suplimented", "supplemented", + "supplaments", "supplements", + "supplemetal", "supplemental", + "supporteurs", "supporters", + "supposedely", "supposedly", + "supposidely", "supposedly", + "supposingly", "supposedly", + "suppresions", "suppression", + "suppresssor", "suppressor", + "supramacist", "supremacist", + "supremacits", "supremacist", + "supremasist", "supremacist", + "supremicist", "supremacist", + "suprimacist", "supremacist", + "suprisingly", "surprisingly", + "suprizingly", "surprisingly", + "suroundings", "surroundings", + "surpemacist", "supremacist", + "surprisinly", "surprisingly", + "surreptious", "surreptitious", + "surroundign", "surroundings", + "surroundigs", "surrounds", + "surroundins", "surrounds", + "surroundngs", "surrounds", + "surveilence", "surveillance", + "survivabily", "survivability", + "susbtantial", "substantial", + "susbtantive", "substantive", + "suscepitble", "susceptible", + "susceptable", "susceptible", + "suscpetible", "susceptible", + "susecptible", "susceptible", + "suspectible", "susceptible", + "suspiciosly", "suspiciously", + "suspiciouly", "suspiciously", + "suspiciouns", "suspicion", + "suspicision", "suspicions", + "suspicisons", "suspicions", + "sustainible", "sustainable", + "switerzland", "switzerland", + "switserland", "switzerland", + 
"switzlerand", "switzerland", + "swizterland", "switzerland", + "swtizerland", "switzerland", + "symapthetic", "sympathetic", + "symmertical", "symmetrical", + "sympathatic", "sympathetic", + "sympathiers", "sympathizers", + "sympathsize", "sympathize", + "sympethetic", "sympathetic", + "symphatetic", "sympathetic", + "symphatized", "sympathize", + "symphatizer", "sympathizers", + "symphatizes", "sympathize", + "sympothetic", "sympathetic", + "synthesasia", "synthesis", + "synthesesia", "synthesis", + "sypmathetic", "sympathetic", + "tabelspoons", "tablespoons", + "tablepsoons", "tablespoons", + "tablespooon", "tablespoon", + "tablesppons", "tablespoons", + "tailgateing", "tailgating", + "tailgatting", "tailgating", + "tangentialy", "tangentially", + "techincally", "technically", + "techincians", "technicians", + "techiniques", "techniques", + "techncially", "technically", + "technicalty", "technicality", + "technichian", "technician", + "technicials", "technicians", + "techniciens", "technicians", + "technitians", "technicians", + "technnology", "technology", + "technologia", "technological", + "techticians", "technicians", + "teleportato", "teleportation", + "teleportion", "teleporting", + "teleproting", "teleporting", + "temeprature", "temperature", + "temparament", "temperament", + "temparature", "temperature", + "temparement", "temperament", + "tempearture", "temperatures", + "temperamant", "temperament", + "temperarily", "temporarily", + "temperatues", "temperatures", + "temperaturs", "temperatures", + "temperatuur", "temperature", + "temperement", "temperament", + "tempermeant", "temperament", + "tempertaure", "temperature", + "temporairly", "temporarily", + "temporaraly", "temporarily", + "temporarity", "temporarily", + "tempreature", "temperature", + "temproarily", "temporarily", + "tempurature", "temperature", + "tepmorarily", "temporarily", + "termanology", "terminology", + "terminacion", "termination", + "terminaison", "termination", + "terminalogy", "terminology", + "terminatior", "terminator", + "terminatorn", "termination", + "terminilogy", "terminology", + "terminoligy", "terminology", + "terratorial", "territorial", + "terratories", "territories", + "terretorial", "territorial", + "terretories", "territories", + "terrirorial", "territorial", + "terrirories", "territories", + "terriroties", "territories", + "terristrial", "territorial", + "territoires", "territories", + "territorist", "terrorist", + "territority", "territory", + "terroristas", "terrorists", + "terroristes", "terrorists", + "terrorities", "territories", + "terrotorial", "territorial", + "terrotories", "territories", + "testiclular", "testicular", + "thankfullly", "thankfully", + "thanksgivng", "thanksgiving", + "theoligical", "theological", + "theoratical", "theoretical", + "theoreticly", "theoretical", + "theoritical", "theoretical", + "therapautic", "therapeutic", + "therapeudic", "therapeutic", + "therapeutuc", "therapeutic", + "therapuetic", "therapeutic", + "theraupetic", "therapeutic", + "thereaputic", "therapeutic", + "thereotical", "theoretical", + "therepeutic", "therapeutic", + "thermometor", "thermometer", + "thermometre", "thermometer", + "thermomiter", "thermometer", + "thermomoter", "thermometer", + "thermoneter", "thermometer", + "thermostaat", "thermostat", + "theroetical", "theoretical", + "thoeretical", "theoretical", + "threataning", "threatening", + "threatended", "threatened", + "threatining", "threatening", + "throttleing", "throttling", + "throughoput", "throughput", + "throughtout", 
"throughout", + "throughtput", "throughput", + "thudnerbolt", "thunderbolt", + "thunberbolt", "thunderbolt", + "thunderblot", "thunderbolt", + "thunderboat", "thunderbolt", + "thunderbots", "thunderbolt", + "thunderbowl", "thunderbolt", + "thunderjolt", "thunderbolt", + "thundervolt", "thunderbolt", + "tightenting", "tightening", + "tocuhscreen", "touchscreen", + "torrentking", "torrenting", + "torrentting", "torrenting", + "torublesome", "troublesome", + "torunaments", "tournaments", + "totalitaran", "totalitarian", + "totalitarni", "totalitarian", + "touranments", "tournaments", + "tournamnets", "tournaments", + "tournemants", "tournaments", + "tournements", "tournaments", + "tournmanets", "tournaments", + "tradicional", "traditional", + "tradionally", "traditionally", + "tradisional", "traditional", + "traditionel", "traditional", + "traditition", "tradition", + "tragicallly", "tragically", + "tramautized", "traumatized", + "tramuatized", "traumatized", + "trancendent", "transcendent", + "trancending", "transcending", + "tranclucent", "translucent", + "trandgender", "transgender", + "tranditions", "transitions", + "tranistions", "transitions", + "tranlastion", "translations", + "tranlsating", "translating", + "tranlsation", "translation", + "tranluscent", "translucent", + "trannsexual", "transsexual", + "tranpshobic", "transphobic", + "transaccion", "transaction", + "transaciton", "transactions", + "transalting", "translating", + "transaltion", "translation", + "transations", "transitions", + "transcluent", "translucent", + "transcripto", "transcription", + "transctions", "transitions", + "transculent", "translucent", + "transending", "transcending", + "transfender", "transgender", + "transferers", "transfers", + "transfering", "transferring", + "transfersom", "transforms", + "transfomers", "transforms", + "transformas", "transforms", + "transformes", "transformers", + "transformis", "transforms", + "transformus", "transforms", + "transforums", "transforms", + "transfromed", "transformed", + "transfromer", "transformers", + "transgemder", "transgender", + "transgended", "transgendered", + "transgenger", "transgender", + "transgenres", "transgender", + "transhpobic", "transphobic", + "transisions", "transitions", + "transisitor", "transistor", + "transistion", "transition", + "transistior", "transistor", + "transitiond", "transitioned", + "transitiong", "transitioning", + "translatron", "translation", + "translusent", "translucent", + "transmatter", "transmitter", + "transmision", "transmission", + "transmissin", "transmissions", + "transmisson", "transmission", + "transmittor", "transmitter", + "transmorged", "transformed", + "transmutter", "transmitter", + "transofrmed", "transformed", + "transohobic", "transphobic", + "transparant", "transparent", + "transparecy", "transparency", + "transpareny", "transparency", + "transperant", "transparent", + "transperent", "transparent", + "transphonic", "transphobic", + "transphopic", "transphobic", + "transplanet", "transplant", + "transporder", "transporter", + "transporing", "transporting", + "transportar", "transporter", + "transportng", "transporting", + "transportor", "transporter", + "transseuxal", "transsexual", + "transsexaul", "transsexual", + "transsexuel", "transsexual", + "transulcent", "translucent", + "transylvnia", "transylvania", + "tranzformer", "transformer", + "tranzitions", "transitions", + "tranzporter", "transporter", + "trasncripts", "transcripts", + "trasnferred", "transferred", + "trasnformed", "transformed", + 
"trasnformer", "transformer", + "trasngender", "transgender", + "trasnmitted", "transmitted", + "trasnmitter", "transmitter", + "trasnparent", "transparent", + "trasnphobic", "transphobic", + "trasnported", "transported", + "trasnporter", "transporter", + "traumatisch", "traumatic", + "traumetized", "traumatized", + "traumitized", "traumatized", + "travellerhd", "travelled", + "travellodge", "travelled", + "tremendeous", "tremendous", + "tremendious", "tremendous", + "tremenduous", "tremendous", + "trespessing", "trespassing", + "tresspasing", "trespassing", + "triggereing", "triggering", + "triggerring", "triggering", + "troubelsome", "troublesome", + "truamatized", "traumatized", + "trushworthy", "trustworthy", + "trustowrthy", "trustworthy", + "trustwhorty", "trustworthy", + "trustworhty", "trustworthy", + "truthfullly", "truthfully", + "tupperwears", "tupperware", + "turstworthy", "trustworthy", + "ubiquitious", "ubiquitous", + "ubiquituous", "ubiquitous", + "ukraininans", "ukrainians", + "ultimatelly", "ultimately", + "unanimoulsy", "unanimous", + "unappeasing", "unappealing", + "unappeeling", "unappealing", + "unathorised", "unauthorised", + "unattendend", "unattended", + "unatteneded", "unattended", + "unattracive", "unattractive", + "unauthoried", "unauthorized", + "unavailible", "unavailable", + "unavaliable", "unavailable", + "unaviodable", "unavoidable", + "unbalanaced", "unbalanced", + "unbraikable", "unbreakable", + "unbrakeable", "unbreakable", + "unbreakabie", "unbreakable", + "unbreakabke", "unbreakable", + "unbreakbale", "unbreakable", + "unbreakeble", "unbreakable", + "unbrearable", "unbreakable", + "uncensorred", "uncensored", + "uncertaincy", "uncertainty", + "uncertanity", "uncertainty", + "uncertianty", "uncertainty", + "unchangable", "unchangeable", + "uncompetive", "uncompetitive", + "unconcsious", "unconscious", + "unconsicous", "unconscious", + "uncouncious", "unconscious", + "undeniabely", "undeniably", + "undeniabley", "undeniably", + "undeniablly", "undeniably", + "undenialbly", "undeniably", + "underestime", "underestimate", + "undergating", "undertaking", + "undergorund", "underground", + "underheight", "underweight", + "undermiming", "undermining", + "undermindes", "undermines", + "undernearth", "underneath", + "underneight", "underweight", + "underpining", "undermining", + "underpowerd", "underpowered", + "underpowred", "underpowered", + "underratted", "underrated", + "understannd", "understands", + "understsand", "understands", + "undertacker", "undertaker", + "underwarter", "underwater", + "underwieght", "underweight", + "underwright", "underweight", + "undesireble", "undesirable", + "undesriable", "undesirable", + "undetecable", "undetectable", + "undiserable", "undesirable", + "undoubedtly", "undoubtedly", + "undoubetdly", "undoubtedly", + "undoubtadly", "undoubtedly", + "undoubtebly", "undoubtedly", + "undoubtetly", "undoubtedly", + "undreground", "underground", + "unemployeed", "unemployed", + "unemployent", "unemployment", + "unemploymed", "unemployed", + "unexpectdly", "unexpectedly", + "unexpectely", "unexpectedly", + "unfamilliar", "unfamiliar", + "unfortuante", "unfortunate", + "ungreatfull", "ungrateful", + "unilateraly", "unilaterally", + "unilaterlly", "unilaterally", + "unimportent", "unimportant", + "uninspiried", "uninspired", + "uninstaling", "uninstalling", + "uninstallng", "uninstalling", + "uninteresed", "uninterested", + "uniquesness", "uniqueness", + "unisntalled", "uninstalled", + "universella", "universally", + "universites", 
"universities", + "univesities", "universities", + "unjustifyed", "unjustified", + "unknowinlgy", "unknowingly", + "unkowningly", "unknowingly", + "unnecassary", "unnecessary", + "unneccesary", "unnecessary", + "unnecessery", "unnecessary", + "unnecissary", "unnecessary", + "unnessecary", "unnecessary", + "unnistalled", "uninstalled", + "unoriginial", "unoriginal", + "unorigional", "unoriginal", + "unoticeable", "unnoticeable", + "unpleaseant", "unpleasant", + "unportected", "unprotected", + "unprepaired", "unprepared", + "unpreparred", "unprepared", + "unproducive", "unproductive", + "unprotexted", "unprotected", + "unqaulified", "unqualified", + "unrealisitc", "unrealistic", + "unrealsitic", "unrealistic", + "unreasonbly", "unreasonably", + "unregluated", "unregulated", + "unregualted", "unregulated", + "unregulared", "unregulated", + "unrepentent", "unrepentant", + "unresponive", "unresponsive", + "unrestriced", "unrestricted", + "unsettleing", "unsettling", + "unsintalled", "uninstalled", + "unsolicated", "unsolicited", + "unsoliticed", "unsolicited", + "unsolocited", "unsolicited", + "unsubscirbe", "unsubscribe", + "unsubscrbed", "unsubscribed", + "unsubscried", "unsubscribed", + "unsubscripe", "unsubscribe", + "unsubscrive", "unsubscribe", + "unsubscrube", "unsubscribe", + "unsubsrcibe", "unsubscribe", + "unsuccesful", "unsuccessful", + "unsuccessul", "unsuccessful", + "unsucesfuly", "unsuccessfully", + "unsucessful", "unsuccessful", + "unsunscribe", "unsubscribe", + "unsuprising", "unsurprising", + "unsuprizing", "unsurprising", + "unsurprized", "unsurprised", + "unsusbcribe", "unsubscribe", + "unviersally", "universally", + "unwarrented", "unwarranted", + "utiliatrian", "utilitarian", + "utilitatian", "utilitarian", + "utiliterian", "utilitarian", + "utilizacion", "utilization", + "utilizaiton", "utilization", + "utilizating", "utilization", + "utiltiarian", "utilitarian", + "vacciantion", "vaccination", + "vaccinaties", "vaccinate", + "vegaterians", "vegetarians", + "vegetariens", "vegetarians", + "vegetatians", "vegetarians", + "vegeterians", "vegetarians", + "vehementely", "vehemently", + "venezuelean", "venezuela", + "venezuelian", "venezuela", + "ventalation", "ventilation", + "ventelation", "ventilation", + "ventialtion", "ventilation", + "ventilacion", "ventilation", + "verastility", "versatility", + "verfication", "verification", + "versatality", "versatility", + "versitality", "versatility", + "versitilaty", "versatility", + "victorieuse", "victories", + "victoriuous", "victorious", + "vietnameese", "vietnamese", + "vietnamesse", "vietnamese", + "vietnamiese", "vietnamese", + "vietnamnese", "vietnamese", + "vigilanties", "vigilante", + "visibillity", "visibility", + "vocabularly", "vocabulary", + "volatillity", "volatility", + "volonteered", "volunteered", + "volounteers", "volunteers", + "volunatrily", "voluntarily", + "voluntairly", "voluntarily", + "volunteeers", "volunteers", + "volunteraly", "voluntarily", + "voluntereed", "volunteered", + "volunterily", "voluntarily", + "vulnerabile", "vulnerable", + "wallpapaers", "wallpapers", + "wallpappers", "wallpapers", + "washingtion", "washington", + "watermeleon", "watermelon", + "waterprooof", "waterproof", + "wavelegnths", "wavelength", + "wavelenghth", "wavelength", + "wavelenghts", "wavelength", + "weaknessses", "weaknesses", + "wellingston", "wellington", + "wellingtion", "wellington", + "westernerns", "westerners", + "westmisnter", "westminster", + "westmnister", "westminster", + "westmonster", "westminster", + 
"whisperered", "whispered", + "whitholding", "withholding", + "wikileakers", "wikileaks", + "willingless", "willingness", + "wincheseter", "winchester", + "windsheilds", "windshield", + "withdrawels", "withdrawals", + "withdrawles", "withdrawals", + "withhelding", "withholding", + "withrdawing", "withdrawing", + "witnesssing", "witnessing", + "woodowrking", "woodworking", + "woodworkign", "woodworking", + "worhsipping", "worshipping", + "workstaiton", "workstation", + "workststion", "workstation", + "worshopping", "worshipping", + "xenophoblic", "xenophobic", + "abandining", "abandoning", + "abandonned", "abandoned", + "abbreviato", "abbreviation", + "abnoramlly", "abnormally", + "abnormalty", "abnormally", + "abnornally", "abnormally", + "abominaton", "abomination", + "abondoning", "abandoning", + "aborginial", "aboriginal", + "aboriganal", "aboriginal", + "aborigenal", "aboriginal", + "aborignial", "aboriginal", + "aborigonal", "aboriginal", + "aboroginal", "aboriginal", + "aboslutely", "absolutely", + "abosrption", "absorption", + "abreviated", "abbreviated", + "absintence", "abstinence", + "absitnence", "abstinence", + "absolument", "absolute", + "absolutley", "absolutely", + "absoprtion", "absorption", + "absorbsion", "absorption", + "absorbtion", "absorption", + "absorpsion", "absorption", + "absoultely", "absolutely", + "abstanence", "abstinence", + "abstenance", "abstinence", + "abstenince", "abstinence", + "abstinense", "abstinence", + "abstinince", "abstinence", + "absurditiy", "absurdity", + "abundacies", "abundances", + "academicas", "academics", + "academicos", "academics", + "academicus", "academics", + "accdiently", "accidently", + "accelarate", "accelerate", + "accelerade", "accelerated", + "accelerare", "accelerate", + "accelerato", "acceleration", + "acceleread", "accelerated", + "accelertor", "accelerator", + "accelorate", "accelerate", + "acceptabel", "acceptable", + "acceptabil", "acceptable", + "acceptence", "acceptance", + "accepterad", "accepted", + "acceptible", "acceptable", + "accerelate", "accelerated", + "accesories", "accessories", + "accessable", "accessible", + "accessbile", "accessible", + "accessoire", "accessories", + "accessoirs", "accessories", + "accicently", "accidently", + "accidantly", "accidently", + "accidebtly", "accidently", + "accidenlty", "accidently", + "accidentes", "accidents", + "accidentky", "accidently", + "accidently", "accidentally", + "accidnetly", "accidently", + "accomadate", "accommodate", + "accomodate", "accommodate", + "accompined", "accompanied", + "accomplise", "accomplishes", + "accompliss", "accomplishes", + "accostumed", "accustomed", + "accountent", "accountant", + "accpetable", "acceptable", + "accpetance", "acceptance", + "accuastion", "accusation", + "acculumate", "accumulate", + "accumalate", "accumulate", + "accumelate", "accumulate", + "accumilate", "accumulate", + "accumulare", "accumulate", + "accumulato", "accumulation", + "accumulted", "accumulated", + "accuratley", "accurately", + "accusating", "accusation", + "accusition", "accusation", + "accustumed", "accustomed", + "acheivable", "achievable", + "acheivment", "achievement", + "acheviable", "achievable", + "achiavable", "achievable", + "achieveble", "achievable", + "achievemnt", "achievement", + "achievemts", "achieves", + "achievents", "achieves", + "achievment", "achievement", + "achilleous", "achilles", + "achiveable", "achievable", + "achivement", "achievement", + "acitvating", "activating", + "acitvision", "activision", + "acknowldge", "acknowledge", + 
"acknowlede", "acknowledge", + "acknowlege", "acknowledge", + "acommodate", "accommodate", + "acopalypse", "apocalypse", + "acordingly", "accordingly", + "acqauinted", "acquainted", + "acquanited", "acquainted", + "acquianted", "acquainted", + "acquinated", "acquainted", + "acquisiton", "acquisition", + "acticating", "activating", + "actication", "activation", + "activacion", "activation", + "activaters", "activates", + "activiates", "activist", + "activiites", "activist", + "activisiom", "activism", + "activisits", "activist", + "activistas", "activists", + "activistes", "activists", + "activiting", "activating", + "activizion", "activision", + "acustommed", "accustomed", + "adaptacion", "adaptation", + "adaptating", "adaptation", + "adaquetely", "adequately", + "addicitons", "addictions", + "addionally", "additionally", + "additivies", "additive", + "additivley", "additive", + "addittions", "addictions", + "addmission", "admission", + "addresable", "addressable", + "addressess", "addresses", + "adequatley", "adequately", + "adequetely", "adequately", + "adequitely", "adequately", + "adernaline", "adrenaline", + "adjectivos", "adjectives", + "adjustible", "adjustable", + "admendment", "amendment", + "administed", "administered", + "administor", "administer", + "administre", "administer", + "administro", "administer", + "adminsiter", "administer", + "admissable", "admissible", + "admittadly", "admittedly", + "admittetly", "admittedly", + "admittidly", "admittedly", + "adolencent", "adolescent", + "adolescant", "adolescent", + "adolescene", "adolescence", + "adoloscent", "adolescent", + "adolsecent", "adolescent", + "adpatation", "adaptation", + "adreanline", "adrenaline", + "adrelanine", "adrenaline", + "adreneline", "adrenaline", + "adreniline", "adrenaline", + "adressable", "addressable", + "advanteges", "advantages", + "advatanges", "advantages", + "adventrous", "adventurous", + "adventrues", "adventures", + "adventuers", "adventures", + "adventuous", "adventurous", + "adventuros", "adventurous", + "adventurus", "adventurous", + "adverticed", "advertised", + "aestethics", "aesthetics", + "aesthatics", "aesthetics", + "aesthestic", "aesthetics", + "affiliaton", "affiliation", + "affilliate", "affiliate", + "affirmitve", "affirmative", + "afflcition", "affliction", + "afflection", "affliction", + "affliation", "affliction", + "affliciton", "affliction", + "afforadble", "affordable", + "affordible", "affordable", + "affortable", "affordable", + "africaners", "africans", + "africaness", "africans", + "aftermaket", "aftermarket", + "afternooon", "afternoon", + "aggravanti", "aggravating", + "aggraveted", "aggravated", + "aggreement", "agreement", + "aggregious", "egregious", + "aggresions", "aggression", + "aggressivo", "aggression", + "aggrovated", "aggravated", + "agnosticim", "agnosticism", + "agnosticsm", "agnosticism", + "agnostisch", "agnostic", + "agnostiscm", "agnosticism", + "agnostisim", "agnosticism", + "agreeement", "agreement", + "agricultre", "agriculture", + "agricultue", "agriculture", + "agriculure", "agriculture", + "agricuture", "agriculture", + "ailenating", "alienating", + "ajdectives", "adjectives", + "alchoholic", "alcoholic", + "alchoolism", "alcoholism", + "alcohalics", "alcoholics", + "alcohalism", "alcoholism", + "alcoholsim", "alcoholism", + "aleinating", "alienating", + "algorhitms", "algorithms", + "algorithem", "algorithm", + "algorithim", "algorithm", + "algorithsm", "algorithms", + "algorithum", "algorithm", + "algorithym", "algorithm", + "algoritmes", 
"algorithms", + "algoritmos", "algorithms", + "algorthims", "algorithms", + "algortihms", "algorithms", + "algorythms", "algorithms", + "alievating", "alienating", + "alledgedly", "allegedly", + "allegeance", "allegiance", + "allegedely", "allegedly", + "allegedley", "allegedly", + "allegience", "allegiance", + "alleigance", "allegiance", + "allergisch", "allergic", + "alliegance", "allegiance", + "alligeance", "allegiance", + "alocholics", "alcoholics", + "alocholism", "alcoholism", + "alogrithms", "algorithms", + "alphabeast", "alphabet", + "alteracion", "alteration", + "alterarion", "alteration", + "alterating", "alteration", + "alternador", "alternator", + "alternater", "alternator", + "alternatie", "alternatives", + "alternatly", "alternately", + "alternatve", "alternate", + "alternetly", "alternately", + "altogehter", "altogether", + "altogheter", "altogether", + "altriustic", "altruistic", + "altruisitc", "altruistic", + "altrusitic", "altruistic", + "alturistic", "altruistic", + "aluminimum", "aluminum", + "amargeddon", "armageddon", + "amateurest", "amateurs", + "ambassabor", "ambassador", + "ambassader", "ambassador", + "ambassator", "ambassador", + "ambassedor", "ambassador", + "ambassidor", "ambassador", + "ambassodor", "ambassador", + "ambiguitiy", "ambiguity", + "amendmants", "amendments", + "amendmends", "amendments", + "americains", "americas", + "americanas", "americans", + "americanis", "americas", + "americanss", "americas", + "americants", "americas", + "americanus", "americans", + "americares", "americas", + "ammendment", "amendment", + "amrageddon", "armageddon", + "analitical", "analytical", + "analitycal", "analytical", + "analogeous", "analogous", + "analyitcal", "analytical", + "analyseles", "analyses", + "analyseras", "analyses", + "analyseres", "analyses", + "analysised", "analyses", + "analysises", "analyses", + "analysisto", "analysts", + "analystics", "analysts", + "anarchisim", "anarchism", + "anarchistm", "anarchism", + "anarchiszm", "anarchism", + "anarchsits", "anarchists", + "anayltical", "analytical", + "ancilliary", "ancillary", + "androiders", "androids", + "androidtvs", "androids", + "anecdotale", "anecdote", + "anecdotice", "anecdote", + "anestheisa", "anesthesia", + "anesthetia", "anesthesia", + "anesthisia", "anesthesia", + "anitbiotic", "antibiotic", + "anitquated", "antiquated", + "anitsocial", "antisocial", + "aniversary", "anniversary", + "annilihate", "annihilated", + "anniverary", "anniversary", + "anniversay", "anniversary", + "anniversry", "anniversary", + "annointing", "anointing", + "annonceurs", "announcers", + "annoucners", "announcers", + "annoucning", "announcing", + "announched", "announce", + "annyoingly", "annoyingly", + "anonymosly", "anonymously", + "anonymousy", "anonymously", + "antaganist", "antagonist", + "antagnoist", "antagonist", + "antarcitca", "antarctica", + "antarctida", "antarctica", + "anthropoly", "anthropology", + "antibiodic", "antibiotic", + "antibiotcs", "antibiotics", + "antibitoic", "antibiotic", + "antiboitic", "antibiotics", + "anticapate", "anticipate", + "anticiapte", "anticipate", + "anticipare", "anticipate", + "anticipato", "anticipation", + "anticuated", "antiquated", + "antiquited", "antiquated", + "antiqvated", "antiquated", + "antisipate", "anticipate", + "antisocail", "antisocial", + "antisosial", "antisocial", + "antoganist", "antagonist", + "antractica", "antarctica", + "apacolypse", "apocalypse", + "apartheied", "apartheid", + "aplication", "application", + "apocalipse", "apocalypse", + 
"apocalpyse", "apocalypse", + "apocalypes", "apocalypse", + "apocalypic", "apocalyptic", + "apocalyspe", "apocalypse", + "apocalytic", "apocalyptic", + "apocaplyse", "apocalypse", + "apocolapse", "apocalypse", + "apolagetic", "apologetic", + "apolagized", "apologized", + "apolegetic", "apologetic", + "apoligetic", "apologetic", + "apoligists", "apologists", + "apoligized", "apologized", + "apologisms", "apologists", + "apologiste", "apologise", + "apologitic", "apologetic", + "apostraphe", "apostrophe", + "apostrephe", "apostrophe", + "apostrohpe", "apostrophe", + "apostropes", "apostrophe", + "apparantly", "apparently", + "appareance", "appearance", + "apparenlty", "apparently", + "appartment", "apartment", + "appealling", "appealing", + "appearence", "appearance", + "appearnace", "appearances", + "apperances", "appearances", + "apperantly", "apparently", + "apperciate", "appreciate", + "appereance", "appearance", + "appetities", "appetite", + "appetitite", "appetite", + "appication", "application", + "applainces", "appliances", + "applicaple", "applicable", + "applicates", "applicants", + "applicaton", "application", + "applicible", "applicable", + "appliences", "appliances", + "appointmet", "appointments", + "appologies", "apologies", + "apporached", "approached", + "apporaches", "approaches", + "appraoched", "approached", + "appraoches", "approaches", + "apprecaite", "appreciate", + "appreciato", "appreciation", + "appreciste", "appreciates", + "apprecitae", "appreciates", + "apprecited", "appreciated", + "apprectice", "apprentice", + "appreicate", "appreciate", + "apprendice", "apprentice", + "apprentace", "apprentice", + "apprentise", "apprentice", + "appretiate", "appreciate", + "appretince", "apprentice", + "appriceate", "appreciates", + "appriciate", "appreciate", + "appriecate", "appreciates", + "approacing", "approaching", + "appropiate", "appropriate", + "approprate", "appropriate", + "apropriate", "appropriate", + "aproximate", "approximate", + "apsotrophe", "apostrophe", + "aptitudine", "aptitude", + "aqcuainted", "acquainted", + "aquisition", "acquisition", + "aramgeddon", "armageddon", + "arangement", "arrangement", + "arbitarily", "arbitrarily", + "arbitraily", "arbitrarily", + "arbitraion", "arbitration", + "arbitrairy", "arbitrarily", + "arbitrarly", "arbitrary", + "arbitraton", "arbitration", + "arcehtypes", "archetypes", + "archaelogy", "archaeology", + "archaeolgy", "archaeology", + "archaology", "archeology", + "archatypes", "archetypes", + "archetects", "architects", + "archetipes", "archetypes", + "archetpyes", "archetypes", + "archetypus", "archetypes", + "archeytpes", "archetypes", + "archictect", "architect", + "architechs", "architects", + "architecht", "architect", + "architecte", "architecture", + "architexts", "architects", + "architypes", "archetypes", + "archtiects", "architects", + "archytypes", "archetypes", + "argentinia", "argentina", + "arguements", "arguments", + "argumentas", "arguments", + "argumentos", "arguments", + "arithemtic", "arithmetic", + "arithmitic", "arithmetic", + "aritmethic", "arithmetic", + "armagaddon", "armageddon", + "armageddan", "armageddon", + "armagedden", "armageddon", + "armageddin", "armageddon", + "armagedeon", "armageddon", + "armageedon", "armageddon", + "armagideon", "armageddon", + "armegaddon", "armageddon", + "arrangerad", "arranged", + "arrangment", "arrangement", + "arthimetic", "arithmetic", + "articifial", "artificial", + "articluate", "articulate", + "articualte", "articulate", + "articulted", "articulated", + 
"artifactos", "artifacts", + "artificiel", "artificial", + "artihmetic", "arithmetic", + "artillerly", "artillery", + "asbestoast", "asbestos", + "asethetics", "aesthetics", + "asisstants", "assistants", + "aspiratons", "aspirations", + "assasinate", "assassinate", + "assassians", "assassin", + "assassinas", "assassins", + "assassines", "assassins", + "assassinos", "assassins", + "assemblare", "assemble", + "assempling", "assembling", + "assersions", "assertions", + "assesement", "assessment", + "assestment", "assessment", + "assigments", "assignments", + "assignemnt", "assignment", + "assimalate", "assimilate", + "assimilant", "assimilate", + "assimilare", "assimilate", + "assimliate", "assimilate", + "assimulate", "assimilate", + "assingment", "assignment", + "assistanat", "assistants", + "assistanse", "assistants", + "assistante", "assistance", + "assistence", "assistance", + "assistendo", "assisted", + "assistents", "assistants", + "assmebling", "assembling", + "assocaited", "associated", + "assocaites", "associates", + "assocation", "association", + "associatie", "associated", + "associatin", "associations", + "associaton", "association", + "associsted", "associates", + "assoicated", "associated", + "assoicates", "associates", + "assosiated", "associated", + "assosiates", "associates", + "asssassans", "assassins", + "assupmtion", "assumptions", + "assymetric", "asymmetric", + "asteroides", "asteroids", + "asthetical", "aesthetical", + "astonising", "astonishing", + "astornauts", "astronauts", + "astranauts", "astronauts", + "astronatus", "astronauts", + "astronaunt", "astronaut", + "astronomia", "astronomical", + "astronouts", "astronauts", + "astronuats", "astronauts", + "asutralian", "australian", + "atatchment", "attachment", + "athleticos", "athletics", + "athleticsm", "athleticism", + "athletiscm", "athleticism", + "athletisim", "athleticism", + "atmopshere", "atmosphere", + "atmoshpere", "atmosphere", + "atomsphere", "atmosphere", + "atriculate", "articulate", + "atrocoties", "atrocities", + "atrosities", "atrocities", + "attachemnt", "attachment", + "attackeras", "attackers", + "attactment", "attachment", + "attemtping", "attempting", + "attendence", "attendance", + "attendents", "attendants", + "attirbutes", "attributes", + "attmepting", "attempting", + "attracters", "attracts", + "attractice", "attractive", + "attracties", "attracts", + "attractifs", "attracts", + "attraktion", "attraction", + "attraktive", "attractive", + "attribuito", "attribution", + "attritubes", "attributes", + "auctioners", "auctions", + "audioboook", "audiobook", + "audioboost", "audiobooks", + "auidobooks", "audiobooks", + "auotattack", "autoattack", + "austrailan", "australian", + "austrailia", "australia", + "australain", "australians", + "australien", "australian", + "australina", "australians", + "austrlaian", "australians", + "authenticy", "authenticity", + "autherized", "authorized", + "authoritay", "authority", + "authorites", "authorities", + "authorithy", "authority", + "authroized", "authorized", + "autistisch", "autistic", + "autoattaks", "autoattack", + "autocorect", "autocorrect", + "autocorrct", "autocorrect", + "autocorret", "autocorrect", + "autograpgh", "autograph", + "automatice", "automate", + "automatico", "automation", + "automatied", "automate", + "automatiek", "automate", + "automatron", "automation", + "automatted", "automate", + "automibile", "automobile", + "automitive", "automotive", + "automoblie", "automobile", + "automomous", "autonomous", + "automonous", "autonomous", + 
"automotice", "automotive", + "automotion", "automation", + "automotize", "automotive", + "automotove", "automotive", + "autonamous", "autonomous", + "autonation", "automation", + "autonimous", "autonomous", + "autonomity", "autonomy", + "autononous", "autonomous", + "auttoatack", "autoattack", + "auxilliary", "auxiliary", + "availabale", "available", + "availaible", "available", + "availiable", "available", + "averageadi", "averaged", + "averageifs", "averages", + "awesomeley", "awesomely", + "awesomelly", "awesomely", + "awesomenss", "awesomeness", + "awkwardess", "awkwardness", + "babysister", "babysitter", + "babysiting", "babysitting", + "babysittng", "babysitting", + "bachelores", "bachelors", + "backgorund", "background", + "backgroudn", "backgrounds", + "backgrouds", "backgrounds", + "backgrouns", "backgrounds", + "backgruond", "backgrounds", + "backpacing", "backpacking", + "backpackng", "backpacking", + "backrgound", "backgrounds", + "backrounds", "backgrounds", + "baksetball", "basketball", + "balanceada", "balanced", + "balanceado", "balanced", + "balckberry", "blackberry", + "balckhawks", "blackhawks", + "balcksmith", "blacksmith", + "bandwagoon", "bandwagon", + "bangaldesh", "bangladesh", + "bangladash", "bangladesh", + "bangledash", "bangladesh", + "bangledesh", "bangladesh", + "banglidesh", "bangladesh", + "bankrupcty", "bankruptcy", + "bankruptsy", "bankruptcy", + "bankrutpcy", "bankruptcy", + "barabrians", "barbarians", + "barbariens", "barbarians", + "barbarions", "barbarians", + "barbarisch", "barbaric", + "barberians", "barbarians", + "bargianing", "bargaining", + "bartendars", "bartenders", + "basektball", "basketball", + "baskteball", "basketball", + "bastardous", "bastards", + "battelship", "battleship", + "battelstar", "battlestar", + "battlearts", "battlestar", + "battlechip", "battleship", + "battlefied", "battlefield", + "battlefont", "battlefront", + "battlehips", "battleship", + "battlesaur", "battlestar", + "battlescar", "battlestar", + "battleshop", "battleship", + "battlestsr", "battlestar", + "beahviours", "behaviours", + "beautifuly", "beautifully", + "beautilful", "beautifully", + "beautyfull", "beautiful", + "becnhmarks", "benchmarks", + "beethoveen", "beethoven", + "begginings", "beginnings", + "begininngs", "beginnings", + "beginninng", "beginnings", + "behaivours", "behaviours", + "behaviorly", "behavioral", + "behavoiral", "behavioral", + "behavoiurs", "behaviours", + "behavorial", "behavioral", + "behavoural", "behavioral", + "behvaiours", "behaviours", + "beleagured", "beleaguered", + "beleivable", "believable", + "beliavable", "believable", + "beliebable", "believable", + "believeble", "believable", + "beliveable", "believable", + "benchamrks", "benchmarks", + "benchmakrs", "benchmarks", + "benckmarks", "benchmarks", + "benefecial", "beneficial", + "beneficary", "beneficiary", + "beneficiul", "beneficial", + "benefitial", "beneficial", + "beneifical", "beneficial", + "benelovent", "benevolent", + "benevalent", "benevolent", + "benevelant", "benevolent", + "benevelent", "benevolent", + "benevelont", "benevolent", + "benevloent", "benevolent", + "benevolant", "benevolent", + "benificial", "beneficial", + "benovelent", "benevolent", + "bernouilli", "bernoulli", + "besitality", "bestiality", + "bestaility", "bestiality", + "besteality", "bestiality", + "betrayeado", "betrayed", + "bilateraly", "bilaterally", + "billborads", "billboards", + "bioligical", "biological", + "biologiset", "biologist", + "biologiskt", "biologist", + "birghtness", 
"brightness", + "birmignham", "birmingham", + "birmimgham", "birmingham", + "bisexuella", "bisexual", + "bitterseet", "bittersweet", + "bitterswet", "bittersweet", + "blackahwks", "blackhawks", + "blackbarry", "blackberry", + "blackbeary", "blackberry", + "blackbeery", "blackberry", + "blackcawks", "blackhawks", + "blackhakws", "blackhawks", + "blackhwaks", "blackhawks", + "blackmsith", "blacksmith", + "blackshits", "blacksmith", + "blasphemey", "blasphemy", + "blitzkreig", "blitzkrieg", + "blochchain", "blockchain", + "blockcahin", "blockchain", + "blockchian", "blockchain", + "bloodboner", "bloodborne", + "bloodbonre", "bloodborne", + "bloodborbe", "bloodborne", + "bloodbrone", "bloodborne", + "bloodporne", "bloodborne", + "bloorborne", "bloodborne", + "blueberies", "blueberries", + "blueberris", "blueberries", + "blueberrry", "blueberry", + "bluebrints", "blueprints", + "boardcasts", "broadcasts", + "bodyheight", "bodyweight", + "bodyweigth", "bodyweight", + "bodywieght", "bodyweight", + "bombarment", "bombardment", + "bookmakred", "bookmarked", + "bootlaoder", "bootloader", + "bootleader", "bootloader", + "boradcasts", "broadcasts", + "borderlads", "borderlands", + "borderlans", "borderlands", + "bottelneck", "bottleneck", + "bottlebeck", "bottleneck", + "boundaires", "boundaries", + "bounderies", "boundaries", + "bourgeoius", "bourgeois", + "boycutting", "boycotting", + "boyfirends", "boyfriends", + "boyfreinds", "boyfriends", + "boyfrients", "boyfriends", + "braceletes", "bracelets", + "braceletts", "bracelets", + "brainwased", "brainwashed", + "brakedowns", "breakdowns", + "braodcasts", "broadcasts", + "brasillian", "brazilian", + "bratenders", "bartenders", + "brazilains", "brazilians", + "brazileans", "brazilians", + "braziliaan", "brazilians", + "brazilions", "brazilians", + "brazillans", "brazilians", + "brightoner", "brighten", + "brigthness", "brightness", + "brillaince", "brilliance", + "brilliante", "brilliance", + "brillianty", "brilliantly", + "brimestone", "brimstone", + "brimingham", "birmingham", + "broacasted", "broadcast", + "brotherhod", "brotherhood", + "brotherood", "brotherhood", + "brusselers", "brussels", + "brutallity", "brutally", + "buisnesses", "businesses", + "bulgariska", "bulgaria", + "bulletpoof", "bulletproof", + "bulletprof", "bulletproof", + "bureaucats", "bureaucrats", + "businesman", "businessman", + "businesmen", "businessmen", + "businessen", "businessmen", + "butterfies", "butterflies", + "cabinettas", "cabinets", + "caclulated", "calculated", + "caclulator", "calculator", + "cahracters", "characters", + "calcluator", "calculators", + "calcualted", "calculated", + "calcualtor", "calculator", + "calculador", "calculator", + "calcularon", "calculator", + "calculater", "calculator", + "calculatin", "calculations", + "calibratin", "calibration", + "calibraton", "calibration", + "califnoria", "californian", + "califonria", "californian", + "califorian", "californian", + "califorina", "california", + "californai", "californian", + "califronia", "california", + "caligraphy", "calligraphy", + "caliofrnia", "californian", + "calrifying", "clarifying", + "calssified", "classified", + "caluclated", "calculated", + "caluclator", "calculator", + "caluculate", "calculate", + "cambodican", "cambodia", + "camofluage", "camouflage", + "camoufalge", "camouflage", + "camouglage", "camouflage", + "campaiging", "campaigning", + "campaignes", "campaigns", + "cancellato", "cancellation", + "candidatas", "candidates", + "candidatxs", "candidates", + "candidiate", 
"candidate", + "canditates", "candidates", + "cannibalsm", "cannibalism", + "cannisters", "canisters", + "cannonical", "canonical", + "capabality", "capability", + "capabiltiy", "capability", + "capacators", "capacitors", + "capaciters", "capacitors", + "capactiors", "capacitors", + "capasitors", "capacitors", + "capatilism", "capitalism", + "capatilist", "capitalist", + "capatilize", "capitalize", + "capialized", "capitalized", + "capicators", "capacitors", + "capitalisn", "capitals", + "capitalits", "capitalists", + "capitalsim", "capitalism", + "capitalsit", "capitalists", + "capitarist", "capitalist", + "capitilism", "capitalism", + "capitilist", "capitalist", + "capitilize", "capitalize", + "capitlaism", "capitalism", + "capitlaist", "capitalist", + "capitlaize", "capitalized", + "capitolism", "capitalism", + "capitolist", "capitalist", + "capitolize", "capitalize", + "captainers", "captains", + "captialism", "capitalism", + "captialist", "capitalist", + "captialize", "capitalize", + "captivitiy", "captivity", + "caraciture", "caricature", + "carciature", "caricature", + "cardinales", "cardinals", + "cardinalis", "cardinals", + "carefullly", "carefully", + "cariacture", "caricature", + "caricatore", "caricature", + "cariciture", "caricature", + "caricuture", "caricature", + "carismatic", "charismatic", + "carribbean", "caribbean", + "cartdridge", "cartridge", + "cartdriges", "cartridges", + "carthagian", "carthaginian", + "cartilidge", "cartilage", + "cartirdges", "cartridges", + "cartrdiges", "cartridges", + "cartriages", "cartridges", + "cartrigdes", "cartridges", + "casaulties", "casualties", + "cassowarry", "cassowary", + "casualites", "casualties", + "casualries", "casualties", + "casulaties", "casualties", + "cataclysim", "cataclysm", + "cataclysym", "cataclysm", + "catagories", "categories", + "catapillar", "caterpillar", + "catapiller", "caterpillar", + "catastrope", "catastrophe", + "catastrphe", "catastrophe", + "categorice", "categorize", + "categoried", "categorized", + "categoriei", "categorize", + "cateogrize", "categorized", + "catepillar", "caterpillar", + "caterpilar", "caterpillar", + "catholicsm", "catholicism", + "catholicus", "catholics", + "catholisim", "catholicism", + "cativating", "activating", + "cattleship", "battleship", + "causalties", "casualties", + "cautionsly", "cautiously", + "celebratin", "celebration", + "celebrites", "celebrities", + "celebritiy", "celebrity", + "cellpading", "cellpadding", + "cellulaire", "cellular", + "cemetaries", "cemeteries", + "censorhsip", "censorship", + "censurship", "censorship", + "centipedle", "centipede", + "ceremonias", "ceremonies", + "ceremoniis", "ceremonies", + "ceremonije", "ceremonies", + "cerimonial", "ceremonial", + "cerimonies", "ceremonies", + "certainity", "certainty", + "certainlyt", "certainty", + "chairtable", "charitable", + "chalenging", "challenging", + "challanged", "challenged", + "challanges", "challenges", + "challegner", "challenger", + "challender", "challenger", + "challengue", "challenger", + "challengur", "challenger", + "challening", "challenging", + "challneger", "challenger", + "chanceller", "chancellor", + "chancillor", "chancellor", + "chansellor", "chancellor", + "charachter", "character", + "charactere", "characterize", + "characterz", "characterize", + "charactors", "characters", + "charakters", "characters", + "charatable", "charitable", + "charecters", "characters", + "charistics", "characteristics", + "charitible", "charitable", + "chartiable", "charitable", + "chechpoint", 
"checkpoint", + "checkpiont", "checkpoint", + "checkpoins", "checkpoints", + "checkponts", "checkpoints", + "cheesecase", "cheesecake", + "cheesecave", "cheesecake", + "cheeseface", "cheesecake", + "cheezecake", "cheesecake", + "chemcially", "chemically", + "chidlbirth", "childbirth", + "chihuahuha", "chihuahua", + "childbrith", "childbirth", + "childrends", "childrens", + "childrenis", "childrens", + "childrents", "childrens", + "chirstians", "christians", + "chocalates", "chocolates", + "chocloates", "chocolates", + "chocoaltes", "chocolates", + "chocolatie", "chocolates", + "chocolatos", "chocolates", + "chocolatte", "chocolates", + "chocolotes", "chocolates", + "cholestrol", "cholesterol", + "chormosome", "chromosome", + "chornicles", "chronicles", + "chrisitans", "christians", + "christains", "christians", + "christiaan", "christian", + "christimas", "christians", + "christinas", "christians", + "christines", "christians", + "christmans", "christians", + "chromasome", "chromosome", + "chromesome", "chromosome", + "chromisome", "chromosome", + "chromosmes", "chromosomes", + "chromosoms", "chromosomes", + "chromosone", "chromosome", + "chromosoom", "chromosome", + "chromozome", "chromosome", + "chronciles", "chronicles", + "chronicals", "chronicles", + "chronicels", "chronicles", + "chronocles", "chronicles", + "chronosome", "chromosome", + "chrsitians", "christians", + "cigarattes", "cigarettes", + "cigerattes", "cigarettes", + "cincinatti", "cincinnati", + "cinncinati", "cincinnati", + "circulaire", "circular", + "circulaton", "circulation", + "circumsice", "circumcised", + "circumsied", "circumcised", + "circumwent", "circumvent", + "circunvent", "circumvent", + "cirruculum", "curriculum", + "claculator", "calculator", + "clairfying", "clarifying", + "clasically", "classically", + "classicals", "classics", + "classrooom", "classroom", + "cleanliess", "cleanliness", + "cleareance", "clearance", + "cleverleys", "cleverly", + "cliffhager", "cliffhanger", + "climateers", "climates", + "climatiser", "climates", + "clincially", "clinically", + "clitoridis", "clitoris", + "clitorious", "clitoris", + "co-incided", "coincided", + "cockroachs", "cockroaches", + "cockroahes", "cockroaches", + "coefficent", "coefficient", + "cognatious", "contagious", + "cognitivie", "cognitive", + "coincidnce", "coincide", + "colelctive", "collective", + "colelctors", "collectors", + "collapsers", "collapses", + "collaquial", "colloquial", + "collasping", "collapsing", + "collataral", "collateral", + "collaterol", "collateral", + "collatoral", "collateral", + "collcetion", "collections", + "colleauges", "colleagues", + "colleciton", "collection", + "collectems", "collects", + "collectief", "collective", + "collecties", "collects", + "collectifs", "collects", + "collectivo", "collection", + "collectoin", "collections", + "collectons", "collections", + "collectros", "collects", + "collegaues", "colleagues", + "collequial", "colloquial", + "colleteral", "collateral", + "colliquial", "colloquial", + "collission", "collisions", + "collitions", "collisions", + "colloqiual", "colloquial", + "colloquail", "colloquial", + "colloqueal", "colloquial", + "collpasing", "collapsing", + "colonialsm", "colonialism", + "colorblend", "colorblind", + "coloublind", "colorblind", + "columbidae", "columbia", + "comapnions", "companions", + "comaprable", "comparable", + "comaprison", "comparison", + "comaptible", "compatible", + "combatabts", "combatants", + "combatents", "combatants", + "combinatin", "combinations", + "combinaton", 
"combination", + "comediants", "comedians", + "comepndium", "compendium", + "comferting", "comforting", + "comforming", "comforting", + "comfortbly", "comfortably", + "comisioned", "commissioned", + "comisioner", "commissioner", + "comissions", "commissions", + "commandbox", "commando", + "commandent", "commandment", + "commandeur", "commanders", + "commandore", "commanders", + "commandpod", "commando", + "commanists", "communists", + "commemters", "commenters", + "commencera", "commerce", + "commenciez", "commence", + "commentaar", "commentary", + "commentare", "commenter", + "commentars", "commenters", + "commentart", "commentator", + "commentery", "commentary", + "commentsry", "commenters", + "commercail", "commercials", + "commercent", "commence", + "commerical", "commercial", + "comminists", "communists", + "commisison", "commissions", + "commissons", "commissions", + "commiteted", "commited", + "commodites", "commodities", + "commtiment", "commitments", + "communicae", "communicated", + "communisim", "communism", + "communiste", "communities", + "communites", "communities", + "communters", "commenters", + "compadible", "compatible", + "compagnons", "companions", + "compainons", "companions", + "compairson", "comparison", + "compalined", "complained", + "compandium", "compendium", + "companians", "companions", + "companines", "companions", + "compansate", "compensate", + "comparabil", "comparable", + "comparason", "comparison", + "comparaste", "compares", + "comparatie", "comparative", + "compareble", "comparable", + "comparemos", "compares", + "comparions", "comparison", + "compariosn", "comparisons", + "comparisen", "compares", + "comparitve", "comparative", + "comparsion", "comparison", + "compartent", "compartment", + "compartmet", "compartment", + "compatibel", "compatible", + "compatibil", "compatible", + "compeating", "completing", + "compeditor", "competitor", + "compednium", "compendium", + "compeeting", "completing", + "compeltely", "completely", + "compelting", "completing", + "compeltion", "completion", + "compemdium", "compendium", + "compenduim", "compendium", + "compenents", "components", + "compenidum", "compendium", + "compensare", "compensate", + "comperable", "comparable", + "comperhend", "comprehend", + "compession", "compassion", + "competance", "competence", + "competator", "competitor", + "competenet", "competence", + "competense", "competence", + "competenze", "competence", + "competeted", "competed", + "competetor", "competitor", + "competidor", "competitor", + "competiors", "competitors", + "competitie", "competitive", + "competitin", "competitions", + "competitio", "competitor", + "competiton", "competition", + "competitve", "competitive", + "compilance", "compliance", + "compilaton", "compilation", + "compinsate", "compensate", + "compitable", "compatible", + "compitance", "compliance", + "complacant", "complacent", + "complaince", "compliance", + "complaines", "complaints", + "complainig", "complaining", + "complainte", "complained", + "complation", "completion", + "compleatly", "completely", + "complecate", "complicate", + "completeds", "completes", + "completent", "complement", + "completily", "complexity", + "completito", "completion", + "completley", "completely", + "complexers", "complexes", + "complexety", "complexity", + "complianed", "compliance", + "compliants", "complaints", + "complicaed", "complicate", + "complicare", "complicate", + "complicati", "complicit", + "complicato", "complication", + "complicite", "complicate", + "complicted", 
"complicated", + "complience", "compliance", + "complimate", "complicate", + "complition", "completion", + "complusion", "compulsion", + "complusive", "compulsive", + "complusory", "compulsory", + "compolsive", "compulsive", + "compolsory", "compulsory", + "compolsury", "compulsory", + "componants", "components", + "componenet", "components", + "componsate", "compensate", + "comporable", "comparable", + "compositae", "composite", + "compositie", "composite", + "compositon", "composition", + "compraison", "comparisons", + "compramise", "compromise", + "comprassem", "compress", + "comprehand", "comprehend", + "compresion", "compression", + "compresors", "compressor", + "compresser", "compressor", + "compressio", "compressor", + "compresson", "compression", + "comprihend", "comprehend", + "comprimise", "compromise", + "compromiss", "compromises", + "compromize", "compromise", + "compromsie", "compromises", + "comprossor", "compressor", + "compteting", "completing", + "comptetion", "completion", + "compulisve", "compulsive", + "compulosry", "compulsory", + "compulsary", "compulsory", + "compulsery", "compulsory", + "compulsing", "compulsion", + "compulsivo", "compulsion", + "compulsury", "compulsory", + "compuslion", "compulsion", + "compuslive", "compulsive", + "compuslory", "compulsory", + "compustion", "compulsion", + "computanti", "computation", + "conatiners", "containers", + "concedendo", "conceded", + "concedered", "conceded", + "conceitual", "conceptual", + "concentate", "concentrate", + "concenting", "connecting", + "conceptial", "conceptual", + "conceptuel", "conceptual", + "concersion", "concession", + "concesions", "concession", + "concidered", "considered", + "conciously", "consciously", + "concission", "concession", + "conclsuion", "concussion", + "conclusies", "conclusive", + "conclution", "conclusion", + "concorrent", "concurrent", + "concsience", "conscience", + "conculsion", "conclusion", + "conculsive", "conclusive", + "concurment", "concurrent", + "concurrant", "concurrent", + "concurrect", "concurrent", + "concusions", "concussion", + "concusison", "concussions", + "condamning", "condemning", + "condemming", "condemning", + "condencing", "condemning", + "condenming", "condemning", + "condensend", "condensed", + "condidtion", "condition", + "conditinal", "conditional", + "conditiner", "conditioner", + "conditiond", "conditioned", + "conditiong", "conditioning", + "condmening", "condemning", + "conduiting", "conducting", + "conencting", "connecting", + "conenction", "connection", + "conenctors", "connectors", + "conesencus", "consensus", + "confedarcy", "confederacy", + "confedence", "conference", + "confedercy", "confederacy", + "conferance", "conference", + "conferenze", "conference", + "conferming", "confirming", + "confernece", "conferences", + "confessino", "confessions", + "confidance", "confidence", + "confidenly", "confidently", + "confidense", "confidence", + "confidenty", "confidently", + "conflcting", "conflating", + "conflicing", "conflicting", + "conflictos", "conflicts", + "confliting", "conflating", + "confriming", "confirming", + "confussion", "confession", + "congratule", "congratulate", + "congresman", "congressman", + "congresmen", "congressmen", + "congressen", "congressmen", + "conjecutre", "conjecture", + "conjuction", "conjunction", + "conjuncion", "conjunction", + "conlcusion", "conclusion", + "conncetion", "connections", + "conneciton", "connection", + "connecties", "connects", + "connectins", "connects", + "connectivy", "connectivity", + 
"connectpro", "connector", + "conneticut", "connecticut", + "connotaion", "connotation", + "conpsiracy", "conspiracy", + "conqeuring", "conquering", + "conqouring", "conquering", + "conquerers", "conquerors", + "conquoring", "conquering", + "consciense", "conscience", + "consciouly", "consciously", + "consdiered", "considered", + "consending", "consenting", + "consensuel", "consensual", + "consenusal", "consensual", + "consequece", "consequence", + "consequnce", "consequence", + "conservare", "conserve", + "conservato", "conservation", + "conservice", "conserve", + "conservies", "conserve", + "conservite", "conserve", + "consicence", "conscience", + "consideras", "considers", + "consideret", "considerate", + "consipracy", "conspiracy", + "consistant", "consistent", + "consistens", "consists", + "consisteny", "consistency", + "consitency", "consistency", + "consituted", "constituted", + "conslutant", "consultant", + "consluting", "consulting", + "consolidad", "consolidated", + "consonents", "consonants", + "consorcium", "consortium", + "conspirace", "conspiracies", + "conspiricy", "conspiracy", + "conspriacy", "conspiracy", + "constaints", "constraints", + "constatnly", "constantly", + "constently", "constantly", + "constitude", "constitute", + "constitued", "constitute", + "constituem", "constitute", + "constituer", "constitute", + "constitues", "constitutes", + "constituie", "constitute", + "constituit", "constitute", + "constitutn", "constituents", + "constituye", "constitute", + "constnatly", "constantly", + "constracts", "constructs", + "constraits", "constraints", + "constransi", "constraints", + "constrants", "constraints", + "construced", "constructed", + "constructo", "construction", + "construint", "constraint", + "construits", "constructs", + "construted", "constructed", + "consueling", "consulting", + "consultata", "consultant", + "consultate", "consultant", + "consultati", "consultant", + "consultato", "consultation", + "consultent", "consultant", + "consumated", "consummated", + "consumbale", "consumables", + "consuments", "consumes", + "consumirem", "consumerism", + "consumires", "consumerism", + "consumirse", "consumerism", + "consumiste", "consumes", + "consumpion", "consumption", + "contaction", "contacting", + "contageous", "contagious", + "contagiosa", "contagious", + "contagioso", "contagious", + "contaigous", "contagious", + "containors", "containers", + "contaminen", "containment", + "contanting", "contacting", + "contection", "contention", + "contectual", "contextual", + "conteiners", "contenders", + "contempate", "contemplate", + "contemplat", "contempt", + "contempory", "contemporary", + "contenants", "continents", + "contencion", "contention", + "contendors", "contenders", + "contenents", "continents", + "conteneurs", "contenders", + "contengent", "contingent", + "contension", "contention", + "contentino", "contention", + "contentios", "contentious", + "contentous", "contentious", + "contestais", "contests", + "contestans", "contests", + "contestase", "contests", + "contestion", "contention", + "contestors", "contests", + "contextful", "contextual", + "contextuel", "contextual", + "contextura", "contextual", + "contianers", "containers", + "contianing", "containing", + "contibuted", "contributed", + "contibutes", "contributes", + "contigents", "continents", + "contigious", "contagious", + "contignent", "contingent", + "continants", "continents", + "continenal", "continental", + "continenet", "continents", + "contineous", "continuous", + "continetal", 
"continental", + "contingecy", "contingency", + "contingeny", "contingency", + "continient", "contingent", + "continious", "continuous", + "continiuty", "continuity", + "contintent", "contingent", + "continualy", "continually", + "continuare", "continue", + "continuati", "continuity", + "continuato", "continuation", + "continuent", "contingent", + "continuety", "continuity", + "continunes", "continents", + "continuons", "continuous", + "continutiy", "continuity", + "continuuum", "continuum", + "contitnent", "contingent", + "contiuning", "containing", + "contiunity", "continuity", + "contorller", "controllers", + "contracing", "contracting", + "contractar", "contractor", + "contracter", "contractor", + "contractin", "contraction", + "contractos", "contracts", + "contradice", "contradicted", + "contradics", "contradicts", + "contredict", "contradict", + "contribued", "contributed", + "contribuem", "contribute", + "contribuer", "contribute", + "contribues", "contributes", + "contribuie", "contribute", + "contribuit", "contribute", + "contributo", "contribution", + "contributs", "contributes", + "contribuye", "contribute", + "contricted", "contracted", + "contridict", "contradict", + "contriubte", "contributes", + "controlelr", "controllers", + "controlers", "controls", + "controling", "controlling", + "controlles", "controls", + "controvery", "controversy", + "controvesy", "controversy", + "contrubite", "contributes", + "contrubute", "contribute", + "contuining", "continuing", + "contuinity", "continuity", + "convaluted", "convoluted", + "convcition", "convictions", + "conveinent", "convenient", + "conveluted", "convoluted", + "convencion", "convention", + "conveniant", "convenient", + "conveniece", "convenience", + "convenince", "convenience", + "convential", "conventional", + "converesly", "conversely", + "convergens", "converse", + "converison", "conversions", + "converning", "converting", + "conversare", "converse", + "conversino", "conversions", + "conversley", "conversely", + "conversoin", "conversions", + "conversons", "conversions", + "convertion", "conversion", + "convertire", "converter", + "converying", "converting", + "conveyered", "conveyed", + "conviccion", "conviction", + "conviciton", "conviction", + "convienent", "convenient", + "conviluted", "convoluted", + "convincted", "convince", + "convinsing", "convincing", + "convinving", "convincing", + "convoluded", "convoluted", + "convoulted", "convoluted", + "convulated", "convoluted", + "convuluted", "convoluted", + "cooperatve", "cooperative", + "coordenate", "coordinate", + "coordiante", "coordinate", + "coordinare", "coordinate", + "coordinato", "coordination", + "coordinats", "coordinates", + "coordonate", "coordinate", + "cooridnate", "coordinate", + "copehnagen", "copenhagen", + "copenaghen", "copenhagen", + "copenahgen", "copenhagen", + "copengagen", "copenhagen", + "copengahen", "copenhagen", + "copenhagan", "copenhagen", + "copenhague", "copenhagen", + "copenhagun", "copenhagen", + "copenhaven", "copenhagen", + "copenhegan", "copenhagen", + "copyrighed", "copyrighted", + "copyrigted", "copyrighted", + "corinthans", "corinthians", + "corinthias", "corinthians", + "corinthins", "corinthians", + "cornmitted", "committed", + "corporatie", "corporate", + "corralated", "correlated", + "corralates", "correlates", + "correccion", "correction", + "correciton", "corrections", + "correcters", "correctors", + "correctess", "correctness", + "correctivo", "correction", + "correctons", "corrections", + "corregated", "correlated", + 
"correkting", "correcting", + "correlatas", "correlates", + "correlatie", "correlated", + "correlatos", "correlates", + "correspend", "correspond", + "corrilated", "correlated", + "corrilates", "correlates", + "corrispond", "correspond", + "corrolated", "correlated", + "corrolates", "correlates", + "corrospond", "correspond", + "corrpution", "corruption", + "corrulates", "correlates", + "corrupcion", "corruption", + "cosmeticas", "cosmetics", + "cosmeticos", "cosmetics", + "costumized", "customized", + "counceling", "counseling", + "councellor", "councillor", + "councelors", "counselors", + "councilers", "councils", + "counselers", "counselors", + "counsellng", "counselling", + "counsilers", "counselors", + "counsiling", "counseling", + "counsilors", "counselors", + "counsolers", "counselors", + "counsoling", "counseling", + "countepart", "counteract", + "counteratk", "counteract", + "counterbat", "counteract", + "countercat", "counteract", + "countercut", "counteract", + "counteries", "counters", + "countoring", "countering", + "countryies", "countryside", + "countrying", "countering", + "courcework", "coursework", + "coursefork", "coursework", + "courthosue", "courthouse", + "courtrooom", "courtroom", + "cousnelors", "counselors", + "coutneract", "counteract", + "coutnering", "countering", + "covenental", "covenant", + "cranberrry", "cranberry", + "creationis", "creations", + "creationsm", "creationism", + "creationst", "creationist", + "creativily", "creatively", + "creativley", "creatively", + "credibilty", "credibility", + "creeperest", "creepers", + "crimanally", "criminally", + "criminalty", "criminally", + "criminalul", "criminally", + "criticable", "critical", + "criticarlo", "critical", + "criticiing", "criticising", + "criticisim", "criticism", + "criticisme", "criticise", + "criticisng", "criticising", + "criticists", "critics", + "criticisze", "criticise", + "criticizms", "criticisms", + "criticizng", "criticizing", + "critisiced", "criticized", + "critisicms", "criticisms", + "critisicsm", "criticisms", + "critisiscm", "criticisms", + "critisisms", "criticisms", + "critisizes", "criticises", + "critisizms", "criticisms", + "critiziced", "criticized", + "critizised", "criticized", + "critizisms", "criticisms", + "critizized", "criticized", + "crocodille", "crocodile", + "crossfiter", "crossfire", + "crutchetts", "crutches", + "crystalens", "crystals", + "crystalisk", "crystals", + "crystallis", "crystals", + "cuatiously", "cautiously", + "culterally", "culturally", + "cultrually", "culturally", + "culumative", "cumulative", + "culutrally", "culturally", + "cumbersone", "cumbersome", + "cumbursome", "cumbersome", + "cumpolsory", "compulsory", + "cumulitive", "cumulative", + "currancies", "currencies", + "currenctly", "currency", + "currenices", "currencies", + "currentfps", "currents", + "currentlys", "currents", + "currentpos", "currents", + "currentusa", "currents", + "curriculem", "curriculum", + "curriculim", "curriculum", + "curriences", "currencies", + "curroption", "corruption", + "custimized", "customized", + "customzied", "customized", + "custumized", "customized", + "cutscences", "cutscene", + "cutscenses", "cutscene", + "dangerouly", "dangerously", + "dealerhsip", "dealerships", + "deathamtch", "deathmatch", + "deathmacth", "deathmatch", + "debateable", "debatable", + "decembeard", "december", + "decendants", "descendants", + "decendents", "descendants", + "decideable", "decidable", + "deciptions", "depictions", + "decisiones", "decisions", + "declarasen", 
"declares", + "declaraste", "declares", + "declaremos", "declares", + "decomposit", "decompose", + "decoracion", "decoration", + "decorativo", "decoration", + "decoritive", "decorative", + "decroative", "decorative", + "decsending", "descending", + "dedicacion", "dedication", + "dedikation", "dedication", + "deducatble", "deductible", + "deducitble", "deductible", + "defacation", "defamation", + "defamating", "defamation", + "defanitely", "definately", + "defelction", "deflection", + "defendeers", "defender", + "defendents", "defendants", + "defenderes", "defenders", + "defenesman", "defenseman", + "defenselss", "defenseless", + "defensivly", "defensively", + "defianetly", "definately", + "defiantely", "definately", + "defiantley", "definately", + "defibately", "definately", + "deficately", "definately", + "deficiancy", "deficiency", + "deficience", "deficiencies", + "deficienct", "deficient", + "deficienty", "deficiency", + "defiintely", "definately", + "definaetly", "definately", + "definaitly", "definately", + "definaltey", "definately", + "definataly", "definately", + "definateky", "definately", + "definately", "definitely", + "definatily", "definately", + "defination", "definition", + "definative", "definitive", + "definatlly", "definately", + "definatrly", "definately", + "definayely", "definately", + "defineatly", "definately", + "definetaly", "definately", + "definetely", "definitely", + "definetily", "definately", + "definetlly", "definetly", + "definettly", "definately", + "definicion", "definition", + "definietly", "definitely", + "definining", "defining", + "definitaly", "definately", + "definiteyl", "definitly", + "definitivo", "definition", + "definitley", "definitely", + "definitlly", "definitly", + "definitlry", "definitly", + "definitlty", "definitly", + "definjtely", "definately", + "definltely", "definately", + "definotely", "definately", + "definstely", "definately", + "defintaley", "definately", + "defintiely", "definitely", + "defintiion", "definitions", + "definutely", "definately", + "deflaction", "deflection", + "defleciton", "deflection", + "deflektion", "deflection", + "defniately", "definately", + "degenarate", "degenerate", + "degenerare", "degenerate", + "degenerite", "degenerate", + "degoratory", "derogatory", + "degraderad", "degraded", + "dehydraded", "dehydrated", + "dehyrdated", "dehydrated", + "deifnately", "definately", + "deisgnated", "designated", + "delaership", "dealership", + "delearship", "dealership", + "delegaties", "delegate", + "delegative", "delegate", + "delfection", "deflection", + "delibarate", "deliberate", + "deliberant", "deliberate", + "delibirate", "deliberate", + "deligthful", "delightful", + "deliverate", "deliberate", + "deliverees", "deliveries", + "deliviered", "delivered", + "deliviring", "delivering", + "delporable", "deplorable", + "delpoyment", "deployment", + "delutional", "delusional", + "dementieva", "dementia", + "deminsions", "dimensions", + "democracis", "democracies", + "democracts", "democrat", + "democratas", "democrats", + "democrates", "democrats", + "demograhic", "demographic", + "demographs", "demographics", + "demograpic", "demographic", + "demolation", "demolition", + "demolicion", "demolition", + "demolision", "demolition", + "demolitian", "demolition", + "demoliting", "demolition", + "demoloshed", "demolished", + "demolution", "demolition", + "demonished", "demolished", + "demonstate", "demonstrate", + "demonstras", "demonstrates", + "demorcracy", "democracy", + "denegerate", "degenerate", + "denominato", 
"denomination", + "denomintor", "denominator", + "deocrative", "decorative", + "deomcratic", "democratic", + "deparments", "departments", + "departmens", "departments", + "departmnet", "departments", + "depcitions", "depictions", + "depdending", "depending", + "depencency", "dependency", + "dependance", "dependence", + "dependancy", "dependency", + "dependandt", "dependant", + "dependends", "depended", + "dependened", "depended", + "dependenta", "dependant", + "dependente", "dependence", + "depicitons", "depictions", + "deplorabel", "deplorable", + "deplorabil", "deplorable", + "deplorible", "deplorable", + "deplyoment", "deployment", + "depolyment", "deployment", + "depositers", "deposits", + "depressief", "depressive", + "depressies", "depressive", + "deprivaton", "deprivation", + "deragotory", "derogatory", + "derivaties", "derivatives", + "deriviated", "derived", + "derivitave", "derivative", + "derivitive", "derivative", + "derogatary", "derogatory", + "derogatery", "derogatory", + "derogetory", "derogatory", + "derogitory", "derogatory", + "derogotary", "derogatory", + "derogotory", "derogatory", + "derviative", "derivative", + "descendats", "descendants", + "descendend", "descended", + "descenting", "descending", + "descerning", "descending", + "descipable", "despicable", + "descisions", "decisions", + "descriibes", "describes", + "descripton", "description", + "desginated", "designated", + "desigining", "designing", + "desireable", "desirable", + "desktopbsd", "desktops", + "despciable", "despicable", + "desperatly", "desperately", + "desperetly", "desperately", + "despicaple", "despicable", + "despicible", "despicable", + "dessicated", "desiccated", + "destinatin", "destinations", + "destinaton", "destination", + "destoryers", "destroyers", + "destorying", "destroying", + "destroyeds", "destroyers", + "destroyeer", "destroyers", + "destrucion", "destruction", + "destrucive", "destructive", + "destryoing", "destroying", + "detectarlo", "detector", + "detectaron", "detector", + "detectoare", "detector", + "determinas", "determines", + "determinig", "determining", + "determinsm", "determinism", + "deutschand", "deutschland", + "devastaded", "devastated", + "devastaing", "devastating", + "devastanti", "devastating", + "devasteted", "devastated", + "develepors", "developers", + "develoeprs", "developers", + "developmet", "developments", + "developors", "develops", + "developped", "developed", + "developres", "develops", + "develpment", "development", + "devestated", "devastated", + "devolvendo", "devolved", + "deyhdrated", "dehydrated", + "diagnosied", "diagnose", + "diagnosies", "diagnosis", + "diagnositc", "diagnostic", + "diagnossed", "diagnose", + "diagnosted", "diagnose", + "diagnotics", "diagnostic", + "diagonstic", "diagnostic", + "dichotomoy", "dichotomy", + "dicitonary", "dictionary", + "diconnects", "disconnects", + "dicovering", "discovering", + "dictateurs", "dictates", + "dictionare", "dictionaries", + "differance", "difference", + "differenly", "differently", + "differense", "differences", + "differente", "difference", + "differentl", "differential", + "differenty", "differently", + "differnece", "difference", + "difficulte", "difficulties", + "difficults", "difficulties", + "difficutly", "difficulty", + "diffuculty", "difficulty", + "diganostic", "diagnostic", + "dimensinal", "dimensional", + "dimentions", "dimensions", + "dimesnions", "dimensions", + "dimineshes", "diminishes", + "diminising", "diminishing", + "dimunitive", "diminutive", + "dinosaures", "dinosaurs", + 
"dinosaurus", "dinosaurs", + "dipections", "depictions", + "diplimatic", "diplomatic", + "diplomacia", "diplomatic", + "diplomancy", "diplomacy", + "dipolmatic", "diplomatic", + "directinla", "directional", + "directionl", "directional", + "directivos", "directions", + "directores", "directors", + "directorys", "directors", + "directsong", "directions", + "disaapoint", "disappoint", + "disagreeed", "disagreed", + "disapeared", "disappeared", + "disappeard", "disappeared", + "disappered", "disappeared", + "disappiont", "disappoint", + "disaproval", "disapproval", + "disastorus", "disastrous", + "disastrosa", "disastrous", + "disastrose", "disastrous", + "disastrosi", "disastrous", + "disastroso", "disastrous", + "disaterous", "disastrous", + "discalimer", "disclaimer", + "discapline", "discipline", + "discepline", "discipline", + "disception", "discretion", + "discharded", "discharged", + "disciplers", "disciples", + "disciplies", "disciplines", + "disciplins", "disciplines", + "disciprine", "discipline", + "disclamier", "disclaimer", + "discliamer", "disclaimer", + "disclipine", "discipline", + "disclousre", "disclosure", + "disclsoure", "disclosure", + "discograhy", "discography", + "discograpy", "discography", + "discolsure", "disclosure", + "disconenct", "disconnect", + "disconncet", "disconnects", + "disconnets", "disconnects", + "discontued", "discounted", + "discoruage", "discourages", + "discources", "discourse", + "discourgae", "discourages", + "discourges", "discourages", + "discoveres", "discovers", + "discoveryd", "discovered", + "discoverys", "discovers", + "discrecion", "discretion", + "discreddit", "discredited", + "discrepany", "discrepancy", + "discresion", "discretion", + "discreting", "discretion", + "discribing", "describing", + "discrimine", "discriminate", + "discrouage", "discourages", + "discrption", "discretion", + "discusison", "discussions", + "discusting", "discussing", + "disgracful", "disgraceful", + "disgrunted", "disgruntled", + "disgruntld", "disgruntled", + "disguisted", "disguise", + "disgustiny", "disgustingly", + "disgustosa", "disgusts", + "disgustose", "disgusts", + "disgustosi", "disgusts", + "disgustoso", "disgusts", + "dishcarged", "discharged", + "dishinored", "dishonored", + "disicpline", "discipline", + "disiplined", "disciplined", + "dislcaimer", "disclaimer", + "dismanteld", "dismantled", + "dismanting", "dismantling", + "dismentled", "dismantled", + "dispecable", "despicable", + "dispencary", "dispensary", + "dispencers", "dispenser", + "dispencing", "dispensing", + "dispensare", "dispenser", + "dispensory", "dispensary", + "dispesnary", "dispensary", + "dispicable", "despicable", + "displayfps", "displays", + "dispositon", "disposition", + "dispostion", "disposition", + "disputerad", "disputed", + "disrecpect", "disrespect", + "disrection", "discretion", + "disrepsect", "disrespect", + "disresepct", "disrespect", + "disrespekt", "disrespect", + "disription", "disruption", + "disrispect", "disrespect", + "disrputing", "disrupting", + "disruptivo", "disruption", + "disruptron", "disruption", + "dissapears", "disappears", + "dissappear", "disappear", + "disscusion", "discussion", + "dissmisive", "dismissive", + "dissodance", "dissonance", + "dissonante", "dissonance", + "dissonence", "dissonance", + "distastful", "distasteful", + "disticntly", "distinctly", + "distiction", "distinction", + "distincion", "distinction", + "distincive", "distinctive", + "distinclty", "distinctly", + "distinctie", "distinctive", + "distinctin", "distinctions", + 
"distingish", "distinguish", + "distingush", "distinguish", + "distintcly", "distinctly", + "distoriton", "distortion", + "distorsion", "distortion", + "distortian", "distortion", + "distortron", "distortion", + "distractes", "distracts", + "distractia", "district", + "distractin", "district", + "distractiv", "district", + "distration", "distortion", + "distribuem", "distribute", + "distribuer", "distribute", + "distribuie", "distribute", + "distribuit", "distribute", + "distributs", "distributors", + "distribuye", "distribute", + "distrotion", "distortion", + "distrubing", "disturbing", + "distrubtes", "distrust", + "distrubute", "distribute", + "distubring", "disturbing", + "disturbace", "disturbance", + "disturping", "disrupting", + "disucssing", "discussing", + "disucssion", "discussion", + "disurption", "disruption", + "ditributed", "distributed", + "diversifiy", "diversify", + "dividendes", "dividends", + "dividendos", "dividends", + "divideneds", "dividend", + "divinition", "divination", + "divinitory", "divinity", + "divisiones", "divisions", + "dobulelift", "doublelift", + "doccuments", "documents", + "documentry", "documentary", + "dogmatisch", "dogmatic", + "dolphinese", "dolphins", + "domianting", "dominating", + "domimation", "domination", + "dominacion", "domination", + "dominaters", "dominates", + "donwgraded", "downgraded", + "donwloaded", "downloaded", + "donwvoters", "downvoters", + "donwvoting", "downvoting", + "doomsdaily", "doomsday", + "doubellift", "doublelift", + "doubleiift", "doublelift", + "doubleleft", "doublelift", + "doublelfit", "doublelift", + "doublerift", "doublelift", + "doulbelift", "doublelift", + "downgarded", "downgraded", + "downgrated", "downgrade", + "downlaoded", "downloaded", + "downloadas", "downloads", + "downloades", "downloads", + "downovting", "downvoting", + "downroaded", "downgraded", + "downsiders", "downsides", + "downstaris", "downstairs", + "downstiars", "downstairs", + "downtokers", "downvoters", + "downtoking", "downvoting", + "downtraded", "downgraded", + "downviting", "downvoting", + "downvotear", "downvoters", + "downvoteas", "downvoters", + "downvoteds", "downvoters", + "downvotees", "downvoters", + "downvotesd", "downvoters", + "downvotess", "downvoters", + "downvotest", "downvoters", + "downvoteur", "downvoters", + "downvoties", "downvoters", + "downvotres", "downvoters", + "downvotted", "downvote", + "downvottes", "downvoters", + "downwoters", "downvoters", + "downwoting", "downvoting", + "drasticaly", "drastically", + "drasticlly", "drastically", + "draughtman", "draughtsman", + "dumbbellls", "dumbbells", + "dumbfouded", "dumbfounded", + "dumbfouned", "dumbfounded", + "dungeoness", "dungeons", + "dupilcates", "duplicates", + "duplicants", "duplicates", + "duplicatas", "duplicates", + "duplicitas", "duplicates", + "duplifaces", "duplicates", + "durabiltiy", "durability", + "dyamically", "dynamically", + "dynamicaly", "dynamically", + "dynamicdns", "dynamics", + "dynamiclly", "dynamically", + "dynamicpsf", "dynamics", + "dynamitage", "dynamite", + "dysfuncion", "dysfunction", + "earhtbound", "earthbound", + "earthqauke", "earthquake", + "earthquack", "earthquake", + "earthquaks", "earthquakes", + "earthquate", "earthquake", + "earthqukes", "earthquakes", + "easthetics", "aesthetics", + "ecoligical", "ecological", + "ecomonical", "economical", + "econimical", "economical", + "econimists", "economists", + "economicas", "economics", + "economicos", "economics", + "economicus", "economics", + "economisch", "economic", + 
"economisit", "economists", + "effeciency", "efficiency", + "effectivly", "effectively", + "efficeincy", "efficiency", + "efficently", "efficiently", + "efficiancy", "efficiency", + "efficienct", "efficient", + "efficienty", "efficiently", + "egotistcal", "egotistical", + "ehtnically", "ethnically", + "ejaculaion", "ejaculation", + "ejaculatie", "ejaculate", + "ejaculatin", "ejaculation", + "ejaculaton", "ejaculation", + "ejaculatte", "ejaculate", + "electircal", "electrical", + "electivite", "elective", + "electoraat", "electorate", + "electorale", "electorate", + "electorite", "electorate", + "electornic", "electronic", + "electrican", "electrician", + "electriciy", "electricity", + "electricty", "electricity", + "electrinic", "electrician", + "electroate", "electorate", + "electrodan", "electron", + "electroinc", "electron", + "electrolye", "electrolytes", + "electroman", "electron", + "electroncs", "electrons", + "electrones", "electrons", + "electronik", "election", + "electronis", "electronics", + "electronix", "election", + "elemantary", "elementary", + "elementery", "elementary", + "elementray", "elementary", + "eleminated", "eliminated", + "elephantes", "elephants", + "elephantis", "elephants", + "elephantos", "elephants", + "elephantus", "elephants", + "eletricity", "electricity", + "elimanates", "eliminates", + "elimenates", "eliminates", + "elimentary", "elementary", + "elimimates", "eliminates", + "eliminaste", "eliminates", + "eliminatin", "elimination", + "eliminaton", "elimination", + "eliminster", "eliminates", + "ellipitcal", "elliptical", + "ellipsical", "elliptical", + "ellipticle", "elliptical", + "ellitpical", "elliptical", + "ellpitical", "elliptical", + "eloquantly", "eloquently", + "eloquintly", "eloquently", + "emapthetic", "empathetic", + "embarassed", "embarrassed", + "embarassig", "embarassing", + "embarrased", "embarrassed", + "embarrases", "embarrassed", + "embezelled", "embezzled", + "emblamatic", "emblematic", + "embodyment", "embodiment", + "emergenies", "emergencies", + "emmigrated", "emigrated", + "emminently", "eminently", + "emmisaries", "emissaries", + "emobdiment", "embodiment", + "emotionaly", "emotionally", + "empahsized", "emphasized", + "empahsizes", "emphasizes", + "empathatic", "empathetic", + "emphacized", "emphasized", + "emphatetic", "empathetic", + "emphatised", "emphasized", + "emphatized", "emphasized", + "emphatizes", "emphasizes", + "emphazised", "emphasized", + "emphazises", "emphasizes", + "emphesized", "emphasized", + "emphesizes", "emphasizes", + "emphisized", "emphasized", + "emphisizes", "emphasizes", + "empiricaly", "empirically", + "employeers", "employees", + "employeurs", "employer", + "emprisoned", "imprisoned", + "encahnting", "enchanting", + "enchancing", "enchanting", + "enchanging", "enchanting", + "enchantent", "enchantment", + "enchantmet", "enchantments", + "encompases", "encompasses", + "encounterd", "encountered", + "encountred", "encountered", + "encouraing", "encouraging", + "encoutners", "encounters", + "encription", "encryption", + "encrpytion", "encryption", + "encyrption", "encryption", + "endlessley", "endlessly", + "endolithes", "endoliths", + "enforceing", "enforcing", + "engagemnet", "engagements", + "engagemnts", "engagements", + "engieneers", "engineers", + "enginereed", "engineered", + "enivitable", "inevitable", + "enlargment", "enlargement", + "enlighment", "enlighten", + "enlightend", "enlightened", + "enlightned", "enlightened", + "enrolement", "enrollment", + "enrollemnt", "enrollment", + 
"enterpirse", "enterprise", + "enterprice", "enterprise", + "enterpries", "enterprises", + "enterprize", "enterprise", + "enterprsie", "enterprises", + "enterrpise", "enterprises", + "entertaing", "entertaining", + "enthically", "ethnically", + "enthisiast", "enthusiast", + "enthuiasts", "enthusiast", + "enthuisast", "enthusiasts", + "enthusiams", "enthusiasm", + "enthusiant", "enthusiast", + "enthusiats", "enthusiast", + "enthusiest", "enthusiast", + "enthusists", "enthusiasts", + "envelopped", "envelope", + "enveloppen", "envelope", + "enveloppes", "envelope", + "enviorment", "environment", + "enviroment", "environment", + "environmet", "environments", + "equiavlent", "equivalents", + "equilavent", "equivalent", + "equilibium", "equilibrium", + "equilibrim", "equilibrium", + "equilibrum", "equilibrium", + "equippment", "equipment", + "equitorial", "equatorial", + "equivalant", "equivalent", + "equivalnce", "equivalence", + "equivalnet", "equivalents", + "equivelant", "equivalent", + "equivelent", "equivalent", + "equivilant", "equivalent", + "equivilent", "equivalent", + "equivlaent", "equivalents", + "equivolent", "equivalent", + "eratically", "erratically", + "escalative", "escalate", + "escavation", "escalation", + "esitmation", "estimation", + "esoterisch", "esoteric", + "especailly", "especially", + "espeically", "especially", + "espressino", "espresso", + "espression", "espresso", + "essencials", "essentials", + "essensials", "essentials", + "essentails", "essentials", + "essentialy", "essentially", + "essentiels", "essentials", + "essentuals", "essentials", + "estabishes", "establishes", + "estimacion", "estimation", + "estimativo", "estimation", + "estination", "estimation", + "ethicallly", "ethically", + "ethincally", "ethnically", + "ethnicites", "ethnicities", + "ethnicitiy", "ethnicity", + "euphorical", "euphoria", + "euphorisch", "euphoric", + "euthanaisa", "euthanasia", + "euthanazia", "euthanasia", + "euthanesia", "euthanasia", + "evaluacion", "evaluation", + "evalutaion", "evaluation", + "evaulating", "evaluating", + "evaulation", "evaluation", + "eventaully", "eventually", + "eventially", "eventually", + "everyoneis", "everyones", + "exacberate", "exacerbated", + "exagerated", "exaggerated", + "exagerates", "exaggerates", + "exagerrate", "exaggerate", + "exaggarate", "exaggerate", + "exaggurate", "exaggerate", + "exahusting", "exhausting", + "exahustion", "exhaustion", + "examinated", "examined", + "examinerad", "examined", + "exapansion", "expansion", + "exapnsions", "expansions", + "exauhsting", "exhausting", + "exauhstion", "exhaustion", + "excecuting", "executing", + "excecution", "execution", + "exceedigly", "exceedingly", + "exceedinly", "exceedingly", + "excellance", "excellence", + "excellenet", "excellence", + "excellenze", "excellence", + "excerising", "exercising", + "excessivly", "excessively", + "exchangees", "exchanges", + "excitiment", "excitement", + "exclsuives", "exclusives", + "exclusivas", "exclusives", + "exclusivly", "exclusively", + "exclusivos", "exclusives", + "exclusivty", "exclusivity", + "exclussive", "exclusives", + "exclusvies", "exclusives", + "excpetions", "exceptions", + "exculsives", "exclusives", + "exculsivly", "exclusively", + "execptions", "exceptions", + "exectuable", "executable", + "exectuions", "executions", + "exectuives", "executives", + "execusions", "executions", + "executabil", "executable", + "executible", "executable", + "executiner", "executioner", + "executings", "executions", + "executivas", "executives", + 
"exeedingly", "exceedingly", + "exepmtions", "exemptions", + "exeptional", "exceptional", + "exercicing", "exercising", + "exercizing", "exercising", + "exersicing", "exercising", + "exersising", "exercising", + "exersizing", "exercising", + "exerternal", "external", + "exeuctions", "executions", + "exhasuting", "exhausting", + "exhasution", "exhaustion", + "exhaustivo", "exhaustion", + "exhibicion", "exhibition", + "exhibitons", "exhibits", + "exhuasting", "exhausting", + "exhuastion", "exhaustion", + "exibitions", "exhibitions", + "exictement", "excitement", + "exipration", "expiration", + "existantes", "existent", + "existenial", "existential", + "existental", "existential", + "exlcusives", "exclusives", + "exorbatant", "exorbitant", + "exorbatent", "exorbitant", + "exorbidant", "exorbitant", + "exorbirant", "exorbitant", + "exorbitent", "exorbitant", + "expalining", "explaining", + "expanisons", "expansions", + "expansivos", "expansions", + "expanssion", "expansions", + "expantions", "expansions", + "expecially", "especially", + "expectaion", "expectation", + "expectansy", "expectancy", + "expectency", "expectancy", + "expections", "exceptions", + "expedetion", "expedition", + "expedicion", "expedition", + "expeditivo", "expedition", + "expeiments", "experiments", + "expemtions", "exemptions", + "expendeble", "expendable", + "expendible", "expendable", + "expensable", "expendable", + "expentancy", "expectancy", + "expereince", "experience", + "experement", "experiment", + "experiance", "experience", + "experieced", "experienced", + "experieces", "experiences", + "experiemnt", "experiment", + "experiened", "experienced", + "experiense", "experiences", + "expermient", "experiments", + "experssion", "expression", + "expextancy", "expectancy", + "expidetion", "expedition", + "expierence", "experience", + "expination", "expiration", + "expirement", "experiment", + "explanatin", "explanations", + "explicatia", "explicit", + "explicatie", "explicit", + "explicatif", "explicit", + "explicatii", "explicit", + "explicetly", "explicitly", + "explicilty", "explicitly", + "explioting", "exploiting", + "exploiding", "exploiting", + "exploition", "exploiting", + "explorarea", "explorer", + "exploreres", "explorers", + "explosivas", "explosives", + "explossion", "explosions", + "explossive", "explosives", + "explosvies", "explosives", + "explotions", "explosions", + "explusions", "explosions", + "expodition", "exposition", + "expoliting", "exploiting", + "expolsions", "explosions", + "expolsives", "explosives", + "exponental", "exponential", + "exposicion", "exposition", + "expositivo", "exposition", + "expotition", "exposition", + "exprensive", "expressive", + "expresions", "expression", + "expresison", "expressions", + "expressens", "expresses", + "expressief", "expressive", + "expressley", "expressly", + "expriation", "expiration", + "extensivly", "extensively", + "extentions", "extensions", + "exterioara", "exterior", + "exterioare", "exterior", + "extermally", "externally", + "extermists", "extremists", + "extraccion", "extraction", + "extractivo", "extraction", + "extractnow", "extraction", + "extradtion", "extraction", + "extremaste", "extremes", + "extremeley", "extremely", + "extremelly", "extremely", + "extrememly", "extremely", + "extremests", "extremists", + "extremised", "extremes", + "extremisim", "extremism", + "extremisme", "extremes", + "extremiste", "extremes", + "extrenally", "externally", + "extrimists", "extremists", + "eyeballers", "eyeballs", + "fabriacted", "fabricated", + 
"fabricatie", "fabricated", + "faciliated", "facilitated", + "facilitait", "facilitate", + "facilitant", "facilitate", + "facilitare", "facilitate", + "facisnated", "fascinated", + "facitilies", "facilities", + "facsinated", "fascinated", + "fahernheit", "fahrenheit", + "fahrenhiet", "fahrenheit", + "fallatious", "fallacious", + "fallicious", "fallacious", + "falshbacks", "flashbacks", + "familiarty", "familiarity", + "familiarze", "familiarize", + "fanaticals", "fanatics", + "fanfaction", "fanfiction", + "fanfcition", "fanfiction", + "fanficiton", "fanfiction", + "fanserivce", "fanservice", + "fanservise", "fanservice", + "fanservive", "fanservice", + "fantasiose", "fantasies", + "farehnheit", "fahrenheit", + "farhenheit", "fahrenheit", + "fascianted", "fascinated", + "fascinatie", "fascinated", + "fascinatin", "fascination", + "fascistisk", "fascists", + "fatalaties", "fatalities", + "favoruites", "favourites", + "favourates", "favourites", + "favouritsm", "favourites", + "favourties", "favourites", + "federacion", "federation", + "federativo", "federation", + "fellowhsip", "fellowship", + "fellowshop", "fellowship", + "feminimity", "femininity", + "feministas", "feminists", + "feminitity", "femininity", + "fermentato", "fermentation", + "fertalizer", "fertilizer", + "fertelizer", "fertilizer", + "fertilizar", "fertilizer", + "fertilzier", "fertilizer", + "fertiziler", "fertilizer", + "festivales", "festivals", + "fetishiste", "fetishes", + "ficticious", "fictitious", + "filessytem", "filesystem", + "filesytems", "filesystem", + "filmamkers", "filmmakers", + "filmmakare", "filmmakers", + "finallizes", "finalizes", + "financialy", "financially", + "fingernals", "fingernails", + "fingerpies", "fingertips", + "fingerpint", "fingerprint", + "fingertaps", "fingertips", + "fingertits", "fingertips", + "fingertops", "fingertips", + "fireballls", "fireballs", + "firefigher", "firefighter", + "firefigter", "firefighter", + "firendlies", "friendlies", + "firghtened", "frightened", + "fisionable", "fissionable", + "flashligth", "flashlight", + "flaskbacks", "flashbacks", + "flawleslly", "flawlessly", + "flexibiliy", "flexibility", + "flexibilty", "flexibility", + "flimmakers", "filmmakers", + "fluctuatie", "fluctuate", + "fluctuatin", "fluctuations", + "flutterhsy", "fluttershy", + "fluttersky", "fluttershy", + "flutterspy", "fluttershy", + "forcifully", "forcefully", + "forecfully", "forcefully", + "foreginers", "foreigners", + "foregorund", "foreground", + "foreignese", "foreigners", + "foreigness", "foreigners", + "foreignors", "foreigners", + "foreingers", "foreigners", + "forensisch", "forensic", + "foreseeble", "foreseeable", + "forgeiners", "foreigners", + "forgieners", "foreigners", + "forgivance", "forgiven", + "forgivenss", "forgiveness", + "forgotting", "forgetting", + "foriegners", "foreigners", + "formadible", "formidable", + "formalhaut", "fomalhaut", + "formallity", "formally", + "formallize", "formalize", + "formatiing", "formatting", + "formatings", "formations", + "formativos", "formations", + "formidabel", "formidable", + "formidabil", "formidable", + "formidible", "formidable", + "forminable", "formidable", + "formitable", "formidable", + "formuladas", "formulas", + "formulados", "formulas", + "forseeable", "foreseeable", + "fortelling", "foretelling", + "fortunatly", "fortunately", + "fortunetly", "fortunately", + "foundaiton", "foundations", + "foundaries", "foundries", + "foundatoin", "foundations", + "fractalers", "fractals", + "fractalius", "fractals", + "fractalpus", 
"fractals", + "fracturare", "fracture", + "fragmanted", "fragment", + "francaises", "franchises", + "franchices", "franchises", + "franchines", "franchises", + "franchizes", "franchises", + "franchsies", "franchises", + "fransiscan", "franciscan", + "franticaly", "frantically", + "franticlly", "frantically", + "fraternaty", "fraternity", + "fraternety", "fraternity", + "fraterntiy", "fraternity", + "fraturnity", "fraternity", + "fraudalent", "fraudulent", + "fraudelant", "fraudulent", + "fraudelent", "fraudulent", + "fraudolent", "fraudulent", + "fraudulant", "fraudulent", + "freedomers", "freedoms", + "freedomest", "freedoms", + "freindlies", "friendlies", + "freindship", "friendship", + "frequencey", "frequency", + "friednship", "friendships", + "friednzone", "friendzoned", + "friendhsip", "friendship", + "friendsies", "friendlies", + "friendzies", "friendlies", + "friendzond", "friendzoned", + "frientship", "friendship", + "frigthened", "frightened", + "fromatting", "formatting", + "fromidable", "formidable", + "frontlinie", "frontline", + "fruadulent", "fraudulent", + "frustraded", "frustrated", + "frustradet", "frustrates", + "frustraits", "frustrates", + "frustrants", "frustrates", + "frustratin", "frustration", + "frustrsted", "frustrates", + "fucntional", "functional", + "fulfulling", "fulfilling", + "fullfiling", "fulfilling", + "fullfilled", "fulfilled", + "fullscrean", "fullscreen", + "fulttershy", "fluttershy", + "funcitonal", "functional", + "fundametal", "fundamental", + "furstrated", "frustrated", + "furstrates", "frustrates", + "furutistic", "futuristic", + "futhermore", "furthermore", + "futurestic", "futuristic", + "futurisitc", "futuristic", + "futurustic", "futuristic", + "galvinized", "galvanized", + "garuanteed", "guaranteed", + "garuantees", "guarantees", + "gauntanamo", "guantanamo", + "gauntlents", "gauntlet", + "gauranteed", "guaranteed", + "gaurantees", "guarantees", + "gaurenteed", "guaranteed", + "gaurentees", "guarantees", + "generalice", "generalize", + "generalife", "generalize", + "generalnie", "generalize", + "generaters", "generates", + "generaties", "generate", + "generatios", "generators", + "generatons", "generators", + "generatore", "generate", + "generelize", "generalize", + "generocity", "generosity", + "generoisty", "generosity", + "generostiy", "generosity", + "geneticaly", "genetically", + "geneticlly", "genetically", + "genitalias", "genitals", + "genuinelly", "genuinely", + "geographia", "geographical", + "geogrpahic", "geographic", + "germanisch", "germanic", + "gigantisch", "gigantic", + "gimmickers", "gimmicks", + "girlfirend", "girlfriend", + "girlfreind", "girlfriend", + "girlfriens", "girlfriends", + "girlfrinds", "girlfriends", + "girlfrined", "girlfriends", + "goalkeaper", "goalkeeper", + "goalkeeprs", "goalkeeper", + "goalkepeer", "goalkeeper", + "goegraphic", "geographic", + "golakeeper", "goalkeeper", + "goldburger", "goldberg", + "goosebumbs", "goosebumps", + "goosegumps", "goosebumps", + "goosepumps", "goosebumps", + "gothenberg", "gothenburg", + "govenrment", "government", + "govermenet", "goverment", + "govermnent", "governments", + "governemnt", "government", + "governened", "governed", + "governered", "governed", + "governmant", "governmental", + "governmetn", "governments", + "governmnet", "government", + "govnerment", "government", + "govornment", "government", + "gradiating", "graduating", + "gradiation", "graduation", + "graduacion", "graduation", + "grapefriut", "grapefruit", + "grapefrukt", "grapefruit", + "graphicaly", 
"graphically", + "graphiclly", "graphically", + "gratituous", "gratuitous", + "gratiutous", "gratuitous", + "gratuidous", "gratuitous", + "gratuituos", "gratuitous", + "gratutious", "gratuitous", + "graudating", "graduating", + "graudation", "graduation", + "gravitatie", "gravitate", + "greatfully", "gratefully", + "greenhosue", "greenhouse", + "greviances", "grievances", + "grievences", "grievances", + "grilfriend", "girlfriend", + "guaduloupe", "guadalupe", + "guanatanmo", "guantanamo", + "guantamamo", "guantanamo", + "guantamano", "guantanamo", + "guantanano", "guantanamo", + "guantanemo", "guantanamo", + "guantanoma", "guantanamo", + "guantanomo", "guantanamo", + "guantonamo", "guantanamo", + "guarantess", "guarantees", + "guardiands", "guardians", + "guardianes", "guardians", + "guardianis", "guardians", + "guarenteed", "guaranteed", + "guarentees", "guarantees", + "guarnateed", "guaranteed", + "guarnatees", "guarantees", + "guarunteed", "guaranteed", + "guaruntees", "guarantees", + "guatamalan", "guatemalan", + "gunatanamo", "guantanamo", + "gunlsinger", "gunslinger", + "gunsiinger", "gunslinger", + "gunslanger", "gunslinger", + "gunsligner", "gunslinger", + "gunstinger", "gunslinger", + "gymanstics", "gymnastics", + "gymnasitcs", "gymnastics", + "gynmastics", "gymnastics", + "haemorrage", "haemorrhage", + "halloweeen", "halloween", + "hambergers", "hamburgers", + "hamburgare", "hamburger", + "hamburgesa", "hamburgers", + "hamburgles", "hamburgers", + "hamburgurs", "hamburgers", + "handcuffes", "handcuffs", + "handelbars", "handlebars", + "handicaped", "handicapped", + "handwritng", "handwriting", + "harasments", "harassments", + "hardlinked", "hardline", + "harmoniacs", "harmonic", + "harmonisch", "harmonic", + "harrasment", "harassment", + "harrassing", "harassing", + "harvasting", "harvesting", + "haversting", "harvesting", + "headhpones", "headphones", + "headphoens", "headphones", + "headquarer", "headquarter", + "headquater", "headquarter", + "headshoots", "headshot", + "healtchare", "healthcare", + "healtheast", "healthiest", + "healthyest", "healthiest", + "heapdhones", "headphones", + "heartbeart", "heartbeat", + "heartbeast", "heartbeat", + "heartborne", "heartbroken", + "heartbrake", "heartbreak", + "hearthsone", "hearthstone", + "heatlhcare", "healthcare", + "heavyweght", "heavyweight", + "heavyweigt", "heavyweight", + "hedgehodge", "hedgehog", + "heidelburg", "heidelberg", + "heigthened", "heightened", + "heistation", "hesitation", + "helathcare", "healthcare", + "helicopers", "helicopters", + "helicoptor", "helicopter", + "helicotper", "helicopters", + "helicpoter", "helicopter", + "helictoper", "helicopters", + "helikopter", "helicopter", + "hemingwary", "hemingway", + "hemingwavy", "hemingway", + "hemipshere", "hemisphere", + "hemishpere", "hemisphere", + "hemmorhage", "hemorrhage", + "hempishere", "hemisphere", + "herculeans", "hercules", + "herculeasy", "hercules", + "herculeees", "hercules", + "hesitstion", "hesitation", + "hestiation", "hesitation", + "hieghtened", "heightened", + "hierachies", "hierarchies", + "hieroglphs", "hieroglyphs", + "highalnder", "highlander", + "highlighed", "highlighted", + "highligted", "highlighted", + "highloader", "highlander", + "highpander", "highlander", + "highscholl", "highschool", + "highshcool", "highschool", + "hillarious", "hilarious", + "hinderance", "hindrance", + "hinderence", "hindrance", + "hipsterest", "hipsters", + "hispanicos", "hispanics", + "hispanicus", "hispanics", + "histarical", "historical", + "histerical", 
"historical", + "historiaan", "historians", + "historicas", "historians", + "historicly", "historical", + "historiens", "histories", + "historisch", "historic", + "hoemopathy", "homeopathy", + "hollywoood", "hollywood", + "homecuming", "homecoming", + "homeoapthy", "homeopathy", + "homeonwers", "homeowners", + "homeopahty", "homeopathy", + "homeophaty", "homeopathy", + "homeopothy", "homeopathy", + "homeothapy", "homeopathy", + "homepoathy", "homeopathy", + "homewoners", "homeowners", + "homoepathy", "homeopathy", + "homogeneos", "homogeneous", + "homogeneus", "homogeneous", + "homophibia", "homophobia", + "homophibic", "homophobic", + "homophobie", "homophobe", + "homophonia", "homophobia", + "homophopia", "homophobia", + "homophopic", "homophobic", + "homosexaul", "homosexual", + "homosexuel", "homosexual", + "honeymooon", "honeymoon", + "hopefullly", "hopefully", + "hopeleslly", "hopelessly", + "horisontal", "horizontal", + "horizantal", "horizontal", + "horizontes", "horizons", + "horiztonal", "horizontal", + "horrendeus", "horrendous", + "horriblely", "horribly", + "hospitales", "hospitals", + "hospitalty", "hospitality", + "hospitible", "hospitable", + "hsitorians", "historians", + "humanaties", "humanities", + "humanitary", "humanity", + "humiliatin", "humiliation", + "humiliaton", "humiliation", + "humilitied", "humiliated", + "humillated", "humiliated", + "hurricance", "hurricane", + "hurriganes", "hurricanes", + "hurrikanes", "hurricanes", + "hurrycanes", "hurricanes", + "hydropilic", "hydrophilic", + "hydropobic", "hydrophobic", + "hyperbolie", "hyperbole", + "hyperlobic", "hyperbolic", + "hyperlogic", "hyperbolic", + "hypertrohy", "hypertrophy", + "hypertropy", "hypertrophy", + "hyphotesis", "hypothesis", + "hypocrates", "hypocrites", + "hypocriscy", "hypocrisy", + "hypocrises", "hypocrites", + "hypocritus", "hypocrites", + "hypocrties", "hypocrites", + "hypocrytes", "hypocrites", + "hypokrites", "hypocrites", + "hypothecis", "hypothesis", + "hypotheiss", "hypotheses", + "hypothesus", "hypotheses", + "hypothises", "hypotheses", + "hypothisis", "hypothesis", + "hypothosis", "hypothesis", + "hyprocites", "hypocrites", + "hystarical", "hysterical", + "hystericly", "hysterical", + "hysteriska", "hysteria", + "ibuprofein", "ibuprofen", + "ibuprofine", "ibuprofen", + "icelandinc", "icelandic", + "idealisitc", "idealistic", + "idealogies", "ideologies", + "identicial", "identical", + "identifyed", "identified", + "identitets", "identities", + "ideolagies", "ideologies", + "ideoligies", "ideologies", + "ideologias", "ideologies", + "ideologice", "ideologies", + "ideologije", "ideologies", + "ideologins", "ideologies", + "ideologisk", "ideologies", + "ideolouges", "ideologies", + "illegalest", "illegals", + "illegallly", "illegally", + "illegimacy", "illegitimacy", + "illegitime", "illegitimate", + "illegitimt", "illegitimate", + "illimunati", "illuminati", + "illinoians", "illinois", + "illistrate", "illiterate", + "illitarate", "illiterate", + "illitirate", "illiterate", + "illumanati", "illuminati", + "illumaniti", "illuminati", + "illumianti", "illuminati", + "illumimati", "illuminati", + "illuminaci", "illuminati", + "illuminadi", "illuminati", + "illuminami", "illuminati", + "illuminazi", "illuminati", + "illuminite", "illuminati", + "illuminiti", "illuminati", + "illuminoti", "illuminati", + "illuminuti", "illuminati", + "illumniati", "illuminati", + "illumunati", "illuminati", + "illuninati", "illuminati", + "illusiones", "illusions", + "illustrant", "illustrate", + 
"illustrare", "illustrate", + "illustrato", "illustration", + "imablanced", "imbalanced", + "imablances", "imbalances", + "imaginatie", "imaginative", + "imaginaton", "imagination", + "imaginitve", "imaginative", + "imbalenced", "imbalanced", + "imbalences", "imbalances", + "imcomplete", "incomplete", + "imediately", "immediately", + "imigration", "emigration", + "immaturaty", "immaturity", + "immaturety", "immaturity", + "immedeatly", "immediately", + "immediatly", "immediately", + "immedietly", "immediately", + "immenseley", "immensely", + "immidately", "immediately", + "immigranti", "immigration", + "immigrents", "immigrants", + "immitating", "imitating", + "immobilien", "immobile", + "immobilier", "immobile", + "immobilzed", "immobile", + "immobilzer", "immobile", + "immobilzes", "immobile", + "immortales", "immortals", + "immortalis", "immortals", + "immortaliy", "immortality", + "immortalls", "immortals", + "immortalty", "immortality", + "impartirla", "impartial", + "impecabbly", "impeccably", + "impeccible", "impeccable", + "impeckable", "impeccable", + "impelments", "implements", + "imperetive", "imperative", + "imperialsm", "imperialism", + "imperialst", "imperialist", + "imperitave", "imperative", + "imperitive", "imperative", + "implaments", "implements", + "implantase", "implants", + "implausble", "implausible", + "implausibe", "implausible", + "implemenet", "implements", + "implicatia", "implicit", + "implicatie", "implicit", + "implicatii", "implicit", + "implicetly", "implicitly", + "impliciete", "implicit", + "implicilty", "implicitly", + "impliments", "implements", + "imporbable", "improbable", + "importanly", "importantly", + "importanty", "importantly", + "importence", "importance", + "importerad", "imported", + "imporvised", "improvised", + "impossable", "impossible", + "impossbily", "impossibly", + "impossibal", "impossibly", + "impossibel", "impossibly", + "impossibry", "impossibly", + "impossibul", "impossibly", + "impractial", "impractical", + "impreative", "imperative", + "impresison", "impressions", + "impressoin", "impressions", + "impressons", "impressions", + "improbabil", "improbable", + "improbible", "improbable", + "impropable", "improbable", + "improsined", "imprisoned", + "improsoned", "imprisoned", + "improvemnt", "improvement", + "improvents", "improves", + "improvized", "improvised", + "imprsioned", "imprisoned", + "impulsemos", "impulses", + "imrpovised", "improvised", + "inablility", "inability", + "inaccruate", "inaccurate", + "inadaquate", "inadequate", + "inadaquete", "inadequate", + "inadecuate", "inadequate", + "inadeguate", "inadequate", + "inadeqaute", "inadequate", + "inadequete", "inadequate", + "inadequite", "inadequate", + "inadiquate", "inadequate", + "inagurated", "inaugurated", + "inbalanced", "imbalanced", + "inbetweeen", "inbetween", + "incarnaton", "incarnation", + "incentivos", "incentives", + "inchoerent", "incoherent", + "incidentes", "incidents", + "incidently", "incidentally", + "incidentul", "incidental", + "inclreased", "increased", + "incognitio", "incognito", + "incoherant", "incoherent", + "incohorent", "incoherent", + "incorectly", "incorrectly", + "incorrecly", "incorrectly", + "incorrecty", "incorrectly", + "incorretly", "incorrectly", + "incraments", "increments", + "incredable", "incredible", + "incredably", "incredibly", + "incremetal", "incremental", + "incriments", "increments", + "inctroduce", "introduce", + "indefenite", "indefinite", + "indefinate", "indefinite", + "indefinete", "indefinite", + "indefinity", 
"indefinitely", + "indeginous", "indigenous", + "indentical", "identical", + "independet", "independent", + "indepenent", "independent", + "inderictly", "indirectly", + "indicaters", "indicates", + "indicativo", "indication", + "indicatore", "indicate", + "indicitave", "indicative", + "indicitive", "indicative", + "indiffernt", "indifferent", + "indigenius", "indigenous", + "indiginous", "indigenous", + "indigneous", "indigenous", + "indikation", "indication", + "indireclty", "indirectly", + "indirektly", "indirectly", + "individuel", "individual", + "indiviudal", "individuals", + "indivudual", "individual", + "indoensian", "indonesian", + "indonasian", "indonesian", + "indoneisan", "indonesian", + "indonesean", "indonesian", + "indonesien", "indonesian", + "indonesion", "indonesian", + "indonisian", "indonesian", + "indonistan", "indonesian", + "indpendent", "independent", + "industiral", "industrial", + "industires", "industries", + "industrail", "industrial", + "industrees", "industries", + "industrias", "industries", + "industriel", "industrial", + "industrija", "industrial", + "industrije", "industries", + "indviduals", "individuals", + "inefficent", "inefficient", + "ineqaulity", "inequality", + "inequailty", "inequality", + "inevatible", "inevitable", + "inevetable", "inevitable", + "inevetably", "inevitably", + "inevetible", "inevitable", + "inevidable", "inevitable", + "inevidably", "inevitably", + "inevitible", "inevitable", + "inevitibly", "inevitably", + "inevtiable", "inevitable", + "inevtiably", "inevitably", + "infallable", "infallible", + "infaltable", "inflatable", + "infeccious", "infectious", + "infecteous", "infectious", + "infectuous", "infectious", + "infedility", "infidelity", + "infektious", "infectious", + "inferioara", "inferior", + "inferioare", "inferior", + "inferiorty", "inferiority", + "inferrence", "inference", + "infestaion", "infestation", + "infestaton", "infestation", + "infestions", "infections", + "infideltiy", "infidelity", + "infidility", "infidelity", + "infiltrade", "infiltrate", + "infiltrait", "infiltrate", + "infiltrare", "infiltrate", + "infiltrase", "infiltrate", + "infinately", "infinitely", + "infinetely", "infinitely", + "infiniment", "infinite", + "infinitley", "infinitely", + "infintiely", "infinitely", + "inflamable", "inflatable", + "inflateble", "inflatable", + "inflatible", "inflatable", + "infleunced", "influenced", + "inflitrate", "infiltrate", + "influanced", "influenced", + "influances", "influences", + "influencie", "influences", + "influening", "influencing", + "influensed", "influences", + "influenser", "influences", + "influenses", "influences", + "influental", "influential", + "influented", "influenced", + "influentes", "influences", + "influneced", "influenced", + "infograhic", "infographic", + "infograpic", "infographic", + "infomation", "information", + "informable", "informal", + "informarla", "informal", + "informarle", "informal", + "informarlo", "informal", + "informatie", "informative", + "informella", "informal", + "informerad", "informed", + "informtion", "information", + "infridging", "infringing", + "infrigning", "infringing", + "infulenced", "influenced", + "infulences", "influences", + "ingenuitiy", "ingenuity", + "ingrediant", "ingredient", + "ingrediens", "ingredients", + "ingrediets", "ingredient", + "inhabitans", "inhabitants", + "inhabitats", "inhabitants", + "inherantly", "inherently", + "inherintly", "inherently", + "inheritage", "heritage", + "inhernetly", "inherently", + "inifnitely", "infinitely", + 
"initaition", "initiation", + "initalised", "initialised", + "initaliser", "initialiser", + "initalises", "initialises", + "initalisms", "initialisms", + "initalized", "initialized", + "initalizer", "initializer", + "initalizes", "initializes", + "initalling", "initialling", + "initalness", "initialness", + "initiaitve", "initiatives", + "initiaties", "initiatives", + "initiativs", "initiatives", + "initiatves", "initiatives", + "initiavite", "initiatives", + "inititaive", "initiatives", + "inititiave", "initiatives", + "initmately", "intimately", + "initmidate", "intimidate", + "inituition", "initiation", + "injustaces", "injustices", + "injusticas", "injustices", + "inmigrants", "immigrants", + "innoavtion", "innovations", + "innocentes", "innocents", + "innotation", "innovation", + "innovacion", "innovation", + "innovaiton", "innovations", + "innovatief", "innovate", + "innovaties", "innovate", + "innovativo", "innovation", + "innvoation", "innovation", + "inofficial", "unofficial", + "inpsection", "inspection", + "inquisator", "inquisitor", + "inquisidor", "inquisitor", + "inquisiter", "inquisitor", + "inquisitio", "inquisitor", + "inquisitir", "inquisitor", + "inquisiton", "inquisition", + "inquistior", "inquisitor", + "inquizitor", "inquisitor", + "inqusitior", "inquisitor", + "insensitve", "insensitive", + "insepction", "inspection", + "insistance", "insistence", + "insistente", "insistence", + "insistenze", "insistence", + "insistince", "insistence", + "insitution", "institution", + "inspeccion", "inspection", + "inspeciton", "inspections", + "inspectons", "inspections", + "inspectres", "inspectors", + "inspektion", "inspection", + "inspektors", "inspectors", + "inspiraste", "inspires", + "inspiraton", "inspiration", + "inspirerad", "inspired", + "inspireras", "inspires", + "insrugency", "insurgency", + "instabiliy", "instability", + "instabilty", "instability", + "installeer", "installer", + "installent", "installment", + "installesd", "installs", + "installion", "installing", + "instatance", "instance", + "instelling", "installing", + "instituded", "instituted", + "instituion", "institution", + "institutie", "institute", + "institutue", "instituted", + "instrament", "instrument", + "instrcutor", "instructors", + "instrucion", "instruction", + "instructer", "instructor", + "instructie", "instructed", + "instruktor", "instructor", + "instuction", "instruction", + "instuments", "instruments", + "insturcted", "instructed", + "insturctor", "instructor", + "insturment", "instrument", + "instutions", "intuitions", + "instututed", "instituted", + "insurgance", "insurgency", + "insurgancy", "insurgency", + "intangable", "intangible", + "intangeble", "intangible", + "intangibil", "intangible", + "intanjible", "intangible", + "integraded", "integrated", + "integrarla", "integral", + "integrarlo", "integral", + "integratie", "integrated", + "integreres", "interferes", + "integreted", "integrated", + "inteligent", "intelligent", + "intenseley", "intensely", + "intensitiy", "intensity", + "intentinal", "intentional", + "intentines", "intestines", + "interacive", "interactive", + "interactes", "interacts", + "interactie", "interactive", + "interactue", "interacted", + "interasted", "interacted", + "interbread", "interbreed", + "intercepto", "interception", + "intercorse", "intercourse", + "intercouse", "intercourse", + "intereacts", "interfaces", + "interected", "interacted", + "interefers", "interferes", + "interesant", "interest", + "interesing", "interesting", + "interestes", "interests", + 
"interfacce", "interfaces", + "interfears", "interferes", + "interfeers", "interferes", + "interferce", "interferes", + "interferre", "interfere", + "intergated", "integrated", + "interioara", "interior", + "interioare", "interior", + "intermedie", "intermediate", + "internetbs", "internets", + "internetes", "internets", + "internetis", "internets", + "internetts", "internets", + "internetus", "internets", + "interprate", "interpret", + "interrugum", "interregnum", + "interruped", "interrupted", + "interstela", "interstellar", + "intervalls", "intervals", + "intervalos", "intervals", + "interveign", "intervening", + "interveing", "intervening", + "interveiws", "interviews", + "intervento", "intervention", + "intervenue", "intervene", + "interveres", "interferes", + "intervieni", "interviewing", + "intervieuw", "interviews", + "interviewd", "interviewed", + "interviewr", "interviewer", + "intervines", "intervenes", + "interviwed", "interviewed", + "interviwer", "interviewer", + "interwebbs", "interwebs", + "intestents", "intestines", + "intestinas", "intestines", + "intestinos", "intestines", + "intestions", "intestines", + "intidimate", "intimidate", + "intimadate", "intimidate", + "intimatley", "intimately", + "intimiated", "intimidate", + "intimidade", "intimidated", + "intimidant", "intimidate", + "intimidare", "intimidate", + "intimitade", "intimidated", + "intimitaly", "intimately", + "intimitate", "intimidate", + "intimitely", "intimately", + "intolarant", "intolerant", + "intolerace", "intolerance", + "intolerate", "intolerant", + "intolerent", "intolerant", + "intolorant", "intolerant", + "intolorent", "intolerant", + "intorduced", "introduced", + "intorduces", "introduces", + "intorverts", "introverts", + "intoxicted", "intoxicated", + "intraverts", "introverts", + "intreguing", "intriguing", + "intricaces", "intricacies", + "intriguied", "intrigue", + "intrigured", "intrigue", + "intrinseci", "intrinsic", + "intrinsinc", "intrinsic", + "intriquing", "intriguing", + "intriuging", "intriguing", + "introdecks", "introduces", + "introdused", "introduces", + "introvents", "introverts", + "introvered", "introverted", + "introversa", "introverts", + "introverse", "introverts", + "introversi", "introverts", + "introverso", "introverts", + "introversy", "introverts", + "introveted", "introverted", + "intruduced", "introduced", + "intruduces", "introduces", + "intruiging", "intriguing", + "intruments", "instruments", + "intuitevly", "intuitively", + "intuitivly", "intuitively", + "intuitivno", "intuition", + "intutively", "intuitively", + "inumerable", "enumerable", + "inusrgency", "insurgency", + "invaderats", "invaders", + "invaildate", "invalidates", + "invairably", "invariably", + "invaldiate", "invalidates", + "invalidade", "invalidate", + "invalidare", "invalidate", + "invalubale", "invaluable", + "invalueble", "invaluable", + "invaraibly", "invariably", + "invariabil", "invariably", + "invaribaly", "invariably", + "invaulable", "invaluable", + "inveitable", "inevitable", + "inveitably", "inevitably", + "invensions", "inventions", + "inventario", "inventor", + "inventarlo", "inventor", + "inventaron", "inventor", + "inventings", "inventions", + "inventivos", "inventions", + "invertendo", "inverted", + "inverterad", "inverted", + "invertions", "inventions", + "investemnt", "investments", + "investiage", "investigate", + "investions", "inventions", + "investirat", "investigator", + "investmens", "investments", + "invicinble", "invincible", + "invididual", "individual", + "invincable", 
"invincible", + "invinceble", "invincible", + "invinicble", "invincible", + "invinsible", "invincible", + "invinvible", "invincible", + "invisibily", "invisibility", + "invitacion", "invitation", + "invitating", "invitation", + "involunary", "involuntary", + "involvment", "involvement", + "ironcially", "ironically", + "irracional", "irrational", + "irrationel", "irrational", + "irrelavant", "irrelevant", + "irrelavent", "irrelevant", + "irrelevent", "irrelevant", + "irrelivant", "irrelevant", + "irrelivent", "irrelevant", + "irrevelant", "irrelevant", + "irreverant", "irrelevant", + "irridation", "irritation", + "irriration", "irritation", + "irritacion", "irritation", + "irritaties", "irritate", + "islamisist", "islamist", + "islamistas", "islamists", + "isntalling", "installing", + "isntructed", "instructed", + "isntrument", "instrument", + "israeliens", "israelis", + "israelitas", "israelis", + "italianess", "italians", + "itnroduced", "introduced", + "jailborken", "jailbroken", + "jalibroken", "jailbroken", + "jamaicains", "jamaican", + "jamaicaman", "jamaican", + "jerusaleum", "jerusalem", + "jounralism", "journalism", + "jounralist", "journalist", + "jouranlism", "journalism", + "jouranlist", "journalist", + "journalims", "journals", + "journalits", "journals", + "journalizm", "journalism", + "journalsim", "journalism", + "journolist", "journalist", + "judegments", "judgements", + "judgemenal", "judgemental", + "judgemetal", "judgemental", + "jugdements", "judgements", + "juggarnaut", "juggernaut", + "juggeranut", "juggernaut", + "juggernath", "juggernaut", + "juggernout", "juggernaut", + "juggernuat", "juggernaut", + "juggetnaut", "juggernaut", + "jugglenaut", "juggernaut", + "juggurnaut", "juggernaut", + "justifible", "justifiable", + "juvenilles", "juvenile", + "kickstarer", "kickstarter", + "kickstartr", "kickstarter", + "kickstater", "kickstarter", + "kidnapning", "kidnapping", + "kidnappade", "kidnapped", + "killingest", "killings", + "kilometros", "kilometers", + "kilomiters", "kilometers", + "kilomoters", "kilometers", + "kilomteres", "kilometers", + "kindapping", "kidnapping", + "kingdomers", "kingdoms", + "krpytonite", "kryptonite", + "krypotnite", "kryptonite", + "krypronite", "kryptonite", + "kryptinite", "kryptonite", + "kryptolite", "kryptonite", + "kryptonyte", "kryptonite", + "krypyonite", "kryptonite", + "krytponite", "kryptonite", + "kyrptonite", "kryptonite", + "labarotory", "laboratory", + "laboratroy", "laboratory", + "laborerers", "laborers", + "laboritory", "laboratory", + "laborotory", "laboratory", + "lackbuster", "lackluster", + "lacklaster", "lackluster", + "landacapes", "landscapes", + "landingers", "landings", + "landshapes", "landscapes", + "landspaces", "landscapes", + "lannasters", "lannisters", + "lannesters", "lannisters", + "lannistars", "lannisters", + "lannsiters", "lannisters", + "lateration", "alteration", + "latitudine", "latitude", + "laughabley", "laughably", + "laughablly", "laughably", + "launchered", "launched", + "leaglizing", "legalizing", + "lectureres", "lectures", + "legalazing", "legalizing", + "legalizare", "legalize", + "legalizate", "legalize", + "legendaies", "legendaries", + "legendaris", "legendaries", + "legimitacy", "legitimacy", + "legimitate", "legitimate", + "legislatie", "legislative", + "legitamacy", "legitimacy", + "legitamate", "legitimate", + "legitamicy", "legitimacy", + "legitamite", "legitimate", + "legitemacy", "legitimacy", + "legitemate", "legitimate", + "legitimaly", "legitimacy", + "legitimicy", "legitimacy", 
+ "legitimite", "legitimate", + "leiutenant", "lieutenant", + "lesbianese", "lesbians", + "lesbianest", "lesbians", + "leuitenant", "lieutenant", + "levetating", "levitating", + "liberacion", "liberation", + "liberalest", "liberate", + "liberalizm", "liberalism", + "liberalnim", "liberalism", + "liberalsim", "liberalism", + "liberarion", "liberation", + "liberaties", "liberate", + "liberatore", "liberate", + "libertania", "libertarians", + "libguistic", "linguistic", + "lietuenant", "lieutenant", + "lieutanant", "lieutenant", + "lieutanent", "lieutenant", + "lieutenent", "lieutenant", + "lifestiles", "lifestyles", + "lifestlyes", "lifestyles", + "lifesystem", "filesystem", + "lifesytles", "lifestyles", + "lifetimers", "lifetimes", + "lifetsyles", "lifestyles", + "lighhtning", "lightening", + "lightergas", "lighters", + "lighthning", "lightening", + "lighthorse", "lighthouse", + "lighthosue", "lighthouse", + "lighthours", "lighthouse", + "lightining", "lighting", + "lightneing", "lightening", + "lightnting", "lightening", + "lightrooom", "lightroom", + "lightweigt", "lightweight", + "ligitation", "litigation", + "ligthening", "lightening", + "ligthhouse", "lighthouse", + "likelyhood", "likelihood", + "limination", "limitation", + "limitacion", "limitation", + "limitaiton", "limitation", + "limitating", "limitation", + "limitativo", "limitation", + "linguisics", "linguistics", + "linguisitc", "linguistics", + "linguistcs", "linguistics", + "linguistis", "linguistics", + "linguitics", "linguistic", + "lingusitic", "linguistics", + "lingvistic", "linguistic", + "liousville", "louisville", + "listeneres", "listeners", + "literallly", "literally", + "literarely", "literary", + "literarlly", "literary", + "literatire", "literate", + "literative", "literate", + "literatute", "literate", + "lithuanina", "lithuania", + "litterally", "literally", + "liuetenant", "lieutenant", + "liveatream", "livestream", + "livelehood", "livelihood", + "liverpoool", "liverpool", + "livescream", "livestream", + "livestreem", "livestream", + "livestrems", "livestream", + "livilehood", "livelihood", + "livliehood", "livelihood", + "lobbyistes", "lobbyists", + "lockacreen", "lockscreen", + "logictical", "logistical", + "logisitcal", "logistical", + "logisticas", "logistics", + "logisticly", "logistical", + "loiusville", "louisville", + "lollipoopy", "lollipop", + "lonelyness", "loneliness", + "longevitiy", "longevity", + "lonileness", "loneliness", + "lonlieness", "loneliness", + "louieville", "louisville", + "louisiania", "louisiana", + "louisianna", "louisiana", + "louisivlle", "louisville", + "louisviile", "louisville", + "lousiville", "louisville", + "luietenant", "lieutenant", + "mabyelline", "maybelline", + "magnifient", "magnificent", + "mainpulate", "manipulate", + "mainstreem", "mainstream", + "maintaince", "maintained", + "maintaines", "maintains", + "maintainig", "maintaining", + "maintenace", "maintenance", + "maintianed", "maintained", + "maintioned", "mentioned", + "malfuncion", "malfunction", + "malpractce", "malpractice", + "managebale", "manageable", + "maneagable", "manageable", + "maneouvred", "manoeuvred", + "maneouvres", "manoeuvres", + "maneuveres", "maneuvers", + "maneuveurs", "maneuver", + "manifestas", "manifests", + "manifestes", "manifests", + "manifestus", "manifests", + "manipluate", "manipulate", + "manipualte", "manipulate", + "manipulant", "manipulate", + "manipulare", "manipulate", + "manipulted", "manipulated", + "maniuplate", "manipulate", + "mannarisms", "mannerisms", + "mannersims", 
"mannerisms", + "mannorisms", "mannerisms", + "manufacter", "manufacture", + "manufacure", "manufacture", + "manufature", "manufacture", + "maraudeurs", "marauder", + "margaritte", "margaret", + "margianlly", "marginally", + "marginaali", "marginal", + "marginable", "marginal", + "marignally", "marginally", + "marijuanna", "marijuana", + "marketting", "marketing", + "marshmalow", "marshmallow", + "masculinty", "masculinity", + "massacrare", "massacre", + "massivelly", "massively", + "masteriers", "masteries", + "masternind", "mastermind", + "masterpice", "masterpiece", + "mastrubate", "masturbate", + "mastubrate", "masturbated", + "masturabte", "masturbate", + "masturbait", "masturbate", + "masturbare", "masturbate", + "masturbeta", "masturbated", + "masturdate", "masturbate", + "materiales", "materials", + "materialsm", "materialism", + "maximazing", "maximizing", + "maximixing", "maximizing", + "mayballine", "maybelline", + "maybellene", "maybelline", + "maybellibe", "maybelline", + "maybilline", "maybelline", + "mccarthyst", "mccarthyist", + "mdifielder", "midfielder", + "meagthread", "megathread", + "meaningess", "meanings", + "meaningles", "meanings", + "meatballls", "meatballs", + "mecahnical", "mechanical", + "mecahnisms", "mechanisms", + "mechancial", "mechanical", + "mechandise", "merchandise", + "mechanichs", "mechanics", + "mechanicle", "mechanical", + "mechanicly", "mechanical", + "mechanicus", "mechanics", + "mechanincs", "mechanic", + "mechanisim", "mechanism", + "mechansims", "mechanisms", + "mechinical", "mechanical", + "mechinisms", "mechanisms", + "mediaction", "medications", + "medicacion", "medication", + "medicaiton", "medication", + "medicalert", "medicare", + "medicallly", "medically", + "medicatons", "medications", + "medicinens", "medicines", + "medicinske", "medicine", + "medicority", "mediocrity", + "medidating", "meditating", + "mediocirty", "mediocrity", + "mediocraty", "mediocrity", + "mediocrety", "mediocrity", + "mediocricy", "mediocrity", + "mediocrily", "mediocrity", + "mediocrisy", "mediocrity", + "meditacion", "medications", + "meditaiton", "meditation", + "melatonian", "melatonin", + "melatonion", "melatonin", + "mellinnium", "millennium", + "melodieuse", "melodies", + "membrances", "membrane", + "mentallity", "mentally", + "mentionnes", "mentions", + "mercenaire", "mercenaries", + "mercenares", "mercenaries", + "mercentile", "mercantile", + "merchanise", "merchandise", + "merchantos", "merchants", + "messagease", "messages", + "messagepad", "messaged", + "messenging", "messaging", + "metabalism", "metabolism", + "metabilism", "metabolism", + "metabloism", "metabolism", + "metablosim", "metabolism", + "metabolics", "metabolism", + "metabolizm", "metabolism", + "metabolsim", "metabolism", + "metalurgic", "metallurgic", + "metaphoras", "metaphors", + "metaphores", "metaphors", + "metaphyics", "metaphysics", + "meterology", "meteorology", + "methaphors", "metaphors", + "methodolgy", "methodology", + "methodoloy", "methodology", + "metrapolis", "metropolis", + "metrolopis", "metropolis", + "metropilis", "metropolis", + "metroplois", "metropolis", + "metropolin", "metropolitan", + "metropolos", "metropolis", + "metropolys", "metropolis", + "mexicanese", "mexicans", + "mexicaness", "mexicans", + "michelline", "michelle", + "micorwaves", "microwaves", + "microhpone", "microphone", + "microscoop", "microscope", + "microvaves", "microwaves", + "microvaxes", "microwaves", + "micrpohone", "microphones", + "midfeilder", "midfielder", + "midfiedler", "midfielder", + 
"midfieldes", "midfielders", + "midfielers", "midfielders", + "midfileder", "midfielder", + "midifelder", "midfielder", + "midnlessly", "mindlessly", + "migitation", "mitigation", + "migrainers", "migraines", + "miletsones", "milestones", + "milisecond", "millisecond", + "militiades", "militias", + "militiants", "militias", + "millinnium", "millennium", + "miminalist", "minimalist", + "minamilist", "minimalist", + "mindleslly", "mindlessly", + "minimazing", "minimizing", + "minimilast", "minimalist", + "minimilist", "minimalist", + "mininalist", "minimalist", + "ministeres", "ministers", + "ministerns", "ministers", + "minneaplis", "minneapolis", + "minneapols", "minneapolis", + "minnesotta", "minnesota", + "minoritets", "minorities", + "minoroties", "minorities", + "miracalous", "miraculous", + "miracluous", "miraculous", + "miracoulus", "miraculous", + "mircophone", "microphone", + "mircoscope", "microscope", + "mircowaves", "microwaves", + "misandrony", "misandry", + "miscarrage", "miscarriage", + "miscarrige", "miscarriage", + "misdemenor", "misdemeanor", + "miserabley", "miserably", + "miserablly", "miserably", + "misforture", "misfortune", + "misgoynist", "misogynist", + "misinfomed", "misinformed", + "misinterpt", "misinterpret", + "misisonary", "missionary", + "misoganist", "misogynist", + "misogenist", "misogynist", + "misoginist", "misogynist", + "misoginyst", "misogynist", + "misognyist", "misogynist", + "misogonist", "misogynist", + "misogonyst", "misogynist", + "misogyinst", "misogynist", + "misogynyst", "misogynist", + "misoygnist", "misogynist", + "mispelling", "misspelling", + "missionare", "missionaries", + "missionera", "missionary", + "missisippi", "mississippi", + "mississipi", "mississippi", + "mississppi", "mississippi", + "misspeling", "misspelling", + "misspellng", "misspelling", + "mistakedly", "mistakenly", + "mistakinly", "mistakenly", + "mistankely", "mistakenly", + "misterious", "mysterious", + "misteryous", "mysterious", + "mistreaded", "mistreated", + "misygonist", "misogynist", + "mitigaiton", "mitigation", + "moderacion", "moderation", + "moderaters", "moderates", + "moderatley", "moderately", + "moderatore", "moderate", + "moderatorn", "moderation", + "modificato", "modification", + "modifieras", "modifiers", + "modifieres", "modifiers", + "moisturier", "moisturizer", + "moleculair", "molecular", + "molestaion", "molestation", + "molestarle", "molester", + "molestarme", "molester", + "molestarse", "molester", + "molestarte", "molester", + "molestered", "molested", + "momentarly", "momentarily", + "monagomous", "monogamous", + "monetizare", "monetize", + "monitering", "monitoring", + "monogymous", "monogamous", + "monolistic", "monolithic", + "monolitich", "monolithic", + "monolopies", "monopolies", + "monolothic", "monolithic", + "monolythic", "monolithic", + "monopilies", "monopolies", + "monoploies", "monopolies", + "monopolets", "monopolies", + "monopolice", "monopolies", + "monopolios", "monopolies", + "monothilic", "monolithic", + "monsterous", "monsters", + "montioring", "monitoring", + "monumentos", "monuments", + "monumentul", "monumental", + "monumentus", "monuments", + "mormonisim", "mormonism", + "morphinate", "morphine", + "morrisette", "morissette", + "morrisound", "morrison", + "mosquitero", "mosquito", + "mosquiters", "mosquitoes", + "motherbard", "motherboard", + "motherboad", "motherboard", + "motherbord", "motherboard", + "motivaiton", "motivations", + "motiviated", "motivated", + "motorcicle", "motorcycle", + "motorcylce", "motorcycle", 
+ "motorcyles", "motorcycles", + "motorollas", "motorola", + "mouthpeace", "mouthpiece", + "mouthpeice", "mouthpiece", + "movespeeed", "movespeed", + "mozzaralla", "mozzarella", + "mozzeralla", "mozzarella", + "mozzorella", "mozzarella", + "mulitation", "mutilation", + "mulitplied", "multiplied", + "mulitplier", "multiplier", + "mulitverse", "multiverse", + "multilpier", "multiplier", + "multiplaer", "multiplier", + "multiplaye", "multiply", + "multiplayr", "multiply", + "multiplays", "multiply", + "multipleye", "multiply", + "multipling", "multiplying", + "multiplyed", "multiplied", + "multiplyer", "multiple", + "multiplyng", "multiplying", + "murderered", "murdered", + "murdereres", "murderers", + "muscicians", "musicians", + "musculaire", "muscular", + "mushroooms", "mushroom", + "mutialtion", "mutilation", + "mutiliated", "mutilated", + "mutliation", "mutilation", + "mutliplied", "multiplied", + "mutliplier", "multiplier", + "mutliverse", "multiverse", + "mysogynist", "misogynist", + "mysterieus", "mysteries", + "nagivating", "navigating", + "nagivation", "navigation", + "narcassism", "narcissism", + "narcassist", "narcissist", + "narcessist", "narcissist", + "narciscism", "narcissism", + "narciscist", "narcissist", + "narcisissm", "narcissism", + "narcisisst", "narcissist", + "narcisists", "narcissist", + "narcissicm", "narcissism", + "narcissict", "narcissist", + "narcissitc", "narcissist", + "narcissits", "narcissist", + "narcoticos", "narcotics", + "narrativas", "narratives", + "narrativos", "narratives", + "narritives", "narratives", + "nashvillle", "nashville", + "nationales", "nationals", + "nationalis", "nationals", + "nationalit", "nationalist", + "nationaliy", "nationality", + "nationalty", "nationality", + "nationella", "national", + "naturually", "naturally", + "naviagting", "navigating", + "naviagtion", "navigation", + "navigatore", "navigate", + "neccessary", "necessary", + "necesarily", "necessarily", + "necessairy", "necessarily", + "necessarly", "necessary", + "necessarry", "necessary", + "necessiate", "necessitate", + "necessites", "necessities", + "neckbeared", "neckbeard", + "neckboards", "neckbeards", + "neckbreads", "neckbeards", + "neckneards", "neckbeards", + "necromacer", "necromancer", + "necromaner", "necromancer", + "needleslly", "needlessly", + "negativaty", "negativity", + "negativley", "negatively", + "negelcting", "neglecting", + "negilgence", "negligence", + "negiotated", "negotiated", + "neglacting", "neglecting", + "neglagence", "negligence", + "neglegance", "negligence", + "neglegible", "negligible", + "neglegting", "neglecting", + "neglibible", "negligible", + "neglicence", "negligence", + "neglicible", "negligible", + "neglicting", "neglecting", + "negligable", "negligible", + "negligance", "negligence", + "negligeble", "negligible", + "negligente", "negligence", + "negociated", "negotiated", + "negogiated", "negotiated", + "negoitated", "negotiated", + "negotaited", "negotiated", + "negotation", "negotiation", + "negotiaion", "negotiation", + "negotiatie", "negotiated", + "negotiatin", "negotiations", + "negotiaton", "negotiation", + "neigbhours", "neighbours", + "neighbhors", "neighbours", + "neighbords", "neighbours", + "neighbores", "neighbours", + "netowrking", "networking", + "netruality", "neutrality", + "neturality", "neutrality", + "netwroking", "networking", + "neurologia", "neurological", + "neutrailty", "neutrality", + "newletters", "newsletters", + "newlsetter", "newsletter", + "newsettler", "newsletter", + "newslatter", "newsletter", 
+ "nieghbours", "neighbours", + "nightmates", "nightmares", + "nightmears", "nightmares", + "nightmeres", "nightmares", + "nigthmares", "nightmares", + "nipticking", "nitpicking", + "nitpciking", "nitpicking", + "nominacion", "nomination", + "nominatino", "nominations", + "nominativo", "nomination", + "nominatons", "nominations", + "nonsencial", "nonsensical", + "nontheless", "nonetheless", + "northerend", "northern", + "nostalgica", "nostalgia", + "nostalgija", "nostalgia", + "noteworhty", "noteworthy", + "nothingess", "nothingness", + "noticabely", "noticeably", + "noticabley", "noticeably", + "noticiably", "noticeably", + "notoriosly", "notoriously", + "novembeard", "november", + "nuetrality", "neutrality", + "nutricious", "nutritious", + "nutrientes", "nutrients", + "nutritents", "nutrients", + "nutritinal", "nutritional", + "nutritiuos", "nutritious", + "nutritivos", "nutritious", + "nutrituous", "nutritious", + "nutrutious", "nutritious", + "obatinable", "obtainable", + "obejctives", "objectives", + "obilgatory", "obligatory", + "objecitves", "objectives", + "objectivas", "objectives", + "objectivly", "objectively", + "objectivst", "objectives", + "objectivty", "objectivity", + "objektives", "objectives", + "obligitary", "obligatory", + "obligitory", "obligatory", + "observabil", "observable", + "observarse", "observers", + "observaton", "observation", + "observeras", "observers", + "observered", "observed", + "observeres", "observers", + "observible", "observable", + "obstancles", "obstacles", + "obstrucion", "obstruction", + "obstructin", "obstruction", + "obtainabie", "obtainable", + "obtaineble", "obtainable", + "obtainible", "obtainable", + "obtianable", "obtainable", + "ocasionaly", "occasionally", + "ocassional", "occasional", + "ocassioned", "occasioned", + "occaisonal", "occasional", + "occasionly", "occasional", + "occassions", "occasions", + "occational", "occasional", + "occulation", "occupation", + "occupaiton", "occupation", + "occurances", "occurrences", + "occurences", "occurrences", + "occurrance", "occurrence", + "octohedral", "octahedral", + "octohedron", "octahedron", + "offensivly", "offensively", + "offereings", "offerings", + "officailly", "officially", + "olbigatory", "obligatory", + "ominpotent", "omnipotent", + "ominscient", "omniscient", + "omnipetent", "omnipotent", + "omnipitent", "omnipotent", + "omnipotant", "omnipotent", + "omnisicent", "omniscient", + "omniverous", "omnivorous", + "omnsicient", "omniscient", + "on-premise", "on-premises", + "onmipotent", "omnipotent", + "onmiscient", "omniscient", + "operatings", "operations", + "operativne", "operative", + "operativos", "operations", + "oportunity", "opportunity", + "opponenets", "opponent", + "oppononent", "opponent", + "oppressiun", "oppressing", + "optimisitc", "optimistic", + "optimizare", "optimize", + "optimizate", "optimize", + "optimizied", "optimize", + "organicaly", "organically", + "organiclly", "organically", + "organisate", "organise", + "organische", "organise", + "organisera", "organizers", + "organisere", "organizers", + "organisert", "organizers", + "organisier", "organise", + "organisims", "organism", + "organismed", "organise", + "organismen", "organise", + "organismer", "organise", + "organismes", "organisms", + "organismus", "organisms", + "organisten", "organise", + "organiszed", "organise", + "organizaed", "organize", + "organizare", "organizer", + "organizate", "organize", + "organizors", "organizers", + "organizuje", "organize", + "organziers", "organizers", + "orientaion", 
"orientation", + "orientarla", "oriental", + "orientarlo", "oriental", + "origianlly", "originally", + "originales", "originals", + "originalet", "originated", + "originalis", "originals", + "originalty", "originality", + "orignially", "originally", + "origniated", "originated", + "origonally", "originally", + "origonated", "originated", + "ostencibly", "ostensibly", + "ostenisbly", "ostensibly", + "ostensably", "ostensibly", + "ostentibly", "ostensibly", + "ostrasiced", "ostracized", + "ostrasized", "ostracized", + "ostraziced", "ostracized", + "ostrazised", "ostracized", + "ostrecized", "ostracized", + "ostricized", "ostracized", + "ostrocized", "ostracized", + "oustanding", "outstanding", + "outcalssed", "outclassed", + "outlcassed", "outclassed", + "outnumberd", "outnumbered", + "outnumbred", "outnumbered", + "outperfoms", "outperform", + "outperfrom", "outperform", + "outpreform", "outperform", + "outrageuos", "outrageous", + "outragious", "outrageous", + "outragoues", "outrageous", + "outreagous", "outrageous", + "outsourcad", "outsourced", + "outsouring", "outsourcing", + "outsoursed", "outsourced", + "outweighes", "outweighs", + "overarcing", "overarching", + "overclockd", "overclocked", + "overcloked", "overclocked", + "overcoding", "overcoming", + "overheards", "overhead", + "overheared", "overhead", + "overhooked", "overlooked", + "overlanded", "overloaded", + "overlaoded", "overloaded", + "overlaping", "overlapping", + "overlauded", "overloaded", + "overloards", "overload", + "overlorded", "overloaded", + "overlordes", "overlords", + "overnurfed", "overturned", + "overpirced", "overpriced", + "overpowerd", "overpowered", + "overpowred", "overpowered", + "overprised", "overpriced", + "overtunned", "overturned", + "overtunred", "overturned", + "overturing", "overturn", + "overweigth", "overweight", + "overwhemed", "overwhelmed", + "overwieght", "overweight", + "overwritte", "overwrite", + "pahtfinder", "pathfinder", + "painfullly", "painfully", + "painkilers", "painkillers", + "pairlament", "parliament", + "pakistanti", "pakistani", + "paladinlst", "paladins", + "palcements", "placements", + "paleolitic", "paleolithic", + "palestinan", "palestinian", + "paltformer", "platformer", + "palyerbase", "playerbase", + "parachutte", "parachute", + "parademics", "paramedics", + "paradiggum", "paradigm", + "paragraghs", "paragraphs", + "paragrahps", "paragraphs", + "paragrapgh", "paragraphs", + "paragrpahs", "paragraphs", + "parahprase", "paraphrase", + "paralleles", "parallels", + "parallells", "parallels", + "paramadics", "paramedics", + "paramaters", "parameters", + "paramecias", "paramedics", + "parametics", "paramedics", + "parametros", "parameters", + "paramiters", "parameters", + "paramormal", "paranormal", + "paranoicas", "paranoia", + "paranomral", "paranormal", + "paranornal", "paranormal", + "parapharse", "paraphrase", + "paraphraze", "paraphrase", + "paraprhase", "paraphrase", + "parasitter", "parasite", + "parilament", "parliament", + "parituclar", "particular", + "parlaiment", "parliament", + "parliamant", "parliament", + "parliamone", "parliament", + "parliement", "parliament", + "parrallell", "parallel", + "parrallely", "parallelly", + "partiarchy", "patriarchy", + "participas", "participants", + "participat", "participants", + "participte", "participate", + "particualr", "particular", + "partiotism", "patriotism", + "passionais", "passions", + "passionale", "passionately", + "passionant", "passionate", + "passionite", "passionate", + "passivedns", "passives", + 
"passivelly", "passively", + "patenterad", "patented", + "pathfidner", "pathfinder", + "pathfindir", "pathfinder", + "pathifnder", "pathfinder", + "patientens", "patients", + "patrairchy", "patriarchy", + "patriachry", "patriarchy", + "patriarcal", "patriarchal", + "patriarhal", "patriarchal", + "patriatchy", "patriarchy", + "patriatism", "patriotism", + "patrionism", "patriotism", + "patriotics", "patriotism", + "patriotisk", "patriots", + "patroitism", "patriotism", + "patryarchy", "patriarchy", + "pedantisch", "pedantic", + "pedestiran", "pedestrian", + "pedestrain", "pedestrian", + "pedictions", "depictions", + "pedohpiles", "pedophiles", + "pedohpilia", "pedophilia", + "pedophilac", "pedophilia", + "pedophilea", "pedophilia", + "pedophilie", "pedophile", + "pedophilla", "pedophilia", + "pedophille", "pedophile", + "pedopholia", "pedophilia", + "penetraion", "penetration", + "penetratin", "penetration", + "penetraton", "penetration", + "penguinese", "penguins", + "penguiness", "penguins", + "peninsulla", "peninsula", + "penninsula", "peninsula", + "peodphiles", "pedophiles", + "peodphilia", "pedophilia", + "pepperment", "peppermint", + "pepperonni", "pepperoni", + "percantage", "percentage", + "percantile", "percentile", + "percaution", "precaution", + "percenatge", "percentages", + "percential", "percentile", + "percentige", "percentile", + "perceptoin", "perceptions", + "percession", "percussion", + "percetange", "percentages", + "percetnage", "percentages", + "percintile", "percentile", + "percission", "percussion", + "percpetion", "perceptions", + "percusions", "percussion", + "perdicting", "predicting", + "perdiction", "prediction", + "perdictive", "predictive", + "perenially", "perennially", + "perfeccion", "perfection", + "perfecxion", "perfection", + "perfektion", "perfection", + "perferable", "preferable", + "perferably", "preferably", + "perference", "preference", + "perferring", "preferring", + "perfexcion", "perfection", + "perfomance", "performance", + "performace", "performance", + "performane", "performances", + "performans", "performances", + "performens", "performers", + "performous", "performs", + "perfromers", "performers", + "perhiperal", "peripheral", + "peridinkle", "periwinkle", + "perihperal", "peripheral", + "periodisch", "periodic", + "periperhal", "peripheral", + "peripheals", "peripherals", + "peripheria", "peripheral", + "periphiral", "peripheral", + "periphreal", "peripheral", + "periphrial", "peripheral", + "peritinkle", "periwinkle", + "periwankle", "periwinkle", + "periwinkel", "periwinkle", + "periwinkie", "periwinkle", + "periwinlke", "periwinkle", + "permanenty", "permanently", + "permanetly", "permanently", + "permisions", "permission", + "permisison", "permissions", + "permissble", "permissible", + "permissibe", "permissible", + "permissons", "permissions", + "perogative", "prerogative", + "perordered", "preordered", + "perpatuate", "perpetuate", + "perpetualy", "perpetually", + "perpetuare", "perpetuate", + "persausion", "persuasion", + "persausive", "persuasive", + "persective", "respective", + "persectued", "persecuted", + "persecutie", "persecuted", + "persecutin", "persecution", + "perserving", "preserving", + "persicuted", "persecuted", + "persistant", "persistent", + "persistens", "persists", + "persoanlly", "personally", + "persocuted", "persecuted", + "personalie", "personalized", + "personalis", "personas", + "personarse", "personas", + "personatus", "personas", + "personnell", "personnel", + "perspecive", "perspective", + "perspectie", 
"perspectives", + "persuasian", "persuasion", + "persuasing", "persuasion", + "persuasivo", "persuasion", + "persuation", "persuasion", + "persucuted", "persecuted", + "persumably", "presumably", + "persussion", "persuasion", + "persvasive", "persuasive", + "perswasion", "persuasion", + "pertinante", "pertinent", + "pervailing", "prevailing", + "pervalence", "prevalence", + "pervention", "prevention", + "perversley", "perverse", + "pesitcides", "pesticides", + "pessimistc", "pessimistic", + "pessimitic", "pessimistic", + "pestacides", "pesticides", + "pestecides", "pesticides", + "pesticedes", "pesticides", + "pesticidas", "pesticides", + "pestisides", "pesticides", + "pestizides", "pesticides", + "pharamcist", "pharmacist", + "pharmacias", "pharmacist", + "pharmacyst", "pharmacist", + "pharmasist", "pharmacist", + "pharmicist", "pharmacist", + "phemonenon", "phenomenon", + "phenemenon", "phenomenon", + "phenemonal", "phenomenal", + "phenomanal", "phenomenal", + "phenomanon", "phenomenon", + "phenomemon", "phenomenon", + "phenomenen", "phenomenon", + "phenomenol", "phenomenal", + "phenomenom", "phenomenon", + "phenominon", "phenomenon", + "phenomonal", "phenomenal", + "phenomonen", "phenomenon", + "phenomonon", "phenomenon", + "phenonemal", "phenomenal", + "phenonemon", "phenomenon", + "phenonmena", "phenomena", + "philipines", "philippines", + "philippins", "philippines", + "philisophy", "philosophy", + "phillipine", "philippine", + "phillipses", "phillies", + "philosiphy", "philosophy", + "philosohpy", "philosophy", + "philosoper", "philosopher", + "philospher", "philosopher", + "philospohy", "philosophy", + "photogragh", "photograph", + "photograhs", "photographs", + "photograhy", "photography", + "photograps", "photographs", + "photograpy", "photography", + "photogrpah", "photographs", + "photoshopd", "photoshopped", + "photoshope", "photoshopped", + "phramacist", "pharmacist", + "phsyically", "physically", + "phsyicians", "physicians", + "phsyicists", "physicists", + "phsyiology", "physiology", + "phycisians", "physicians", + "phycisists", "physicists", + "phyiscally", "physically", + "phyisology", "physiology", + "physcially", "physically", + "physcology", "psychology", + "physcopath", "psychopath", + "physicials", "physicians", + "physiciens", "physicians", + "physioligy", "physiology", + "picthforks", "pitchforks", + "pinoneered", "pioneered", + "pitchferks", "pitchforks", + "pitchfolks", "pitchforks", + "pitchfords", "pitchforks", + "pitchworks", "pitchforks", + "pitckforks", "pitchforks", + "pittaburgh", "pittsburgh", + "pittsbrugh", "pittsburgh", + "placehoder", "placeholder", + "placeholdr", "placeholder", + "placeholer", "placeholder", + "placemenet", "placements", + "plagairism", "plagiarism", + "plagarisim", "plagiarism", + "plagiariam", "plagiarism", + "plagiarios", "plagiarism", + "plagiarius", "plagiarism", + "plagiarizm", "plagiarism", + "plagierism", "plagiarism", + "plaguarism", "plagiarism", + "plaigarism", "plagiarism", + "plasticosa", "plastics", + "platfarmer", "platformer", + "platformar", "platformer", + "platformie", "platformer", + "platfotmer", "platformer", + "platfromer", "platformer", + "platofrmer", "platformer", + "playaround", "playground", + "playersare", "playerbase", + "playgorund", "playground", + "playthrogh", "playthrough", + "playthrouh", "playthrough", + "playwrites", "playwrights", + "plethorian", "plethora", + "policitian", "politician", + "polinators", "pollinators", + "polishuset", "polishes", + "politessen", "politeness", + "politicain", 
"politician", + "politicaly", "politically", + "politicien", "politician", + "politicing", "politician", + "politicion", "politician", + "politickin", "politician", + "politiikan", "politician", + "politiness", "politeness", + "polititian", "politician", + "popualtion", "populations", + "populairty", "popularity", + "populaiton", "populations", + "popularaty", "popularity", + "popularest", "populate", + "popularily", "popularity", + "populaties", "populate", + "populatiry", "popularity", + "populative", "populate", + "populatoin", "populations", + "popultaion", "populations", + "pormetheus", "prometheus", + "pornograhy", "pornography", + "pornograpy", "pornography", + "pornogrphy", "pornography", + "porportion", "proportion", + "portabilty", "portability", + "portarying", "portraying", + "portoguese", "portuguese", + "portraiing", "portraying", + "portrating", "portraying", + "portrayels", "portrays", + "portugeuse", "portuguese", + "portuguise", "portuguese", + "posessions", "possessions", + "posicional", "positional", + "positevely", "positively", + "positioing", "positioning", + "positionly", "positional", + "positionne", "positioned", + "positivley", "positively", + "possesives", "possessive", + "possessers", "possesses", + "possessess", "possesses", + "possibiliy", "possibility", + "possibilty", "possibility", + "possissive", "possessive", + "posthomous", "posthumous", + "potentialy", "potentially", + "poulations", "populations", + "powerhorse", "powerhouse", + "powerhosue", "powerhouse", + "powerhours", "powerhouse", + "powerhsell", "powershell", + "powerprint", "powerpoint", + "powersehll", "powershell", + "ppublisher", "publisher", + "practially", "practically", + "practicaly", "practically", + "practicess", "practise", + "practiclly", "practically", + "practioner", "practitioner", + "precaucion", "precaution", + "precausion", "precaution", + "precautios", "precautions", + "precedance", "precedence", + "precedense", "precedence", + "preceeding", "preceding", + "precendece", "precedence", + "precentage", "percentage", + "precentile", "percentile", + "preciselly", "precisely", + "precuation", "precautions", + "precussion", "percussion", + "predecated", "predicated", + "predecence", "precedence", + "predecesor", "predecessor", + "predection", "prediction", + "predective", "predictive", + "prediccion", "prediction", + "prediceted", "predicated", + "predicited", "predicated", + "predicitng", "predicting", + "prediciton", "prediction", + "predicitve", "predictive", + "predickted", "predicated", + "predictave", "predictive", + "predictivo", "prediction", + "predictons", "predictions", + "predjuiced", "prejudiced", + "predjuices", "prejudices", + "preduction", "prediction", + "preductive", "predictive", + "predujiced", "prejudiced", + "predujices", "prejudices", + "prefarable", "preferable", + "prefarably", "preferably", + "prefection", "perfection", + "preferance", "preference", + "prefereble", "preferable", + "preferente", "preference", + "preferenze", "preference", + "preferible", "preferable", + "preferibly", "preferably", + "prefernece", "preferences", + "preformers", "performers", + "pregancies", "pregnancies", + "pregnanies", "pregnancies", + "preipheral", "peripheral", + "preisdents", "presidents", + "preisthood", "priesthood", + "prejeduced", "prejudiced", + "prejeduces", "prejudices", + "prejiduced", "prejudiced", + "prejiduces", "prejudices", + "prejucided", "prejudiced", + "prejucides", "prejudices", + "prejuduced", "prejudiced", + "prejuduces", "prejudices", + "prelimiary", 
"preliminary", + "prematurly", "prematurely", + "preminence", "preeminence", + "premission", "permission", + "preorderes", "preorders", + "prepartion", "preparation", + "prepetuate", "perpetuate", + "preposters", "preposterous", + "prescients", "presidents", + "prescirbed", "prescribed", + "prescriped", "prescribed", + "presearing", "preserving", + "presecuted", "persecuted", + "presedency", "presidency", + "presedents", "presidents", + "presenning", "presenting", + "presentase", "presents", + "presentato", "presentation", + "presention", "presenting", + "presentors", "presents", + "preservare", "preserve", + "preservato", "preservation", + "preserverd", "preserved", + "presidancy", "presidency", + "presidante", "presidents", + "presidenta", "presidential", + "presidenty", "presidency", + "presidunce", "presidency", + "presistent", "persistent", + "presonally", "personally", + "presonhood", "personhood", + "pressuming", "pressuring", + "prestigios", "prestigious", + "prestigous", "prestigious", + "presuambly", "presumably", + "presuasion", "persuasion", + "presuasive", "persuasive", + "presumebly", "presumably", + "presumendo", "presumed", + "presumibly", "presumably", + "presumpton", "presumption", + "pretaining", "pertaining", + "pretection", "protection", + "pretendias", "pretends", + "pretensive", "pretense", + "pretentios", "pretentious", + "pretentous", "pretentious", + "prevalecen", "prevalence", + "prevalente", "prevalence", + "prevencion", "prevention", + "preventivo", "prevention", + "preventors", "prevents", + "previaling", "prevailing", + "previosuly", "previously", + "previoulsy", "previously", + "prevolence", "prevalence", + "pricinpals", "principals", + "primarilly", "primarily", + "primatives", "primitives", + "princepals", "principals", + "princesess", "princesses", + "princibles", "principles", + "principaly", "principality", + "principels", "principals", + "principial", "principal", + "principias", "principals", + "principlas", "principals", + "prinicipal", "principal", + "prinicpals", "principals", + "prinicples", "principles", + "printerest", "printers", + "prioratize", "prioritize", + "prioretize", "prioritize", + "prioritice", "prioritize", + "prioritied", "prioritize", + "prioroties", "priorities", + "priorotize", "prioritize", + "priotities", "priorities", + "priotitize", "prioritize", + "privaleged", "privileged", + "privaleges", "privileges", + "privaticed", "privatized", + "privelaged", "privileged", + "privelages", "privileges", + "priveldges", "privileges", + "priveleged", "privileged", + "priveleges", "privileges", + "privelidge", "privileged", + "priveliged", "privileged", + "priveliges", "privileges", + "privetized", "privatized", + "privilaged", "privileged", + "privilages", "privileges", + "priviledge", "privilege", + "privilegde", "privileges", + "privilegie", "privilege", + "priviliged", "privileged", + "priviliges", "privileges", + "privitazed", "privatized", + "privitized", "privatized", + "probabiliy", "probability", + "probabilty", "probability", + "probablies", "probable", + "probablybe", "probable", + "problemita", "problematic", + "procalimed", "proclaimed", + "procceding", "proceeding", + "procedding", "proceeding", + "procederal", "procedural", + "procedings", "proceedings", + "procedrual", "procedural", + "proceededs", "proceeds", + "proceedure", "procedure", + "proceesing", "proceeding", + "processsor", "processors", + "proclamied", "proclaimed", + "proclaming", "proclaiming", + "procliamed", "proclaimed", + "procreatin", "procreation", + 
"procudures", "procedures", + "prodcution", "production", + "prodecural", "procedural", + "prodecures", "procedures", + "produccion", "production", + "produceras", "produces", + "produceres", "produces", + "producirse", "producers", + "produciton", "production", + "producting", "production", + "productino", "productions", + "productivo", "production", + "productivy", "productivity", + "productoin", "productions", + "produktion", "production", + "produktive", "productive", + "produtcion", "productions", + "profesions", "profession", + "professers", "professors", + "professorn", "profession", + "professsor", "professors", + "proffesion", "profession", + "proficeint", "proficient", + "proficiant", "proficient", + "proficieny", "proficiency", + "proficincy", "proficiency", + "profitabel", "profitable", + "profitabil", "profitable", + "profitible", "profitable", + "proftiable", "profitable", + "programmar", "programmer", + "programmme", "programme", + "progresing", "progressing", + "progresion", "progression", + "progresive", "progressive", + "progressie", "progressives", + "progressin", "progression", + "progresson", "progression", + "progressos", "progresses", + "progressus", "progresses", + "prohibirte", "prohibit", + "prohibites", "prohibits", + "prohibitng", "prohibiting", + "prohibiton", "prohibition", + "prohibitus", "prohibits", + "prohibitve", "prohibited", + "prohobited", "prohibited", + "prohpecies", "prophecies", + "projecitle", "projectiles", + "projectiel", "projectiles", + "projecties", "projectiles", + "projectils", "projectiles", + "projectles", "projectiles", + "projectlie", "projectiles", + "projectyle", "projectile", + "projektile", "projectile", + "projektion", "projection", + "prometheas", "prometheus", + "promethese", "prometheus", + "promethius", "prometheus", + "promethous", "prometheus", + "promethues", "prometheus", + "prominance", "prominence", + "prominenty", "prominently", + "prominetly", "prominently", + "promiscous", "promiscuous", + "promiscuos", "promiscuous", + "promoteurs", "promotes", + "promotheus", "prometheus", + "promotinal", "promotional", + "pronoucned", "pronounced", + "pronouning", "pronouncing", + "propechies", "prophecies", + "propencity", "propensity", + "propenents", "proponents", + "properites", "properties", + "propersity", "propensity", + "propertion", "proportion", + "propertius", "properties", + "prophacies", "prophecies", + "prophocies", "prophecies", + "propietary", "proprietary", + "proplusion", "propulsion", + "propoganda", "propaganda", + "propogates", "propagates", + "propolsion", "propulsion", + "proponants", "proponents", + "proponenet", "proponent", + "proporcion", "proportion", + "proporties", "properties", + "proporting", "proportion", + "propositon", "proposition", + "propotions", "proportions", + "proprietry", "proprietary", + "proprotion", "proportion", + "propserity", "prosperity", + "propserous", "prosperous", + "propulaios", "propulsion", + "propulsing", "propulsion", + "propultion", "propulsion", + "propuslion", "propulsion", + "prosectued", "prosecuted", + "prosectuor", "prosecutor", + "prosecuter", "prosecutor", + "prosecutie", "prosecuted", + "prosicuted", "prosecuted", + "prosicutor", "prosecutor", + "prosocuted", "prosecuted", + "prosparity", "prosperity", + "prospectos", "prospects", + "prosperety", "prosperity", + "prospertiy", "prosperity", + "prosphetic", "prosthetic", + "prosporous", "prosperous", + "prostehtic", "prosthetic", + "prosterity", "prosperity", + "prostethic", "prosthetic", + "prostitite", 
"prostitute", + "prostitude", "prostitute", + "prostituee", "prostitute", + "prostituer", "prostitute", + "prostitues", "prostitutes", + "prostiture", "prostitute", + "prostituto", "prostitution", + "prostituye", "prostitute", + "protaginst", "protagonist", + "protastant", "protestant", + "proteccion", "protection", + "proteciton", "protections", + "protectice", "protective", + "protectiei", "protective", + "protectoin", "protections", + "protectons", "protectors", + "protectron", "protection", + "protestans", "protests", + "protestare", "protesters", + "protestato", "protestant", + "protestent", "protestant", + "protestina", "protestant", + "prothsetic", "prosthetic", + "protistant", "protestant", + "protocoles", "protocols", + "protocolls", "protocols", + "protocolos", "protocols", + "protohypes", "prototypes", + "protostant", "protestant", + "prototipes", "prototypes", + "prototpyes", "prototypes", + "protraying", "portraying", + "protuguese", "portuguese", + "provencial", "provincial", + "proveribal", "proverbial", + "provervial", "proverbial", + "providance", "providence", + "providince", "providence", + "provinciae", "province", + "provincies", "province", + "provincija", "provincial", + "provinence", "providence", + "provinical", "provincial", + "provintial", "provincial", + "provinvial", "provincial", + "provisiosn", "provision", + "provisonal", "provisional", + "provocatie", "provocative", + "pscyhology", "psychology", + "pscyhopath", "psychopath", + "pshycology", "psychology", + "pshycopath", "psychopath", + "psychedlic", "psychedelic", + "psychiatic", "psychiatric", + "psycholoog", "psychology", + "psychopaat", "psychopath", + "psychopats", "psychopaths", + "ptichforks", "pitchforks", + "publicitan", "publication", + "publisheed", "published", + "publisherr", "publisher", + "publishher", "publisher", + "publissher", "publisher", + "publlisher", "publisher", + "punihsment", "punishments", + "punishemnt", "punishments", + "punishible", "punishable", + "punishmnet", "punishments", + "punissable", "punishable", + "punsihable", "punishable", + "purchacing", "purchasing", + "purpolsion", "propulsion", + "purposedly", "purposely", + "purposelly", "purposely", + "purpotedly", "purportedly", + "pususading", "persuading", + "pyschology", "psychology", + "pyschopath", "psychopath", + "qaulifiers", "qualifiers", + "quailfiers", "qualifiers", + "qualfiiers", "qualifiers", + "qualifieds", "qualifies", + "qualifiies", "qualifiers", + "qualifiing", "qualifying", + "qualifires", "qualifiers", + "qualifyers", "qualifiers", + "qualitying", "qualifying", + "quanitites", "quantities", + "quantaties", "quantities", + "quantitize", "quantities", + "quarantena", "quarantine", + "quarantene", "quarantine", + "quarantied", "quarantine", + "quarintine", "quarantine", + "quaruntine", "quarantine", + "quesitoned", "questioned", + "questional", "questionable", + "questionne", "questioned", + "rabinnical", "rabbinical", + "radiactive", "radioactive", + "radioacive", "radioactive", + "rainbowers", "rainbows", + "randmoness", "randomness", + "randomzied", "randomized", + "randonmess", "randomness", + "randumness", "randomness", + "raspberrry", "raspberry", + "rationalle", "rationale", + "readmition", "readmission", + "realitvely", "relatively", + "realtively", "relatively", + "realtivity", "relativity", + "reaserched", "researched", + "reasercher", "researcher", + "rebiulding", "rebuilding", + "reboudning", "rebounding", + "rebouncing", "rebounding", + "rebuidling", "rebuilding", + "rebuliding", "rebuilding", + 
"rebuplican", "republican", + "reccommend", "recommend", + "recepients", "recipients", + "receptoras", "receptors", + "receptores", "receptors", + "recgonised", "recognised", + "recgonized", "recognized", + "recgonizes", "recognizes", + "reciepents", "recipients", + "recipeints", "recipients", + "recipiants", "recipients", + "recocnised", "recognised", + "recoginsed", "recognised", + "recoginzed", "recognized", + "recognices", "recognizes", + "recogniton", "recognition", + "recognzied", "recognised", + "recomended", "recommended", + "recommande", "recommend", + "recommands", "recommends", + "recommeded", "recommended", + "recommened", "recommend", + "recommennd", "recommends", + "recomments", "recommends", + "recompence", "recompense", + "reconcider", "reconsider", + "reconcille", "reconcile", + "recongised", "recognised", + "recongized", "recognized", + "recongizes", "recognizes", + "reconisder", "reconsider", + "reconsiled", "reconsider", + "recordarle", "recorder", + "recordarme", "recorder", + "recordarse", "recorder", + "recordarte", "recorder", + "recreacion", "recreation", + "recreatief", "recreate", + "recreativo", "recreation", + "recrutiers", "recruiters", + "rectanglar", "rectangular", + "rectangual", "rectangular", + "rectanguar", "rectangular", + "recuriters", "recruiters", + "recurrance", "recurrence", + "recursivly", "recursively", + "redefinied", "redefine", + "redefinine", "redefine", + "redemtpion", "redemption", + "redepmtion", "redemption", + "redesiging", "redesign", + "rediculous", "ridiculous", + "redmeption", "redemption", + "redneckers", "rednecks", + "redneckese", "rednecks", + "redneckest", "rednecks", + "reduncancy", "redundancy", + "redundency", "redundancy", + "redundnacy", "redundancy", + "redunduncy", "redundancy", + "reenforced", "reinforced", + "reevaulate", "reevaluate", + "refedendum", "referendum", + "refelcting", "reflecting", + "refelction", "reflection", + "refelctive", "reflective", + "referances", "references", + "referandum", "referendum", + "referemces", "references", + "referemdum", "referendum", + "referendim", "referendum", + "referendom", "referendum", + "referenece", "reference", + "referening", "referencing", + "referenses", "referees", + "referentes", "references", + "referneces", "references", + "referrence", "reference", + "referundum", "referendum", + "refference", "reference", + "refleciton", "reflections", + "reflecters", "reflects", + "reflektion", "reflection", + "reflextion", "reflection", + "reformerad", "reformed", + "refrigerar", "refrigerator", + "refurbised", "refurbished", + "regenarate", "regenerate", + "registeres", "registers", + "registrato", "registration", + "regresives", "regressive", + "regressivo", "regression", + "regualting", "regulating", + "regualtion", "regulations", + "regualtors", "regulators", + "regulacion", "regulation", + "regulament", "regulate", + "regulaotrs", "regulators", + "regularily", "regularly", + "regularing", "regulating", + "regularlas", "regulars", + "regularlos", "regulars", + "regulaters", "regulators", + "regulatios", "regulators", + "regulatons", "regulations", + "rehtorical", "rhetorical", + "reinstaled", "reinstalled", + "reitrement", "retirement", + "relagation", "relaxation", + "relatation", "relaxation", + "relativety", "relativity", + "relativily", "relativity", + "relativley", "relatively", + "relavation", "relaxation", + "relaxating", "relaxation", + "relazation", "relaxation", + "releagtion", "relegation", + "relegetion", "relegation", + "relentness", "relentless", + 
"reletnless", "relentless", + "relevation", "revelation", + "relexation", "relegation", + "relfecting", "reflecting", + "relfection", "reflection", + "relfective", "reflective", + "reliabilty", "reliability", + "reliablely", "reliably", + "religiones", "religions", + "religiosly", "religiously", + "religiousy", "religiously", + "religously", "religiously", + "relitavely", "relatively", + "reluctanct", "reluctant", + "reluctanly", "reluctantly", + "reluctanty", "reluctantly", + "remarcably", "remarkably", + "remarkibly", "remarkably", + "rememberes", "remembers", + "remenicent", "reminiscent", + "reminisent", "reminiscent", + "reminscent", "reminiscent", + "remmebered", "remembered", + "renaissace", "renaissance", + "renderered", "rendered", + "renegerate", "regenerate", + "renewabels", "renewables", + "renewebles", "renewables", + "rennovated", "renovated", + "renweables", "renewables", + "repatition", "repetition", + "repblicans", "republicans", + "repbulican", "republican", + "repeadedly", "repeatedly", + "repeadetly", "repeatedly", + "repearable", "repeatable", + "repearedly", "repealed", + "repeatadly", "repeatedly", + "repeatedlt", "repealed", + "repeatetly", "repeatedly", + "repeatible", "repeatable", + "repeatidly", "repeatedly", + "repectable", "repeatable", + "repentable", "repeatable", + "repentence", "repentance", + "repersents", "represents", + "repetation", "repetition", + "repeteadly", "repeatedly", + "repetetion", "repetition", + "repeticion", "repetition", + "repetitivo", "repetition", + "replacated", "replicated", + "replaceble", "replaceable", + "replacemet", "replacements", + "replacemnt", "replacement", + "replacemtn", "replacements", + "replecated", "replicated", + "repoistory", "repository", + "reponsible", "responsible", + "reportadly", "reportedly", + "reporteros", "reporters", + "reportidly", "reportedly", + "repositary", "repository", + "reposotory", "repository", + "repostiory", "repository", + "representn", "representing", + "repressent", "represents", + "repressivo", "repression", + "repsectful", "respectful", + "repsecting", "respecting", + "repsective", "respective", + "repsonding", "responding", + "repsonsive", "responsive", + "reptuation", "reputation", + "repubicans", "republicans", + "republcian", "republican", + "republians", "republicans", + "republicon", "republican", + "repuglican", "republican", + "repulicans", "republicans", + "reputacion", "reputation", + "requirment", "requirement", + "requrement", "requirement", + "resemblace", "resemble", + "reserached", "researched", + "reseracher", "researchers", + "reserverad", "reserved", + "reservered", "reserved", + "residental", "residential", + "resistable", "resistible", + "resistanes", "resistances", + "resistanse", "resistances", + "resistence", "resistance", + "resistendo", "resisted", + "resistered", "resisted", + "resistnace", "resistances", + "resitsance", "resistances", + "resoltuion", "resolutions", + "resolucion", "resolution", + "resolutino", "resolutions", + "resolutoin", "resolutions", + "resolutons", "resolutions", + "resolvemos", "resolves", + "resolvendo", "resolved", + "resolveres", "resolves", + "resolverse", "resolves", + "resolviste", "resolves", + "resonabelt", "resonate", + "resoultion", "resolution", + "respecitve", "respective", + "respectifs", "respects", + "respection", "respecting", + "respectons", "respects", + "respectuos", "respects", + "respektive", "respective", + "respiratoy", "respiratory", + "responcive", "responsive", + "responisve", "responsive", + "responsibe", 
"responsive", + "responsiby", "responsibly", + "responsile", "responsive", + "responsing", "responding", + "ressembled", "resembled", + "restarants", "restaurants", + "restaraunt", "restaurant", + "restaruant", "restaurant", + "restatting", "restarting", + "restaurent", "restaurant", + "restauring", "restarting", + "resteraunt", "restaurant", + "restircted", "restricted", + "restorting", "restarting", + "restrainig", "restraining", + "restrcited", "restricted", + "restrcting", "restarting", + "restricing", "restricting", + "restricion", "restriction", + "restricive", "restrictive", + "restrictes", "restricts", + "restrictie", "restrictive", + "restricton", "restriction", + "restructed", "restricted", + "restuarant", "restaurant", + "resturants", "restaurants", + "resturaunt", "restaurant", + "retaliaton", "retaliation", + "rethorical", "rhetorical", + "retierment", "retirement", + "retribuito", "retribution", + "retrosepct", "retrospect", + "retrospekt", "retrospect", + "revaluated", "reevaluated", + "revealtion", "revelations", + "revelaiton", "revelations", + "revelatons", "revelations", + "revelution", "revelation", + "reversable", "reversible", + "reversably", "reversal", + "reviewtrue", "reviewer", + "revisiones", "revisions", + "revisionis", "revisions", + "revoltuion", "revolution", + "revoluiton", "revolutions", + "revolutoin", "revolutions", + "revoultion", "revolution", + "rewarching", "rewatching", + "rewatchibg", "rewatching", + "rewatchign", "rewatching", + "rewatchimg", "rewatching", + "rhapsodomy", "rhapsody", + "rhetorisch", "rhetoric", + "ridicilous", "ridiculous", + "ridicoulus", "ridiculous", + "ridiculise", "ridicule", + "ridiculize", "ridicule", + "ridiculled", "ridicule", + "ridiculose", "ridicule", + "ridiculued", "ridicule", + "rienforced", "reinforced", + "rigthfully", "rightfully", + "roleplaing", "roleplaying", + "romanmania", "romanian", + "roundaboot", "roundabout", + "rucuperate", "recuperate", + "rudimentry", "rudimentary", + "sacarmento", "sacramento", + "sacntioned", "sanctioned", + "sacraficed", "sacrificed", + "sacrafices", "sacrifices", + "sacramenno", "sacramento", + "sacreficed", "sacrificed", + "sacrefices", "sacrifices", + "sacremento", "sacramento", + "sacrifaced", "sacrificed", + "sacrifaces", "sacrifices", + "sacrifical", "sacrificial", + "sacrificas", "sacrifices", + "sacrificie", "sacrificed", + "sacrificng", "sacrificing", + "sacrifises", "sacrifices", + "sacrifized", "sacrificed", + "sacrifizes", "sacrifices", + "sacromento", "sacramento", + "sadistisch", "sadistic", + "sanctionne", "sanctioned", + "sandiwches", "sandwiches", + "sandviches", "sandwiches", + "sandwishes", "sandwiches", + "sanitazion", "sanitation", + "santiation", "sanitation", + "sastifying", "satisfying", + "satellitte", "satellites", + "satifsying", "satisfying", + "satrically", "satirically", + "satsifying", "satisfying", + "sattelites", "satellites", + "saturacion", "saturation", + "scandalosa", "scandals", + "scandalose", "scandals", + "scandalosi", "scandals", + "scandaloso", "scandals", + "scandaniva", "scandinavia", + "scandinava", "scandinavian", + "scandinvia", "scandinavia", + "scaramento", "sacramento", + "scarificed", "sacrificed", + "scarifices", "sacrifices", + "scarmbling", "scrambling", + "scartching", "scratching", + "sceintific", "scientific", + "sceintists", "scientists", + "scenarioes", "scenarios", + "scenarions", "scenarios", + "scenarious", "scenarios", + "scheudling", "scheduling", + "scholarhip", "scholarship", + "scholarley", "scholarly", + 
"sciencists", "scientists", + "scientests", "scientists", + "scirptures", "scriptures", + "scooterers", "scooters", + "scorebaord", "scoreboard", + "scoreborad", "scoreboard", + "scorebored", "scoreboard", + "scorpiomon", "scorpion", + "scracthing", "scratching", + "scramblies", "scramble", + "screenshat", "screenshot", + "screenshit", "screenshot", + "scriptores", "scriptures", + "scripturae", "scriptures", + "scriputres", "scriptures", + "scritpures", "scriptures", + "scrutinity", "scrutiny", + "seahawkers", "seahawks", + "sebastiaan", "sebastian", + "segegrated", "segregated", + "segragated", "segregated", + "segregaded", "segregated", + "segregatie", "segregated", + "segretated", "segregated", + "segrigated", "segregated", + "selectiose", "selections", + "selectivly", "selectively", + "selectivos", "selections", + "selfishess", "selfishness", + "senitments", "sentiments", + "sensitiviy", "sensitivity", + "sensitivty", "sensitivity", + "sentaments", "sentiments", + "sentancing", "sentencing", + "sentements", "sentiments", + "sentencian", "sentencing", + "sentensing", "sentencing", + "sentimenal", "sentimental", + "sentimetal", "sentimental", + "sentincing", "sentencing", + "sentinents", "sentiments", + "separacion", "separation", + "separaters", "separates", + "separatley", "separately", + "separatron", "separation", + "separetely", "separately", + "seperately", "separately", + "seperating", "separating", + "seperation", "separation", + "seperatism", "separatism", + "seperatist", "separatist", + "seperatley", "seperate", + "sepulchure", "sepulchre", + "serenitary", "serenity", + "serviceble", "serviceable", + "settelment", "settlement", + "settlemens", "settlements", + "settlemets", "settlements", + "settlemnts", "settlements", + "seuxalized", "sexualized", + "seventeeen", "seventeen", + "sexaulized", "sexualized", + "sexualixed", "sexualized", + "sexuallity", "sexually", + "sexualzied", "sexualized", + "sexulaized", "sexualized", + "shakespare", "shakespeare", + "shakespeer", "shakespeare", + "shakespere", "shakespeare", + "shamelesly", "shamelessly", + "shamelessy", "shamelessly", + "shaprening", "sharpening", + "shareholds", "shareholders", + "sharkening", "sharpening", + "sharpining", "sharpening", + "shartening", "sharpening", + "shatnering", "shattering", + "shattening", "shattering", + "shepharded", "shepherd", + "shilouette", "silhouette", + "shitlasses", "shitless", + "shortenend", "shortened", + "shortining", "shortening", + "sidelinien", "sideline", + "sidelinjen", "sideline", + "sidelinked", "sideline", + "sigantures", "signatures", + "sightstine", "sightstone", + "signficant", "significant", + "signifiant", "significant", + "significat", "significant", + "signitures", "signatures", + "sigthstone", "sightstone", + "sihlouette", "silhouette", + "silohuette", "silhouette", + "silouhette", "silhouette", + "similairty", "similarity", + "similarily", "similarly", + "similarlly", "similarly", + "similiarly", "similarly", + "similiarty", "similarity", + "simliarity", "similarity", + "simluation", "simulation", + "simplictic", "simplistic", + "simplifing", "simplifying", + "simplifyed", "simplified", + "simplifyng", "simplifying", + "simplisitc", "simplistic", + "simplisity", "simplicity", + "simplistes", "simplest", + "simplivity", "simplicity", + "simplyfied", "simplified", + "simualtion", "simulation", + "simulacion", "simulation", + "simulaiton", "simulations", + "simulaties", "simulate", + "simulative", "simulate", + "simulatons", "simulations", + "simulatore", "simulate", + 
"sincereley", "sincerely", + "sincerelly", "sincerely", + "singatures", "signatures", + "singulaire", "singular", + "singulariy", "singularity", + "singularty", "singularity", + "singulator", "singular", + "sitautions", "situations", + "situatinal", "situational", + "skatebaord", "skateboard", + "skateborad", "skateboard", + "skatebored", "skateboard", + "skatebrand", "skateboard", + "skeletones", "skeletons", + "skeptecism", "skepticism", + "skepticals", "skeptics", + "skepticles", "skeptics", + "skepticons", "skeptics", + "skeptisicm", "skepticism", + "skeptisism", "skepticism", + "sketchysex", "sketches", + "sketpicism", "skepticism", + "skillhosts", "skillshots", + "skillshits", "skillshots", + "skillshoot", "skillshots", + "skillslots", "skillshots", + "skillsofts", "skillshots", + "skillsshot", "skillshots", + "skirmiches", "skirmish", + "skpeticism", "skepticism", + "slaughterd", "slaughtered", + "slipperies", "slippers", + "smarpthone", "smartphones", + "smarthpone", "smartphone", + "snadwiches", "sandwiches", + "snowbaling", "snowballing", + "snowballes", "snowballs", + "snowballls", "snowballs", + "socailists", "socialists", + "socailized", "socialized", + "socialisim", "socialism", + "socializng", "socializing", + "socialsits", "socialists", + "sociapaths", "sociopaths", + "socilaists", "socialists", + "socilaized", "socialized", + "sociologia", "sociological", + "sociopatas", "sociopaths", + "sociopatch", "sociopaths", + "sociopatic", "sociopathic", + "socratease", "socrates", + "socreboard", "scoreboard", + "soemthings", "somethings", + "soldiarity", "solidarity", + "solidairty", "solidarity", + "soliditary", "solidarity", + "solitudine", "solitude", + "somehtings", "somethings", + "someonelse", "someones", + "somethibng", "somethin", + "somethigng", "somethin", + "somethigns", "somethings", + "somethihng", "somethin", + "somethiing", "somethin", + "somethijng", "somethin", + "somethikng", "somethin", + "somethimng", "somethin", + "somethinbg", "somethings", + "somethines", "somethings", + "somethinfg", "somethings", + "somethinhg", "somethings", + "somethinig", "somethings", + "somethinkg", "somethings", + "somethinks", "somethings", + "somethinmg", "somethings", + "somethinng", "somethings", + "somethintg", "somethings", + "somethiong", "somethin", + "somethiung", "somethin", + "sophicated", "sophisticated", + "sotrmfront", "stormfront", + "sotrylines", "storylines", + "soudntrack", "soundtrack", + "soundrtack", "soundtracks", + "soundtracs", "soundtracks", + "soundtrakc", "soundtracks", + "soundtrakk", "soundtrack", + "soundtraks", "soundtracks", + "southampon", "southampton", + "southamton", "southampton", + "southerers", "southerners", + "southernes", "southerners", + "southerton", "southern", + "souveniers", "souvenirs", + "sovereigny", "sovereignty", + "sovereinty", "sovereignty", + "soverignty", "sovereignty", + "spartaniis", "spartans", + "spartanops", "spartans", + "specailist", "specialist", + "specailize", "specializes", + "specialice", "specialize", + "specialied", "specialized", + "specialies", "specializes", + "specialits", "specials", + "speciallly", "specially", + "speciallty", "specially", + "specialops", "specials", + "specialsts", "specialists", + "specialtys", "specials", + "specialzed", "specialized", + "specialzes", "specializes", + "specifices", "specifics", + "specifiing", "specifying", + "specifiyng", "specifying", + "speciliast", "specialists", + "specimines", "specimen", + "spectarors", "spectators", + "spectaters", "spectators", + "spectracal", 
"spectral", + "spectraply", "spectral", + "spectrolab", "spectral", + "speculatie", "speculative", + "speculatin", "speculation", + "speecheasy", "speeches", + "speicalist", "specialist", + "spiritualy", "spiritually", + "sponsorees", "sponsors", + "sponsorhip", "sponsorship", + "sponsorise", "sponsors", + "spontaneos", "spontaneous", + "spontaneus", "spontaneous", + "spontanous", "spontaneous", + "spoonfulls", "spoonfuls", + "spreadshet", "spreadsheet", + "springfeld", "springfield", + "springfied", "springfield", + "spriritual", "spiritual", + "squirrells", "squirrels", + "squirrelus", "squirrels", + "stabelized", "stabilized", + "stabilzied", "stabilized", + "stablility", "stability", + "stablizied", "stabilized", + "staggaring", "staggering", + "stakeboard", "skateboard", + "starighten", "straighten", + "starnation", "starvation", + "startegies", "strategies", + "startupbus", "startups", + "starwberry", "strawberry", + "statememts", "statements", + "statictics", "statistics", + "stationair", "stationary", + "statisitcs", "statistics", + "statistcal", "statistical", + "statistisk", "statistics", + "stauration", "saturation", + "stealthboy", "stealthy", + "stealthely", "stealthy", + "stealthify", "stealthy", + "stealthray", "stealthy", + "steeleries", "steelers", + "stereotipe", "stereotype", + "stereotpye", "stereotypes", + "steriotype", "stereotype", + "steroetype", "stereotype", + "sterotypes", "stereotypes", + "steryotype", "stereotype", + "stimilants", "stimulants", + "stimilated", "stimulated", + "stimualted", "stimulated", + "stimulatie", "stimulated", + "stimulatin", "stimulation", + "stimulaton", "stimulation", + "stimulents", "stimulants", + "stomrfront", "stormfront", + "storelines", "storylines", + "stormfornt", "stormfront", + "stormfromt", "stormfront", + "stornfront", "stormfront", + "stornghold", "stronghold", + "stradegies", "strategies", + "strageties", "strategies", + "straighted", "straightened", + "straightie", "straighten", + "straightin", "straighten", + "straigthen", "straighten", + "stranglove", "strangle", + "strangreal", "strangle", + "stratagies", "strategies", + "strategems", "strategies", + "strategice", "strategies", + "strategisk", "strategies", + "stravation", "starvation", + "strawbarry", "strawberry", + "strawbeary", "strawberry", + "strawbeery", "strawberry", + "strawbrary", "strawberry", + "strawburry", "strawberry", + "streaching", "stretching", + "streamtrue", "streamer", + "strechting", "stretching", + "strecthing", "stretching", + "stregnthen", "strengthen", + "streichung", "stretching", + "strenghten", "strengthen", + "strengsten", "strengthen", + "strengthes", "strengths", + "strengthin", "strengthen", + "stressende", "stressed", + "striaghten", "straighten", + "stromfront", "stormfront", + "stronkhold", "stronghold", + "stroylines", "storylines", + "structered", "structured", + "structrual", "structural", + "structurel", "structural", + "strucutral", "structural", + "strucutred", "structured", + "strucutres", "structures", + "strugglign", "struggling", + "strwaberry", "strawberry", + "sttutering", "stuttering", + "stupidfree", "stupider", + "stupiditiy", "stupidity", + "sturctural", "structural", + "sturctures", "structures", + "sturggling", "struggling", + "subarmines", "submarines", + "subcultuur", "subculture", + "subesquent", "subsequent", + "subisdized", "subsidized", + "subjectief", "subjective", + "subjectifs", "subjects", + "subjectivy", "subjectively", + "subjektive", "subjective", + "submariens", "submarines", + "submarinas", 
"submarines", + "submergerd", "submerged", + "submerines", "submarines", + "submisison", "submissions", + "submissies", "submissive", + "submissons", "submissions", + "submittion", "submitting", + "subsadized", "subsidized", + "subscirbed", "subscribed", + "subscirber", "subscribers", + "subscribar", "subscriber", + "subscribir", "subscriber", + "subscrible", "subscriber", + "subscriped", "subscribed", + "subscrubed", "subscribed", + "subscryber", "subscriber", + "subsedized", "subsidized", + "subsequant", "subsequent", + "subsidezed", "subsidized", + "subsidiced", "subsidized", + "subsidizng", "subsidizing", + "subsiduary", "subsidiary", + "subsiquent", "subsequent", + "subsittute", "substitutes", + "subsizided", "subsidized", + "subsrcibed", "subscribed", + "substanial", "substantial", + "substansen", "substances", + "substanser", "substances", + "substanses", "substances", + "substantie", "substantive", + "substatial", "substantial", + "substences", "substances", + "substitite", "substitute", + "substittue", "substitutes", + "substitude", "substitute", + "substitued", "substitute", + "substituer", "substitute", + "substitues", "substitutes", + "substiture", "substitute", + "substituto", "substitution", + "substituts", "substitutes", + "substracts", "subtracts", + "substutite", "substitutes", + "subsudized", "subsidized", + "subtitltes", "subtitle", + "succceeded", "succeeded", + "succcesses", "successes", + "succesfuly", "successfully", + "succesions", "succession", + "successing", "succession", + "successivo", "succession", + "sucesfully", "successfully", + "sucessfull", "successful", + "sucessfuly", "successfully", + "sudnerland", "sunderland", + "sufferered", "suffered", + "sufferring", "suffering", + "sufficiant", "sufficient", + "suggestied", "suggestive", + "suggestief", "suggestive", + "suggestons", "suggests", + "sumbarines", "submarines", + "sumbissive", "submissive", + "sumbitting", "submitting", + "summerized", "summarized", + "summorized", "summarized", + "summurized", "summarized", + "sunderlona", "sunderland", + "sunderlund", "sunderland", + "sungalsses", "sunglasses", + "sunglesses", "sunglasses", + "sunglinger", "gunslinger", + "sunscreeen", "sunscreen", + "superfical", "superficial", + "superfluos", "superfluous", + "superioara", "superior", + "superioare", "superior", + "superioris", "superiors", + "superivsor", "supervisors", + "supermaket", "supermarket", + "supermarkt", "supermarket", + "superouman", "superhuman", + "superposer", "superpowers", + "superviors", "supervisors", + "superviosr", "supervisors", + "supervisar", "supervisor", + "superviser", "supervisor", + "supervisin", "supervision", + "supervison", "supervision", + "supervsior", "supervisors", + "supperssor", "suppressor", + "supplament", "supplement", + "supplemant", "supplemental", + "supplemets", "supplements", + "supportare", "supporters", + "supporteur", "supporter", + "supportied", "supported", + "supportors", "supporters", + "supposdely", "supposedly", + "supposebly", "supposedly", + "supposidly", "supposedly", + "suppresion", "suppression", + "suppresors", "suppressor", + "suppressin", "suppression", + "suppressio", "suppressor", + "suppresson", "suppression", + "suprassing", "surpassing", + "supressing", "suppressing", + "supression", "suppression", + "supsension", "suspension", + "supsicions", "suspicions", + "supsicious", "suspicious", + "surounding", "surrounding", + "surplanted", "supplanted", + "surpressed", "suppressed", + "surprizing", "surprising", + "surrenderd", "surrendered", + 
"surrouding", "surrounding", + "surroundes", "surrounds", + "surroundig", "surroundings", + "survivours", "survivor", + "suseptable", "susceptible", + "suseptible", "susceptible", + "suspecions", "suspicions", + "suspecious", "suspicious", + "suspencion", "suspension", + "suspendeds", "suspense", + "suspention", "suspension", + "suspicians", "suspicions", + "suspiciois", "suspicions", + "suspicioso", "suspicions", + "suspicioun", "suspicion", + "suspicison", "suspicions", + "suspiciuos", "suspicions", + "suspicsion", "suspicions", + "suspisions", "suspicions", + "suspisious", "suspicious", + "suspitions", "suspicions", + "sustainble", "sustainable", + "swaetshirt", "sweatshirt", + "swearengin", "swearing", + "swearshirt", "sweatshirt", + "sweathsirt", "sweatshirt", + "sweatshits", "sweatshirt", + "sweatshort", "sweatshirt", + "sweatshrit", "sweatshirt", + "sweerheart", "sweetheart", + "sweetshart", "sweetheart", + "switcheasy", "switches", + "switzerand", "switzerland", + "symapthize", "sympathize", + "symbolisch", "symbolic", + "symbolisim", "symbolism", + "symetrical", "symmetrical", + "sympatheic", "sympathetic", + "sympathiek", "sympathize", + "sympathien", "sympathize", + "sympathtic", "sympathetic", + "sympathyze", "sympathize", + "sympethize", "sympathize", + "symphatize", "sympathize", + "symphonity", "symphony", + "sympothize", "sympathize", + "syncronous", "synchronous", + "synomymous", "synonymous", + "synomynous", "synonymous", + "synonamous", "synonymous", + "synonimous", "synonymous", + "synonmyous", "synonymous", + "synonomous", "synonymous", + "synonumous", "synonymous", + "synonynous", "synonymous", + "sypmathize", "sympathize", + "systamatic", "systematic", + "systemetic", "systematic", + "systemisch", "systemic", + "systimatic", "systematic", + "tabelspoon", "tablespoon", + "tablespons", "tablespoons", + "tablesppon", "tablespoon", + "tacitcally", "tactically", + "taiwanesse", "taiwanese", + "taligating", "tailgating", + "tantrumers", "tantrums", + "targetting", "targeting", + "teamfigths", "teamfights", + "teamifghts", "teamfights", + "teamspeack", "teamspeak", + "techicians", "technicians", + "techincian", "technician", + "techinican", "technician", + "techinques", "techniques", + "technicain", "technician", + "technicaly", "technically", + "technicans", "technicians", + "technichan", "technician", + "technicien", "technician", + "technicion", "technician", + "technitian", "technician", + "technqiues", "techniques", + "techtician", "technician", + "tehnically", "ethnically", + "telegrapgh", "telegraph", + "teleporing", "teleporting", + "televesion", "television", + "televisivo", "television", + "temafights", "teamfights", + "temerature", "temperature", + "temperatue", "temperature", + "temperment", "temperament", + "temperture", "temperature", + "templarios", "templars", + "templarius", "templars", + "temporaily", "temporarily", + "temporarly", "temporary", + "temptating", "temptation", + "temptetion", "temptation", + "tendancies", "tendencies", + "tendencias", "tendencies", + "tendencije", "tendencies", + "tendensies", "tendencies", + "tendincies", "tendencies", + "tensionors", "tensions", + "tentacreul", "tentacle", + "termanator", "terminator", + "termendous", "tremendous", + "termiantor", "terminator", + "termigator", "terminator", + "terminales", "terminals", + "terminalis", "terminals", + "terminarla", "terminal", + "terminarlo", "terminal", + "terminaron", "terminator", + "terminater", "terminator", + "terminolgy", "terminology", + "terorrists", "terrorists", + 
"terrerists", "terrorists", + "terrestial", "terrestrial", + "terriblely", "terribly", + "terriories", "territories", + "territoral", "territorial", + "territores", "territories", + "territoris", "territories", + "territorry", "territory", + "terrorisim", "terrorism", + "terrorsits", "terrorists", + "terrurists", "terrorists", + "testiclees", "testicles", + "testiclies", "testicle", + "testimoney", "testimony", + "thankyooou", "thankyou", + "themselfes", "themselves", + "themsevles", "themselves", + "themsleves", "themselves", + "theocracry", "theocracy", + "theologial", "theological", + "therapetic", "therapeutic", + "therepists", "therapists", + "theripists", "therapists", + "thermastat", "thermostat", + "thermistat", "thermostat", + "thermomter", "thermometer", + "theromstat", "thermostat", + "thorttling", "throttling", + "thorughout", "throughout", + "thouroghly", "thoroughly", + "threadened", "threaded", + "threatenes", "threatens", + "threatning", "threatening", + "threshhold", "threshold", + "throthling", "throttling", + "throtlling", "throttling", + "throughiut", "throughput", + "thubmnails", "thumbnails", + "thumbmails", "thumbnails", + "thunderbot", "thunderbolt", + "thunderolt", "thunderbolt", + "tighetning", "tightening", + "tightining", "tightening", + "tigthening", "tightening", + "tjpanishad", "upanishad", + "toothbruch", "toothbrush", + "toothbruth", "toothbrush", + "toothbursh", "toothbrush", + "toothrbush", "toothbrush", + "toppingest", "toppings", + "torchilght", "torchlight", + "torchlgiht", "torchlight", + "torchligth", "torchlight", + "torhclight", "torchlight", + "torrentbig", "torrenting", + "torrenters", "torrents", + "torrentors", "torrents", + "tortillera", "tortilla", + "tortillias", "tortilla", + "tortillita", "tortilla", + "tortilllas", "tortilla", + "torunament", "tournament", + "totalitara", "totalitarian", + "touchsceen", "touchscreen", + "touchscren", "touchscreen", + "touranment", "tournaments", + "tourmanent", "tournaments", + "tournamets", "tournaments", + "tournamnet", "tournament", + "tournemant", "tournament", + "tournement", "tournament", + "toxicitity", "toxicity", + "trafficing", "trafficking", + "trainwreak", "trainwreck", + "traitorise", "traitors", + "tramboline", "trampoline", + "tramploine", "trampoline", + "trampolene", "trampoline", + "tranformed", "transformed", + "tranistion", "transition", + "tranlsated", "translated", + "transalted", "translated", + "transaltes", "translates", + "transaltor", "translator", + "transation", "transition", + "transciprt", "transcripts", + "transcirpt", "transcripts", + "transcrips", "transcripts", + "transcrito", "transcript", + "transcrits", "transcripts", + "transcrpit", "transcript", + "transfered", "transferred", + "transferer", "transferred", + "transferes", "transfers", + "transferrs", "transfers", + "transferts", "transfers", + "transfomed", "transformed", + "transfored", "transformed", + "transforme", "transfer", + "transfroms", "transforms", + "transgeder", "transgender", + "transgener", "transgender", + "transicion", "transition", + "transision", "transition", + "transister", "transistor", + "transitons", "transitions", + "transitors", "transistor", + "transkript", "transcript", + "translater", "translator", + "translatin", "translations", + "translatio", "translator", + "translpant", "transplants", + "transluent", "translucent", + "transmited", "transmitted", + "transmiter", "transmitter", + "transmitor", "transistor", + "transmorgs", "transforms", + "transpalnt", "transplants", + "transphoic", 
"transphobic", + "transplain", "transplant", + "transplate", "transplant", + "transplats", "transplants", + "transpoder", "transported", + "transportr", "transporter", + "transsexal", "transsexual", + "transtator", "translator", + "tranzistor", "transistor", + "trasncript", "transcript", + "trasnforms", "transforms", + "trasnlated", "translated", + "trasnlator", "translator", + "trasnplant", "transplant", + "traveleres", "travelers", + "travelodge", "traveled", + "traverlers", "traverse", + "traversare", "traverse", + "traversier", "traverse", + "treasurery", "treasury", + "trememdous", "tremendous", + "tremondous", "tremendous", + "trespasing", "trespassing", + "trianwreck", "trainwreck", + "trochlight", "torchlight", + "trustworhy", "trustworthy", + "trustworty", "trustworthy", + "trustwothy", "trustworthy", + "tryannical", "tyrannical", + "tunraround", "turnaround", + "tupparware", "tupperware", + "turnapound", "turnaround", + "turthfully", "truthfully", + "tutoriales", "tutorials", + "tyrantical", "tyrannical", + "ubiqituous", "ubiquitous", + "ubiquotous", "ubiquitous", + "ubiqutious", "ubiquitous", + "ukrainains", "ukrainians", + "ukraineans", "ukrainians", + "ukrainiens", "ukrainians", + "ukraininas", "ukrainians", + "ukrianians", "ukrainians", + "ulitmately", "ultimately", + "ulterioara", "ulterior", + "ulterioare", "ulterior", + "ultimative", "ultimate", + "ultimatley", "ultimately", + "ultimatuum", "ultimatum", + "unanwsered", "unanswered", + "unasnwered", "unanswered", + "unattanded", "unattended", + "unattented", "unattended", + "unavailabe", "unavailable", + "unavailble", "unavailable", + "unavoidble", "unavoidable", + "unawnsered", "unanswered", + "unbalenced", "unbalanced", + "unballance", "unbalance", + "unbalnaced", "unbalanced", + "unbareable", "unbearable", + "unbeakable", "unbeatable", + "unbeareble", "unbearable", + "unbeatbale", "unbeatable", + "unbeateble", "unbeatable", + "unbeerable", "unbearable", + "unbeetable", "unbeatable", + "unbeknowst", "unbeknownst", + "unbreakble", "unbreakable", + "uncencored", "uncensored", + "uncensered", "uncensored", + "uncersored", "uncensored", + "uncertainy", "uncertainty", + "uncertanty", "uncertainty", + "uncesnored", "uncensored", + "uncomitted", "uncommitted", + "uncommited", "uncommitted", + "unconcious", "unconscious", + "unconscous", "unconscious", + "undebiably", "undeniably", + "undeinable", "undeniable", + "undeinably", "undeniably", + "undenaible", "undeniable", + "undenaibly", "undeniably", + "undenyable", "undeniable", + "undenyably", "undeniably", + "underbaker", "undertaker", + "undercling", "underlying", + "underfaker", "undertaker", + "undergated", "underrated", + "undergrand", "undergrad", + "undergroud", "underground", + "undergrund", "underground", + "undermimes", "undermines", + "underminde", "undermines", + "underminig", "undermining", + "underneeth", "underneath", + "underneith", "underneath", + "undernieth", "underneath", + "underpowed", "underpowered", + "underraged", "underrated", + "underraker", "undertaker", + "underrater", "undertaker", + "undersatnd", "understands", + "understadn", "understands", + "understans", "understands", + "understnad", "understands", + "understoon", "understood", + "understsnd", "understands", + "undertoker", "undertaker", + "undertsand", "understands", + "undertunes", "undertones", + "underwager", "underwater", + "underwares", "underwater", + "underwolrd", "underworld", + "underwoord", "underworld", + "underwrold", "underworld", + "underyling", "underlying", + "undesrtand", 
"understands", + "undoubtedy", "undoubtedly", + "undoubtely", "undoubtedly", + "undoubtley", "undoubtedly", + "uneccesary", "unnecessary", + "unecessary", "unnecessary", + "unedcuated", "uneducated", + "unedicated", "uneducated", + "unempolyed", "unemployed", + "unexplaind", "unexplained", + "unexplaned", "unexplained", + "unfamilair", "unfamiliar", + "unfamilier", "unfamiliar", + "unfinsihed", "unfinished", + "unfirendly", "unfriendly", + "unfortuate", "unfortunate", + "unfreindly", "unfriendly", + "unfriednly", "unfriendly", + "unfriently", "unfriendly", + "ungrapeful", "ungrateful", + "ungreatful", "ungrateful", + "unhealthly", "unhealthy", + "unicornios", "unicorns", + "unifnished", "unfinished", + "unihabited", "uninhabited", + "unilatreal", "unilateral", + "unimporant", "unimportant", + "unimpresed", "unimpressed", + "unimpressd", "unimpressed", + "uninsipred", "uninspired", + "uninspried", "uninspired", + "uninstaled", "uninstalled", + "uniquiness", "uniqueness", + "univercity", "university", + "univeristy", "university", + "universale", "universe", + "universaly", "universally", + "universels", "universes", + "universets", "universes", + "universite", "universities", + "universtiy", "university", + "unjustifed", "unjustified", + "unknowingy", "unknowingly", + "unknowinly", "unknowingly", + "unnecesary", "unnecessary", + "unofficail", "unofficial", + "unoffocial", "unofficial", + "unorginial", "unoriginal", + "unorignial", "unoriginal", + "unorigonal", "unoriginal", + "unplacable", "unplayable", + "unplaybale", "unplayable", + "unplayeble", "unplayable", + "unpleasent", "unpleasant", + "unpopulair", "unpopular", + "unproteced", "unprotected", + "unqiueness", "uniqueness", + "unqualifed", "unqualified", + "unrealesed", "unreleased", + "unrealible", "unreliable", + "unrealistc", "unrealistic", + "unrealitic", "unrealistic", + "unreasonal", "unreasonably", + "unrelaible", "unreliable", + "unreleated", "unreleased", + "unrelyable", "unreliable", + "unrepetant", "unrepentant", + "unrepetent", "unrepentant", + "unresponse", "unresponsive", + "unsencored", "uncensored", + "unsetlling", "unsettling", + "unsolicted", "unsolicited", + "unsubscibe", "unsubscribe", + "unsubscrbe", "unsubscribe", + "unsucesful", "unsuccessful", + "unsuprised", "unsurprised", + "unsuprized", "unsurprised", + "unviersity", "university", + "unwrittern", "unwritten", + "urkainians", "ukrainians", + "utlimately", "ultimately", + "utlrasound", "ultrasound", + "vaccinatie", "vaccinated", + "vaccineras", "vaccines", + "valentians", "valentines", + "valentiens", "valentines", + "valentimes", "valentines", + "valentinas", "valentines", + "valentinos", "valentines", + "valentones", "valentines", + "validitity", "validity", + "valnetines", "valentines", + "vandalisim", "vandalism", + "vasectomey", "vasectomy", + "vegatarian", "vegetarian", + "vegaterian", "vegetarian", + "vegeratian", "vegetarians", + "vegetairan", "vegetarians", + "vegetarain", "vegetarians", + "vegetarien", "vegetarian", + "vegetarion", "vegetarian", + "vegetatian", "vegetarian", + "vegeterian", "vegetarian", + "vegitables", "vegetables", + "vehemantly", "vehemently", + "vehemontly", "vehemently", + "veitnamese", "vietnamese", + "veiwership", "viewership", + "veiwpoints", "viewpoints", + "venezuella", "venezuela", + "verificato", "verification", + "verifyable", "verifiable", + "veritcally", "vertically", + "veritiable", "verifiable", + "vernecular", "vernacular", + "vernicular", "vernacular", + "versatiliy", "versatility", + "versatille", "versatile", + 
"versatilty", "versatility", + "versitlity", "versatility", + "vewiership", "viewership", + "vibratoare", "vibrator", + "vicitmized", "victimized", + "vicotrious", "victorious", + "victemized", "victimized", + "victomized", "victimized", + "victorinos", "victorious", + "victorinus", "victorious", + "victoriosa", "victorious", + "victorioso", "victorious", + "victoriuos", "victorious", + "victumized", "victimized", + "videogaems", "videogames", + "videojames", "videogames", + "vidoegames", "videogames", + "vientamese", "vietnamese", + "vietmanese", "vietnamese", + "vietnamees", "vietnamese", + "vietnamise", "vietnamese", + "viewpionts", "viewpoints", + "vigilantie", "vigilante", + "vigoruosly", "vigorously", + "vigourosly", "vigorously", + "villageois", "villages", + "vindicitve", "vindictive", + "vindictave", "vindictive", + "visibiltiy", "visibility", + "vitenamese", "vietnamese", + "vocabluary", "vocabulary", + "volatiltiy", "volatility", + "volativity", "volatility", + "volitality", "volatility", + "volleyboll", "volleyball", + "vollyeball", "volleyball", + "volonteers", "volunteers", + "volounteer", "volunteer", + "voluntairy", "voluntarily", + "voluntarly", "voluntary", + "voluntears", "volunteers", + "volunteeer", "volunteers", + "volunteerd", "volunteered", + "voluntered", "volunteered", + "vulernable", "vulnerable", + "vulnarable", "vulnerable", + "vulnerabil", "vulnerable", + "vulnurable", "vulnerable", + "vunlerable", "vulnerable", + "warrandyte", "warranty", + "warrantles", "warranties", + "warrenties", "warranties", + "washignton", "washington", + "waterlemon", "watermelon", + "watermalon", "watermelon", + "waterproff", "waterproof", + "wavelegnth", "wavelength", + "wavelenghs", "wavelength", + "wavelenght", "wavelength", + "weakensses", "weaknesses", + "weaknesess", "weaknesses", + "weathliest", "wealthiest", + "wedensdays", "wednesdays", + "wednesdsay", "wednesdays", + "wednessday", "wednesdays", + "wednsedays", "wednesdays", + "weightened", "weighted", + "welathiest", "wealthiest", + "wellignton", "wellington", + "wellingotn", "wellington", + "wendesdays", "wednesdays", + "wereabouts", "whereabouts", + "westbroook", "westbrook", + "westernese", "westerners", + "westerness", "westerners", + "westminser", "westminster", + "westminter", "westminster", + "whatosever", "whatsoever", + "whatseover", "whatsoever", + "whipsering", "whispering", + "whsipering", "whispering", + "widepsread", "widespread", + "wikileakes", "wikileaks", + "wilderniss", "wilderness", + "wildreness", "wilderness", + "willfullly", "willfully", + "winchestor", "winchester", + "windhsield", "windshield", + "windsheild", "windshield", + "windshiled", "windshield", + "wisconsion", "wisconsin", + "wishpering", "whispering", + "withdrawan", "withdrawn", + "withdrawel", "withdrawal", + "withdrawin", "withdrawn", + "withholdng", "withholding", + "withrdawal", "withdrawals", + "witnissing", "witnessing", + "wonderfull", "wonderful", + "wonderfuly", "wonderfully", + "wonderwand", "wonderland", + "worhsiping", "worshiping", + "workingest", "workings", + "workstaion", "workstation", + "workstaton", "workstation", + "worshippig", "worshipping", + "worshoping", "worshiping", + "wrestlewar", "wrestler", + "xenohpobic", "xenophobic", + "xenophibia", "xenophobia", + "xenophibic", "xenophobic", + "xenophonic", "xenophobic", + "xenophopia", "xenophobia", + "xenophopic", "xenophobic", + "xeonphobia", "xenophobia", + "xeonphobic", "xenophobic", + "yourselfes", "yourselves", + "yoursleves", "yourselves", + "zimbabwaen", 
"zimbabwe", + "zionistisk", "zionists", + "abandonig", "abandoning", + "abandonne", "abandonment", + "abanonded", "abandoned", + "abdomnial", "abdominal", + "abdonimal", "abdominal", + "aberation", "aberration", + "abnormaly", "abnormally", + "abodminal", "abdominal", + "abondoned", "abandoned", + "aborigene", "aborigine", + "aboslutes", "absolutes", + "abosrbing", "absorbing", + "abreviate", "abbreviate", + "abritrary", "arbitrary", + "abruptley", "abruptly", + "absailing", "abseiling", + "absloutes", "absolutes", + "absolutey", "absolutely", + "absolutly", "absolutely", + "absoultes", "absolutes", + "abstracto", "abstraction", + "absurdley", "absurdly", + "absuridty", "absurdity", + "abusrdity", "absurdity", + "academica", "academia", + "accademic", "academic", + "accalimed", "acclaimed", + "accelerar", "accelerator", + "accending", "ascending", + "accension", "accession", + "accidenty", "accidently", + "acclamied", "acclaimed", + "accliamed", "acclaimed", + "accomdate", "accommodate", + "accordeon", "accordion", + "accordian", "accordion", + "accoridng", "according", + "accountas", "accountants", + "accountat", "accountants", + "accoustic", "acoustic", + "accroding", "according", + "accuraccy", "accuracy", + "acftually", "factually", + "acheiving", "achieving", + "achieveds", "achieves", + "achillees", "achilles", + "achilleos", "achilles", + "achilleus", "achilles", + "achiveing", "achieving", + "acitvates", "activates", + "aclhemist", "alchemist", + "acomplish", "accomplish", + "acquisito", "acquisition", + "acronymes", "acronyms", + "acronymns", "acronyms", + "acsending", "ascending", + "acsension", "ascension", + "activaste", "activates", + "activatin", "activation", + "activelly", "actively", + "activisim", "activism", + "activisit", "activist", + "activites", "activities", + "actresess", "actresses", + "acusation", "causation", + "acutality", "actuality", + "adavanced", "advanced", + "adbominal", "abdominal", + "additonal", "additional", + "addoptive", "adoptive", + "addresing", "addressing", + "addtional", "additional", + "adhearing", "adhering", + "adherance", "adherence", + "adjectivs", "adjectives", + "adjustabe", "adjustable", + "administr", "administer", + "admitedly", "admittedly", + "adolecent", "adolescent", + "adovcated", "advocated", + "adovcates", "advocates", + "adquiring", "acquiring", + "adresable", "addressable", + "adressing", "addressing", + "aduiobook", "audiobook", + "advatange", "advantage", + "adventurs", "adventures", + "adveristy", "adversity", + "advertisy", "adversity", + "advisorys", "advisors", + "aeorspace", "aerospace", + "aeropsace", "aerospace", + "aerosapce", "aerospace", + "aersopace", "aerospace", + "aestethic", "aesthetic", + "aethistic", "atheistic", + "affiliato", "affiliation", + "affinitiy", "affinity", + "affirmate", "affirmative", + "affliated", "affiliated", + "africanas", "africans", + "africanos", "africans", + "aggegrate", "aggregate", + "aggresive", "aggressive", + "agnosticm", "agnosticism", + "agregates", "aggregates", + "agreggate", "aggregate", + "agrentina", "argentina", + "agression", "aggression", + "agressive", "aggressive", + "agressvie", "agressive", + "agruement", "arguement", + "agruments", "arguments", + "agurement", "arguement", + "ailenated", "alienated", + "airbourne", "airborne", + "aircrafts", "aircraft", + "airplance", "airplane", + "airrcraft", "aircraft", + "aksreddit", "askreddit", + "alcehmist", "alchemist", + "alchemsit", "alchemist", + "alchimest", "alchemist", + "alchmeist", "alchemist", + "alchoolic", 
"alcoholic", + "alcoholis", "alcoholics", + "alechmist", "alchemist", + "alegience", "allegiance", + "aleinated", "alienated", + "algoriths", "algorithms", + "algoritms", "algorithms", + "algorthim", "algorithm", + "algortihm", "algorithm", + "alignemnt", "alignment", + "alimunium", "aluminium", + "alingment", "alignment", + "allainces", "alliances", + "alledgely", "allegedly", + "allegence", "allegiance", + "alleivate", "alleviate", + "allievate", "alleviate", + "alliviate", "alleviate", + "allopones", "allophones", + "allthough", "although", + "almightly", "almighty", + "alocholic", "alcoholic", + "alogrithm", "algorithm", + "alphabeat", "alphabet", + "alrightey", "alrighty", + "alrightly", "alrighty", + "alrightty", "alrighty", + "alrington", "arlington", + "alrorythm", "algorithm", + "alterante", "alternate", + "alternatr", "alternator", + "althetics", "athletics", + "althought", "although", + "altruisim", "altruism", + "amateures", "amateurs", + "ambluance", "ambulance", + "ambuigity", "ambiguity", + "amendmant", "amendment", + "amercians", "americans", + "americain", "american", + "americams", "americas", + "americaps", "americas", + "americats", "americas", + "amibguity", "ambiguity", + "aminosity", "animosity", + "amrstrong", "armstrong", + "amublance", "ambulance", + "amunition", "ammunition", + "anachrist", "anarchist", + "analagous", "analogous", + "analitycs", "analytics", + "analtyics", "analytics", + "analyitcs", "analytics", + "analyseas", "analyses", + "analysees", "analyses", + "analysens", "analyses", + "analysise", "analyses", + "analystes", "analysts", + "analzying", "analyzing", + "anarchsim", "anarchism", + "anayltics", "analytics", + "anaylzing", "analyzing", + "ancedotal", "anecdotal", + "ancedotes", "anecdotes", + "ancestory", "ancestry", + "androgeny", "androgyny", + "androides", "androids", + "androidos", "androids", + "anecdotle", "anecdote", + "anecodtal", "anecdotal", + "anecodtes", "anecdotes", + "anectodal", "anecdotal", + "anectodes", "anecdotes", + "anedoctal", "anecdotal", + "anedoctes", "anecdotes", + "animostiy", "animosity", + "anitvirus", "antivirus", + "anlaytics", "analytics", + "anniversy", "anniversary", + "annointed", "anointed", + "annoucnes", "announces", + "annoyingy", "annoyingly", + "annoymous", "anonymous", + "annoynace", "annoyance", + "annyoance", "annoyance", + "anomisity", "animosity", + "anomolies", "anomalies", + "anomolous", "anomalous", + "anomynity", "anonymity", + "anomynous", "anonymous", + "anonimity", "anonymity", + "anonmyous", "anonymous", + "anonymoys", "anonymously", + "anorexiac", "anorexic", + "anorexica", "anorexia", + "anrachist", "anarchist", + "ansestors", "ancestors", + "antarctia", "antarctica", + "antennaes", "antennas", + "antiviurs", "antivirus", + "antivrius", "antivirus", + "antivuris", "antivirus", + "anwsering", "answering", + "anynomity", "anonymity", + "anynomous", "anonymous", + "aparthide", "apartheid", + "aparthied", "apartheid", + "apartmens", "apartments", + "apocalype", "apocalypse", + "apostrope", "apostrophe", + "apparenty", "apparently", + "appearane", "appearances", + "appenines", "apennines", + "apperance", "appearance", + "appetitie", "appetite", + "applaudes", "applause", + "applicato", "application", + "appreciae", "appreciates", + "apprentie", "apprentice", + "approachs", "approaches", + "apratheid", "apartheid", + "apsaragus", "asparagus", + "apsergers", "aspergers", + "aquainted", "acquainted", + "arbirtary", "arbitrary", + "arbritary", "arbitrary", + "arcehtype", "archetype", + 
"archetect", "architect", + "archetpye", "archetype", + "archetyps", "archetypes", + "architecs", "architects", + "archtypes", "archetypes", + "aregument", "arguement", + "areospace", "aerospace", + "argessive", "agressive", + "argeument", "arguement", + "arguabley", "arguably", + "arguablly", "arguably", + "arguement", "argument", + "arguemnet", "arguement", + "arguemnts", "arguments", + "argumeent", "arguement", + "arhtritis", "arthritis", + "aribtrary", "arbitrary", + "ariplanes", "airplanes", + "aristolte", "aristotle", + "aristotel", "aristotle", + "aritfacts", "artifacts", + "arlignton", "arlington", + "arlingotn", "arlington", + "armistace", "armistice", + "armstorng", "armstrong", + "arpatheid", "apartheid", + "arthirtis", "arthritis", + "artifcats", "artifacts", + "artifical", "artificial", + "artillary", "artillery", + "arugement", "arguement", + "arugments", "arguments", + "asapragus", "asparagus", + "asbestoes", "asbestos", + "asborbing", "absorbing", + "asburdity", "absurdity", + "ascendend", "ascended", + "ascneding", "ascending", + "ascnesion", "ascension", + "asethetic", "aesthetic", + "asnwering", "answering", + "asociated", "associated", + "assasined", "assassinated", + "assassian", "assassin", + "assassine", "assassinate", + "assasssin", "assassins", + "assaultes", "assaults", + "assembeld", "assembled", + "assembley", "assembly", + "assemblie", "assemble", + "assisnate", "assassinate", + "assistans", "assistants", + "assistsnt", "assistants", + "assmebled", "assembled", + "associato", "association", + "assoicate", "associate", + "asssasins", "assassins", + "assualted", "assaulted", + "assulated", "assaulted", + "asteorids", "asteroids", + "astericks", "asterisk", + "asteriods", "asteroids", + "astroanut", "astronaut", + "astronuat", "astronaut", + "astrounat", "astronaut", + "asuterity", "austerity", + "atempting", "attempting", + "atheltics", "athletics", + "atheneans", "athenians", + "athesitic", "atheistic", + "athetlics", "athletics", + "athiestic", "atheistic", + "athleticm", "athleticism", + "atmosphir", "atmospheric", + "atributed", "attributed", + "atributes", "attributes", + "atrifacts", "artifacts", + "atrillery", "artillery", + "atrittion", "attrition", + "attachmet", "attachments", + "attaindre", "attainder", + "attemting", "attempting", + "attemtped", "attempted", + "attendent", "attendant", + "attension", "attention", + "attirbute", "attribute", + "attirtion", "attrition", + "attmepted", "attempted", + "attractes", "attracts", + "attractin", "attraction", + "attributo", "attribution", + "attributs", "attributes", + "attritube", "attribute", + "auctionrs", "auctions", + "auidobook", "audiobook", + "auromated", "automated", + "australin", "australians", + "authroity", "authority", + "autoattak", "autoattack", + "autogrpah", "autograph", + "autonomos", "autonomous", + "auxillary", "auxiliary", + "avaialble", "available", + "availible", "available", + "avalaible", "available", + "avaliable", "available", + "averageed", "averaged", + "avialable", "available", + "awakenend", "awakened", + "awesomley", "awesomely", + "awkawrdly", "awkwardly", + "awnsering", "answering", + "bacehlors", "bachelors", + "bachelour", "bachelor", + "bachleors", "bachelors", + "bacholers", "bachelors", + "backdooor", "backdoor", + "backfeild", "backfield", + "backfiled", "backfield", + "backgroud", "background", + "backpakcs", "backpacks", + "badnwagon", "bandwagon", + "badnwidth", "bandwidth", + "balckjack", "blackjack", + "balcklist", "blacklist", + "balitmore", "baltimore", + 
"ballisitc", "ballistic", + "ballsitic", "ballistic", + "balsphemy", "blasphemy", + "bandiwdth", "bandwidth", + "bandwdith", "bandwidth", + "bandwidht", "bandwidth", + "bandwitdh", "bandwidth", + "bankrupcy", "bankruptcy", + "bankrupty", "bankruptcy", + "banruptcy", "bankruptcy", + "baordwalk", "boardwalk", + "barabrian", "barbarian", + "barbarain", "barbarian", + "barbarina", "barbarian", + "barcelets", "bracelets", + "barcleona", "barcelona", + "bareclona", "barcelona", + "barrackus", "barracks", + "bascially", "basically", + "bastardes", "bastards", + "bastardos", "bastards", + "bastardus", "bastards", + "bathrooom", "bathroom", + "batlimore", "baltimore", + "battailon", "battalion", + "battlaion", "battalion", + "beahviour", "behaviour", + "beauitful", "beautiful", + "beautifyl", "beautifully", + "becnhmark", "benchmark", + "becomeing", "becoming", + "becomming", "becoming", + "beehtoven", "beethoven", + "begginers", "beginners", + "beggining", "beginning", + "begininng", "beginning", + "beginnins", "beginnings", + "behaivors", "behaviors", + "behaivour", "behaviour", + "behavoirs", "behaviors", + "behavoiur", "behaviour", + "behvaiour", "behaviour", + "beleiving", "believing", + "beliveing", "believing", + "belssings", "blessings", + "bemusemnt", "bemusement", + "benchamrk", "benchmark", + "benchmars", "benchmarks", + "benedicat", "benedict", + "benedickt", "benedict", + "benghazhi", "benghazi", + "benghazzi", "benghazi", + "bergamont", "bergamot", + "berkelely", "berkeley", + "bersekrer", "berserker", + "berskerer", "berserker", + "beseiging", "besieging", + "bestialiy", "bestiality", + "beuatiful", "beautiful", + "biginning", "beginning", + "bigrading", "brigading", + "billbaord", "billboard", + "billboars", "billboards", + "binominal", "binomial", + "birgading", "brigading", + "birghtest", "brightest", + "birhtdays", "birthdays", + "bitcoints", "bitcoins", + "blackbery", "blackberry", + "blackhaws", "blackhawks", + "blackshit", "blacksmith", + "blanketts", "blankets", + "blapshemy", "blasphemy", + "blashpemy", "blasphemy", + "blaspehmy", "blasphemy", + "blasphmey", "blasphemy", + "blatanlty", "blatantly", + "blatimore", "baltimore", + "bleuberry", "blueberry", + "bleutooth", "bluetooth", + "blisteres", "blisters", + "blizzcoin", "blizzcon", + "blockchan", "blockchain", + "blockeras", "blockers", + "bloodbore", "bloodborne", + "boardband", "broadband", + "boardcast", "broadcast", + "bodyweigt", "bodyweight", + "bookamrks", "bookmarks", + "bookmakrs", "bookmarks", + "bookmarkd", "bookmarked", + "boradband", "broadband", + "boradcast", "broadcast", + "boradwalk", "boardwalk", + "bouregois", "bourgeois", + "bourgeios", "bourgeois", + "bourgoeis", "bourgeois", + "boyfirend", "boyfriend", + "boyfreind", "boyfriend", + "boyfriens", "boyfriends", + "brabarian", "barbarian", + "bracelona", "barcelona", + "braodband", "broadband", + "braodcast", "broadcast", + "brazilias", "brazilians", + "breakdows", "breakdowns", + "breserker", "berserker", + "bretheren", "brethren", + "bridaging", "brigading", + "brightern", "brighten", + "brigthest", "brightest", + "brilliany", "brilliantly", + "brithdays", "birthdays", + "broadwalk", "boardwalk", + "bruiseres", "bruisers", + "brunettte", "brunette", + "brusseles", "brussels", + "brussells", "brussels", + "brutailty", "brutality", + "brutallly", "brutally", + "buddhisim", "buddhism", + "buddihsts", "buddhists", + "buddishts", "buddhists", + "buhddists", "buddhists", + "buidlings", "buildings", + "bulidings", "buildings", + "burgunday", "burgundy", + 
"burgundry", "burgundy", + "burritoes", "burritos", + "burtality", "brutality", + "busineses", "business", + "businessa", "businessman", + "businesse", "businessmen", + "businesss", "businesses", + "bussiness", "business", + "buthcered", "butchered", + "butterlfy", "butterfly", + "cacausian", "caucasian", + "caclulate", "calculate", + "cacuasian", "caucasian", + "caculater", "calculator", + "cafeteira", "cafeteria", + "cafetiera", "cafeteria", + "caffeinne", "caffeine", + "calcualte", "calculate", + "californa", "california", + "caluclate", "calculate", + "calulated", "calculated", + "calulater", "calculator", + "cambirdge", "cambridge", + "cambrdige", "cambridge", + "cambrigde", "cambridge", + "camoflage", "camouflage", + "campagins", "campaigns", + "campaings", "campaigns", + "campiagns", "campaigns", + "campusers", "campuses", + "camrbidge", "cambridge", + "canadains", "canadians", + "candadate", "candidate", + "candidats", "candidates", + "cannister", "canister", + "cannoical", "canonical", + "canoncial", "canonical", + "capactior", "capacitor", + "capicator", "capacitor", + "capitalis", "capitals", + "caprenter", "carpenter", + "capsulers", "capsules", + "capsulets", "capsules", + "carachter", "character", + "cardbaord", "cardboard", + "cardborad", "cardboard", + "cardianls", "cardinals", + "cardnials", "cardinals", + "caridnals", "cardinals", + "carmalite", "carmelite", + "carnberry", "cranberry", + "carolinia", "carolina", + "carpetner", "carpenter", + "carptener", "carpenter", + "carribean", "caribbean", + "cartdrige", "cartridge", + "cartilege", "cartilage", + "cartirdge", "cartridge", + "cartrdige", "cartridge", + "cartrigde", "cartridge", + "casaulity", "causality", + "cashieres", "cashiers", + "cassawory", "cassowary", + "cassettte", "cassette", + "casuation", "causation", + "cataclsym", "cataclysm", + "cataclyms", "cataclysm", + "catacylsm", "cataclysm", + "catacyslm", "cataclysm", + "catalcysm", "cataclysm", + "catalgoue", "catalogue", + "cathderal", "cathedral", + "catherdal", "cathedral", + "cathloics", "catholics", + "cathredal", "cathedral", + "caucaisan", "caucasian", + "caucasain", "caucasian", + "causacian", "caucasian", + "causailty", "causality", + "celebirty", "celebrity", + "celebrato", "celebration", + "celebrite", "celebrities", + "celesital", "celestial", + "celestail", "celestial", + "cementary", "cemetery", + "cemetarey", "cemetery", + "cenitpede", "centipede", + "centepide", "centipede", + "centipeed", "centipede", + "centruies", "centuries", + "centuties", "centuries", + "cerebrawl", "cerebral", + "certanity", "certainty", + "certianty", "certainty", + "cesspoool", "cesspool", + "chairmain", "chairman", + "challange", "challenge", + "challengr", "challenger", + "challengs", "challenges", + "chameloen", "chameleon", + "champagen", "champagne", + "champange", "champagne", + "chandlure", "chandler", + "changable", "changeable", + "charactor", "character", + "chatedral", "cathedral", + "chatolics", "catholics", + "checkmeat", "checkmate", + "checkpoit", "checkpoints", + "chekcmate", "checkmate", + "chemestry", "chemistry", + "chemicaly", "chemically", + "chemsitry", "chemistry", + "chernboyl", "chernobyl", + "chernobly", "chernobyl", + "chernoybl", "chernobyl", + "chernyobl", "chernobyl", + "cheronbyl", "chernobyl", + "chidlfree", "childfree", + "chidlrens", "childrens", + "chihauhua", "chihuahua", + "chihuahau", "chihuahua", + "childbird", "childbirth", + "childerns", "childrens", + "childisch", "childish", + "childresn", "childrens", + "chirstian", 
"christian", + "chirstmas", "christmas", + "chiuhahua", "chihuahua", + "chlidfree", "childfree", + "chlidrens", "childrens", + "chocloate", "chocolate", + "chocoalte", "chocolate", + "chocolats", "chocolates", + "chocolste", "chocolates", + "cholocate", "chocolate", + "chrenobyl", "chernobyl", + "chrisitan", "christian", + "christain", "christian", + "christams", "christmas", + "chrsitian", "christian", + "chrsitmas", "christmas", + "churchers", "churches", + "cigaretts", "cigarettes", + "cigeratte", "cigarette", + "cilivians", "civilians", + "cilpboard", "clipboard", + "cilynders", "cylinders", + "circuitos", "circuits", + "ciriculum", "curriculum", + "cirticise", "criticise", + "civilains", "civilians", + "civillian", "civilian", + "classicos", "classics", + "classicus", "classics", + "classifiy", "classify", + "cleanisng", "cleansing", + "cleasning", "cleansing", + "clikcbait", "clickbait", + "clinicaly", "clinically", + "clipbaord", "clipboard", + "clitories", "clitoris", + "clitorios", "clitoris", + "clitorius", "clitoris", + "clucthing", "clutching", + "clutchign", "clutching", + "cluthcing", "clutching", + "coca cola", "coca-cola", + "cockatils", "cocktails", + "cocktials", "cocktails", + "cognizent", "cognizant", + "colateral", "collateral", + "collabore", "collaborate", + "collasped", "collapsed", + "collaspes", "collapses", + "colleauge", "colleague", + "collectes", "collects", + "collectie", "collective", + "collecton", "collection", + "collectos", "collectors", + "collegaue", "colleague", + "collegues", "colleagues", + "collisson", "collisions", + "collonade", "colonnade", + "collonies", "colonies", + "collpased", "collapsed", + "collpases", "collapses", + "colombina", "colombia", + "columbina", "columbia", + "comapnies", "companies", + "combatans", "combatants", + "combinato", "combination", + "combusion", "combustion", + "comestics", "cosmetics", + "comisions", "commissions", + "comission", "commission", + "comitting", "committing", + "commandes", "commands", + "commentar", "commentator", + "commentes", "commenters", + "commercie", "commerce", + "commision", "commission", + "commiteed", "commited", + "commiting", "committing", + "commitmet", "commitments", + "commments", "comments", + "commongly", "commonly", + "communiss", "communists", + "communite", "communities", + "communits", "communist", + "communsim", "communism", + "compaines", "companies", + "compalins", "complains", + "compalint", "compliant", + "comparisn", "comparisons", + "compeltes", "completes", + "competant", "competent", + "competend", "competed", + "competion", "competition", + "competive", "competitive", + "compilant", "compliant", + "compilare", "compiler", + "compilato", "compilation", + "compitent", "competent", + "complaind", "complained", + "complaing", "complaining", + "completen", "complement", + "completey", "completely", + "completin", "completion", + "complians", "complains", + "componant", "component", + "comprable", "comparable", + "compresas", "compress", + "compreses", "compress", + "compteurs", "computers", + "comptuers", "computers", + "computato", "computation", + "comradets", "comrades", + "comsetics", "cosmetics", + "conanical", "canonical", + "conatiner", "container", + "concelaed", "concealed", + "concelaer", "concealer", + "concelear", "concealer", + "concensus", "consensus", + "conceptos", "concepts", + "conceptul", "conceptual", + "concernig", "concerning", + "concertas", "concerts", + "concevied", "conceived", + "conciders", "considers", + "concieted", "conceited", + "concieved", 
"conceived", + "conclusie", "conclusive", + "concsious", "conscious", + "concurret", "concurrent", + "condamned", "condemned", + "condemend", "condemned", + "condemmed", "condemned", + "condemnig", "condemning", + "condenmed", "condemned", + "condesend", "condensed", + "condesned", "condensed", + "condmened", "condemned", + "conection", "connection", + "conenctor", "connector", + "conferene", "conferences", + "confessin", "confession", + "confideny", "confidently", + "confilcts", "conflicts", + "confimred", "confirmed", + "confirmas", "confirms", + "conflcits", "conflicts", + "confrimed", "confirmed", + "congitive", "cognitive", + "conlcuded", "concluded", + "connectes", "connects", + "connectit", "connecticut", + "connectos", "connectors", + "conquerer", "conqueror", + "consdider", "consider", + "consensul", "consensual", + "conserned", "concerned", + "consicous", "conscious", + "considerd", "considered", + "considert", "considerate", + "consisent", "consistent", + "consistes", "consists", + "consolato", "consolation", + "consolide", "consolidate", + "consonent", "consonant", + "constanly", "constantly", + "constanst", "constants", + "constanty", "constantly", + "constasnt", "constants", + "constitue", "constitutes", + "constrait", "constraints", + "construcs", "constructs", + "construde", "construed", + "construst", "constructs", + "constucts", "constructs", + "constured", "construed", + "consulant", "consultant", + "consultat", "consultant", + "consumate", "consummate", + "contactes", "contacts", + "contactos", "contacts", + "contagios", "contagious", + "containes", "contains", + "containig", "containing", + "containts", "contains", + "contemple", "contemplate", + "contendor", "contender", + "contentas", "contents", + "contentes", "contents", + "contentos", "contents", + "contestas", "contests", + "contestat", "contestants", + "contestes", "contests", + "contextes", "contexts", + "contextos", "contexts", + "contianer", "container", + "contibute", "contribute", + "contigent", "contingent", + "continant", "continental", + "continens", "continents", + "continous", "continuous", + "continuos", "continuous", + "continute", "continue", + "contiunal", "continual", + "contracto", "contraction", + "contribue", "contribute", + "contribuo", "contributor", + "controlas", "controls", + "controled", "controlled", + "controles", "controls", + "controlls", "controls", + "convenant", "covenant", + "convencen", "convenience", + "conveniet", "convenient", + "conversie", "converse", + "conversin", "conversions", + "convertie", "convertible", + "convertis", "converts", + "cooldwons", "cooldowns", + "coordinar", "coordinator", + "copenhagn", "copenhagen", + "coprorate", "corporate", + "copywrite", "copyright", + "corcodile", "crocodile", + "corparate", "corporate", + "corproate", "corporate", + "correclty", "correctly", + "correctin", "correction", + "correlato", "correlation", + "corridoor", "corridor", + "corruptin", "corruption", + "corssfire", "crossfire", + "corsshair", "crosshair", + "corsspost", "crosspost", + "coruching", "crouching", + "cosemtics", "cosmetics", + "costumise", "costumes", + "counciles", "councils", + "councills", "councils", + "councilos", "councils", + "countains", "contains", + "counteres", "counters", + "countires", "countries", + "courching", "crouching", + "courtesey", "courtesy", + "courtesty", "courtesy", + "coururier", "courier", + "coutnered", "countered", + "crapenter", "carpenter", + "creativey", "creatively", + "creedence", "credence", + "crhistmas", "christmas", + 
"cricketts", "crickets", + "criminaly", "criminally", + "critereon", "criterion", + "criterias", "criteria", + "criticaly", "critically", + "criticies", "criticise", + "criticisn", "criticising", + "critisice", "criticise", + "critisicm", "criticism", + "critising", "criticising", + "critisism", "criticism", + "critisize", "criticise", + "critizing", "criticizing", + "crosshiar", "crosshair", + "crossifre", "crossfire", + "crticised", "criticised", + "crusdaers", "crusaders", + "crutchers", "crutches", + "crystalls", "crystals", + "crystalus", "crystals", + "crystalys", "crystals", + "cuacasian", "caucasian", + "cuasality", "causality", + "culitvate", "cultivate", + "culturaly", "culturally", + "culturels", "cultures", + "curiostiy", "curiosity", + "curisoity", "curiosity", + "currenlty", "currently", + "curriculm", "curriculum", + "cursaders", "crusaders", + "custcenes", "cutscenes", + "cutsceens", "cutscenes", + "cutscence", "cutscene", + "cutsences", "cutscenes", + "cyclinder", "cylinder", + "cyclistes", "cyclists", + "cylindres", "cylinders", + "cynicisim", "cynicism", + "dahsboard", "dashboard", + "dalmation", "dalmatian", + "dangeroys", "dangerously", + "dashbaord", "dashboard", + "daugthers", "daughters", + "davantage", "advantage", + "deadlfits", "deadlifts", + "deadpoool", "deadpool", + "dealershp", "dealerships", + "deathmath", "deathmatch", + "decalring", "declaring", + "decendant", "descendant", + "decendent", "descendant", + "decipting", "depicting", + "deciption", "depiction", + "decisivie", "decisive", + "declarase", "declares", + "declarees", "declares", + "decoratie", "decorative", + "decoratin", "decorations", + "decpetion", "deception", + "decpetive", "deceptive", + "decribing", "describing", + "decsended", "descended", + "deductibe", "deductible", + "defaintly", "defiantly", + "defaltion", "deflation", + "defanitly", "defiantly", + "defeintly", "definetly", + "defendent", "defendant", + "defensese", "defenseless", + "defianlty", "defiantly", + "deficeint", "deficient", + "deficieny", "deficiency", + "deficites", "deficits", + "definance", "defiance", + "definatey", "definately", + "definatly", "definitely", + "definetly", "definitely", + "definetyl", "definetly", + "definilty", "definitly", + "definitie", "definitive", + "definitin", "definitions", + "definitly", "definitely", + "definiton", "definition", + "definitve", "definite", + "definityl", "definitly", + "definltey", "definetly", + "defintaly", "defiantly", + "defintily", "definitly", + "defintion", "definition", + "defintley", "definetly", + "defitenly", "definetly", + "defitinly", "definitly", + "defitnaly", "defiantly", + "defitnely", "definetly", + "deflectin", "deflection", + "defnietly", "definetly", + "degeneret", "degenerate", + "degradato", "degradation", + "degradead", "degraded", + "degrassie", "degrasse", + "degrassse", "degrasse", + "deifnetly", "definetly", + "deifnitly", "definitly", + "deisgners", "designers", + "delagates", "delegates", + "delcaring", "declaring", + "delcining", "declining", + "delegatie", "delegate", + "delerious", "delirious", + "deleteing", "deleting", + "delfation", "deflation", + "deliveres", "delivers", + "deliverys", "delivers", + "delpoying", "deploying", + "demcorats", "democrats", + "deminsion", "dimension", + "democarcy", "democracy", + "democract", "democrat", + "demonstre", "demonstrate", + "denominar", "denominator", + "dentistas", "dentists", + "dentistes", "dentists", + "deomcrats", "democrats", + "deopsited", "deposited", + "deparment", "department", + 
"departmet", "departments", + "depciting", "depicting", + "depcition", "depiction", + "depection", "deception", + "depedency", "dependency", + "depicitng", "depicting", + "depiciton", "depiction", + "deplyoing", "deploying", + "depoisted", "deposited", + "depolying", "deploying", + "depositas", "deposits", + "deposites", "deposits", + "depositis", "deposits", + "depositos", "deposits", + "depostied", "deposited", + "depressie", "depressive", + "depressin", "depression", + "depserate", "desperate", + "depsoited", "deposited", + "descirbes", "describes", + "descision", "decision", + "desginers", "designers", + "desgining", "designing", + "desicions", "decisions", + "designade", "designated", + "designato", "designation", + "desingage", "disengage", + "desingers", "designers", + "desinging", "designing", + "desktopos", "desktops", + "desparate", "desperate", + "desperato", "desperation", + "despoited", "deposited", + "desriable", "desirable", + "dessigned", "designed", + "destinato", "destination", + "destoryed", "destroyed", + "destoryer", "destroyer", + "destroyes", "destroys", + "destructo", "destruction", + "destryoed", "destroyed", + "destryoer", "destroyer", + "desuction", "seduction", + "detailled", "detailed", + "detatched", "detached", + "detectivs", "detectives", + "deteriate", "deteriorate", + "determing", "determining", + "determins", "determines", + "developrs", "develops", + "diabetees", "diabetes", + "diablical", "diabolical", + "diagonaal", "diagonal", + "diagonsed", "diagnosed", + "diagonsis", "diagnosis", + "diagramas", "diagrams", + "diagramms", "diagrams", + "dialectes", "dialects", + "dialectos", "dialects", + "diarrheoa", "diarrhea", + "diasbling", "disabling", + "dichomoty", "dichotomy", + "dicovered", "discovered", + "dictaters", "dictates", + "dictionay", "dictionary", + "difenitly", "definitly", + "diferrent", "different", + "differene", "differences", + "differens", "differences", + "differeny", "differently", + "difficuly", "difficulty", + "diffucult", "difficult", + "dificulty", "difficulty", + "diganosed", "diagnosed", + "diganosis", "diagnosis", + "dimenions", "dimensions", + "dimention", "dimension", + "dimesnion", "dimension", + "diminishs", "diminishes", + "dinasours", "dinosaurs", + "dinosuars", "dinosaurs", + "dinsoaurs", "dinosaurs", + "dionsaurs", "dinosaurs", + "diphtongs", "diphthongs", + "dipthongs", "diphthongs", + "direcotry", "directory", + "directoty", "directory", + "directroy", "directory", + "disapears", "disappears", + "disaprity", "disparity", + "disastros", "disastrous", + "disatrous", "disastrous", + "disbaling", "disabling", + "disbeleif", "disbelief", + "disbelife", "disbelief", + "disciplen", "disciplines", + "disclamer", "disclaimer", + "disclosue", "disclosure", + "disconnet", "disconnect", + "discosure", "discourse", + "discoverd", "discovered", + "discovere", "discoveries", + "discredid", "discredited", + "discribed", "described", + "discribes", "describes", + "discussin", "discussion", + "diserable", "desirable", + "disgarees", "disagrees", + "disgiused", "disguised", + "disgusied", "disguised", + "disgustes", "disgusts", + "disgustos", "disgusts", + "disgustus", "disgusts", + "dishonesy", "dishonesty", + "dishonord", "dishonored", + "disicples", "disciples", + "dismantel", "dismantle", + "dismisals", "dismissal", + "disnegage", "disengage", + "dispairty", "disparity", + "dispalyed", "displayed", + "dispartiy", "disparity", + "dispenced", "dispensed", + "dispeners", "dispenser", + "displayes", "displays", + "disruptin", "disruption", 
+ "dissapear", "disappear", + "dissarray", "disarray", + "dissmisal", "dismissal", + "disspiate", "dissipate", + "distincte", "distinctive", + "distrcits", "districts", + "distribue", "distributed", + "distrubed", "disturbed", + "distrupts", "distrust", + "disturben", "disturbance", + "diverisfy", "diversify", + "diveristy", "diversity", + "diverstiy", "diversity", + "dividened", "dividend", + "divinitiy", "divinity", + "doccument", "document", + "docrtines", "doctrines", + "docuhebag", "douchebag", + "dogdammit", "goddammit", + "dogfather", "godfather", + "dolphines", "dolphins", + "domecracy", "democracy", + "domecrats", "democrats", + "domiantes", "dominates", + "dominatin", "domination", + "dominaton", "domination", + "dominiant", "dominant", + "donwgrade", "downgrade", + "donwloads", "downloads", + "donwsides", "downsides", + "donwvoted", "downvoted", + "donwvotes", "downvotes", + "doublelit", "doublelift", + "doucehbag", "douchebag", + "downgarde", "downgrade", + "downlaods", "downloads", + "downloaad", "download", + "downovted", "downvoted", + "dravadian", "dravidian", + "drummless", "drumless", + "dsyphoria", "dysphoria", + "dsytopian", "dystopian", + "duaghters", "daughters", + "duplicats", "duplicates", + "durabiliy", "durability", + "dynamicus", "dynamics", + "dypshoria", "dysphoria", + "dyshporia", "dysphoria", + "dysoptian", "dystopian", + "dysphoira", "dysphoria", + "dysphroia", "dysphoria", + "dyspohria", "dysphoria", + "dyspotian", "dystopian", + "dystopain", "dystopian", + "dystpoian", "dystopian", + "eachohter", "eachother", + "eachotehr", "eachother", + "eachtoher", "eachother", + "earpluggs", "earplugs", + "earthboud", "earthbound", + "eastwoood", "eastwood", + "eastwoord", "eastwood", + "ecclectic", "eclectic", + "ecomonics", "economics", + "edficient", "deficient", + "effecient", "efficient", + "efficeint", "efficient", + "efficency", "efficiency", + "efficieny", "efficiency", + "effulence", "effluence", + "egalitara", "egalitarian", + "egpytians", "egyptians", + "egyptains", "egyptians", + "egytpians", "egyptians", + "ehtically", "ethically", + "ehtnicity", "ethnicity", + "eighteeen", "eighteen", + "eitquette", "etiquette", + "ejacualte", "ejaculate", + "electivre", "elective", + "electorns", "electrons", + "electrial", "electrical", + "electricy", "electricity", + "electroal", "electoral", + "elementay", "elementary", + "elepahnts", "elephants", + "eliminase", "eliminates", + "eliminato", "elimination", + "ellignton", "ellington", + "ellingotn", "ellington", + "eloquenty", "eloquently", + "elsehwere", "elsewhere", + "emapthize", "empathize", + "embarress", "embarrassed", + "emmisarry", "emissary", + "emmisions", "emissions", + "emmitting", "emitting", + "empahsize", "emphasize", + "emperical", "empirical", + "emphaised", "emphasised", + "emphatize", "empathize", + "emphazise", "emphasize", + "emphysyma", "emphysema", + "empitness", "emptiness", + "employeer", "employer", + "employeur", "employer", + "empolyees", "employees", + "emtpiness", "emptiness", + "emualtion", "emulation", + "enahncing", "enhancing", + "enchantig", "enchanting", + "enclousre", "enclosure", + "enclsoure", "enclosure", + "encolsure", "enclosure", + "encompase", "encompass", + "enconding", "encoding", + "encounted", "encountered", + "encrpyted", "encrypted", + "encrytped", "encrypted", + "encyrpted", "encrypted", + "endangerd", "endangered", + "enevlopes", "envelopes", + "enforcees", "enforces", + "engagemet", "engagements", + "engagment", "engagement", + "engieneer", "engineer", + "engineeer", 
"engineer", + "engineerd", "engineered", + "enhacning", "enhancing", + "enhanceds", "enhances", + "enligthen", "enlighten", + "enourmous", "enormous", + "ensconsed", "ensconced", + "enthicity", "ethnicity", + "enthusiam", "enthusiasm", + "enthusiat", "enthusiast", + "entirelly", "entirely", + "entitlied", "entitled", + "enveloppe", "envelope", + "epidsodes", "episodes", + "epilepsey", "epilepsy", + "epiphanny", "epiphany", + "episonage", "espionage", + "epscially", "specially", + "epsionage", "espionage", + "eqautions", "equations", + "equialent", "equivalent", + "equivalet", "equivalents", + "ermington", "remington", + "erroenous", "erroneous", + "escalatie", "escalate", + "escalatin", "escalation", + "esitmated", "estimated", + "esitmates", "estimates", + "eslewhere", "elsewhere", + "especialy", "especially", + "espianoge", "espionage", + "espinoage", "espionage", + "espoinage", "espionage", + "esponiage", "espionage", + "espressso", "espresso", + "essencial", "essential", + "essentail", "essential", + "essentias", "essentials", + "essentual", "essential", + "essesital", "essential", + "estiamted", "estimated", + "estiamtes", "estimates", + "estimatin", "estimation", + "ethcially", "ethically", + "ethincity", "ethnicity", + "ethnicaly", "ethnically", + "ethniticy", "ethnicity", + "etmyology", "etymology", + "euclidian", "euclidean", + "euorpeans", "europeans", + "euphoriac", "euphoric", + "euphorica", "euphoria", + "europenas", "europeans", + "europians", "europeans", + "eurpoeans", "europeans", + "evangelia", "evangelical", + "evelation", "elevation", + "evenlopes", "envelopes", + "eventally", "eventually", + "eventualy", "eventually", + "everthing", "everything", + "evertyime", "everytime", + "everwhere", "everywhere", + "everyoens", "everyones", + "everyteim", "everytime", + "everytiem", "everytime", + "everyting", "everything", + "eveyrones", "everyones", + "evreyones", "everyones", + "evreytime", "everytime", + "exagerate", "exaggerate", + "exahusted", "exhausted", + "exapnsive", "expansive", + "exauhsted", "exhausted", + "excahnges", "exchanges", + "excecuted", "executed", + "excecutes", "executes", + "excellant", "excellent", + "excercise", "exercise", + "excerised", "exercised", + "excerises", "exercises", + "exceuting", "executing", + "exchnages", "exchanges", + "exclsuive", "exclusive", + "excludeds", "excludes", + "exclusivs", "exclusives", + "exclusivy", "exclusivity", + "excpetion", "exception", + "exculding", "excluding", + "exculsion", "exclusion", + "exculsive", "exclusive", + "execising", "exercising", + "execption", "exception", + "exectuing", "executing", + "exectuion", "execution", + "exectuive", "executive", + "executabe", "executable", + "exepmtion", "exemption", + "exerbated", "exacerbated", + "exercices", "exercise", + "exerciese", "exercises", + "exercizes", "exercise", + "exersices", "exercises", + "exhasuted", "exhausted", + "exhaustin", "exhaustion", + "exhibites", "exhibits", + "exhibitin", "exhibition", + "exhibtion", "exhibition", + "exhuasted", "exhausted", + "exibition", "exhibition", + "existance", "existence", + "existenta", "existential", + "existince", "existence", + "existnace", "existance", + "exlcuding", "excluding", + "exlcusion", "exclusion", + "exlcusive", "exclusive", + "exlpoding", "exploding", + "exlporers", "explorers", + "exlposion", "explosion", + "exonorate", "exonerate", + "expalined", "explained", + "expanisve", "expansive", + "expatriot", "expatriate", + "expectany", "expectancy", + "expection", "exception", + "expemtion", "exemption", 
+ "experimet", "experiments", + "explaines", "explains", + "explainig", "explaining", + "explaning", "explaining", + "expliciet", "explicit", + "explicity", "explicitly", + "explictly", "explicitly", + "explioted", "exploited", + "explodeds", "explodes", + "exploites", "exploits", + "explorare", "explorer", + "explotied", "exploited", + "expolding", "exploding", + "expolited", "exploited", + "expolsion", "explosion", + "expolsive", "explosive", + "expressie", "expressive", + "expressin", "expression", + "exsitance", "existance", + "extention", "extension", + "exteriour", "exterior", + "extermely", "extremely", + "extermism", "extremism", + "extermist", "extremist", + "externaly", "externally", + "extractin", "extraction", + "extrapole", "extrapolate", + "extreemly", "extremely", + "extremers", "extremes", + "extremley", "extremely", + "extrotion", "extortion", + "eyeballls", "eyeballs", + "eyebrowes", "eyebrows", + "eyebrowns", "eyebrows", + "eyesahdow", "eyeshadow", + "eyeshdaow", "eyeshadow", + "eygptians", "egyptians", + "eytmology", "etymology", + "faceboook", "facebook", + "faciliate", "facilitate", + "facilites", "facilities", + "facilitiy", "facility", + "facinated", "fascinated", + "facutally", "factually", + "familiair", "familiar", + "familiare", "familiarize", + "familiary", "familiarity", + "familliar", "familiar", + "fanaticas", "fanatics", + "fanaticos", "fanatics", + "fanaticus", "fanatics", + "fanatsies", "fantasies", + "fanatsize", "fantasize", + "fandation", "foundation", + "fanservie", "fanservice", + "fantazise", "fantasize", + "farenheit", "fahrenheit", + "fascistes", "fascists", + "fashoined", "fashioned", + "favorties", "favorites", + "favoruite", "favourite", + "favourits", "favourites", + "favourtie", "favourite", + "fedreally", "federally", + "feminisim", "feminism", + "feminsits", "feminists", + "femminist", "feminist", + "fesitvals", "festivals", + "fetishers", "fetishes", + "fightings", "fighting", + "filetimes", "lifetimes", + "filiament", "filament", + "filmmakes", "filmmakers", + "fingernal", "fingernails", + "flashligt", "flashlight", + "flavorade", "flavored", + "flavoures", "flavours", + "flavourus", "flavours", + "flawlessy", "flawlessly", + "flexibily", "flexibility", + "fluctaute", "fluctuate", + "flucutate", "fluctuate", + "fluttersy", "fluttershy", + "follwoing", "following", + "foootball", "football", + "forcefuly", "forcefully", + "forcibley", "forcibly", + "forciblly", "forcibly", + "forearmes", "forearms", + "foreginer", "foreigner", + "foregroud", "foreground", + "foreinger", "foreigner", + "forgeiner", "foreigner", + "forgiener", "foreigner", + "forgivens", "forgiveness", + "foriegner", "foreigner", + "forigener", "foreigner", + "formerlly", "formerly", + "formualte", "formulate", + "formulaes", "formulas", + "formulars", "formulas", + "forntline", "frontline", + "forntpage", "frontpage", + "fortuante", "fortunate", + "forumlate", "formulate", + "foundatin", "foundations", + "fourteeen", "fourteen", + "fractales", "fractals", + "fractalis", "fractals", + "fractalus", "fractals", + "fragement", "fragment", + "fragmenot", "fragment", + "franchies", "franchise", + "francsico", "francisco", + "franscico", "francisco", + "frecklers", "freckles", + "freedomes", "freedoms", + "freestlye", "freestyle", + "freesytle", "freestyle", + "fremented", "fermented", + "freqeuncy", "frequency", + "frequence", "frequencies", + "friendlis", "friendlies", + "frightend", "frightened", + "fromation", "formation", + "frontapge", "frontpage", + "frontilne", 
"frontline", + "frustrato", "frustration", + "frustrats", "frustrates", + "fucntions", "functions", + "fullscren", "fullscreen", + "funcitons", "functions", + "functiong", "functioning", + "functtion", "function", + "furiosuly", "furiously", + "furiuosly", "furiously", + "futuristc", "futuristic", + "gagnsters", "gangsters", + "galations", "galatians", + "galdiator", "gladiator", + "gallaxies", "galaxies", + "garanteed", "guaranteed", + "garantees", "guarantees", + "garuantee", "guarantee", + "gatherins", "gatherings", + "gauntelts", "gauntlets", + "gauntlent", "gauntlet", + "gaurantee", "guarantee", + "gaurentee", "guarantee", + "genatilia", "genitalia", + "geneology", "genealogy", + "generalbs", "generals", + "generalis", "generals", + "generaste", "generates", + "generatie", "generate", + "generatin", "generations", + "generatos", "generators", + "genitaila", "genitalia", + "genitales", "genitals", + "genitalis", "genitals", + "geniunely", "genuinely", + "gentailia", "genitalia", + "gentelmen", "gentlemen", + "gentialia", "genitalia", + "genuienly", "genuinely", + "genuinley", "genuinely", + "geogrpahy", "geography", + "germaniac", "germanic", + "geurrilla", "guerrilla", + "gimmickey", "gimmicky", + "gimmickly", "gimmicky", + "girlfried", "girlfriend", + "goalkeepr", "goalkeeper", + "godafther", "godfather", + "godspeeed", "godspeed", + "goegraphy", "geography", + "goldfisch", "goldfish", + "goosebums", "goosebumps", + "gorvement", "goverment", + "govemrent", "goverment", + "govenment", "government", + "goverance", "governance", + "goveremnt", "goverment", + "goverment", "government", + "govermetn", "goverment", + "govermnet", "goverment", + "governmet", "governments", + "govorment", "government", + "govrement", "goverment", + "gracefull", "graceful", + "gracefuly", "gracefully", + "graduaste", "graduates", + "graduatin", "graduation", + "grahpical", "graphical", + "grativate", "gravitate", + "graudally", "gradually", + "graudates", "graduates", + "greenalnd", "greenland", + "grenaders", "grenades", + "grpahical", "graphical", + "guadulupe", "guadalupe", + "guaranted", "guaranteed", + "guarantes", "guarantees", + "guardains", "guardians", + "guarentee", "guarantee", + "guaridans", "guardians", + "guatamala", "guatemala", + "guerrilas", "guerrillas", + "guradians", "guardians", + "guranteed", "guaranteed", + "gurantees", "guarantees", + "gutiarist", "guitarist", + "habsbourg", "habsburg", + "hairstlye", "hairstyle", + "hairsytle", "hairstyle", + "halarious", "hilarious", + "hambruger", "hamburger", + "hamburges", "hamburgers", + "hamphsire", "hampshire", + "hamsphire", "hampshire", + "handboook", "handbook", + "handedley", "handedly", + "handedlly", "handedly", + "handicape", "handicapped", + "hapmshire", "hampshire", + "happended", "happened", + "happenend", "happened", + "happenned", "happened", + "harasment", "harassment", + "hardenend", "hardened", + "hardwoord", "hardwood", + "haristyle", "hairstyle", + "harrasing", "harassing", + "harrassed", "harassed", + "harrasses", "harassed", + "hdinsight", "hindsight", + "headahces", "headaches", + "headhpone", "headphone", + "headshoot", "headshot", + "healither", "healthier", + "healtheir", "healthier", + "healthiet", "healthiest", + "healthire", "healthier", + "heapdhone", "headphone", + "hedgehoog", "hedgehog", + "hedgehorg", "hedgehog", + "heightend", "heightened", + "heirarchy", "hierarchy", + "herculase", "hercules", + "herculeas", "hercules", + "herculees", "hercules", + "herculeus", "hercules", + "heriarchy", "hierarchy", + 
"hesistant", "hesitant", + "hesistate", "hesitate", + "hesitatin", "hesitation", + "hieroglph", "hieroglyph", + "highschol", "highschool", + "hindisght", "hindsight", + "hindrence", "hindrance", + "hinduisim", "hinduism", + "hinduisum", "hinduism", + "hipsanics", "hispanics", + "hirearchy", "hierarchy", + "hirsohima", "hiroshima", + "hispancis", "hispanics", + "hitboxers", "hitboxes", + "hoepfully", "hopefully", + "holocasut", "holocaust", + "holocuast", "holocaust", + "homeonwer", "homeowner", + "homeopaty", "homeopathy", + "homewolrd", "homeworld", + "homewoner", "homeowner", + "homewrold", "homeworld", + "homogenes", "homogeneous", + "homosexul", "homosexuals", + "hopelessy", "hopelessly", + "hopsitals", "hospitals", + "horishima", "hiroshima", + "horizones", "horizons", + "horizonts", "horizons", + "horrendos", "horrendous", + "horribley", "horribly", + "horriblly", "horribly", + "horrifing", "horrifying", + "hositlity", "hostility", + "hospitaly", "hospitality", + "hosptials", "hospitals", + "hourgalss", "hourglass", + "hourlgass", "hourglass", + "househols", "households", + "humanitis", "humanities", + "humanoind", "humanoid", + "humiditiy", "humidity", + "hunagrian", "hungarian", + "hurriance", "hurricane", + "hurricans", "hurricanes", + "husbandos", "husbands", + "hydraluic", "hydraulic", + "hydropile", "hydrophile", + "hydropobe", "hydrophobe", + "hydrualic", "hydraulic", + "hyopcrite", "hypocrite", + "hypcorite", "hypocrite", + "hyperoble", "hyperbole", + "hypocracy", "hypocrisy", + "hypocrasy", "hypocrisy", + "hypocricy", "hypocrisy", + "hypocriet", "hypocrite", + "hypocrits", "hypocrites", + "hyporcite", "hypocrite", + "hypothess", "hypotheses", + "hyprocisy", "hypocrisy", + "hyprocite", "hypocrite", + "hyrdation", "hydration", + "hyrdaulic", "hydraulic", + "hysterica", "hysteria", + "hysteriia", "hysteria", + "iburpofen", "ibuprofen", + "icleandic", "icelandic", + "icongnito", "incognito", + "idealisim", "idealism", + "idealistc", "idealistic", + "identifer", "identifier", + "identifiy", "identify", + "ideologis", "ideologies", + "ignornace", "ignorance", + "illegales", "illegals", + "illegalis", "illegals", + "illegalls", "illegals", + "illnesess", "illnesses", + "illsuions", "illusions", + "illuminai", "illuminati", + "imagenary", "imaginary", + "imaginery", "imaginary", + "imaptient", "impatient", + "imigrated", "emigrated", + "immensley", "immensely", + "immerisve", "immersive", + "immesnely", "immensely", + "immidiate", "immediate", + "immigrato", "immigration", + "immitated", "imitated", + "immitator", "imitator", + "immobilie", "immobile", + "immobille", "immobile", + "immobilze", "immobile", + "immortaly", "immortality", + "immserive", "immersive", + "impaitent", "impatient", + "imparital", "impartial", + "impedence", "impedance", + "implantes", "implants", + "implicati", "implicit", + "impliciet", "implicit", + "implicity", "implicitly", + "impliment", "implement", + "implusive", "impulsive", + "importamt", "important", + "importend", "imported", + "imporving", "improving", + "impossibe", "impossible", + "imprefect", "imperfect", + "impressin", "impressions", + "imprioned", "imprisoned", + "improbabe", "improbable", + "impulisve", "impulsive", + "impuslive", "impulsive", + "imrpoving", "improving", + "inadequet", "inadequate", + "inadquate", "inadequate", + "inaugures", "inaugurates", + "inbalance", "imbalance", + "inbeetwen", "inbetween", + "inbetween", "between", + "inbewteen", "inbetween", + "incarnato", "incarnation", + "incgonito", "incognito", + "inclinato", 
"inclination", + "includeds", "includes", + "incoginto", "incognito", + "incongito", "incognito", + "incorpore", "incorporate", + "incpetion", "inception", + "incredibe", "incredible", + "incrediby", "incredibly", + "inculding", "including", + "incunabla", "incunabula", + "indicaste", "indicates", + "indicatie", "indicative", + "indicence", "incidence", + "indicents", "incidents", + "indigenos", "indigenous", + "indirecty", "indirectly", + "indisious", "insidious", + "individul", "individual", + "individus", "individuals", + "indoensia", "indonesia", + "indoneisa", "indonesia", + "indutrial", "industrial", + "inersting", "inserting", + "inexpense", "inexpensive", + "infallibe", "infallible", + "inferioir", "inferior", + "inferiour", "inferior", + "infestato", "infestation", + "infiltrar", "infiltrator", + "infinitey", "infinity", + "infinitie", "infinite", + "infinitiy", "infinity", + "infinitly", "infinity", + "inflatabe", "inflatable", + "influense", "influences", + "influenta", "influential", + "informate", "informative", + "infraread", "infrared", + "ingeniuty", "ingenuity", + "ingeunity", "ingenuity", + "ingocnito", "incognito", + "ingorance", "ignorance", + "inguenity", "ingenuity", + "inhabitat", "inhabitants", + "inheirted", "inherited", + "inhertied", "inherited", + "initailly", "initially", + "initalese", "initialese", + "initaling", "initialing", + "initalise", "initialise", + "initalism", "initialism", + "initalize", "initialize", + "initalled", "initialled", + "initation", "initiation", + "initiales", "initials", + "initiatie", "initiatives", + "initiatin", "initiation", + "initiatve", "initiate", + "injustics", "injustices", + "inlcuding", "including", + "inmigrant", "immigrant", + "innoucous", "innocuous", + "innovatin", "innovations", + "innovatve", "innovate", + "inpection", "inception", + "inpending", "impending", + "inproving", "improving", + "inpsector", "inspector", + "inpsiring", "inspiring", + "inquisito", "inquisition", + "inquisitr", "inquisitor", + "inresting", "inserting", + "insanelly", "insanely", + "insepctor", "inspector", + "insidiuos", "insidious", + "insipring", "inspiring", + "insluated", "insulated", + "inspectin", "inspection", + "instabilt", "instability", + "installes", "installs", + "installus", "installs", + "instering", "inserting", + "insticnts", "instincts", + "institude", "instituted", + "instituto", "institution", + "insualted", "insulated", + "insurence", "insurance", + "insurgeny", "insurgency", + "integirty", "integrity", + "integraal", "integral", + "integrade", "integrated", + "integrato", "integration", + "intenisty", "intensity", + "intensley", "intensely", + "interacte", "interactive", + "interents", "internets", + "interesat", "interest", + "interesst", "interests", + "interewbs", "interwebs", + "interfase", "interfaces", + "interfeer", "interfere", + "interfers", "interferes", + "intergate", "integrate", + "intergity", "integrity", + "interiour", "interior", + "internest", "internets", + "interpert", "interpret", + "interprut", "interrupt", + "interrups", "interrupts", + "interstae", "interstate", + "interveen", "intervene", + "intervied", "interviewed", + "intervier", "interviewer", + "intervies", "interviews", + "intesnely", "intensely", + "intesnity", "intensity", + "intestins", "intestines", + "intialize", "initialize", + "inticrate", "intricate", + "intimidad", "intimidated", + "intircate", "intricate", + "intiution", "intuition", + "intiutive", "intuitive", + "intorduce", "introduce", + "intorvert", "introvert", + "intracite", 
"intricate", + "intrduced", "introduced", + "intregity", "integrity", + "intrenets", "internets", + "intrepret", "interpret", + "intrerupt", "interrupt", + "intrewebs", "interwebs", + "intrinisc", "intrinsic", + "intrisinc", "intrinsic", + "intrisnic", "intrinsic", + "intriuged", "intrigued", + "introdued", "introduced", + "introduse", "introduces", + "introvers", "introverts", + "intruiged", "intrigued", + "intrument", "instrument", + "inutition", "intuition", + "inutitive", "intuitive", + "invaderas", "invaders", + "invalidas", "invalidates", + "inventios", "inventions", + "investige", "investigate", + "investmet", "investments", + "invincibe", "invincible", + "invloving", "involving", + "invovling", "involving", + "ipubrofen", "ibuprofen", + "iranianos", "iranians", + "irelevent", "irrelevant", + "ironicaly", "ironically", + "irritatie", "irritate", + "irritatin", "irritation", + "isalmists", "islamists", + "isalnders", "islanders", + "islamiskt", "islamist", + "islamsits", "islamists", + "islmaists", "islamists", + "isntaller", "installer", + "isntances", "instances", + "isntantly", "instantly", + "israelies", "israelis", + "israelits", "israelis", + "italianas", "italians", + "italianos", "italians", + "jailbrake", "jailbreak", + "jalibreak", "jailbreak", + "jamaicain", "jamaican", + "jersualem", "jerusalem", + "jeruselam", "jerusalem", + "jeruslaem", "jerusalem", + "journalis", "journals", + "judegment", "judgement", + "judgemant", "judgemental", + "judisuary", "judiciary", + "jugdement", "judgement", + "juggernat", "juggernaut", + "juvenille", "juvenile", + "keneysian", "keynesian", + "kentuckey", "kentucky", + "kenyesian", "keynesian", + "keybaords", "keyboards", + "keyensian", "keynesian", + "keyesnian", "keynesian", + "keynseian", "keynesian", + "keysenian", "keynesian", + "kilometes", "kilometers", + "kindapped", "kidnapped", + "kncokback", "knockback", + "knoweldge", "knowledge", + "knowlegde", "knowledge", + "konckback", "knockback", + "kryptonie", "kryptonite", + "labirynth", "labyrinth", + "laboratoy", "laboratory", + "laboreres", "laborers", + "labratory", "laboratory", + "labriynth", "labyrinth", + "labryinth", "labyrinth", + "labyrnith", "labyrinth", + "landscaps", "landscapes", + "landscspe", "landscapes", + "langauges", "languages", + "languague", "language", + "lanuchers", "launchers", + "lanugages", "languages", + "larington", "arlington", + "latitudie", "latitude", + "lattitude", "latitude", + "laucnhers", "launchers", + "laucnhing", "launching", + "launchign", "launching", + "laybrinth", "labyrinth", + "lebanesse", "lebanese", + "leceister", "leicester", + "leciester", "leicester", + "legitmate", "legitimate", + "legnedary", "legendary", + "lesbianas", "lesbians", + "lesbianus", "lesbians", + "letivicus", "leviticus", + "leutenant", "lieutenant", + "levaithan", "leviathan", + "levellign", "levelling", + "levetated", "levitated", + "levetates", "levitates", + "levicitus", "leviticus", + "levleling", "levelling", + "lfiesteal", "lifesteal", + "liberales", "liberals", + "liberalim", "liberalism", + "liberalis", "liberals", + "liberatin", "liberation", + "libraires", "libraries", + "liecester", "leicester", + "lieuenant", "lieutenant", + "lieutenat", "lieutenant", + "lifespawn", "lifespan", + "lifestlye", "lifestyle", + "lighnting", "lightning", + "lightnign", "lightning", + "ligthning", "lightning", + "ligthroom", "lightroom", + "lingerine", "lingerie", + "lispticks", "lipsticks", + "listenend", "listened", + "literarly", "literary", + "literarry", "literary", + 
"literatre", "literate", + "literatue", "literate", + "literture", "literature", + "lithaunia", "lithuania", + "lithuaina", "lithuania", + "lithuiana", "lithuania", + "lithunaia", "lithuania", + "litigatin", "litigation", + "lituhania", "lithuania", + "liveprool", "liverpool", + "livestrem", "livestream", + "lobbysits", "lobbyists", + "lockscren", "lockscreen", + "logisitcs", "logistics", + "logsitics", "logistics", + "loiusiana", "louisiana", + "lollipoop", "lollipop", + "louisvile", "louisville", + "luanchers", "launchers", + "luanching", "launching", + "lubicrant", "lubricant", + "lubircant", "lubricant", + "ludcrious", "ludicrous", + "ludricous", "ludicrous", + "lunaticos", "lunatics", + "lunaticus", "lunatics", + "macaronni", "macaroni", + "maestries", "masteries", + "magainzes", "magazines", + "magensium", "magnesium", + "magincian", "magician", + "magintude", "magnitude", + "magneisum", "magnesium", + "magnesuim", "magnesium", + "magnifine", "magnificent", + "mainfesto", "manifesto", + "mainfests", "manifests", + "mainstrem", "mainstream", + "maintaing", "maintaining", + "maintance", "maintenance", + "maintians", "maintains", + "mairjuana", "marijuana", + "malasyian", "malaysian", + "malayisan", "malaysian", + "malaysain", "malaysian", + "maletonin", "melatonin", + "maltesian", "maltese", + "malyasian", "malaysian", + "managable", "manageable", + "managment", "management", + "mandarian", "mandarin", + "mandarijn", "mandarin", + "mandarion", "mandarin", + "maneouvre", "manoeuvre", + "maneuveur", "maneuver", + "maneveurs", "maneuvers", + "manfiesto", "manifesto", + "manfiests", "manifests", + "mangesium", "magnesium", + "mangitude", "magnitude", + "manouvers", "maneuvers", + "mantained", "maintained", + "manuevers", "maneuvers", + "maraudeur", "marauder", + "marevlous", "marvelous", + "margarent", "margaret", + "margarite", "margaret", + "marginaal", "marginal", + "marginaly", "marginally", + "marijauna", "marijuana", + "marineras", "mariners", + "marineris", "mariners", + "marineros", "mariners", + "marjiuana", "marijuana", + "marjority", "majority", + "marmelade", "marmalade", + "marrtyred", "martyred", + "massagens", "massages", + "massivley", "massively", + "masteires", "masteries", + "mastereis", "masteries", + "masterise", "masteries", + "mastermid", "mastermind", + "mastieres", "masteries", + "masturbae", "masturbated", + "materiaal", "material", + "matierals", "materials", + "mattreses", "mattress", + "mayalsian", "malaysian", + "maylasian", "malaysian", + "mccarthey", "mccarthy", + "mecahnics", "mechanics", + "mecernary", "mercenary", + "mechancis", "mechanics", + "mechanims", "mechanism", + "mechaninc", "mechanic", + "mechansim", "mechanism", + "medicince", "medicine", + "mediciney", "mediciny", + "meditatie", "meditate", + "meditatin", "meditation", + "megathred", "megathread", + "melanotin", "melatonin", + "melborune", "melbourne", + "melbounre", "melbourne", + "membrance", "membrane", + "menstraul", "menstrual", + "menstural", "menstrual", + "mensutral", "menstrual", + "mentiones", "mentions", + "mercanery", "mercenary", + "merhcants", "merchants", + "messagers", "messages", + "messanger", "messenger", + "metabloic", "metabolic", + "metalurgy", "metallurgy", + "methaphor", "metaphor", + "methapors", "metaphors", + "methodoly", "methodology", + "metropols", "metropolis", + "mexicanas", "mexicans", + "mexicants", "mexicans", + "mexicanus", "mexicans", + "michellle", "michelle", + "micorwave", "microwave", + "micoscopy", "microscopy", + "microphen", "microphone", + 
"migrantes", "migrants", + "migrianes", "migraines", + "milawukee", "milwaukee", + "milennium", "millennium", + "milestons", "milestones", + "militians", "militias", + "millenial", "millennial", + "millenian", "millennia", + "millenium", "millennium", + "millionar", "millionaire", + "millitary", "military", + "miluwakee", "milwaukee", + "milwakuee", "milwaukee", + "milwuakee", "milwaukee", + "mindcarck", "mindcrack", + "mindlessy", "mindlessly", + "minerales", "minerals", + "minisclue", "miniscule", + "miniscuel", "miniscule", + "ministery", "ministry", + "minisucle", "miniscule", + "minitaure", "miniature", + "minituare", "miniature", + "minneosta", "minnesota", + "minnestoa", "minnesota", + "minsicule", "miniscule", + "minsiters", "ministers", + "minstries", "ministries", + "miraculos", "miraculous", + "mircowave", "microwave", + "mirrorred", "mirrored", + "miserabel", "miserable", + "mispelled", "misspelled", + "misreable", "miserable", + "misreably", "miserably", + "missisipi", "mississippi", + "missonary", "missionary", + "missourri", "missouri", + "misspelld", "misspelled", + "mobilitiy", "mobility", + "moderatey", "moderately", + "moderatin", "moderation", + "modifires", "modifiers", + "moelcules", "molecules", + "moleclues", "molecules", + "molestare", "molester", + "molestato", "molestation", + "molesterd", "molested", + "monestary", "monastery", + "monitores", "monitors", + "monolgoue", "monologue", + "monolight", "moonlight", + "monolouge", "monologue", + "monopolis", "monopolies", + "monopolly", "monopoly", + "monopoloy", "monopoly", + "monserrat", "montserrat", + "monstorus", "monstrous", + "monstruos", "monstrous", + "montanous", "mountainous", + "montoring", "monitoring", + "monumnets", "monuments", + "moratlity", "mortality", + "morbidley", "morbidly", + "morgatges", "mortgages", + "morgtages", "mortgages", + "morisette", "morissette", + "mormonsim", "mormonism", + "morroccan", "moroccan", + "mortailty", "mortality", + "mosquitto", "mosquito", + "motivatie", "motivate", + "motivatin", "motivations", + "motorcyce", "motorcycles", + "motorolja", "motorola", + "motoroloa", "motorola", + "moustahce", "moustache", + "movepseed", "movespeed", + "mozzarela", "mozzarella", + "mucisians", "musicians", + "mulitated", "mutilated", + "mulitples", "multiples", + "multipled", "multiplied", + "multplies", "multiples", + "murdererd", "murdered", + "muscially", "musically", + "muscician", "musician", + "musculair", "muscular", + "mushrooom", "mushroom", + "musicains", "musicians", + "mutatiohn", "mutation", + "mutialted", "mutilated", + "mutilatin", "mutilation", + "mutliated", "mutilated", + "mutliples", "multiples", + "mutlitude", "multitude", + "mysterise", "mysteries", + "mysterous", "mysterious", + "nacrotics", "narcotics", + "naferious", "nefarious", + "nahsville", "nashville", + "narcissim", "narcissism", + "narcissit", "narcissist", + "narcissts", "narcissist", + "narctoics", "narcotics", + "nasvhille", "nashville", + "nationaal", "national", + "nationaly", "nationally", + "nativelly", "natively", + "natrually", "naturally", + "navigatie", "navigate", + "navigatin", "navigation", + "neccesary", "necessary", + "necessite", "necessities", + "neckbears", "neckbeards", + "neckbread", "neckbeard", + "nedlessly", "endlessly", + "needlessy", "needlessly", + "negiotate", "negotiate", + "negociate", "negotiate", + "negoitate", "negotiate", + "neigbhour", "neighbour", + "neigbours", "neighbours", + "neighboor", "neighbor", + "nessecary", "necessary", + "newcaslte", "newcastle", + "newcastel", 
"newcastle", + "nieghbour", "neighbour", + "nightfa;;", "nightfall", + "nightlcub", "nightclub", + "nigthclub", "nightclub", + "nigthlife", "nightlife", + "nigthmare", "nightmare", + "nihilisim", "nihilism", + "ninteenth", "nineteenth", + "nominatie", "nominate", + "nominatin", "nomination", + "noninital", "noninitial", + "norhteast", "northeast", + "norhtwest", "northwest", + "normanday", "normandy", + "northeren", "northern", + "norwegain", "norwegian", + "norwiegan", "norwegian", + "nostaglia", "nostalgia", + "nostaglic", "nostalgic", + "nostaliga", "nostalgia", + "nostaligc", "nostalgic", + "nostlagia", "nostalgia", + "nostlagic", "nostalgic", + "nostriles", "nostrils", + "nostrills", "nostrils", + "notacible", "noticable", + "notciable", "noticable", + "noteboook", "notebook", + "noteriety", "notoriety", + "noteworty", "noteworthy", + "noticable", "noticeable", + "noticably", "noticeably", + "noticalbe", "noticable", + "noticeing", "noticing", + "noticible", "noticeable", + "notoroius", "notorious", + "novermber", "november", + "nullabour", "nullarbor", + "numberous", "numerous", + "numercial", "numerical", + "numerious", "numerous", + "nuremburg", "nuremberg", + "nurtients", "nutrients", + "nutirents", "nutrients", + "nutreints", "nutrients", + "nutritent", "nutrient", + "nutritian", "nutritional", + "nutritios", "nutritious", + "obediance", "obedience", + "obeidence", "obedience", + "obersvant", "observant", + "obersvers", "observers", + "obesssion", "obsession", + "obiedence", "obedience", + "obivously", "obviously", + "objectivs", "objectives", + "objectivy", "objectivity", + "obscruity", "obscurity", + "obscuirty", "obscurity", + "observare", "observer", + "observerd", "observed", + "obssesion", "obsession", + "obssesive", "obsessive", + "obssessed", "obsessed", + "obstruced", "obstructed", + "obsucrity", "obscurity", + "obtainabe", "obtainable", + "obviosuly", "obviously", + "obvioulsy", "obviously", + "obvisouly", "obviously", + "obvoiusly", "obviously", + "ocasional", "occasional", + "ocasioned", "occasioned", + "ocassions", "occasions", + "occaisons", "occasions", + "occassion", "occasion", + "occurance", "occurrence", + "occurence", "occurrence", + "octohedra", "octahedra", + "ocuntries", "countries", + "ocurrance", "occurrence", + "ocurrence", "occurrence", + "offcially", "officially", + "offically", "officially", + "officialy", "officially", + "offpsring", "offspring", + "offspirng", "offspring", + "offsrping", "offspring", + "ogliarchy", "oligarchy", + "oilgarchy", "oligarchy", + "oligrachy", "oligarchy", + "ommitting", "omitting", + "onlsaught", "onslaught", + "onsalught", "onslaught", + "onslaugth", "onslaught", + "onsluaght", "onslaught", + "onwership", "ownership", + "opiniones", "opinions", + "oposition", "opposition", + "opponenet", "opponent", + "opposiste", "opposites", + "opposties", "opposites", + "oppressin", "oppression", + "opression", "oppression", + "opressive", "oppressive", + "opthalmic", "ophthalmic", + "optimisim", "optimism", + "optimistc", "optimistic", + "optinally", "optimally", + "oragnered", "orangered", + "oragnised", "organised", + "oragnizer", "organizer", + "orcehstra", "orchestra", + "ordinarly", "ordinary", + "orgainsed", "organised", + "orgainzer", "organizer", + "organered", "orangered", + "organices", "organise", + "organisim", "organism", + "organiske", "organise", + "organiste", "organise", + "organites", "organise", + "organizms", "organism", + "organsied", "organised", + "organsims", "organisms", + "organzier", "organizer", + 
"orginally", "originally", + "orgnaised", "organised", + "orhcestra", "orchestra", + "orientato", "orientation", + "origanaly", "originally", + "originall", "original", + "originalt", "originality", + "originaly", "originally", + "origintea", "originate", + "origional", "original", + "orignally", "originally", + "orignials", "originals", + "oublisher", "publisher", + "oursleves", "ourselves", + "oustiders", "outsiders", + "oustpoken", "outspoken", + "outisders", "outsiders", + "outnumbed", "outnumbered", + "outpalyed", "outplayed", + "outperfom", "outperform", + "outpsoken", "outspoken", + "outrageos", "outrageous", + "outskirst", "outskirts", + "outskrits", "outskirts", + "outwieghs", "outweighs", + "overbaord", "overboard", + "overclcok", "overclock", + "overdirve", "overdrive", + "overhpyed", "overhyped", + "overhwelm", "overwhelm", + "overlcock", "overclock", + "overloard", "overload", + "overpaied", "overpaid", + "overpowed", "overpowered", + "overriden", "overridden", + "overwhlem", "overwhelm", + "overwirte", "overwrite", + "overwtach", "overwatch", + "overyhped", "overhyped", + "owernship", "ownership", + "pacificts", "pacifist", + "packageid", "packaged", + "pactivity", "captivity", + "painkills", "painkillers", + "paitently", "patiently", + "paitience", "patience", + "pakistain", "pakistani", + "pakistian", "pakistani", + "pakistnai", "pakistani", + "paksitani", "pakistani", + "paladines", "paladins", + "paladinos", "paladins", + "palestein", "palestine", + "palestina", "palestinian", + "palistian", "palestinian", + "paltforms", "platforms", + "palystyle", "playstyle", + "pancakers", "pancakes", + "pantomine", "pantomime", + "paradimes", "paradise", + "paragraps", "paragraphs", + "paragrpah", "paragraph", + "paralells", "parallels", + "paralelly", "parallelly", + "paralisys", "paralysis", + "parallely", "parallelly", + "paralzyed", "paralyzed", + "paramedis", "paramedics", + "paramters", "parameters", + "paranoica", "paranoia", + "paranoida", "paranoia", + "parasties", "parasites", + "paraylsis", "paralysis", + "paraylzed", "paralyzed", + "parellels", "parallels", + "paricular", "particular", + "parisitic", "parasitic", + "paritally", "partially", + "parliment", "parliament", + "parmesaen", "parmesan", + "parntered", "partnered", + "parrallel", "parallel", + "partchett", "pratchett", + "parterned", "partnered", + "participe", "participate", + "partiotic", "patriotic", + "partisain", "partisan", + "pasengers", "passengers", + "passagens", "passages", + "passagers", "passages", + "passerbys", "passersby", + "passiones", "passions", + "passivley", "passively", + "passowrds", "passwords", + "pateintly", "patiently", + "paticular", "particular", + "patinetly", "patiently", + "patriarca", "patriarchal", + "patriarcy", "patriarchy", + "patriotas", "patriots", + "patriotes", "patriots", + "patroitic", "patriotic", + "pattented", "patented", + "pavillion", "pavilion", + "pbulisher", "publisher", + "peacefuly", "peacefully", + "pedohpile", "pedophile", + "pedophila", "pedophilia", + "pedophils", "pedophiles", + "peircings", "piercings", + "penalites", "penalties", + "penatlies", "penalties", + "penduluum", "pendulum", + "penerator", "penetrator", + "penguines", "penguins", + "penguings", "penguins", + "penguinos", "penguins", + "peninsual", "peninsula", + "peninusla", "peninsula", + "penisnula", "peninsula", + "penisular", "peninsular", + "pennisula", "peninsula", + "pensinula", "peninsula", + "pentagoon", "pentagon", + "peodphile", "pedophile", + "pepperino", "pepperoni", + "peppermit", 
"peppermint", + "percepted", "perceived", + "percevied", "perceived", + "percieved", "perceived", + "percisely", "precisely", + "percision", "precision", + "percursor", "precursor", + "perdators", "predators", + "peremiter", "perimeter", + "perfeclty", "perfectly", + "perfomers", "performers", + "performas", "performs", + "perfromer", "performer", + "pericings", "piercings", + "perimetre", "perimeter", + "peristent", "persistent", + "periwinke", "periwinkle", + "permanant", "permanent", + "permature", "premature", + "permenant", "permanent", + "permieter", "perimeter", + "permissie", "permissible", + "permissin", "permissions", + "permisson", "permission", + "pernament", "permanent", + "perorders", "preorders", + "perpetrar", "perpetrator", + "perpetuae", "perpetuate", + "perpetuas", "perpetuates", + "persauded", "persuaded", + "perscribe", "prescribe", + "perserved", "preserved", + "persistes", "persists", + "personaes", "personas", + "personaly", "personally", + "personell", "personnel", + "personhod", "personhood", + "persuated", "persuade", + "pertended", "pretended", + "pertoleum", "petroleum", + "perusaded", "persuaded", + "pervertes", "perverse", + "pesticids", "pesticides", + "petroluem", "petroleum", + "phemonena", "phenomena", + "phenemona", "phenomena", + "phenonema", "phenomena", + "phillipse", "phillies", + "philosopy", "philosophy", + "philosphy", "philosophy", + "phonecian", "phoenecian", + "phonemena", "phenomena", + "phongraph", "phonograph", + "photograh", "photograph", + "phsyician", "physician", + "phsyicist", "physicist", + "phycisian", "physician", + "phycisist", "physicist", + "physicaly", "physically", + "physicits", "physicist", + "physisict", "physicist", + "piblisher", "publisher", + "picthfork", "pitchfork", + "pinetrest", "pinterest", + "placeheld", "placeholder", + "placemens", "placements", + "plaestine", "palestine", + "plagarism", "plagiarism", + "planatery", "planetary", + "planation", "plantation", + "planteary", "planetary", + "plasticas", "plastics", + "plasticos", "plastics", + "plasticus", "plastics", + "platfroms", "platforms", + "platofrms", "platforms", + "plausable", "plausible", + "plausbile", "plausible", + "plausibel", "plausible", + "playgroud", "playground", + "playright", "playwright", + "playstlye", "playstyle", + "playwrite", "playwright", + "plebicite", "plebiscite", + "plethoria", "plethora", + "ploarized", "polarized", + "pointeres", "pointers", + "polinator", "pollinator", + "polishees", "polishes", + "politelly", "politely", + "politican", "politician", + "politicas", "politics", + "politicin", "politician", + "politicus", "politics", + "polygammy", "polygamy", + "populatin", "populations", + "poralized", "polarized", + "porcelian", "porcelain", + "porcelina", "porcelain", + "poreclain", "porcelain", + "porftolio", "portfolio", + "porgramme", "programme", + "portfoilo", "portfolio", + "portoflio", "portfolio", + "portraing", "portraying", + "portrayes", "portrays", + "portrayls", "portrays", + "portriats", "portraits", + "portugese", "portuguese", + "portugues", "portuguese", + "posessing", "possessing", + "posession", "possession", + "positiond", "positioned", + "positiong", "positioning", + "positionl", "positional", + "positiviy", "positivity", + "possesess", "possesses", + "possesing", "possessing", + "possesion", "possession", + "possessin", "possessions", + "possibile", "possible", + "possibily", "possibility", + "possibley", "possibly", + "possiblly", "possibly", + "possition", "position", + "powderade", "powdered", + 
"powerfull", "powerful", + "pracitcal", "practical", + "practhett", "pratchett", + "practicly", "practically", + "practives", "practise", + "pragamtic", "pragmatic", + "preadtors", "predators", + "precedeed", "preceded", + "preceeded", "preceded", + "preceived", "perceived", + "preciesly", "precisely", + "precisley", "precisely", + "precurors", "precursor", + "precurosr", "precursor", + "precurser", "precursor", + "predatobr", "predator", + "predictie", "predictive", + "predictin", "prediction", + "predjuice", "prejudice", + "predujice", "prejudice", + "prefectly", "perfectly", + "preferens", "preferences", + "prefering", "preferring", + "preformer", "performer", + "pregnance", "pregnancies", + "preimeter", "perimeter", + "prejiduce", "prejudice", + "prejucide", "prejudice", + "premanent", "permanent", + "premeired", "premiered", + "preorderd", "preordered", + "preparato", "preparation", + "prepatory", "preparatory", + "presentas", "presents", + "presentes", "presents", + "presicely", "precisely", + "presicion", "precision", + "presideny", "presidency", + "prestigiu", "prestigious", + "prestigue", "prestige", + "presuaded", "persuaded", + "pretendas", "pretends", + "pretensje", "pretense", + "pretinent", "pertinent", + "prevelant", "prevalent", + "preventin", "prevention", + "previvous", "previous", + "priesthod", "priesthood", + "priestood", "priesthood", + "primaires", "primaries", + "primairly", "primarily", + "primarliy", "primarily", + "primative", "primitive", + "primordal", "primordial", + "princesas", "princess", + "princeses", "princess", + "princesss", "princesses", + "principas", "principals", + "principly", "principally", + "prinicple", "principle", + "prioritie", "prioritize", + "prioritse", "priorities", + "privalege", "privilege", + "privelege", "privilege", + "privelige", "privilege", + "privilage", "privilege", + "privilegs", "privileges", + "privledge", "privilege", + "probabily", "probability", + "probablly", "probably", + "problemas", "problems", + "procative", "proactive", + "procedger", "procedure", + "proceding", "proceeding", + "proceedes", "proceeds", + "procelain", "porcelain", + "procesess", "processes", + "processer", "processor", + "processos", "processors", + "proclamed", "proclaimed", + "procotols", "protocols", + "prodecure", "procedure", + "productie", "productive", + "productin", "productions", + "productos", "products", + "profesion", "profusion", + "professer", "professor", + "professin", "professions", + "proffesed", "professed", + "proffesor", "professor", + "progessed", "progressed", + "programas", "programs", + "programem", "programme", + "programes", "programs", + "programms", "programs", + "progresso", "progression", + "progresss", "progresses", + "projectie", "projectile", + "projectin", "projection", + "prominant", "prominent", + "promiscus", "promiscuous", + "promotted", "promoted", + "pronomial", "pronominal", + "pronouced", "pronounced", + "pronounds", "pronouns", + "pronounes", "pronouns", + "propagana", "propaganda", + "properies", "properties", + "propertly", "property", + "propeties", "properties", + "prophesie", "prophecies", + "prophetes", "prophets", + "propogate", "propagate", + "proposels", "proposes", + "proposito", "proposition", + "propperly", "properly", + "propsects", "prospects", + "prosperos", "prosperous", + "prostitue", "prostitute", + "protectes", "protects", + "protectie", "protective", + "protectos", "protectors", + "proteinas", "proteins", + "proteines", "proteins", + "protestas", "protests", + "protestat", "protestant", 
+ "protestes", "protests", + "protestos", "protests", + "protfolio", "portfolio", + "protocool", "protocol", + "prototpye", "prototype", + "prototyps", "prototypes", + "protraits", "portraits", + "protrayal", "portrayal", + "protrayed", "portrayed", + "provicial", "provincial", + "provincie", "province", + "provisios", "provisions", + "pruchased", "purchased", + "pruchases", "purchases", + "prugatory", "purgatory", + "pruposely", "purposely", + "pscyhotic", "psychotic", + "pseudonyn", "pseudonym", + "pshycosis", "psychosis", + "pshycotic", "psychotic", + "psycology", "psychology", + "psycothic", "psychotic", + "ptichfork", "pitchfork", + "pubilsher", "publisher", + "publiaher", "publisher", + "publicaly", "publicly", + "publicani", "publication", + "publicher", "publisher", + "publiclly", "publicly", + "publihser", "publisher", + "publisehr", "publisher", + "publisger", "publisher", + "publishor", "publisher", + "publishre", "publisher", + "publsiher", "publisher", + "publusher", "publisher", + "puchasing", "purchasing", + "punishmet", "punishments", + "puplisher", "publisher", + "puragtory", "purgatory", + "purcahsed", "purchased", + "purcahses", "purchases", + "purhcased", "purchased", + "purposley", "purposely", + "pursuaded", "persuaded", + "pursuades", "persuades", + "pyramidas", "pyramids", + "pyramides", "pyramids", + "pyschosis", "psychosis", + "pyschotic", "psychotic", + "qaulifies", "qualifies", + "quantifiy", "quantify", + "quantitiy", "quantity", + "quantitty", "quantity", + "quartlery", "quarterly", + "queations", "equations", + "queenland", "queensland", + "questiond", "questioned", + "questiong", "questioning", + "questionn", "questioning", + "radicalis", "radicals", + "rapsberry", "raspberry", + "rasbperry", "raspberry", + "rationaly", "rationally", + "reactiony", "reactionary", + "realisitc", "realistic", + "realoding", "reloading", + "realsitic", "realistic", + "realtable", "relatable", + "realtions", "relations", + "realtives", "relatives", + "reamining", "remaining", + "reaplying", "replaying", + "reasearch", "research", + "reaveling", "revealing", + "rebellios", "rebellious", + "rebllions", "rebellions", + "recations", "creations", + "reccomend", "recommend", + "reccuring", "recurring", + "receeding", "receding", + "recepient", "recipient", + "recgonise", "recognise", + "recgonize", "recognize", + "recidents", "residents", + "recievers", "receivers", + "recieving", "receiving", + "recipiant", "recipient", + "reciproce", "reciprocate", + "reclutant", "reluctant", + "recoginse", "recognise", + "recoginze", "recognize", + "recomends", "recommends", + "recommens", "recommends", + "reconenct", "reconnect", + "recongise", "recognise", + "recongize", "recognize", + "reconicle", "reconcile", + "reconized", "recognized", + "recordare", "recorder", + "recoveres", "recovers", + "recoverys", "recovers", + "recpetive", "receptive", + "recpetors", "receptors", + "recquired", "required", + "recreatie", "recreate", + "recruitcs", "recruits", + "recruites", "recruits", + "recrusion", "recursion", + "recrutied", "recruited", + "recrutier", "recruiter", + "rectangel", "rectangle", + "rectanlge", "rectangle", + "recuiting", "recruiting", + "recurison", "recursion", + "recurited", "recruited", + "recuriter", "recruiter", + "recusrion", "recursion", + "redeemeed", "redeemed", + "redundany", "redundancy", + "redundent", "redundant", + "reedeming", "redeeming", + "refelcted", "reflected", + "refereces", "references", + "refereees", "referees", + "refereers", "referees", + "referemce", 
"reference", + "referencs", "references", + "referense", "references", + "referiang", "referring", + "referinng", "refering", + "refernces", "references", + "refernece", "reference", + "refershed", "refreshed", + "refersher", "refresher", + "reffering", "referring", + "reflectie", "reflective", + "refrehser", "refresher", + "refrences", "references", + "refromist", "reformist", + "regionaal", "regional", + "registerd", "registered", + "registery", "registry", + "regualrly", "regularly", + "regualtor", "regulator", + "regulaion", "regulation", + "regulalry", "regularly", + "regulares", "regulars", + "regularis", "regulars", + "regulatin", "regulations", + "regurally", "regularly", + "reigining", "reigning", + "reinstale", "reinstalled", + "reisntall", "reinstall", + "reknowned", "renowned", + "relaoding", "reloading", + "relatiate", "retaliate", + "relativiy", "relativity", + "relativly", "relatively", + "relativno", "relation", + "relavence", "relevance", + "relcutant", "reluctant", + "relevence", "relevance", + "relfected", "reflected", + "reliabily", "reliability", + "reliabley", "reliably", + "religeous", "religious", + "remasterd", "remastered", + "rememberd", "remembered", + "rememebrs", "remembers", + "remianing", "remaining", + "remignton", "remington", + "remingotn", "remington", + "remmebers", "remembers", + "remotelly", "remotely", + "rendevous", "rendezvous", + "rendezous", "rendezvous", + "renedered", "rende", + "renegated", "renegade", + "rennovate", "renovate", + "repalying", "replaying", + "repblican", "republican", + "repeatedy", "repeatedly", + "repective", "receptive", + "repeition", "repetition", + "repentent", "repentant", + "rephrasse", "rephrase", + "replusive", "repulsive", + "reportedy", "reportedly", + "represend", "represented", + "repressin", "repression", + "reprtoire", "repertoire", + "repsonded", "responded", + "reptition", "repetition", + "reptuable", "reputable", + "repubican", "republican", + "republian", "republican", + "repulican", "republican", + "repulisve", "repulsive", + "repuslive", "repulsive", + "resaurant", "restaurant", + "researchs", "researchers", + "resembels", "resembles", + "reserverd", "reserved", + "resintall", "reinstall", + "resistane", "resistances", + "resistans", "resistances", + "resistend", "resisted", + "resistent", "resistant", + "resmebles", "resembles", + "resolutin", "resolutions", + "resoruces", "resources", + "respectes", "respects", + "respectos", "respects", + "responces", "response", + "respondas", "responds", + "respondis", "responds", + "respondus", "responds", + "respoting", "reposting", + "ressemble", "resemble", + "ressurect", "resurrect", + "restarant", "restaurant", + "resticted", "restricted", + "restircts", "restricts", + "restorani", "restoration", + "restraind", "restrained", + "restraing", "restraining", + "restraunt", "restraint", + "restriant", "restraint", + "restricte", "restrictive", + "resturant", "restaurant", + "retailate", "retaliate", + "retalaite", "retaliate", + "retaliers", "retailers", + "reteriver", "retriever", + "retirever", "retriever", + "retrevier", "retriever", + "retriving", "retrieving", + "reuptable", "reputable", + "reveiwers", "reviewers", + "revelaing", "revealing", + "revelance", "relevance", + "revolutin", "revolutions", + "rewachted", "rewatched", + "rewatchig", "rewatching", + "rferences", "references", + "ridiculos", "ridiculous", + "ridiculue", "ridicule", + "ridiculus", "ridiculous", + "righetous", "righteous", + "rightfuly", "rightfully", + "rightoues", "righteous", + 
"rigourous", "rigorous", + "rigtheous", "righteous", + "rilvaries", "rivalries", + "rininging", "ringing", + "rivarlies", "rivalries", + "rivlaries", "rivalries", + "roaylties", "royalties", + "roboticus", "robotics", + "roganisms", "organisms", + "royalites", "royalties", + "roylaties", "royalties", + "ruleboook", "rulebook", + "sacarstic", "sarcastic", + "sacntuary", "sanctuary", + "sacrafice", "sacrifice", + "sacrastic", "sarcastic", + "sacrifise", "sacrifices", + "salughter", "slaughter", + "samckdown", "smackdown", + "sanctiond", "sanctioned", + "sancturay", "sanctuary", + "sancutary", "sanctuary", + "sandstrom", "sandstorm", + "sandwhich", "sandwich", + "sanhedrim", "sanhedrin", + "santcuary", "sanctuary", + "santioned", "sanctioned", + "sapphirre", "sapphire", + "sastified", "satisfied", + "sastifies", "satisfies", + "satelites", "satellites", + "saterdays", "saturdays", + "satisifed", "satisfied", + "satisifes", "satisfies", + "satrudays", "saturdays", + "satsified", "satisfied", + "satsifies", "satisfies", + "sattelite", "satellite", + "saxaphone", "saxophone", + "scaepgoat", "scapegoat", + "scaleable", "scalable", + "scandales", "scandals", + "scandalos", "scandals", + "scantuary", "sanctuary", + "scaricity", "scarcity", + "scarifice", "sacrifice", + "scarmbled", "scrambled", + "scartched", "scratched", + "scartches", "scratches", + "scavanged", "scavenged", + "sceintist", "scientist", + "scholalry", "scholarly", + "sciencers", "sciences", + "scientfic", "scientific", + "scientifc", "scientific", + "scientits", "scientist", + "sclupture", "sculpture", + "scnearios", "scenarios", + "scoreboad", "scoreboard", + "scottisch", "scottish", + "scracthed", "scratched", + "scracthes", "scratches", + "scrambeld", "scrambled", + "scrathces", "scratches", + "scrollade", "scrolled", + "scrutiney", "scrutiny", + "scrutinty", "scrutiny", + "sculpteur", "sculpture", + "sculputre", "sculpture", + "scultpure", "sculpture", + "scuplture", "sculpture", + "scuptures", "sculptures", + "seamlessy", "seamlessly", + "searchign", "searching", + "sebasitan", "sebastian", + "sebastain", "sebastian", + "sebsatian", "sebastian", + "secceeded", "seceded", + "secertary", "secretary", + "secratary", "secretary", + "secratery", "secretary", + "secretery", "secretary", + "secretley", "secretly", + "sednetary", "sedentary", + "seduciton", "seduction", + "semanitcs", "semantics", + "semestres", "semesters", + "semnatics", "semantics", + "semseters", "semesters", + "senatores", "senators", + "sendetary", "sedentary", + "sensitivy", "sensitivity", + "sentimant", "sentimental", + "sepcially", "specially", + "seperated", "separated", + "seperates", "separates", + "seperator", "separator", + "septmeber", "september", + "seraching", "searching", + "seriosuly", "seriously", + "serioulsy", "seriously", + "seriuosly", "seriously", + "servantes", "servants", + "settlment", "settlement", + "sexualizd", "sexualized", + "sexuallly", "sexually", + "shadasloo", "shadaloo", + "shaprness", "sharpness", + "sharpenss", "sharpness", + "shawhsank", "shawshank", + "sheilding", "shielding", + "shephered", "shepherd", + "shileding", "shielding", + "shitstrom", "shitstorm", + "shletered", "sheltered", + "shoudlers", "shoulders", + "shouldnot", "shouldnt", + "shperical", "spherical", + "shwashank", "shawshank", + "sidebaord", "sideboard", + "siganture", "signature", + "signapore", "singapore", + "signitory", "signatory", + "silhouete", "silhouette", + "similiair", "similiar", + "simliarly", "similarly", + "simluated", "simulated", + 
"simluator", "simulator", + "simplifiy", "simplify", + "simualted", "simulated", + "simualtor", "simulator", + "simulatie", "simulate", + "simulatin", "simulation", + "sinagpore", "singapore", + "sincerley", "sincerely", + "singature", "signature", + "singpaore", "singapore", + "singulair", "singular", + "singulary", "singularity", + "skateboad", "skateboard", + "skeletaal", "skeletal", + "skepitcal", "skeptical", + "skepticim", "skepticism", + "sketpical", "skeptical", + "slaughted", "slaughtered", + "slaugther", "slaughter", + "slipperly", "slippery", + "sluaghter", "slaughter", + "smackdwon", "smackdown", + "smealting", "smelting", + "smeesters", "semesters", + "snadstorm", "sandstorm", + "snippetts", "snippets", + "snowfalke", "snowflake", + "snowflaek", "snowflake", + "snowlfake", "snowflake", + "snwoballs", "snowballs", + "snythesis", "synthesis", + "snythetic", "synthetic", + "socailism", "socialism", + "socailist", "socialist", + "socailize", "socialize", + "soceities", "societies", + "socialini", "socializing", + "socialiss", "socialists", + "socialsim", "socialism", + "socieites", "societies", + "socilaism", "socialism", + "socilaist", "socialist", + "sociopati", "sociopathic", + "sociopats", "sociopaths", + "socratees", "socrates", + "socrateks", "socrates", + "soemthing", "something", + "sohpomore", "sophomore", + "soliliquy", "soliloquy", + "somehting", "something", + "someoneis", "someones", + "somethign", "something", + "somethins", "somethings", + "sopohmore", "sophomore", + "sotryline", "storyline", + "soundtrak", "soundtrack", + "sountrack", "soundtrack", + "sourthern", "southern", + "souvenier", "souvenir", + "soveregin", "sovereign", + "sovereing", "sovereign", + "soveriegn", "sovereign", + "spacegoat", "scapegoat", + "spagehtti", "spaghetti", + "spahgetti", "spaghetti", + "sparlking", "sparkling", + "spartants", "spartans", + "specailly", "specially", + "specailty", "specialty", + "specality", "specialty", + "speciales", "specials", + "specialis", "specials", + "speciatly", "specialty", + "specifing", "specifying", + "specimine", "specimen", + "spectrail", "spectral", + "specualte", "speculate", + "speechers", "speeches", + "spehrical", "spherical", + "speically", "specially", + "spetember", "september", + "sphagetti", "spaghetti", + "splatooon", "splatoon", + "sponosred", "sponsored", + "sponsered", "sponsored", + "sponsores", "sponsors", + "spontanes", "spontaneous", + "sponzored", "sponsored", + "sprakling", "sparkling", + "sprinkeld", "sprinkled", + "squadroon", "squadron", + "squirrles", "squirrels", + "squirrtle", "squirrel", + "squrriels", "squirrels", + "srirachia", "sriracha", + "srirachra", "sriracha", + "stabliize", "stabilize", + "stainlees", "stainless", + "startegic", "strategic", + "startlxde", "startled", + "statisitc", "statistic", + "staurdays", "saturdays", + "steadilly", "steadily", + "stealthly", "stealthy", + "stichting", "stitching", + "sticthing", "stitching", + "stimulans", "stimulants", + "stockplie", "stockpile", + "stornegst", "strongest", + "stragetic", "strategic", + "straightn", "straighten", + "strangets", "strangest", + "strategis", "strategies", + "strawbery", "strawberry", + "streamade", "streamed", + "streamare", "streamer", + "streamear", "streamer", + "strechted", "stretched", + "strechtes", "stretches", + "strecthed", "stretched", + "strecthes", "stretches", + "stregnths", "strengths", + "strenghen", "strengthen", + "strengthn", "strengthen", + "strentghs", "strengths", + "stressade", "stressed", + "stressers", "stresses", + 
"strictist", "strictest", + "stringnet", "stringent", + "stroyline", "storyline", + "structual", "structural", + "structurs", "structures", + "strucutre", "structure", + "struggeld", "struggled", + "struggels", "struggles", + "stryofoam", "styrofoam", + "stuctured", "structured", + "stuggling", "struggling", + "stupitidy", "stupidity", + "sturcture", "structure", + "sturggled", "struggled", + "sturggles", "struggles", + "styrofaom", "styrofoam", + "subarmine", "submarine", + "subculter", "subculture", + "submachne", "submachine", + "subpecies", "subspecies", + "subscirbe", "subscribe", + "subsidary", "subsidiary", + "subsizide", "subsidize", + "subsquent", "subsequent", + "subsrcibe", "subscribe", + "substanse", "substances", + "substanta", "substantial", + "substante", "substantive", + "substarte", "substrate", + "substitue", "substitute", + "substract", "subtract", + "subtances", "substances", + "subtiltes", "subtitles", + "subtitels", "subtitles", + "subtletly", "subtlety", + "subtlties", "subtitles", + "succedded", "succeeded", + "succeedes", "succeeds", + "succesful", "successful", + "succesion", "succession", + "succesive", "successive", + "suceeding", "succeeding", + "sucesfuly", "successfully", + "sucessful", "successful", + "sucession", "succession", + "sucessive", "successive", + "sufferage", "suffrage", + "sufferred", "suffered", + "sufficent", "sufficient", + "suggestes", "suggests", + "suggestie", "suggestive", + "sumbarine", "submarine", + "sumberged", "submerged", + "summenors", "summoners", + "summoenrs", "summoners", + "sunderlad", "sunderland", + "sunglases", "sunglasses", + "superfluu", "superfluous", + "superiour", "superior", + "superisor", "superiors", + "supermare", "supermarket", + "superviso", "supervision", + "suposedly", "supposedly", + "supportes", "supports", + "suppreses", "suppress", + "supressed", "suppressed", + "supresses", "suppresses", + "suprising", "surprising", + "suprizing", "surprising", + "supsicion", "suspicion", + "surounded", "surrounded", + "surprized", "surprised", + "surronded", "surrounded", + "surrouded", "surrounded", + "surrouned", "surround", + "survivers", "survivors", + "survivied", "survived", + "survivour", "survivor", + "susbcribe", "subscribe", + "susbtrate", "substrate", + "susncreen", "sunscreen", + "suspectes", "suspects", + "suspendes", "suspense", + "suspensie", "suspense", + "swastikka", "swastika", + "sweatshit", "sweatshirt", + "sweetheat", "sweetheart", + "switchign", "switching", + "swithcing", "switching", + "swtiching", "switching", + "sydnicate", "syndicate", + "sykwalker", "skywalker", + "sylablles", "syllables", + "syllabels", "syllables", + "symbolsim", "symbolism", + "symettric", "symmetric", + "symmetral", "symmetric", + "symmetria", "symmetrical", + "symoblism", "symbolism", + "sympathie", "sympathize", + "symphoney", "symphony", + "symptomes", "symptoms", + "symptomps", "symptoms", + "synagouge", "synagogue", + "syndacite", "syndicate", + "syndiacte", "syndicate", + "synidcate", "syndicate", + "synonymes", "synonyms", + "synonymis", "synonyms", + "synonymns", "synonyms", + "synonymos", "synonymous", + "synonymus", "synonyms", + "synopsies", "synopsis", + "syntehsis", "synthesis", + "syntehtic", "synthetic", + "syntethic", "synthetic", + "syphyllis", "syphilis", + "syracusae", "syracuse", + "sytrofoam", "styrofoam", + "tablespon", "tablespoon", + "tacticaly", "tactically", + "tanenhill", "tannehill", + "tannheill", "tannehill", + "targetted", "targeted", + "tawainese", "taiwanese", + "tawianese", "taiwanese", + 
"taxanomic", "taxonomic", + "teamfighs", "teamfights", + "teamfigth", "teamfight", + "teamifght", "teamfight", + "teampseak", "teamspeak", + "teaspooon", "teaspoon", + "techician", "technician", + "techinque", "technique", + "technolgy", "technology", + "teeangers", "teenagers", + "tehtering", "tethering", + "telegrpah", "telegraph", + "televsion", "television", + "temafight", "teamfight", + "tempaltes", "templates", + "temparate", "temperate", + "templaras", "templars", + "templares", "templars", + "temporali", "temporarily", + "tenacitiy", "tenacity", + "tensiones", "tensions", + "tentacels", "tentacles", + "tentacuel", "tentacle", + "tentalces", "tentacles", + "termianls", "terminals", + "terminato", "termination", + "terorrism", "terrorism", + "terorrist", "terrorist", + "terrabyte", "terabyte", + "terribley", "terribly", + "terriblly", "terribly", + "terriroty", "territory", + "terrorits", "terrorist", + "terrorsim", "terrorism", + "tesitcles", "testicles", + "tesitmony", "testimony", + "testicels", "testicles", + "testomony", "testimony", + "texturers", "textures", + "thankfuly", "thankfully", + "thankyoou", "thankyou", + "themselfs", "themselves", + "themselvs", "themselves", + "themslves", "themselves", + "theologia", "theological", + "therafter", "thereafter", + "therefoer", "therefor", + "therefour", "therefor", + "theroists", "theorists", + "thetering", "tethering", + "thirlling", "thrilling", + "thirteeen", "thirteen", + "thoecracy", "theocracy", + "thoerists", "theorists", + "thoroughy", "thoroughly", + "thoughout", "throughout", + "threatend", "threatened", + "throrough", "thorough", + "throughly", "thoroughly", + "througout", "throughout", + "thrusdays", "thursdays", + "thurdsays", "thursdays", + "thursdsay", "thursdays", + "thursters", "thrusters", + "tiawanese", "taiwanese", + "timestmap", "timestamp", + "tirangles", "triangles", + "tocuhdown", "touchdown", + "toghether", "together", + "tolerence", "tolerance", + "tommorrow", "tomorrow", + "torandoes", "tornadoes", + "torchligt", "torchlight", + "torelable", "tolerable", + "toritllas", "tortillas", + "tornaodes", "tornadoes", + "torpeados", "torpedoes", + "torrentas", "torrents", + "torrentes", "torrents", + "tortialls", "tortillas", + "tortillia", "tortilla", + "tortillla", "tortilla", + "tottehnam", "tottenham", + "tottenahm", "tottenham", + "tottneham", "tottenham", + "toturials", "tutorials", + "touchdwon", "touchdown", + "touristas", "tourists", + "touristes", "tourists", + "touristey", "touristy", + "touristly", "touristy", + "touristsy", "touristy", + "tournamet", "tournament", + "toxicitiy", "toxicity", + "trafficed", "trafficked", + "tragicaly", "tragically", + "traileras", "trailers", + "traingles", "triangles", + "trainwrek", "trainwreck", + "traitoris", "traitors", + "traitorus", "traitors", + "tramautic", "traumatic", + "tranlsate", "translate", + "transalte", "translate", + "transcris", "transcripts", + "transcrit", "transcript", + "transferd", "transferred", + "transfere", "transferred", + "transfors", "transforms", + "transfrom", "transform", + "transiten", "transient", + "transitin", "transitions", + "transofrm", "transform", + "transplat", "transplant", + "trasnfers", "transfers", + "trasnform", "transform", + "trasnport", "transport", + "traversie", "traverse", + "travestry", "travesty", + "treasuers", "treasures", + "treasurey", "treasury", + "treatmens", "treatments", + "treausres", "treasures", + "tremendos", "tremendous", + "trhilling", "thrilling", + "trhusters", "thrusters", + "triangels", 
"triangles", + "trianlges", "triangles", + "tribunaal", "tribunal", + "triguered", "triggered", + "trinagles", "triangles", + "truamatic", "traumatic", + "truthfuly", "truthfully", + "tunrtable", "turntable", + "turnaroud", "turnaround", + "turntabel", "turntable", + "typcially", "typically", + "tyrranies", "tyrannies", + "ubiquitos", "ubiquitous", + "ugprading", "upgrading", + "ukrainain", "ukrainian", + "ukrainias", "ukrainians", + "ukrainina", "ukrainian", + "ukrainisn", "ukrainians", + "ukrianian", "ukrainian", + "ulitmatum", "ultimatum", + "ulteriour", "ulterior", + "umbrellla", "umbrella", + "unaminous", "unanimous", + "unanmious", "unanimous", + "unanswerd", "unanswered", + "unanymous", "unanimous", + "unbannend", "unbanned", + "uncensord", "uncensored", + "uncomited", "uncommitted", + "undercunt", "undercut", + "underdong", "underdog", + "undergard", "undergrad", + "underming", "undermining", + "understad", "understands", + "underwaer", "underwear", + "underware", "underwear", + "undescore", "underscore", + "unforseen", "unforeseen", + "unfortune", "unfortunate", + "unfriendy", "unfriendly", + "unhealhty", "unhealthy", + "unheathly", "unhealthy", + "unhelathy", "unhealthy", + "unicornis", "unicorns", + "unicornus", "unicorns", + "uniformes", "uniforms", + "uninamous", "unanimous", + "unintuive", "unintuitive", + "uniquelly", "uniquely", + "unisntall", "uninstall", + "univerity", "university", + "universse", "universes", + "univesity", "university", + "unnistall", "uninstall", + "unoffical", "unofficial", + "unopenend", "unopened", + "unplayabe", "unplayable", + "unplesant", "unpleasant", + "unpopluar", "unpopular", + "unrankend", "unranked", + "unreliabe", "unreliable", + "unrwitten", "unwritten", + "untrianed", "untrained", + "unusaully", "unusually", + "unuseable", "unusable", + "unusuable", "unusable", + "unvierses", "universes", + "unweildly", "unwieldy", + "unwieldly", "unwieldy", + "unwirtten", "unwritten", + "unworthly", "unworthy", + "upcomming", "upcoming", + "upgarding", "upgrading", + "upgradded", "upgraded", + "uplfiting", "uplifting", + "uplifitng", "uplifting", + "urkainian", "ukrainian", + "utlimatum", "ultimatum", + "vacciante", "vaccinate", + "vaccinato", "vaccination", + "vacciners", "vaccines", + "vacestomy", "vasectomy", + "vaguaries", "vagaries", + "vaibility", "viability", + "vaildated", "validated", + "vairables", "variables", + "valdiated", "validated", + "valentein", "valentine", + "valentien", "valentine", + "valentins", "valentines", + "validitiy", "validity", + "valueable", "valuable", + "vanadlism", "vandalism", + "vandalsim", "vandalism", + "varaibles", "variables", + "varations", "variations", + "variantes", "variants", + "vascetomy", "vasectomy", + "vastecomy", "vasectomy", + "veganisim", "veganism", + "vegetarin", "vegetarians", + "vegitable", "vegetable", + "vehementy", "vehemently", + "veiwpoint", "viewpoint", + "velantine", "valentine", + "vendettta", "vendetta", + "venegance", "vengeance", + "veneuzela", "venezuela", + "venezeula", "venezuela", + "venezulea", "venezuela", + "vengaence", "vengeance", + "vengenace", "vengeance", + "ventilato", "ventilation", + "verbatium", "verbatim", + "verfiying", "verifying", + "verifiyng", "verifying", + "verisions", "revisions", + "versalite", "versatile", + "versatily", "versatility", + "versiones", "versions", + "versitale", "versatile", + "verstaile", "versatile", + "verticaly", "vertically", + "veryifing", "verifying", + "vicotrian", "victorian", + "vicotries", "victories", + "victoires", "victories", + 
"victorain", "victorian", + "victorina", "victorian", + "victorios", "victorious", + "videogaem", "videogame", + "videogams", "videogames", + "vidoegame", "videogame", + "viewpiont", "viewpoint", + "vigilence", "vigilance", + "vigliante", "vigilante", + "vigourous", "vigorous", + "viligante", "vigilante", + "viloently", "violently", + "vincinity", "vicinity", + "vioalting", "violating", + "violentce", "violence", + "virbation", "vibration", + "virgintiy", "virginity", + "virignity", "virginity", + "virutally", "virtually", + "visibiliy", "visibility", + "vitaminas", "vitamins", + "vitamines", "vitamins", + "vitrually", "virtually", + "vociemail", "voicemail", + "voilating", "violating", + "voilation", "violation", + "voilently", "violently", + "volatiliy", "volatility", + "voleyball", "volleyball", + "volontary", "voluntary", + "volonteer", "volunteer", + "volunatry", "voluntary", + "volunteed", "volunteered", + "vriginity", "virginity", + "wallpapes", "wallpapers", + "warrantly", "warranty", + "warrriors", "warriors", + "wavelengh", "wavelength", + "weakenend", "weakened", + "weakneses", "weakness", + "weaknesss", "weaknesses", + "wealtheir", "wealthier", + "weaponary", "weaponry", + "wedensday", "wednesday", + "wednesdsy", "wednesdays", + "wednessay", "wednesdays", + "wednseday", "wednesday", + "welathier", "wealthier", + "wendesday", "wednesday", + "wesbtrook", "westbrook", + "westernes", "westerners", + "westrbook", "westbrook", + "whereever", "wherever", + "whietlist", "whitelist", + "whilrwind", "whirlwind", + "whilsting", "whistling", + "whipsered", "whispered", + "whislting", "whistling", + "whisperes", "whispers", + "whitelsit", "whitelist", + "whitleist", "whitelist", + "whitsling", "whistling", + "whrilwind", "whirlwind", + "whsipered", "whispered", + "whtielist", "whitelist", + "widespred", "widespread", + "widesread", "widespread", + "windshied", "windshield", + "wintesses", "witnesses", + "wisconisn", "wisconsin", + "wishlisht", "wishlist", + "wishpered", "whispered", + "withdrawl", "withdrawal", + "withelist", "whitelist", + "witnesess", "witnesses", + "wolrdview", "worldview", + "wolrdwide", "worldwide", + "wonderlad", "wonderland", + "wordlview", "worldview", + "wordlwide", "worldwide", + "worhtless", "worthless", + "workfroce", "workforce", + "worldivew", "worldview", + "worldveiw", "worldview", + "worstened", "worsened", + "worthelss", "worthless", + "xenbolade", "xenoblade", + "xenobalde", "xenoblade", + "xenophoby", "xenophobia", + "xeonblade", "xenoblade", + "yementite", "yemenite", + "yorkshrie", "yorkshire", + "yorskhire", "yorkshire", + "yosemitie", "yosemite", + "youngents", "youngest", + "yourselvs", "yourselves", + "zimbabwae", "zimbabwe", + "zionistas", "zionists", + "zionistes", "zionists", + "abandond", "abandoned", + "abdomine", "abdomen", + "abilitiy", "ability", + "abilties", "abilities", + "abondons", "abandons", + "aboslute", "absolute", + "abosrbed", "absorbed", + "abruplty", "abruptly", + "abrutply", "abruptly", + "abscence", "absence", + "absestos", "asbestos", + "absoluts", "absolutes", + "absolvte", "absolve", + "absorbes", "absorbs", + "absoulte", "absolute", + "abstante", "bastante", + "abudance", "abundance", + "abudcted", "abducted", + "abundunt", "abundant", + "aburptly", "abruptly", + "abuseres", "abusers", + "abusrdly", "absurdly", + "academis", "academics", + "accademy", "academy", + "acccused", "accused", + "acceptes", "accepts", + "accidens", "accidents", + "accideny", "accidently", + "accoring", "according", + "accountt", "accountant", + 
"accpeted", "accepted", + "accuarcy", "accuracy", + "accumule", "accumulate", + "accusato", "accusation", + "accussed", "accused", + "acedamia", "academia", + "acedemic", "academic", + "acheived", "achieved", + "acheives", "achieves", + "achieval", "achievable", + "acnedote", "anecdote", + "acording", "according", + "acornyms", "acronyms", + "acousitc", "acoustic", + "acoutsic", "acoustic", + "acovados", "avocados", + "acquifer", "acquire", + "acquited", "acquitted", + "acquried", "acquired", + "acronmys", "acronyms", + "acronysm", "acronyms", + "acroynms", "acronyms", + "acrynoms", "acronyms", + "acsended", "ascended", + "actaully", "actually", + "activite", "activities", + "activits", "activities", + "activley", "actively", + "actresss", "actresses", + "actualey", "actualy", + "actualiy", "actuality", + "actualky", "actualy", + "actualmy", "actualy", + "actualoy", "actualy", + "actualpy", "actualy", + "actualty", "actualy", + "acutally", "actually", + "acutions", "auctions", + "adaptare", "adapter", + "adbandon", "abandon", + "adbucted", "abducted", + "addictes", "addicts", + "addictin", "addictions", + "addictis", "addictions", + "addional", "additional", + "addopted", "adopted", + "addresed", "addressed", + "adealide", "adelaide", + "adecuate", "adequate", + "adeilade", "adelaide", + "adeladie", "adelaide", + "adeliade", "adelaide", + "adeqaute", "adequate", + "adheisve", "adhesive", + "adhevise", "adhesive", + "adivsors", "advisors", + "admiraal", "admiral", + "adolence", "adolescent", + "adorbale", "adorable", + "adovcacy", "advocacy", + "adpaters", "adapters", + "adquired", "acquired", + "adquires", "acquires", + "adresing", "addressing", + "adressed", "addressed", + "adroable", "adorable", + "adultrey", "adultery", + "adventue", "adventures", + "adventus", "adventures", + "advertis", "adverts", + "advesary", "adversary", + "adviseer", "adviser", + "adviseur", "adviser", + "advocade", "advocated", + "advocats", "advocates", + "advsiors", "advisors", + "aethists", "atheists", + "affaires", "affairs", + "affilate", "affiliate", + "affintiy", "affinity", + "affleunt", "affluent", + "affulent", "affluent", + "afircans", "africans", + "africain", "african", + "afternon", "afternoon", + "againnst", "against", + "agnositc", "agnostic", + "agonstic", "agnostic", + "agravate", "aggravate", + "agreemnt", "agreement", + "agregate", "aggregate", + "agressie", "agressive", + "agressor", "aggressor", + "agrieved", "aggrieved", + "agruable", "arguable", + "agruably", "arguably", + "agrument", "argument", + "ahtletes", "athletes", + "aincents", "ancients", + "airboner", "airborne", + "airbrone", "airborne", + "aircarft", "aircraft", + "airplans", "airplanes", + "airporta", "airports", + "airpsace", "airspace", + "airscape", "airspace", + "akransas", "arkansas", + "alchemey", "alchemy", + "alchohol", "alcohol", + "alcholic", "alcoholic", + "alcoholc", "alcoholics", + "aldutery", "adultery", + "aleniate", "alienate", + "algoritm", "algorithm", + "alimoney", "alimony", + "alirghty", "alrighty", + "allaince", "alliance", + "alledged", "alleged", + "alledges", "alleges", + "allegedy", "allegedly", + "allegely", "allegedly", + "allegric", "allergic", + "allergey", "allergy", + "allianse", "alliances", + "alligned", "aligned", + "allinace", "alliance", + "allopone", "allophone", + "allready", "already", + "almigthy", "almighty", + "alpahbet", "alphabet", + "alrigthy", "alrighty", + "altantic", "atlantic", + "alterato", "alteration", + "alternar", "alternator", + "althetes", "athletes", + "althetic", 
"athletic", + "altriusm", "altruism", + "altrusim", "altruism", + "alturism", "altruism", + "aluminim", "aluminium", + "alumnium", "aluminum", + "alunimum", "aluminum", + "amatersu", "amateurs", + "amaterus", "amateurs", + "amendmet", "amendments", + "amercian", "american", + "amercias", "americas", + "amernian", "armenian", + "amethsyt", "amethyst", + "ameythst", "amethyst", + "ammended", "amended", + "amnestry", "amnesty", + "amoungst", "amongst", + "amplifiy", "amplify", + "amplifly", "amplify", + "amrchair", "armchair", + "amrenian", "armenian", + "amtheyst", "amethyst", + "analgoue", "analogue", + "analisys", "analysis", + "analitic", "analytic", + "analouge", "analogue", + "analysie", "analyse", + "analysit", "analyst", + "analyste", "analyse", + "analysze", "analyse", + "analzyed", "analyzed", + "anaolgue", "analogue", + "anarchim", "anarchism", + "anaylses", "analyses", + "anaylsis", "analysis", + "anaylsts", "analysts", + "anaylzed", "analyzed", + "ancedote", "anecdote", + "anceints", "ancients", + "ancinets", "ancients", + "andoirds", "androids", + "andorids", "androids", + "andriods", "androids", + "anecdots", "anecdotes", + "anectode", "anecdote", + "anedocte", "anecdote", + "aneroxia", "anorexia", + "aneroxic", "anorexic", + "angostic", "agnostic", + "angrilly", "angrily", + "anicents", "ancients", + "animatie", "animate", + "animatte", "animate", + "anlayses", "analyses", + "annoints", "anoints", + "annouced", "announced", + "annoucne", "announce", + "anntenas", "antennas", + "anoerxia", "anorexia", + "anoerxic", "anorexic", + "anonymos", "anonymous", + "anoreixa", "anorexia", + "anounced", "announced", + "anoxeria", "anorexia", + "anoxeric", "anorexic", + "answeres", "answers", + "antartic", "antarctic", + "antennea", "antenna", + "antennna", "antenna", + "anticipe", "anticipate", + "antiquae", "antique", + "antivirs", "antivirus", + "anwsered", "answered", + "anyhting", "anything", + "anyhwere", "anywhere", + "anyoneis", "anyones", + "anythign", "anything", + "anytying", "anything", + "aparment", "apartment", + "apartmet", "apartments", + "apenines", "apennines", + "aperutre", "aperture", + "aplhabet", "alphabet", + "apologes", "apologise", + "aposltes", "apostles", + "apostels", "apostles", + "appaluse", "applause", + "apparant", "apparent", + "appareal", "apparel", + "appareil", "apparel", + "apperead", "appeared", + "applaued", "applaud", + "appluase", "applause", + "appology", "apology", + "apporach", "approach", + "appraoch", "approach", + "apreture", "aperture", + "apsotles", "apostles", + "aqaurium", "aquarium", + "aqcuired", "acquired", + "aquaduct", "aqueduct", + "aquairum", "aquarium", + "aquaruim", "aquarium", + "aquiring", "acquiring", + "aquitted", "acquitted", + "arbitary", "arbitrary", + "arbitray", "arbitrary", + "arbiture", "arbiter", + "architet", "architect", + "archtype", "archetype", + "aremnian", "armenian", + "argentia", "argentina", + "argubaly", "arguably", + "arguemet", "arguement", + "arguemtn", "arguement", + "ariborne", "airborne", + "aricraft", "aircraft", + "ariplane", "airplane", + "ariports", "airports", + "arispace", "airspace", + "aristote", "aristotle", + "aritfact", "artifact", + "arizonia", "arizona", + "arkasnas", "arkansas", + "arlighty", "alrighty", + "armamant", "armament", + "armenain", "armenian", + "armenina", "armenian", + "armpitts", "armpits", + "armstrog", "armstrong", + "arpanoid", "paranoid", + "arpeture", "aperture", + "arragned", "arranged", + "arrestes", "arrests", + "arrestos", "arrests", + "arsenaal", "arsenal", + 
"artemios", "artemis", + "artemius", "artemis", + "arthrits", "arthritis", + "articule", "articulate", + "artifacs", "artifacts", + "artifcat", "artifact", + "artilley", "artillery", + "artisitc", "artistic", + "artistas", "artists", + "arugable", "arguable", + "arugably", "arguably", + "arugment", "argument", + "asborbed", "absorbed", + "asburdly", "absurdly", + "ascneded", "ascended", + "asissted", "assisted", + "askreddt", "askreddit", + "asnwered", "answered", + "aspectos", "aspects", + "asperges", "aspergers", + "assasins", "assassins", + "assemple", "assemble", + "assertin", "assertions", + "asshates", "asshats", + "asshatts", "asshats", + "assimile", "assimilate", + "assistat", "assistants", + "assitant", "assistant", + "assmeble", "assemble", + "assmebly", "assembly", + "asssasin", "assassin", + "assualts", "assaults", + "asteorid", "asteroid", + "asteriks", "asterisk", + "asteriod", "asteroid", + "asterois", "asteroids", + "astersik", "asterisk", + "asthetic", "aesthetic", + "astronat", "astronaut", + "asutrian", "austrian", + "atheisim", "atheism", + "atheistc", "atheistic", + "atheltes", "athletes", + "atheltic", "athletic", + "athenean", "athenian", + "athesits", "atheists", + "athetlic", "athletic", + "athients", "athiest", + "atittude", "attitude", + "atlantia", "atlanta", + "atmoizer", "atomizer", + "atomzier", "atomizer", + "atribute", "attribute", + "atrifact", "artifact", + "attackes", "attackers", + "attemped", "attempted", + "attemted", "attempted", + "attemtps", "attempts", + "attidute", "attitude", + "attitide", "attitude", + "attribue", "attribute", + "aucitons", "auctions", + "audactiy", "audacity", + "audcaity", "audacity", + "audeince", "audience", + "audiobok", "audiobook", + "austeriy", "austerity", + "austiran", "austrian", + "austitic", "autistic", + "austrain", "austrian", + "australa", "australian", + "austrija", "austria", + "austrila", "austria", + "autisitc", "autistic", + "autoattk", "autoattack", + "autograh", "autograph", + "automato", "automation", + "automony", "autonomy", + "autority", "authority", + "autsitic", "autistic", + "auxilary", "auxiliary", + "avacodos", "avocados", + "avaiable", "available", + "availabe", "available", + "availble", "available", + "avaition", "aviation", + "avalable", "available", + "avalance", "avalanche", + "avataras", "avatars", + "avatards", "avatars", + "avatares", "avatars", + "averadge", "averaged", + "avergaed", "averaged", + "avergaes", "averages", + "aviaiton", "aviation", + "avilable", "available", + "avnegers", "avengers", + "avodacos", "avocados", + "awekened", "weakened", + "awesomey", "awesomely", + "awfullly", "awfully", + "awkwardy", "awkwardly", + "awnsered", "answered", + "babysite", "babysitter", + "baceause", "because", + "bacehlor", "bachelor", + "bachleor", "bachelor", + "bacholer", "bachelor", + "backeast", "backseat", + "backerds", "backers", + "backfied", "backfield", + "backpacs", "backpacks", + "balcanes", "balances", + "balconey", "balcony", + "balconny", "balcony", + "ballistc", "ballistic", + "balnaced", "balanced", + "banannas", "bananas", + "banditas", "bandits", + "bandwith", "bandwidth", + "bangkock", "bangkok", + "baptisim", "baptism", + "barabric", "barbaric", + "barbarin", "barbarian", + "barbaris", "barbarians", + "bardford", "bradford", + "bargaing", "bargaining", + "baristia", "barista", + "barrakcs", "barracks", + "barrells", "barrels", + "basicaly", "basically", + "basiclay", "basicly", + "basicley", "basicly", + "basicliy", "basicly", + "batistia", "batista", + "battalin", 
"battalion", + "bayonent", "bayonet", + "beachead", "beachhead", + "beacuoup", "beaucoup", + "beardude", "bearded", + "beastley", "beastly", + "beatiful", "beautiful", + "beccause", "because", + "becuasse", "becuase", + "befirend", "befriend", + "befreind", "befriend", + "begginer", "beginner", + "begginig", "begging", + "begginng", "begging", + "begining", "beginning", + "beginnig", "beginning", + "behaivor", "behavior", + "behavios", "behaviours", + "behavoir", "behavior", + "behavour", "behavior", + "behngazi", "benghazi", + "behtesda", "bethesda", + "beleived", "believed", + "beleiver", "believer", + "beleives", "believes", + "beliefes", "beliefs", + "benefica", "beneficial", + "bengahzi", "benghazi", + "bengalas", "bengals", + "bengalos", "bengals", + "bengazhi", "benghazi", + "benghzai", "benghazi", + "bengzhai", "benghazi", + "benhgazi", "benghazi", + "benidect", "benedict", + "benifits", "benefits", + "berekley", "berkeley", + "berserkr", "berserker", + "beseiged", "besieged", + "betehsda", "bethesda", + "beteshda", "bethesda", + "bethdesa", "bethesda", + "bethedsa", "bethesda", + "bethseda", "bethesda", + "beyoncye", "beyonce", + "bibilcal", "biblical", + "bicylces", "bicycles", + "bigfooot", "bigfoot", + "bigining", "beginning", + "bilbical", "biblical", + "billboad", "billboard", + "bilsters", "blisters", + "bilzzard", "blizzard", + "bilzzcon", "blizzcon", + "biologia", "biological", + "birhtday", "birthday", + "birsbane", "brisbane", + "birthdsy", "birthdays", + "biseuxal", "bisexual", + "bisexaul", "bisexual", + "bitcions", "bitcoins", + "bitocins", "bitcoins", + "blackade", "blacked", + "blackend", "blacked", + "blackjak", "blackjack", + "blacklit", "blacklist", + "blatanty", "blatantly", + "blessins", "blessings", + "blessure", "blessing", + "bloggare", "blogger", + "bloggeur", "blogger", + "bluebery", "blueberry", + "bluetooh", "bluetooth", + "blugaria", "bulgaria", + "boardway", "broadway", + "bollcoks", "bollocks", + "bomberos", "bombers", + "bookmars", "bookmarks", + "boradway", "broadway", + "boredoom", "boredom", + "bouldore", "boulder", + "bounites", "bounties", + "boutnies", "bounties", + "boutqiue", "boutique", + "bouyancy", "buoyancy", + "boyfried", "boyfriend", + "bradcast", "broadcast", + "bradfrod", "bradford", + "brakeout", "breakout", + "braodway", "broadway", + "braverly", "bravery", + "breathis", "breaths", + "breathos", "breaths", + "brekaout", "breakout", + "brendamn", "brendan", + "breweres", "brewers", + "brewerey", "brewery", + "brewerks", "brewers", + "brewerys", "brewers", + "brigaged", "brigade", + "brigated", "brigade", + "brigthen", "brighten", + "briliant", "brilliant", + "brillant", "brilliant", + "bristool", "bristol", + "brithday", "birthday", + "brittish", "british", + "briusers", "bruisers", + "broadbad", "broadband", + "broadcat", "broadcasts", + "broadley", "broadly", + "brocolli", "broccoli", + "brodaway", "broadway", + "broncoes", "broncos", + "broswing", "browsing", + "browines", "brownies", + "browisng", "browsing", + "brtually", "brutally", + "brugundy", "burgundy", + "bruisend", "bruised", + "brussles", "brussels", + "brusting", "bursting", + "bubblews", "bubbles", + "buddhits", "buddhist", + "buddhsim", "buddhism", + "buddishm", "buddhism", + "buddisht", "buddhist", + "buglaria", "bulgaria", + "buhddism", "buddhism", + "buhddist", "buddhist", + "buidlers", "builders", + "buidling", "building", + "buildins", "buildings", + "buisness", "business", + "bulagria", "bulgaria", + "bulgaira", "bulgaria", + "buliders", "builders", + 
"buliding", "building", + "bulletts", "bullets", + "burisers", "bruisers", + "burriots", "burritos", + "burritio", "burrito", + "burritto", "burrito", + "burrtios", "burritos", + "burssels", "brussels", + "burtally", "brutally", + "burtsing", "bursting", + "busrting", "bursting", + "butcherd", "butchered", + "butterey", "buttery", + "butterfy", "butterfly", + "butterry", "buttery", + "butthoel", "butthole", + "bycicles", "bicycles", + "cabbagge", "cabbage", + "cabients", "cabinets", + "cabinate", "cabinet", + "cabinent", "cabinet", + "cabniets", "cabinets", + "caclulus", "calculus", + "cafetera", "cafeteria", + "caffinee", "caffeine", + "cahsiers", "cashiers", + "cainster", "canister", + "calander", "calendar", + "calcular", "calculator", + "calgarry", "calgary", + "calibler", "calibre", + "caloires", "calories", + "calrkson", "clarkson", + "calroies", "calories", + "calssify", "classify", + "calulate", "calculate", + "calymore", "claymore", + "camapign", "campaign", + "cambodai", "cambodia", + "camboida", "cambodia", + "cambpell", "campbell", + "cambride", "cambridge", + "cambrige", "cambridge", + "camoufle", "camouflage", + "campagin", "campaign", + "campaing", "campaign", + "campains", "campaigns", + "camperas", "campers", + "camperos", "campers", + "canadias", "canadians", + "cananbis", "cannabis", + "cancelas", "cancels", + "canceles", "cancels", + "cancells", "cancels", + "canceres", "cancers", + "cancerns", "cancers", + "cancerus", "cancers", + "candiate", "candidate", + "candiens", "candies", + "canistre", "canister", + "cannabil", "cannibal", + "cannbial", "cannibal", + "cannibas", "cannabis", + "cansiter", "canister", + "capitans", "captains", + "capitola", "capital", + "capitulo", "capitol", + "capmbell", "campbell", + "capsuels", "capsules", + "capsulse", "capsules", + "capsumel", "capsule", + "capteurs", "captures", + "captials", "capitals", + "captians", "captains", + "capusles", "capsules", + "caputres", "captures", + "cardboad", "cardboard", + "cardianl", "cardinal", + "cardnial", "cardinal", + "careflly", "carefully", + "carefull", "careful", + "carefuly", "carefully", + "caricate", "caricature", + "caridgan", "cardigan", + "caridnal", "cardinal", + "carinval", "carnival", + "carloina", "carolina", + "carnagie", "carnegie", + "carnigie", "carnegie", + "carnvial", "carnival", + "carrotts", "carrots", + "carrotus", "carrots", + "cartells", "cartels", + "cartmaan", "cartman", + "cartride", "cartridge", + "cartrige", "cartridge", + "carvinal", "carnival", + "casaulty", "casualty", + "casheirs", "cashiers", + "cashieer", "cashier", + "cashires", "cashiers", + "castleos", "castles", + "castlers", "castles", + "casulaty", "casualty", + "cataclym", "cataclysm", + "catagory", "category", + "cataline", "catiline", + "cataloge", "catalogue", + "catalsyt", "catalyst", + "cataylst", "catalyst", + "cathloic", "catholic", + "catlayst", "catalyst", + "caucasin", "caucasian", + "causalty", "casualty", + "cellural", "cellular", + "celullar", "cellular", + "celverly", "cleverly", + "cemetary", "cemetery", + "centeres", "centers", + "centerns", "centers", + "centrase", "centres", + "centrers", "centres", + "ceratine", "creatine", + "cerberal", "cerebral", + "cerbreus", "cerberus", + "cerbures", "cerberus", + "ceremone", "ceremonies", + "cerimony", "ceremony", + "ceromony", "ceremony", + "certainy", "certainty", + "challege", "challenge", + "chambear", "chamber", + "chambres", "chambers", + "champage", "champagne", + "chanisaw", "chainsaw", + "chanlder", "chandler", + "charcaol", "charcoal", + 
"chargehr", "charger", + "chargeur", "charger", + "chariman", "chairman", + "charimsa", "charisma", + "charmisa", "charisma", + "charocal", "charcoal", + "charsima", "charisma", + "chasiers", "cashiers", + "chassids", "chassis", + "chassies", "chassis", + "chatolic", "catholic", + "chcukles", "chuckles", + "checkare", "checker", + "checkear", "checker", + "cheesees", "cheeses", + "cheeseus", "cheeses", + "cheetoos", "cheetos", + "chemcial", "chemical", + "chemisty", "chemistry", + "chernobl", "chernobyl", + "chiansaw", "chainsaw", + "chidlish", "childish", + "chihuaha", "chihuahua", + "childres", "childrens", + "chillade", "chilled", + "chillead", "chilled", + "chillend", "chilled", + "chilvary", "chivalry", + "chinesse", "chinese", + "chivarly", "chivalry", + "chivlary", "chivalry", + "chlidish", "childish", + "chlroine", "chlorine", + "chmabers", "chambers", + "chocolae", "chocolates", + "chocolet", "chocolates", + "choesive", "cohesive", + "choicers", "choices", + "cholrine", "chlorine", + "chorline", "chlorine", + "chracter", "character", + "christin", "christian", + "chroline", "chlorine", + "chromose", "chromosome", + "chronice", "chronicles", + "chruches", "churches", + "chuckels", "chuckles", + "cielings", "ceilings", + "cigarete", "cigarettes", + "cigarets", "cigarettes", + "cilmbers", "climbers", + "cilnatro", "cilantro", + "ciltoris", "clitoris", + "circiuts", "circuits", + "circkets", "crickets", + "circlebs", "circles", + "circluar", "circular", + "ciricuit", "circuit", + "cirlcing", "circling", + "ciruclar", "circular", + "clannand", "clannad", + "clarifiy", "clarify", + "clarskon", "clarkson", + "clasical", "classical", + "classrom", "classroom", + "classsic", "classics", + "clausens", "clauses", + "cleanies", "cleanse", + "cleasner", "cleanser", + "clenaser", "cleanser", + "clevelry", "cleverly", + "clhorine", "chlorine", + "cliamtes", "climates", + "cliantro", "cilantro", + "clickare", "clicker", + "clickbat", "clickbait", + "clickear", "clicker", + "clientes", "clients", + "clincial", "clinical", + "clinicas", "clinics", + "clinicos", "clinics", + "clipboad", "clipboard", + "clitiros", "clitoris", + "closeing", "closing", + "closeley", "closely", + "clyamore", "claymore", + "clyinder", "cylinder", + "cmoputer", "computer", + "coindice", "coincide", + "collapes", "collapse", + "collares", "collars", + "collaris", "collars", + "collaros", "collars", + "collaspe", "collapse", + "colleage", "colleagues", + "collecte", "collective", + "collegue", "colleague", + "collisin", "collisions", + "collosal", "colossal", + "collpase", "collapse", + "coloardo", "colorado", + "colordao", "colorado", + "colubmia", "columbia", + "columnas", "columns", + "comadres", "comrades", + "comander", "commander", + "comandos", "commandos", + "comapany", "company", + "comapres", "compares", + "combiens", "combines", + "combinig", "combining", + "comediac", "comedic", + "comedias", "comedians", + "comestic", "cosmetic", + "comision", "commission", + "comiting", "committing", + "comitted", "committed", + "comittee", "committee", + "commandd", "commanded", + "commecen", "commence", + "commedic", "comedic", + "commense", "commenters", + "commenty", "commentary", + "commiest", "commits", + "commited", "committed", + "commitee", "committee", + "commites", "commits", + "committe", "committee", + "committs", "commits", + "commitus", "commits", + "commmand", "command", + "communit", "communist", + "companis", "companions", + "comparse", "compares", + "comparte", "compare", + "compasso", "compassion", + 
"compelte", "complete", + "compense", "compensate", + "complais", "complains", + "complane", "complacent", + "complate", "complacent", + "compleet", "complete", + "completi", "complexity", + "complets", "completes", + "complety", "completely", + "complexs", "complexes", + "complext", "complexity", + "complexy", "complexity", + "complict", "complicit", + "complier", "compiler", + "compones", "compose", + "componet", "components", + "componts", "compost", + "composet", "compost", + "composit", "compost", + "composte", "compose", + "comprese", "compressed", + "compreso", "compressor", + "compsers", "compress", + "comptown", "compton", + "compunet", "compute", + "computre", "compute", + "comradre", "comrade", + "comsetic", "cosmetic", + "conatins", "contains", + "conceald", "concealed", + "conceide", "conceived", + "conceled", "concede", + "concened", "concede", + "concepta", "conceptual", + "concered", "concede", + "concernt", "concert", + "concerte", "concrete", + "concesso", "concession", + "conceted", "concede", + "conceved", "concede", + "concibes", "concise", + "concider", "consider", + "concides", "concise", + "concious", "conscious", + "conclued", "conclude", + "concluse", "conclusive", + "concluso", "conclusion", + "concreet", "concrete", + "concrets", "concerts", + "condemnd", "condemned", + "conditon", "condition", + "condomes", "condoms", + "condomns", "condoms", + "conduict", "conduit", + "conected", "connected", + "conencts", "connects", + "confeses", "confess", + "confesos", "confess", + "confesso", "confession", + "configue", "configure", + "confilct", "conflict", + "confirmd", "confirmed", + "conflcit", "conflict", + "conflics", "conflicts", + "confrims", "confirms", + "conicide", "coincide", + "conlcude", "conclude", + "conqueor", "conquer", + "conquerd", "conquered", + "conqured", "conquered", + "conscent", "consent", + "consious", "conscious", + "constans", "constants", + "constast", "constants", + "constatn", "constant", + "constrat", "constraint", + "construt", "constructs", + "containd", "contained", + "containg", "containing", + "contaire", "containers", + "contanti", "contacting", + "contense", "contenders", + "contenst", "contents", + "contexta", "contextual", + "contextl", "contextual", + "contians", "contains", + "contined", "continued", + "contines", "continents", + "continum", "continuum", + "continus", "continues", + "continut", "continuity", + "continuu", "continuous", + "contracr", "contractor", + "contracs", "contracts", + "controll", "control", + "contruct", "construct", + "convenit", "convenient", + "convento", "convention", + "converst", "converts", + "convertr", "converter", + "conviced", "convinced", + "convicto", "conviction", + "convingi", "convincing", + "convinse", "convinces", + "cooldows", "cooldowns", + "coordine", "coordinate", + "coralina", "carolina", + "corollla", "corolla", + "corolloa", "corolla", + "corosion", "corrosion", + "corpsers", "corpses", + "corrdior", "corridor", + "correcty", "correctly", + "correnti", "correcting", + "corretly", "correctly", + "corrupto", "corruption", + "cosemtic", "cosmetic", + "cosutmes", "costumes", + "couldnot", "couldnt", + "coulored", "coloured", + "counries", "countries", + "counseil", "counsel", + "counsole", "counsel", + "counterd", "countered", + "countert", "counteract", + "countres", "counters", + "courtrom", "courtroom", + "courtsey", "courtesy", + "cousines", "cousins", + "cousings", "cousins", + "coutners", "counters", + "covanent", "covenant", + "coverted", "converted", + "coyotees", "coyotes", + 
"cpatains", "captains", + "cranbery", "cranberry", + "crayones", "crayons", + "creaeted", "created", + "createin", "creatine", + "createur", "creature", + "creatien", "creatine", + "creepgin", "creeping", + "cricling", "circling", + "cringely", "cringey", + "cringery", "cringey", + "criticas", "critics", + "critices", "critics", + "criticie", "criticise", + "criticim", "criticisms", + "criticis", "critics", + "criticms", "critics", + "criticos", "critics", + "criticts", "critics", + "criticus", "critics", + "critiera", "criteria", + "critized", "criticized", + "croatioa", "croatia", + "crossfie", "crossfire", + "crosshar", "crosshair", + "crosspot", "crosspost", + "crowbahr", "crowbar", + "cruasder", "crusader", + "cruciaal", "crucial", + "crucibel", "crucible", + "cruicble", "crucible", + "crusdaer", "crusader", + "crusiers", "cruisers", + "crusiing", "cruising", + "cruthces", "crutches", + "cthulhlu", "cthulhu", + "cthulluh", "cthulhu", + "cubpoard", "cupboard", + "cuddleys", "cuddles", + "culprint", "culprit", + "cultrual", "cultural", + "culutral", "cultural", + "cupbaord", "cupboard", + "cupborad", "cupboard", + "curcible", "crucible", + "curisers", "cruisers", + "curising", "cruising", + "currecny", "currency", + "currence", "currencies", + "currenly", "currently", + "currenty", "currently", + "cursader", "crusader", + "custcene", "cutscene", + "cutsceen", "cutscene", + "cutscens", "cutscenes", + "cutsence", "cutscene", + "cylcists", "cyclists", + "cylidner", "cylinder", + "cylindre", "cylinder", + "cynisicm", "cynicism", + "cyrstals", "crystals", + "dacquiri", "daiquiri", + "daimonds", "diamonds", + "dangeros", "dangers", + "dangerus", "dangers", + "darkenss", "darkness", + "darnkess", "darkness", + "dashboad", "dashboard", + "daugther", "daughter", + "deadlfit", "deadlift", + "deadlifs", "deadlifts", + "deafauts", "defaults", + "deafeted", "defeated", + "deafults", "defaults", + "dealying", "delaying", + "deamenor", "demeanor", + "deathcat", "deathmatch", + "debuffes", "debuffs", + "debufffs", "debuffs", + "decalred", "declared", + "decalres", "declares", + "decembre", "december", + "decidely", "decidedly", + "decieved", "deceived", + "decifits", "deficits", + "decipted", "depicted", + "declears", "declares", + "declinig", "declining", + "decmeber", "december", + "decribed", "described", + "decribes", "describes", + "dedicato", "dedication", + "deductie", "deductible", + "defautls", "defaults", + "defectos", "defects", + "defectus", "defects", + "defendas", "defends", + "defendes", "defenders", + "defendis", "defends", + "defendre", "defender", + "defendrs", "defends", + "defensea", "defenseman", + "defensen", "defenseman", + "defensie", "defensive", + "defetead", "defeated", + "deffined", "defined", + "deficiet", "deficient", + "definate", "definite", + "definaty", "definately", + "definety", "definetly", + "definito", "definition", + "definitv", "definitive", + "deflatin", "deflation", + "deflecto", "deflection", + "defualts", "defaults", + "degarded", "degraded", + "degenere", "degenerate", + "degraged", "degrade", + "degrated", "degrade", + "deisgned", "designed", + "deisgner", "designer", + "dekstops", "desktops", + "delcared", "declared", + "delcares", "declares", + "delepted", "depleted", + "delivere", "deliveries", + "delpeted", "depleted", + "delpoyed", "deployed", + "delyaing", "delaying", + "demandas", "demands", + "demandes", "demands", + "demenaor", "demeanor", + "democray", "democracy", + "demolito", "demolition", + "denseley", "densely", + "densitiy", "density", + 
"deomcrat", "democrat", + "deovtion", "devotion", + "departer", "departure", + "departue", "departure", + "depcited", "depicted", + "depelted", "depleted", + "dependat", "dependant", + "depictes", "depicts", + "depictin", "depictions", + "depolyed", "deployed", + "depositd", "deposited", + "depostis", "deposits", + "depresse", "depressive", + "depresso", "depression", + "derivate", "derivative", + "descened", "descend", + "descibed", "described", + "descirbe", "describe", + "descrise", "describes", + "desgined", "designed", + "desginer", "designer", + "desicive", "decisive", + "designad", "designated", + "designes", "designs", + "designet", "designated", + "desinged", "designed", + "desinger", "designer", + "desitned", "destined", + "desktiop", "desktop", + "desorder", "disorder", + "despides", "despised", + "despiste", "despise", + "destiney", "destiny", + "destinty", "destiny", + "destkops", "desktops", + "destorys", "destroys", + "destrose", "destroyers", + "destroyd", "destroyed", + "destroyr", "destroyers", + "detalied", "detailed", + "detectas", "detects", + "detectes", "detects", + "detectie", "detectives", + "determen", "determines", + "devasted", "devastated", + "develope", "develop", + "devialet", "deviate", + "deviatie", "deviate", + "devilers", "delivers", + "devloved", "devolved", + "devovled", "devolved", + "diaganol", "diagonal", + "diagnoal", "diagonal", + "diagnoes", "diagnose", + "diagnosi", "diagnostic", + "diagonse", "diagnose", + "diahrrea", "diarrhea", + "dialetcs", "dialects", + "dialgoue", "dialogue", + "dialouge", "dialogue", + "diarreah", "diarrhea", + "diarreha", "diarrhea", + "dichtomy", "dichotomy", + "dickisch", "dickish", + "dicovers", "discovers", + "dicovery", "discovery", + "dicussed", "discussed", + "diferent", "different", + "differnt", "different", + "difficut", "difficulty", + "diffrent", "different", + "diganose", "diagnose", + "dignitiy", "dignity", + "dimaonds", "diamonds", + "dinasour", "dinosaur", + "dinosaus", "dinosaurs", + "dinosuar", "dinosaur", + "dinsoaur", "dinosaur", + "dionsaur", "dinosaur", + "diphtong", "diphthong", + "diplomma", "diploma", + "dipthong", "diphthong", + "direclty", "directly", + "directin", "directions", + "directix", "directx", + "directos", "directors", + "directoy", "directory", + "directrx", "directx", + "dirfting", "drifting", + "disabeld", "disabled", + "disabels", "disables", + "disagred", "disagreed", + "disagres", "disagrees", + "disbaled", "disabled", + "disbales", "disables", + "disbelif", "disbelief", + "dischard", "discharged", + "dischare", "discharged", + "discound", "discounted", + "discoure", "discourse", + "discoved", "discovered", + "discreto", "discretion", + "discribe", "describe", + "disentry", "dysentery", + "disgiuse", "disguise", + "dishoner", "dishonored", + "dishonet", "dishonesty", + "dislikse", "dislikes", + "dismante", "dismantle", + "dismisse", "dismissive", + "disolved", "dissolved", + "dispacth", "dispatch", + "dispalys", "displays", + "dispence", "dispense", + "dispersa", "dispensary", + "displayd", "displayed", + "disposle", "dispose", + "disposte", "dispose", + "dispoves", "dispose", + "disptach", "dispatch", + "disricts", "districts", + "dissovle", "dissolve", + "distates", "distaste", + "distatse", "distaste", + "disticnt", "distinct", + "distorto", "distortion", + "distrcit", "district", + "districs", "districts", + "disturbd", "disturbed", + "disupted", "disputed", + "disuptes", "disputes", + "diversed", "diverse", + "diversiy", "diversify", + "dividens", "dividends", + "divintiy", 
"divinity", + "divisons", "divisions", + "doapmine", "dopamine", + "docrines", "doctrines", + "docrtine", "doctrine", + "doctines", "doctrines", + "doctirne", "doctrine", + "doctrins", "doctrines", + "dogamtic", "dogmatic", + "dolhpins", "dolphins", + "domapine", "dopamine", + "domecrat", "democrat", + "domiante", "dominate", + "dominato", "domination", + "dominats", "dominates", + "dominent", "dominant", + "dominoin", "dominion", + "donwload", "download", + "donwvote", "downvote", + "doomdsay", "doomsday", + "doosmday", "doomsday", + "doplhins", "dolphins", + "dopmaine", "dopamine", + "dormtund", "dortmund", + "dortumnd", "dortmund", + "dotrmund", "dortmund", + "douchely", "douchey", + "doucheus", "douches", + "dowloads", "downloads", + "downlaod", "download", + "downloas", "downloads", + "downstar", "downstairs", + "downvore", "downvoters", + "downvotr", "downvoters", + "downvots", "downvotes", + "draculea", "dracula", + "draculla", "dracula", + "dragones", "dragons", + "dragonus", "dragons", + "drfiting", "drifting", + "driectly", "directly", + "drifitng", "drifting", + "driveris", "drivers", + "drotmund", "dortmund", + "duaghter", "daughter", + "dumbbels", "dumbbells", + "dumptser", "dumpster", + "dumspter", "dumpster", + "dunegons", "dungeons", + "dungeoun", "dungeon", + "dungoens", "dungeons", + "dupicate", "duplicate", + "duplicas", "duplicates", + "dwarvens", "dwarves", + "dyanmics", "dynamics", + "dyanmite", "dynamite", + "dymanics", "dynamics", + "dymanite", "dynamite", + "dynastry", "dynasty", + "dysentry", "dysentery", + "dysphora", "dysphoria", + "earilest", "earliest", + "eatswood", "eastwood", + "eceonomy", "economy", + "ecidious", "deciduous", + "ecologia", "ecological", + "ecomonic", "economic", + "ecstacys", "ecstasy", + "ecstascy", "ecstasy", + "ecstasty", "ecstasy", + "ectastic", "ecstatic", + "editoras", "editors", + "editores", "editors", + "efficent", "efficient", + "egpytian", "egyptian", + "egyptain", "egyptian", + "egytpian", "egyptian", + "ehtereal", "ethereal", + "ehternet", "ethernet", + "eigtheen", "eighteen", + "electhor", "electro", + "electorn", "electron", + "elementy", "elementary", + "elephans", "elephants", + "elevatin", "elevation", + "elicided", "elicited", + "eligable", "eligible", + "elimiate", "eliminate", + "eliminas", "eliminates", + "elitisim", "elitism", + "elitistm", "elitism", + "ellected", "elected", + "embarass", "embarrass", + "embargos", "embargoes", + "embarras", "embarrass", + "embassay", "embassy", + "embassey", "embassy", + "embasssy", "embassy", + "emergend", "emerged", + "emergerd", "emerged", + "eminated", "emanated", + "emminent", "eminent", + "emmisary", "emissary", + "emmision", "emission", + "emmiting", "emitting", + "emmitted", "emitted", + "empathie", "empathize", + "empirial", "empirical", + "emulatin", "emulation", + "enahnces", "enhances", + "enchanct", "enchant", + "encolsed", "enclosed", + "endanged", "endangered", + "endevors", "endeavors", + "endevour", "endeavour", + "endlessy", "endlessly", + "endorces", "endorse", + "engeneer", "engineer", + "engeries", "energies", + "engineed", "engineered", + "engrames", "engrams", + "engramms", "engrams", + "enigneer", "engineer", + "enitrely", "entirely", + "enlcosed", "enclosed", + "enlsaved", "enslaved", + "ensalved", "enslaved", + "enterity", "entirety", + "entierly", "entirely", + "entierty", "entirety", + "entilted", "entitled", + "entirley", "entirely", + "entiteld", "entitled", + "entitity", "entity", + "entropay", "entropy", + "entrophy", "entropy", + "ephipany", 
"epiphany", + "epihpany", "epiphany", + "epilespy", "epilepsy", + "epilgoue", "epilogue", + "episdoes", "episodes", + "epitomie", "epitome", + "epliepsy", "epilepsy", + "epliogue", "epilogue", + "epsiodes", "episodes", + "epsresso", "espresso", + "eqaulity", "equality", + "eqaution", "equation", + "equailty", "equality", + "eraticly", "erratically", + "erroneos", "erroneous", + "errupted", "erupted", + "escalato", "escalation", + "esctatic", "ecstatic", + "esential", "essential", + "esitmate", "estimate", + "esperate", "seperate", + "esportes", "esports", + "estiamte", "estimate", + "estoeric", "esoteric", + "estonija", "estonia", + "estoniya", "estonia", + "etherael", "ethereal", + "etherent", "ethernet", + "ethicaly", "ethically", + "etiquete", "etiquette", + "etrailer", "retailer", + "eugencis", "eugenics", + "eugneics", "eugenics", + "euhporia", "euphoria", + "euhporic", "euphoric", + "euorpean", "european", + "euphoira", "euphoria", + "euphroia", "euphoria", + "euphroic", "euphoric", + "europian", "european", + "eurpoean", "european", + "evangers", "avengers", + "everyons", "everyones", + "evidencd", "evidenced", + "evidende", "evidenced", + "evloving", "evolving", + "evolveds", "evolves", + "evolveos", "evolves", + "evovling", "evolving", + "excecute", "execute", + "excedded", "exceeded", + "excelent", "excellent", + "exceptin", "exceptions", + "excerise", "exercise", + "excisted", "existed", + "exclusie", "exclusives", + "exculded", "excluded", + "exculdes", "excludes", + "exection", "execution", + "exectued", "executed", + "executie", "executive", + "executin", "execution", + "exellent", "excellent", + "exerbate", "exacerbate", + "exercide", "exercised", + "exercies", "exercise", + "exersice", "exercise", + "exersize", "exercise", + "exhalted", "exalted", + "exhaustn", "exhaustion", + "exhausto", "exhaustion", + "exicting", "exciting", + "exisitng", "existing", + "existane", "existance", + "existant", "existent", + "existend", "existed", + "exlcuded", "excluded", + "exlcudes", "excludes", + "exlporer", "explorer", + "exoticas", "exotics", + "exoticos", "exotics", + "expalins", "explains", + "expandas", "expands", + "expandes", "expands", + "expansie", "expansive", + "expectes", "expects", + "expectus", "expects", + "expedito", "expedition", + "expences", "expense", + "expensie", "expense", + "expensve", "expense", + "expertas", "experts", + "expertis", "experts", + "expertos", "experts", + "expireds", "expires", + "explaind", "explained", + "explaing", "explaining", + "expliots", "exploits", + "explodie", "explode", + "exploint", "exploit", + "explosie", "explosive", + "explosin", "explosions", + "exploted", "explode", + "expoldes", "explodes", + "expolits", "exploits", + "exportas", "exports", + "exportes", "exports", + "exportfs", "exports", + "exposees", "exposes", + "exposito", "exposition", + "expresse", "expressive", + "expresss", "expresses", + "expressy", "expressly", + "exressed", "expressed", + "exsitent", "existent", + "exsiting", "existing", + "extactly", "exactly", + "extemely", "extremely", + "extendes", "extends", + "extendos", "extends", + "extenion", "extension", + "extensie", "extensive", + "extensis", "extensions", + "extortin", "extortion", + "extracto", "extraction", + "extreems", "extremes", + "extremly", "extremely", + "eygptian", "egyptian", + "faboulus", "fabulous", + "fabricas", "fabrics", + "fabrices", "fabrics", + "fabricus", "fabrics", + "faceplam", "facepalm", + "facilisi", "facilities", + "faciltiy", "facility", + "facsists", "fascists", + "factores", 
"factors", + "factorys", "factors", + "factualy", "factually", + "faggotts", "faggots", + "faggotus", "faggots", + "falcones", "falcons", + "falgship", "flagship", + "faliures", "failures", + "falseley", "falsely", + "falshing", "flashing", + "falvored", "flavored", + "falvours", "flavours", + "familair", "familiar", + "famoulsy", "famously", + "fanatism", "fanaticism", + "fanatsic", "fanatics", + "fanserve", "fanservice", + "fantasty", "fantasy", + "farcking", "fracking", + "fascisim", "fascism", + "fashiond", "fashioned", + "fasicsts", "fascists", + "fatigure", "fatigue", + "favorits", "favorites", + "favourie", "favourites", + "feasable", "feasible", + "feasbile", "feasible", + "febraury", "february", + "februray", "february", + "feburary", "february", + "fedility", "fidelity", + "fedorahs", "fedoras", + "fedorans", "fedoras", + "feilding", "fielding", + "feisable", "feasible", + "feitshes", "fetishes", + "feltcher", "fletcher", + "felxible", "flexible", + "feminint", "femininity", + "feminsim", "feminism", + "feromone", "pheromone", + "fesiable", "feasible", + "festivas", "festivals", + "festivle", "festive", + "fictious", "fictitious", + "fideling", "fielding", + "fideltiy", "fidelity", + "fiedling", "fielding", + "fiedlity", "fidelity", + "fighitng", "fighting", + "figthing", "fighting", + "fileding", "fielding", + "fimilies", "families", + "finacial", "financial", + "fineshes", "finesse", + "fingersi", "fingertips", + "finnisch", "finnish", + "finsihes", "finishes", + "firebals", "fireballs", + "firendly", "friendly", + "firmwear", "firmware", + "firwmare", "firmware", + "flaghsip", "flagship", + "flamable", "flammable", + "flasghip", "flagship", + "flatterd", "flattered", + "flatteur", "flatter", + "flattire", "flatter", + "flavores", "flavors", + "flechter", "fletcher", + "flecther", "fletcher", + "flemmish", "flemish", + "flethcer", "fletcher", + "flexbile", "flexible", + "flexibel", "flexible", + "flippade", "flipped", + "flitered", "filtered", + "florecen", "florence", + "floridia", "florida", + "floruide", "fluoride", + "floruish", "flourish", + "flourine", "fluorine", + "floursih", "flourish", + "fluorish", "flourish", + "fluroide", "fluoride", + "folowing", "following", + "fontrier", "fontier", + "forasken", "forsaken", + "forbiden", "forbidden", + "foreamrs", "forearms", + "foreksin", "foreskin", + "forenics", "forensic", + "forenisc", "forensic", + "foresnic", "forensic", + "foreward", "foreword", + "foricbly", "forcibly", + "forigven", "forgiven", + "formatin", "formation", + "formelly", "formerly", + "formuals", "formulas", + "fornesic", "forensic", + "forresst", "forrest", + "forsekan", "forsaken", + "forsekin", "foreskin", + "forsenic", "forensic", + "forskaen", "forsaken", + "forsting", "frosting", + "fortitue", "fortitude", + "fortunae", "fortune", + "fortunte", "fortune", + "forumlas", "formulas", + "forunner", "forerunner", + "fossiles", "fossils", + "fossilis", "fossils", + "foundary", "foundry", + "fountian", "fountain", + "fourties", "forties", + "fowrards", "forwards", + "frackign", "fracking", + "framgent", "fragment", + "franches", "franchise", + "franchie", "franchises", + "franciso", "francisco", + "frankiln", "franklin", + "franlkin", "franklin", + "freckels", "freckles", + "freindly", "friendly", + "frequeny", "frequency", + "friendle", "friendlies", + "friendsi", "friendlies", + "frimware", "firmware", + "frogiven", "forgiven", + "frointer", "frontier", + "fromerly", "formerly", + "froniter", "frontier", + "fronteir", "frontier", + "frosaken", 
"forsaken", + "frutcose", "fructose", + "fucntion", "function", + "fufilled", "fulfilled", + "fulfiled", "fulfilled", + "fullfill", "fulfill", + "funciton", "function", + "fundirse", "fundies", + "funniliy", "funnily", + "funnilly", "funnily", + "furctose", "fructose", + "furition", "fruition", + "furuther", "further", + "futurers", "futures", + "futureus", "futures", + "gamemdoe", "gamemode", + "gamepaly", "gameplay", + "gamergat", "gamertag", + "gammeode", "gamemode", + "ganerate", "generate", + "garantee", "guarantee", + "gardient", "gradient", + "garfeild", "garfield", + "garfiled", "garfield", + "garflied", "garfield", + "garnison", "garrison", + "garrions", "garrison", + "garriosn", "garrison", + "garrsion", "garrison", + "gatherig", "gatherings", + "gauarana", "guaraná", + "gauntelt", "gauntlet", + "gauntles", "gauntlets", + "gaurdian", "guardian", + "gaurding", "guarding", + "gautnlet", "gauntlet", + "gemoetry", "geometry", + "generaly", "generally", + "generase", "generates", + "generats", "generates", + "genialia", "genitalia", + "genisues", "geniuses", + "genitala", "genitalia", + "genrates", "generates", + "gentials", "genitals", + "gentlemn", "gentlemen", + "genuises", "geniuses", + "geograpy", "geography", + "geomerty", "geometry", + "geomtery", "geometry", + "germanos", "germans", + "germanus", "germans", + "gernades", "grenades", + "giagbyte", "gigabyte", + "gigabtye", "gigabyte", + "gigaybte", "gigabyte", + "gigbayte", "gigabyte", + "gignatic", "gigantic", + "giltched", "glitched", + "giltches", "glitches", + "girafffe", "giraffe", + "girefing", "griefing", + "girlling", "grilling", + "gladiatr", "gladiator", + "glichted", "glitched", + "glichtes", "glitches", + "glicthed", "glitched", + "glicthes", "glitches", + "glitchey", "glitchy", + "glitchly", "glitchy", + "glitchty", "glitchy", + "glithced", "glitched", + "glithces", "glitches", + "gloablly", "globally", + "glodberg", "goldberg", + "glodfish", "goldfish", + "gloriuos", "glorious", + "gltiched", "glitched", + "gltiches", "glitches", + "gmaertag", "gamertag", + "goblings", "goblins", + "goddammn", "goddamn", + "goddammt", "goddammit", + "godesses", "goddesses", + "godlberg", "goldberg", + "godlfish", "goldfish", + "godounov", "godunov", + "godpseed", "godspeed", + "godspede", "godspeed", + "goldifsh", "goldfish", + "gonewidl", "gonewild", + "goodlcuk", "goodluck", + "goregous", "gorgeous", + "gorgoeus", "gorgeous", + "gorillia", "gorilla", + "gorillla", "gorilla", + "gospells", "gospels", + "gottleib", "gottlieb", + "gourmelt", "gourmet", + "gourment", "gourmet", + "gouvener", "governor", + "govement", "government", + "goverend", "governed", + "govermet", "goverment", + "governer", "governor", + "gradualy", "gradually", + "grafield", "garfield", + "grafitti", "graffiti", + "grahpics", "graphics", + "grahpite", "graphite", + "graident", "gradient", + "granolla", "granola", + "graphcis", "graphics", + "grapichs", "graphics", + "grappnel", "grapple", + "greandes", "grenades", + "greatful", "grateful", + "greeneer", "greener", + "greenhoe", "greenhouse", + "greenlad", "greenland", + "greenore", "greener", + "greusome", "gruesome", + "grieifng", "griefing", + "grifeing", "griefing", + "grizzlay", "grizzly", + "grizzley", "grizzly", + "grpahics", "graphics", + "grpahite", "graphite", + "gruseome", "gruesome", + "guantano", "guantanamo", + "guardain", "guardian", + "guardias", "guardians", + "guaridan", "guardian", + "guerrila", "guerrilla", + "guidence", "guidance", + "guiseppe", "giuseppe", + "guitards", "guitars", + 
"guitares", "guitars", + "guitarit", "guitarist", + "gullbile", "gullible", + "gunanine", "guanine", + "guniness", "guinness", + "gunniess", "guinness", + "guradian", "guardian", + "gurading", "guarding", + "gurantee", "guarantee", + "guresome", "gruesome", + "guttaral", "guttural", + "gutteral", "guttural", + "hacthing", "hatching", + "hafltime", "halftime", + "haircuit", "haircut", + "halfitme", "halftime", + "hallowen", "halloween", + "hamburgr", "hamburgers", + "hamitlon", "hamilton", + "hamliton", "hamilton", + "handcufs", "handcuffs", + "handeldy", "handedly", + "handlade", "handled", + "handlare", "handler", + "handledy", "handedly", + "hannbial", "hannibal", + "haording", "hoarding", + "hapening", "happening", + "happends", "happens", + "happenes", "happens", + "happilly", "happily", + "harldine", "hardline", + "harrased", "harassed", + "harrases", "harasses", + "hatchign", "hatching", + "hatesink", "heatsink", + "hathcing", "hatching", + "headachs", "headaches", + "headests", "headsets", + "headhsot", "headshot", + "headseat", "headset", + "healthit", "healthiest", + "heastink", "heatsink", + "heathern", "heathen", + "heatskin", "heatsink", + "heaviliy", "heavily", + "heavilly", "heavily", + "heavnely", "heavenly", + "hedeghog", "hedgehog", + "hegdehog", "hedgehog", + "heighest", "heights", + "heighted", "heightened", + "heirachy", "hierarchy", + "heistant", "hesitant", + "heistate", "hesitate", + "hellifre", "hellfire", + "helluvva", "helluva", + "helpfull", "helpful", + "heratige", "heritage", + "herclues", "hercules", + "heridity", "heredity", + "heroicas", "heroics", + "heroices", "heroics", + "heroicos", "heroics", + "heroicus", "heroics", + "hertiage", "heritage", + "herucles", "hercules", + "hestiant", "hesitant", + "hestiate", "hesitate", + "heveanly", "heavenly", + "hierachy", "hierarchy", + "hierarcy", "hierarchy", + "highlane", "highlander", + "hindiusm", "hinduism", + "hindusim", "hinduism", + "hinudism", "hinduism", + "hiptsers", "hipsters", + "hispanis", "hispanics", + "hispters", "hipsters", + "histroic", "historic", + "hodlings", "holdings", + "hoenstly", "honestly", + "hoildays", "holidays", + "holdiays", "holidays", + "hollywod", "hollywood", + "homeword", "homeworld", + "homineim", "hominem", + "homineum", "hominem", + "honeslty", "honestly", + "honeymon", "honeymoon", + "honsetly", "honestly", + "hopefuly", "hopefully", + "hopkings", "hopkins", + "hopsital", "hospital", + "horading", "hoarding", + "horzions", "horizons", + "hosptial", "hospital", + "hosteles", "hostels", + "hostiliy", "hostility", + "hotshoot", "hotshot", + "hotsport", "hotspot", + "hsyteria", "hysteria", + "htaching", "hatching", + "htiboxes", "hitboxes", + "huanting", "haunting", + "humaniod", "humanoid", + "humanite", "humanities", + "humantiy", "humanity", + "humerous", "humorous", + "huminoid", "humanoid", + "humitidy", "humidity", + "humoural", "humoral", + "humouros", "humorous", + "humurous", "humorous", + "hunderds", "hundreds", + "hundread", "hundred", + "hungarin", "hungarian", + "huntmsan", "huntsman", + "hutnsman", "huntsman", + "hybrides", "hybrids", + "hybridus", "hybrids", + "hydorgen", "hydrogen", + "hydratin", "hydration", + "hydregon", "hydrogen", + "hygience", "hygiene", + "hygienne", "hygiene", + "hyperbel", "hyperbole", + "hypocrit", "hypocrite", + "hyponsis", "hypnosis", + "hyrdogen", "hydrogen", + "icefrong", "icefrog", + "icelings", "ceilings", + "idaeidae", "idea", + "idealogy", "ideology", + "idealsim", "idealism", + "idenfity", "identify", + "idenitfy", "identify", + 
"identite", "identities", + "ideologe", "ideologies", + "illiegal", "illegal", + "illinios", "illinois", + "illionis", "illinois", + "illnesss", "illnesses", + "illumini", "illuminati", + "illustre", "illustrate", + "illution", "illusion", + "ilogical", "illogical", + "ilterate", "literate", + "imapired", "impaired", + "imgrants", "migrants", + "imigrant", "emigrant", + "immboile", "immobile", + "immenint", "imminent", + "immersie", "immerse", + "immersve", "immerse", + "immitate", "imitate", + "immoblie", "immobile", + "immortas", "immortals", + "impactes", "impacts", + "impactos", "impacts", + "imparied", "impaired", + "imperavi", "imperative", + "imperfet", "imperfect", + "implemet", "implements", + "implosed", "implode", + "impluses", "impulses", + "imporper", "improper", + "importas", "imports", + "importen", "importance", + "importes", "imports", + "imporved", "improved", + "imporves", "improves", + "impropre", "improper", + "improted", "imported", + "improvie", "improvised", + "impusles", "impulses", + "imrpoved", "improved", + "imrpoves", "improves", + "inbetwen", "inbetween", + "inclince", "incline", + "inclinde", "incline", + "includng", "including", + "incorect", "incorrect", + "incuding", "including", + "inculded", "included", + "indianas", "indians", + "indiands", "indians", + "indiania", "indiana", + "indianna", "indiana", + "indianos", "indians", + "indicato", "indication", + "indicats", "indicators", + "indonesa", "indonesia", + "indulgue", "indulge", + "infantis", "infants", + "infantus", "infants", + "infarred", "infrared", + "infectin", "infections", + "infermon", "inferno", + "infiltre", "infiltrate", + "infintie", "infinite", + "infintiy", "infinity", + "inflatie", "inflate", + "influens", "influences", + "informas", "informs", + "informis", "informs", + "infromal", "informal", + "infromed", "informed", + "ingenius", "ingenious", + "ingition", "ignition", + "ingorant", "ignorant", + "inheriet", "inherit", + "inherint", "inherit", + "inhumaan", "inhuman", + "inhumain", "inhuman", + "inifnite", "infinite", + "inifnity", "infinity", + "inisghts", "insights", + "initails", "initials", + "initaite", "initiate", + "initaled", "initialed", + "initally", "initially", + "initialy", "initially", + "initmacy", "intimacy", + "initmate", "intimate", + "injustie", "injustices", + "inlcuded", "included", + "inlcudes", "includes", + "innocens", "innocents", + "innocuos", "innocuous", + "innvoate", "innovate", + "inocence", "innocence", + "inpolite", "impolite", + "inpsired", "inspired", + "inquirey", "inquiry", + "inquirie", "inquire", + "inquiriy", "inquiry", + "inrested", "inserted", + "insanley", "insanely", + "insectes", "insects", + "insectos", "insects", + "insertas", "inserts", + "insertes", "inserts", + "insertos", "inserts", + "insidios", "insidious", + "insigths", "insights", + "insipred", "inspired", + "insipres", "inspires", + "insistas", "insists", + "insistes", "insists", + "insistis", "insists", + "insmonia", "insomnia", + "insomina", "insomnia", + "insonmia", "insomnia", + "inspried", "inspired", + "inspries", "inspires", + "instanse", "instances", + "instanty", "instantly", + "instered", "inserted", + "insticnt", "instinct", + "instincs", "instincts", + "institue", "institute", + "insultas", "insults", + "insultes", "insults", + "insultos", "insults", + "intamicy", "intimacy", + "intamite", "intimate", + "intendes", "intends", + "intendos", "intends", + "intentas", "intents", + "intented", "intended", + "interace", "interacted", + "interacs", "interacts", + 
"interect", "interacted", + "interent", "internet", + "interese", "interested", + "interfce", "interface", + "intergal", "integral", + "internts", "interns", + "internus", "interns", + "interpet", "interpret", + "interrim", "interim", + "interste", "interstate", + "interupt", "interrupt", + "intevene", "intervene", + "intially", "initially", + "intiials", "initials", + "intimaty", "intimately", + "intimide", "intimidate", + "intregal", "integral", + "intriuge", "intrigue", + "introdue", "introduces", + "introdus", "introduces", + "introvet", "introvert", + "intruige", "intrigue", + "intutive", "intuitive", + "inudstry", "industry", + "inventer", "inventor", + "invertes", "inverse", + "invincil", "invincible", + "invitato", "invitation", + "invloved", "involved", + "invloves", "involves", + "invovled", "involved", + "invovles", "involves", + "iranains", "iranians", + "iraninas", "iranians", + "iritable", "irritable", + "iritated", "irritated", + "ironicly", "ironically", + "irritato", "irritation", + "isalmist", "islamist", + "isarelis", "israelis", + "islamits", "islamist", + "islamsit", "islamist", + "islandes", "islanders", + "ismalist", "islamist", + "isntalls", "installs", + "isolatie", "isolate", + "israelli", "israeli", + "israleis", "israelis", + "isralies", "israelis", + "isrealis", "israelis", + "issueing", "issuing", + "italains", "italians", + "jaguards", "jaguars", + "jaguares", "jaguars", + "jailbrek", "jailbreak", + "jaimacan", "jamaican", + "jamacain", "jamaican", + "jamaicia", "jamaica", + "jamiacan", "jamaican", + "januaray", "january", + "janurary", "january", + "jeapardy", "jeopardy", + "jefferry", "jeffery", + "jefferty", "jeffery", + "jennigns", "jennings", + "jeoprady", "jeopardy", + "jepoardy", "jeopardy", + "jerusalm", "jerusalem", + "jewelrey", "jewelry", + "jewllery", "jewellery", + "joanthan", "jonathan", + "joepardy", "jeopardy", + "johanine", "johannine", + "jonatahn", "jonathan", + "journaal", "journal", + "journied", "journeyed", + "journies", "journeys", + "joysitck", "joystick", + "juadaism", "judaism", + "judaisim", "judaism", + "judgemet", "judgements", + "juducial", "judicial", + "jugnling", "jungling", + "junglign", "jungling", + "junlging", "jungling", + "justifiy", "justify", + "juveline", "juvenile", + "juvenlie", "juvenile", + "katemine", "ketamine", + "kennedey", "kennedy", + "ketmaine", "ketamine", + "keybaord", "keyboard", + "keyboars", "keyboards", + "keyborad", "keyboard", + "keychian", "keychain", + "kicthens", "kitchens", + "kindgoms", "kingdoms", + "kittiens", "kitties", + "knockbak", "knockback", + "knowlege", "knowledge", + "knuckels", "knuckles", + "koreanos", "koreans", + "kunckles", "knuckles", + "kurdisch", "kurdish", + "labatory", "lavatory", + "labenese", "lebanese", + "laboraty", "laboratory", + "laguages", "languages", + "landscae", "landscapes", + "langauge", "language", + "lanucher", "launcher", + "lanuches", "launches", + "laodouts", "loadouts", + "larwence", "lawrence", + "lasagnea", "lasagna", + "lasagnia", "lasagna", + "laucnhed", "launched", + "laucnher", "launcher", + "laucnhes", "launches", + "laundrey", "laundry", + "lawernce", "lawrence", + "lazyness", "laziness", + "leaglize", "legalize", + "lecteurs", "lectures", + "lecutres", "lectures", + "lefitsts", "leftists", + "leftsits", "leftists", + "legenday", "legendary", + "legionis", "legions", + "legitimt", "legitimate", + "lengthes", "lengths", + "lengthly", "lengthy", + "lentiles", "lentils", + "lentills", "lentils", + "lesbains", "lesbians", + "lesibans", "lesbians", + 
"levander", "lavender", + "levelign", "leveling", + "levetate", "levitate", + "leviathn", "leviathan", + "levleing", "leveling", + "liberato", "liberation", + "libertae", "liberate", + "libertea", "liberate", + "librarse", "libraries", + "licencie", "licence", + "licencse", "licence", + "liebrals", "liberals", + "liekable", "likeable", + "lifepsan", "lifespan", + "lifestel", "lifesteal", + "lifestye", "lifestyle", + "lighitng", "lighting", + "lightnig", "lightning", + "lightres", "lighters", + "lightrom", "lightroom", + "ligthers", "lighters", + "ligthing", "lighting", + "likebale", "likeable", + "limitant", "militant", + "limitato", "limitation", + "lincolin", "lincoln", + "lincolon", "lincoln", + "lineupes", "lineups", + "lingeire", "lingerie", + "lingiere", "lingerie", + "linnaena", "linnaean", + "lipstics", "lipsticks", + "liquidas", "liquids", + "liquides", "liquids", + "liquidos", "liquids", + "liscense", "license", + "lisenced", "silenced", + "listenes", "listens", + "listents", "listens", + "listners", "listeners", + "litature", "literature", + "litecion", "litecoin", + "liteicon", "litecoin", + "literaly", "literally", + "lithuana", "lithuania", + "litigato", "litigation", + "liverpol", "liverpool", + "locagion", "location", + "logtiech", "logitech", + "longitme", "longtime", + "longtiem", "longtime", + "looseley", "loosely", + "loreplay", "roleplay", + "luanched", "launched", + "luancher", "launcher", + "luanches", "launches", + "lubricat", "lubricant", + "lucifear", "lucifer", + "luckilly", "luckily", + "macarino", "macaroni", + "machiens", "machines", + "mackeral", "mackerel", + "macthups", "matchups", + "magasine", "magazine", + "magazins", "magazines", + "magentic", "magnetic", + "magicain", "magician", + "magisine", "magazine", + "magizine", "magazine", + "magnetis", "magnets", + "magnited", "magnitude", + "magnitue", "magnitude", + "mainfest", "manifest", + "maintian", "maintain", + "majoroty", "majority", + "makrsman", "marksman", + "malariya", "malaria", + "malasiya", "malaysia", + "malasyia", "malaysia", + "malayisa", "malaysia", + "malyasia", "malaysia", + "mamalian", "mammalian", + "manadrin", "mandarin", + "manaully", "manually", + "mandaste", "mandates", + "mandrain", "mandarin", + "mandrian", "mandarin", + "maneveur", "maneuver", + "manevuer", "maneuver", + "manfiest", "manifest", + "mangetic", "magnetic", + "manglade", "mangled", + "manifeso", "manifesto", + "manipule", "manipulate", + "manouver", "maneuver", + "manuales", "manuals", + "manuever", "maneuver", + "maraconi", "macaroni", + "maradeur", "marauder", + "maraduer", "marauder", + "maragret", "margaret", + "marbleds", "marbles", + "margerat", "margaret", + "margines", "margins", + "margings", "margins", + "marginis", "margins", + "marignal", "marginal", + "marilyin", "marilyn", + "marinens", "marines", + "markedet", "marketed", + "markeras", "markers", + "markerts", "markers", + "marniers", "mariners", + "marraige", "marriage", + "marryied", "married", + "marskman", "marksman", + "maruader", "marauder", + "marvelos", "marvelous", + "marxisim", "marxism", + "mascarra", "mascara", + "massacer", "massacre", + "massarce", "massacre", + "massasge", "massages", + "masscare", "massacre", + "masteris", "masteries", + "masturbe", "masturbate", + "materias", "materials", + "mathcups", "matchups", + "mathewes", "mathews", + "matieral", "material", + "matterss", "mattress", + "mauarder", "marauder", + "maximini", "maximizing", + "mayalsia", "malaysia", + "maybelle", "maybelline", + "maylasia", "malaysia", + "mccarhty", 
"mccarthy", + "mcgergor", "mcgregor", + "mchanics", "mechanics", + "mclarean", "mclaren", + "mcreggor", "mcgregor", + "meagtron", "megatron", + "meancing", "menacing", + "meaninng", "meaning", + "meatbals", "meatballs", + "mecahnic", "mechanic", + "mechanim", "mechanism", + "mechanis", "mechanics", + "medacine", "medicine", + "medatite", "meditate", + "medeival", "medieval", + "medevial", "medieval", + "mediavel", "medieval", + "medicaly", "medically", + "mediciad", "medicaid", + "medicins", "medicines", + "medicore", "mediocre", + "medievel", "medieval", + "mediocer", "mediocre", + "mediocry", "mediocrity", + "mediorce", "mediocre", + "meditato", "meditation", + "mediveal", "medieval", + "medoicre", "mediocre", + "meerkrat", "meerkat", + "megatorn", "megatron", + "meidcare", "medicare", + "meixcans", "mexicans", + "melboure", "melbourne", + "meltodwn", "meltdown", + "memoriez", "memorize", + "mencaing", "menacing", + "menstrul", "menstrual", + "mentiong", "mentioning", + "meoldies", "melodies", + "merchans", "merchants", + "mercurcy", "mercury", + "mercurey", "mercury", + "merficul", "merciful", + "merhcant", "merchant", + "mericful", "merciful", + "messgaed", "messaged", + "messiach", "messiah", + "metagaem", "metagame", + "metahpor", "metaphor", + "metamage", "metagame", + "methapor", "metaphor", + "metldown", "meltdown", + "metricas", "metrics", + "metrices", "metrics", + "metropos", "metropolis", + "mexcians", "mexicans", + "mexicain", "mexican", + "mhytical", "mythical", + "michagan", "michigan", + "michgian", "michigan", + "microtax", "microatx", + "microwae", "microwaves", + "midfeild", "midfield", + "midfiled", "midfield", + "midifeld", "midfield", + "migrains", "migraines", + "migriane", "migraine", + "milennia", "millennia", + "miligram", "milligram", + "miliitas", "militias", + "miliraty", "military", + "militais", "militias", + "millenia", "millennia", + "millenna", "millennia", + "miltiant", "militant", + "minature", "miniature", + "mindcrak", "mindcrack", + "minerial", "mineral", + "mingiame", "minigame", + "minimage", "minigame", + "minimals", "minimalist", + "minimalt", "minimalist", + "minimini", "minimizing", + "minimium", "minimum", + "miniscue", "miniscule", + "minsiter", "minister", + "minsitry", "ministry", + "miraculu", "miraculous", + "miralces", "miracles", + "mircales", "miracles", + "mircoatx", "microatx", + "mirgaine", "migraine", + "mirorred", "mirrored", + "misnadry", "misandry", + "misogynt", "misogynist", + "missigno", "mission", + "missiony", "missionary", + "misslies", "missiles", + "missorui", "missouri", + "misspeld", "misspelled", + "mistakey", "mistakenly", + "mistread", "mistreated", + "mobiltiy", "mobility", + "moderats", "moderates", + "modulair", "modular", + "moleculs", "molecules", + "momentos", "moments", + "momentus", "moments", + "monagomy", "monogamy", + "mongoles", "mongols", + "mongolos", "mongols", + "monitord", "monitored", + "monogmay", "monogamy", + "monolite", "monolithic", + "monologe", "monologue", + "monolopy", "monopoly", + "monoploy", "monopoly", + "monopols", "monopolies", + "monrachy", "monarchy", + "monstros", "monstrous", + "montaban", "montana", + "montains", "mountains", + "montanha", "montana", + "montania", "montana", + "montanna", "montana", + "montanta", "montana", + "montanya", "montana", + "montaran", "montana", + "monteize", "monetize", + "monteral", "montreal", + "montiors", "monitors", + "montnana", "montana", + "montypic", "monotypic", + "monumnet", "monument", + "moonligt", "moonlight", + "moprhine", "morphine", 
+ "morbildy", "morbidly", + "mordibly", "morbidly", + "morevoer", "moreover", + "morhpine", "morphine", + "moribdly", "morbidly", + "mormones", "mormons", + "mormonts", "mormons", + "moroever", "moreover", + "morotola", "motorola", + "morphein", "morphine", + "morriosn", "morrison", + "morrocco", "morocco", + "morrsion", "morrison", + "mortards", "mortars", + "mortarts", "mortars", + "moruning", "mourning", + "mosnters", "monsters", + "mosqueto", "mosquitoes", + "mosquite", "mosquitoes", + "mosqutio", "mosquito", + "motoroal", "motorola", + "mounment", "monument", + "mounring", "mourning", + "mountian", "mountain", + "moustace", "moustache", + "movesped", "movespeed", + "mozillia", "mozilla", + "mozillla", "mozilla", + "msytical", "mystical", + "mucnhies", "munchies", + "mudering", "murdering", + "muffings", "muffins", + "muffinus", "muffins", + "mulitple", "multiple", + "mulitply", "multiply", + "multiplr", "multiplier", + "multipls", "multiples", + "mundance", "mundane", + "mundande", "mundane", + "muniches", "munchies", + "murderes", "murders", + "murderus", "murders", + "muscluar", "muscular", + "muscualr", "muscular", + "musicaly", "musically", + "musuclar", "muscular", + "mutliple", "multiple", + "mutliply", "multiply", + "myhtical", "mythical", + "mysitcal", "mystical", + "mysogyny", "misogyny", + "mysteris", "mysteries", + "mythraic", "mithraic", + "nagivate", "navigate", + "naopleon", "napoleon", + "napcakes", "pancakes", + "naploeon", "napoleon", + "napoelon", "napoleon", + "napolean", "napoleon", + "napoloen", "napoleon", + "narcissm", "narcissism", + "narcisst", "narcissist", + "narcotis", "narcotics", + "narwharl", "narwhal", + "naseuous", "nauseous", + "nashvile", "nashville", + "nasueous", "nauseous", + "natievly", "natively", + "nationas", "nationals", + "nationsl", "nationals", + "nativley", "natively", + "natuilus", "nautilus", + "naturaly", "naturally", + "naturels", "natures", + "naturely", "naturally", + "naturens", "natures", + "naturual", "natural", + "nauesous", "nauseous", + "naughtly", "naughty", + "nauitlus", "nautilus", + "nauseuos", "nauseous", + "nautiuls", "nautilus", + "nautlius", "nautilus", + "nautulis", "nautilus", + "naviagte", "navigate", + "navigato", "navigation", + "nazereth", "nazareth", + "necesary", "necessary", + "neckbead", "neckbeard", + "needlees", "needles", + "nefarios", "nefarious", + "negativy", "negativity", + "neglectn", "neglecting", + "neglible", "negligible", + "neigbour", "neighbour", + "neolitic", "neolithic", + "netboook", "netbook", + "neuronas", "neurons", + "neutraal", "neutral", + "neutralt", "neutrality", + "neutraly", "neutrality", + "newcaste", "newcastle", + "nickanme", "nickname", + "nickmane", "nickname", + "nieghbor", "neighbor", + "nightime", "nighttime", + "nightley", "nightly", + "nightlie", "nightlife", + "nihilsim", "nihilism", + "nilihism", "nihilism", + "nirtogen", "nitrogen", + "nirvanna", "nirvana", + "nitorgen", "nitrogen", + "niusance", "nuisance", + "noctrune", "nocturne", + "noctunre", "nocturne", + "nocturen", "nocturne", + "nominato", "nomination", + "nonsence", "nonsense", + "nonsesne", "nonsense", + "noramlly", "normally", + "norhtern", "northern", + "normalis", "normals", + "normalls", "normals", + "normalos", "normals", + "northeat", "northeast", + "northren", "northern", + "northwet", "northwest", + "norwegin", "norwegian", + "nostalga", "nostalgia", + "nostirls", "nostrils", + "notabley", "notably", + "notablly", "notably", + "noteable", "notable", + "noteably", "notably", + "noticabe", "noticable", 
+ "notorios", "notorious", + "novmeber", "november", + "nromandy", "normandy", + "nuatilus", "nautilus", + "nuculear", "nuclear", + "nuetered", "neutered", + "nuisanse", "nuisance", + "nullifiy", "nullify", + "nurtient", "nutrient", + "nusaince", "nuisance", + "nusiance", "nuisance", + "nutirent", "nutrient", + "nutriens", "nutrients", + "nuturing", "nurturing", + "obdisian", "obsidian", + "obediant", "obedient", + "obession", "obsession", + "obilvion", "oblivion", + "obisdian", "obsidian", + "obsessie", "obsessive", + "obsessin", "obsession", + "obsidain", "obsidian", + "obstacal", "obstacle", + "obvilion", "oblivion", + "ocasions", "occasions", + "ocassion", "occasion", + "occaison", "occasion", + "occupato", "occupation", + "occuring", "occurring", + "octobear", "october", + "octopuns", "octopus", + "ofcoruse", "ofcourse", + "ofcoures", "ofcourse", + "ofcousre", "ofcourse", + "ofcrouse", "ofcourse", + "officals", "officials", + "officaly", "officially", + "offsited", "offside", + "ofocurse", "ofcourse", + "oligarcy", "oligarchy", + "olmypics", "olympics", + "olymipcs", "olympics", + "olypmics", "olympics", + "ommision", "omission", + "ommiting", "omitting", + "ommitted", "omitted", + "ongewild", "gonewild", + "onslaugt", "onslaught", + "operatie", "operative", + "opinoins", "opinions", + "oppinion", "opinion", + "opponant", "opponent", + "opposits", "opposites", + "oppossed", "opposed", + "oppresso", "oppression", + "optimaal", "optimal", + "optomism", "optimism", + "oragnise", "organise", + "orangerd", "orangered", + "orangers", "oranges", + "orangism", "organism", + "orchesta", "orchestra", + "ordianry", "ordinary", + "oreintal", "oriental", + "orgainse", "organise", + "orgainze", "organize", + "organims", "organism", + "organsie", "organise", + "organsim", "organism", + "organzie", "organize", + "orgasmes", "orgasms", + "orgasmos", "orgasms", + "orgasmus", "orgasms", + "orginize", "organise", + "orhtodox", "orthodox", + "oridnary", "ordinary", + "originas", "origins", + "origines", "origins", + "originsl", "originals", + "orphanes", "orphans", + "osbidian", "obsidian", + "othrodox", "orthodox", + "ourselvs", "ourselves", + "oustider", "outsider", + "outfeild", "outfield", + "outfidel", "outfield", + "outfiled", "outfield", + "outisder", "outsider", + "outplayd", "outplayed", + "outputed", "outputted", + "outsoure", "outsourced", + "overboad", "overboard", + "overclok", "overclock", + "overdrev", "overdrive", + "overhual", "overhaul", + "overlaod", "overload", + "overpiad", "overpaid", + "overules", "overuse", + "overwath", "overwatch", + "overwhem", "overwhelm", + "oximoron", "oxymoron", + "oylmpics", "olympics", + "pacakged", "packaged", + "packadge", "packaged", + "paficist", "pacifist", + "painfuly", "painfully", + "paitence", "patience", + "paitents", "patients", + "palidans", "paladins", + "palstics", "plastics", + "paltform", "platform", + "paltinum", "platinum", + "palyable", "playable", + "palyoffs", "playoffs", + "pancaeks", "pancakes", + "panckaes", "pancakes", + "pandoria", "pandora", + "pandorra", "pandora", + "panedmic", "pandemic", + "panethon", "pantheon", + "pankaces", "pancakes", + "panmedic", "pandemic", + "pantehon", "pantheon", + "panthoen", "pantheon", + "paradies", "paradise", + "paradyse", "parades", + "paragrah", "paragraph", + "paraiste", "parasite", + "paralell", "parallel", + "paralely", "parallelly", + "paralles", "parallels", + "parameds", "paramedics", + "paramter", "parameter", + "paranioa", "paranoia", + "paraniod", "paranoid", + "paraside", 
"paradise", + "parasits", "parasites", + "parastie", "parasite", + "parctise", "practise", + "paremsan", "parmesan", + "paristan", "partisan", + "parmasen", "parmesan", + "parmenas", "parmesan", + "parmsean", "parmesan", + "parnters", "partners", + "parralel", "parallel", + "parterns", "partners", + "partialy", "partially", + "partians", "partisan", + "partical", "particular", + "particel", "particle", + "partiets", "parties", + "partiots", "patriots", + "partnerd", "partnered", + "partsian", "partisan", + "passabel", "passable", + "passione", "passionate", + "passisve", "passives", + "passpost", "passports", + "passvies", "passives", + "passwors", "passwords", + "pasttime", "pastime", + "pastural", "pastoral", + "pateince", "patience", + "pateints", "patients", + "patethic", "pathetic", + "patheitc", "pathetic", + "patienty", "patiently", + "patirots", "patriots", + "patriarh", "patriarchy", + "patroits", "patriots", + "patrolls", "patrols", + "patronas", "patrons", + "patrones", "patrons", + "patronis", "patrons", + "patronos", "patrons", + "pattened", "patented", + "patterno", "patterson", + "pattersn", "patterson", + "pblisher", "publisher", + "peageant", "pageant", + "pebbleos", "pebbles", + "pebblers", "pebbles", + "pebblets", "pebbles", + "peciluar", "peculiar", + "pecuilar", "peculiar", + "peculair", "peculiar", + "peculure", "peculiar", + "peformed", "performed", + "peircing", "piercing", + "penaltis", "penalties", + "penatgon", "pentagon", + "penciles", "pencils", + "pendatic", "pedantic", + "pengiuns", "penguins", + "penisula", "peninsula", + "pensioen", "pension", + "pepperin", "pepperoni", + "perceded", "preceded", + "percente", "percentile", + "percieve", "perceive", + "percious", "precious", + "perclude", "preclude", + "perfecty", "perfectly", + "perfroms", "performs", + "perheaps", "perhaps", + "pericing", "piercing", + "peridoic", "periodic", + "perimetr", "perimeter", + "periodes", "periods", + "periodos", "periods", + "permanet", "permanent", + "permiere", "premiere", + "permises", "premises", + "permitas", "permits", + "permites", "permits", + "permitis", "permits", + "permitts", "permits", + "permiums", "premiums", + "peroidic", "periodic", + "perosnas", "personas", + "perpetue", "perpetuate", + "persaude", "persuade", + "perserve", "preserve", + "persisit", "persist", + "personel", "personnel", + "persones", "persons", + "personis", "persons", + "personsa", "personas", + "perstige", "prestige", + "persuaso", "persuasion", + "persuded", "persuaded", + "persuing", "pursuing", + "persuits", "pursuits", + "persumed", "presumed", + "pertaing", "pertaining", + "pertians", "pertains", + "pertinet", "pertinent", + "pervents", "prevents", + "perverst", "pervert", + "perviews", "previews", + "pervious", "previous", + "perxoide", "peroxide", + "pessiary", "pessary", + "petetion", "petition", + "petrolem", "petroleum", + "phantoom", "phantom", + "pharamcy", "pharmacy", + "pharmacs", "pharmacist", + "pharmsci", "pharmacist", + "phenomon", "phenomenon", + "phramacy", "pharmacy", + "phsyical", "physical", + "phsyique", "physique", + "phyiscal", "physical", + "phyisque", "physique", + "physcial", "physical", + "physicis", "physicians", + "physicks", "physics", + "physicts", "physicist", + "physqiue", "physique", + "picthers", "pitchers", + "pillards", "pillars", + "pillaris", "pillars", + "pinancle", "pinnacle", + "pinapple", "pineapple", + "pinnalce", "pinnacle", + "pinnaple", "pineapple", + "pinncale", "pinnacle", + "pinpiont", "pinpoint", + "pinteret", "pinterest", + "piolting", 
"piloting", + "pioneeer", "pioneer", + "pithcers", "pitchers", + "placebro", "placebo", + "placemet", "placements", + "planetas", "planets", + "planetos", "planets", + "plantiff", "plaintiff", + "plantium", "platinum", + "plasitcs", "plastics", + "platfrom", "platform", + "platimun", "platinum", + "platnium", "platinum", + "platnuim", "platinum", + "plausibe", "plausible", + "playbody", "playboy", + "playstye", "playstyle", + "pleasent", "pleasant", + "plehtora", "plethora", + "pleothra", "plethora", + "plethroa", "plethora", + "ploygamy", "polygamy", + "pnatheon", "pantheon", + "poeoples", "peoples", + "poingant", "poignant", + "pointeur", "pointer", + "pointure", "pointer", + "poisones", "poisons", + "poisonis", "poisons", + "poisonos", "poisons", + "poisonus", "poisons", + "polgyamy", "polygamy", + "polietly", "politely", + "politing", "piloting", + "politley", "politely", + "poltical", "political", + "poluting", "polluting", + "polution", "pollution", + "polygoon", "polygon", + "polymore", "polymer", + "pomotion", "promotion", + "popoulus", "populous", + "populair", "popular", + "populare", "popular", + "populary", "popularity", + "porcelan", "porcelain", + "porposes", "proposes", + "portabel", "portable", + "portalis", "portals", + "portalus", "portals", + "portayed", "portrayed", + "portgual", "portugal", + "portrais", "portraits", + "portrary", "portray", + "portrayl", "portrayal", + "portriat", "portrait", + "posessed", "possessed", + "posesses", "possesses", + "posioned", "poisoned", + "positivs", "positives", + "positivy", "positivity", + "possable", "possible", + "possably", "possibly", + "possbily", "possibly", + "posseses", "possesses", + "possesse", "possessive", + "possesss", "possesses", + "potrayed", "portrayed", + "poverful", "powerful", + "powerded", "powdered", + "powerpot", "powerpoint", + "pracitse", "practise", + "practial", "practical", + "practies", "practise", + "pratcise", "practise", + "praticle", "particle", + "prceeded", "preceded", + "preadtor", "predator", + "preample", "preamble", + "preceeds", "precedes", + "precisie", "precise", + "precisly", "precisely", + "precisou", "precious", + "preculde", "preclude", + "predicat", "predict", + "predicte", "predictive", + "preferas", "prefers", + "prefered", "preferred", + "preferes", "prefers", + "preferis", "prefers", + "preferrs", "prefers", + "preimere", "premiere", + "preimums", "premiums", + "preiodic", "periodic", + "preivews", "previews", + "prejudis", "prejudices", + "prelayed", "replayed", + "premeire", "premiere", + "premesis", "premises", + "premiare", "premier", + "premines", "premise", + "premuims", "premiums", + "preorded", "preordered", + "preordes", "preorders", + "preoxide", "peroxide", + "prepaird", "prepaid", + "preqeuls", "prequels", + "prequles", "prequels", + "prescrie", "prescribed", + "presense", "presence", + "presenst", "presets", + "presidet", "presidents", + "presists", "persists", + "presitge", "prestige", + "presonas", "personas", + "presuade", "persuade", + "pretador", "predator", + "pretains", "pertains", + "preveiws", "previews", + "preverse", "perverse", + "previwes", "previews", + "pricipal", "principal", + "priciple", "principle", + "priemere", "premiere", + "priestes", "priests", + "primaris", "primaries", + "primarly", "primarily", + "princila", "principals", + "principl", "principals", + "prisitne", "pristine", + "probelms", "problems", + "probleem", "problem", + "procalim", "proclaim", + "proccess", "process", + "proceded", "proceeded", + "proceder", "procedure", + "procedes", 
"proceeds", + "procedue", "procedure", + "proceeed", "proceed", + "procesed", "proceeds", + "processs", "processes", + "proclami", "proclaim", + "procliam", "proclaim", + "procotol", "protocol", + "prodcuts", "products", + "producto", "production", + "profesor", "professor", + "proficit", "proficient", + "profilic", "prolific", + "progroms", "pogroms", + "prohibis", "prohibits", + "prohpecy", "prophecy", + "prohpets", "prophets", + "projecte", "projectile", + "projecto", "projection", + "prolouge", "prologue", + "promplty", "promptly", + "promptes", "prompts", + "promptus", "prompts", + "promtply", "promptly", + "pronoune", "pronounced", + "propechy", "prophecy", + "propehcy", "prophecy", + "propehts", "prophets", + "prophacy", "prophecy", + "propmted", "prompted", + "propmtly", "promptly", + "proponet", "proponents", + "proposse", "proposes", + "proposte", "propose", + "proprety", "property", + "propsect", "prospect", + "prosepct", "prospect", + "prostite", "prostitute", + "protable", "portable", + "protecte", "protective", + "protiens", "proteins", + "protines", "proteins", + "protocal", "protocol", + "prototye", "prototype", + "protrait", "portrait", + "protrays", "portrays", + "protugal", "portugal", + "proverai", "proverbial", + "providee", "providence", + "proximty", "proximity", + "pruchase", "purchase", + "pryamids", "pyramids", + "ptichers", "pitchers", + "pubisher", "publisher", + "publiser", "publisher", + "puinsher", "punisher", + "pulisher", "publisher", + "pumkpins", "pumpkins", + "pumpinks", "pumpkins", + "pumpknis", "pumpkins", + "punshier", "punisher", + "punsiher", "punisher", + "punsihes", "punishes", + "purcahse", "purchase", + "pyramind", "pyramid", + "pyrimads", "pyramids", + "pyrmaids", "pyramids", + "qauntity", "quantity", + "qualifiy", "qualify", + "quanitfy", "quantify", + "quantaty", "quantity", + "quantite", "quantities", + "quantuum", "quantum", + "quarante", "quarantine", + "quartery", "quarterly", + "qucikest", "quickest", + "queation", "equation", + "quention", "quentin", + "quickets", "quickest", + "quicklyu", "quickly", + "rabbitos", "rabbits", + "rabbitts", "rabbits", + "racistas", "racists", + "racistes", "racists", + "radaince", "radiance", + "rahpsody", "rhapsody", + "raidance", "radiance", + "railraod", "railroad", + "randomes", "randoms", + "randomez", "randomized", + "randomns", "randoms", + "randomrs", "randoms", + "randomus", "randoms", + "raosting", "roasting", + "raphsody", "rhapsody", + "raptores", "raptors", + "raspbery", "raspberry", + "rationel", "rationale", + "realible", "reliable", + "realibly", "reliably", + "realiest", "earliest", + "realisim", "realism", + "realisme", "realise", + "realistc", "realistic", + "realiste", "realise", + "realoded", "reloaded", + "realsied", "realised", + "realtion", "relation", + "realtive", "relative", + "reamined", "remained", + "reapired", "repaired", + "reaplugs", "earplugs", + "reaserch", "research", + "reasonal", "reasonably", + "reatiler", "retailer", + "reaveled", "revealed", + "rebellis", "rebellious", + "reboudns", "rebounds", + "rebounce", "rebound", + "rebuildt", "rebuilt", + "rebuplic", "republic", + "receeded", "receded", + "recepits", "receipts", + "receptie", "receptive", + "receptos", "receptors", + "receving", "receiving", + "recident", "resident", + "reciding", "residing", + "recieved", "received", + "reciever", "receiver", + "recieves", "receives", + "recipees", "recipes", + "recipets", "recipes", + "recogise", "recognise", + "recogize", "recognize", + "recognie", "recognizes", + 
"recomend", "recommend", + "recommed", "recommend", + "reconnet", "reconnect", + "rectange", "rectangle", + "rectifiy", "rectify", + "recuring", "recurring", + "recurits", "recruits", + "redeisgn", "redesign", + "redemeed", "redeemed", + "redesgin", "redesign", + "redesing", "redesign", + "reedemed", "redeemed", + "refeeres", "referees", + "refelcts", "reflects", + "refelxes", "reflexes", + "referede", "referee", + "referene", "referee", + "referens", "references", + "referere", "referee", + "referign", "refering", + "refering", "referring", + "refernce", "references", + "reffered", "referred", + "refilles", "refills", + "refillls", "refills", + "reflecte", "reflective", + "reflecto", "reflection", + "reformes", "reforms", + "refreing", "refering", + "refrence", "reference", + "refreshd", "refreshed", + "refreshr", "refresher", + "refromed", "reformed", + "regardes", "regards", + "regenade", "renegade", + "regenere", "regenerate", + "regiones", "regions", + "regisrty", "registry", + "registed", "registered", + "regresas", "regress", + "regreses", "regress", + "regresos", "regress", + "regresse", "regressive", + "regresso", "regression", + "regrests", "regress", + "regretts", "regrets", + "regsitry", "registry", + "regualrs", "regulars", + "regualte", "regulate", + "reguarly", "regularly", + "regulary", "regularly", + "regulatr", "regulator", + "regulats", "regulators", + "rehersal", "rehearsal", + "rehtoric", "rhetoric", + "reiceved", "recieved", + "reigment", "regiment", + "reigonal", "regional", + "rekenton", "renekton", + "relaible", "reliable", + "relaibly", "reliably", + "relaised", "realised", + "relaoded", "reloaded", + "relasped", "relapsed", + "relatabe", "relatable", + "relateds", "relates", + "relativy", "relativity", + "relavent", "relevant", + "relected", "reelected", + "relegato", "relegation", + "releived", "relieved", + "releiver", "reliever", + "relevent", "relevant", + "relfects", "reflects", + "relfexes", "reflexes", + "reliased", "realised", + "religous", "religious", + "relpased", "relapsed", + "remainds", "remains", + "remainig", "remaining", + "remannts", "remnants", + "remarkes", "remarks", + "remembed", "remembered", + "remembee", "remembered", + "rememebr", "remember", + "remenant", "remnant", + "reminent", "remnant", + "remmeber", "remember", + "remotley", "remotely", + "renderes", "renders", + "reneagde", "renegade", + "renetkon", "renekton", + "renewabe", "renewables", + "renketon", "renekton", + "renmants", "remnants", + "renoylds", "reynolds", + "renteris", "renters", + "renyolds", "reynolds", + "reowrked", "reworked", + "repaires", "repairs", + "repalces", "replaces", + "reparied", "repaired", + "repblics", "republics", + "repbulic", "republic", + "repeatae", "repeatable", + "repeates", "repeats", + "repetion", "repetition", + "repharse", "rephrase", + "repitles", "reptiles", + "replased", "relapsed", + "replayes", "replays", + "replicae", "replicated", + "replubic", "republic", + "reportes", "reporters", + "reposity", "repository", + "repostas", "reposts", + "repostes", "reposts", + "repostig", "reposting", + "repostus", "reposts", + "represet", "represents", + "represso", "repression", + "reprhase", "rephrase", + "repsects", "respects", + "repsonds", "responds", + "repsonse", "response", + "repsoted", "reposted", + "repubics", "republics", + "republis", "republics", + "repulics", "republics", + "repulsie", "repulsive", + "requiers", "requires", + "requieum", "requiem", + "requilme", "requiem", + "requried", "required", + "requries", "requires", + 
"rescuecd", "rescued", + "researce", "researcher", + "resembes", "resembles", + "reserach", "research", + "resevoir", "reservoir", + "resgined", "resigned", + "residude", "residue", + "residule", "residue", + "resinged", "resigned", + "resistas", "resists", + "resisten", "resistance", + "resistes", "resists", + "resloved", "resolved", + "resloves", "resolves", + "resmeble", "resemble", + "resotred", "restored", + "resourse", "resources", + "resovled", "resolved", + "resovles", "resolves", + "respecte", "respective", + "respesct", "respects", + "responce", "response", + "responed", "respond", + "respones", "response", + "responsd", "responds", + "respoted", "reposted", + "restanti", "restarting", + "restrait", "restraint", + "restrics", "restricts", + "resuable", "reusable", + "retailes", "retailers", + "retalier", "retailer", + "rethoric", "rhetoric", + "retirase", "retires", + "retireds", "retires", + "retireus", "retires", + "retireve", "retrieve", + "retreive", "retrieve", + "retrived", "retrieved", + "retunred", "returned", + "reuasble", "reusable", + "reveales", "reveals", + "reveiwed", "reviewed", + "reveiwer", "reviewer", + "revelaed", "revealed", + "revelant", "relevant", + "revelead", "revealed", + "reverals", "reversal", + "reviewes", "reviewers", + "revlover", "revolver", + "revloves", "revolves", + "revovler", "revolver", + "revovles", "revolves", + "rewatchd", "rewatched", + "rewitten", "rewritten", + "rewritte", "rewrite", + "rewtched", "wretched", + "reynlods", "reynolds", + "reyonlds", "reynolds", + "rhaposdy", "rhapsody", + "rhaspody", "rhapsody", + "rheotric", "rhetoric", + "righteos", "righteous", + "rigntone", "ringtone", + "ringotne", "ringtone", + "ritalian", "ritalin", + "rivalrly", "rivalry", + "roachers", "roaches", + "robberts", "robbers", + "robberys", "robbers", + "robocoop", "robocop", + "robocorp", "robocop", + "robocoup", "robocop", + "roelplay", "roleplay", + "roganism", "organism", + "rolepaly", "roleplay", + "romaanin", "romanian", + "romainan", "romanian", + "romanain", "romanian", + "romanica", "romania", + "rosettta", "rosetta", + "rostaing", "roasting", + "routeros", "routers", + "rutgerus", "rutgers", + "ryenolds", "reynolds", + "sacrifie", "sacrifice", + "saddends", "saddens", + "saddenes", "saddens", + "sadisitc", "sadistic", + "salaires", "salaries", + "sandales", "sandals", + "sandalls", "sandals", + "sandstom", "sandstorm", + "sanotrum", "santorum", + "santourm", "santorum", + "santroum", "santorum", + "santurom", "santorum", + "sapcebar", "spacebar", + "sapphrie", "sapphire", + "sarcasam", "sarcasm", + "sarcasim", "sarcasm", + "sarcastc", "sarcastic", + "sargeant", "sergeant", + "sasauges", "sausages", + "sasuages", "sausages", + "satelite", "satellite", + "satellie", "satellites", + "saterday", "saturday", + "satifies", "satisfies", + "satisfiy", "satisfy", + "satrical", "satirical", + "satruday", "saturday", + "saturdsy", "saturdays", + "sawstika", "swastika", + "scandlas", "scandals", + "scannign", "scanning", + "scarmble", "scramble", + "scepture", "scepter", + "schedual", "schedule", + "schoalrs", "scholars", + "scholary", "scholarly", + "schoodle", "schooled", + "scientic", "scientific", + "scientis", "scientist", + "scoprion", "scorpion", + "scorates", "socrates", + "scoripon", "scorpion", + "scorpoin", "scorpion", + "scostman", "scotsman", + "scratchs", "scratches", + "scriptue", "scriptures", + "scriptus", "scripts", + "scritped", "scripted", + "scroates", "socrates", + "scropion", "scorpion", + "scrpited", "scripted", + "scruitny", 
"scrutiny", + "scrunity", "scrutiny", + "sctosman", "scotsman", + "sculpter", "sculpture", + "scurtiny", "scrutiny", + "seahakws", "seahawks", + "seahwaks", "seahawks", + "seantors", "senators", + "sebastin", "sebastian", + "seceeded", "succeeded", + "secertly", "secretly", + "secrelty", "secretly", + "secretas", "secrets", + "secretos", "secrets", + "secruity", "security", + "secuirty", "security", + "sedereal", "sidereal", + "seldomly", "seldom", + "selectie", "selective", + "selfiers", "selfies", + "semestre", "semester", + "semseter", "semester", + "senarios", "scenarios", + "senerity", "serenity", + "seniores", "seniors", + "senisble", "sensible", + "sensibel", "sensible", + "sensores", "sensors", + "senstive", "sensitive", + "sentaors", "senators", + "sentiers", "sentries", + "sentinet", "sentient", + "sentinte", "sentient", + "sentires", "sentries", + "sentreis", "sentries", + "separato", "separation", + "separete", "seperate", + "sepearte", "seperate", + "seperate", "separate", + "seplling", "spelling", + "sepreate", "seperate", + "sepulcre", "sepulchre", + "serached", "searched", + "seraches", "searches", + "serentiy", "serenity", + "sergaent", "sergeant", + "settigns", "settings", + "settting", "setting", + "seventen", "seventeen", + "severeal", "several", + "severeid", "severed", + "severide", "severed", + "severley", "severely", + "sexaully", "sexually", + "seziures", "seizures", + "sezuires", "seizures", + "shadoloo", "shadaloo", + "shangahi", "shanghai", + "shanghia", "shanghai", + "sharplay", "sharply", + "sharpley", "sharply", + "shawshak", "shawshank", + "shcolars", "scholars", + "shcooled", "schooled", + "sheilded", "shielded", + "shelterd", "sheltered", + "shelvers", "shelves", + "shelveys", "shelves", + "sherlcok", "sherlock", + "shetlers", "shelters", + "shfiting", "shifting", + "shifitng", "shifting", + "shifteer", "shifter", + "shileded", "shielded", + "shineing", "shining", + "shitstom", "shitstorm", + "shittoon", "shitton", + "shittown", "shitton", + "shleters", "shelters", + "shnaghai", "shanghai", + "shortend", "shortened", + "shotuout", "shoutout", + "shoudlnt", "shouldnt", + "shouldes", "shoulders", + "shoulndt", "shouldnt", + "shrapenl", "shrapnel", + "shrelock", "sherlock", + "shrinked", "shrunk", + "shrpanel", "shrapnel", + "shtiless", "shitless", + "shuoldnt", "shouldnt", + "sideboad", "sideboard", + "sidleine", "sideline", + "siezable", "sizeable", + "siezures", "seizures", + "signatue", "signatures", + "signfies", "signifies", + "signifiy", "signify", + "signigns", "signings", + "signular", "singular", + "silbings", "siblings", + "silicoln", "silicon", + "silicoon", "silicon", + "silimiar", "similiar", + "simialir", "similiar", + "simiilar", "similiar", + "similair", "similar", + "similari", "similiar", + "similart", "similarity", + "similary", "similarly", + "similiar", "similar", + "simliiar", "similiar", + "simluate", "simulate", + "simmilar", "similar", + "simpelst", "simplest", + "simplets", "simplest", + "simplicy", "simplicity", + "simplier", "simpler", + "simulato", "simulation", + "singlers", "singles", + "singluar", "singular", + "sinistre", "sinister", + "sinsiter", "sinister", + "sitckers", "stickers", + "sitrring", "stirring", + "sizebale", "sizeable", + "skateing", "skating", + "skecthes", "sketches", + "skelatel", "skeletal", + "skeletos", "skeletons", + "sketchey", "sketchy", + "sketpics", "skeptics", + "skillsto", "skillshots", + "skimrish", "skirmish", + "skpetics", "skeptics", + "skrimish", "skirmish", + "skteches", "sketches", + 
"skywalkr", "skywalker", + "slaptoon", "splatoon", + "slaverly", "slavery", + "slienced", "silenced", + "sliently", "silently", + "slighlty", "slightly", + "sligthly", "slightly", + "smartare", "smarter", + "snetries", "sentries", + "snippent", "snippet", + "snippert", "snippet", + "snowbals", "snowballs", + "snugglie", "snuggle", + "snydrome", "syndrome", + "snyopsis", "synopsis", + "soberity", "sobriety", + "sobreity", "sobriety", + "socailly", "socially", + "socalism", "socialism", + "socartes", "socrates", + "socialim", "socialism", + "socities", "societies", + "socttish", "scottish", + "soemthin", "somethin", + "soilders", "soldiers", + "solatary", "solitary", + "soldeirs", "soldiers", + "soliders", "soldiers", + "soluable", "soluble", + "solutide", "solitude", + "somalija", "somalia", + "somehtin", "somethin", + "someoens", "someones", + "somethis", "somethings", + "sometihn", "somethin", + "sometinh", "somethin", + "somoenes", "someones", + "somtimes", "sometimes", + "somwhere", "somewhere", + "soparnos", "sopranos", + "sophmore", "sophomore", + "sorcercy", "sorcery", + "sorcerey", "sorcery", + "sorceror", "sorcerer", + "sorcerry", "sorcery", + "sorpanos", "sopranos", + "southren", "southern", + "soverein", "sovereign", + "soverign", "sovereign", + "sovietes", "soviets", + "spagheti", "spaghetti", + "spainish", "spanish", + "spaltoon", "splatoon", + "spammade", "spammed", + "spammare", "spammer", + "spammear", "spammer", + "spammend", "spammed", + "spammeur", "spammer", + "spanisch", "spanish", + "sparklie", "sparkle", + "spawnign", "spawning", + "specemin", "specimen", + "speciaal", "special", + "specialt", "specialist", + "specialy", "specially", + "specialz", "specialize", + "specifed", "specified", + "specifiy", "specify", + "speciman", "specimen", + "specrtal", "spectral", + "speicals", "specials", + "spellign", "spelling", + "spendour", "splendour", + "sphereos", "spheres", + "spilnter", "splinter", + "spiltter", "splitter", + "spindrel", "spindle", + "spirites", "spirits", + "spiritis", "spirits", + "spiritus", "spirits", + "spirtied", "spirited", + "spleling", "spelling", + "splitner", "splinter", + "spoilerd", "spoiled", + "spoliers", "spoilers", + "sponsord", "sponsored", + "sporanos", "sopranos", + "spotifiy", "spotify", + "spotifty", "spotify", + "sppeches", "speeches", + "sprayade", "sprayed", + "spreaded", "spread", + "springst", "sprints", + "sprinkel", "sprinkle", + "sprintas", "sprints", + "spritual", "spiritual", + "sproutes", "sprouts", + "spwaning", "spawning", + "sqaudron", "squadron", + "sqaurely", "squarely", + "sqiurtle", "squirtle", + "squardon", "squadron", + "squareds", "squares", + "squarley", "squarely", + "squeakey", "squeaky", + "squeakly", "squeaky", + "squirlte", "squirtle", + "squirrle", "squirrel", + "squirtel", "squirtle", + "squishey", "squishy", + "squishly", "squishy", + "squritle", "squirtle", + "squrriel", "squirrel", + "squrtile", "squirtle", + "sriarcha", "sriracha", + "srriacha", "sriracha", + "sryacuse", "syracuse", + "staduims", "stadiums", + "staidums", "stadiums", + "staklers", "stalkers", + "stalekrs", "stalkers", + "stalkear", "stalker", + "staminia", "stamina", + "stampade", "stamped", + "stampeed", "stamped", + "stancels", "stances", + "stancers", "stances", + "standars", "standards", + "standbay", "standby", + "standbuy", "standby", + "stangant", "stagnant", + "staright", "straight", + "starined", "strained", + "starlted", "startled", + "startegy", "strategy", + "starteld", "startled", + "startsup", "startups", + "stateman", 
"statesman", + "staticts", "statist", + "stationd", "stationed", + "stationy", "stationary", + "statiskt", "statist", + "statistc", "statistic", + "statment", "statement", + "stattues", "statutes", + "statuets", "statutes", + "statuser", "stature", + "staurday", "saturday", + "steadliy", "steadily", + "stealhty", "stealthy", + "steathly", "stealthy", + "stelathy", "stealthy", + "sterilze", "sterile", + "steriods", "steroids", + "stichted", "stitched", + "sticthed", "stitched", + "sticthes", "stitches", + "stimulai", "stimuli", + "stimulas", "stimulants", + "stimulat", "stimulants", + "stimulli", "stimuli", + "stingent", "stringent", + "stirkers", "strikers", + "stlakers", "stalkers", + "stomache", "stomach", + "stormade", "stormed", + "stormend", "stormed", + "stradegy", "strategy", + "stragety", "strategy", + "straignt", "straighten", + "straigth", "straight", + "straings", "strains", + "strangel", "strangle", + "stranget", "strangest", + "stratgey", "strategy", + "stratled", "startled", + "streames", "streams", + "streamos", "streams", + "streamus", "streams", + "streamys", "streams", + "stregnth", "strength", + "stremear", "streamer", + "strenght", "strength", + "strengts", "strengths", + "strenous", "strenuous", + "strentgh", "strength", + "stretchs", "stretches", + "striaght", "straight", + "striclty", "strictly", + "striekrs", "strikers", + "strikely", "strikingly", + "stringet", "stringent", + "stubbron", "stubborn", + "stubmled", "stumbled", + "stucture", "structure", + "studioes", "studios", + "stuipder", "stupider", + "stumbeld", "stumbled", + "stupdily", "stupidly", + "stupidiy", "stupidity", + "stylisch", "stylish", + "styrofom", "styrofoam", + "suasages", "sausages", + "subltety", "subtlety", + "submarie", "submarines", + "subruban", "suburban", + "subscrie", "subscriber", + "subsidie", "subsidized", + "subsidiy", "subsidy", + "substace", "substance", + "substans", "substances", + "substite", "substitute", + "subtelty", "subtlety", + "subtetly", "subtlety", + "subtilte", "subtitle", + "subtitel", "subtitle", + "subtitls", "subtitles", + "subtltey", "subtlety", + "succeded", "succeeded", + "succedes", "succeeds", + "succeeed", "succeed", + "succesed", "succeeds", + "successs", "successes", + "succsess", "success", + "suceeded", "succeeded", + "sucesful", "successful", + "sucesion", "succession", + "sucesses", "successes", + "sucessor", "successor", + "sucessot", "successor", + "sucidial", "suicidal", + "suddnely", "suddenly", + "sufficit", "sufficient", + "suggesst", "suggests", + "suggeste", "suggestive", + "summenor", "summoner", + "summones", "summoners", + "sunfiber", "sunfire", + "sunscren", "sunscreen", + "superham", "superhuman", + "superheo", "superhero", + "superios", "superiors", + "supirsed", "suprised", + "suposing", "supposing", + "supporre", "supporters", + "suppoted", "supported", + "suprised", "surprised", + "suprized", "surprised", + "suprsied", "suprised", + "supsects", "suspects", + "supsense", "suspense", + "surbuban", "suburban", + "surounds", "surrounds", + "surpases", "surpass", + "surpress", "suppress", + "surprize", "surprise", + "surrouns", "surrounds", + "surveill", "surveil", + "surveyer", "surveyor", + "surviver", "survivor", + "suspened", "suspend", + "suspenso", "suspension", + "swaering", "swearing", + "swansoon", "swanson", + "swasitka", "swastika", + "swaskita", "swastika", + "swatiska", "swastika", + "swatsika", "swastika", + "swedisch", "swedish", + "swiftley", "swiftly", + "swithced", "switched", + "swithces", "switches", + "swtiched", 
"switched", + "swtiches", "switches", + "syarcuse", "syracuse", + "sydnrome", "syndrome", + "sylablle", "syllable", + "syllabel", "syllable", + "symapthy", "sympathy", + "symboles", "symbols", + "symhpony", "symphony", + "symmerty", "symmetry", + "symmtery", "symmetry", + "symoblic", "symbolic", + "symphaty", "sympathy", + "symptoom", "symptom", + "symtpoms", "symptoms", + "synomyns", "synonyms", + "synonmys", "synonyms", + "synonomy", "synonym", + "synoynms", "synonyms", + "synphony", "symphony", + "synposis", "synopsis", + "sypmathy", "sympathy", + "sypmtoms", "symptoms", + "sypnosis", "synopsis", + "syraucse", "syracuse", + "syrcause", "syracuse", + "syringae", "syringe", + "syringue", "syringe", + "sysamdin", "sysadmin", + "sysdamin", "sysadmin", + "tacticas", "tactics", + "tacticts", "tactics", + "tacticus", "tactics", + "tagliate", "tailgate", + "tahnkyou", "thankyou", + "tailsman", "talisman", + "taiwanee", "taiwanese", + "taligate", "tailgate", + "taliored", "tailored", + "tallents", "tallest", + "talsiman", "talisman", + "tanturms", "tantrums", + "tapitude", "aptitude", + "tasliman", "talisman", + "tattooes", "tattoos", + "tattooos", "tattoos", + "taxanomy", "taxonomy", + "teamfigt", "teamfight", + "teamspek", "teamspeak", + "teancity", "tenacity", + "teapsoon", "teaspoon", + "techniqe", "technique", + "teenages", "teenagers", + "telegrah", "telegraph", + "telphony", "telephony", + "tempalrs", "templars", + "tempalte", "template", + "templats", "templates", + "templeos", "temples", + "templers", "temples", + "temporay", "temporary", + "temprary", "temporary", + "tenacles", "tentacles", + "tenactiy", "tenacity", + "tencaity", "tenacity", + "tendancy", "tendency", + "tendence", "tendencies", + "tentacel", "tentacle", + "tentacls", "tentacles", + "tentalce", "tentacle", + "tequilia", "tequila", + "terriory", "territory", + "territoy", "territory", + "terroist", "terrorist", + "tesitcle", "testicle", + "testicel", "testicle", + "testifiy", "testify", + "teusdays", "tuesdays", + "texutres", "textures", + "thaliand", "thailand", + "theather", "theater", + "theathre", "theater", + "theature", "theater", + "theisitc", "theistic", + "themslef", "themself", + "theorits", "theorist", + "theraphy", "therapy", + "thereian", "therein", + "theroies", "theories", + "theroist", "theorist", + "thesitic", "theistic", + "thialand", "thailand", + "thiestic", "theistic", + "thikning", "thinking", + "thirites", "thirties", + "thirstay", "thirsty", + "thnakyou", "thankyou", + "thoeries", "theories", + "thoerist", "theorist", + "thomspon", "thompson", + "thopmson", "thompson", + "thougths", "thoughts", + "thourogh", "thorough", + "threates", "threatens", + "threefor", "therefor", + "thriteen", "thirteen", + "thrities", "thirties", + "throaths", "throats", + "throners", "thrones", + "throough", "thorough", + "throught", "thought", + "thrusday", "thursday", + "thumbnal", "thumbnails", + "thurdsay", "thursday", + "thursdsy", "thursdays", + "tightare", "tighter", + "timestap", "timestamp", + "tirangle", "triangle", + "tirbunal", "tribunal", + "titainum", "titanium", + "titanuim", "titanium", + "tocuhpad", "touchpad", + "togehter", "together", + "togheter", "together", + "toiletts", "toilets", + "tolerabe", "tolerable", + "tommorow", "tomorrow", + "tonguers", "tongues", + "toriodal", "toroidal", + "toritlla", "tortilla", + "tornadoe", "tornado", + "torotise", "tortoise", + "torpedeo", "torpedo", + "torphies", "trophies", + "tortiose", "tortoise", + "toruisty", "touristy", + "toruneys", "tourneys", + 
"touchapd", "touchpad", + "tounreys", "tourneys", + "tourisim", "tourism", + "touritsy", "touristy", + "tournyes", "tourneys", + "toursits", "tourists", + "toursity", "touristy", + "toxiticy", "toxicity", + "trabajao", "trabajo", + "trabajdo", "trabajo", + "trackres", "trackers", + "trageted", "targeted", + "traingle", "triangle", + "traitour", "traitor", + "trakcers", "trackers", + "traliers", "trailers", + "tranform", "transform", + "transeat", "translates", + "transfom", "transform", + "transfos", "transforms", + "transiet", "transient", + "transito", "transition", + "transpot", "transport", + "trasnfer", "transfer", + "tratiors", "traitors", + "traveles", "travels", + "traveres", "traverse", + "treasurs", "treasures", + "treatmet", "treatments", + "treatsie", "treaties", + "treausre", "treasure", + "tredning", "trending", + "tremelos", "tremolos", + "tresuary", "treasury", + "trialers", "trailers", + "trianers", "trainers", + "triangel", "triangle", + "triangls", "triangles", + "trianing", "training", + "trianlge", "triangle", + "triators", "traitors", + "tribuanl", "tribunal", + "trickyer", "trickery", + "triggern", "triggering", + "trilogoy", "trilogy", + "trinagle", "triangle", + "trinekts", "trinkets", + "tringale", "triangle", + "trinitiy", "trinity", + "triology", "trilogy", + "triumpth", "triumph", + "trohpies", "trophies", + "trollade", "trolled", + "tropcial", "tropical", + "trotilla", "tortilla", + "trpoical", "tropical", + "trubinal", "tribunal", + "trubines", "turbines", + "tsunamai", "tsunami", + "tuesdsay", "tuesdays", + "tunnells", "tunnels", + "turkisch", "turkish", + "turntabe", "turntable", + "turretts", "turrets", + "tusedays", "tuesdays", + "tutorual", "tutorial", + "twilgiht", "twilight", + "tylenool", "tylenol", + "typicaly", "typically", + "tyranies", "tyrannies", + "tyrannia", "tyrannical", + "ublisher", "publisher", + "udnercut", "undercut", + "udnerdog", "underdog", + "ugpraded", "upgraded", + "ugprades", "upgrades", + "ukrainie", "ukraine", + "ukrainin", "ukrainian", + "ukranian", "ukrainian", + "ulitmate", "ultimate", + "ultamite", "ultimate", + "ultiamte", "ultimate", + "ultimely", "ultimately", + "ultrason", "ultrasound", + "umberlla", "umbrella", + "unabnned", "unbanned", + "unbanend", "unbanned", + "uncanney", "uncanny", + "uncannny", "uncanny", + "underbog", "undergo", + "underglo", "undergo", + "undersog", "undergo", + "undertoe", "undertones", + "underwar", "underwater", + "unfailry", "unfairly", + "unfarily", "unfairly", + "ungodley", "ungodly", + "unhapppy", "unhappy", + "unhealty", "unhealthy", + "unicrons", "unicorns", + "unifroms", "uniforms", + "uniquley", "uniquely", + "univeral", "universal", + "unlikley", "unlikely", + "unlockes", "unlocks", + "unluckly", "unlucky", + "unpoened", "unopened", + "unqiuely", "uniquely", + "unrakned", "unranked", + "unrnaked", "unranked", + "unrpoven", "unproven", + "unsuable", "unusable", + "untraind", "untrained", + "unusualy", "unusually", + "unvierse", "universe", + "unworhty", "unworthy", + "upgarded", "upgraded", + "upgardes", "upgrades", + "uploades", "uploads", + "upstaris", "upstairs", + "upstiars", "upstairs", + "urethrea", "urethra", + "uruguary", "uruguay", + "ususally", "usually", + "utilitiy", "utility", + "utlimate", "ultimate", + "vaccinae", "vaccinated", + "vaccinet", "vaccinated", + "vacinity", "vicinity", + "vaguelly", "vaguely", + "vaiation", "aviation", + "vaieties", "varieties", + "vailidty", "validity", + "vairable", "variable", + "vaklyrie", "valkyrie", + "valenica", "valencia", + 
"valentie", "valentines", + "valentis", "valentines", + "validade", "validated", + "valkirye", "valkyrie", + "valkiyre", "valkyrie", + "valkriye", "valkyrie", + "valkryie", "valkyrie", + "valkyire", "valkyrie", + "valnecia", "valencia", + "valubale", "valuable", + "valykrie", "valkyrie", + "vamipres", "vampires", + "vampiers", "vampires", + "vampries", "vampires", + "vangurad", "vanguard", + "vanillia", "vanilla", + "vanillla", "vanilla", + "vanugard", "vanguard", + "varaible", "variable", + "varaints", "variants", + "variabel", "variable", + "varibale", "variable", + "varities", "varieties", + "vassales", "vassals", + "vassalls", "vassals", + "vassalos", "vassals", + "vaticaan", "vatican", + "vaticina", "vatican", + "vaulable", "valuable", + "vaylkrie", "valkyrie", + "vechiles", "vehicles", + "vectores", "vectors", + "vegansim", "veganism", + "vegtable", "vegetable", + "vehciles", "vehicles", + "vehicels", "vehicles", + "vehicule", "vehicle", + "veichles", "vehicles", + "venelope", "envelope", + "venemous", "venomous", + "vengance", "vengeance", + "vengence", "vengeance", + "verablly", "verbally", + "verbaitm", "verbatim", + "verisons", "versions", + "versatel", "versatile", + "vertabim", "verbatim", + "vertigro", "vertigo", + "vesseles", "vessels", + "vessells", "vessels", + "viabiliy", "viability", + "viatmins", "vitamins", + "vibratie", "vibrate", + "vibratin", "vibration", + "vicintiy", "vicinity", + "vicseral", "visceral", + "victimas", "victims", + "victimes", "victims", + "victorin", "victorian", + "victoris", "victories", + "vieweres", "viewers", + "viewpoit", "viewpoints", + "vigilane", "vigilante", + "vigliant", "vigilant", + "vikingos", "vikings", + "viligant", "vigilant", + "villegas", "villages", + "vindicte", "vindictive", + "vinicity", "vicinity", + "violatin", "violation", + "violenty", "violently", + "violetas", "violates", + "virament", "vraiment", + "virbator", "vibrator", + "virginas", "virgins", + "virgines", "virgins", + "virgings", "virgins", + "virginis", "virgins", + "virginus", "virgins", + "virtualy", "virtually", + "virtuels", "virtues", + "virtuose", "virtues", + "viscreal", "visceral", + "visercal", "visceral", + "visibily", "visibility", + "visibley", "visibly", + "visiblly", "visibly", + "vitailty", "vitality", + "vitimans", "vitamins", + "vitmains", "vitamins", + "vitories", "victories", + "voicemal", "voicemail", + "voilates", "violates", + "volatily", "volatility", + "volcando", "volcano", + "volcanoe", "volcano", + "volcaron", "volcano", + "vriament", "vraiment", + "wahtever", "whatever", + "wallpapr", "wallpapers", + "warantee", "warranty", + "warcarft", "warcraft", + "warrante", "warranties", + "warriros", "warriors", + "watchemn", "watchmen", + "watchign", "watching", + "wathcing", "watching", + "wathcmen", "watchmen", + "wathever", "whatever", + "watkings", "watkins", + "wealthly", "wealthy", + "webistes", "websites", + "websties", "websites", + "wednesdy", "wednesdays", + "weigthed", "weighted", + "weridest", "weirdest", + "werstler", "wrestler", + "wesbites", "websites", + "westbrok", "westbrook", + "westerse", "westerners", + "wherease", "whereas", + "whipsers", "whispers", + "whislist", "wishlist", + "whisltes", "whistles", + "whisperd", "whispered", + "whistels", "whistles", + "whitsles", "whistles", + "whsipers", "whispers", + "widgetas", "widgets", + "wieghted", "weighted", + "willaims", "williams", + "willfuly", "willfully", + "willimas", "williams", + "windsoar", "windsor", + "wininpeg", "winnipeg", + "winnigns", "winnings", + "winnpieg", 
"winnipeg", + "wiredest", "weirdest", + "wishlsit", "wishlist", + "wishpers", "whispers", + "withdral", "withdrawal", + "witnesss", "witnesses", + "wonderes", "wonders", + "wonderus", "wonders", + "workfore", "workforce", + "wouldnot", "wouldnt", + "wranlger", "wrangler", + "wreckign", "wrecking", + "wrecthed", "wretched", + "wrekcing", "wrecking", + "wreslter", "wrestler", + "wresters", "wrestlers", + "writting", "writing", + "wrnagler", "wrangler", + "wrteched", "wretched", + "yeilding", "yielding", + "yoesmite", "yosemite", + "yorksher", "yorkshire", + "yorkshie", "yorkshire", + "yosemeti", "yosemite", + "yosimete", "yosemite", + "zealotes", "zealots", + "zealoths", "zealots", + "zealotus", "zealots", + "zealouts", "zealous", + "zepplein", "zeppelin", + "zepplien", "zeppelin", + "zimbabew", "zimbabwe", + "zimbawbe", "zimbabwe", + "zinoists", "zionists", + "zionisim", "zionism", + "zionistm", "zionism", + "zionsits", "zionists", + "zoinists", "zionists", + "abiltiy", "ability", + "abodmen", "abdomen", + "abondon", "abandon", + "aboslve", "absolve", + "abosrbs", "absorbs", + "abriter", "arbiter", + "abrupty", "abruptly", + "absense", "absence", + "absolue", "absolute", + "absovle", "absolve", + "absrobs", "absorbs", + "absuers", "abusers", + "absurdy", "absurdly", + "absymal", "abysmal", + "abymsal", "abysmal", + "acadamy", "academy", + "acadmic", "academic", + "accesss", "access", + "accpets", "accepts", + "accross", "across", + "accuray", "accuracy", + "acheive", "achieve", + "achived", "achieved", + "acident", "accident", + "ackward", "awkward", + "acrlyic", "acrylic", + "actauly", "actualy", + "activit", "activist", + "activly", "actively", + "actualy", "actually", + "actulay", "actualy", + "acuracy", "accuracy", + "acusing", "causing", + "acustom", "accustom", + "acutaly", "actualy", + "acyrlic", "acrylic", + "adaptes", "adapters", + "adatper", "adapter", + "adbomen", "abdomen", + "addcits", "addicts", + "adderss", "address", + "addtion", "addition", + "adequet", "adequate", + "adequit", "adequate", + "adivser", "adviser", + "adivsor", "advisor", + "admited", "admitted", + "admrial", "admiral", + "adpater", "adapter", + "adquire", "acquire", + "adultey", "adultery", + "adverst", "adverts", + "adviced", "advised", + "advocay", "advocacy", + "advsior", "advisor", + "aeriels", "aerials", + "affaris", "affairs", + "affiars", "affairs", + "afircan", "african", + "africas", "africans", + "afwully", "awfully", + "againts", "against", + "agaisnt", "against", + "aganist", "against", + "aggreed", "agreed", + "agianst", "against", + "agreing", "agreeing", + "agruing", "arguing", + "ahtiest", "athiest", + "aicraft", "aircraft", + "ailmony", "alimony", + "airbore", "airborne", + "aircaft", "aircraft", + "airlfow", "airflow", + "airosft", "airsoft", + "airpost", "airports", + "airsfot", "airsoft", + "airzona", "arizona", + "alchmey", "alchemy", + "alchool", "alcohol", + "alcohal", "alcohol", + "aledged", "alleged", + "aledges", "alleges", + "alegbra", "algebra", + "algerba", "algebra", + "alienet", "alienate", + "alledge", "allege", + "allegry", "allergy", + "alltime", "all-time", + "almighy", "almighty", + "alochol", "alcohol", + "alotted", "allotted", + "alowing", "allowing", + "alphabt", "alphabet", + "alreayd", "already", + "alrighy", "alrighty", + "altanta", "atlanta", + "alteast", "atleast", + "altough", "although", + "alusion", "allusion", + "amateus", "amateurs", + "amatuer", "amateur", + "amature", "armature", + "amensia", "amnesia", + "amensty", "amnesty", + "amercia", "america", + 
"americs", "americas", + "ammount", "amount", + "ammused", "amused", + "amneisa", "amnesia", + "amnsety", "amnesty", + "amognst", "amongst", + "amongts", "amongst", + "amonsgt", "amongst", + "ampilfy", "amplify", + "amrpits", "armpits", + "analoge", "analogue", + "analsyt", "analyst", + "analyes", "analyse", + "analyts", "analyst", + "analzye", "analyze", + "anaylse", "analyse", + "anaylst", "analyst", + "anaylze", "analyze", + "anceint", "ancient", + "andorid", "android", + "andriod", "android", + "androis", "androids", + "angirly", "angrily", + "angluar", "angular", + "angualr", "angular", + "anicent", "ancient", + "anitque", "antique", + "anixety", "anxiety", + "anmesia", "amnesia", + "anmesty", "amnesty", + "annoint", "anoint", + "annualy", "annually", + "annuled", "annulled", + "anohter", "another", + "anomoly", "anomaly", + "answerd", "answered", + "anuglar", "angular", + "anulled", "annulled", + "anwsers", "answers", + "anwyays", "anyways", + "anxeity", "anxiety", + "anyoens", "anyones", + "anyonse", "anyones", + "anywyas", "anyways", + "aparent", "apparent", + "appeard", "appeared", + "appluad", "applaud", + "aproval", "approval", + "apsects", "aspects", + "apshalt", "asphalt", + "apsirin", "aspirin", + "aqcuire", "acquire", + "aquarim", "aquarium", + "aquired", "acquired", + "aranged", "arranged", + "arbitre", "arbiter", + "arcahic", "archaic", + "archiac", "archaic", + "arcylic", "acrylic", + "aresnal", "arsenal", + "aretmis", "artemis", + "argubly", "arguably", + "aribter", "arbiter", + "ariflow", "airflow", + "arisoft", "airsoft", + "aritsts", "artists", + "armchar", "armchair", + "arogant", "arrogant", + "arogent", "arrogant", + "arresst", "arrests", + "arround", "around", + "arsneal", "arsenal", + "artcile", "article", + "artical", "article", + "articel", "article", + "artistc", "artistic", + "artmeis", "artemis", + "artsits", "artists", + "aruging", "arguing", + "aseuxal", "asexual", + "asexaul", "asexual", + "ashpalt", "asphalt", + "asiprin", "aspirin", + "asissts", "assists", + "asnwers", "answers", + "asorbed", "absorbed", + "aspahlt", "asphalt", + "asphlat", "asphalt", + "aspriin", "aspirin", + "assagne", "assange", + "assasin", "assassin", + "assembe", "assemble", + "assemby", "assembly", + "assisst", "assists", + "assnage", "assange", + "asssits", "assists", + "assualt", "assault", + "asterik", "asterisk", + "asutria", "austria", + "atcualy", "actualy", + "atelast", "atleast", + "athesim", "atheism", + "athiesm", "atheism", + "athiest", "atheist", + "athiets", "athiest", + "athlets", "athletes", + "atlantc", "atlantic", + "atleats", "atleast", + "atlesat", "atleast", + "atorney", "attorney", + "atremis", "artemis", + "attemps", "attempts", + "attemts", "attempts", + "attened", "attended", + "attracs", "attracts", + "audbile", "audible", + "audibel", "audible", + "austira", "austria", + "austrai", "austria", + "autistc", "autistic", + "avation", "aviation", + "avtaars", "avatars", + "awakend", "awakened", + "bablyon", "babylon", + "backdor", "backdoor", + "backsta", "backseat", + "baclony", "balcony", + "badnits", "bandits", + "baiscly", "basicly", + "bakcers", "backers", + "balanse", "balances", + "balcked", "blacked", + "banhsee", "banshee", + "bankgok", "bangkok", + "baoynet", "bayonet", + "baptims", "baptism", + "baptsim", "baptism", + "baragin", "bargain", + "bargani", "bargain", + "bargian", "bargain", + "bariner", "brainer", + "barlkey", "barkley", + "barracs", "barracks", + "barrles", "barrels", + "barsita", "barista", + "barvery", "bravery", + "bascily", 
"basicly", + "basicly", "basically", + "basilcy", "basicly", + "basiton", "bastion", + "basnhee", "banshee", + "bastane", "bastante", + "bastars", "bastards", + "bastino", "bastion", + "bathrom", "bathroom", + "batitsa", "batista", + "batsita", "batista", + "bayblon", "babylon", + "baynoet", "bayonet", + "bayoent", "bayonet", + "bceuase", "becuase", + "beacuse", "because", + "bealtes", "beatles", + "beaslty", "beastly", + "beatels", "beatles", + "beaucop", "beaucoup", + "becamae", "became", + "becames", "becomes", + "becasue", "because", + "becouse", "because", + "becuaes", "becuase", + "becuase", "because", + "becusae", "becuase", + "befried", "befriend", + "beggins", "begins", + "beglian", "belgian", + "beglium", "belgium", + "begnals", "bengals", + "bejiing", "beijing", + "beleifs", "beliefs", + "beleive", "believe", + "belgain", "belgian", + "belguim", "belgium", + "believr", "believer", + "believs", "believes", + "belifes", "beliefs", + "beligan", "belgian", + "beligum", "belgium", + "belived", "believed", + "belives", "believes", + "benagls", "bengals", + "benedit", "benedict", + "benghai", "benghazi", + "benglas", "bengals", + "benifit", "benefit", + "beoynce", "beyonce", + "beraded", "bearded", + "bersekr", "berserk", + "beseige", "besiege", + "betales", "beatles", + "bethesa", "bethesda", + "betrayd", "betrayed", + "beucase", "becuase", + "bewteen", "between", + "bicthes", "bitches", + "bidrman", "birdman", + "biejing", "beijing", + "bifgoot", "bigfoot", + "bigorty", "bigotry", + "bigtoed", "bigoted", + "bigtory", "bigotry", + "biogted", "bigoted", + "biogtry", "bigotry", + "bioplar", "bipolar", + "biploar", "bipolar", + "birdamn", "birdman", + "birdges", "bridges", + "birgade", "brigade", + "bitcion", "bitcoin", + "bithced", "bitched", + "bithces", "bitches", + "bitocin", "bitcoin", + "bizzare", "bizarre", + "blacony", "balcony", + "blaimed", "blamed", + "blankes", "blankets", + "blegian", "belgian", + "blegium", "belgium", + "blizzad", "blizzard", + "blockes", "blockers", + "bloster", "bolster", + "blulets", "bullets", + "bobmers", "bombers", + "bollocs", "bollocks", + "bondary", "boundary", + "bonnano", "bonanno", + "bonsues", "bonuses", + "boraden", "broaden", + "borader", "broader", + "boradly", "broadly", + "bordeom", "boredom", + "boslter", "bolster", + "boudler", "boulder", + "boundry", "boundary", + "bounses", "bonuses", + "boutiqe", "boutique", + "bouyant", "buoyant", + "braevry", "bravery", + "braista", "barista", + "brakley", "barkley", + "branier", "brainer", + "braoden", "broaden", + "braoder", "broader", + "braodly", "broadly", + "brednan", "brendan", + "breifly", "briefly", + "breserk", "berserk", + "brethen", "brethren", + "brewrey", "brewery", + "briagde", "brigade", + "brianer", "brainer", + "bridman", "birdman", + "brielfy", "briefly", + "brigdes", "bridges", + "brightn", "brighten", + "brisben", "brisbane", + "britian", "britain", + "britsol", "bristol", + "briused", "bruised", + "briuser", "bruiser", + "briuses", "bruises", + "brocoli", "broccoli", + "bronocs", "broncos", + "browine", "brownie", + "brownei", "brownie", + "brownis", "brownies", + "bruglar", "burglar", + "brunete", "brunette", + "bruning", "burning", + "brusied", "bruised", + "brusies", "bruises", + "brusses", "brussels", + "brutaly", "brutally", + "btiched", "bitched", + "btiches", "bitches", + "bubbels", "bubbles", + "buddhim", "buddhism", + "buddhit", "buddhist", + "buddist", "buddhist", + "budgest", "budgets", + "bugdets", "budgets", + "buildes", "builders", + "bulgara", "bulgaria", + 
"bullest", "bullets", + "buoancy", "buoyancy", + "burguny", "burgundy", + "buriser", "bruiser", + "burlgar", "burglar", + "burnign", "burning", + "burried", "buried", + "burrtio", "burrito", + "busines", "business", + "busness", "business", + "butthoe", "butthole", + "buttrey", "buttery", + "cababge", "cabbage", + "cabines", "cabinets", + "cabniet", "cabinet", + "caclium", "calcium", + "cacuses", "caucuses", + "caffeen", "caffeine", + "cahched", "cached", + "cahotic", "chaotic", + "cahsier", "cashier", + "cailbre", "calibre", + "calaber", "caliber", + "calagry", "calgary", + "calback", "callback", + "calbire", "calibre", + "calcuim", "calcium", + "calculs", "calculus", + "calicum", "calcium", + "calrify", "clarify", + "calrity", "clarity", + "caluses", "clauses", + "camboda", "cambodia", + "campain", "campaign", + "campuss", "campuses", + "cancles", "cancels", + "cancres", "cancers", + "cancuks", "canucks", + "canides", "candies", + "cannnot", "cannot", + "canrage", "carnage", + "capible", "capable", + "capitas", "capitals", + "capsuls", "capsules", + "captais", "captains", + "captial", "capital", + "captiol", "capitol", + "captued", "captured", + "capturd", "captured", + "capusle", "capsule", + "carange", "carnage", + "carbien", "carbine", + "cardaic", "cardiac", + "cardina", "cardigan", + "careing", "caring", + "caridac", "cardiac", + "carmtan", "cartman", + "carnege", "carnage", + "carnige", "carnage", + "carolan", "carolina", + "carreer", "career", + "carrers", "careers", + "cartles", "cartels", + "caryons", "crayons", + "casette", "cassette", + "casheir", "cashier", + "cashies", "cashiers", + "cashire", "cashier", + "casltes", "castles", + "caspule", "capsule", + "cassete", "cassette", + "castels", "castles", + "casuing", "causing", + "cathlic", "catholic", + "cauncks", "canucks", + "cavarly", "cavalry", + "cavlary", "cavalry", + "celcius", "celsius", + "celisus", "celsius", + "celitcs", "celtics", + "celsuis", "celsius", + "centruy", "century", + "centuty", "century", + "ceratin", "certain", + "cermaic", "ceramic", + "certian", "certain", + "cervial", "cervical", + "cesspol", "cesspool", + "cetlics", "celtics", + "chambre", "chamber", + "charcol", "charcoal", + "charisa", "charisma", + "chasiss", "chassis", + "chatoic", "chaotic", + "cheeots", "cheetos", + "cheesse", "cheeses", + "chekcer", "checker", + "chelsae", "chelsea", + "cheslea", "chelsea", + "chiense", "chinese", + "childen", "children", + "chimeny", "chimney", + "chinees", "chinese", + "chinmey", "chimney", + "chipest", "chipset", + "chispet", "chipset", + "chivaly", "chivalry", + "chlesea", "chelsea", + "chnages", "changes", + "choatic", "chaotic", + "chocies", "choices", + "choosen", "chosen", + "chtulhu", "cthulhu", + "churchs", "churches", + "cilanto", "cilantro", + "cilents", "clients", + "circels", "circles", + "circuis", "circuits", + "cirlces", "circles", + "clacium", "calcium", + "claerer", "clearer", + "claerly", "clearly", + "clagary", "calgary", + "claibre", "calibre", + "claimes", "claims", + "clairfy", "clarify", + "clairty", "clarity", + "clanand", "clannad", + "clarfiy", "clarify", + "classis", "classics", + "clasues", "clauses", + "claymer", "claymore", + "claymoe", "claymore", + "cleanes", "cleanse", + "cleasne", "cleanse", + "cleints", "clients", + "clenase", "cleanse", + "clesius", "celsius", + "cletics", "celtics", + "clevery", "cleverly", + "climats", "climates", + "climbes", "climbers", + "clincis", "clinics", + "clitors", "clitoris", + "cloesly", "closely", + "closley", "closely", + "cluases", 
"clauses", + "cluprit", "culprit", + "coalese", "coalesce", + "coctail", "cocktail", + "cohesie", "cohesive", + "colgone", "cologne", + "collape", "collapse", + "collest", "collects", + "collony", "colony", + "collumn", "column", + "cologen", "cologne", + "colomba", "colombia", + "colonge", "cologne", + "colorao", "colorado", + "colourd", "coloured", + "columsn", "columns", + "comando", "commando", + "comapny", "company", + "comapre", "compare", + "comarde", "comrade", + "comback", "comeback", + "combins", "combines", + "comdeic", "comedic", + "comited", "committed", + "commano", "commando", + "commans", "commands", + "commere", "commerce", + "comming", "coming", + "commitd", "commited", + "compase", "compares", + "compede", "competed", + "compilr", "compiler", + "compnay", "company", + "compots", "compost", + "comrads", "comrades", + "comtpon", "compton", + "conceed", "concede", + "conceps", "concepts", + "conclue", "conclude", + "concret", "concert", + "condenm", "condemn", + "condiut", "conduit", + "condmen", "condemn", + "confids", "confides", + "confins", "confines", + "confise", "confines", + "conflit", "conflict", + "conived", "connived", + "connecs", "connects", + "conqeur", "conquer", + "conqure", "conquer", + "consept", "concept", + "consern", "concern", + "consums", "consumes", + "contacs", "contacts", + "contais", "contains", + "contast", "contacts", + "contemt", "contempt", + "contens", "contents", + "contess", "contests", + "contian", "contain", + "contine", "continue", + "convers", "converts", + "conveyd", "conveyed", + "convine", "convince", + "coprses", "corpses", + "coputer", "computer", + "corasir", "corsair", + "coratia", "croatia", + "coridal", "cordial", + "corsari", "corsair", + "corsiar", "corsair", + "corspes", "corpses", + "corwbar", "crowbar", + "costums", "costumes", + "coudlnt", "couldnt", + "coulmns", "columns", + "coulndt", "couldnt", + "counsle", "counsel", + "countes", "counters", + "courtey", "courtesy", + "covenat", "covenant", + "coytoes", "coyotes", + "crabine", "carbine", + "cralwed", "crawled", + "craotia", "croatia", + "craweld", "crawled", + "creamic", "ceramic", + "createn", "creatine", + "creater", "creature", + "creatie", "creatine", + "creatue", "creature", + "creepes", "creepers", + "creepig", "creeping", + "creulty", "cruelty", + "cricles", "circles", + "critera", "criteria", + "cropses", "corpses", + "crosair", "corsair", + "crpytic", "cryptic", + "crsytal", "crystal", + "crtical", "critical", + "crucibe", "crucible", + "cruetly", "cruelty", + "cruical", "crucial", + "crulety", "cruelty", + "crusdae", "crusade", + "crusier", "cruiser", + "crusies", "cruises", + "crusive", "cursive", + "crutchs", "crutches", + "crypitc", "cryptic", + "crystas", "crystals", + "crystsl", "crystals", + "crytpic", "cryptic", + "crytsal", "crystal", + "cthluhu", "cthulhu", + "cthuhlu", "cthulhu", + "cthuluh", "cthulhu", + "ctuhlhu", "cthulhu", + "cuasing", "causing", + "cubcile", "cubicle", + "cubilce", "cubicle", + "cuddels", "cuddles", + "culrpit", "culprit", + "culturs", "cultures", + "cupboad", "cupboard", + "cuplrit", "culprit", + "curatin", "curtain", + "curcial", "crucial", + "curcuit", "circuit", + "curelty", "cruelty", + "curiser", "cruiser", + "curisve", "cursive", + "currate", "curate", + "currens", "currents", + "curreny", "currency", + "currest", "currents", + "cursade", "crusade", + "curtian", "curtain", + "cyandie", "cyanide", + "cyclits", "cyclist", + "cycloen", "cyclone", + "cycolps", "cyclops", + "cylcist", "cyclist", + "cylcone", "cyclone", + 
"cylcops", "cyclops", + "cynaide", "cyanide", + "cyrptic", "cryptic", + "cyrstal", "crystal", + "dagners", "dangers", + "daimond", "diamond", + "damenor", "demeanor", + "dammage", "damage", + "darcula", "dracula", + "dargons", "dragons", + "darkets", "darkest", + "datbase", "database", + "daulity", "duality", + "dawrves", "dwarves", + "ddogers", "dodgers", + "ddoging", "dodging", + "deadlit", "deadlift", + "deadpol", "deadpool", + "deafult", "default", + "deahtly", "deathly", + "deatils", "details", + "deatlhy", "deathly", + "decalre", "declare", + "decison", "decision", + "declars", "declares", + "declase", "declares", + "decress", "decrees", + "decribe", "describe", + "decsend", "descend", + "dectect", "detect", + "defaint", "defiant", + "defauls", "defaults", + "defelct", "deflect", + "defensd", "defends", + "deffine", "define", + "definat", "defiant", + "definet", "definite", + "definie", "definite", + "definig", "defining", + "definit", "definite", + "defualt", "default", + "degarde", "degrade", + "degrase", "degrasse", + "degrate", "degrade", + "deiners", "deniers", + "deisgns", "designs", + "deivant", "deviant", + "dekstop", "desktop", + "delcare", "declare", + "delfect", "deflect", + "demenor", "demeanor", + "dementa", "dementia", + "demsond", "desmond", + "deneirs", "deniers", + "denisty", "density", + "densley", "densely", + "depcits", "depicts", + "dependd", "depended", + "depitcs", "depicts", + "deployd", "deployed", + "depsise", "despise", + "descrie", "describe", + "descuss", "discuss", + "desgins", "designs", + "desings", "designs", + "desitny", "destiny", + "desnely", "densely", + "desnity", "density", + "desomnd", "desmond", + "despict", "depict", + "despide", "despised", + "despies", "despise", + "destkop", "desktop", + "destory", "destroy", + "destros", "destroys", + "detaild", "detailed", + "detials", "details", + "detorit", "detroit", + "detriot", "detroit", + "deuling", "dueling", + "devaint", "deviant", + "devaite", "deviate", + "devided", "divided", + "devlove", "devolve", + "devotin", "devotion", + "devovle", "devolve", + "diabets", "diabetes", + "dialecs", "dialects", + "dialoge", "dialogue", + "diamons", "diamonds", + "diasble", "disable", + "dicksih", "dickish", + "dicover", "discover", + "dictats", "dictates", + "dieties", "deities", + "dilpoma", "diploma", + "dimaond", "diamond", + "dingity", "dignity", + "dinosar", "dinosaur", + "diosese", "diocese", + "dipolma", "diploma", + "dirbble", "dribble", + "directy", "directly", + "diretcx", "directx", + "dirived", "derived", + "dirvers", "drivers", + "disbale", "disable", + "disguss", "disgusts", + "disliks", "dislikes", + "disover", "discover", + "dispair", "despair", + "dispath", "dispatch", + "dispite", "despite", + "dispuse", "disputes", + "disputs", "disputes", + "dissole", "dissolve", + "distase", "distaste", + "distint", "distinct", + "divison", "division", + "docuhes", "douches", + "docuhey", "douchey", + "dogders", "dodgers", + "dogding", "dodging", + "dolhpin", "dolphin", + "dolphis", "dolphins", + "dominae", "dominate", + "dominno", "dominion", + "doplhin", "dolphin", + "dortmud", "dortmund", + "draclua", "dracula", + "dracual", "dracula", + "drakest", "darkest", + "dramtic", "dramatic", + "dribbel", "dribble", + "driectx", "directx", + "driftig", "drifting", + "drinkes", "drinkers", + "druming", "drumming", + "duailty", "duality", + "dualtiy", "duality", + "dubsetp", "dubstep", + "dulaity", "duality", + "duleing", "dueling", + "dunegon", "dungeon", + "dungeos", "dungeons", + "dungoen", "dungeon", + 
"durring", "during", + "dusbtep", "dubstep", + "dyansty", "dynasty", + "dynamis", "dynamics", + "dynsaty", "dynasty", + "earlies", "earliest", + "earliet", "earliest", + "earplus", "earplugs", + "eastwod", "eastwood", + "ebcuase", "becuase", + "ecilpse", "eclipse", + "eclipes", "eclipse", + "eclispe", "eclipse", + "eclpise", "eclipse", + "ectsasy", "ecstasy", + "edbiles", "edibles", + "edibels", "edibles", + "effords", "efforts", + "ehtanol", "ethanol", + "eifnach", "einfach", + "eighten", "eighteen", + "einfahc", "einfach", + "elasped", "elapsed", + "elcipse", "eclipse", + "elction", "election", + "elecrto", "electro", + "electic", "electric", + "electon", "election", + "ellitot", "elliott", + "elloitt", "elliott", + "elphant", "elephant", + "emabrgo", "embargo", + "emabssy", "embassy", + "emapthy", "empathy", + "embeded", "embedded", + "embrago", "embargo", + "eminate", "emanate", + "emipres", "empires", + "emision", "emission", + "emiting", "emitting", + "emition", "emission", + "emmited", "emitted", + "empahty", "empathy", + "emphsis", "emphasis", + "empiers", "empires", + "empited", "emptied", + "emplore", "employer", + "emporer", "emperor", + "empries", "empires", + "emtpied", "emptied", + "enameld", "enameled", + "encahnt", "enchant", + "encalve", "enclave", + "encrpyt", "encrypt", + "encyrpt", "encrypt", + "endores", "endorse", + "endrose", "endorse", + "energis", "energies", + "enforse", "enforces", + "enginer", "engineer", + "englsih", "english", + "enhanse", "enhances", + "enlcave", "enclave", + "enlgish", "english", + "enlsave", "enslave", + "ensalve", "enslave", + "entbook", "netbook", + "entirey", "entirety", + "entorpy", "entropy", + "epiloge", "epilogue", + "episdoe", "episode", + "epsiode", "episode", + "epsorts", "esports", + "eptiome", "epitome", + "equiped", "equipped", + "erested", "arrested", + "escapse", "escapes", + "escpaes", "escapes", + "esctasy", "ecstasy", + "esporst", "esports", + "espreso", "espresso", + "esprots", "esports", + "essense", "essence", + "etherel", "ethereal", + "ethnaol", "ethanol", + "euphora", "euphoria", + "europen", "european", + "eurpean", "european", + "everets", "everest", + "everset", "everest", + "evloved", "evolved", + "evloves", "evolves", + "evovled", "evolved", + "evovles", "evolves", + "exaclty", "exactly", + "exahust", "exhaust", + "examind", "examined", + "exapnds", "expands", + "exatled", "exalted", + "excange", "exchange", + "excatly", "exactly", + "excells", "excels", + "exceprt", "excerpt", + "excluse", "excludes", + "excrept", "excerpt", + "exculde", "exclude", + "exelent", "excellent", + "exemple", "example", + "exerpts", "excerpts", + "exhasut", "exhaust", + "exhuast", "exhaust", + "exising", "existing", + "existet", "existent", + "exlated", "exalted", + "exlcude", "exclude", + "exliled", "exiled", + "exludes", "excludes", + "exmaple", "example", + "exoitcs", "exotics", + "expalin", "explain", + "expeced", "expected", + "expells", "expels", + "expiers", "expires", + "explict", "explicit", + "expliot", "exploit", + "explods", "explodes", + "explose", "explodes", + "expolde", "explode", + "expolit", "exploit", + "exposse", "exposes", + "expries", "expires", + "exracts", "extracts", + "exsited", "existed", + "extered", "exerted", + "exterme", "extreme", + "extoics", "exotics", + "extreem", "extreme", + "extrems", "extremes", + "eyebals", "eyeballs", + "eyebros", "eyebrows", + "fabulos", "fabulous", + "facebok", "facebook", + "facepam", "facepalm", + "faclons", "falcons", + "facsism", "fascism", + "facsist", "fascist", + 
"failurs", "failures", + "faincee", "fiancee", + "falesly", "falsely", + "falired", "flaired", + "falshed", "flashed", + "falshes", "flashes", + "falsley", "falsely", + "falvors", "flavors", + "familes", "families", + "famoust", "famous", + "famousy", "famously", + "fanatsy", "fantasy", + "fantaic", "fanatic", + "faoming", "foaming", + "fascits", "fascist", + "fasicsm", "fascism", + "fasicst", "fascist", + "faslely", "falsely", + "fatiuge", "fatigue", + "febuary", "february", + "fecthed", "fetched", + "fecthes", "fetches", + "feminen", "feminine", + "feminie", "feminine", + "feminim", "feminism", + "feodras", "fedoras", + "fertily", "fertility", + "fesitve", "festive", + "fethced", "fetched", + "fethces", "fetches", + "fetishs", "fetishes", + "fianite", "finite", + "fianlly", "finally", + "fiercly", "fiercely", + "filcker", "flicker", + "filpped", "flipped", + "filterd", "filtered", + "finacee", "fiancee", + "fineses", "finesse", + "fininsh", "finnish", + "finishs", "finishes", + "finisse", "finishes", + "finnsih", "finnish", + "firends", "friends", + "firggin", "friggin", + "firsbee", "frisbee", + "firslty", "firstly", + "firtsly", "firstly", + "fitlers", "filters", + "flacons", "falcons", + "flahsed", "flashed", + "flahses", "flashes", + "flaried", "flaired", + "flasely", "falsely", + "flashig", "flashing", + "flavord", "flavored", + "flavous", "flavours", + "flawess", "flawless", + "flciker", "flicker", + "fliters", "filters", + "flordia", "florida", + "florene", "florence", + "fnaatic", "fanatic", + "fomaing", "foaming", + "fonetic", "phonetic", + "forefit", "forfeit", + "foregin", "foreign", + "foreing", "foreign", + "forfiet", "forfeit", + "forhead", "forehead", + "foriegn", "foreign", + "formaly", "formally", + "formery", "formerly", + "formost", "foremost", + "formual", "formula", + "formuls", "formulas", + "forrset", "forrest", + "forsakn", "forsaken", + "forsane", "forsaken", + "forumla", "formula", + "fountan", "fountain", + "fourten", "fourteen", + "fracter", "fracture", + "fragmet", "fragment", + "freedos", "freedoms", + "freinds", "friends", + "frigign", "friggin", + "fristly", "firstly", + "frostig", "frosting", + "frsibee", "frisbee", + "fruitin", "fruition", + "fullets", "fullest", + "fullset", "fullest", + "funides", "fundies", + "funtion", "function", + "furance", "furnace", + "furncae", "furnace", + "futhroc", "futhark", + "gadgest", "gadgets", + "gagdets", "gadgets", + "galatic", "galactic", + "galcier", "glacier", + "galsgow", "glasgow", + "gameply", "gameplay", + "gamerga", "gamertag", + "gankign", "ganking", + "ganster", "gangster", + "garabge", "garbage", + "garfied", "garfield", + "garnola", "granola", + "generas", "generals", + "genersl", "generals", + "geniuss", "geniuses", + "geogria", "georgia", + "geomety", "geometry", + "georiga", "georgia", + "gernade", "grenade", + "gerogia", "georgia", + "gigabye", "gigabyte", + "giltchy", "glitchy", + "gimmics", "gimmicks", + "gimmicy", "gimmicky", + "girzzly", "grizzly", + "glagsow", "glasgow", + "glaicer", "glacier", + "glicthy", "glitchy", + "glimpes", "glimpse", + "glimspe", "glimpse", + "glipmse", "glimpse", + "glitchd", "glitched", + "glitchs", "glitches", + "glithcy", "glitchy", + "globaly", "globally", + "gloiath", "goliath", + "glorios", "glorious", + "gltichy", "glitchy", + "gnaking", "ganking", + "gnawwed", "gnawed", + "goddanm", "goddamn", + "goddman", "goddamn", + "godliek", "godlike", + "godlman", "goldman", + "godsped", "godspeed", + "goergia", "georgia", + "goilath", "goliath", + "golaith", "goliath", + 
"golbins", "goblins", + "goldamn", "goldman", + "goldbeg", "goldberg", + "goldike", "godlike", + "golitah", "goliath", + "goodluk", "goodluck", + "gorumet", "gourmet", + "gosepls", "gospels", + "gosples", "gospels", + "gpysies", "gypsies", + "grabage", "garbage", + "grahpic", "graphic", + "grainte", "granite", + "grammer", "grammar", + "graniet", "granite", + "grantie", "granite", + "graphie", "graphite", + "graphis", "graphics", + "grappel", "grapple", + "greande", "grenade", + "grenads", "grenades", + "greneer", "greener", + "griaffe", "giraffe", + "gridles", "griddles", + "grillig", "grilling", + "grpahic", "graphic", + "guardin", "guardian", + "guiness", "guinness", + "gullibe", "gullible", + "gutiars", "guitars", + "gypises", "gypsies", + "gyspies", "gypsies", + "habaeus", "habeas", + "haethen", "heathen", + "hailfax", "halifax", + "halfiax", "halifax", + "handbok", "handbook", + "handedy", "handedly", + "handeld", "handled", + "hanlder", "handler", + "hannibl", "hannibal", + "hanuted", "haunted", + "haorder", "hoarder", + "hapened", "happened", + "happend", "happened", + "happliy", "happily", + "harased", "harassed", + "harases", "harasses", + "hardend", "hardened", + "hardwod", "hardwood", + "haricut", "haircut", + "hatchig", "hatching", + "hauntig", "haunting", + "haviest", "heaviest", + "headest", "headset", + "headses", "headsets", + "heaveny", "heavenly", + "heigher", "higher", + "heigths", "heights", + "helemts", "helmets", + "hellfie", "hellfire", + "hellvua", "helluva", + "helment", "helmet", + "helpped", "helped", + "hemlets", "helmets", + "henious", "heinous", + "heorics", "heroics", + "heorine", "heroine", + "heriocs", "heroics", + "herione", "heroine", + "herocis", "heroics", + "heronie", "heroine", + "hesiman", "heisman", + "hieghts", "heights", + "hienous", "heinous", + "hiesman", "heisman", + "himselv", "himself", + "hiptser", "hipster", + "hismelf", "himself", + "hispter", "hipster", + "hitboxs", "hitboxes", + "hoilday", "holiday", + "hokpins", "hopkins", + "holdiay", "holiday", + "holdins", "holdings", + "homniem", "hominem", + "horader", "hoarder", + "hosited", "hoisted", + "hosthot", "hotshot", + "hostles", "hostels", + "hostpot", "hotspot", + "hothsot", "hotshot", + "hotpsot", "hotspot", + "hotsopt", "hotspot", + "hounour", "honour", + "hseldon", "sheldon", + "huanted", "haunted", + "humanit", "humanist", + "humants", "humanist", + "humidiy", "humidity", + "humoros", "humorous", + "hunagry", "hungary", + "hunderd", "hundred", + "hundres", "hundreds", + "hungray", "hungary", + "hurdels", "hurdles", + "hurldes", "hurdles", + "husbans", "husbands", + "hweaton", "wheaton", + "hybirds", "hybrids", + "hydogen", "hydrogen", + "hygeine", "hygiene", + "hypnoss", "hypnosis", + "hyrbids", "hybrids", + "hystera", "hysteria", + "iceforg", "icefrog", + "ierland", "ireland", + "ignitin", "ignition", + "ignorat", "ignorant", + "illegas", "illegals", + "illegsl", "illegals", + "illinos", "illinois", + "imanent", "eminent", + "imapcts", "impacts", + "iminent", "eminent", + "imminet", "imminent", + "implict", "implicit", + "imploed", "implode", + "imploys", "employs", + "impluse", "impulse", + "impolde", "implode", + "importd", "imported", + "imporve", "improve", + "impules", "impulse", + "impusle", "impulse", + "imrpove", "improve", + "incldue", "include", + "incluse", "includes", + "indains", "indians", + "indeces", "indices", + "indiaan", "indiana", + "indluge", "indulge", + "indugle", "indulge", + "infalte", "inflate", + "infenro", "inferno", + "infered", "inferred", + 
"inferir", "inferior", + "infinet", "infinite", + "infinie", "infinite", + "infinit", "infinite", + "infornt", "infront", + "infroms", "informs", + "infrotn", "infront", + "inheirt", "inherit", + "inidans", "indians", + "initals", "initials", + "initisl", "initials", + "inlcine", "incline", + "inovker", "invoker", + "inpeach", "impeach", + "inpsect", "inspect", + "inpsire", "inspire", + "inquier", "inquire", + "inquriy", "inquiry", + "insaney", "insanely", + "inscets", "insects", + "insepct", "inspect", + "insipre", "inspire", + "insluts", "insults", + "instade", "instead", + "instint", "instinct", + "intenst", "intents", + "intered", "interred", + "interet", "interest", + "internt", "internet", + "interro", "interior", + "intrest", "interest", + "intrige", "intrigue", + "invlove", "involve", + "invoekr", "invoker", + "invovle", "involve", + "iornman", "ironman", + "iranain", "iranian", + "iranias", "iranians", + "iranina", "iranian", + "irleand", "ireland", + "ironamn", "ironman", + "isalmic", "islamic", + "isareli", "israeli", + "islamit", "islamist", + "islmaic", "islamic", + "isloate", "isolate", + "isralei", "israeli", + "isreali", "israeli", + "italias", "italians", + "jagaurs", "jaguars", + "jaguras", "jaguars", + "jamacia", "jamaica", + "jamaina", "jamaican", + "jamiaca", "jamaica", + "jamsine", "jasmine", + "janaury", "january", + "januray", "january", + "japanes", "japanese", + "jasmien", "jasmine", + "jaugars", "jaguars", + "jaunary", "january", + "jeircho", "jericho", + "jennins", "jennings", + "jeopary", "jeopardy", + "jeresys", "jerseys", + "jericoh", "jericho", + "jersyes", "jerseys", + "jewerly", "jewelry", + "jorunal", "journal", + "jounral", "journal", + "joystik", "joystick", + "juadism", "judaism", + "judasim", "judaism", + "judical", "judicial", + "juipter", "jupiter", + "junglig", "jungling", + "juptier", "jupiter", + "jusitfy", "justify", + "justfiy", "justify", + "karakoe", "karaoke", + "karoake", "karaoke", + "kenendy", "kennedy", + "kenndey", "kennedy", + "kentucy", "kentucky", + "keyboad", "keyboard", + "keychan", "keychain", + "keynode", "keynote", + "kicthen", "kitchen", + "killins", "killings", + "kineitc", "kinetic", + "kinghts", "knights", + "kinteic", "kinetic", + "kitches", "kitchens", + "kitites", "kitties", + "knietic", "kinetic", + "knigths", "knights", + "knuckel", "knuckle", + "kroeans", "koreans", + "krudish", "kurdish", + "ktichen", "kitchen", + "kubirck", "kubrick", + "kunckle", "knuckle", + "kurbick", "kubrick", + "kuridsh", "kurdish", + "laguage", "language", + "landins", "landings", + "lantren", "lantern", + "laready", "already", + "laregly", "largely", + "largley", "largely", + "lasanga", "lasagna", + "lasgana", "lasagna", + "latitue", "latitude", + "latnern", "lantern", + "launhed", "launched", + "lavendr", "lavender", + "leathal", "lethal", + "lefitst", "leftist", + "leftits", "leftist", + "legnths", "lengths", + "legnthy", "lengthy", + "legoins", "legions", + "leigons", "legions", + "lenghts", "lengths", + "lenoard", "leonard", + "lepoard", "leopard", + "lesbain", "lesbian", + "lesiban", "lesbian", + "lesiure", "leisure", + "liasion", "liaison", + "liasons", "liaisons", + "liberae", "liberate", + "liberas", "liberals", + "lienups", "lineups", + "liesure", "leisure", + "liftime", "lifetime", + "lighlty", "lightly", + "lightes", "lighters", + "ligthly", "lightly", + "linclon", "lincoln", + "linueps", "lineups", + "liqiuds", "liquids", + "lisence", "license", + "lisense", "license", + "listend", "listened", + "litecon", "litecoin", + "literae", 
"literate", + "lithuim", "lithium", + "litihum", "lithium", + "loadous", "loadouts", + "loenard", "leonard", + "loepard", "leopard", + "logiteh", "logitech", + "loosley", "loosely", + "luandry", "laundry", + "luckliy", "luckily", + "luicfer", "lucifer", + "lunatis", "lunatics", + "maching", "machine", + "machins", "machines", + "maclolm", "malcolm", + "macthup", "matchup", + "madsion", "madison", + "magents", "magnets", + "magicin", "magician", + "magolia", "magnolia", + "maidson", "madison", + "maintan", "maintain", + "mairlyn", "marilyn", + "malaira", "malaria", + "malaysa", "malaysia", + "malclom", "malcolm", + "manauls", "manuals", + "mandase", "mandates", + "mandats", "mandates", + "mangeld", "mangled", + "mangets", "magnets", + "manualy", "manually", + "manuver", "maneuver", + "marbels", "marbles", + "margart", "margaret", + "mariage", "marriage", + "mariens", "marines", + "maritan", "martian", + "marixsm", "marxism", + "mariyln", "marilyn", + "markede", "marketed", + "marlbes", "marbles", + "marliyn", "marilyn", + "marnies", "marines", + "marrage", "marriage", + "martail", "martial", + "martain", "martian", + "masacra", "mascara", + "massace", "massacre", + "mathcup", "matchup", + "mathwes", "mathews", + "matrial", "martial", + "maunals", "manuals", + "mcalren", "mclaren", + "meanins", "meanings", + "medicad", "medicaid", + "medicae", "medicare", + "medioce", "mediocre", + "meixcan", "mexican", + "meldoic", "melodic", + "melieux", "milieux", + "melodis", "melodies", + "memeber", "member", + "memoery", "memory", + "memorie", "memory", + "menally", "mentally", + "mentaly", "mentally", + "meoldic", "melodic", + "meranda", "veranda", + "merchat", "merchant", + "merucry", "mercury", + "messagd", "messaged", + "messaih", "messiah", + "metagem", "metagame", + "metalic", "metallic", + "mexcian", "mexican", + "michina", "michigan", + "midfied", "midfield", + "midotwn", "midtown", + "midtwon", "midtown", + "migrans", "migrants", + "militat", "militant", + "militis", "militias", + "miltary", "military", + "mimimum", "minimum", + "mineras", "minerals", + "mininos", "minions", + "ministr", "minister", + "ministy", "ministry", + "minoins", "minions", + "minstry", "ministry", + "minumum", "minimum", + "mirrord", "mirrored", + "misandy", "misandry", + "misison", "mission", + "misouri", "missouri", + "mispell", "misspell", + "missils", "missiles", + "mistery", "mystery", + "mobiliy", "mobility", + "modualr", "modular", + "momento", "memento", + "momment", "moment", + "monarcy", "monarchy", + "monatge", "montage", + "monglos", "mongols", + "monitos", "monitors", + "monstre", "monster", + "montaeg", "montage", + "montrel", "montreal", + "monumet", "monument", + "morbidy", "morbidly", + "morgage", "mortgage", + "morphen", "morphine", + "morphie", "morphine", + "morroco", "morocco", + "mortage", "mortgage", + "mosnter", "monster", + "mosture", "moisture", + "motivet", "motivate", + "motnage", "montage", + "motoral", "motorola", + "mountan", "mountain", + "movment", "movement", + "mucuous", "mucous", + "muesums", "museums", + "muliple", "multiple", + "mulsims", "muslims", + "multipe", "multiple", + "multipy", "multiply", + "munbers", "numbers", + "munchis", "munchies", + "murderd", "murdered", + "muscial", "musical", + "mushrom", "mushroom", + "musilms", "muslims", + "muslces", "muscles", + "musuems", "museums", + "mutatin", "mutation", + "mypsace", "myspace", + "mysapce", "myspace", + "napolen", "napoleon", + "narhwal", "narwhal", + "natique", "antique", + "nativey", "natively", + "natrual", "natural", 
+ "naugthy", "naughty", + "nauseos", "nauseous", + "nautils", "nautilus", + "nautral", "natural", + "nautres", "natures", + "nectode", "netcode", + "needels", "needles", + "neruons", "neurons", + "neslave", "enslave", + "netocde", "netcode", + "netowrk", "network", + "netural", "neutral", + "neturon", "neutron", + "netwrok", "network", + "neurton", "neutron", + "neuterd", "neutered", + "nighlty", "nightly", + "nigthly", "nightly", + "nihilim", "nihilism", + "ninties", "1990s", + "niverse", "inverse", + "nocture", "nocturne", + "nominae", "nominate", + "nominet", "nominate", + "nonsene", "nonsense", + "noramls", "normals", + "norhern", "northern", + "normaly", "normally", + "normany", "normandy", + "northen", "northern", + "nostris", "nostrils", + "notario", "ontario", + "notebok", "notebook", + "nothern", "northern", + "nowdays", "nowadays", + "nrivana", "nirvana", + "nuaghty", "naughty", + "nubmers", "numbers", + "nucelar", "nuclear", + "nucelus", "nucleus", + "nuclean", "unclean", + "nuclues", "nucleus", + "nucular", "nuclear", + "nuerons", "neurons", + "nuetral", "neutral", + "nuetron", "neutron", + "nulcear", "nuclear", + "nullfiy", "nullify", + "nusance", "nuisance", + "nutriet", "nutrient", + "oarcles", "oracles", + "obivous", "obvious", + "obvoius", "obvious", + "ocarnia", "ocarina", + "ocasion", "occasion", + "occured", "occurred", + "ocotber", "october", + "ocotpus", "octopus", + "ocraina", "ocarina", + "ocuntry", "country", + "ocurred", "occurred", + "ofcoure", "ofcourse", + "offcers", "officers", + "offical", "official", + "offisde", "offside", + "oftenly", "often", + "ogrilla", "gorilla", + "olmypic", "olympic", + "olreans", "orleans", + "olympis", "olympics", + "olypmic", "olympic", + "omision", "omission", + "omiting", "omitting", + "omlette", "omelette", + "ommited", "omitted", + "onatrio", "ontario", + "onbaord", "onboard", + "onborad", "onboard", + "ontairo", "ontario", + "ontraio", "ontario", + "opartor", "operator", + "openess", "openness", + "opitcal", "optical", + "opitmal", "optimal", + "oponent", "opponent", + "oposite", "opposite", + "oppenly", "openly", + "opponet", "opponent", + "oprhans", "orphans", + "optimim", "optimism", + "oracels", "oracles", + "oragnes", "oranges", + "oragsms", "orgasms", + "oralces", "oracles", + "orbtial", "orbital", + "orcales", "oracles", + "orelans", "orleans", + "organes", "organise", + "organie", "organise", + "organim", "organism", + "orginal", "original", + "orhpans", "orphans", + "oribtal", "orbital", + "orlenas", "orleans", + "orpahns", "orphans", + "orthodx", "orthodox", + "outfied", "outfield", + "outsidr", "outsider", + "overhal", "overhaul", + "overpad", "overpaid", + "oversue", "overuse", + "overtun", "overturn", + "ownders", "wonders", + "owuldve", "wouldve", + "oylmpic", "olympic", + "pacakge", "package", + "pacifit", "pacifist", + "packade", "packaged", + "pacthes", "patches", + "pahntom", "phantom", + "paitent", "patient", + "palcebo", "placebo", + "pallete", "palette", + "palster", "plaster", + "palyboy", "playboy", + "pamflet", "pamphlet", + "pamplet", "pamphlet", + "pancaks", "pancakes", + "pandroa", "pandora", + "panthen", "pantheon", + "paradim", "paradigm", + "paradse", "parades", + "paralel", "parallel", + "paranoa", "paranoia", + "parises", "praises", + "parites", "parties", + "partice", "particle", + "partick", "patrick", + "partiel", "particle", + "partiot", "patriot", + "partols", "patrols", + "passabe", "passable", + "passivs", "passives", + "pasuing", "pausing", + "pateint", "patient", + "pathces", 
"patches", + "patiens", "patients", + "patirot", "patriot", + "patrcik", "patrick", + "patrios", "patriots", + "patroit", "patriot", + "peaples", "peoples", + "pebbels", "pebbles", + "peirced", "pierced", + "penatly", "penalty", + "pendulm", "pendulum", + "penguis", "penguins", + "penicls", "pencils", + "penison", "pension", + "penisse", "penises", + "penitum", "pentium", + "pensies", "penises", + "pensino", "pension", + "pentuim", "pentium", + "peopels", "peoples", + "percise", "precise", + "perdict", "predict", + "perfers", "prefers", + "perhasp", "perhaps", + "perhpas", "perhaps", + "perisan", "persian", + "perjery", "perjury", + "permade", "premade", + "permier", "premier", + "permise", "premise", + "permium", "premium", + "peroids", "periods", + "peronal", "personal", + "perpaid", "prepaid", + "perphas", "perhaps", + "persain", "persian", + "persets", "presets", + "persits", "persist", + "persued", "pursued", + "persuit", "pursuit", + "pervail", "prevail", + "perview", "preview", + "pharoah", "pharaoh", + "phatnom", "phantom", + "phsyics", "physics", + "phyiscs", "physics", + "physcis", "physics", + "physiqe", "physique", + "picthed", "pitched", + "picther", "pitcher", + "picthes", "pitches", + "piegons", "pigeons", + "piglrim", "pilgrim", + "pigoens", "pigeons", + "pilgirm", "pilgrim", + "pilrgim", "pilgrim", + "pinoeer", "pioneer", + "pinpoit", "pinpoint", + "pionere", "pioneer", + "pireced", "pierced", + "pithces", "pitches", + "plantes", "planets", + "plastis", "plastics", + "plastre", "plaster", + "plataeu", "plateau", + "plateua", "plateau", + "playabe", "playable", + "playofs", "playoffs", + "plesant", "pleasant", + "pligrim", "pilgrim", + "ploygon", "polygon", + "ploymer", "polymer", + "podemso", "podemos", + "podmeos", "podemos", + "poeples", "peoples", + "poignat", "poignant", + "poineer", "pioneer", + "pointes", "pointers", + "poisond", "poisoned", + "polgyon", "polygon", + "polical", "political", + "polishs", "polishes", + "polisse", "polishes", + "politey", "politely", + "poluted", "polluted", + "polutes", "pollutes", + "popluar", "popular", + "populer", "popular", + "populos", "populous", + "porpose", "propose", + "porshan", "portion", + "porshon", "portion", + "portait", "portrait", + "portary", "portray", + "portras", "portrays", + "portrat", "portrait", + "posions", "poisons", + "positon", "position", + "positve", "positive", + "possibe", "possible", + "possiby", "possibly", + "postdam", "potsdam", + "postion", "position", + "postive", "positive", + "potatos", "potatoes", + "potical", "optical", + "potrait", "portrait", + "powderd", "powdered", + "poweful", "powerful", + "poylgon", "polygon", + "poylmer", "polymer", + "practie", "practise", + "praisse", "praises", + "praries", "prairies", + "prasied", "praised", + "prasies", "praises", + "pratice", "practice", + "preamde", "premade", + "preceed", "precede", + "precice", "precise", + "preests", "presets", + "prehaps", "perhaps", + "preimer", "premier", + "preimum", "premium", + "preists", "priests", + "preivew", "preview", + "premeir", "premier", + "premiee", "premiere", + "premire", "premier", + "premits", "permits", + "premius", "premiums", + "premuim", "premium", + "prepair", "prepare", + "preriod", "period", + "presens", "presents", + "presest", "presets", + "presist", "persist", + "prestes", "presets", + "presude", "presumed", + "pretene", "pretense", + "pretens", "pretends", + "preveiw", "preview", + "prevert", "pervert", + "previal", "prevail", + "previes", "previews", + "previos", "previous", + "priased", 
"praised", + "priases", "praises", + "printes", "printers", + "pristen", "pristine", + "probabe", "probable", + "probaly", "probably", + "probelm", "problem", + "procede", "proceed", + "procees", "proceeds", + "procesd", "proceeds", + "proclam", "proclaim", + "produly", "proudly", + "produse", "produces", + "progidy", "prodigy", + "progrom", "pogrom", + "prohibt", "prohibit", + "prohpet", "prophet", + "prologe", "prologue", + "promose", "promotes", + "promots", "promotes", + "prompty", "promptly", + "promtps", "prompts", + "pronous", "pronouns", + "prooved", "proved", + "propeht", "prophet", + "prophey", "prophecy", + "propper", "proper", + "protals", "portals", + "protecs", "protects", + "protess", "protests", + "protocl", "protocol", + "protray", "portray", + "prouldy", "proudly", + "provded", "provided", + "provine", "province", + "prusuit", "pursuit", + "pryamid", "pyramid", + "pscyhed", "psyched", + "ptiched", "pitched", + "pticher", "pitcher", + "puasing", "pausing", + "publicy", "publicly", + "publsih", "publish", + "puhsups", "pushups", + "punishs", "punishes", + "punisse", "punishes", + "pursiut", "pursuit", + "pursude", "pursued", + "purused", "pursued", + "pushpus", "pushups", + "pyarmid", "pyramid", + "pyramis", "pyramids", + "pyrmaid", "pyramid", + "pysched", "psyched", + "qaulify", "qualify", + "qaulity", "quality", + "qauntum", "quantum", + "quailfy", "qualify", + "quailty", "quality", + "queires", "queries", + "queitly", "quietly", + "quereis", "queries", + "quicket", "quickest", + "quielty", "quietly", + "quitely", "quietly", + "qunatum", "quantum", + "qunetin", "quentin", + "racisst", "racists", + "racthet", "ratchet", + "radaint", "radiant", + "radiane", "radiance", + "radicas", "radicals", + "radiers", "raiders", + "raelism", "realism", + "raidant", "radiant", + "railrod", "railroad", + "rainbos", "rainbows", + "raoches", "roaches", + "raoming", "roaming", + "raptros", "raptors", + "raputre", "rapture", + "rathcet", "ratchet", + "ratpure", "rapture", + "reacing", "reaching", + "reagrds", "regards", + "realies", "realise", + "realsie", "realise", + "realsim", "realism", + "realtes", "relates", + "reamins", "remains", + "reapirs", "repairs", + "rebouns", "rebounds", + "rebulit", "rebuilt", + "recalim", "reclaim", + "receips", "receipts", + "recided", "resided", + "reciept", "receipt", + "recievd", "recieved", + "recieve", "receive", + "recitfy", "rectify", + "recived", "received", + "reclami", "reclaim", + "recliam", "reclaim", + "recorre", "recorder", + "recoves", "recovers", + "recpies", "recipes", + "redeemd", "redeemed", + "redners", "renders", + "refelct", "reflect", + "referal", "referral", + "refered", "referred", + "referig", "refering", + "referrs", "refers", + "reflexs", "reflexes", + "refrers", "refers", + "refroms", "reforms", + "refusla", "refusal", + "regerts", "regrets", + "regiems", "regimes", + "regimet", "regiment", + "registy", "registry", + "regluar", "regular", + "regrest", "regrets", + "regulae", "regulate", + "regulas", "regulars", + "regulsr", "regulars", + "reigmes", "regimes", + "reigons", "regions", + "reitres", "retires", + "reivews", "reviews", + "reknown", "renown", + "relaise", "realise", + "relapes", "relapse", + "relaspe", "relapse", + "relatie", "relative", + "relatin", "relation", + "relcaim", "reclaim", + "releive", "relieve", + "releses", "releases", + "relfect", "reflect", + "reliabe", "reliable", + "relient", "reliant", + "relized", "realised", + "relpase", "relapse", + "remaind", "remained", + "remaing", "remaining", + "remakrs", 
"remarks", + "remannt", "remnant", + "remeber", "remember", + "remians", "remains", + "remnans", "remnants", + "renderd", "rendered", + "renegae", "renegade", + "renmant", "remnant", + "rentors", "renters", + "rentres", "renters", + "renuion", "reunion", + "repaird", "repaired", + "repalys", "replays", + "repblic", "republic", + "repeast", "repeats", + "repects", "respects", + "repitle", "reptile", + "replase", "replaces", + "replayd", "replayed", + "reponse", "response", + "repostd", "reposted", + "repsawn", "respawn", + "repsond", "respond", + "repsots", "reposts", + "reptiel", "reptile", + "reptils", "reptiles", + "repubic", "republic", + "republi", "republic", + "repulic", "republic", + "reqiuem", "requiem", + "requeim", "requiem", + "requime", "requiem", + "requred", "required", + "resapwn", "respawn", + "rescuse", "rescues", + "resembe", "resemble", + "reslove", "resolve", + "resolvs", "resolves", + "resonet", "resonate", + "resouce", "resource", + "resovle", "resolve", + "respest", "respects", + "respone", "response", + "respwan", "respawn", + "ressits", "resists", + "restord", "restored", + "resuced", "rescued", + "resuces", "rescues", + "retrive", "retrieve", + "returnd", "returned", + "reuinon", "reunion", + "reveald", "revealed", + "reveiws", "reviews", + "revelas", "reveals", + "reveral", "reversal", + "reviere", "reviewer", + "reviewd", "reviewed", + "reviewr", "reviewer", + "revolvr", "revolver", + "revolvs", "revolves", + "rewirte", "rewrite", + "reworkd", "reworked", + "rewriet", "rewrite", + "reynols", "reynolds", + "rhapsoy", "rhapsody", + "rhythem", "rhythm", + "rhythim", "rhythm", + "rhytmic", "rhythmic", + "riaders", "raiders", + "ritlain", "ritalin", + "ritoers", "rioters", + "rivarly", "rivalry", + "rivlary", "rivalry", + "roahces", "roaches", + "robotis", "robotics", + "rococco", "rococo", + "roestta", "rosetta", + "roiters", "rioters", + "roleply", "roleplay", + "romaina", "romania", + "romaing", "roaming", + "romanin", "romanian", + "romanna", "romanian", + "roomate", "roommate", + "rotuers", "routers", + "rugters", "rutgers", + "rulebok", "rulebook", + "rumorus", "rumours", + "rumuors", "rumours", + "runnung", "running", + "ruslted", "rustled", + "russina", "russian", + "russion", "russian", + "rusteld", "rustled", + "rythmic", "rhythmic", + "rythyms", "rhythms", + "sacrasm", "sarcasm", + "saddnes", "saddens", + "sadistc", "sadistic", + "sadning", "sanding", + "salaris", "salaries", + "salavge", "salvage", + "salvery", "slavery", + "salying", "slaying", + "sampels", "samples", + "samruai", "samurai", + "samuari", "samurai", + "samuria", "samurai", + "sandlas", "sandals", + "sandnig", "sanding", + "sanlder", "sandler", + "santorm", "santorum", + "sapphie", "sapphire", + "sarcams", "sarcasm", + "sargant", "sergeant", + "sasuage", "sausage", + "satifsy", "satisfy", + "satsify", "satisfy", + "satsohi", "satoshi", + "savanha", "savannah", + "savannh", "savannah", + "saveing", "saving", + "sawnsea", "swansea", + "sawnson", "swanson", + "scandas", "scandals", + "scannig", "scanning", + "scartch", "scratch", + "scheems", "schemes", + "schoold", "schooled", + "sciense", "sciences", + "scinece", "science", + "scootes", "scooters", + "scorpin", "scorpion", + "scpeter", "scepter", + "scracth", "scratch", + "scrambe", "scramble", + "scritps", "scripts", + "scrolld", "scrolled", + "scrpits", "scripts", + "scyhter", "scyther", + "seached", "searched", + "seaches", "searches", + "seahaws", "seahawks", + "seantor", "senator", + "searchd", "searched", + "searchs", "searches", + 
"sebrian", "serbian", + "secerts", "secrets", + "secpter", "scepter", + "secrest", "secrets", + "secrety", "secretly", + "seflies", "selfies", + "seguoys", "segues", + "seinors", "seniors", + "selifes", "selfies", + "senoirs", "seniors", + "sensure", "censure", + "sentaor", "senator", + "sentris", "sentries", + "serbain", "serbian", + "sergeat", "sergeant", + "sergent", "sergeant", + "seriban", "serbian", + "servans", "servants", + "sesnors", "sensors", + "settins", "settings", + "severly", "severely", + "sexualy", "sexually", + "seziure", "seizure", + "shaddow", "shadow", + "shanghi", "shanghai", + "shaprie", "sharpie", + "shaprly", "sharply", + "sharipe", "sharpie", + "shcemes", "schemes", + "sheelpe", "sheeple", + "sheepel", "sheeple", + "shephed", "shepherd", + "sherlok", "sherlock", + "shetler", "shelter", + "shevles", "shelves", + "shfiter", "shifter", + "shieldd", "shielded", + "shiping", "shipping", + "shirely", "shirley", + "shitfer", "shifter", + "shledon", "sheldon", + "shleter", "shelter", + "shoudln", "should", + "shouldt", "shouldnt", + "shoutot", "shoutout", + "showede", "showered", + "showerd", "showered", + "shperes", "spheres", + "shriley", "shirley", + "siblins", "siblings", + "sidelen", "sideline", + "sideral", "sidereal", + "siezing", "seizing", + "siezure", "seizure", + "signfiy", "signify", + "signins", "signings", + "signles", "singles", + "silders", "sliders", + "silenty", "silently", + "similir", "similiar", + "simliar", "similar", + "simplet", "simplest", + "simpley", "simply", + "simplfy", "simplify", + "simpliy", "simplify", + "simposn", "simpson", + "simspon", "simpson", + "singals", "signals", + "singels", "singles", + "singify", "signify", + "singsog", "singsong", + "sitmuli", "stimuli", + "skecthy", "sketchy", + "skeletl", "skeletal", + "skeptis", "skeptics", + "sketchs", "sketches", + "sketpic", "skeptic", + "skpetic", "skeptic", + "sktechy", "sketchy", + "skwyard", "skyward", + "slavage", "salvage", + "slayign", "slaying", + "sldiers", "sliders", + "slefies", "selfies", + "slighly", "slightly", + "slighty", "slightly", + "slippes", "slippers", + "slippey", "slippery", + "smaples", "samples", + "smartre", "smarter", + "smaurai", "samurai", + "snadler", "sandler", + "snigles", "singles", + "snippes", "snippets", + "snodwen", "snowden", + "snwoden", "snowden", + "snycing", "syncing", + "snyergy", "synergy", + "socialy", "socially", + "sofware", "software", + "soildly", "solidly", + "soldies", "soldiers", + "soldily", "solidly", + "somaila", "somalia", + "someons", "someones", + "somethn", "somethin", + "southen", "southern", + "soveits", "soviets", + "spacebr", "spacebar", + "spainsh", "spanish", + "spansih", "spanish", + "spanwed", "spawned", + "sparkel", "sparkle", + "spartas", "spartans", + "spartsn", "spartans", + "sparyed", "sprayed", + "spawend", "spawned", + "spawnig", "spawning", + "specail", "special", + "specfic", "specific", + "specias", "specials", + "specisl", "specials", + "spectum", "spectrum", + "speechs", "speeches", + "spehres", "spheres", + "speical", "special", + "speices", "species", + "spellig", "spelling", + "spindel", "spindle", + "spiritd", "spirited", + "splaton", "splatoon", + "splittr", "splitter", + "spoiles", "spoilers", + "spoitfy", "spotify", + "spolied", "spoiled", + "sponser", "sponsor", + "sporles", "sproles", + "sporuts", "sprouts", + "spotfiy", "spotify", + "sprinke", "sprinkle", + "sproels", "sproles", + "spwaned", "spawned", + "sqaures", "squares", + "sqeuaky", "squeaky", + "sqiushy", "squishy", + "squarey", "squarely", 
+ "squirel", "squirtle", + "squirle", "squirrel", + "squirrl", "squirrel", + "squirte", "squirtle", + "squsihy", "squishy", + "sriraca", "sriracha", + "srpouts", "sprouts", + "sryians", "syrians", + "sryinge", "syringe", + "stadius", "stadiums", + "staduim", "stadium", + "stagnat", "stagnant", + "staidum", "stadium", + "stakler", "stalker", + "stalkes", "stalkers", + "stamnia", "stamina", + "staoshi", "satoshi", + "starins", "strains", + "startde", "startled", + "startus", "startups", + "statits", "statist", + "statsit", "statist", + "statuer", "stature", + "statuse", "statutes", + "statuts", "statutes", + "stautes", "statues", + "stealty", "stealthy", + "steeles", "steelers", + "steorid", "steroid", + "steriel", "sterile", + "sterlie", "sterile", + "stickes", "stickers", + "stiring", "stirring", + "stirker", "striker", + "stirrig", "stirring", + "stitchs", "stitches", + "stlaker", "stalker", + "stlyish", "stylish", + "storeis", "stories", + "storise", "stories", + "stormde", "stormed", + "straigt", "straight", + "straind", "strained", + "streamd", "streamed", + "stregth", "strength", + "strengh", "strength", + "streoid", "steroid", + "stresss", "stresses", + "strians", "strains", + "stricty", "strictly", + "striekr", "striker", + "stromed", "stormed", + "stubbon", "stubborn", + "studing", "studying", + "stuidos", "studios", + "stunami", "tsunami", + "stupidr", "stupider", + "stupidy", "stupidly", + "stupire", "stupider", + "suasage", "sausage", + "subisdy", "subsidy", + "subjest", "subjects", + "subtiel", "subtitle", + "succede", "succeed", + "succeds", "succeeds", + "succees", "succeeds", + "succesd", "succeeds", + "suceeds", "succeeds", + "suddeny", "suddenly", + "suefull", "usefull", + "sufferd", "suffered", + "summonr", "summoner", + "summore", "summoner", + "sunggle", "snuggle", + "sunifre", "sunfire", + "superme", "supreme", + "suposed", "supposed", + "suposes", "supposes", + "suppoed", "supposed", + "suppost", "supports", + "suprass", "surpass", + "supress", "suppress", + "suprisd", "suprised", + "suprise", "surprise", + "suprize", "surprise", + "supsend", "suspend", + "suround", "surround", + "surpeme", "supreme", + "surroud", "surround", + "sweidsh", "swedish", + "swiflty", "swiftly", + "swiming", "swimming", + "switchs", "switches", + "switfly", "swiftly", + "swnasea", "swansea", + "sycning", "syncing", + "sycther", "scyther", + "syirans", "syrians", + "sykward", "skyward", + "syllabe", "syllable", + "symetry", "symmetry", + "symmety", "symmetry", + "symobls", "symbols", + "sympaty", "sympathy", + "symtpom", "symptom", + "synegry", "synergy", + "synoynm", "synonym", + "sypmtom", "symptom", + "syracue", "syracuse", + "syrains", "syrians", + "sysadmn", "sysadmin", + "systemc", "systemic", + "sytlish", "stylish", + "tabacco", "tobacco", + "tailban", "taliban", + "tailord", "tailored", + "talbian", "taliban", + "tallets", "tallest", + "tangeld", "tangled", + "tanlged", "tangled", + "targetd", "targeted", + "taryvon", "trayvon", + "teached", "taught", + "teaspon", "teaspoon", + "techeis", "techies", + "tehcies", "techies", + "temepst", "tempest", + "tempels", "temples", + "tempets", "tempest", + "templas", "templars", + "tempset", "tempest", + "tenacle", "tentacle", + "tendacy", "tendency", + "tequlia", "tequila", + "tesitfy", "testify", + "testice", "testicle", + "teusday", "tuesday", + "thankyu", "thankyou", + "thearpy", "therapy", + "theistc", "theistic", + "theives", "thieves", + "themsef", "themself", + "therefo", "thereof", + "therien", "therein", + "theroem", "theorem", + 
"thesits", "theists", + "thiests", "theists", + "thirldy", "thirdly", + "thirten", "thirteen", + "thirtsy", "thirsty", + "thoerem", "theorem", + "thorats", "throats", + "thornes", "thrones", + "thoruim", "thorium", + "thoughs", "thoughts", + "threadd", "threaded", + "threeof", "thereof", + "thridly", "thirdly", + "thristy", "thirsty", + "throast", "throats", + "throium", "thorium", + "thryoid", "thyroid", + "thyorid", "thyroid", + "thyriod", "thyroid", + "tigther", "tighter", + "tiolets", "toilets", + "tirdent", "trident", + "titanim", "titanium", + "tlaking", "talking", + "tobbaco", "tobacco", + "toliets", "toilets", + "tolkein", "tolkien", + "tomatos", "tomatoes", + "tongiht", "tonight", + "tonuges", "tongues", + "toppins", "toppings", + "torando", "tornado", + "torndao", "tornado", + "torpdeo", "torpedo", + "torrest", "torrents", + "tortila", "tortilla", + "toruney", "tourney", + "toubles", "troubles", + "touchda", "touchpad", + "tounrey", "tourney", + "tourisy", "touristy", + "tourits", "tourist", + "tournes", "tourneys", + "toursim", "tourism", + "toursit", "tourist", + "towords", "towards", + "trackes", "trackers", + "trailes", "trailers", + "traines", "trainers", + "trainig", "training", + "tralier", "trailer", + "tratior", "traitor", + "traveld", "traveled", + "travere", "traverse", + "travesy", "travesty", + "travles", "travels", + "treasue", "treasure", + "treatis", "treaties", + "tremelo", "tremolo", + "trendig", "trending", + "trialer", "trailer", + "triange", "triangle", + "triator", "traitor", + "trickey", "trickery", + "tridnet", "trident", + "trimuph", "triumph", + "trinkes", "trinkets", + "trinkst", "trinkets", + "trintiy", "trinity", + "triolgy", "trilogy", + "troleld", "trolled", + "troling", "trolling", + "tronado", "tornado", + "tropedo", "torpedo", + "trudnle", "trundle", + "truimph", "triumph", + "trukish", "turkish", + "trundel", "trundle", + "trunlde", "trundle", + "tryahrd", "tryhard", + "tryavon", "trayvon", + "tsamina", "stamina", + "tsnuami", "tsunami", + "tsuanmi", "tsunami", + "tsunmai", "tsunami", + "tuesdsy", "tuesdays", + "tunnles", "tunnels", + "turbins", "turbines", + "turksih", "turkish", + "turltes", "turtles", + "turrest", "turrets", + "turtels", "turtles", + "tuseday", "tuesday", + "tusnami", "tsunami", + "tutrles", "turtles", + "twiligt", "twilight", + "tyelnol", "tylenol", + "typcial", "typical", + "tyrhard", "tryhard", + "tyrrany", "tyranny", + "udpated", "updated", + "uesfull", "usefull", + "ugprade", "upgrade", + "ukarine", "ukraine", + "ukranie", "ukraine", + "ukriane", "ukraine", + "ultimae", "ultimate", + "umbrela", "umbrella", + "unahppy", "unhappy", + "unbannd", "unbanned", + "underog", "undergo", + "unfairy", "unfairly", + "ungoldy", "ungodly", + "unicors", "unicorns", + "uniquey", "uniquely", + "unknwon", "unknown", + "unkonwn", "unknown", + "unlcean", "unclean", + "unlcoks", "unlocks", + "unlcuky", "unlucky", + "unlikey", "unlikely", + "unopend", "unopened", + "unprone", "unproven", + "unusabe", "unusable", + "unworty", "unworthy", + "upgarde", "upgrade", + "upgrads", "upgrades", + "uplaods", "uploads", + "upsteam", "upstream", + "urainum", "uranium", + "uranuim", "uranium", + "uretrha", "urethra", + "urkaine", "ukraine", + "urnaium", "uranium", + "urugauy", "uruguay", + "usefull", "useful", + "usefuly", "usefully", + "utiltiy", "utility", + "utopain", "utopian", + "utpoian", "utopian", + "vaccins", "vaccines", + "vaccume", "vacuum", + "vageuly", "vaguely", + "vaguley", "vaguely", + "vairant", "variant", + "valenca", "valencia", + 
"valetta", "valletta", + "valkyre", "valkyrie", + "valuabe", "valuable", + "valuble", "valuable", + "vampirs", "vampires", + "vanguad", "vanguard", + "varaint", "variant", + "vareity", "variety", + "varians", "variants", + "varient", "variant", + "varisty", "varsity", + "varitey", "variety", + "varstiy", "varsity", + "vasalls", "vassals", + "vasslas", "vassals", + "vaugely", "vaguely", + "vecotrs", "vectors", + "vectros", "vectors", + "veitnam", "vietnam", + "veiwers", "viewers", + "vendeta", "vendetta", + "verbaly", "verbally", + "verical", "vertical", + "verious", "various", + "verison", "version", + "veritgo", "vertigo", + "versoin", "version", + "vertgio", "vertigo", + "vessles", "vessels", + "vetween", "between", + "viatmin", "vitamin", + "vibratr", "vibrator", + "vicitms", "victims", + "vientam", "vietnam", + "vigrins", "virgins", + "vikigns", "vikings", + "villian", "villain", + "villify", "vilify", + "virbate", "vibrate", + "virigns", "virgins", + "virtiol", "vitriol", + "virutal", "virtual", + "virutes", "virtues", + "visable", "visible", + "visably", "visibly", + "visbily", "visibly", + "visting", "visiting", + "vistors", "visitors", + "vitaliy", "vitality", + "vitamis", "vitamins", + "vitenam", "vietnam", + "vitirol", "vitriol", + "vitmain", "vitamin", + "vitroil", "vitriol", + "vitrual", "virtual", + "vitrues", "virtues", + "volatge", "voltage", + "volumne", "volume", + "votlage", "voltage", + "vrigins", "virgins", + "waclott", "walcott", + "wacther", "watcher", + "waitres", "waiters", + "waktins", "watkins", + "warcrat", "warcraft", + "wardobe", "wardrobe", + "wariwck", "warwick", + "warrany", "warranty", + "warrent", "warrant", + "warrios", "warriors", + "warwcik", "warwick", + "wathcer", "watcher", + "watiers", "waiters", + "waviers", "waivers", + "wawrick", "warwick", + "wayword", "wayward", + "webapge", "webpage", + "webiste", "website", + "webstie", "website", + "weigths", "weights", + "weilded", "wielded", + "weirldy", "weirdly", + "weirods", "weirdos", + "welathy", "wealthy", + "wendsay", "wednesday", + "wensday", "wednesday", + "wepbage", "webpage", + "weridly", "weirdly", + "weridos", "weirdos", + "werstle", "wrestle", + "wesbite", "website", + "whaeton", "wheaton", + "whipser", "whisper", + "whislte", "whistle", + "whistel", "whistle", + "whitsle", "whistle", + "whsiper", "whisper", + "wiaters", "waiters", + "wiavers", "waivers", + "widgest", "widgets", + "wieghts", "weights", + "wigdets", "widgets", + "windosr", "windsor", + "winnins", "winnings", + "winsdor", "windsor", + "wintson", "winston", + "wirting", "writing", + "wisnton", "winston", + "withces", "witches", + "witheld", "withheld", + "withing", "within", + "withold", "withhold", + "wlacott", "walcott", + "wokring", "working", + "workins", "workings", + "woudlnt", "wouldnt", + "woudlve", "wouldve", + "woulndt", "wouldnt", + "wreslte", "wrestle", + "wroking", "working", + "wtiches", "witches", + "wupport", "support", + "yaching", "yachting", + "younget", "youngest", + "youseff", "yousef", + "youself", "yourself", + "zaelots", "zealots", + "zealtos", "zealots", + "zelaots", "zealots", + "zelaous", "zealous", + "zimbabe", "zimbabwe", + "zionsim", "zionism", + "zionsit", "zionist", + "zoinism", "zionism", + "zoinist", "zionist", + "abbout", "about", + "abilty", "ability", + "absail", "abseil", + "abutts", "abuts", + "achive", "achieve", + "acused", "accused", + "addopt", "adopt", + "addres", "address", + "adress", "address", + "aeriel", "aerial", + "affort", "afford", + "agains", "against", + "aginst", "against", 
+ "ahppen", "happen", + "aiport", "airport", + "aisian", "asian", + "albiet", "albeit", + "alchol", "alcohol", + "aledge", "allege", + "aleged", "alleged", + "allign", "align", + "almsot", "almost", + "alomst", "almost", + "alowed", "allowed", + "alwasy", "always", + "alwyas", "always", + "amking", "making", + "ammend", "amend", + "amoung", "among", + "aplied", "applied", + "appart", "apart", + "aquire", "acquire", + "aready", "already", + "arised", "arose", + "arival", "arrival", + "arrary", "array", + "artice", "article", + "asetic", "ascetic", + "asside", "aside", + "attemp", "attempt", + "attemt", "attempt", + "auther", "author", + "awared", "awarded", + "bedore", "before", + "beeing", "being", + "befoer", "before", + "beggin", "begin", + "beleif", "belief", + "belive", "believe", + "beteen", "between", + "betwen", "between", + "beween", "between", + "bianry", "binary", + "boyant", "buoyant", + "broady", "broadly", + "buddah", "buddha", + "buring", "burying", + "carcas", "carcass", + "casion", "caisson", + "casued", "caused", + "casues", "causes", + "ceasar", "caesar", + "cencus", "census", + "censur", "censor", + "cheifs", "chiefs", + "circut", "circuit", + "clasic", "classic", + "coform", "conform", + "comany", "company", + "coucil", "council", + "curent", "current", + "densly", "densely", + "deside", "decide", + "devels", "delves", + "devide", "divide", + "dieing", "dying", + "divice", "device", + "doulbe", "double", + "dreasm", "dreams", + "duting", "during", + "ealier", "earlier", + "eearly", "early", + "efford", "effort", + "emited", "emitted", + "emnity", "enmity", + "enduce", "induce", + "enlish", "english", + "erally", "orally", + "eratic", "erratic", + "ethose", "those", + "exampt", "exempt", + "excact", "exact", + "excell", "excel", + "exerpt", "excerpt", + "exinct", "extinct", + "expell", "expel", + "expoch", "epoch", + "extint", "extinct", + "facist", "fascist", + "faught", "fought", + "finaly", "finally", + "forsaw", "foresaw", + "fougth", "fought", + "fourty", "forty", + "foward", "forward", + "freind", "friend", + "fromed", "formed", + "fufill", "fulfill", + "futher", "further", + "gardai", "gardaí", + "geting", "getting", + "ghandi", "gandhi", + "glight", "flight", + "gloabl", "global", + "godess", "goddess", + "guilia", "giulia", + "guilio", "giulio", + "habeus", "habeas", + "harras", "harass", + "hatian", "haitian", + "heared", "heard", + "hertzs", "hertz", + "hieght", "height", + "higest", "highest", + "higway", "highway", + "honory", "honorary", + "howver", "however", + "hstory", "history", + "hunman", "human", + "husban", "husband", + "hvaing", "having", + "illess", "illness", + "ilness", "illness", + "imagin", "imagine", + "imense", "immense", + "includ", "include", + "inital", "initial", + "interm", "interim", + "intial", "initial", + "invlid", "invalid", + "iunior", "junior", + "jaques", "jacques", + "jospeh", "joseph", + "jouney", "journey", + "klenex", "kleenex", + "labled", "labelled", + "largst", "largest", + "larrry", "larry", + "lefted", "left", + "lenght", "length", + "lerans", "learns", + "liason", "liaison", + "libary", "library", + "lieing", "lying", + "lieved", "lived", + "littel", "little", + "livley", "lively", + "lonley", "lonely", + "mailny", "mainly", + "markes", "marks", + "mileau", "milieu", + "milion", "million", + "millon", "million", + "misile", "missile", + "missen", "mizzen", + "missle", "missile", + "mkaing", "making", + "moderm", "modem", + "moreso", "more", + "mounth", "month", + "myraid", "myriad", + "naieve", "naive", + "nestin", 
"nesting", + "nineth", "ninth", + "noveau", "nouveau", + "occour", "occur", + "occurr", "occur", + "offred", "offered", + "omited", "omitted", + "ouevre", "oeuvre", + "oxigen", "oxygen", + "p0enis", "penis", + "packge", "package", + "peaple", "people", + "pensle", "pencil", + "peopel", "people", + "peotry", "poetry", + "perade", "parade", + "persan", "person", + "persue", "pursue", + "plateu", "plateau", + "poenis", "penis", + "poisin", "poison", + "polute", "pollute", + "posess", "possess", + "posion", "poison", + "prairy", "prairie", + "prarie", "prairie", + "preiod", "period", + "privte", "private", + "proces", "process", + "proove", "prove", + "psuedo", "pseudo", + "psyhic", "psychic", + "pucini", "puccini", + "pumkin", "pumpkin", + "puting", "putting", + "pyscic", "psychic", + "quizes", "quizzes", + "quuery", "query", + "racaus", "raucous", + "radify", "ratify", + "raelly", "really", + "reacll", "recall", + "realyl", "really", + "reched", "reached", + "recide", "reside", + "recrod", "record", + "refect", "reflect", + "relaly", "really", + "renewl", "renewal", + "retuns", "returns", + "reveiw", "review", + "rhymme", "rhyme", + "rigeur", "rigueur", + "rocord", "record", + "rougly", "roughly", + "runing", "running", + "rythem", "rhythm", + "rythim", "rhythm", + "saftey", "safety", + "salery", "salary", + "satisy", "satisfy", + "satric", "satiric", + "saught", "sought", + "scince", "science", + "scirpt", "script", + "seceed", "succeed", + "seinor", "senior", + "sepina", "subpoena", + "sevice", "service", + "shamen", "shaman", + "sheild", "shield", + "shiped", "shipped", + "shorly", "shortly", + "shoudl", "should", + "shreak", "shriek", + "siezed", "seized", + "sixtin", "sistine", + "skiped", "skipped", + "sneeks", "sneaks", + "somene", "someone", + "soruce", "source", + "soudns", "sounds", + "sourth", "south", + "speach", "speech", + "spects", "aspects", + "spoace", "space", + "sqaure", "square", + "staion", "station", + "stange", "strange", + "stilus", "stylus", + "stirrs", "stirs", + "stopry", "story", + "strnad", "strand", + "studdy", "study", + "suceed", "succeed", + "sucess", "success", + "sucide", "suicide", + "sumary", "summary", + "suport", "support", + "supose", "suppose", + "surfce", "surface", + "surley", "surly", + "swaers", "swears", + "swepth", "swept", + "talekd", "talked", + "theese", "these", + "therby", "thereby", + "thigns", "things", + "thigsn", "things", + "thikns", "thinks", + "thiunk", "think", + "thnigs", "things", + "threee", "three", + "tkaing", "taking", + "tounge", "tongue", + "tourch", "torch", + "towrad", "toward", + "trafic", "traffic", + "troups", "troupes", + "truely", "truly", + "twelth", "twelfth", + "tyrany", "tyranny", + "unabel", "unable", + "unkown", "unknown", + "unmont", "unmount", + "unmout", "unmount", + "untill", "until", + "usally", "usually", + "useage", "usage", + "useing", "using", + "usualy", "usually", + "vaccum", "vacuum", + "variey", "variety", + "varing", "varying", + "varity", "variety", + "vasall", "vassal", + "vigeur", "vigueur", + "villin", "villain", + "vreity", "variety", + "vriety", "variety", + "whants", "wants", + "wheras", "whereas", + "wheter", "whether", + "wholey", "wholly", + "whther", "whether", + "wnated", "wanted", + "writen", "written", + "yaerly", "yearly", + "yotube", "youtube", + "zeebra", "zebra", + "abotu", "about", + "adres", "address", + "afair", "affair", + "agian", "again", + "agina", "again", + "agred", "agreed", + "alege", "allege", + "alsot", "also", + "altho", "although", + "amung", "among", + "anual", 
"annual", + "aroud", "around", + "arund", "around", + "asign", "assign", + "assit", "assist", + "asume", "assume", + "atain", "attain", + "autor", "author", + "baout", "about", + "blaim", "blame", + "boaut", "bout", + "boook", "book", + "borke", "broke", + "breif", "brief", + "caost", "coast", + "casue", "cause", + "chasr", "chaser", + "cheif", "chief", + "chuch", "church", + "claer", "clear", + "clera", "clear", + "coudl", "could", + "crowm", "crown", + "deram", "dram", + "diety", "deity", + "doens", "does", + "doign", "doing", + "donig", "doing", + "drnik", "drink", + "durig", "during", + "earnt", "earned", + "eigth", "eighth", + "eiter", "either", + "emtpy", "empty", + "endig", "ending", + "eveyr", "every", + "exept", "except", + "eyars", "years", + "eyasr", "years", + "fiels", "fields", + "firts", "flirts", + "fleed", "fled", + "fomed", "formed", + "foucs", "focus", + "foudn", "found", + "fouth", "fourth", + "frome", "from", + "ganes", "games", + "gaurd", "guard", + "gerat", "great", + "gogin", "going", + "goign", "going", + "gonig", "going", + "graet", "great", + "greif", "grief", + "gropu", "group", + "guage", "gauge", + "hapen", "happen", + "herad", "heard", + "heroe", "hero", + "higer", "higher", + "housr", "hours", + "htere", "there", + "htikn", "think", + "hting", "thing", + "htink", "think", + "hwihc", "which", + "hwile", "while", + "hwole", "whole", + "idaes", "ideas", + "idesa", "ideas", + "ihaca", "ithaca", + "knwos", "knows", + "konws", "knows", + "lastr", "last", + "lavae", "larvae", + "layed", "laid", + "leage", "league", + "leanr", "lean", + "leran", "learn", + "levle", "level", + "lible", "libel", + "liekd", "liked", + "liuke", "like", + "lmits", "limits", + "lonly", "lonely", + "lukid", "likud", + "lybia", "libya", + "maked", "marked", + "makse", "makes", + "mamal", "mammal", + "mileu", "milieu", + "mkaes", "makes", + "modle", "model", + "moent", "moment", + "moeny", "money", + "monts", "months", + "movei", "movie", + "muder", "murder", + "mysef", "myself", + "neice", "niece", + "ninty", "ninety", + "ocurr", "occur", + "oging", "going", + "opose", "oppose", + "orded", "ordered", + "orgin", "origin", + "otehr", "other", + "ouput", "output", + "owudl", "would", + "paide", "paid", + "palce", "place", + "pased", "passed", + "payed", "paid", + "peice", "piece", + "peoms", "poems", + "poety", "poetry", + "pwoer", "power", + "qtuie", "quite", + "qutie", "quite", + "realy", "really", + "repid", "rapid", + "rised", "raised", + "rulle", "rule", + "rwite", "write", + "rythm", "rhythm", + "safty", "safety", + "scoll", "scroll", + "seach", "search", + "seige", "siege", + "seing", "seeing", + "sence", "sense", + "sicne", "since", + "sieze", "seize", + "sinse", "sines", + "slowy", "slowly", + "snese", "sneeze", + "soley", "solely", + "sotry", "story", + "sotyr", "satyr", + "soudn", "sound", + "sould", "could", + "spred", "spread", + "stlye", "style", + "stong", "strong", + "stoyr", "story", + "strat", "start", + "stroy", "story", + "suppy", "supply", + "swaer", "swear", + "syrap", "syrup", + "sytem", "system", + "sytle", "style", + "tatoo", "tattoo", + "thast", "that", + "theif", "thief", + "theri", "their", + "thgat", "that", + "thier", "their", + "thign", "thing", + "thikn", "think", + "thnig", "thing", + "thrid", "third", + "thsoe", "those", + "thyat", "that", + "tihkn", "think", + "timne", "time", + "tiome", "time", + "tkaes", "takes", + "todya", "today", + "tyhat", "that", + "unsed", "used", + "weild", "wield", + "whant", "want", + "whcih", "which", + "whihc", "which", + "whith", 
"with", + "whlch", "which", + "wholy", "wholly", + "wierd", "weird", + "wille", "will", + "willk", "will", + "withh", "with", + "witht", "with", + "wiull", "will", + "wnats", "wants", + "wohle", "whole", + "worls", "world", + "woudl", "would", + "wriet", "write", + "wroet", "wrote", + "yaers", "years", + "yatch", "yacht", + "yearm", "year", + "yeasr", "years", + "yeild", "yield", + "yeras", "years", + "yersa", "years", + "agin", "again", + "agre", "agree", + "ahev", "have", + "ahve", "have", + "alse", "else", + "amke", "make", + "anbd", "and", + "andd", "and", + "apon", "upon", + "aslo", "also", + "awya", "away", + "bakc", "back", + "bcak", "back", + "clas", "class", + "cpoy", "coy", + "cxan", "cyan", + "daed", "dead", + "dael", "deal", + "diea", "idea", + "doub", "doubt", + "dyas", "dryas", + "eahc", "each", + "efel", "evil", + "eles", "eels", + "ened", "need", + "enxt", "next", + "esle", "else", + "eyar", "year", + "fatc", "fact", + "fidn", "find", + "fomr", "from", + "grwo", "grow", + "haev", "have", + "halp", "help", + "holf", "hold", + "hten", "then", + "htey", "they", + "htis", "this", + "hvae", "have", + "hvea", "have", + "inot", "into", + "iwll", "will", + "iwth", "with", + "jstu", "just", + "jsut", "just", + "knwo", "know", + "konw", "know", + "kwno", "know", + "liek", "like", + "loev", "love", + "lveo", "love", + "lvoe", "love", + "mkae", "make", + "mkea", "make", + "mroe", "more", + "nkow", "know", + "nkwo", "know", + "nmae", "name", + "noth", "north", + "nowe", "now", + "omre", "more", + "onot", "note", + "onyl", "only", + "owrk", "work", + "peom", "poem", + "pich", "pitch", + "rela", "real", + "sasy", "says", + "smae", "same", + "smoe", "some", + "soem", "some", + "sohw", "show", + "stpo", "stop", + "suop", "soup", + "syas", "says", + "tahn", "than", + "taht", "that", + "tast", "taste", + "tath", "that", + "tehy", "they", + "tghe", "the", + "ther", "there", + "thge", "the", + "thna", "than", + "thne", "then", + "thsi", "this", + "thta", "that", + "tiem", "time", + "tihs", "this", + "tjhe", "the", + "tkae", "take", + "tood", "todo", + "tust", "trust", + "twon", "town", + "twpo", "two", + "tyhe", "they", + "uise", "use", + "vell", "well", + "veyr", "very", + "vrey", "very", + "vyer", "very", + "vyre", "very", + "waht", "what", + "wass", "was", + "watn", "want", + "weas", "was", + "wehn", "when", + "whic", "which", + "whta", "what", + "wich", "which", + "wief", "wife", + "wiew", "view", + "wiht", "with", + "witn", "with", + "wnat", "want", + "wokr", "work", + "wrok", "work", + "wtih", "with", + "yaer", "year", + "yera", "year", + "yrea", "year", + "ytou", "you", + "adn", "and", + "ect", "etc", + "nto", "not", + "teh", "the", + "thn", "then", + "tje", "the", + "whn", "when", + "wih", "with", + "yuo", "you", +} + +// DictAmerican converts UK spellings to US spellings +var DictAmerican = []string{ + "institutionalisation", "institutionalization", + "internationalisation", "internationalization", + "professionalisation", "professionalization", + "compartmentalising", "compartmentalizing", + "institutionalising", "institutionalizing", + "internationalising", "internationalizing", + "compartmentalised", "compartmentalized", + "compartmentalises", "compartmentalizes", + "decriminalisation", "decriminalization", + "denationalisation", "denationalization", + "fictionalisations", "fictionalizations", + "institutionalised", "institutionalized", + "institutionalises", "institutionalizes", + "intellectualising", "intellectualizing", + "internationalised", "internationalized", + 
"internationalises", "internationalizes", + "pedestrianisation", "pedestrianization", + "professionalising", "professionalizing", + "archaeologically", "archeologically", + "compartmentalise", "compartmentalize", + "decentralisation", "decentralization", + "demilitarisation", "demilitarization", + "externalisations", "externalizations", + "fictionalisation", "fictionalization", + "institutionalise", "institutionalize", + "intellectualised", "intellectualized", + "intellectualises", "intellectualizes", + "internationalise", "internationalize", + "nationalisations", "nationalizations", + "palaeontologists", "paleontologists", + "professionalised", "professionalized", + "professionalises", "professionalizes", + "rationalisations", "rationalizations", + "sensationalising", "sensationalizing", + "sentimentalising", "sentimentalizing", + "acclimatisation", "acclimatization", + "bougainvillaeas", "bougainvilleas", + "commercialising", "commercializing", + "conceptualising", "conceptualizing", + "contextualising", "contextualizing", + "crystallisation", "crystallization", + "decriminalising", "decriminalizing", + "democratisation", "democratization", + "denationalising", "denationalizing", + "depersonalising", "depersonalizing", + "desensitisation", "desensitization", + "destabilisation", "destabilization", + "disorganisation", "disorganization", + "extemporisation", "extemporization", + "externalisation", "externalization", + "familiarisation", "familiarization", + "generalisations", "generalizations", + "hospitalisation", "hospitalization", + "individualising", "individualizing", + "industrialising", "industrializing", + "intellectualise", "intellectualize", + "internalisation", "internalization", + "manoeuvrability", "maneuverability", + "marginalisation", "marginalization", + "materialisation", "materialization", + "miniaturisation", "miniaturization", + "nationalisation", "nationalization", + "neighbourliness", "neighborliness", + "overemphasising", "overemphasizing", + "palaeontologist", "paleontologist", + "particularising", "particularizing", + "pedestrianising", "pedestrianizing", + "professionalise", "professionalize", + "psychoanalysing", "psychoanalyzing", + "rationalisation", "rationalization", + "reorganisations", "reorganizations", + "revolutionising", "revolutionizing", + "sensationalised", "sensationalized", + "sensationalises", "sensationalizes", + "sentimentalised", "sentimentalized", + "sentimentalises", "sentimentalizes", + "specialisations", "specializations", + "standardisation", "standardization", + "synchronisation", "synchronization", + "systematisation", "systematization", + "aggrandisement", "aggrandizement", + "anaesthetising", "anesthetizing", + "archaeological", "archeological", + "archaeologists", "archeologists", + "bougainvillaea", "bougainvillea", + "characterising", "characterizing", + "collectivising", "collectivizing", + "commercialised", "commercialized", + "commercialises", "commercializes", + "conceptualised", "conceptualized", + "conceptualises", "conceptualizes", + "contextualised", "contextualized", + "contextualises", "contextualizes", + "decentralising", "decentralizing", + "decriminalised", "decriminalized", + "decriminalises", "decriminalizes", + "dehumanisation", "dehumanization", + "demilitarising", "demilitarizing", + "demobilisation", "demobilization", + "demoralisation", "demoralization", + "denationalised", "denationalized", + "denationalises", "denationalizes", + "depersonalised", "depersonalized", + "depersonalises", "depersonalizes", + 
"disembowelling", "disemboweling", + "dramatisations", "dramatizations", + "editorialising", "editorializing", + "encyclopaedias", "encyclopedias", + "fictionalising", "fictionalizing", + "fraternisation", "fraternization", + "generalisation", "generalization", + "gynaecological", "gynecological", + "gynaecologists", "gynecologists", + "haematological", "hematological", + "haematologists", "hematologists", + "immobilisation", "immobilization", + "individualised", "individualized", + "individualises", "individualizes", + "industrialised", "industrialized", + "industrialises", "industrializes", + "liberalisation", "liberalization", + "monopolisation", "monopolization", + "naturalisation", "naturalization", + "neighbourhoods", "neighborhoods", + "neutralisation", "neutralization", + "organisational", "organizational", + "outmanoeuvring", "outmaneuvering", + "overemphasised", "overemphasized", + "overemphasises", "overemphasizes", + "paediatricians", "pediatricians", + "particularised", "particularized", + "particularises", "particularizes", + "pasteurisation", "pasteurization", + "pedestrianised", "pedestrianized", + "pedestrianises", "pedestrianizes", + "philosophising", "philosophizing", + "politicisation", "politicization", + "popularisation", "popularization", + "pressurisation", "pressurization", + "prioritisation", "prioritization", + "privatisations", "privatizations", + "propagandising", "propagandizing", + "psychoanalysed", "psychoanalyzed", + "psychoanalyses", "psychoanalyzes", + "regularisation", "regularization", + "reorganisation", "reorganization", + "revolutionised", "revolutionized", + "revolutionises", "revolutionizes", + "secularisation", "secularization", + "sensationalise", "sensationalize", + "sentimentalise", "sentimentalize", + "serialisations", "serializations", + "specialisation", "specialization", + "sterilisations", "sterilizations", + "stigmatisation", "stigmatization", + "transistorised", "transistorized", + "unrecognisable", "unrecognizable", + "visualisations", "visualizations", + "westernisation", "westernization", + "accessorising", "accessorizing", + "acclimatising", "acclimatizing", + "amortisations", "amortizations", + "amphitheatres", "amphitheaters", + "anaesthetised", "anesthetized", + "anaesthetises", "anesthetizes", + "anaesthetists", "anesthetists", + "archaeologist", "archeologist", + "backpedalling", "backpedaling", + "behaviourists", "behaviorists", + "breathalysers", "breathalyzers", + "breathalysing", "breathalyzing", + "callisthenics", "calisthenics", + "cannibalising", "cannibalizing", + "characterised", "characterized", + "characterises", "characterizes", + "circularising", "circularizing", + "clarinettists", "clarinetists", + "collectivised", "collectivized", + "collectivises", "collectivizes", + "commercialise", "commercialize", + "computerising", "computerizing", + "conceptualise", "conceptualize", + "contextualise", "contextualize", + "criminalising", "criminalizing", + "crystallising", "crystallizing", + "decentralised", "decentralized", + "decentralises", "decentralizes", + "decriminalise", "decriminalize", + "demilitarised", "demilitarized", + "demilitarises", "demilitarizes", + "democratising", "democratizing", + "denationalise", "denationalize", + "depersonalise", "depersonalize", + "desensitising", "desensitizing", + "destabilising", "destabilizing", + "disembowelled", "disemboweled", + "dishonourable", "dishonorable", + "dishonourably", "dishonorably", + "dramatisation", "dramatization", + "editorialised", "editorialized", + 
"editorialises", "editorializes", + "encyclopaedia", "encyclopedia", + "encyclopaedic", "encyclopedic", + "extemporising", "extemporizing", + "externalising", "externalizing", + "familiarising", "familiarizing", + "fertilisation", "fertilization", + "fictionalised", "fictionalized", + "fictionalises", "fictionalizes", + "formalisation", "formalization", + "fossilisation", "fossilization", + "globalisation", "globalization", + "gynaecologist", "gynecologist", + "haematologist", "hematologist", + "haemophiliacs", "hemophiliacs", + "haemorrhaging", "hemorrhaging", + "harmonisation", "harmonization", + "hospitalising", "hospitalizing", + "hypothesising", "hypothesizing", + "immortalising", "immortalizing", + "individualise", "individualize", + "industrialise", "industrialize", + "internalising", "internalizing", + "marginalising", "marginalizing", + "materialising", "materializing", + "mechanisation", "mechanization", + "memorialising", "memorializing", + "miniaturising", "miniaturizing", + "miscatalogued", "miscataloged", + "misdemeanours", "misdemeanors", + "multicoloured", "multicolored", + "nationalising", "nationalizing", + "neighbourhood", "neighborhood", + "normalisation", "normalization", + "organisations", "organizations", + "outmanoeuvred", "outmaneuvered", + "outmanoeuvres", "outmaneuvers", + "overemphasise", "overemphasize", + "paediatrician", "pediatrician", + "palaeontology", "paleontology", + "particularise", "particularize", + "passivisation", "passivization", + "patronisingly", "patronizingly", + "pedestrianise", "pedestrianize", + "personalising", "personalizing", + "philosophised", "philosophized", + "philosophises", "philosophizes", + "privatisation", "privatization", + "propagandised", "propagandized", + "propagandises", "propagandizes", + "proselytisers", "proselytizers", + "proselytising", "proselytizing", + "psychoanalyse", "psychoanalyze", + "pulverisation", "pulverization", + "rationalising", "rationalizing", + "reconnoitring", "reconnoitering", + "revolutionise", "revolutionize", + "romanticising", "romanticizing", + "serialisation", "serialization", + "socialisation", "socialization", + "stabilisation", "stabilization", + "standardising", "standardizing", + "sterilisation", "sterilization", + "subsidisation", "subsidization", + "synchronising", "synchronizing", + "systematising", "systematizing", + "tantalisingly", "tantalizingly", + "underutilised", "underutilized", + "victimisation", "victimization", + "visualisation", "visualization", + "vocalisations", "vocalizations", + "vulgarisation", "vulgarization", + "accessorised", "accessorized", + "accessorises", "accessorizes", + "acclimatised", "acclimatized", + "acclimatises", "acclimatizes", + "amortisation", "amortization", + "amphitheatre", "amphitheater", + "anaesthetics", "anesthetics", + "anaesthetise", "anesthetize", + "anaesthetist", "anesthetist", + "antagonising", "antagonizing", + "appetisingly", "appetizingly", + "backpedalled", "backpedaled", + "bastardising", "bastardizing", + "behaviourism", "behaviorism", + "behaviourist", "behaviorist", + "bowdlerising", "bowdlerizing", + "breathalysed", "breathalyzed", + "breathalyser", "breathalyzer", + "breathalyses", "breathalyzes", + "cannibalised", "cannibalized", + "cannibalises", "cannibalizes", + "capitalising", "capitalizing", + "caramelising", "caramelizing", + "categorising", "categorizing", + "centigrammes", "centigrams", + "centralising", "centralizing", + "centrepieces", "centerpieces", + "characterise", "characterize", + "circularised", 
"circularized", + "circularises", "circularizes", + "clarinettist", "clarinetist", + "collectivise", "collectivize", + "colonisation", "colonization", + "computerised", "computerized", + "computerises", "computerizes", + "criminalised", "criminalized", + "criminalises", "criminalizes", + "crystallised", "crystallized", + "crystallises", "crystallizes", + "decentralise", "decentralize", + "dehumanising", "dehumanizing", + "demilitarise", "demilitarize", + "demobilising", "demobilizing", + "democratised", "democratized", + "democratises", "democratizes", + "demoralising", "demoralizing", + "desensitised", "desensitized", + "desensitises", "desensitizes", + "destabilised", "destabilized", + "destabilises", "destabilizes", + "discolouring", "discoloring", + "dishonouring", "dishonoring", + "disorganised", "disorganized", + "editorialise", "editorialize", + "endeavouring", "endeavoring", + "equalisation", "equalization", + "evangelising", "evangelizing", + "extemporised", "extemporized", + "extemporises", "extemporizes", + "externalised", "externalized", + "externalises", "externalizes", + "familiarised", "familiarized", + "familiarises", "familiarizes", + "fictionalise", "fictionalize", + "finalisation", "finalization", + "fraternising", "fraternizing", + "generalising", "generalizing", + "haemophiliac", "hemophiliac", + "haemorrhaged", "hemorrhaged", + "haemorrhages", "hemorrhages", + "haemorrhoids", "hemorrhoids", + "homoeopathic", "homeopathic", + "homogenising", "homogenizing", + "hospitalised", "hospitalized", + "hospitalises", "hospitalizes", + "hypothesised", "hypothesized", + "hypothesises", "hypothesizes", + "idealisation", "idealization", + "immobilisers", "immobilizers", + "immobilising", "immobilizing", + "immortalised", "immortalized", + "immortalises", "immortalizes", + "immunisation", "immunization", + "initialising", "initializing", + "internalised", "internalized", + "internalises", "internalizes", + "jeopardising", "jeopardizing", + "legalisation", "legalization", + "legitimising", "legitimizing", + "liberalising", "liberalizing", + "manoeuvrable", "maneuverable", + "manoeuvrings", "maneuverings", + "marginalised", "marginalized", + "marginalises", "marginalizes", + "marvellously", "marvelously", + "materialised", "materialized", + "materialises", "materializes", + "maximisation", "maximization", + "memorialised", "memorialized", + "memorialises", "memorializes", + "metabolising", "metabolizing", + "militarising", "militarizing", + "milligrammes", "milligrams", + "miniaturised", "miniaturized", + "miniaturises", "miniaturizes", + "misbehaviour", "misbehavior", + "misdemeanour", "misdemeanor", + "mobilisation", "mobilization", + "moisturisers", "moisturizers", + "moisturising", "moisturizing", + "monopolising", "monopolizing", + "moustachioed", "mustachioed", + "nationalised", "nationalized", + "nationalises", "nationalizes", + "naturalising", "naturalizing", + "neighbouring", "neighboring", + "neutralising", "neutralizing", + "oesophaguses", "esophaguses", + "organisation", "organization", + "orthopaedics", "orthopedics", + "outmanoeuvre", "outmaneuver", + "palaeolithic", "paleolithic", + "pasteurising", "pasteurizing", + "personalised", "personalized", + "personalises", "personalizes", + "philosophise", "philosophize", + "plagiarising", "plagiarizing", + "ploughshares", "plowshares", + "polarisation", "polarization", + "politicising", "politicizing", + "popularising", "popularizing", + "pressurising", "pressurizing", + "prioritising", "prioritizing", + "propagandise", 
"propagandize", + "proselytised", "proselytized", + "proselytiser", "proselytizer", + "proselytises", "proselytizes", + "radicalising", "radicalizing", + "rationalised", "rationalized", + "rationalises", "rationalizes", + "realisations", "realizations", + "recognisable", "recognizable", + "recognisably", "recognizably", + "recognisance", "recognizance", + "reconnoitred", "reconnoitered", + "reconnoitres", "reconnoiters", + "regularising", "regularizing", + "reorganising", "reorganizing", + "revitalising", "revitalizing", + "rhapsodising", "rhapsodizing", + "romanticised", "romanticized", + "romanticises", "romanticizes", + "scandalising", "scandalizing", + "scrutinising", "scrutinizing", + "secularising", "secularizing", + "specialising", "specializing", + "squirrelling", "squirreling", + "standardised", "standardized", + "standardises", "standardizes", + "stigmatising", "stigmatizing", + "sympathisers", "sympathizers", + "sympathising", "sympathizing", + "synchronised", "synchronized", + "synchronises", "synchronizes", + "synthesisers", "synthesizers", + "synthesising", "synthesizing", + "systematised", "systematized", + "systematises", "systematizes", + "technicolour", "technicolor", + "theatregoers", "theatergoers", + "traumatising", "traumatizing", + "trivialising", "trivializing", + "unauthorised", "unauthorized", + "uncatalogued", "uncataloged", + "unfavourable", "unfavorable", + "unfavourably", "unfavorably", + "unionisation", "unionization", + "unrecognised", "unrecognized", + "untrammelled", "untrammeled", + "urbanisation", "urbanization", + "vaporisation", "vaporization", + "vocalisation", "vocalization", + "watercolours", "watercolors", + "westernising", "westernizing", + "accessorise", "accessorize", + "acclimatise", "acclimatize", + "agonisingly", "agonizingly", + "amortisable", "amortizable", + "anaesthesia", "anesthesia", + "anaesthetic", "anesthetic", + "anglicising", "anglicizing", + "antagonised", "antagonized", + "antagonises", "antagonizes", + "apologising", "apologizing", + "archaeology", "archeology", + "authorising", "authorizing", + "bastardised", "bastardized", + "bastardises", "bastardizes", + "bedevilling", "bedeviling", + "behavioural", "behavioral", + "belabouring", "belaboring", + "bowdlerised", "bowdlerized", + "bowdlerises", "bowdlerizes", + "breathalyse", "breathalyze", + "brutalising", "brutalizing", + "cannibalise", "cannibalize", + "capitalised", "capitalized", + "capitalises", "capitalizes", + "caramelised", "caramelized", + "caramelises", "caramelizes", + "carbonising", "carbonizing", + "cataloguing", "cataloging", + "categorised", "categorized", + "categorises", "categorizes", + "cauterising", "cauterizing", + "centigramme", "centigram", + "centilitres", "centiliters", + "centimetres", "centimeters", + "centralised", "centralized", + "centralises", "centralizes", + "centrefolds", "centerfolds", + "centrepiece", "centerpiece", + "channelling", "channeling", + "chequebooks", "checkbooks", + "circularise", "circularize", + "colourfully", "colorfully", + "colourizing", "colorizing", + "computerise", "computerize", + "councillors", "councilors", + "counselling", "counseling", + "counsellors", "counselors", + "criminalise", "criminalize", + "criticising", "criticizing", + "crystallise", "crystallize", + "customising", "customizing", + "defenceless", "defenseless", + "dehumanised", "dehumanized", + "dehumanises", "dehumanizes", + "demobilised", "demobilized", + "demobilises", "demobilizes", + "democratise", "democratize", + "demoralised", "demoralized", + 
"demoralises", "demoralizes", + "deodorising", "deodorizing", + "desensitise", "desensitize", + "destabilise", "destabilize", + "discoloured", "discolored", + "dishevelled", "disheveled", + "dishonoured", "dishonored", + "dramatising", "dramatizing", + "economising", "economizing", + "empathising", "empathizing", + "emphasising", "emphasizing", + "endeavoured", "endeavored", + "epitomising", "epitomizing", + "evangelised", "evangelized", + "evangelises", "evangelizes", + "extemporise", "extemporize", + "externalise", "externalize", + "factorising", "factorizing", + "familiarise", "familiarize", + "fantasising", "fantasizing", + "favouritism", "favoritism", + "fertilisers", "fertilizers", + "fertilising", "fertilizing", + "flavourings", "flavorings", + "flavourless", "flavorless", + "flavoursome", "flavorsome", + "formalising", "formalizing", + "fossilising", "fossilizing", + "fraternised", "fraternized", + "fraternises", "fraternizes", + "galvanising", "galvanizing", + "generalised", "generalized", + "generalises", "generalizes", + "ghettoising", "ghettoizing", + "globalising", "globalizing", + "gruellingly", "gruelingly", + "gynaecology", "gynecology", + "haematology", "hematology", + "haemoglobin", "hemoglobin", + "haemophilia", "hemophilia", + "haemorrhage", "hemorrhage", + "harmonising", "harmonizing", + "homoeopaths", "homeopaths", + "homoeopathy", "homeopathy", + "homogenised", "homogenized", + "homogenises", "homogenizes", + "hospitalise", "hospitalize", + "hybridising", "hybridizing", + "hypnotising", "hypnotizing", + "hypothesise", "hypothesize", + "immobilised", "immobilized", + "immobiliser", "immobilizer", + "immobilises", "immobilizes", + "immortalise", "immortalize", + "impanelling", "impaneling", + "imperilling", "imperiling", + "initialised", "initialized", + "initialises", "initializes", + "initialling", "initialing", + "instalments", "installments", + "internalise", "internalize", + "italicising", "italicizing", + "jeopardised", "jeopardized", + "jeopardises", "jeopardizes", + "kilogrammes", "kilograms", + "legitimised", "legitimized", + "legitimises", "legitimizes", + "liberalised", "liberalized", + "liberalises", "liberalizes", + "lionisation", "lionization", + "liquidisers", "liquidizers", + "liquidising", "liquidizing", + "magnetising", "magnetizing", + "manoeuvring", "maneuvering", + "marginalise", "marginalize", + "marshalling", "marshaling", + "materialise", "materialize", + "mechanising", "mechanizing", + "memorialise", "memorialize", + "mesmerising", "mesmerizing", + "metabolised", "metabolized", + "metabolises", "metabolizes", + "micrometres", "micrometers", + "militarised", "militarized", + "militarises", "militarizes", + "milligramme", "milligram", + "millilitres", "milliliters", + "millimetres", "millimeters", + "miniaturise", "miniaturize", + "modernising", "modernizing", + "moisturised", "moisturized", + "moisturiser", "moisturizer", + "moisturises", "moisturizes", + "monopolised", "monopolized", + "monopolises", "monopolizes", + "nationalise", "nationalize", + "naturalised", "naturalized", + "naturalises", "naturalizes", + "neighbourly", "neighborly", + "neutralised", "neutralized", + "neutralises", "neutralizes", + "normalising", "normalizing", + "orthopaedic", "orthopedic", + "ostracising", "ostracizing", + "oxidisation", "oxidization", + "paediatrics", "pediatrics", + "paedophiles", "pedophiles", + "paedophilia", "pedophilia", + "passivising", "passivizing", + "pasteurised", "pasteurized", + "pasteurises", "pasteurizes", + "patronising", "patronizing", 
+ "personalise", "personalize", + "plagiarised", "plagiarized", + "plagiarises", "plagiarizes", + "ploughshare", "plowshare", + "politicised", "politicized", + "politicises", "politicizes", + "popularised", "popularized", + "popularises", "popularizes", + "praesidiums", "presidiums", + "pressurised", "pressurized", + "pressurises", "pressurizes", + "prioritised", "prioritized", + "prioritises", "prioritizes", + "privatising", "privatizing", + "proselytise", "proselytize", + "publicising", "publicizing", + "pulverising", "pulverizing", + "quarrelling", "quarreling", + "radicalised", "radicalized", + "radicalises", "radicalizes", + "randomising", "randomizing", + "rationalise", "rationalize", + "realisation", "realization", + "recognising", "recognizing", + "reconnoitre", "reconnoiter", + "regularised", "regularized", + "regularises", "regularizes", + "remodelling", "remodeling", + "reorganised", "reorganized", + "reorganises", "reorganizes", + "revitalised", "revitalized", + "revitalises", "revitalizes", + "rhapsodised", "rhapsodized", + "rhapsodises", "rhapsodizes", + "romanticise", "romanticize", + "scandalised", "scandalized", + "scandalises", "scandalizes", + "sceptically", "skeptically", + "scrutinised", "scrutinized", + "scrutinises", "scrutinizes", + "secularised", "secularized", + "secularises", "secularizes", + "sensitising", "sensitizing", + "serialising", "serializing", + "sermonising", "sermonizing", + "shrivelling", "shriveling", + "signalising", "signalizing", + "snorkelling", "snorkeling", + "snowploughs", "snowplow", + "socialising", "socializing", + "solemnising", "solemnizing", + "specialised", "specialized", + "specialises", "specializes", + "squirrelled", "squirreled", + "stabilisers", "stabilizers", + "stabilising", "stabilizing", + "standardise", "standardize", + "stencilling", "stenciling", + "sterilisers", "sterilizers", + "sterilising", "sterilizing", + "stigmatised", "stigmatized", + "stigmatises", "stigmatizes", + "subsidisers", "subsidizers", + "subsidising", "subsidizing", + "summarising", "summarizing", + "symbolising", "symbolizing", + "sympathised", "sympathized", + "sympathiser", "sympathizer", + "sympathises", "sympathizes", + "synchronise", "synchronize", + "synthesised", "synthesized", + "synthesiser", "synthesizer", + "synthesises", "synthesizes", + "systematise", "systematize", + "tantalising", "tantalizing", + "temporising", "temporizing", + "tenderising", "tenderizing", + "terrorising", "terrorizing", + "theatregoer", "theatergoer", + "traumatised", "traumatized", + "traumatises", "traumatizes", + "trivialised", "trivialized", + "trivialises", "trivializes", + "tyrannising", "tyrannizing", + "uncivilised", "uncivilized", + "unorganised", "unorganized", + "unravelling", "unraveling", + "utilisation", "utilization", + "vandalising", "vandalizing", + "verbalising", "verbalizing", + "victimising", "victimizing", + "visualising", "visualizing", + "vulgarising", "vulgarizing", + "watercolour", "watercolor", + "westernised", "westernized", + "westernises", "westernizes", + "worshipping", "worshiping", + "aeroplanes", "airplanes", + "amortising", "amortizing", + "anglicised", "anglicized", + "anglicises", "anglicizes", + "annualised", "annualized", + "antagonise", "antagonize", + "apologised", "apologized", + "apologises", "apologizes", + "appetisers", "appetizers", + "appetising", "appetizing", + "authorised", "authorized", + "authorises", "authorizes", + "bannisters", "banisters", + "bastardise", "bastardize", + "bedevilled", "bedeviled", + "behaviours", 
"behaviors", + "bejewelled", "bejeweled", + "belaboured", "belabored", + "bowdlerise", "bowdlerize", + "brutalised", "brutalized", + "brutalises", "brutalizes", + "canalising", "canalizing", + "cancelling", "canceling", + "canonising", "canonizing", + "capitalise", "capitalize", + "caramelise", "caramelize", + "carbonised", "carbonized", + "carbonises", "carbonizes", + "catalogued", "cataloged", + "catalogues", "catalogs", + "catalysing", "catalyzing", + "categorise", "categorize", + "cauterised", "cauterized", + "cauterises", "cauterizes", + "centilitre", "centiliter", + "centimetre", "centimeter", + "centralise", "centralize", + "centrefold", "centerfold", + "channelled", "channeled", + "chequebook", "checkbook", + "chiselling", "chiseling", + "civilising", "civilizing", + "clamouring", "clamoring", + "colonisers", "colonizers", + "colonising", "colonizing", + "colourants", "colorants", + "colourized", "colorized", + "colourizes", "colorizes", + "colourless", "colorless", + "connexions", "connections", + "councillor", "councilor", + "counselled", "counseled", + "counsellor", "counselor", + "criticised", "criticized", + "criticises", "criticizes", + "cudgelling", "cudgeling", + "customised", "customized", + "customises", "customizes", + "dehumanise", "dehumanize", + "demobilise", "demobilize", + "demonising", "demonizing", + "demoralise", "demoralize", + "deodorised", "deodorized", + "deodorises", "deodorizes", + "deputising", "deputizing", + "digitising", "digitizing", + "discolours", "discolors", + "dishonours", "dishonors", + "dramatised", "dramatized", + "dramatises", "dramatizes", + "drivelling", "driveling", + "economised", "economized", + "economises", "economizes", + "empathised", "empathized", + "empathises", "empathizes", + "emphasised", "emphasized", + "emphasises", "emphasizes", + "enamelling", "enameling", + "endeavours", "endeavors", + "energising", "energizing", + "epaulettes", "epaulets", + "epicentres", "epicenters", + "epitomised", "epitomized", + "epitomises", "epitomizes", + "equalisers", "equalizers", + "equalising", "equalizing", + "eulogising", "eulogizing", + "evangelise", "evangelize", + "factorised", "factorized", + "factorises", "factorizes", + "fantasised", "fantasized", + "fantasises", "fantasizes", + "favourable", "favorable", + "favourably", "favorably", + "favourites", "favorites", + "feminising", "feminizing", + "fertilised", "fertilized", + "fertiliser", "fertilizer", + "fertilises", "fertilizes", + "fibreglass", "fiberglass", + "finalising", "finalizing", + "flavouring", "flavoring", + "formalised", "formalized", + "formalises", "formalizes", + "fossilised", "fossilized", + "fossilises", "fossilizes", + "fraternise", "fraternize", + "fulfilment", "fulfillment", + "funnelling", "funneling", + "galvanised", "galvanized", + "galvanises", "galvanizes", + "gambolling", "gamboling", + "gaolbreaks", "jailbreaks", + "generalise", "generalize", + "ghettoised", "ghettoized", + "ghettoises", "ghettoizes", + "globalised", "globalized", + "globalises", "globalizes", + "gonorrhoea", "gonorrhea", + "grovelling", "groveling", + "harbouring", "harboring", + "harmonised", "harmonized", + "harmonises", "harmonizes", + "homoeopath", "homeopath", + "homogenise", "homogenize", + "honourable", "honorable", + "honourably", "honorably", + "humanising", "humanizing", + "humourless", "humorless", + "hybridised", "hybridized", + "hybridises", "hybridizes", + "hypnotised", "hypnotized", + "hypnotises", "hypnotizes", + "idealising", "idealizing", + "immobilise", "immobilize", + 
"immunising", "immunizing", + "impanelled", "impaneled", + "imperilled", "imperiled", + "inflexions", "inflections", + "initialise", "initialize", + "initialled", "initialed", + "instalment", "installment", + "ionisation", "ionization", + "italicised", "italicized", + "italicises", "italicizes", + "jeopardise", "jeopardize", + "kilogramme", "kilogram", + "kilometres", "kilometers", + "lacklustre", "lackluster", + "legalising", "legalizing", + "legitimise", "legitimize", + "liberalise", "liberalize", + "liquidised", "liquidized", + "liquidiser", "liquidizer", + "liquidises", "liquidizes", + "localising", "localizing", + "magnetised", "magnetized", + "magnetises", "magnetizes", + "manoeuvred", "maneuvered", + "manoeuvres", "maneuvers", + "marshalled", "marshaled", + "marvelling", "marveling", + "marvellous", "marvelous", + "maximising", "maximizing", + "mechanised", "mechanized", + "mechanises", "mechanizes", + "memorising", "memorizing", + "mesmerised", "mesmerized", + "mesmerises", "mesmerizes", + "metabolise", "metabolize", + "micrometre", "micrometer", + "militarise", "militarize", + "millilitre", "milliliter", + "millimetre", "millimeter", + "minimising", "minimizing", + "mobilising", "mobilizing", + "modernised", "modernized", + "modernises", "modernizes", + "moisturise", "moisturize", + "monopolise", "monopolize", + "moralising", "moralizing", + "mouldering", "moldering", + "moustached", "mustached", + "moustaches", "mustaches", + "naturalise", "naturalize", + "neighbours", "neighbors", + "neutralise", "neutralize", + "normalised", "normalized", + "normalises", "normalizes", + "oesophagus", "esophagus", + "optimising", "optimizing", + "organisers", "organizers", + "organising", "organizing", + "ostracised", "ostracized", + "ostracises", "ostracizes", + "paederasts", "pederasts", + "paediatric", "pediatric", + "paedophile", "pedophile", + "panellists", "panelists", + "paralysing", "paralyzing", + "parcelling", "parceling", + "passivised", "passivized", + "passivises", "passivizes", + "pasteurise", "pasteurize", + "patronised", "patronized", + "patronises", "patronizes", + "penalising", "penalizing", + "pencilling", "penciling", + "plagiarise", "plagiarize", + "polarising", "polarizing", + "politicise", "politicize", + "popularise", "popularize", + "practising", "practicing", + "praesidium", "presidium", + "pressurise", "pressurize", + "prioritise", "prioritize", + "privatised", "privatized", + "privatises", "privatizes", + "programmes", "programs", + "publicised", "publicized", + "publicises", "publicizes", + "pulverised", "pulverized", + "pulverises", "pulverizes", + "pummelling", "pummeled", + "quarrelled", "quarreled", + "radicalise", "radicalize", + "randomised", "randomized", + "randomises", "randomizes", + "realisable", "realizable", + "recognised", "recognized", + "recognises", "recognizes", + "refuelling", "refueling", + "regularise", "regularize", + "remodelled", "remodeled", + "remoulding", "remolding", + "reorganise", "reorganize", + "revitalise", "revitalize", + "rhapsodise", "rhapsodize", + "ritualised", "ritualized", + "sanitising", "sanitizing", + "satirising", "satirizing", + "scandalise", "scandalize", + "scepticism", "skepticism", + "scrutinise", "scrutinize", + "secularise", "secularize", + "sensitised", "sensitized", + "sensitises", "sensitizes", + "sepulchres", "sepulchers", + "serialised", "serialized", + "serialises", "serializes", + "sermonised", "sermonized", + "sermonises", "sermonizes", + "shovelling", "shoveling", + "shrivelled", "shriveled", + "signalised", 
"signalized", + "signalises", "signalizes", + "signalling", "signaling", + "snivelling", "sniveling", + "snorkelled", "snorkeled", + "snowplough", "snowplow", + "socialised", "socialized", + "socialises", "socializes", + "sodomising", "sodomizing", + "solemnised", "solemnized", + "solemnises", "solemnizes", + "specialise", "specialize", + "spiralling", "spiraling", + "splendours", "splendors", + "stabilised", "stabilized", + "stabiliser", "stabilizer", + "stabilises", "stabilizes", + "stencilled", "stenciled", + "sterilised", "sterilized", + "steriliser", "sterilizer", + "sterilises", "sterilizes", + "stigmatise", "stigmatize", + "subsidised", "subsidized", + "subsidiser", "subsidizer", + "subsidises", "subsidizes", + "succouring", "succoring", + "sulphurous", "sulfurous", + "summarised", "summarized", + "summarises", "summarizes", + "swivelling", "swiveling", + "symbolised", "symbolized", + "symbolises", "symbolizes", + "sympathise", "sympathize", + "synthesise", "synthesize", + "tantalised", "tantalized", + "tantalises", "tantalizes", + "temporised", "temporized", + "temporises", "temporizes", + "tenderised", "tenderized", + "tenderises", "tenderizes", + "terrorised", "terrorized", + "terrorises", "terrorizes", + "theorising", "theorizing", + "traumatise", "traumatize", + "travellers", "travelers", + "travelling", "traveling", + "tricolours", "tricolors", + "trivialise", "trivialize", + "tunnelling", "tunneling", + "tyrannised", "tyrannized", + "tyrannises", "tyrannizes", + "unequalled", "unequaled", + "unionising", "unionizing", + "unravelled", "unraveled", + "unrivalled", "unrivaled", + "urbanising", "urbanizing", + "utilisable", "utilizable", + "vandalised", "vandalized", + "vandalises", "vandalizes", + "vaporising", "vaporizing", + "verbalised", "verbalized", + "verbalises", "verbalizes", + "victimised", "victimized", + "victimises", "victimizes", + "visualised", "visualized", + "visualises", "visualizes", + "vocalising", "vocalizing", + "vulcanised", "vulcanized", + "vulgarised", "vulgarized", + "vulgarises", "vulgarizes", + "weaselling", "weaseling", + "westernise", "westernize", + "womanisers", "womanizers", + "womanising", "womanizing", + "worshipped", "worshiped", + "worshipper", "worshiper", + "aeroplane", "airplane", + "aetiology", "etiology", + "agonising", "agonizing", + "almanacks", "almanacs", + "aluminium", "aluminum", + "amortised", "amortized", + "amortises", "amortizes", + "analogues", "analogs", + "analysing", "analyzing", + "anglicise", "anglicize", + "apologise", "apologize", + "appetiser", "appetizer", + "armourers", "armorers", + "armouries", "armories", + "artefacts", "artifacts", + "authorise", "authorize", + "baptising", "baptizing", + "behaviour", "behavior", + "belabours", "belabors", + "brutalise", "brutalize", + "callipers", "calipers", + "canalised", "canalized", + "canalises", "canalizes", + "cancelled", "canceled", + "canonised", "canonized", + "canonises", "canonizes", + "carbonise", "carbonize", + "carolling", "caroling", + "catalogue", "catalog", + "catalysed", "catalyzed", + "catalyses", "catalyzes", + "cauterise", "cauterize", + "cavilling", "caviling", + "chequered", "checkered", + "chiselled", "chiseled", + "civilised", "civilized", + "civilises", "civilizes", + "clamoured", "clamored", + "colonised", "colonized", + "coloniser", "colonizer", + "colonises", "colonizes", + "colourant", "colorant", + "coloureds", "coloreds", + "colourful", "colorful", + "colouring", "coloring", + "colourize", "colorize", + "connexion", "connection", + "criticise", 
"criticize", + "cruellest", "cruelest", + "cudgelled", "cudgeled", + "customise", "customize", + "demeanour", "demeanor", + "demonised", "demonized", + "demonises", "demonizes", + "deodorise", "deodorize", + "deputised", "deputized", + "deputises", "deputizes", + "dialogues", "dialogs", + "diarrhoea", "diarrhea", + "digitised", "digitized", + "digitises", "digitizes", + "discolour", "discolor", + "disfavour", "disfavor", + "dishonour", "dishonor", + "dramatise", "dramatize", + "drivelled", "driveled", + "economise", "economize", + "empathise", "empathize", + "emphasise", "emphasize", + "enamelled", "enameled", + "enamoured", "enamored", + "endeavour", "endeavor", + "energised", "energized", + "energises", "energizes", + "epaulette", "epaulet", + "epicentre", "epicenter", + "epitomise", "epitomize", + "equalised", "equalized", + "equaliser", "equalizer", + "equalises", "equalizes", + "eulogised", "eulogized", + "eulogises", "eulogizes", + "factorise", "factorize", + "fantasise", "fantasize", + "favouring", "favoring", + "favourite", "favorite", + "feminised", "feminized", + "feminises", "feminizes", + "fertilise", "fertilize", + "finalised", "finalized", + "finalises", "finalizes", + "flautists", "flutists", + "flavoured", "flavored", + "formalise", "formalize", + "fossilise", "fossilize", + "funnelled", "funneled", + "galvanise", "galvanize", + "gambolled", "gamboled", + "gaolbirds", "jailbirds", + "gaolbreak", "jailbreak", + "ghettoise", "ghettoize", + "globalise", "globalize", + "gravelled", "graveled", + "grovelled", "groveled", + "gruelling", "grueling", + "harboured", "harbored", + "harmonise", "harmonize", + "honouring", "honoring", + "humanised", "humanized", + "humanises", "humanizes", + "humouring", "humoring", + "hybridise", "hybridize", + "hypnotise", "hypnotize", + "idealised", "idealized", + "idealises", "idealizes", + "idolising", "idolizing", + "immunised", "immunized", + "immunises", "immunizes", + "inflexion", "inflection", + "italicise", "italicize", + "itemising", "itemizing", + "jewellers", "jewelers", + "jewellery", "jewelry", + "kilometre", "kilometer", + "labelling", "labeling", + "labourers", "laborers", + "labouring", "laboring", + "legalised", "legalized", + "legalises", "legalizes", + "leukaemia", "leukemia", + "levellers", "levelers", + "levelling", "leveling", + "libelling", "libeling", + "libellous", "libelous", + "licencing", "licensing", + "lionising", "lionizing", + "liquidise", "liquidize", + "localised", "localized", + "localises", "localizes", + "magnetise", "magnetize", + "manoeuvre", "maneuver", + "marvelled", "marveled", + "maximised", "maximized", + "maximises", "maximizes", + "mechanise", "mechanize", + "mediaeval", "medieval", + "memorised", "memorized", + "memorises", "memorizes", + "mesmerise", "mesmerize", + "minimised", "minimized", + "minimises", "minimizes", + "mobilised", "mobilized", + "mobilises", "mobilizes", + "modellers", "modelers", + "modelling", "modeling", + "modernise", "modernize", + "moralised", "moralized", + "moralises", "moralizes", + "motorised", "motorized", + "mouldered", "moldered", + "mouldiest", "moldiest", + "mouldings", "moldings", + "moustache", "mustache", + "neighbour", "neighbor", + "normalise", "normalize", + "odourless", "odorless", + "oestrogen", "estrogen", + "optimised", "optimized", + "optimises", "optimizes", + "organised", "organized", + "organiser", "organizer", + "organises", "organizes", + "ostracise", "ostracize", + "oxidising", "oxidizing", + "paederast", "pederast", + "panelling", "paneling", + 
"panellist", "panelist", + "paralysed", "paralyzed", + "paralyses", "paralyzes", + "parcelled", "parceled", + "passivise", "passivize", + "patronise", "patronize", + "pedalling", "pedaling", + "penalised", "penalized", + "penalises", "penalizes", + "pencilled", "penciled", + "ploughing", "plowing", + "ploughman", "plowman", + "ploughmen", "plowmen", + "polarised", "polarized", + "polarises", "polarizes", + "practised", "practiced", + "practises", "practices", + "pretences", "pretenses", + "primaeval", "primeval", + "privatise", "privatize", + "programme", "program", + "publicise", "publicize", + "pulverise", "pulverize", + "pummelled", "pummel", + "randomise", "randomize", + "ravelling", "raveling", + "realising", "realizing", + "recognise", "recognize", + "refuelled", "refueled", + "remoulded", "remolded", + "revellers", "revelers", + "revelling", "reveling", + "rivalling", "rivaling", + "saltpetre", "saltpeter", + "sanitised", "sanitized", + "sanitises", "sanitizes", + "satirised", "satirized", + "satirises", "satirizes", + "savouries", "savories", + "savouring", "savoring", + "sceptical", "skeptical", + "sensitise", "sensitize", + "sepulchre", "sepulcher", + "serialise", "serialize", + "sermonise", "sermonize", + "shovelled", "shoveled", + "signalise", "signalize", + "signalled", "signaled", + "snivelled", "sniveled", + "socialise", "socialize", + "sodomised", "sodomized", + "sodomises", "sodomizes", + "solemnise", "solemnize", + "spiralled", "spiraled", + "splendour", "splendor", + "stabilise", "stabilize", + "sterilise", "sterilize", + "subsidise", "subsidize", + "succoured", "succored", + "sulphates", "sulfates", + "sulphides", "sulfides", + "summarise", "summarize", + "swivelled", "swiveled", + "symbolise", "symbolize", + "syphoning", "siphoning", + "tantalise", "tantalize", + "tasselled", "tasseled", + "temporise", "temporize", + "tenderise", "tenderize", + "terrorise", "terrorize", + "theorised", "theorized", + "theorises", "theorizes", + "towelling", "toweling", + "travelled", "traveled", + "traveller", "traveler", + "trialling", "trialing", + "tricolour", "tricolor", + "tunnelled", "tunneled", + "tyrannise", "tyrannize", + "unionised", "unionized", + "unionises", "unionizes", + "unsavoury", "unsavory", + "urbanised", "urbanized", + "urbanises", "urbanizes", + "utilising", "utilizing", + "vandalise", "vandalize", + "vaporised", "vaporized", + "vaporises", "vaporizes", + "verbalise", "verbalize", + "victimise", "victimize", + "visualise", "visualize", + "vocalised", "vocalized", + "vocalises", "vocalizes", + "vulgarise", "vulgarize", + "weaselled", "weaseled", + "womanised", "womanized", + "womaniser", "womanizer", + "womanises", "womanizes", + "yodelling", "yodeling", + "yoghourts", "yogurts", + "agonised", "agonized", + "agonises", "agonizes", + "almanack", "almanac", + "amortise", "amortize", + "analogue", "analog", + "analysed", "analyzed", + "analyses", "analyzes", + "armoured", "armored", + "armourer", "armorer", + "artefact", "artifact", + "baptised", "baptized", + "baptises", "baptizes", + "baulking", "balking", + "belabour", "belabor", + "bevelled", "beveled", + "calibres", "calibers", + "calliper", "caliper", + "canalise", "canalize", + "canonise", "canonize", + "carolled", "caroled", + "catalyse", "catalyze", + "cavilled", "caviled", + "civilise", "civilize", + "clamours", "clamors", + "clangour", "clangor", + "colonise", "colonize", + "coloured", "colored", + "cosiness", "coziness", + "crueller", "crueler", + "defences", "defenses", + "demonise", "demonize", + 
"deputise", "deputize", + "dialling", "dialing", + "dialogue", "dialog", + "digitise", "digitize", + "draughty", "drafty", + "duelling", "dueling", + "energise", "energize", + "enthrals", "enthralls", + "equalise", "equalize", + "eulogise", "eulogize", + "favoured", "favored", + "feminise", "feminize", + "finalise", "finalize", + "flautist", "flutist", + "flavours", "flavors", + "foetuses", "fetuses", + "fuelling", "fueling", + "gaolbird", "jailbird", + "gryphons", "griffins", + "harbours", "harbors", + "honoured", "honored", + "humanise", "humanize", + "humoured", "humored", + "idealise", "idealize", + "idolised", "idolized", + "idolises", "idolizes", + "immunise", "immunize", + "ionisers", "ionizers", + "ionising", "ionizing", + "itemised", "itemized", + "itemises", "itemizes", + "jewelled", "jeweled", + "jeweller", "jeweler", + "labelled", "labeled", + "laboured", "labored", + "labourer", "laborer", + "legalise", "legalize", + "levelled", "leveled", + "leveller", "leveler", + "libelled", "libeled", + "licenced", "licensed", + "licences", "licenses", + "lionised", "lionized", + "lionises", "lionizes", + "localise", "localize", + "maximise", "maximize", + "memorise", "memorize", + "minimise", "minimize", + "misspelt", "misspelled", + "mobilise", "mobilize", + "modelled", "modeled", + "modeller", "modeler", + "moralise", "moralize", + "moulders", "molders", + "mouldier", "moldier", + "moulding", "molding", + "moulting", "molting", + "offences", "offenses", + "optimise", "optimize", + "organise", "organize", + "oxidised", "oxidized", + "oxidises", "oxidizes", + "panelled", "paneled", + "paralyse", "paralyze", + "parlours", "parlors", + "pedalled", "pedaled", + "penalise", "penalize", + "philtres", "filters", + "ploughed", "plowed", + "polarise", "polarize", + "practise", "practice", + "pretence", "pretense", + "ravelled", "raveled", + "realised", "realized", + "realises", "realizes", + "remoulds", "remolds", + "revelled", "reveled", + "reveller", "reveler", + "rivalled", "rivaled", + "rumoured", "rumored", + "sanitise", "sanitize", + "satirise", "satirize", + "saviours", "saviors", + "savoured", "savored", + "sceptics", "skeptics", + "sceptres", "scepters", + "sodomise", "sodomize", + "spectres", "specters", + "succours", "succors", + "sulphate", "sulfate", + "sulphide", "sulfide", + "syphoned", "siphoned", + "theatres", "theaters", + "theorise", "theorize", + "towelled", "toweled", + "toxaemia", "toxemia", + "trialled", "trialed", + "unionise", "unionize", + "urbanise", "urbanize", + "utilised", "utilized", + "utilises", "utilizes", + "vaporise", "vaporize", + "vocalise", "vocalize", + "womanise", "womanize", + "yodelled", "yodeled", + "yoghourt", "yogurt", + "yoghurts", "yogurts", + "agonise", "agonize", + "anaemia", "anemia", + "anaemic", "anemic", + "analyse", "analyze", + "arbours", "arbors", + "armoury", "armory", + "baptise", "baptize", + "baulked", "balked", + "behoved", "behooved", + "behoves", "behooves", + "calibre", "caliber", + "candour", "candor", + "centred", "centered", + "centres", "centers", + "cheques", "checks", + "clamour", "clamor", + "colours", "colors", + "cosiest", "coziest", + "defence", "defense", + "dialled", "dialed", + "distils", "distills", + "duelled", "dueled", + "enthral", "enthrall", + "favours", "favors", + "fervour", "fervor", + "flavour", "flavor", + "fuelled", "fueled", + "fulfils", "fulfills", + "gaolers", "jailers", + "gaoling", "jailing", + "gipsies", "gypsies", + "glueing", "gluing", + "goitres", "goiters", + "grammes", "grams", + "groynes", 
"groins", + "gryphon", "griffin", + "harbour", "harbor", + "honours", "honors", + "humours", "humors", + "idolise", "idolize", + "instals", "installs", + "instils", "instills", + "ionised", "ionized", + "ioniser", "ionizer", + "ionises", "ionizes", + "itemise", "itemize", + "labours", "labors", + "licence", "license", + "lionise", "lionize", + "louvred", "louvered", + "louvres", "louvers", + "moulded", "molded", + "moulder", "molder", + "moulted", "molted", + "offence", "offense", + "oxidise", "oxidize", + "parlour", "parlor", + "philtre", "filter", + "ploughs", "plows", + "pyjamas", "pajamas", + "rancour", "rancor", + "realise", "realize", + "remould", "remold", + "rigours", "rigors", + "rumours", "rumors", + "saviour", "savior", + "savours", "savors", + "savoury", "savory", + "sceptic", "skeptic", + "sceptre", "scepter", + "spectre", "specter", + "storeys", "stories", + "succour", "succor", + "sulphur", "sulfur", + "syphons", "siphons", + "theatre", "theater", + "tumours", "tumors", + "utilise", "utilize", + "vapours", "vapors", + "waggons", "wagons", + "yoghurt", "yogurt", + "ageing", "aging", + "appals", "appalls", + "arbour", "arbor", + "ardour", "ardor", + "baulks", "balks", + "behove", "behoove", + "centre", "center", + "cheque", "check", + "chilli", "chili", + "colour", "color", + "cosier", "cozier", + "cosies", "cozies", + "cosily", "cozily", + "distil", "distill", + "edoema", "edema", + "enrols", "enrolls", + "faecal", "fecal", + "faeces", "feces", + "favour", "favor", + "fibres", "fibers", + "foetal", "fetal", + "foetid", "fetid", + "foetus", "fetus", + "fulfil", "fulfill", + "gaoled", "jailed", + "gaoler", "jailer", + "goitre", "goiter", + "gramme", "gram", + "groyne", "groin", + "honour", "honor", + "humour", "humor", + "instal", "install", + "instil", "instill", + "ionise", "ionize", + "labour", "labor", + "litres", "liters", + "lustre", "luster", + "meagre", "meager", + "metres", "meters", + "mitres", "miters", + "moulds", "molds", + "mouldy", "moldy", + "moults", "molts", + "odours", "odors", + "plough", "plow", + "pyjama", "pajama", + "rigour", "rigor", + "rumour", "rumor", + "savour", "savor", + "storey", "story", + "syphon", "siphon", + "tumour", "tumor", + "valour", "valor", + "vapour", "vapor", + "vigour", "vigor", + "waggon", "wagon", + "appal", "appall", + "baulk", "balk", + "enrol", "enroll", + "fibre", "fiber", + "gaols", "jails", + "litre", "liter", + "metre", "meter", + "mitre", "miter", + "mould", "mold", + "moult", "molt", + "odour", "odor", + "tyres", "tires", + "cosy", "cozy", + "gaol", "jail", + "tyre", "tire", +} + +// DictBritish converts US spellings to UK spellings +var DictBritish = []string{ + "institutionalization", "institutionalisation", + "internationalization", "internationalisation", + "professionalization", "professionalisation", + "compartmentalizing", "compartmentalising", + "institutionalizing", "institutionalising", + "internationalizing", "internationalising", + "compartmentalized", "compartmentalised", + "compartmentalizes", "compartmentalises", + "decriminalization", "decriminalisation", + "denationalization", "denationalisation", + "fictionalizations", "fictionalisations", + "institutionalized", "institutionalised", + "institutionalizes", "institutionalises", + "intellectualizing", "intellectualising", + "internationalized", "internationalised", + "internationalizes", "internationalises", + "pedestrianization", "pedestrianisation", + "professionalizing", "professionalising", + "compartmentalize", "compartmentalise", + "decentralization", 
"decentralisation", + "demilitarization", "demilitarisation", + "externalizations", "externalisations", + "fictionalization", "fictionalisation", + "institutionalize", "institutionalise", + "intellectualized", "intellectualised", + "intellectualizes", "intellectualises", + "internationalize", "internationalise", + "nationalizations", "nationalisations", + "professionalized", "professionalised", + "professionalizes", "professionalises", + "rationalizations", "rationalisations", + "sensationalizing", "sensationalising", + "sentimentalizing", "sentimentalising", + "acclimatization", "acclimatisation", + "commercializing", "commercialising", + "conceptualizing", "conceptualising", + "contextualizing", "contextualising", + "crystallization", "crystallisation", + "decriminalizing", "decriminalising", + "democratization", "democratisation", + "denationalizing", "denationalising", + "depersonalizing", "depersonalising", + "desensitization", "desensitisation", + "disorganization", "disorganisation", + "extemporization", "extemporisation", + "externalization", "externalisation", + "familiarization", "familiarisation", + "generalizations", "generalisations", + "hospitalization", "hospitalisation", + "individualizing", "individualising", + "industrializing", "industrialising", + "intellectualize", "intellectualise", + "internalization", "internalisation", + "maneuverability", "manoeuvrability", + "materialization", "materialisation", + "miniaturization", "miniaturisation", + "nationalization", "nationalisation", + "overemphasizing", "overemphasising", + "paleontologists", "palaeontologists", + "particularizing", "particularising", + "pedestrianizing", "pedestrianising", + "professionalize", "professionalise", + "psychoanalyzing", "psychoanalysing", + "rationalization", "rationalisation", + "reorganizations", "reorganisations", + "revolutionizing", "revolutionising", + "sensationalized", "sensationalised", + "sensationalizes", "sensationalises", + "sentimentalized", "sentimentalised", + "sentimentalizes", "sentimentalises", + "specializations", "specialisations", + "standardization", "standardisation", + "synchronization", "synchronisation", + "systematization", "systematisation", + "aggrandizement", "aggrandisement", + "characterizing", "characterising", + "collectivizing", "collectivising", + "commercialized", "commercialised", + "commercializes", "commercialises", + "conceptualized", "conceptualised", + "conceptualizes", "conceptualises", + "contextualized", "contextualised", + "contextualizes", "contextualises", + "decentralizing", "decentralising", + "decriminalized", "decriminalised", + "decriminalizes", "decriminalises", + "dehumanization", "dehumanisation", + "demilitarizing", "demilitarising", + "demobilization", "demobilisation", + "demoralization", "demoralisation", + "denationalized", "denationalised", + "denationalizes", "denationalises", + "depersonalized", "depersonalised", + "depersonalizes", "depersonalises", + "dramatizations", "dramatisations", + "editorializing", "editorialising", + "fictionalizing", "fictionalising", + "fraternization", "fraternisation", + "generalization", "generalisation", + "immobilization", "immobilisation", + "individualized", "individualised", + "individualizes", "individualises", + "industrialized", "industrialised", + "industrializes", "industrialises", + "liberalization", "liberalisation", + "monopolization", "monopolisation", + "naturalization", "naturalisation", + "neighborliness", "neighbourliness", + "neutralization", "neutralisation", + 
"organizational", "organisational", + "outmaneuvering", "outmanoeuvring", + "overemphasized", "overemphasised", + "overemphasizes", "overemphasises", + "paleontologist", "palaeontologist", + "particularized", "particularised", + "particularizes", "particularises", + "pasteurization", "pasteurisation", + "pedestrianized", "pedestrianised", + "pedestrianizes", "pedestrianises", + "philosophizing", "philosophising", + "politicization", "politicisation", + "popularization", "popularisation", + "pressurization", "pressurisation", + "prioritization", "prioritisation", + "privatizations", "privatisations", + "propagandizing", "propagandising", + "psychoanalyzed", "psychoanalysed", + "psychoanalyzes", "psychoanalyses", + "reconnoitering", "reconnoitring", + "regularization", "regularisation", + "reorganization", "reorganisation", + "revolutionized", "revolutionised", + "revolutionizes", "revolutionises", + "secularization", "secularisation", + "sensationalize", "sensationalise", + "sentimentalize", "sentimentalise", + "serializations", "serialisations", + "specialization", "specialisation", + "sterilizations", "sterilisations", + "stigmatization", "stigmatisation", + "transistorized", "transistorised", + "unrecognizable", "unrecognisable", + "visualizations", "visualisations", + "westernization", "westernisation", + "accessorizing", "accessorising", + "acclimatizing", "acclimatising", + "amortizations", "amortisations", + "amphitheaters", "amphitheatres", + "anesthetizing", "anaesthetising", + "archeologists", "archaeologists", + "breathalyzers", "breathalysers", + "breathalyzing", "breathalysing", + "cannibalizing", "cannibalising", + "characterized", "characterised", + "characterizes", "characterises", + "circularizing", "circularising", + "collectivized", "collectivised", + "collectivizes", "collectivises", + "commercialize", "commercialise", + "computerizing", "computerising", + "conceptualize", "conceptualise", + "contextualize", "contextualise", + "criminalizing", "criminalising", + "crystallizing", "crystallising", + "decentralized", "decentralised", + "decentralizes", "decentralises", + "decriminalize", "decriminalise", + "demilitarized", "demilitarised", + "demilitarizes", "demilitarises", + "democratizing", "democratising", + "denationalize", "denationalise", + "depersonalize", "depersonalise", + "desensitizing", "desensitising", + "destabilizing", "destabilising", + "disemboweling", "disembowelling", + "dramatization", "dramatisation", + "editorialized", "editorialised", + "editorializes", "editorialises", + "extemporizing", "extemporising", + "externalizing", "externalising", + "familiarizing", "familiarising", + "fertilization", "fertilisation", + "fictionalized", "fictionalised", + "fictionalizes", "fictionalises", + "formalization", "formalisation", + "fossilization", "fossilisation", + "globalization", "globalisation", + "gynecological", "gynaecological", + "gynecologists", "gynaecologists", + "harmonization", "harmonisation", + "hematological", "haematological", + "hematologists", "haematologists", + "hospitalizing", "hospitalising", + "hypothesizing", "hypothesising", + "immortalizing", "immortalising", + "individualize", "individualise", + "industrialize", "industrialise", + "internalizing", "internalising", + "marginalizing", "marginalising", + "materializing", "materialising", + "mechanization", "mechanisation", + "memorializing", "memorialising", + "miniaturizing", "miniaturising", + "nationalizing", "nationalising", + "neighborhoods", "neighbourhoods", + "normalization", 
"normalisation", + "organizations", "organisations", + "outmaneuvered", "outmanoeuvred", + "overemphasize", "overemphasise", + "particularize", "particularise", + "passivization", "passivisation", + "patronizingly", "patronisingly", + "pedestrianize", "pedestrianise", + "pediatricians", "paediatricians", + "personalizing", "personalising", + "philosophized", "philosophised", + "philosophizes", "philosophises", + "privatization", "privatisation", + "propagandized", "propagandised", + "propagandizes", "propagandises", + "proselytizers", "proselytisers", + "proselytizing", "proselytising", + "psychoanalyze", "psychoanalyse", + "pulverization", "pulverisation", + "rationalizing", "rationalising", + "reconnoitered", "reconnoitred", + "revolutionize", "revolutionise", + "romanticizing", "romanticising", + "serialization", "serialisation", + "socialization", "socialisation", + "standardizing", "standardising", + "sterilization", "sterilisation", + "subsidization", "subsidisation", + "synchronizing", "synchronising", + "systematizing", "systematising", + "tantalizingly", "tantalisingly", + "underutilized", "underutilised", + "victimization", "victimisation", + "visualization", "visualisation", + "vocalizations", "vocalisations", + "vulgarization", "vulgarisation", + "accessorized", "accessorised", + "accessorizes", "accessorises", + "acclimatized", "acclimatised", + "acclimatizes", "acclimatises", + "amortization", "amortisation", + "amphitheater", "amphitheatre", + "anesthetists", "anaesthetists", + "anesthetized", "anaesthetised", + "anesthetizes", "anaesthetises", + "antagonizing", "antagonising", + "appetizingly", "appetisingly", + "archeologist", "archaeologist", + "backpedaling", "backpedalling", + "bastardizing", "bastardising", + "behaviorists", "behaviourists", + "bowdlerizing", "bowdlerising", + "breathalyzed", "breathalysed", + "breathalyzes", "breathalyses", + "cannibalized", "cannibalised", + "cannibalizes", "cannibalises", + "capitalizing", "capitalising", + "caramelizing", "caramelising", + "categorizing", "categorising", + "centerpieces", "centrepieces", + "centralizing", "centralising", + "characterize", "characterise", + "circularized", "circularised", + "circularizes", "circularises", + "clarinetists", "clarinettists", + "collectivize", "collectivise", + "colonization", "colonisation", + "computerized", "computerised", + "computerizes", "computerises", + "criminalized", "criminalised", + "criminalizes", "criminalises", + "crystallized", "crystallised", + "crystallizes", "crystallises", + "decentralize", "decentralise", + "dehumanizing", "dehumanising", + "demilitarize", "demilitarise", + "demobilizing", "demobilising", + "democratized", "democratised", + "democratizes", "democratises", + "demoralizing", "demoralising", + "desensitized", "desensitised", + "desensitizes", "desensitises", + "destabilized", "destabilised", + "destabilizes", "destabilises", + "disemboweled", "disembowelled", + "dishonorable", "dishonourable", + "dishonorably", "dishonourably", + "disorganized", "disorganised", + "editorialize", "editorialise", + "equalization", "equalisation", + "evangelizing", "evangelising", + "extemporized", "extemporised", + "extemporizes", "extemporises", + "externalized", "externalised", + "externalizes", "externalises", + "familiarized", "familiarised", + "familiarizes", "familiarises", + "fictionalize", "fictionalise", + "finalization", "finalisation", + "fraternizing", "fraternising", + "generalizing", "generalising", + "gynecologist", "gynaecologist", + "hematologist", 
"haematologist", + "hemophiliacs", "haemophiliacs", + "hemorrhaging", "haemorrhaging", + "homogenizing", "homogenising", + "hospitalized", "hospitalised", + "hospitalizes", "hospitalises", + "hypothesized", "hypothesised", + "hypothesizes", "hypothesises", + "idealization", "idealisation", + "immobilizers", "immobilisers", + "immobilizing", "immobilising", + "immortalized", "immortalised", + "immortalizes", "immortalises", + "immunization", "immunisation", + "initializing", "initialising", + "installments", "instalments", + "internalized", "internalised", + "internalizes", "internalises", + "jeopardizing", "jeopardising", + "legalization", "legalisation", + "legitimizing", "legitimising", + "liberalizing", "liberalising", + "maneuverable", "manoeuvrable", + "maneuverings", "manoeuvrings", + "marginalized", "marginalised", + "marginalizes", "marginalises", + "materialized", "materialised", + "materializes", "materialises", + "maximization", "maximisation", + "memorialized", "memorialised", + "memorializes", "memorialises", + "metabolizing", "metabolising", + "militarizing", "militarising", + "miniaturized", "miniaturised", + "miniaturizes", "miniaturises", + "miscataloged", "miscatalogued", + "misdemeanors", "misdemeanours", + "mobilization", "mobilisation", + "moisturizers", "moisturisers", + "moisturizing", "moisturising", + "monopolizing", "monopolising", + "multicolored", "multicoloured", + "nationalized", "nationalised", + "nationalizes", "nationalises", + "naturalizing", "naturalising", + "neighborhood", "neighbourhood", + "neutralizing", "neutralising", + "organization", "organisation", + "outmaneuvers", "outmanoeuvres", + "paleontology", "palaeontology", + "pasteurizing", "pasteurising", + "pediatrician", "paediatrician", + "personalized", "personalised", + "personalizes", "personalises", + "philosophize", "philosophise", + "plagiarizing", "plagiarising", + "polarization", "polarisation", + "politicizing", "politicising", + "popularizing", "popularising", + "pressurizing", "pressurising", + "prioritizing", "prioritising", + "propagandize", "propagandise", + "proselytized", "proselytised", + "proselytizer", "proselytiser", + "proselytizes", "proselytises", + "radicalizing", "radicalising", + "rationalized", "rationalised", + "rationalizes", "rationalises", + "realizations", "realisations", + "recognizable", "recognisable", + "recognizably", "recognisably", + "recognizance", "recognisance", + "reconnoiters", "reconnoitres", + "regularizing", "regularising", + "reorganizing", "reorganising", + "revitalizing", "revitalising", + "rhapsodizing", "rhapsodising", + "romanticized", "romanticised", + "romanticizes", "romanticises", + "scandalizing", "scandalising", + "scrutinizing", "scrutinising", + "secularizing", "secularising", + "standardized", "standardised", + "standardizes", "standardises", + "stigmatizing", "stigmatising", + "sympathizers", "sympathisers", + "sympathizing", "sympathising", + "synchronized", "synchronised", + "synchronizes", "synchronises", + "synthesizing", "synthesising", + "systematized", "systematised", + "systematizes", "systematises", + "theatergoers", "theatregoers", + "traumatizing", "traumatising", + "trivializing", "trivialising", + "unauthorized", "unauthorised", + "unionization", "unionisation", + "unrecognized", "unrecognised", + "urbanization", "urbanisation", + "vaporization", "vaporisation", + "vocalization", "vocalisation", + "westernizing", "westernising", + "accessorize", "accessorise", + "acclimatize", "acclimatise", + "agonizingly", "agonisingly", 
+ "amortizable", "amortisable", + "anesthetics", "anaesthetics", + "anesthetist", "anaesthetist", + "anesthetize", "anaesthetise", + "anglicizing", "anglicising", + "antagonized", "antagonised", + "antagonizes", "antagonises", + "apologizing", "apologising", + "backpedaled", "backpedalled", + "bastardized", "bastardised", + "bastardizes", "bastardises", + "behaviorism", "behaviourism", + "behaviorist", "behaviourist", + "bowdlerized", "bowdlerised", + "bowdlerizes", "bowdlerises", + "brutalizing", "brutalising", + "cannibalize", "cannibalise", + "capitalized", "capitalised", + "capitalizes", "capitalises", + "caramelized", "caramelised", + "caramelizes", "caramelises", + "carbonizing", "carbonising", + "categorized", "categorised", + "categorizes", "categorises", + "cauterizing", "cauterising", + "centerfolds", "centrefolds", + "centerpiece", "centrepiece", + "centiliters", "centilitres", + "centimeters", "centimetres", + "centralized", "centralised", + "centralizes", "centralises", + "circularize", "circularise", + "clarinetist", "clarinettist", + "computerize", "computerise", + "criminalize", "criminalise", + "criticizing", "criticising", + "crystallize", "crystallise", + "customizing", "customising", + "defenseless", "defenceless", + "dehumanized", "dehumanised", + "dehumanizes", "dehumanises", + "demobilized", "demobilised", + "demobilizes", "demobilises", + "democratize", "democratise", + "demoralized", "demoralised", + "demoralizes", "demoralises", + "deodorizing", "deodorising", + "desensitize", "desensitise", + "destabilize", "destabilise", + "discoloring", "discolouring", + "dishonoring", "dishonouring", + "dramatizing", "dramatising", + "economizing", "economising", + "empathizing", "empathising", + "emphasizing", "emphasising", + "endeavoring", "endeavouring", + "epitomizing", "epitomising", + "esophaguses", "oesophaguses", + "evangelized", "evangelised", + "evangelizes", "evangelises", + "extemporize", "extemporise", + "externalize", "externalise", + "factorizing", "factorising", + "familiarize", "familiarise", + "fantasizing", "fantasising", + "fertilizers", "fertilisers", + "fertilizing", "fertilising", + "formalizing", "formalising", + "fossilizing", "fossilising", + "fraternized", "fraternised", + "fraternizes", "fraternises", + "fulfillment", "fulfilment", + "galvanizing", "galvanising", + "generalized", "generalised", + "generalizes", "generalises", + "ghettoizing", "ghettoising", + "globalizing", "globalising", + "harmonizing", "harmonising", + "hemophiliac", "haemophiliac", + "hemorrhaged", "haemorrhaged", + "hemorrhages", "haemorrhages", + "hemorrhoids", "haemorrhoids", + "homogenized", "homogenised", + "homogenizes", "homogenises", + "hospitalize", "hospitalise", + "hybridizing", "hybridising", + "hypnotizing", "hypnotising", + "hypothesize", "hypothesise", + "immobilized", "immobilised", + "immobilizer", "immobiliser", + "immobilizes", "immobilises", + "immortalize", "immortalise", + "initialized", "initialised", + "initializes", "initialises", + "installment", "instalment", + "internalize", "internalise", + "italicizing", "italicising", + "jeopardized", "jeopardised", + "jeopardizes", "jeopardises", + "legitimized", "legitimised", + "legitimizes", "legitimises", + "liberalized", "liberalised", + "liberalizes", "liberalises", + "lionization", "lionisation", + "liquidizers", "liquidisers", + "liquidizing", "liquidising", + "magnetizing", "magnetising", + "maneuvering", "manoeuvring", + "marginalize", "marginalise", + "marvelously", "marvellously", + "materialize", 
"materialise", + "mechanizing", "mechanising", + "memorialize", "memorialise", + "mesmerizing", "mesmerising", + "metabolized", "metabolised", + "metabolizes", "metabolises", + "militarized", "militarised", + "militarizes", "militarises", + "milliliters", "millilitres", + "millimeters", "millimetres", + "miniaturize", "miniaturise", + "misbehavior", "misbehaviour", + "misdemeanor", "misdemeanour", + "modernizing", "modernising", + "moisturized", "moisturised", + "moisturizer", "moisturiser", + "moisturizes", "moisturises", + "monopolized", "monopolised", + "monopolizes", "monopolises", + "nationalize", "nationalise", + "naturalized", "naturalised", + "naturalizes", "naturalises", + "neighboring", "neighbouring", + "neutralized", "neutralised", + "neutralizes", "neutralises", + "normalizing", "normalising", + "orthopedics", "orthopaedics", + "ostracizing", "ostracising", + "outmaneuver", "outmanoeuvre", + "oxidization", "oxidisation", + "pasteurized", "pasteurised", + "pasteurizes", "pasteurises", + "patronizing", "patronising", + "personalize", "personalise", + "plagiarized", "plagiarised", + "plagiarizes", "plagiarises", + "politicized", "politicised", + "politicizes", "politicises", + "popularized", "popularised", + "popularizes", "popularises", + "pressurized", "pressurised", + "pressurizes", "pressurises", + "prioritized", "prioritised", + "prioritizes", "prioritises", + "privatizing", "privatising", + "proselytize", "proselytise", + "publicizing", "publicising", + "pulverizing", "pulverising", + "radicalized", "radicalised", + "radicalizes", "radicalises", + "randomizing", "randomising", + "rationalize", "rationalise", + "realization", "realisation", + "recognizing", "recognising", + "reconnoiter", "reconnoitre", + "regularized", "regularised", + "regularizes", "regularises", + "reorganized", "reorganised", + "reorganizes", "reorganises", + "revitalized", "revitalised", + "revitalizes", "revitalises", + "rhapsodized", "rhapsodised", + "rhapsodizes", "rhapsodises", + "romanticize", "romanticise", + "scandalized", "scandalised", + "scandalizes", "scandalises", + "scrutinized", "scrutinised", + "scrutinizes", "scrutinises", + "secularized", "secularised", + "secularizes", "secularises", + "sensitizing", "sensitising", + "serializing", "serialising", + "sermonizing", "sermonising", + "signalizing", "signalising", + "skeptically", "sceptically", + "socializing", "socialising", + "solemnizing", "solemnising", + "specialized", "specialised", + "specializes", "specialises", + "squirreling", "squirrelling", + "stabilizers", "stabilisers", + "stabilizing", "stabilising", + "standardize", "standardise", + "sterilizers", "sterilisers", + "sterilizing", "sterilising", + "stigmatized", "stigmatised", + "stigmatizes", "stigmatises", + "subsidizers", "subsidisers", + "subsidizing", "subsidising", + "summarizing", "summarising", + "symbolizing", "symbolising", + "sympathized", "sympathised", + "sympathizer", "sympathiser", + "sympathizes", "sympathises", + "synchronize", "synchronise", + "synthesized", "synthesised", + "synthesizes", "synthesises", + "systematize", "systematise", + "tantalizing", "tantalising", + "temporizing", "temporising", + "tenderizing", "tenderising", + "terrorizing", "terrorising", + "theatergoer", "theatregoer", + "traumatized", "traumatised", + "traumatizes", "traumatises", + "trivialized", "trivialised", + "trivializes", "trivialises", + "tyrannizing", "tyrannising", + "uncataloged", "uncatalogued", + "uncivilized", "uncivilised", + "unfavorable", "unfavourable", + 
"unfavorably", "unfavourably", + "unorganized", "unorganised", + "untrammeled", "untrammelled", + "utilization", "utilisation", + "vandalizing", "vandalising", + "verbalizing", "verbalising", + "victimizing", "victimising", + "visualizing", "visualising", + "vulgarizing", "vulgarising", + "watercolors", "watercolours", + "westernized", "westernised", + "westernizes", "westernises", + "amortizing", "amortising", + "anesthesia", "anaesthesia", + "anesthetic", "anaesthetic", + "anglicized", "anglicised", + "anglicizes", "anglicises", + "annualized", "annualised", + "antagonize", "antagonise", + "apologized", "apologised", + "apologizes", "apologises", + "appetizers", "appetisers", + "appetizing", "appetising", + "archeology", "archaeology", + "authorizes", "authorises", + "bastardize", "bastardise", + "bedeviling", "bedevilling", + "behavioral", "behavioural", + "belaboring", "belabouring", + "bowdlerize", "bowdlerise", + "brutalized", "brutalised", + "brutalizes", "brutalises", + "canalizing", "canalising", + "canonizing", "canonising", + "capitalize", "capitalise", + "caramelize", "caramelise", + "carbonized", "carbonised", + "carbonizes", "carbonises", + "cataloging", "cataloguing", + "catalyzing", "catalysing", + "categorize", "categorise", + "cauterized", "cauterised", + "cauterizes", "cauterises", + "centerfold", "centrefold", + "centiliter", "centilitre", + "centimeter", "centimetre", + "centralize", "centralise", + "channeling", "channelling", + "checkbooks", "chequebooks", + "civilizing", "civilising", + "colonizers", "colonisers", + "colonizing", "colonising", + "colorfully", "colourfully", + "colorizing", "colourizing", + "councilors", "councillors", + "counselors", "counsellors", + "criticized", "criticised", + "criticizes", "criticises", + "customized", "customised", + "customizes", "customises", + "dehumanize", "dehumanise", + "demobilize", "demobilise", + "demonizing", "demonising", + "demoralize", "demoralise", + "deodorized", "deodorised", + "deodorizes", "deodorises", + "deputizing", "deputising", + "digitizing", "digitising", + "discolored", "discoloured", + "disheveled", "dishevelled", + "dishonored", "dishonoured", + "dramatized", "dramatised", + "dramatizes", "dramatises", + "economized", "economised", + "economizes", "economises", + "empathized", "empathised", + "empathizes", "empathises", + "emphasized", "emphasised", + "emphasizes", "emphasises", + "endeavored", "endeavoured", + "energizing", "energising", + "epicenters", "epicentres", + "epitomized", "epitomised", + "epitomizes", "epitomises", + "equalizers", "equalisers", + "equalizing", "equalising", + "eulogizing", "eulogising", + "evangelize", "evangelise", + "factorized", "factorised", + "factorizes", "factorises", + "fantasized", "fantasised", + "fantasizes", "fantasises", + "favoritism", "favouritism", + "feminizing", "feminising", + "fertilized", "fertilised", + "fertilizer", "fertiliser", + "fertilizes", "fertilises", + "fiberglass", "fibreglass", + "finalizing", "finalising", + "flavorings", "flavourings", + "flavorless", "flavourless", + "flavorsome", "flavoursome", + "formalized", "formalised", + "formalizes", "formalises", + "fossilized", "fossilised", + "fossilizes", "fossilises", + "fraternize", "fraternise", + "galvanized", "galvanised", + "galvanizes", "galvanises", + "generalize", "generalise", + "ghettoized", "ghettoised", + "ghettoizes", "ghettoises", + "globalized", "globalised", + "globalizes", "globalises", + "gruelingly", "gruellingly", + "gynecology", "gynaecology", + "harmonized", 
"harmonised", + "harmonizes", "harmonises", + "hematology", "haematology", + "hemoglobin", "haemoglobin", + "hemophilia", "haemophilia", + "hemorrhage", "haemorrhage", + "homogenize", "homogenise", + "humanizing", "humanising", + "hybridized", "hybridised", + "hybridizes", "hybridises", + "hypnotized", "hypnotised", + "hypnotizes", "hypnotises", + "idealizing", "idealising", + "immobilize", "immobilise", + "immunizing", "immunising", + "impaneling", "impanelling", + "imperiling", "imperilling", + "initialing", "initialling", + "initialize", "initialise", + "ionization", "ionisation", + "italicized", "italicised", + "italicizes", "italicises", + "jeopardize", "jeopardise", + "kilometers", "kilometres", + "lackluster", "lacklustre", + "legalizing", "legalising", + "legitimize", "legitimise", + "liberalize", "liberalise", + "liquidized", "liquidised", + "liquidizer", "liquidiser", + "liquidizes", "liquidises", + "localizing", "localising", + "magnetized", "magnetised", + "magnetizes", "magnetises", + "maneuvered", "manoeuvred", + "marshaling", "marshalling", + "maximizing", "maximising", + "mechanized", "mechanised", + "mechanizes", "mechanises", + "memorizing", "memorising", + "mesmerized", "mesmerised", + "mesmerizes", "mesmerises", + "metabolize", "metabolise", + "militarize", "militarise", + "milliliter", "millilitre", + "millimeter", "millimetre", + "minimizing", "minimising", + "mobilizing", "mobilising", + "modernized", "modernised", + "modernizes", "modernises", + "moisturize", "moisturise", + "monopolize", "monopolise", + "moralizing", "moralising", + "naturalize", "naturalise", + "neighborly", "neighbourly", + "neutralize", "neutralise", + "normalized", "normalised", + "normalizes", "normalises", + "optimizing", "optimising", + "organizers", "organisers", + "organizing", "organising", + "orthopedic", "orthopaedic", + "ostracized", "ostracised", + "ostracizes", "ostracises", + "paralyzing", "paralysing", + "pasteurize", "pasteurise", + "patronized", "patronised", + "patronizes", "patronises", + "pedophiles", "paedophiles", + "pedophilia", "paedophilia", + "penalizing", "penalising", + "plagiarize", "plagiarise", + "plowshares", "ploughshares", + "polarizing", "polarising", + "politicize", "politicise", + "popularize", "popularise", + "prioritize", "prioritise", + "privatized", "privatised", + "privatizes", "privatises", + "publicized", "publicised", + "publicizes", "publicises", + "pulverized", "pulverised", + "pulverizes", "pulverises", + "quarreling", "quarrelling", + "radicalize", "radicalise", + "randomized", "randomised", + "randomizes", "randomises", + "realizable", "realisable", + "recognized", "recognised", + "recognizes", "recognises", + "regularize", "regularise", + "remodeling", "remodelling", + "reorganize", "reorganise", + "revitalize", "revitalise", + "rhapsodize", "rhapsodise", + "ritualized", "ritualised", + "sanitizing", "sanitising", + "satirizing", "satirising", + "scandalize", "scandalise", + "scrutinize", "scrutinise", + "secularize", "secularise", + "sensitized", "sensitised", + "sensitizes", "sensitises", + "sepulchers", "sepulchres", + "serialized", "serialised", + "serializes", "serialises", + "sermonized", "sermonised", + "sermonizes", "sermonises", + "shriveling", "shrivelling", + "signalized", "signalised", + "signalizes", "signalises", + "skepticism", "scepticism", + "socialized", "socialised", + "socializes", "socialises", + "sodomizing", "sodomising", + "solemnized", "solemnised", + "solemnizes", "solemnises", + "specialize", "specialise", + 
"squirreled", "squirrelled", + "stabilized", "stabilised", + "stabilizer", "stabiliser", + "stabilizes", "stabilises", + "stenciling", "stencilling", + "sterilized", "sterilised", + "sterilizer", "steriliser", + "sterilizes", "sterilises", + "stigmatize", "stigmatise", + "subsidized", "subsidised", + "subsidizer", "subsidiser", + "subsidizes", "subsidises", + "summarized", "summarised", + "summarizes", "summarises", + "symbolized", "symbolised", + "symbolizes", "symbolises", + "sympathize", "sympathise", + "tantalized", "tantalised", + "tantalizes", "tantalises", + "temporized", "temporised", + "temporizes", "temporises", + "tenderized", "tenderised", + "tenderizes", "tenderises", + "terrorized", "terrorised", + "terrorizes", "terrorises", + "theorizing", "theorising", + "traumatize", "traumatise", + "trivialize", "trivialise", + "tyrannized", "tyrannised", + "tyrannizes", "tyrannises", + "unionizing", "unionising", + "unraveling", "unravelling", + "urbanizing", "urbanising", + "utilizable", "utilisable", + "vandalized", "vandalised", + "vandalizes", "vandalises", + "vaporizing", "vaporising", + "verbalized", "verbalised", + "verbalizes", "verbalises", + "victimized", "victimised", + "victimizes", "victimises", + "visualized", "visualised", + "visualizes", "visualises", + "vocalizing", "vocalising", + "vulcanized", "vulcanised", + "vulgarized", "vulgarised", + "vulgarizes", "vulgarises", + "watercolor", "watercolour", + "westernize", "westernise", + "womanizers", "womanisers", + "womanizing", "womanising", + "worshiping", "worshipping", + "agonizing", "agonising", + "airplanes", "aeroplanes", + "amortized", "amortised", + "amortizes", "amortises", + "analyzing", "analysing", + "apologize", "apologise", + "appetizer", "appetiser", + "artifacts", "artefacts", + "baptizing", "baptising", + "bedeviled", "bedevilled", + "behaviors", "behaviours", + "bejeweled", "bejewelled", + "belabored", "belaboured", + "brutalize", "brutalise", + "canalized", "canalised", + "canalizes", "canalises", + "canonized", "canonised", + "canonizes", "canonises", + "carbonize", "carbonise", + "cataloged", "catalogued", + "catalyzed", "catalysed", + "catalyzes", "catalyses", + "cauterize", "cauterise", + "channeled", "channelled", + "checkbook", "chequebook", + "checkered", "chequered", + "chiseling", "chiselling", + "civilized", "civilised", + "civilizes", "civilises", + "clamoring", "clamouring", + "colonized", "colonised", + "colonizer", "coloniser", + "colonizes", "colonises", + "colorants", "colourants", + "colorized", "colourized", + "colorizes", "colourizes", + "colorless", "colourless", + "councilor", "councillor", + "counseled", "counselled", + "counselor", "counsellor", + "criticize", "criticise", + "cudgeling", "cudgelling", + "customize", "customise", + "demonized", "demonised", + "demonizes", "demonises", + "deodorize", "deodorise", + "deputized", "deputised", + "deputizes", "deputises", + "digitized", "digitised", + "digitizes", "digitises", + "discolors", "discolours", + "dishonors", "dishonours", + "dramatize", "dramatise", + "driveling", "drivelling", + "economize", "economise", + "empathize", "empathise", + "emphasize", "emphasise", + "enameling", "enamelling", + "endeavors", "endeavours", + "energized", "energised", + "energizes", "energises", + "enthralls", "enthrals", + "epicenter", "epicentre", + "epitomize", "epitomise", + "equalized", "equalised", + "equalizer", "equaliser", + "equalizes", "equalises", + "eulogized", "eulogised", + "eulogizes", "eulogises", + "factorize", "factorise", + 
"fantasize", "fantasise", + "favorable", "favourable", + "favorably", "favourably", + "favorites", "favourites", + "feminized", "feminised", + "feminizes", "feminises", + "fertilize", "fertilise", + "finalized", "finalised", + "finalizes", "finalises", + "flavoring", "flavouring", + "formalize", "formalise", + "fossilize", "fossilise", + "funneling", "funnelling", + "galvanize", "galvanise", + "gamboling", "gambolling", + "ghettoize", "ghettoise", + "globalize", "globalise", + "gonorrhea", "gonorrhoea", + "groveling", "grovelling", + "harboring", "harbouring", + "harmonize", "harmonise", + "honorably", "honourably", + "humanized", "humanised", + "humanizes", "humanises", + "hybridize", "hybridise", + "hypnotize", "hypnotise", + "idealized", "idealised", + "idealizes", "idealises", + "idolizing", "idolising", + "immunized", "immunised", + "immunizes", "immunises", + "impaneled", "impanelled", + "imperiled", "imperilled", + "initialed", "initialled", + "italicize", "italicise", + "itemizing", "itemising", + "kilometer", "kilometre", + "legalized", "legalised", + "legalizes", "legalises", + "lionizing", "lionising", + "liquidize", "liquidise", + "localized", "localised", + "localizes", "localises", + "magnetize", "magnetise", + "maneuvers", "manoeuvres", + "marshaled", "marshalled", + "marveling", "marvelling", + "marvelous", "marvellous", + "maximized", "maximised", + "maximizes", "maximises", + "mechanize", "mechanise", + "memorized", "memorised", + "memorizes", "memorises", + "mesmerize", "mesmerise", + "minimized", "minimised", + "minimizes", "minimises", + "mobilized", "mobilised", + "mobilizes", "mobilises", + "modernize", "modernise", + "moldering", "mouldering", + "moralized", "moralised", + "moralizes", "moralises", + "motorized", "motorised", + "mustached", "moustached", + "mustaches", "moustaches", + "neighbors", "neighbours", + "normalize", "normalise", + "optimized", "optimised", + "optimizes", "optimises", + "organized", "organised", + "organizer", "organiser", + "organizes", "organises", + "ostracize", "ostracise", + "oxidizing", "oxidising", + "panelists", "panellists", + "paralyzed", "paralysed", + "paralyzes", "paralyses", + "parceling", "parcelling", + "patronize", "patronise", + "pedophile", "paedophile", + "penalized", "penalised", + "penalizes", "penalises", + "penciling", "pencilling", + "plowshare", "ploughshare", + "polarized", "polarised", + "polarizes", "polarises", + "practiced", "practised", + "pretenses", "pretences", + "privatize", "privatise", + "publicize", "publicise", + "pulverize", "pulverise", + "quarreled", "quarrelled", + "randomize", "randomise", + "realizing", "realising", + "recognize", "recognise", + "refueling", "refuelling", + "remodeled", "remodelled", + "remolding", "remoulding", + "saltpeter", "saltpetre", + "sanitized", "sanitised", + "sanitizes", "sanitises", + "satirized", "satirised", + "satirizes", "satirises", + "sensitize", "sensitise", + "sepulcher", "sepulchre", + "serialize", "serialise", + "sermonize", "sermonise", + "shoveling", "shovelling", + "shriveled", "shrivelled", + "signaling", "signalling", + "signalize", "signalise", + "skeptical", "sceptical", + "sniveling", "snivelling", + "snorkeled", "snorkelled", + "socialize", "socialise", + "sodomized", "sodomised", + "sodomizes", "sodomises", + "solemnize", "solemnise", + "spiraling", "spiralling", + "splendors", "splendours", + "stabilize", "stabilise", + "stenciled", "stencilled", + "sterilize", "sterilise", + "subsidize", "subsidise", + "succoring", "succouring", + "sulfurous", 
"sulphurous", + "summarize", "summarise", + "swiveling", "swivelling", + "symbolize", "symbolise", + "tantalize", "tantalise", + "temporize", "temporise", + "tenderize", "tenderise", + "terrorize", "terrorise", + "theorized", "theorised", + "theorizes", "theorises", + "travelers", "travellers", + "traveling", "travelling", + "tricolors", "tricolours", + "tunneling", "tunnelling", + "tyrannize", "tyrannise", + "unequaled", "unequalled", + "unionized", "unionised", + "unionizes", "unionises", + "unraveled", "unravelled", + "unrivaled", "unrivalled", + "urbanized", "urbanised", + "urbanizes", "urbanises", + "utilizing", "utilising", + "vandalize", "vandalise", + "vaporized", "vaporised", + "vaporizes", "vaporises", + "verbalize", "verbalise", + "victimize", "victimise", + "visualize", "visualise", + "vocalized", "vocalised", + "vocalizes", "vocalises", + "vulgarize", "vulgarise", + "weaseling", "weaselling", + "womanized", "womanised", + "womanizer", "womaniser", + "womanizes", "womanises", + "worshiped", "worshipped", + "worshiper", "worshipper", + "agonized", "agonised", + "agonizes", "agonises", + "airplane", "aeroplane", + "aluminum", "aluminium", + "amortize", "amortise", + "analyzed", "analysed", + "analyzes", "analyses", + "armorers", "armourers", + "armories", "armouries", + "artifact", "artefact", + "baptized", "baptised", + "baptizes", "baptises", + "behavior", "behaviour", + "behooved", "behoved", + "behooves", "behoves", + "belabors", "belabours", + "calibers", "calibres", + "canalize", "canalise", + "canonize", "canonise", + "catalogs", "catalogues", + "catalyze", "catalyse", + "caviling", "cavilling", + "centered", "centred", + "chiseled", "chiselled", + "civilize", "civilise", + "clamored", "clamoured", + "colonize", "colonise", + "colorant", "colourant", + "coloreds", "coloureds", + "colorful", "colourful", + "coloring", "colouring", + "colorize", "colourize", + "coziness", "cosiness", + "cruelest", "cruellest", + "cudgeled", "cudgelled", + "defenses", "defences", + "demeanor", "demeanour", + "demonize", "demonise", + "deputize", "deputise", + "diarrhea", "diarrhoea", + "digitize", "digitise", + "disfavor", "disfavour", + "dishonor", "dishonour", + "distills", "distils", + "driveled", "drivelled", + "enameled", "enamelled", + "enamored", "enamoured", + "endeavor", "endeavour", + "energize", "energise", + "epaulets", "epaulettes", + "equalize", "equalise", + "estrogen", "oestrogen", + "etiology", "aetiology", + "eulogize", "eulogise", + "favoring", "favouring", + "favorite", "favourite", + "feminize", "feminise", + "finalize", "finalise", + "flavored", "flavoured", + "flutists", "flautists", + "fulfills", "fulfils", + "funneled", "funnelled", + "gamboled", "gambolled", + "graveled", "gravelled", + "groveled", "grovelled", + "grueling", "gruelling", + "harbored", "harboured", + "honoring", "honouring", + "humanize", "humanise", + "humoring", "humouring", + "idealize", "idealise", + "idolized", "idolised", + "idolizes", "idolises", + "immunize", "immunise", + "ionizing", "ionising", + "itemized", "itemised", + "itemizes", "itemises", + "jewelers", "jewellers", + "labeling", "labelling", + "laborers", "labourers", + "laboring", "labouring", + "legalize", "legalise", + "leukemia", "leukaemia", + "levelers", "levellers", + "leveling", "levelling", + "libeling", "libelling", + "libelous", "libellous", + "lionized", "lionised", + "lionizes", "lionises", + "localize", "localise", + "louvered", "louvred", + "maneuver", "manoeuvre", + "marveled", "marvelled", + "maximize", "maximise", + 
"memorize", "memorise", + "minimize", "minimise", + "mobilize", "mobilise", + "modelers", "modellers", + "modeling", "modelling", + "moldered", "mouldered", + "moldiest", "mouldiest", + "moldings", "mouldings", + "moralize", "moralise", + "mustache", "moustache", + "neighbor", "neighbour", + "odorless", "odourless", + "offenses", "offences", + "optimize", "optimise", + "organize", "organise", + "oxidized", "oxidised", + "oxidizes", "oxidises", + "paneling", "panelling", + "panelist", "panellist", + "paralyze", "paralyse", + "parceled", "parcelled", + "pedaling", "pedalling", + "penalize", "penalise", + "penciled", "pencilled", + "polarize", "polarise", + "pretense", "pretence", + "pummeled", "pummelling", + "raveling", "ravelling", + "realized", "realised", + "realizes", "realises", + "refueled", "refuelled", + "remolded", "remoulded", + "revelers", "revellers", + "reveling", "revelling", + "rivaling", "rivalling", + "sanitize", "sanitise", + "satirize", "satirise", + "savories", "savouries", + "savoring", "savouring", + "scepters", "sceptres", + "shoveled", "shovelled", + "signaled", "signalled", + "skeptics", "sceptics", + "sniveled", "snivelled", + "sodomize", "sodomise", + "specters", "spectres", + "spiraled", "spiralled", + "splendor", "splendour", + "succored", "succoured", + "sulfates", "sulphates", + "sulfides", "sulphides", + "swiveled", "swivelled", + "tasseled", "tasselled", + "theaters", "theatres", + "theorize", "theorise", + "toweling", "towelling", + "traveler", "traveller", + "trialing", "trialling", + "tricolor", "tricolour", + "tunneled", "tunnelled", + "unionize", "unionise", + "unsavory", "unsavoury", + "urbanize", "urbanise", + "utilized", "utilised", + "utilizes", "utilises", + "vaporize", "vaporise", + "vocalize", "vocalise", + "weaseled", "weaselled", + "womanize", "womanise", + "yodeling", "yodelling", + "agonize", "agonise", + "analyze", "analyse", + "appalls", "appals", + "armored", "armoured", + "armorer", "armourer", + "baptize", "baptise", + "behoove", "behove", + "belabor", "belabour", + "beveled", "bevelled", + "caliber", "calibre", + "caroled", "carolled", + "caviled", "cavilled", + "centers", "centres", + "clamors", "clamours", + "clangor", "clangour", + "colored", "coloured", + "coziest", "cosiest", + "crueler", "crueller", + "defense", "defence", + "dialing", "dialling", + "dialogs", "dialogues", + "distill", "distil", + "dueling", "duelling", + "enrolls", "enrols", + "epaulet", "epaulette", + "favored", "favoured", + "flavors", "flavours", + "flutist", "flautist", + "fueling", "fuelling", + "fulfill", "fulfil", + "goiters", "goitres", + "harbors", "harbours", + "honored", "honoured", + "humored", "humoured", + "idolize", "idolise", + "ionized", "ionised", + "ionizes", "ionises", + "itemize", "itemise", + "jeweled", "jewelled", + "jeweler", "jeweller", + "jewelry", "jewellery", + "labeled", "labelled", + "labored", "laboured", + "laborer", "labourer", + "leveled", "levelled", + "leveler", "leveller", + "libeled", "libelled", + "lionize", "lionise", + "louvers", "louvres", + "modeled", "modelled", + "modeler", "modeller", + "molders", "moulders", + "moldier", "mouldier", + "molding", "moulding", + "molting", "moulting", + "offense", "offence", + "oxidize", "oxidise", + "pajamas", "pyjamas", + "paneled", "panelled", + "parlors", "parlours", + "pedaled", "pedalled", + "plowing", "ploughing", + "plowman", "ploughman", + "plowmen", "ploughmen", + "realize", "realise", + "remolds", "remoulds", + "reveled", "revelled", + "reveler", "reveller", + "rivaled", 
"rivalled", + "rumored", "rumoured", + "saviors", "saviours", + "savored", "savoured", + "scepter", "sceptre", + "skeptic", "sceptic", + "specter", "spectre", + "succors", "succours", + "sulfate", "sulphate", + "sulfide", "sulphide", + "theater", "theatre", + "toweled", "towelled", + "toxemia", "toxaemia", + "trialed", "trialled", + "utilize", "utilise", + "yodeled", "yodelled", + "anemia", "anaemia", + "anemic", "anaemic", + "appall", "appal", + "arbors", "arbours", + "armory", "armoury", + "candor", "candour", + "center", "centre", + "clamor", "clamour", + "colors", "colours", + "cozier", "cosier", + "cozies", "cosies", + "cozily", "cosily", + "dialed", "dialled", + "drafty", "draughty", + "dueled", "duelled", + "favors", "favours", + "fervor", "fervour", + "fibers", "fibres", + "flavor", "flavour", + "fueled", "fuelled", + "goiter", "goitre", + "harbor", "harbour", + "honors", "honours", + "humors", "humours", + "labors", "labours", + "liters", "litres", + "louver", "louvre", + "luster", "lustre", + "meager", "meagre", + "miters", "mitres", + "molded", "moulded", + "molder", "moulder", + "molted", "moulted", + "pajama", "pyjama", + "parlor", "parlour", + "plowed", "ploughed", + "rancor", "rancour", + "remold", "remould", + "rigors", "rigours", + "rumors", "rumours", + "savors", "savours", + "savory", "savoury", + "succor", "succour", + "tumors", "tumours", + "vapors", "vapours", + "aging", "ageing", + "arbor", "arbour", + "ardor", "ardour", + "armor", "armour", + "chili", "chilli", + "color", "colour", + "edema", "edoema", + "favor", "favour", + "fecal", "faecal", + "feces", "faeces", + "fiber", "fibre", + "honor", "honour", + "humor", "humour", + "labor", "labour", + "liter", "litre", + "miter", "mitre", + "molds", "moulds", + "moldy", "mouldy", + "molts", "moults", + "odors", "odours", + "plows", "ploughs", + "rigor", "rigour", + "rumor", "rumour", + "savor", "savour", + "valor", "valour", + "vapor", "vapour", + "vigor", "vigour", + "cozy", "cosy", + "mold", "mould", + "molt", "moult", + "odor", "odour", + "plow", "plough", +} diff --git a/vendor/github.com/golangci/revgrep/.gitignore b/vendor/github.com/golangci/revgrep/.gitignore new file mode 100644 index 000000000..0540fe2ca --- /dev/null +++ b/vendor/github.com/golangci/revgrep/.gitignore @@ -0,0 +1 @@ +testdata/git diff --git a/vendor/github.com/golangci/revgrep/.travis.yml b/vendor/github.com/golangci/revgrep/.travis.yml new file mode 100644 index 000000000..d16d8b5bd --- /dev/null +++ b/vendor/github.com/golangci/revgrep/.travis.yml @@ -0,0 +1,7 @@ +language: go +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci + diff --git a/vendor/github.com/golangci/revgrep/LICENSE b/vendor/github.com/golangci/revgrep/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/golangci/revgrep/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/golangci/revgrep/README.md b/vendor/github.com/golangci/revgrep/README.md new file mode 100644 index 000000000..31faefee9 --- /dev/null +++ b/vendor/github.com/golangci/revgrep/README.md @@ -0,0 +1,58 @@ +# Overview + +[![Build Status](https://travis-ci.org/bradleyfalzon/revgrep.svg?branch=master)](https://travis-ci.org/bradleyfalzon/revgrep) [![Coverage +Status](https://coveralls.io/repos/github/bradleyfalzon/revgrep/badge.svg?branch=master)](https://coveralls.io/github/bradleyfalzon/revgrep?branch=master) [![GoDoc](https://godoc.org/github.com/bradleyfalzon/revgrep?status.svg)](https://godoc.org/github.com/bradleyfalzon/revgrep) + +`revgrep` is a CLI tool used to filter static analysis tools to only lines changed based on a commit reference. + +# Install + +```bash +go get -u github.com/bradleyfalzon/revgrep/... +``` + +# Usage + +In the scenario below, a change was made causing a warning in `go vet` on line 5, but `go vet` will show all warnings. +Using `revgrep`, you can show only warnings for lines of code that have been changed (in this case, hiding line 6). + +```bash +[user@host dir (master)]$ go vet +main.go:5: missing argument for Sprintf("%s"): format reads arg 1, have only 0 args +main.go:6: missing argument for Sprintf("%s"): format reads arg 1, have only 0 args +[user@host dir (master)]$ go vet |& revgrep +main.go:5: missing argument for Sprintf("%s"): format reads arg 1, have only 0 args +``` + +`|&` is shown above as many static analysis programs write to `stderr`, not `stdout`, `|&` combines both `stderr` and +`stdout`. It could also be achieved with `go vet 2>&1 | revgrep`. + +`revgrep` CLI tool will return an exit status of 1 if any issues match, else it will return 0. Consider using +`${PIPESTATUS[0]}` for the exit status of the `go vet` command in the above example. + +``` +Usage: revgrep [options] [from-rev] [to-rev] + +from-rev filters issues to lines changed since (and including) this revision + to-rev filters issues to lines changed since (and including) this revision, requires + + If no revisions are given, and there are unstaged changes or untracked files, only those changes are shown + If no revisions are given, and there are no unstaged changes or untracked files, only changes in HEAD~ are shown + If from-rev is given and to-rev is not, only changes between from-rev and HEAD are shown. 
+ + -d Show debug output + -regexp string + Regexp to match path, line number, optional column number, and message +``` + +# Other Examples + +Issues between branches: +```bash +[user@host dir (feature/branch)]$ go vet |& revgrep master +``` + +Issues since last push: +```bash +[user@host dir (master)]$ go vet |& revgrep origin/master +``` diff --git a/vendor/github.com/golangci/revgrep/go.mod b/vendor/github.com/golangci/revgrep/go.mod new file mode 100644 index 000000000..8bdbb1951 --- /dev/null +++ b/vendor/github.com/golangci/revgrep/go.mod @@ -0,0 +1,3 @@ +module github.com/golangci/revgrep + +go 1.13 diff --git a/vendor/github.com/golangci/revgrep/go.sum b/vendor/github.com/golangci/revgrep/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/golangci/revgrep/revgrep.go b/vendor/github.com/golangci/revgrep/revgrep.go new file mode 100644 index 000000000..d0940d300 --- /dev/null +++ b/vendor/github.com/golangci/revgrep/revgrep.go @@ -0,0 +1,410 @@ +package revgrep + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +// Checker provides APIs to filter static analysis tools to specific commits, +// such as showing only issues since last commit. +type Checker struct { + // Patch file (unified) to read to detect lines being changed, if nil revgrep + // will attempt to detect the VCS and generate an appropriate patch. Auto + // detection will search for uncommitted changes first, if none found, will + // generate a patch from last committed change. File paths within patches + // must be relative to current working directory. + Patch io.Reader + // NewFiles is a list of file names (with absolute paths) where the entire + // contents of the file is new. + NewFiles []string + // Debug sets the debug writer for additional output. + Debug io.Writer + // RevisionFrom check revision starting at, leave blank for auto detection + // ignored if patch is set. + RevisionFrom string + // RevisionTo checks revision finishing at, leave blank for auto detection + // ignored if patch is set. + RevisionTo string + // Regexp to match path, line number, optional column number, and message. + Regexp string + // AbsPath is used to make an absolute path of an issue's filename to be + // relative in order to match patch file. If not set, current working + // directory is used. + AbsPath string + + // Calculated changes for next calls to IsNewIssue + changes map[string][]pos +} + +// Issue contains metadata about an issue found. +type Issue struct { + // File is the name of the file as it appeared from the patch. + File string + // LineNo is the line number of the file. + LineNo int + // ColNo is the column number or 0 if none could be parsed. + ColNo int + // HunkPos is position from file's first @@, for new files this will be the + // line number. + // + // See also: https://developer.github.com/v3/pulls/comments/#create-a-comment + HunkPos int + // Issue text as it appeared from the tool. + Issue string + // Message is the issue without file name, line number and column number. 
+ Message string +} + +func (c *Checker) preparePatch() error { + // Check if patch is supplied, if not, retrieve from VCS + if c.Patch == nil { + var err error + c.Patch, c.NewFiles, err = GitPatch(c.RevisionFrom, c.RevisionTo) + if err != nil { + return fmt.Errorf("could not read git repo: %s", err) + } + if c.Patch == nil { + return errors.New("no version control repository found") + } + } + + return nil +} + +// InputIssue represents issue found by some linter +type InputIssue interface { + FilePath() string + Line() int +} + +type simpleInputIssue struct { + filePath string + lineNumber int +} + +func (i simpleInputIssue) FilePath() string { + return i.filePath +} + +func (i simpleInputIssue) Line() int { + return i.lineNumber +} + +// Prepare extracts a patch and changed lines +func (c *Checker) Prepare() error { + returnErr := c.preparePatch() + c.changes = c.linesChanged() + return returnErr +} + +// IsNewIssue checks whether issue found by linter is new: it was found in changed lines +func (c Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { + fchanges, ok := c.changes[i.FilePath()] + if !ok { // file wasn't changed + return 0, false + } + + var ( + fpos pos + changed bool + ) + // found file, see if lines matched + for _, pos := range fchanges { + if pos.lineNo == i.Line() { + fpos = pos + changed = true + break + } + } + + if changed || fchanges == nil { + // either file changed or it's a new file + hunkPos := fpos.lineNo + if changed { // existing file changed + hunkPos = fpos.hunkPos + } + + return hunkPos, true + } + + return 0, false +} + +// Check scans reader and writes any lines to writer that have been added in +// Checker.Patch. +// +// Returns issues written to writer when no error occurs. +// +// If no VCS could be found or other VCS errors occur, all issues are written +// to writer and an error is returned. +// +// File paths in reader must be relative to current working directory or +// absolute. 
+func (c Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err error) { + returnErr := c.Prepare() + writeAll := returnErr != nil + + // file.go:lineNo:colNo:message + // colNo is optional, strip spaces before message + lineRE := regexp.MustCompile(`(.*?\.go):([0-9]+):([0-9]+)?:?\s*(.*)`) + if c.Regexp != "" { + lineRE, err = regexp.Compile(c.Regexp) + if err != nil { + return nil, fmt.Errorf("could not parse regexp: %v", err) + } + } + + // TODO consider lazy loading this, if there's nothing in stdin, no point + // checking for recent changes + c.debugf("lines changed: %+v", c.changes) + + absPath := c.AbsPath + if absPath == "" { + absPath, err = os.Getwd() + if err != nil { + returnErr = fmt.Errorf("could not get current working directory: %s", err) + } + } + + // Scan each line in reader and only write those lines if lines changed + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := lineRE.FindSubmatch(scanner.Bytes()) + if line == nil { + c.debugf("cannot parse file+line number: %s", scanner.Text()) + continue + } + + if writeAll { + fmt.Fprintln(writer, scanner.Text()) + continue + } + + // Make absolute path names relative + path := string(line[1]) + if rel, err := filepath.Rel(absPath, path); err == nil { + c.debugf("rewrote path from %q to %q (absPath: %q)", path, rel, absPath) + path = rel + } + + // Parse line number + lno, err := strconv.ParseUint(string(line[2]), 10, 64) + if err != nil { + c.debugf("cannot parse line number: %q", scanner.Text()) + continue + } + + // Parse optional column number + var cno uint64 + if len(line[3]) > 0 { + cno, err = strconv.ParseUint(string(line[3]), 10, 64) + if err != nil { + c.debugf("cannot parse column number: %q", scanner.Text()) + // Ignore this error and continue + } + } + + // Extract message + msg := string(line[4]) + + c.debugf("path: %q, lineNo: %v, colNo: %v, msg: %q", path, lno, cno, msg) + i := simpleInputIssue{ + filePath: path, + lineNumber: int(lno), + } + hunkPos, changed := c.IsNewIssue(i) + if changed { + issue := Issue{ + File: path, + LineNo: int(lno), + ColNo: int(cno), + HunkPos: hunkPos, + Issue: scanner.Text(), + Message: msg, + } + issues = append(issues, issue) + fmt.Fprintln(writer, scanner.Text()) + } else { + c.debugf("unchanged: %s", scanner.Text()) + } + } + if err := scanner.Err(); err != nil { + returnErr = fmt.Errorf("error reading standard input: %s", err) + } + return issues, returnErr +} + +func (c Checker) debugf(format string, s ...interface{}) { + if c.Debug != nil { + fmt.Fprint(c.Debug, "DEBUG: ") + fmt.Fprintf(c.Debug, format+"\n", s...) + } +} + +type pos struct { + lineNo int // line number + hunkPos int // position relative to first @@ in file +} + +// linesChanges returns a map of file names to line numbers being changed. +// If key is nil, the file has been recently added, else it contains a slice +// of positions that have been added. 
+func (c Checker) linesChanged() map[string][]pos { + type state struct { + file string + lineNo int // current line number within chunk + hunkPos int // current line count since first @@ in file + changes []pos // position of changes + } + + var ( + s state + changes = make(map[string][]pos) + ) + + for _, file := range c.NewFiles { + changes[file] = nil + } + + if c.Patch == nil { + return changes + } + + scanner := bufio.NewReader(c.Patch) + var scanErr error + for { + lineB, isPrefix, err := scanner.ReadLine() + if isPrefix { + // If a single line overflowed the buffer, don't bother processing it as + // it's likey part of a file and not relevant to the patch. + continue + } + if err != nil { + scanErr = err + break + } + line := strings.TrimRight(string(lineB), "\n") + + c.debugf(line) + s.lineNo++ + s.hunkPos++ + switch { + case strings.HasPrefix(line, "+++ ") && len(line) > 4: + if s.changes != nil { + // record the last state + changes[s.file] = s.changes + } + // 6 removes "+++ b/" + s = state{file: line[6:], hunkPos: -1, changes: []pos{}} + case strings.HasPrefix(line, "@@ "): + // @@ -1 +2,4 @@ + // chdr ^^^^^^^^^^^^^ + // ahdr ^^^^ + // cstart ^ + chdr := strings.Split(line, " ") + ahdr := strings.Split(chdr[2], ",") + // [1:] to remove leading plus + cstart, err := strconv.ParseUint(ahdr[0][1:], 10, 64) + if err != nil { + panic(err) + } + s.lineNo = int(cstart) - 1 // -1 as cstart is the next line number + case strings.HasPrefix(line, "-"): + s.lineNo-- + case strings.HasPrefix(line, "+"): + s.changes = append(s.changes, pos{lineNo: s.lineNo, hunkPos: s.hunkPos}) + } + + } + if scanErr != nil && scanErr != io.EOF { + fmt.Fprintln(os.Stderr, "reading standard input:", scanErr) + } + // record the last state + changes[s.file] = s.changes + + return changes +} + +// GitPatch returns a patch from a git repository, if no git repository was +// was found and no errors occurred, nil is returned, else an error is returned +// revisionFrom and revisionTo defines the git diff parameters, if left blank +// and there are unstaged changes or untracked files, only those will be returned +// else only check changes since HEAD~. If revisionFrom is set but revisionTo +// is not, untracked files will be included, to exclude untracked files set +// revisionTo to HEAD~. It's incorrect to specify revisionTo without a +// revisionFrom. 
+func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { + var patch bytes.Buffer + + // check if git repo exists + if err := exec.Command("git", "status").Run(); err != nil { + // don't return an error, we assume the error is not repo exists + return nil, nil, nil + } + + // make a patch for untracked files + var newFiles []string + ls, err := exec.Command("git", "ls-files", "--others", "--exclude-standard").CombinedOutput() + if err != nil { + return nil, nil, fmt.Errorf("error executing git ls-files: %s", err) + } + for _, file := range bytes.Split(ls, []byte{'\n'}) { + if len(file) == 0 || bytes.HasSuffix(file, []byte{'/'}) { + // ls-files was sometimes showing directories when they were ignored + // I couldn't create a test case for this as I couldn't reproduce correctly + // for the moment, just exclude files with trailing / + continue + } + newFiles = append(newFiles, string(file)) + } + + if revisionFrom != "" { + cmd := exec.Command("git", "diff", "--relative", revisionFrom) + if revisionTo != "" { + cmd.Args = append(cmd.Args, revisionTo) + } + cmd.Stdout = &patch + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("error executing git diff %q %q: %s", revisionFrom, revisionTo, err) + } + + if revisionTo == "" { + return &patch, newFiles, nil + } + return &patch, nil, nil + } + + // make a patch for unstaged changes + // use --no-prefix to remove b/ given: +++ b/main.go + cmd := exec.Command("git", "diff", "--relative") + cmd.Stdout = &patch + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("error executing git diff: %s", err) + } + unstaged := patch.Len() > 0 + + // If there's unstaged changes OR untracked changes (or both), then this is + // a suitable patch + if unstaged || newFiles != nil { + return &patch, newFiles, nil + } + + // check for changes in recent commit + + cmd = exec.Command("git", "diff", "--relative", "HEAD~") + cmd.Stdout = &patch + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("error executing git diff HEAD~: %s", err) + } + + return &patch, nil, nil +} diff --git a/vendor/github.com/golangci/unconvert/LICENSE b/vendor/github.com/golangci/unconvert/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/golangci/unconvert/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golangci/unconvert/README b/vendor/github.com/golangci/unconvert/README new file mode 100644 index 000000000..dbaea4f57 --- /dev/null +++ b/vendor/github.com/golangci/unconvert/README @@ -0,0 +1,36 @@ +About: + +The unconvert program analyzes Go packages to identify unnecessary +type conversions; i.e., expressions T(x) where x already has type T. + +Install: + + $ go get github.com/mdempsky/unconvert + +Usage: + + $ unconvert -v bytes fmt + GOROOT/src/bytes/reader.go:117:14: unnecessary conversion + abs = int64(r.i) + offset + ^ + GOROOT/src/fmt/print.go:411:21: unnecessary conversion + p.fmt.integer(int64(v), 16, unsigned, udigits) + ^ + +Flags: + +Using the -v flag, unconvert will also print the source line and a +caret to indicate the unnecessary conversion's position therein. + +Using the -apply flag, unconvert will rewrite the Go source files +without the unnecessary type conversions. + +Using the -all flag, unconvert will analyze the Go packages under all +possible GOOS/GOARCH combinations, and only identify conversions that +are unnecessary in all cases. + +E.g., syscall.Timespec's Sec and Nsec fields are int64 under +linux/amd64 but int32 under linux/386. An int64(ts.Sec) conversion +that appears in a linux/amd64-only file will be identified as +unnecessary, but it will be preserved if it occurs in a file that's +compiled for both linux/amd64 and linux/386. diff --git a/vendor/github.com/golangci/unconvert/unconvert.go b/vendor/github.com/golangci/unconvert/unconvert.go new file mode 100644 index 000000000..38737d39f --- /dev/null +++ b/vendor/github.com/golangci/unconvert/unconvert.go @@ -0,0 +1,665 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Unconvert removes redundant type conversions from Go packages. +package unconvert + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + "go/format" + "go/parser" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "reflect" + "runtime/pprof" + "sort" + "sync" + "unicode" + + "github.com/kisielk/gotool" + "golang.org/x/text/width" + "golang.org/x/tools/go/loader" +) + +// Unnecessary conversions are identified by the position +// of their left parenthesis within a source file. + +type editSet map[token.Position]struct{} + +type fileToEditSet map[string]editSet + +func apply(file string, edits editSet) { + if len(edits) == 0 { + return + } + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, file, nil, parser.ParseComments) + if err != nil { + log.Fatal(err) + } + + // Note: We modify edits during the walk. + v := editor{edits: edits, file: fset.File(f.Package)} + ast.Walk(&v, f) + if len(edits) != 0 { + log.Printf("%s: missing edits %s", file, edits) + } + + // TODO(mdempsky): Write to temporary file and rename. 
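
The TODO above alludes to a temp-file-and-rename write; one possible shape for such a helper (a sketch only — writeFileAtomic is a hypothetical name, it does not preserve the original file mode, and it assumes io/ioutil, os, and path/filepath are imported):

    func writeFileAtomic(filename string, data []byte) error {
        tmp, err := ioutil.TempFile(filepath.Dir(filename), ".unconvert-")
        if err != nil {
            return err
        }
        defer os.Remove(tmp.Name()) // best-effort cleanup; harmless after a successful rename
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        return os.Rename(tmp.Name(), filename)
    }
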
+ var buf bytes.Buffer + err = format.Node(&buf, fset, f) + if err != nil { + log.Fatal(err) + } + + err = ioutil.WriteFile(file, buf.Bytes(), 0) + if err != nil { + log.Fatal(err) + } +} + +type editor struct { + edits editSet + file *token.File +} + +func (e *editor) Visit(n ast.Node) ast.Visitor { + if n == nil { + return nil + } + v := reflect.ValueOf(n).Elem() + for i, n := 0, v.NumField(); i < n; i++ { + switch f := v.Field(i).Addr().Interface().(type) { + case *ast.Expr: + e.rewrite(f) + case *[]ast.Expr: + for i := range *f { + e.rewrite(&(*f)[i]) + } + } + } + return e +} + +func (e *editor) rewrite(f *ast.Expr) { + call, ok := (*f).(*ast.CallExpr) + if !ok { + return + } + + pos := e.file.Position(call.Lparen) + if _, ok := e.edits[pos]; !ok { + return + } + *f = call.Args[0] + delete(e.edits, pos) +} + +var ( + cr = []byte{'\r'} + nl = []byte{'\n'} +) + +func print(conversions []token.Position) { + var file string + var lines [][]byte + + for _, pos := range conversions { + fmt.Printf("%s:%d:%d: unnecessary conversion\n", pos.Filename, pos.Line, pos.Column) + if *flagV { + if pos.Filename != file { + buf, err := ioutil.ReadFile(pos.Filename) + if err != nil { + log.Fatal(err) + } + file = pos.Filename + lines = bytes.Split(buf, nl) + } + + line := bytes.TrimSuffix(lines[pos.Line-1], cr) + fmt.Printf("%s\n", line) + + // For files processed by cgo, Column is the + // column location after cgo processing, which + // may be different than the source column + // that we want here. In lieu of a better + // heuristic for detecting this case, at least + // avoid panicking if column is out of bounds. + if pos.Column <= len(line) { + fmt.Printf("%s^\n", rub(line[:pos.Column-1])) + } + } + } +} + +// Rub returns a copy of buf with all non-whitespace characters replaced +// by spaces (like rubbing them out with white out). +func rub(buf []byte) []byte { + // TODO(mdempsky): Handle combining characters? + var res bytes.Buffer + for _, r := range string(buf) { + if unicode.IsSpace(r) { + res.WriteRune(r) + continue + } + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + res.WriteString(" ") + default: + res.WriteByte(' ') + } + } + return res.Bytes() +} + +var ( + flagAll = flag.Bool("unconvert.all", false, "type check all GOOS and GOARCH combinations") + flagApply = flag.Bool("unconvert.apply", false, "apply edits to source files") + flagCPUProfile = flag.String("unconvert.cpuprofile", "", "write CPU profile to file") + // TODO(mdempsky): Better description and maybe flag name. 
+ flagSafe = flag.Bool("unconvert.safe", false, "be more conservative (experimental)") + flagV = flag.Bool("unconvert.v", false, "verbose output") +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: unconvert [flags] [package ...]\n") + flag.PrintDefaults() +} + +func nomain() { + flag.Usage = usage + flag.Parse() + + if *flagCPUProfile != "" { + f, err := os.Create(*flagCPUProfile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + importPaths := gotool.ImportPaths(flag.Args()) + if len(importPaths) == 0 { + return + } + + var m fileToEditSet + if *flagAll { + m = mergeEdits(importPaths) + } else { + m = computeEdits(importPaths, build.Default.GOOS, build.Default.GOARCH, build.Default.CgoEnabled) + } + + if *flagApply { + var wg sync.WaitGroup + for f, e := range m { + wg.Add(1) + f, e := f, e + go func() { + defer wg.Done() + apply(f, e) + }() + } + wg.Wait() + } else { + var conversions []token.Position + for _, positions := range m { + for pos := range positions { + conversions = append(conversions, pos) + } + } + sort.Sort(byPosition(conversions)) + print(conversions) + if len(conversions) > 0 { + os.Exit(1) + } + } +} + +func Run(prog *loader.Program) []token.Position { + m := computeEditsFromProg(prog) + var conversions []token.Position + for _, positions := range m { + for pos := range positions { + conversions = append(conversions, pos) + } + } + return conversions +} + +var plats = [...]struct { + goos, goarch string +}{ + // TODO(mdempsky): buildall.bash also builds linux-386-387 and linux-arm-arm5. + {"android", "386"}, + {"android", "amd64"}, + {"android", "arm"}, + {"android", "arm64"}, + {"darwin", "386"}, + {"darwin", "amd64"}, + {"darwin", "arm"}, + {"darwin", "arm64"}, + {"dragonfly", "amd64"}, + {"freebsd", "386"}, + {"freebsd", "amd64"}, + {"freebsd", "arm"}, + {"linux", "386"}, + {"linux", "amd64"}, + {"linux", "arm"}, + {"linux", "arm64"}, + {"linux", "mips64"}, + {"linux", "mips64le"}, + {"linux", "ppc64"}, + {"linux", "ppc64le"}, + {"linux", "s390x"}, + {"nacl", "386"}, + {"nacl", "amd64p32"}, + {"nacl", "arm"}, + {"netbsd", "386"}, + {"netbsd", "amd64"}, + {"netbsd", "arm"}, + {"openbsd", "386"}, + {"openbsd", "amd64"}, + {"openbsd", "arm"}, + {"plan9", "386"}, + {"plan9", "amd64"}, + {"plan9", "arm"}, + {"solaris", "amd64"}, + {"windows", "386"}, + {"windows", "amd64"}, +} + +func mergeEdits(importPaths []string) fileToEditSet { + m := make(fileToEditSet) + for _, plat := range plats { + for f, e := range computeEdits(importPaths, plat.goos, plat.goarch, false) { + if e0, ok := m[f]; ok { + for k := range e0 { + if _, ok := e[k]; !ok { + delete(e0, k) + } + } + } else { + m[f] = e + } + } + } + return m +} + +type noImporter struct{} + +func (noImporter) Import(path string) (*types.Package, error) { + panic("golang.org/x/tools/go/loader said this wouldn't be called") +} + +func computeEdits(importPaths []string, os, arch string, cgoEnabled bool) fileToEditSet { + ctxt := build.Default + ctxt.GOOS = os + ctxt.GOARCH = arch + ctxt.CgoEnabled = cgoEnabled + + var conf loader.Config + conf.Build = &ctxt + conf.TypeChecker.Importer = noImporter{} + for _, importPath := range importPaths { + conf.Import(importPath) + } + prog, err := conf.Load() + if err != nil { + log.Fatal(err) + } + + return computeEditsFromProg(prog) +} + +func computeEditsFromProg(prog *loader.Program) fileToEditSet { + type res struct { + file string + edits editSet + } + ch := make(chan res) + var wg sync.WaitGroup + for _, pkg := range 
prog.InitialPackages() { + for _, file := range pkg.Files { + pkg, file := pkg, file + wg.Add(1) + go func() { + defer wg.Done() + v := visitor{pkg: pkg, file: prog.Fset.File(file.Package), edits: make(editSet)} + ast.Walk(&v, file) + ch <- res{v.file.Name(), v.edits} + }() + } + } + go func() { + wg.Wait() + close(ch) + }() + + m := make(fileToEditSet) + for r := range ch { + m[r.file] = r.edits + } + return m +} + +type step struct { + n ast.Node + i int +} + +type visitor struct { + pkg *loader.PackageInfo + file *token.File + edits editSet + path []step +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + if node != nil { + v.path = append(v.path, step{n: node}) + } else { + n := len(v.path) + v.path = v.path[:n-1] + if n >= 2 { + v.path[n-2].i++ + } + } + + if call, ok := node.(*ast.CallExpr); ok { + v.unconvert(call) + } + return v +} + +func (v *visitor) unconvert(call *ast.CallExpr) { + // TODO(mdempsky): Handle useless multi-conversions. + + // Conversions have exactly one argument. + if len(call.Args) != 1 || call.Ellipsis != token.NoPos { + return + } + ft, ok := v.pkg.Types[call.Fun] + if !ok { + fmt.Println("Missing type for function") + return + } + if !ft.IsType() { + // Function call; not a conversion. + return + } + at, ok := v.pkg.Types[call.Args[0]] + if !ok { + fmt.Println("Missing type for argument") + return + } + if !types.Identical(ft.Type, at.Type) { + // A real conversion. + return + } + if isUntypedValue(call.Args[0], &v.pkg.Info) { + // Workaround golang.org/issue/13061. + return + } + if *flagSafe && !v.isSafeContext(at.Type) { + // TODO(mdempsky): Remove this message. + fmt.Println("Skipped a possible type conversion because of -safe at", v.file.Position(call.Pos())) + return + } + if v.isCgoCheckPointerContext() { + // cmd/cgo generates explicit type conversions that + // are often redundant when introducing + // _cgoCheckPointer calls (issue #16). Users can't do + // anything about these, so skip over them. + return + } + + v.edits[v.file.Position(call.Lparen)] = struct{}{} +} + +func (v *visitor) isCgoCheckPointerContext() bool { + ctxt := &v.path[len(v.path)-2] + if ctxt.i != 1 { + return false + } + call, ok := ctxt.n.(*ast.CallExpr) + if !ok { + return false + } + ident, ok := call.Fun.(*ast.Ident) + if !ok { + return false + } + return ident.Name == "_cgoCheckPointer" +} + +// isSafeContext reports whether the current context requires +// an expression of type t. +// +// TODO(mdempsky): That's a bad explanation. +func (v *visitor) isSafeContext(t types.Type) bool { + ctxt := &v.path[len(v.path)-2] + switch n := ctxt.n.(type) { + case *ast.AssignStmt: + pos := ctxt.i - len(n.Lhs) + if pos < 0 { + fmt.Println("Type conversion on LHS of assignment?") + return false + } + if n.Tok == token.DEFINE { + // Skip := assignments. + return true + } + // We're a conversion in the pos'th element of n.Rhs. + // Check that the corresponding element of n.Lhs is of type t. + lt, ok := v.pkg.Types[n.Lhs[pos]] + if !ok { + fmt.Println("Missing type for LHS expression") + return false + } + return types.Identical(t, lt.Type) + case *ast.BinaryExpr: + if n.Op == token.SHL || n.Op == token.SHR { + if ctxt.i == 1 { + // RHS of a shift is always safe. + return true + } + // For the LHS, we should inspect up another level. 
+ fmt.Println("TODO(mdempsky): Handle LHS of shift expressions") + return true + } + var other ast.Expr + if ctxt.i == 0 { + other = n.Y + } else { + other = n.X + } + ot, ok := v.pkg.Types[other] + if !ok { + fmt.Println("Missing type for other binop subexpr") + return false + } + return types.Identical(t, ot.Type) + case *ast.CallExpr: + pos := ctxt.i - 1 + if pos < 0 { + // Type conversion in the function subexpr is okay. + return true + } + ft, ok := v.pkg.Types[n.Fun] + if !ok { + fmt.Println("Missing type for function expression") + return false + } + sig, ok := ft.Type.(*types.Signature) + if !ok { + // "Function" is either a type conversion (ok) or a builtin (ok?). + return true + } + params := sig.Params() + var pt types.Type + if sig.Variadic() && n.Ellipsis == token.NoPos && pos >= params.Len()-1 { + pt = params.At(params.Len() - 1).Type().(*types.Slice).Elem() + } else { + pt = params.At(pos).Type() + } + return types.Identical(t, pt) + case *ast.CompositeLit, *ast.KeyValueExpr: + fmt.Println("TODO(mdempsky): Compare against value type of composite literal type at", v.file.Position(n.Pos())) + return true + case *ast.ReturnStmt: + // TODO(mdempsky): Is there a better way to get the corresponding + // return parameter type? + var funcType *ast.FuncType + for i := len(v.path) - 1; funcType == nil && i >= 0; i-- { + switch f := v.path[i].n.(type) { + case *ast.FuncDecl: + funcType = f.Type + case *ast.FuncLit: + funcType = f.Type + } + } + var typeExpr ast.Expr + for i, j := ctxt.i, 0; j < len(funcType.Results.List); j++ { + f := funcType.Results.List[j] + if len(f.Names) == 0 { + if i >= 1 { + i-- + continue + } + } else { + if i >= len(f.Names) { + i -= len(f.Names) + continue + } + } + typeExpr = f.Type + break + } + if typeExpr == nil { + fmt.Println(ctxt) + } + pt, ok := v.pkg.Types[typeExpr] + if !ok { + fmt.Println("Missing type for return parameter at", v.file.Position(n.Pos())) + return false + } + return types.Identical(t, pt.Type) + case *ast.StarExpr, *ast.UnaryExpr: + // TODO(mdempsky): I think these are always safe. + return true + case *ast.SwitchStmt: + // TODO(mdempsky): I think this is always safe? + return true + default: + // TODO(mdempsky): When can this happen? + fmt.Printf("... huh, %T at %v\n", n, v.file.Position(n.Pos())) + return true + } +} + +func isUntypedValue(n ast.Expr, info *types.Info) (res bool) { + switch n := n.(type) { + case *ast.BinaryExpr: + switch n.Op { + case token.SHL, token.SHR: + // Shifts yield an untyped value if their LHS is untyped. + return isUntypedValue(n.X, info) + case token.EQL, token.NEQ, token.LSS, token.GTR, token.LEQ, token.GEQ: + // Comparisons yield an untyped boolean value. + return true + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, + token.AND, token.OR, token.XOR, token.AND_NOT, + token.LAND, token.LOR: + return isUntypedValue(n.X, info) && isUntypedValue(n.Y, info) + } + case *ast.UnaryExpr: + switch n.Op { + case token.ADD, token.SUB, token.NOT, token.XOR: + return isUntypedValue(n.X, info) + } + case *ast.BasicLit: + // Basic literals are always untyped. + return true + case *ast.ParenExpr: + return isUntypedValue(n.X, info) + case *ast.SelectorExpr: + return isUntypedValue(n.Sel, info) + case *ast.Ident: + if obj, ok := info.Uses[n]; ok { + if obj.Pkg() == nil && obj.Name() == "nil" { + // The universal untyped zero value. + return true + } + if b, ok := obj.Type().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { + // Reference to an untyped constant. 
+ return true + } + } + case *ast.CallExpr: + if b, ok := asBuiltin(n.Fun, info); ok { + switch b.Name() { + case "real", "imag": + return isUntypedValue(n.Args[0], info) + case "complex": + return isUntypedValue(n.Args[0], info) && isUntypedValue(n.Args[1], info) + } + } + } + + return false +} + +func asBuiltin(n ast.Expr, info *types.Info) (*types.Builtin, bool) { + for { + paren, ok := n.(*ast.ParenExpr) + if !ok { + break + } + n = paren.X + } + + ident, ok := n.(*ast.Ident) + if !ok { + return nil, false + } + + obj, ok := info.Uses[ident] + if !ok { + return nil, false + } + + b, ok := obj.(*types.Builtin) + return b, ok +} + +type byPosition []token.Position + +func (p byPosition) Len() int { + return len(p) +} + +func (p byPosition) Less(i, j int) bool { + if p[i].Filename != p[j].Filename { + return p[i].Filename < p[j].Filename + } + if p[i].Line != p[j].Line { + return p[i].Line < p[j].Line + } + return p[i].Column < p[j].Column +} + +func (p byPosition) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..86d0903b8 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,682 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. 
Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. 
+// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. 
+ recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. 
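
To make the rule ordering concrete, a small usage sketch (the values and tolerance are illustrative): a Comparer takes precedence under rule 1, while plain float64 values fall through to this kind-based comparison under rule 3:

    package main

    import (
        "fmt"
        "math"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        approx := cmp.Comparer(func(x, y float64) bool {
            return math.Abs(x-y) < 1e-9
        })
        fmt.Println(cmp.Equal(0.1+0.2, 0.3, approx)) // true: rule 1, the Comparer applies
        fmt.Println(cmp.Equal(0.1+0.2, 0.3))         // false: rule 3, exact float64 comparison
    }
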
+ switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. 
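
For instance, a comparer that is not symmetric can trip this validation and produce the "non-deterministic or non-symmetric function detected" panic below (a sketch; whether a particular call is sampled depends on dynChecker):

    package main

    import "github.com/google/go-cmp/cmp"

    func main() {
        // Not symmetric: bad(1, 2) is true but bad(2, 1) is false.
        bad := cmp.Comparer(func(x, y int) bool { return x <= y })
        cmp.Equal(1, 2, bad) // may panic when the check fires
    }
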
+ c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool + var vax, vay reflect.Value // Addressable versions of vx and vy + + var mayForce, mayForceInit bool + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. 
+ // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for pointers. 
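
As an illustration of this cycle handling, two independent self-referential values compare as equal, because both addresses are first visited at the same path step (the Node type is made up):

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type Node struct{ Next *Node }

    func main() {
        x := &Node{}
        x.Next = x // points back to itself
        y := &Node{}
        y.Next = y
        fmt.Println(cmp.Equal(x, y)) // true: the cycle is detected instead of recursing forever
    }
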
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 000000000..5ff0b4218 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego + +package cmp + +import "reflect" + +const supportExporters = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 000000000..21eb54858 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportExporters = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..1daaaacc5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..4b91dbcac --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..bc196b16c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,398 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. 
+func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. 
+ // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. + // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Non-deterministically start with either the forward or reverse direction + // to introduce some deliberate instability so that we have the flexibility + // to change this algorithm in the future. + if flags.Deterministic || randBool { + goto forwardSearch + } else { + goto reverseSearch + } + +forwardSearch: + { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + goto finishSearch + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. 
+ z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + goto reverseSearch + } + +reverseSearch: + { + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + goto finishSearch + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + goto forwardSearch + } + +finishSearch: + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. 
+ for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..d8e459c9b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 000000000..82d1d7fbf --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 000000000..8646f0529 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..d127d4362 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package function provides functionality for identifying function types. 
+package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..b6c12cefb --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) 
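The IsType classifier and NameOf defined in this package are what the exported cmp options later in the patch rely on to validate and label user-supplied functions. A hedged sketch, again assuming test code inside the go-cmp module since internal/function is not importable elsewhere:

```go
package function_test

import (
	"reflect"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp/internal/function"
)

func TestClassifySketch(t *testing.T) {
	eq := func(x, y string) bool { return strings.EqualFold(x, y) }

	// func(T, T) bool satisfies the shape required by Comparer and FilterValues...
	if !function.IsType(reflect.TypeOf(eq), function.Equal) {
		t.Error("expected func(T, T) bool to match function.Equal")
	}
	// ...but not the Transformer shape func(T) R.
	if function.IsType(reflect.TypeOf(eq), function.Transformer) {
		t.Error("did not expect func(T, T) bool to match function.Transformer")
	}
	// NameOf recovers the symbol behind a function value for diagnostics.
	if got := function.NameOf(reflect.ValueOf(strings.EqualFold)); got != "strings.EqualFold" {
		t.Errorf("NameOf() = %q, want %q", got, "strings.EqualFold")
	}
}
```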
+ b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..44f4a5afd --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,33 @@ +// Copyright 2018, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..a605953d4 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,36 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..98533b036 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. 
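SortKeys above is what gives map comparison and reporting a deterministic key order. A hedged sketch of its behavior, once more assuming test code living inside the module:

```go
package value_test

import (
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp/internal/value"
)

func TestSortKeysSketch(t *testing.T) {
	m := map[interface{}]string{3: "c", 1: "a", 2: "b", "x": "s"}

	keys := value.SortKeys(reflect.ValueOf(m).MapKeys())

	// Keys of the same dynamic type are ordered by value; keys of different
	// dynamic types are ordered by kind (int sorts before string here).
	var got []interface{}
	for _, k := range keys {
		got = append(got, k.Interface())
	}
	want := []interface{}{1, 2, 3, "x"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("SortKeys() = %v, want %v", got, want)
	}
}
```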
+func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 000000000..9147a2997 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..e57b9eb53 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,552 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
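A practical consequence of the Options group described above is that helper code can bundle several filtered options behind a single cmp.Option value, which is the pattern the documentation mentions for helper comparison packages. A hedged sketch against the public cmp API (the User type and its field names are invented):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type User struct {
	Name     string
	Email    string
	LoginCnt int
}

// ignoreVolatileFields returns one cmp.Option that is really a flattened
// group of two filtered Ignore options.
func ignoreVolatileFields() cmp.Option {
	return cmp.Options{
		cmp.FilterPath(func(p cmp.Path) bool { return p.String() == "Email" }, cmp.Ignore()),
		cmp.FilterPath(func(p cmp.Path) bool { return p.String() == "LoginCnt" }, cmp.Ignore()),
	}
}

func main() {
	x := User{Name: "gopher", Email: "a@example.com", LoginCnt: 1}
	y := User{Name: "gopher", Email: "b@example.com", LoginCnt: 7}
	fmt.Println(cmp.Equal(x, y, ignoreVolatileFields())) // true: only ignored fields differ
}
```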
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
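Both FilterPath above and FilterValues below scope an option; FilterPath filters usually inspect the last PathStep rather than the rendered path string. A hedged sketch that ignores generated fields by name prefix (the Message type and the XXX_ naming convention are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

type Message struct {
	Body          string
	XXX_sizecache int32 // stands in for machine-generated bookkeeping fields
}

func main() {
	// Ignore every struct field whose name carries the generated-code prefix.
	ignoreGenerated := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && strings.HasPrefix(sf.Name(), "XXX_")
	}, cmp.Ignore())

	x := Message{Body: "hi", XXX_sizecache: 4}
	y := Message{Body: "hi", XXX_sizecache: 9}
	fmt.Println(cmp.Equal(x, y, ignoreGenerated)) // true
}
```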
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. 
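FilterValues, defined above, typically pairs with Comparer to scope a relaxed equality to matching value pairs only. A hedged sketch approximating float equality (the tolerance is arbitrary; cmpopts.EquateApprox is the polished form of this idea):

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Only when both values are real (non-NaN) float64s does the tolerant
	// comparer apply; everything else falls through to ordinary equality.
	approx := cmp.FilterValues(func(x, y float64) bool {
		return !math.IsNaN(x) && !math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) <= 1e-9
	}))

	fmt.Println(cmp.Equal([]float64{1.0, 2.0}, []float64{1.0, 2.0 + 1e-12}, approx)) // true
	fmt.Println(cmp.Equal([]float64{1.0}, []float64{1.5}, approx))                   // false
}
```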
+const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
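The Transformer defined above appears in diff output under the name passed to it, so short, meaningful names pay off. A hedged sketch that compares integer slices as multisets by sorting a copy first (the name and data are illustrative); the implicit self-application filter described above is what keeps this []int to []int transform from recursing on its own output:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// The transformer must not mutate its input, so it sorts a copy.
	sortedInts := cmp.Transformer("SortInts", func(in []int) []int {
		out := append([]int(nil), in...)
		sort.Ints(out)
		return out
	})

	x := []int{3, 1, 2}
	y := []int{1, 2, 3}
	fmt.Println(cmp.Equal(x, y, sortedInts)) // true: equal after the transform
}
```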
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") + } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. +func AllowUnexported(types ...interface{}) Option { + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return exporter(func(t reflect.Type) bool { return m[t] }) +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). 
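Comparer and the exporter options above are the two escape hatches for types whose natural equality is not field-by-field. A hedged sketch that combines the regexp comparer quoted in the Exporter documentation with AllowUnexported for a small value type (the matcher type is invented):

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

type matcher struct {
	pattern *regexp.Regexp
	strict  bool // unexported fields of a type we control
}

func main() {
	// Regexps are considered equal when their source strings are equal.
	regexpCmp := cmp.Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })

	x := matcher{pattern: regexp.MustCompile(`^gopher$`), strict: true}
	y := matcher{pattern: regexp.MustCompile(`^gopher$`), strict: true}

	// Without AllowUnexported (or Exporter), cmp panics on the unexported fields.
	fmt.Println(cmp.Equal(x, y, regexpCmp, cmp.AllowUnexported(matcher{}))) // true
}
```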
+type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. +func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc + reportByCycle +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. 
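The Reporter option above is the hook external code uses to observe the traversal. A hedged sketch of a reporter that records the paths of unequal leaves, a common building block for custom failure messages (the Pair type is invented, and the rendered path shown in the comment is only indicative):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffPaths records the path of every leaf that compared unequal.
type diffPaths struct {
	path  cmp.Path
	diffs []string
}

func (r *diffPaths) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffPaths) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffPaths) Report(rs cmp.Result) {
	if !rs.Equal() {
		r.diffs = append(r.diffs, fmt.Sprintf("%#v", r.path))
	}
}

type Pair struct{ A, B int }

func main() {
	var r diffPaths
	cmp.Equal(Pair{1, 2}, Pair{1, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // one entry per differing leaf, e.g. [{main.Pair}.B]
}
```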
+func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..3d45c1a47 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,378 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. 
+func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressible) + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. 
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. 
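The concrete PathStep types give filters structured access to where a comparison is happening; MapIndex, for instance, exposes the key. A hedged sketch that ignores map entries under a reserved key prefix (the prefix and data shape are invented):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Ignore any map entry whose string key starts with "debug/".
	ignoreDebugEntries := cmp.FilterPath(func(p cmp.Path) bool {
		mi, ok := p.Last().(cmp.MapIndex)
		return ok && mi.Key().Kind() == reflect.String &&
			strings.HasPrefix(mi.Key().String(), "debug/")
	}, cmp.Ignore())

	x := map[string]int{"requests": 10, "debug/goroutines": 8}
	y := map[string]int{"requests": 10, "debug/goroutines": 42}
	fmt.Println(cmp.Equal(x, y, ignoreDebugEntries)) // true
}
```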
+func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a seperate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..f43cd12eb --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,54 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
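The pointerPath machinery above is what lets Equal terminate on cyclic values and judge two cycles equal only when they close at the same step. A hedged sketch (the Ring type is invented):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Ring struct {
	Value int
	Next  *Ring
}

func main() {
	// Two single-element rings that point back to themselves.
	a := &Ring{Value: 1}
	a.Next = a
	b := &Ring{Value: 1}
	b.Next = b

	// Equal terminates despite the cycles and treats equally shaped cycles
	// as equal; rings of different lengths would compare unequal.
	fmt.Println(cmp.Equal(a, b)) // true
}
```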
+ +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..104bb3053 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,432 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. 
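The diff modes below are what produce the '-' and '+' markers in cmp.Diff output. A hedged sketch of the conventional way that output is consumed in tests; the exact rendering of the report is explicitly not guaranteed to be stable, and as written this test fails and prints the rendered diff:

```go
package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

type Config struct {
	Host string
	Port int
}

func TestConfigSketch(t *testing.T) {
	want := Config{Host: "localhost", Port: 8080}
	got := Config{Host: "localhost", Port: 9090}

	// An empty diff means equal; otherwise the report marks removed ("-")
	// and inserted ("+") values relative to the first argument.
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("config mismatch (-want +got):\n%s", diff)
	}
}
```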
+ formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 6 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else if opts.verbosity() < 3 { + opts = opts.WithVerbosity(3) + } + + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, parentKind, ptrs) + case diffInserted: + return opts.FormatValue(v.ValueY, parentKind, ptrs) + default: + panic("invalid diff mode") + } + } + + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + + // Descend into the child value node. 
+ if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) + case reflect.Ptr: + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} + case reflect.Interface: + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + return out + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value, ptrs); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. 
+ var numDiffs int + var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) + groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) + } + default: + out := opts.FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + } + recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. 
+ if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..be31b33a9 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. 
+type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. +func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. 
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. + var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..33f03577f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,402 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). 
+ QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. + LimitVerbosity bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := value.TypeString(t, opts.QualifiedNames) + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } + + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") + if hasParens || hasBraces { + return s + } + } + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. 
+ defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return opts.formatString("", v.String()) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(value.PointerOf(v), true)) + case reflect.Struct: + var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + + // Check whether this is a []byte of text data. + if t.Elem() == reflect.TypeOf(byte(0)) { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + return opts.WithTypeMode(emitType).FormatType(t, out) + } + } + + fallthrough + case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for i := 0; i < v.Len(); i++ { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) + list = append(list, textRecord{Value: s}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out + case reflect.Map: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. 
+ ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) + } + defer ptrs.Pop() + + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) + list = append(list, textRecord{Key: sk, Value: sv}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out + case reflect.Ptr: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} + } + defer ptrs.Pop() + + skipType = true // Let the underlying value print the type instead + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. 
+ if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { + var opts formatOptions + opts.DiffMode = diffIdentical + opts.TypeMode = elideType + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + opts.VerbosityLevel = maxVerbosityPreset + opts.LimitVerbosity = true + s := opts.FormatValue(v, reflect.Map, ptrs).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..168f92f3c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,465 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. 
+ // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return vx.Len() >= minLength && vy.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. 
+ case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... // 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. 
+ // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... 
+ opts.VerbosityLevel-- + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + len0 := len(list) + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) + } + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..0fd46d7ff --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,431 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +const maxColumnLength = 80 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. 
Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting +} + +func (s *textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s *textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. 
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := !ds.IsZero() + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, + ) + + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.ElideComma { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+ switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..668d470fd --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index 9d92c11f1..f765a46f9 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID). Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b17461631..b404f4bec 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 7f9e0c6c0..14bd34072 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. 
func (uuid *UUID) UnmarshalText(data []byte) error { id, err := ParseBytes(data) - if err == nil { - *uuid = id + if err != nil { + return err } - return err + *uuid = id + return nil } // MarshalBinary implements encoding.BinaryMarshaler. diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index f326b54db..2e02ec06c 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 524404cc5..60d26bb50 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -35,6 +35,12 @@ const ( var rander = rand.Reader // random function +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + // Parse decodes s into a UUID or returns an error. Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 199a1ac65..463109629 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -17,12 +17,6 @@ import ( // // In most cases, New should be used. func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c9f..86160fbd0 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -14,6 +14,14 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. 
// // The strength of the UUIDs is based on the strength of the crypto/rand @@ -27,8 +35,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/gordonklaus/ineffassign/LICENSE b/vendor/github.com/gordonklaus/ineffassign/LICENSE new file mode 100644 index 000000000..9e3d9bcc0 --- /dev/null +++ b/vendor/github.com/gordonklaus/ineffassign/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Gordon Klaus and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go new file mode 100644 index 000000000..606eb14aa --- /dev/null +++ b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go @@ -0,0 +1,591 @@ +package ineffassign + +import ( + "fmt" + "go/ast" + "go/token" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// Analyzer is the ineffassign analysis.Analyzer instance. 
+var Analyzer = &analysis.Analyzer{ + Name: "ineffassign", + Doc: "detect ineffectual assignments in Go code", + Run: checkPath, +} + +func checkPath(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + if isGenerated(file) { + continue + } + + bld := &builder{vars: map[*ast.Object]*variable{}} + bld.walk(file) + + chk := &checker{vars: bld.vars, seen: map[*block]bool{}} + for _, b := range bld.roots { + chk.check(b) + } + sort.Sort(chk.ineff) + + for _, id := range chk.ineff { + pass.Report(analysis.Diagnostic{ + Pos: id.Pos(), + Message: fmt.Sprintf("ineffectual assignment to %s", id.Name), + }) + } + } + + return nil, nil +} + +func isGenerated(file *ast.File) bool { + for _, cg := range file.Comments { + for _, c := range cg.List { + if strings.HasPrefix(c.Text, "// Code generated ") && strings.HasSuffix(c.Text, " DO NOT EDIT.") { + return true + } + } + } + + return false +} + +type builder struct { + roots []*block + block *block + vars map[*ast.Object]*variable + results []*ast.FieldList + breaks branchStack + continues branchStack + gotos branchStack + labelStmt *ast.LabeledStmt +} + +type block struct { + children []*block + ops map[*ast.Object][]operation +} + +func (b *block) addChild(c *block) { + b.children = append(b.children, c) +} + +type operation struct { + id *ast.Ident + assign bool +} + +type variable struct { + fundept int + escapes bool +} + +func (bld *builder) walk(n ast.Node) { + if n != nil { + ast.Walk(bld, n) + } +} + +func (bld *builder) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.FuncDecl: + if n.Body != nil { + bld.fun(n.Type, n.Body) + } + case *ast.FuncLit: + bld.fun(n.Type, n.Body) + case *ast.IfStmt: + bld.walk(n.Init) + bld.walk(n.Cond) + b0 := bld.block + bld.newBlock(b0) + bld.walk(n.Body) + b1 := bld.block + if n.Else != nil { + bld.newBlock(b0) + bld.walk(n.Else) + b0 = bld.block + } + bld.newBlock(b0, b1) + case *ast.ForStmt: + lbl := bld.stmtLabel(n) + brek := bld.breaks.push(lbl) + continu := bld.continues.push(lbl) + bld.walk(n.Init) + start := bld.newBlock(bld.block) + bld.walk(n.Cond) + cond := bld.block + bld.newBlock(cond) + bld.walk(n.Body) + continu.setDestination(bld.newBlock(bld.block)) + bld.walk(n.Post) + bld.block.addChild(start) + brek.setDestination(bld.newBlock(cond)) + bld.breaks.pop() + bld.continues.pop() + case *ast.RangeStmt: + lbl := bld.stmtLabel(n) + brek := bld.breaks.push(lbl) + continu := bld.continues.push(lbl) + bld.walk(n.X) + pre := bld.newBlock(bld.block) + start := bld.newBlock(pre) + if n.Key != nil { + lhs := []ast.Expr{n.Key} + if n.Value != nil { + lhs = append(lhs, n.Value) + } + bld.walk(&ast.AssignStmt{Lhs: lhs, Tok: n.Tok, TokPos: n.TokPos, Rhs: []ast.Expr{&ast.Ident{NamePos: n.X.End()}}}) + } + bld.walk(n.Body) + bld.block.addChild(start) + continu.setDestination(pre) + brek.setDestination(bld.newBlock(pre, bld.block)) + bld.breaks.pop() + bld.continues.pop() + case *ast.SwitchStmt: + bld.walk(n.Init) + bld.walk(n.Tag) + bld.swtch(n, n.Body.List) + case *ast.TypeSwitchStmt: + bld.walk(n.Init) + bld.walk(n.Assign) + bld.swtch(n, n.Body.List) + case *ast.SelectStmt: + brek := bld.breaks.push(bld.stmtLabel(n)) + for _, c := range n.Body.List { + c := c.(*ast.CommClause).Comm + if s, ok := c.(*ast.AssignStmt); ok { + bld.walk(s.Rhs[0]) + } else { + bld.walk(c) + } + } + b0 := bld.block + exits := make([]*block, len(n.Body.List)) + dfault := false + for i, c := range n.Body.List { + c := c.(*ast.CommClause) + bld.newBlock(b0) + bld.walk(c) + exits[i] = 
bld.block + dfault = dfault || c.Comm == nil + } + if !dfault { + exits = append(exits, b0) + } + brek.setDestination(bld.newBlock(exits...)) + bld.breaks.pop() + case *ast.LabeledStmt: + bld.gotos.get(n.Label).setDestination(bld.newBlock(bld.block)) + bld.labelStmt = n + bld.walk(n.Stmt) + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + bld.breaks.get(n.Label).addSource(bld.block) + bld.newBlock() + case token.CONTINUE: + bld.continues.get(n.Label).addSource(bld.block) + bld.newBlock() + case token.GOTO: + bld.gotos.get(n.Label).addSource(bld.block) + bld.newBlock() + } + + case *ast.AssignStmt: + if n.Tok == token.QUO_ASSIGN || n.Tok == token.REM_ASSIGN { + bld.maybePanic() + } + + for _, x := range n.Rhs { + bld.walk(x) + } + for i, x := range n.Lhs { + if id, ok := ident(x); ok { + if n.Tok >= token.ADD_ASSIGN && n.Tok <= token.AND_NOT_ASSIGN { + bld.use(id) + } + // Don't treat explicit initialization to zero as assignment; it is often used as shorthand for a bare declaration. + if n.Tok == token.DEFINE && i < len(n.Rhs) && isZeroInitializer(n.Rhs[i]) { + bld.use(id) + } else { + bld.assign(id) + } + } else { + bld.walk(x) + } + } + case *ast.GenDecl: + if n.Tok == token.VAR { + for _, s := range n.Specs { + s := s.(*ast.ValueSpec) + for _, x := range s.Values { + bld.walk(x) + } + for _, id := range s.Names { + if len(s.Values) > 0 { + bld.assign(id) + } else { + bld.use(id) + } + } + } + } + case *ast.IncDecStmt: + if id, ok := ident(n.X); ok { + bld.use(id) + bld.assign(id) + } else { + bld.walk(n.X) + } + case *ast.Ident: + bld.use(n) + case *ast.ReturnStmt: + for _, x := range n.Results { + bld.walk(x) + } + if res := bld.results[len(bld.results)-1]; res != nil { + for _, f := range res.List { + for _, id := range f.Names { + if n.Results != nil { + bld.assign(id) + } + bld.use(id) + } + } + } + bld.newBlock() + case *ast.SendStmt: + bld.maybePanic() + return bld + + case *ast.BinaryExpr: + if n.Op == token.EQL || n.Op == token.QUO || n.Op == token.REM { + bld.maybePanic() + } + return bld + case *ast.CallExpr: + bld.maybePanic() + return bld + case *ast.IndexExpr: + bld.maybePanic() + return bld + case *ast.UnaryExpr: + id, ok := ident(n.X) + if ix, isIx := n.X.(*ast.IndexExpr); isIx { + // We don't care about indexing into slices, but without type information we can do no better. + id, ok = ident(ix.X) + } + if ok && n.Op == token.AND { + if v, ok := bld.vars[id.Obj]; ok { + v.escapes = true + } + } + return bld + case *ast.SelectorExpr: + bld.maybePanic() + // A method call (possibly delayed via a method value) might implicitly take + // the address of its receiver, causing it to escape. + // We can't do any better here without knowing the variable's type. + if id, ok := ident(n.X); ok { + if v, ok := bld.vars[id.Obj]; ok { + v.escapes = true + } + } + return bld + case *ast.SliceExpr: + bld.maybePanic() + // We don't care about slicing into slices, but without type information we can do no better. + if id, ok := ident(n.X); ok { + if v, ok := bld.vars[id.Obj]; ok { + v.escapes = true + } + } + return bld + case *ast.StarExpr: + bld.maybePanic() + return bld + case *ast.TypeAssertExpr: + bld.maybePanic() + return bld + + default: + return bld + } + return nil +} + +func isZeroInitializer(x ast.Expr) bool { + // Assume that a call expression of a single argument is a conversion expression. We can't do better without type information. 
+ if c, ok := x.(*ast.CallExpr); ok { + switch c.Fun.(type) { + case *ast.Ident, *ast.SelectorExpr: + default: + return false + } + if len(c.Args) != 1 { + return false + } + x = c.Args[0] + } + + switch x := x.(type) { + case *ast.BasicLit: + switch x.Value { + case "0", "0.0", "0.", ".0", `""`: + return true + } + case *ast.Ident: + return x.Name == "false" && x.Obj == nil + } + + return false +} + +func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) { + for _, v := range bld.vars { + v.fundept++ + } + bld.results = append(bld.results, typ.Results) + + b := bld.block + bld.newBlock() + bld.roots = append(bld.roots, bld.block) + bld.walk(typ) + bld.walk(body) + bld.block = b + + bld.results = bld.results[:len(bld.results)-1] + for _, v := range bld.vars { + v.fundept-- + } +} + +func (bld *builder) swtch(stmt ast.Stmt, cases []ast.Stmt) { + brek := bld.breaks.push(bld.stmtLabel(stmt)) + b0 := bld.block + list := b0 + exits := make([]*block, 0, len(cases)+1) + var dfault, fallthru *block + for _, c := range cases { + c := c.(*ast.CaseClause) + + if c.List != nil { + list = bld.newBlock(list) + for _, x := range c.List { + bld.walk(x) + } + } + + parents := []*block{} + if c.List != nil { + parents = append(parents, list) + } + if fallthru != nil { + parents = append(parents, fallthru) + fallthru = nil + } + bld.newBlock(parents...) + if c.List == nil { + dfault = bld.block + } + for _, s := range c.Body { + bld.walk(s) + if s, ok := s.(*ast.BranchStmt); ok && s.Tok == token.FALLTHROUGH { + fallthru = bld.block + } + } + + if fallthru == nil { + exits = append(exits, bld.block) + } + } + if dfault != nil { + list.addChild(dfault) + } else { + exits = append(exits, b0) + } + brek.setDestination(bld.newBlock(exits...)) + bld.breaks.pop() +} + +// An operation that might panic marks named function results as used. +func (bld *builder) maybePanic() { + if len(bld.results) == 0 { + return + } + res := bld.results[len(bld.results)-1] + if res == nil { + return + } + for _, f := range res.List { + for _, id := range f.Names { + bld.use(id) + } + } +} + +func (bld *builder) newBlock(parents ...*block) *block { + bld.block = &block{ops: map[*ast.Object][]operation{}} + for _, b := range parents { + b.addChild(bld.block) + } + return bld.block +} + +func (bld *builder) stmtLabel(s ast.Stmt) *ast.Object { + if ls := bld.labelStmt; ls != nil && ls.Stmt == s { + return ls.Label.Obj + } + return nil +} + +func (bld *builder) assign(id *ast.Ident) { + bld.newOp(id, true) +} + +func (bld *builder) use(id *ast.Ident) { + bld.newOp(id, false) +} + +func (bld *builder) newOp(id *ast.Ident, assign bool) { + if id.Name == "_" || id.Obj == nil { + return + } + + v, ok := bld.vars[id.Obj] + if !ok { + v = &variable{} + bld.vars[id.Obj] = v + } + v.escapes = v.escapes || v.fundept > 0 || bld.block == nil + + if b := bld.block; b != nil && !v.escapes { + b.ops[id.Obj] = append(b.ops[id.Obj], operation{id, assign}) + } +} + +type branchStack []*branch + +type branch struct { + label *ast.Object + srcs []*block + dst *block +} + +func (s *branchStack) push(lbl *ast.Object) *branch { + br := &branch{label: lbl} + *s = append(*s, br) + return br +} + +func (s *branchStack) get(lbl *ast.Ident) *branch { + for i := len(*s) - 1; i >= 0; i-- { + if br := (*s)[i]; lbl == nil || br.label == lbl.Obj { + return br + } + } + + // Guard against invalid code (break/continue outside of loop). 
+ if lbl == nil { + return &branch{} + } + + return s.push(lbl.Obj) +} + +func (br *branch) addSource(src *block) { + br.srcs = append(br.srcs, src) + if br.dst != nil { + src.addChild(br.dst) + } +} + +func (br *branch) setDestination(dst *block) { + br.dst = dst + for _, src := range br.srcs { + src.addChild(dst) + } +} + +func (s *branchStack) pop() { + *s = (*s)[:len(*s)-1] +} + +func ident(x ast.Expr) (*ast.Ident, bool) { + if p, ok := x.(*ast.ParenExpr); ok { + return ident(p.X) + } + id, ok := x.(*ast.Ident) + return id, ok +} + +type checker struct { + vars map[*ast.Object]*variable + seen map[*block]bool + ineff idents +} + +func (chk *checker) check(b *block) { + if chk.seen[b] { + return + } + chk.seen[b] = true + + for obj, ops := range b.ops { + ops: + for i, op := range ops { + if !op.assign { + continue + } + if i+1 < len(ops) { + if ops[i+1].assign { + chk.ineff = append(chk.ineff, op.id) + } + continue + } + seen := map[*block]bool{} + for _, b := range b.children { + if used(obj, b, seen) { + continue ops + } + } + if !chk.vars[obj].escapes { + chk.ineff = append(chk.ineff, op.id) + } + } + } + + for _, b := range b.children { + chk.check(b) + } +} + +func used(obj *ast.Object, b *block, seen map[*block]bool) bool { + if seen[b] { + return false + } + seen[b] = true + + if ops := b.ops[obj]; len(ops) > 0 { + return !ops[0].assign + } + for _, b := range b.children { + if used(obj, b, seen) { + return true + } + } + return false +} + +type idents []*ast.Ident + +func (ids idents) Len() int { return len(ids) } +func (ids idents) Less(i, j int) bool { return ids[i].Pos() < ids[j].Pos() } +func (ids idents) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] } diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml deleted file mode 100644 index 6f440f1e4..000000000 --- a/vendor/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.3 - - go: 1.4 - - go: 1.5 - - go: 1.6 - - go: 1.7 - - go: tip - allow_failures: - - go: tip - -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go vet $(go list ./... | grep -v /vendor/) - - go test -v -race ./... diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index 08f86693b..000000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,10 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -> Note: gorilla/context, having been born well before `context.Context` existed, does not play well -> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b1..000000000 --- a/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. 
-func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 448d1bfca..000000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -Note: gorilla/context, having been born well before `context.Context` existed, -does not play well > with the shallow copying of the request that -[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) -(added to net/http Go 1.7 onwards) performs. You should either use *just* -gorilla/context, or moving forward, the new `http.Request.Context()`. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. 
-*/ -package context diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS new file mode 100644 index 000000000..b722392ee --- /dev/null +++ b/vendor/github.com/gorilla/mux/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of gorilla/mux authors for copyright purposes. +# +# Please keep the list sorted. + +Google LLC (https://opensource.google.com/) +Kamil Kisielk +Matt Silverlock +Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md deleted file mode 100644 index 232be82e4..000000000 --- a/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,11 +0,0 @@ -**What version of Go are you running?** (Paste the output of `go version`) - - -**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`) - - -**Describe your problem** (and what you have tried so far) - - -**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it) - diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 0e5fb8728..6903df638 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index e424397ac..35eea9f10 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,12 +1,12 @@ # gorilla/mux [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) +![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) -http://www.gorillatoolkit.org/pkg/mux +https://www.gorillatoolkit.org/pkg/mux Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to their respective handler. @@ -25,10 +25,12 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv * [Examples](#examples) * [Matching Routes](#matching-routes) * [Static Files](#static-files) +* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) * [Registered URLs](#registered-urls) * [Walking Routes](#walking-routes) * [Graceful Shutdown](#graceful-shutdown) * [Middleware](#middleware) +* [Handling CORS Requests](#handling-cors-requests) * [Testing Handlers](#testing-handlers) * [Full Example](#full-example) @@ -88,7 +90,7 @@ r := mux.NewRouter() // Only matches if domain is "www.example.com". r.Host("www.example.com") // Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.domain.com") +r.Host("{subdomain:[a-z]+}.example.com") ``` There are several other matchers that can be added. 
To match path prefixes: @@ -210,6 +212,93 @@ func main() { } ``` +### Serving Single Page Applications + +Most of the time it makes sense to serve your SPA on a separate web server from your API, +but sometimes it's desirable to serve them both from one place. It's possible to write a simple +handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage +mux's powerful routing for your API endpoints. + +```go +package main + +import ( + "encoding/json" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gorilla/mux" +) + +// spaHandler implements the http.Handler interface, so we can use it +// to respond to HTTP requests. The path to the static directory and +// path to the index file within that static directory are used to +// serve the SPA in the given static directory. +type spaHandler struct { + staticPath string + indexPath string +} + +// ServeHTTP inspects the URL path to locate a file within the static dir +// on the SPA handler. If a file is found, it will be served. If not, the +// file located at the index path on the SPA handler will be served. This +// is suitable behavior for serving an SPA (single page application). +func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // get the absolute path to prevent directory traversal + path, err := filepath.Abs(r.URL.Path) + if err != nil { + // if we failed to get the absolute path respond with a 400 bad request + // and stop + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // prepend the path with the path to the static directory + path = filepath.Join(h.staticPath, path) + + // check whether a file exists at the given path + _, err = os.Stat(path) + if os.IsNotExist(err) { + // file does not exist, serve index.html + http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) + return + } else if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // otherwise, use http.FileServer to serve the static dir + http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) +} + +func main() { + router := mux.NewRouter() + + router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { + // an example API handler + json.NewEncoder(w).Encode(map[string]bool{"ok": true}) + }) + + spa := spaHandler{staticPath: "build", indexPath: "index.html"} + router.PathPrefix("/").Handler(spa) + + srv := &http.Server{ + Handler: router, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + ### Registered URLs Now let's see how to build registered URLs. @@ -238,13 +327,13 @@ This also works for host and query value variables: ```go r := mux.NewRouter() -r.Host("{subdomain}.domain.com"). +r.Host("{subdomain}.example.com"). Path("/articles/{category}/{id:[0-9]+}"). Queries("filter", "{filter}"). HandlerFunc(ArticleHandler). 
Name("article") -// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" +// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42", @@ -264,7 +353,7 @@ r.HeadersRegexp("Content-Type", "application/(text|json)") There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: ```go -// "http://news.domain.com/" +// "http://news.example.com/" host, err := r.Get("article").URLHost("subdomain", "news") // "/articles/technology/42" @@ -275,12 +364,12 @@ And if you use subrouters, host and path defined separately can be built as well ```go r := mux.NewRouter() -s := r.Host("{subdomain}.domain.com").Subrouter() +s := r.Host("{subdomain}.example.com").Subrouter() s.Path("/articles/{category}/{id:[0-9]+}"). HandlerFunc(ArticleHandler). Name("article") -// "http://news.domain.com/articles/technology/42" +// "http://news.example.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42") @@ -491,6 +580,73 @@ r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. +### Handling CORS Requests + +[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. + +* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` +* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route +* If you do not specify any methods, then: +> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. + +Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: + +```go +package main + +import ( + "net/http" + "github.com/gorilla/mux" +) + +func main() { + r := mux.NewRouter() + + // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers + r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) + r.Use(mux.CORSMethodMiddleware(r)) + + http.ListenAndServe(":8080", r) +} + +func fooHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + if r.Method == http.MethodOptions { + return + } + + w.Write([]byte("foo")) +} +``` + +And an request to `/foo` using something like: + +```bash +curl localhost:8080/foo -v +``` + +Would look like: + +```bash +* Trying ::1... 
+* TCP_NODELAY set +* Connected to localhost (::1) port 8080 (#0) +> GET /foo HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.59.0 +> Accept: */* +> +< HTTP/1.1 200 OK +< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS +< Access-Control-Allow-Origin: * +< Date: Fri, 28 Jun 2019 20:13:30 GMT +< Content-Length: 3 +< Content-Type: text/plain; charset=utf-8 +< +* Connection #0 to host localhost left intact +foo +``` + ### Testing Handlers Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. @@ -503,8 +659,8 @@ package main func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { // A very simple health check. - w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) // In the future we could report back on the status of our DB, or our cache // (e.g. Redis) by performing a simple PING, and include them in the response. diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go deleted file mode 100644 index d7adaa8fa..000000000 --- a/vendor/github.com/gorilla/mux/context_gorilla.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -package mux - -import ( - "net/http" - - "github.com/gorilla/context" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return context.Get(r, key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - context.Set(r, key, val) - return r -} - -func contextClear(r *http.Request) { - context.Clear(r) -} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go deleted file mode 100644 index 209cbea7d..000000000 --- a/vendor/github.com/gorilla/mux/context_native.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.7 - -package mux - -import ( - "context" - "net/http" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return r.Context().Value(key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - return r.WithContext(context.WithValue(r.Context(), key, val)) -} - -func contextClear(r *http.Request) { - return -} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index 38957deea..bd5a38b55 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -295,7 +295,7 @@ A more complex authentication middleware, which maps session token to users, cou r := mux.NewRouter() r.HandleFunc("/", handler) - amw := authenticationMiddleware{} + amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod new file mode 100644 index 000000000..df170a399 --- /dev/null +++ b/vendor/github.com/gorilla/mux/go.mod @@ -0,0 +1,3 @@ +module github.com/gorilla/mux + +go 1.12 diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go index ceb812cee..cb51c565e 100644 --- a/vendor/github.com/gorilla/mux/middleware.go +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -32,37 +32,19 @@ func (r *Router) useInterface(mw middleware) { r.middlewares = append(r.middlewares, mw) } -// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header -// 
on a request, by matching routes based only on paths. It also handles -// OPTIONS requests, by settings Access-Control-Allow-Methods, and then -// returning without calling the next http handler. +// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header +// on requests for routes that have an OPTIONS method matcher to all the method matchers on +// the route. Routes that do not explicitly handle OPTIONS requests will not be processed +// by the middleware. See examples for usage. func CORSMethodMiddleware(r *Router) MiddlewareFunc { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - var allMethods []string - - err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { - for _, m := range route.matchers { - if _, ok := m.(*routeRegexp); ok { - if m.Match(req, &RouteMatch{}) { - methods, err := route.GetMethods() - if err != nil { - return err - } - - allMethods = append(allMethods, methods...) - } - break - } - } - return nil - }) - + allMethods, err := getAllMethodsForRoute(r, req) if err == nil { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) - - if req.Method == "OPTIONS" { - return + for _, v := range allMethods { + if v == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) + } } } @@ -70,3 +52,23 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc { }) } } + +// getAllMethodsForRoute returns all the methods from method matchers matching a given +// request. +func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { + var allMethods []string + + for _, route := range r.routes { + var match RouteMatch + if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { + methods, err := route.GetMethods() + if err != nil { + return nil, err + } + + allMethods = append(allMethods, methods...) + } + } + + return allMethods, nil +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 4bbafa51d..782a34b22 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,6 +5,7 @@ package mux import ( + "context" "errors" "fmt" "net/http" @@ -22,7 +23,7 @@ var ( // NewRouter returns a new router instance. func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} + return &Router{namedRoutes: make(map[string]*Route)} } // Router registers routes to be matched and dispatches a handler. @@ -50,24 +51,75 @@ type Router struct { // Configurable Handler to be used when the request method does not match the route. MethodNotAllowedHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute // Routes to be matched, in order. routes []*Route + // Routes by name for URL building. namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // See Router.SkipClean(). This defines the flag for new routes. - skipClean bool + // If true, do not clear the request context after handling the request. - // This has no effect when go1.7+ is used, since the context is stored - // on the request itself. + // + // Deprecated: No effect, since the context is stored on the request itself. KeepContext bool - // see Router.UseEncodedPath(). This defines a flag for all routes. 
- useEncodedPath bool + // Slice of middlewares to be called after a match is found middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. + regexp routeRegexpGroup + + // List of matchers. + matchers []matcher + + // The scheme used when building URLs. + buildScheme string + + buildVarsFunc BuildVarsFunc +} + +// returns an effective deep copy of `routeConf` +func copyRouteConf(r routeConf) routeConf { + c := r + + if r.regexp.path != nil { + c.regexp.path = copyRouteRegexp(r.regexp.path) + } + + if r.regexp.host != nil { + c.regexp.host = copyRouteRegexp(r.regexp.host) + } + + c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) + for _, q := range r.regexp.queries { + c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) + } + + c.matchers = make([]matcher, len(r.matchers)) + copy(c.matchers, r.matchers) + + return c +} + +func copyRouteRegexp(r *routeRegexp) *routeRegexp { + c := *r + return &c } // Match attempts to match the given request against the router's registered routes. @@ -143,8 +195,8 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { var handler http.Handler if r.Match(req, &match) { handler = match.Handler - req = setVars(req, match.Vars) - req = setCurrentRoute(req, match.Route) + req = requestWithVars(req, match.Vars) + req = requestWithRoute(req, match.Route) } if handler == nil && match.MatchErr == ErrMethodMismatch { @@ -155,22 +207,18 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { handler = http.NotFoundHandler() } - if !r.KeepContext { - defer contextClear(req) - } - handler.ServeHTTP(w, req) } // Get returns a route registered with the given name. func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // GetRoute returns a route registered with the given name. This method // was renamed to Get() and remains here for backwards compatibility. func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] + return r.namedRoutes[name] } // StrictSlash defines the trailing slash behavior for new routes. The initial @@ -221,55 +269,24 @@ func (r *Router) UseEncodedPath() *Router { return r } -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -func (r *Router) getBuildScheme() string { - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. 
-func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.routes = append(r.routes, route) return route } +// Name registers a new route with a name. +// See Route.Name(). +func (r *Router) Name(name string) *Route { + return r.NewRoute().Name(name) +} + // Handle registers a new route with a matcher for the URL path. // See Route.Path() and Route.Handler(). func (r *Router) Handle(path string, handler http.Handler) *Route { @@ -409,7 +426,7 @@ const ( // Vars returns the route variables for the current request, if any. func Vars(r *http.Request) map[string]string { - if rv := contextGet(r, varsKey); rv != nil { + if rv := r.Context().Value(varsKey); rv != nil { return rv.(map[string]string) } return nil @@ -418,21 +435,22 @@ func Vars(r *http.Request) map[string]string { // CurrentRoute returns the matched route for the current request, if any. // This only works when called inside the handler of the matched route // because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. +// after the handler returns. func CurrentRoute(r *http.Request) *Route { - if rv := contextGet(r, routeKey); rv != nil { + if rv := r.Context().Value(routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) *http.Request { - return contextSet(r, varsKey, val) +func requestWithVars(r *http.Request, vars map[string]string) *http.Request { + ctx := context.WithValue(r.Context(), varsKey, vars) + return r.WithContext(ctx) } -func setCurrentRoute(r *http.Request, val interface{}) *http.Request { - return contextSet(r, routeKey, val) +func requestWithRoute(r *http.Request, route *Route) *http.Request { + ctx := context.WithValue(r.Context(), routeKey, route) + return r.WithContext(ctx) } // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 2b57e5627..0144842bb 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -113,6 +113,13 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro if typ != regexpTypePrefix { pattern.WriteByte('$') } + + var wildcardHostPort bool + if typ == regexpTypeHost { + if !strings.Contains(pattern.String(), ":") { + wildcardHostPort = true + } + } reverse.WriteString(raw) if endSlash { reverse.WriteByte('/') @@ -131,13 +138,14 @@ func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*ro // Done! 
return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + wildcardHostPort: wildcardHostPort, }, nil } @@ -158,27 +166,36 @@ type routeRegexp struct { varsN []string // Variable regexps (validators). varsR []*regexp.Regexp + // Wildcard host-port (no strict port match in hostname) + wildcardHostPort bool } // Match matches the regexp against the URL host or path. func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType != regexpTypeHost { - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() + if r.regexpType == regexpTypeHost { + host := getHost(req) + if r.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } } - return r.regexp.MatchString(path) + return r.regexp.MatchString(host) } - return r.regexp.MatchString(getHost(req)) + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) } // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { @@ -213,14 +230,51 @@ func (r *routeRegexp) getURLQuery(req *http.Request) string { return "" } templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } + val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) + if ok { + return templateKey + "=" + val } return "" } +// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. +// If key was not found, empty string and false is returned. +func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { + query := []byte(rawQuery) + for len(query) > 0 { + foundKey := query + if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { + foundKey, query = foundKey[:i], foundKey[i+1:] + } else { + query = query[:0] + } + if len(foundKey) == 0 { + continue + } + var value []byte + if i := bytes.IndexByte(foundKey, '='); i >= 0 { + foundKey, value = foundKey[:i], foundKey[i+1:] + } + if len(foundKey) < len(key) { + // Cannot possibly be key. + continue + } + keyString, err := url.QueryUnescape(string(foundKey)) + if err != nil { + continue + } + if keyString != key { + continue + } + valueString, err := url.QueryUnescape(string(value)) + if err != nil { + continue + } + return valueString, true + } + return "", false +} + func (r *routeRegexp) matchQueryString(req *http.Request) bool { return r.regexp.MatchString(r.getURLQuery(req)) } @@ -267,10 +321,16 @@ type routeRegexpGroup struct { } // setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. 
if v.host != nil { host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } matches := v.host.regexp.FindStringSubmatchIndex(host) if len(matches) > 0 { extractVars(host, matches, v.host.varsN, m.Vars) @@ -296,7 +356,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } else { u.Path += "/" } - m.Handler = http.RedirectHandler(u.String(), 301) + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) } } } @@ -312,17 +372,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } // getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. func getHost(r *http.Request) string { if r.URL.IsAbs() { return r.URL.Host } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - + return r.Host } func extractVars(input string, matches []int, names []string, output map[string]string) { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index a591d7354..750afe570 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -15,24 +15,8 @@ import ( // Route stores information to match a request and build URLs. type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute // Request handler for the route. handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - // The scheme used when building URLs. - buildScheme string // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. @@ -40,7 +24,11 @@ type Route struct { // Error resulted from building a route. err error - buildVarsFunc BuildVarsFunc + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf } // SkipClean reports whether path cleaning is enabled for this route via @@ -64,6 +52,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { matchErr = ErrMethodMismatch continue } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. 
+ if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + matchErr = nil return false } @@ -74,7 +74,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { return false } - if match.MatchErr == ErrMethodMismatch { + if match.MatchErr == ErrMethodMismatch && r.handler != nil { // We found a route which matches request method, clear MatchErr match.MatchErr = nil // Then override the mis-matched handler @@ -93,9 +93,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { } // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } + r.regexp.setMatch(req, match, r) return true } @@ -137,7 +135,7 @@ func (r *Route) GetHandler() http.Handler { // Name ----------------------------------------------------------------------- // Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. +// It is an error to call Name more than once on a route. func (r *Route) Name(name string) *Route { if r.name != "" { r.err = fmt.Errorf("mux: route already has name %q, can't set %q", @@ -145,7 +143,7 @@ func (r *Route) Name(name string) *Route { } if r.err == nil { r.name = name - r.getNamedRoutes()[name] = r + r.namedRoutes[name] = r } return r } @@ -177,7 +175,6 @@ func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { if r.err != nil { return r.err } - r.regexp = r.getRegexpGroup() if typ == regexpTypePath || typ == regexpTypePrefix { if len(tpl) > 0 && tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) @@ -386,7 +383,7 @@ func (r *Route) PathPrefix(tpl string) *Route { // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. // -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. // // Variables can define an optional regexp pattern to be matched: // @@ -415,16 +412,35 @@ func (r *Route) Queries(pairs ...string) *Route { type schemeMatcher []string func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) + scheme := r.URL.Scheme + // https://golang.org/pkg/net/http/#Request + // "For [most] server requests, fields other than Path and RawQuery will be + // empty." + // Since we're an http muxer, the scheme is either going to be http or https + // though, so we can just set it based on the tls termination state. + if scheme == "" { + if r.TLS == nil { + scheme = "http" + } else { + scheme = "https" + } + } + return matchInArray(m, scheme) } // Schemes adds a matcher for URL schemes. // It accepts a sequence of schemes to be matched, e.g.: "http", "https". +// If the request's URL has a scheme set, it will be matched against. +// Generally, the URL scheme will only be set if a previous handler set it, +// such as the ProxyHeaders handler from gorilla/handlers. +// If unset, the scheme will be determined based on the request's TLS +// termination state. +// The first argument to Schemes will be used when constructing a route URL. 
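For orientation, the sketch below exercises the two behaviours the Schemes doc comment above describes: matching falls back to the request's TLS state when `req.URL.Scheme` is empty, and the first argument to Schemes is the one used when building URLs. The route path, host, and name are illustrative only and not part of this patch.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// With both schemes listed the route matches plain and TLS requests;
	// during matching the scheme is derived from req.TLS when
	// req.URL.Scheme is empty (the usual case for server requests).
	health := r.HandleFunc("/health", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, "ok")
	}).Host("example.com").Schemes("https", "http").Name("health")

	// URL building uses the first Schemes argument, "https".
	u, err := health.URL()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // https://example.com/health
}
```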
func (r *Route) Schemes(schemes ...string) *Route { for k, v := range schemes { schemes[k] = strings.ToLower(v) } - if r.buildScheme == "" && len(schemes) > 0 { + if len(schemes) > 0 { r.buildScheme = schemes[0] } return r.addMatcher(schemeMatcher(schemes)) @@ -439,7 +455,15 @@ type BuildVarsFunc func(map[string]string) map[string]string // BuildVarsFunc adds a custom function to be used to modify build variables // before a route's URL is built. func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } return r } @@ -458,7 +482,8 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} + // initialize a subrouter with a copy of the parent route's configuration + router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} r.addMatcher(router) return router } @@ -487,8 +512,8 @@ func (r *Route) Subrouter() *Router { // This also works for host variables: // // r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). // Name("article") // // // url.String() will be "http://news.domain.com/articles/technology/42" @@ -496,15 +521,19 @@ func (r *Route) Subrouter() *Router { // "category", "technology", // "id", "42") // +// The scheme of the resulting url will be the first argument that was passed to Schemes: +// +// // url.String() will be "https://example.com" +// r := mux.NewRouter() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() +// // All variables defined in the route are required, and their values must // conform to the corresponding patterns. func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } values, err := r.prepareVars(pairs...) if err != nil { return nil, err @@ -516,8 +545,8 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { return nil, err } scheme = "http" - if s := r.getBuildScheme(); s != "" { - scheme = s + if r.buildScheme != "" { + scheme = r.buildScheme } } if r.regexp.path != nil { @@ -547,7 +576,7 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.host == nil { + if r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } values, err := r.prepareVars(pairs...) @@ -562,8 +591,8 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { Scheme: "http", Host: host, } - if s := r.getBuildScheme(); s != "" { - u.Scheme = s + if r.buildScheme != "" { + u.Scheme = r.buildScheme } return u, nil } @@ -575,7 +604,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } values, err := r.prepareVars(pairs...) 
@@ -600,7 +629,7 @@ func (r *Route) GetPathTemplate() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return "", errors.New("mux: route doesn't have a path") } return r.regexp.path.template, nil @@ -614,7 +643,7 @@ func (r *Route) GetPathRegexp() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.path == nil { + if r.regexp.path == nil { return "", errors.New("mux: route does not have a path") } return r.regexp.path.regexp.String(), nil @@ -629,10 +658,10 @@ func (r *Route) GetQueriesRegexp() ([]string, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.queries == nil { + if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } - var queries []string + queries := make([]string, 0, len(r.regexp.queries)) for _, query := range r.regexp.queries { queries = append(queries, query.regexp.String()) } @@ -648,10 +677,10 @@ func (r *Route) GetQueriesTemplates() ([]string, error) { if r.err != nil { return nil, r.err } - if r.regexp == nil || r.regexp.queries == nil { + if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } - var queries []string + queries := make([]string, 0, len(r.regexp.queries)) for _, query := range r.regexp.queries { queries = append(queries, query.template) } @@ -683,7 +712,7 @@ func (r *Route) GetHostTemplate() (string, error) { if r.err != nil { return "", r.err } - if r.regexp == nil || r.regexp.host == nil { + if r.regexp.host == nil { return "", errors.New("mux: route doesn't have a host") } return r.regexp.host.template, nil @@ -700,64 +729,8 @@ func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { } func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } if r.buildVarsFunc != nil { m = r.buildVarsFunc(m) } return m } - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getBuildScheme() string - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -func (r *Route) getBuildScheme() string { - if r.buildScheme != "" { - return r.buildScheme - } - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. 
- r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go index 32ecffde4..5f5c496de 100644 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -15,5 +15,5 @@ import "net/http" // can be set by making a route that captures the required variables, // starting a server and sending the request to that server. func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return setVars(r, val) + return requestWithVars(r, val) } diff --git a/vendor/github.com/gostaticanalysis/analysisutil/LICENSE b/vendor/github.com/gostaticanalysis/analysisutil/LICENSE new file mode 100644 index 000000000..bf7e33db8 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 GoStaticAnalysis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gostaticanalysis/analysisutil/README.md b/vendor/github.com/gostaticanalysis/analysisutil/README.md new file mode 100644 index 000000000..d8fd3d2a4 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/README.md @@ -0,0 +1,5 @@ +# analysisutil + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/gostaticanalysis/analysisutil)](https://pkg.go.dev/github.com/gostaticanalysis/analysisutil) + +Utilities for x/tools/go/analysis package. diff --git a/vendor/github.com/gostaticanalysis/analysisutil/call.go b/vendor/github.com/gostaticanalysis/analysisutil/call.go new file mode 100644 index 000000000..e3d98d1dc --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/call.go @@ -0,0 +1,405 @@ +package analysisutil + +import ( + "go/types" + + "golang.org/x/tools/go/ssa" +) + +// CalledChecker checks a function is called. +// See From and Func. +type CalledChecker struct { + Ignore func(instr ssa.Instruction) bool +} + +// NotIn checks whether receiver's method is called in a function. +// If there is no methods calling at a path from an instruction +// which type is receiver to all return instruction, NotIn returns these instructions. 
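The CalledChecker API documented above is the core of several linters built on this package. A minimal, hypothetical analyzer using the NotCalledIn convenience wrapper might look like the sketch below; the analyzer name, the database/sql example, and the message text are assumptions for illustration, not part of this patch.

```go
package rowsclose

import (
	"github.com/gostaticanalysis/analysisutil"
	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/buildssa"
)

// Analyzer reports *sql.Rows values that are never closed.
var Analyzer = &analysis.Analyzer{
	Name:     "rowsclose", // placeholder name
	Doc:      "reports *sql.Rows values whose Close method is never called",
	Run:      run,
	Requires: []*analysis.Analyzer{buildssa.Analyzer},
}

func run(pass *analysis.Pass) (interface{}, error) {
	rowsType := analysisutil.TypeOf(pass, "database/sql", "*Rows")
	if rowsType == nil {
		return nil, nil // the analyzed package does not import database/sql
	}
	closeMethod := analysisutil.MethodOf(rowsType, "Close")
	if closeMethod == nil {
		return nil, nil
	}

	funcs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
	for _, fn := range funcs {
		// NotCalledIn returns the instructions that create a *sql.Rows
		// value which can reach a return without Close being called.
		for _, instr := range analysisutil.NotCalledIn(fn, rowsType, closeMethod) {
			pass.Reportf(instr.Pos(), "rows.Close is not called")
		}
	}
	return nil, nil
}
```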
+func (c *CalledChecker) NotIn(f *ssa.Function, receiver types.Type, methods ...*types.Func) []ssa.Instruction { + done := map[ssa.Value]bool{} + var instrs []ssa.Instruction + for _, b := range f.Blocks { + for i, instr := range b.Instrs { + v, _ := instr.(ssa.Value) + if v == nil || done[v] { + continue + } + + if v, _ := v.(*ssa.UnOp); v != nil && done[v.X] { + continue + } + + called, ok := c.From(b, i, receiver, methods...) + if ok && !called { + instrs = append(instrs, instr) + done[v] = true + if v, _ := v.(*ssa.UnOp); v != nil { + done[v.X] = true + } + } + } + } + return instrs +} + +// Func returns true when f is called in the instr. +// If recv is not nil, Func also checks the receiver. +func (c *CalledChecker) Func(instr ssa.Instruction, recv ssa.Value, f *types.Func) bool { + + if c.Ignore != nil && c.Ignore(instr) { + return false + } + + call, ok := instr.(ssa.CallInstruction) + if !ok { + return false + } + + common := call.Common() + if common == nil { + return false + } + + callee := common.StaticCallee() + if callee == nil { + return false + } + + fn, ok := callee.Object().(*types.Func) + if !ok { + return false + } + + if recv != nil && + common.Signature().Recv() != nil && + (len(common.Args) == 0 && recv != nil || common.Args[0] != recv && + !referrer(recv, common.Args[0])) { + return false + } + + return fn == f +} + +func referrer(a, b ssa.Value) bool { + return isReferrerOf(a, b) || isReferrerOf(b, a) +} + +func isReferrerOf(a, b ssa.Value) bool { + if a == nil || b == nil { + return false + } + if b.Referrers() != nil { + brs := *b.Referrers() + + for _, br := range brs { + brv, ok := br.(ssa.Value) + if !ok { + continue + } + if brv == a { + return true + } + } + } + return false +} + +// From checks whether receiver's method is called in an instruction +// which belogns to after i-th instructions, or in succsor blocks of b. +// The first result is above value. +// The second result is whether type of i-th instruction does not much receiver +// or matches with ignore cases. 
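The two results of From (exposed as CalledFrom) are easy to mix up: `ok` reports whether the i-th instruction actually yields a value of the receiver type, and `called` reports whether one of the methods is reached on the paths to a return. A small helper with an invented name shows the usual interpretation:

```go
package example

import (
	"go/types"

	"github.com/gostaticanalysis/analysisutil"
	"golang.org/x/tools/go/ssa"
)

// missingClose reports whether the recvType value created by b.Instrs[i]
// can reach a return without closeMethod being called on it. The helper
// name is invented for illustration.
func missingClose(b *ssa.BasicBlock, i int, recvType types.Type, closeMethod *types.Func) bool {
	called, ok := analysisutil.CalledFrom(b, i, recvType, closeMethod)
	// ok == false: instruction i does not yield a recvType value (or it
	// matches an ignore rule), so there is nothing to report.
	// called == true: closeMethod is reached before every return.
	return ok && !called
}
```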
+func (c *CalledChecker) From(b *ssa.BasicBlock, i int, receiver types.Type, methods ...*types.Func) (called, ok bool) { + if b == nil || i < 0 || i >= len(b.Instrs) || + receiver == nil || len(methods) == 0 { + return false, false + } + + v, ok := b.Instrs[i].(ssa.Value) + if !ok { + return false, false + } + + from := &calledFrom{recv: v, fs: methods, ignore: c.Ignore} + + if !from.isRecv(receiver, v.Type()) { + return false, false + } + + if from.ignored() { + return false, false + } + + if from.instrs(b.Instrs[i+1:]) || + from.succs(b) { + return true, true + } + + from.done = nil + if from.storedInInstrs(b.Instrs[i+1:]) || + from.storedInSuccs(b) { + return false, false + } + + return false, true +} + +type calledFrom struct { + recv ssa.Value + fs []*types.Func + done map[*ssa.BasicBlock]bool + ignore func(ssa.Instruction) bool +} + +func (c *calledFrom) ignored() bool { + + switch v := c.recv.(type) { + case *ssa.UnOp: + switch v.X.(type) { + case *ssa.FreeVar, *ssa.Global: + return true + } + } + + refs := c.recv.Referrers() + if refs == nil { + return false + } + + for _, ref := range *refs { + done := map[ssa.Instruction]bool{} + if !c.isOwn(ref) && + ((c.ignore != nil && c.ignore(ref)) || + c.isRet(ref, done) || c.isArg(ref)) { + return true + } + } + + return false +} + +func (c *calledFrom) isOwn(instr ssa.Instruction) bool { + v, ok := instr.(ssa.Value) + if !ok { + return false + } + return v == c.recv +} + +func (c *calledFrom) isRet(instr ssa.Instruction, done map[ssa.Instruction]bool) bool { + if done[instr] { + return false + } + done[instr] = true + + switch instr := instr.(type) { + case *ssa.Return: + return true + case *ssa.MapUpdate: + return c.isRetInRefs(instr.Map, done) + case *ssa.Store: + if instr, _ := instr.Addr.(ssa.Instruction); instr != nil { + return c.isRet(instr, done) + } + return c.isRetInRefs(instr.Addr, done) + case *ssa.FieldAddr: + return c.isRetInRefs(instr.X, done) + case ssa.Value: + return c.isRetInRefs(instr, done) + default: + return false + } +} + +func (c *calledFrom) isRetInRefs(v ssa.Value, done map[ssa.Instruction]bool) bool { + refs := v.Referrers() + if refs == nil { + return false + } + for _, ref := range *refs { + if c.isRet(ref, done) { + return true + } + } + return false +} + +func (c *calledFrom) isArg(instr ssa.Instruction) bool { + + call, ok := instr.(ssa.CallInstruction) + if !ok { + return false + } + + common := call.Common() + if common == nil { + return false + } + + args := common.Args + if common.Signature().Recv() != nil { + args = args[1:] + } + + for i := range args { + if args[i] == c.recv { + return true + } + } + + return false +} + +func (c *calledFrom) instrs(instrs []ssa.Instruction) bool { + for _, instr := range instrs { + for _, f := range c.fs { + if Called(instr, c.recv, f) { + return true + } + } + } + return false +} + +func (c *calledFrom) succs(b *ssa.BasicBlock) bool { + if c.done == nil { + c.done = map[*ssa.BasicBlock]bool{} + } + + if c.done[b] { + return true + } + c.done[b] = true + + if len(b.Succs) == 0 { + return false + } + + for _, s := range b.Succs { + if !c.instrs(s.Instrs) && !c.succs(s) { + return false + } + } + + return true +} + +func (c *calledFrom) storedInInstrs(instrs []ssa.Instruction) bool { + for _, instr := range instrs { + switch instr := instr.(type) { + case *ssa.Store: + if instr.Val == c.recv { + return true + } + } + } + return false +} + +func (c *calledFrom) storedInSuccs(b *ssa.BasicBlock) bool { + if c.done == nil { + c.done = map[*ssa.BasicBlock]bool{} + } + + if 
c.done[b] { + return true + } + c.done[b] = true + + if len(b.Succs) == 0 { + return false + } + + for _, s := range b.Succs { + if !c.storedInInstrs(s.Instrs) && !c.succs(s) { + return false + } + } + + return true +} + +func (c *calledFrom) isRecv(recv, typ types.Type) bool { + return recv == typ || identical(recv, typ) || + c.isRecvInTuple(recv, typ) || c.isRecvInEmbedded(recv, typ) +} + +func (c *calledFrom) isRecvInTuple(recv, typ types.Type) bool { + tuple, _ := typ.(*types.Tuple) + if tuple == nil { + return false + } + + for i := 0; i < tuple.Len(); i++ { + if c.isRecv(recv, tuple.At(i).Type()) { + return true + } + } + + return false +} + +func (c *calledFrom) isRecvInEmbedded(recv, typ types.Type) bool { + + var st *types.Struct + switch typ := typ.(type) { + case *types.Struct: + st = typ + case *types.Pointer: + return c.isRecvInEmbedded(recv, typ.Elem()) + case *types.Named: + return c.isRecvInEmbedded(recv, typ.Underlying()) + default: + return false + } + + for i := 0; i < st.NumFields(); i++ { + field := st.Field(i) + if !field.Embedded() { + continue + } + + ft := field.Type() + if c.isRecv(recv, ft) { + return true + } + + var ptrOrUnptr types.Type + switch ft := ft.(type) { + case *types.Pointer: + // struct { *T } -> T + ptrOrUnptr = ft.Elem() + default: + // struct { T } -> *T + ptrOrUnptr = types.NewPointer(ft) + } + + if c.isRecv(recv, ptrOrUnptr) { + return true + } + } + + return false +} + +// NotCalledIn checks whether receiver's method is called in a function. +// If there is no methods calling at a path from an instruction +// which type is receiver to all return instruction, NotCalledIn returns these instructions. +func NotCalledIn(f *ssa.Function, receiver types.Type, methods ...*types.Func) []ssa.Instruction { + return new(CalledChecker).NotIn(f, receiver, methods...) +} + +// CalledFrom checks whether receiver's method is called in an instruction +// which belogns to after i-th instructions, or in succsor blocks of b. +// The first result is above value. +// The second result is whether type of i-th instruction does not much receiver +// or matches with ignore cases. +func CalledFrom(b *ssa.BasicBlock, i int, receiver types.Type, methods ...*types.Func) (called, ok bool) { + return new(CalledChecker).From(b, i, receiver, methods...) +} + +// Called returns true when f is called in the instr. +// If recv is not nil, Called also checks the receiver. +func Called(instr ssa.Instruction, recv ssa.Value, f *types.Func) bool { + return new(CalledChecker).Func(instr, recv, f) +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go b/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go new file mode 100644 index 000000000..a911db6f1 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go @@ -0,0 +1,45 @@ +package analysisutil + +import ( + "go/token" + + "github.com/gostaticanalysis/comment" + "github.com/gostaticanalysis/comment/passes/commentmap" + "golang.org/x/tools/go/analysis" +) + +// ReportWithoutIgnore returns a report function which can set to (analysis.Pass).Report. +// The report function ignores a diagnostic which annotated by ignore comment as the below. +// //lint:ignore Check1[,Check2,...,CheckN] reason +// names is a list of checker names. +// If names was omitted, the report function ignores by pass.Analyzer.Name. 
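Below is a minimal sketch of wiring an analyzer's diagnostics through this wrapper so that `//lint:ignore` comments are honoured; the analyzer name, the bare-return check, and the message are placeholders, not part of this patch.

```go
package example

import (
	"go/ast"

	"github.com/gostaticanalysis/analysisutil"
	"github.com/gostaticanalysis/comment/passes/commentmap"
	"golang.org/x/tools/go/analysis"
)

var Analyzer = &analysis.Analyzer{
	Name:     "mycheck", // placeholder name
	Doc:      "example of reporting through ReportWithoutIgnore",
	Run:      run,
	Requires: []*analysis.Analyzer{commentmap.Analyzer},
}

func run(pass *analysis.Pass) (interface{}, error) {
	// Diagnostics sent through report are dropped when the offending line
	// carries a "//lint:ignore mycheck reason" comment.
	report := analysisutil.ReportWithoutIgnore(pass)

	for _, f := range pass.Files {
		ast.Inspect(f, func(n ast.Node) bool {
			if ret, ok := n.(*ast.ReturnStmt); ok && len(ret.Results) == 0 {
				report(analysis.Diagnostic{Pos: ret.Pos(), Message: "bare return"})
			}
			return true
		})
	}
	return nil, nil
}
```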
+func ReportWithoutIgnore(pass *analysis.Pass, names ...string) func(analysis.Diagnostic) { + cmaps, _ := pass.ResultOf[commentmap.Analyzer].(comment.Maps) + if cmaps == nil { + cmaps = comment.New(pass.Fset, pass.Files) + } + + if len(names) == 0 { + names = []string{pass.Analyzer.Name} + } + + report := pass.Report // original report func + + return func(d analysis.Diagnostic) { + start := pass.Fset.File(d.Pos).Line(d.Pos) + end := start + if d.End != token.NoPos { + end = pass.Fset.File(d.End).Line(d.End) + } + + for l := start; l <= end; l++ { + for _, n := range names { + if cmaps.IgnoreLine(pass.Fset, l, n) { + return + } + } + } + + report(d) + } +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/file.go b/vendor/github.com/gostaticanalysis/analysisutil/file.go new file mode 100644 index 000000000..2aeca1d9e --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/file.go @@ -0,0 +1,18 @@ +package analysisutil + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// File finds *ast.File in pass.Files by pos. +func File(pass *analysis.Pass, pos token.Pos) *ast.File { + for _, f := range pass.Files { + if f.Pos() <= pos && pos <= f.End() { + return f + } + } + return nil +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/go.mod b/vendor/github.com/gostaticanalysis/analysisutil/go.mod new file mode 100644 index 000000000..5ca7c62b8 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/go.mod @@ -0,0 +1,8 @@ +module github.com/gostaticanalysis/analysisutil + +go 1.12 + +require ( + github.com/gostaticanalysis/comment v1.4.1 + golang.org/x/tools v0.0.0-20200820010801-b793a1359eac +) diff --git a/vendor/github.com/gostaticanalysis/analysisutil/go.sum b/vendor/github.com/gostaticanalysis/analysisutil/go.sum new file mode 100644 index 000000000..134e67dbd --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/go.sum @@ -0,0 +1,37 @@ +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3 h1:2oZsfYnKfYzL4I57uYiRFsUf0bqlLkiuw8nnj3+voUA= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff h1:foic6oVZ4MKltJC6MXzuFZFswE7NCjjtc0Hxbyblawc= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac h1:DugppSxw0LSF8lcjaODPJZoDzq0ElTGskTst3ZaBkHI= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/gostaticanalysis/analysisutil/pkg.go b/vendor/github.com/gostaticanalysis/analysisutil/pkg.go new file mode 100644 index 000000000..b64150d81 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/pkg.go @@ -0,0 +1,49 @@ +package analysisutil + +import ( + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// RemoVendor removes vendoring information from import path. +func RemoveVendor(path string) string { + i := strings.Index(path, "vendor/") + if i >= 0 { + return path[i+len("vendor/"):] + } + return path +} + +// LookupFromImports finds an object from import paths. +func LookupFromImports(imports []*types.Package, path, name string) types.Object { + path = RemoveVendor(path) + for i := range imports { + if path == RemoveVendor(imports[i].Path()) { + return imports[i].Scope().Lookup(name) + } + } + return nil +} + +// Imported returns true when the given pass imports the pkg. 
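Analyzers typically use Imported as a cheap early exit before doing any heavy lifting; RemoveVendor is applied internally, so vendored copies of the package match as well. The net/http example below is an assumption for illustration.

```go
package example

import (
	"github.com/gostaticanalysis/analysisutil"
	"golang.org/x/tools/go/analysis"
)

// run is a placeholder Run function for an analyzer that only cares about
// code using net/http.
func run(pass *analysis.Pass) (interface{}, error) {
	// Imported strips "vendor/" prefixes internally (via RemoveVendor),
	// so a vendored net/http is matched too.
	if !analysisutil.Imported("net/http", pass) {
		return nil, nil // nothing to do for this package
	}
	// ... the actual checks would run here ...
	return nil, nil
}
```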
+func Imported(pkgPath string, pass *analysis.Pass) bool { + fs := pass.Files + if len(fs) == 0 { + return false + } + for _, f := range fs { + for _, i := range f.Imports { + path, err := strconv.Unquote(i.Path.Value) + if err != nil { + continue + } + if RemoveVendor(path) == pkgPath { + return true + } + } + } + return false +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/ssa.go b/vendor/github.com/gostaticanalysis/analysisutil/ssa.go new file mode 100644 index 000000000..517f6b9b4 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/ssa.go @@ -0,0 +1,146 @@ +package analysisutil + +import ( + "golang.org/x/tools/go/ssa" +) + +// IfInstr returns *ssa.If which is contained in the block b. +// If the block b has not any if instruction, IfInstr returns nil. +func IfInstr(b *ssa.BasicBlock) *ssa.If { + if len(b.Instrs) == 0 { + return nil + } + + ifinstr, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If) + if !ok { + return nil + } + + return ifinstr +} + +// Phi returns phi values which are contained in the block b. +func Phi(b *ssa.BasicBlock) (phis []*ssa.Phi) { + for _, instr := range b.Instrs { + if phi, ok := instr.(*ssa.Phi); ok { + phis = append(phis, phi) + } else { + // no more phi + break + } + } + return +} + +// Returns returns a slice of *ssa.Return in the function. +func Returns(v ssa.Value) []*ssa.Return { + var fn *ssa.Function + switch v := v.(type) { + case *ssa.Function: + fn = v + case *ssa.MakeClosure: + return Returns(v.Fn) + default: + return nil + } + + var rets []*ssa.Return + done := map[*ssa.BasicBlock]bool{} + for _, b := range fn.Blocks { + rets = append(rets, returnsInBlock(b, done)...) + } + return rets +} + +func returnsInBlock(b *ssa.BasicBlock, done map[*ssa.BasicBlock]bool) (rets []*ssa.Return) { + if done[b] { + return + } + done[b] = true + + if len(b.Instrs) != 0 { + switch instr := b.Instrs[len(b.Instrs)-1].(type) { + case *ssa.Return: + rets = append(rets, instr) + } + } + + for _, s := range b.Succs { + rets = append(rets, returnsInBlock(s, done)...) + } + return +} + +// BinOp returns binary operator values which are contained in the block b. +func BinOp(b *ssa.BasicBlock) []*ssa.BinOp { + var binops []*ssa.BinOp + for _, instr := range b.Instrs { + if binop, ok := instr.(*ssa.BinOp); ok { + binops = append(binops, binop) + } + } + return binops +} + +// Used returns an instruction which uses the value in the instructions. 
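A small wrapper, with an invented name, showing how Used is commonly applied: check whether a value produced at index i of a block is referenced by any later instruction in the same block.

```go
package example

import (
	"github.com/gostaticanalysis/analysisutil"
	"golang.org/x/tools/go/ssa"
)

// usedAfter reports whether v is referenced by an instruction of b that
// comes after index i. The helper name is invented for illustration.
func usedAfter(v ssa.Value, b *ssa.BasicBlock, i int) bool {
	if i+1 >= len(b.Instrs) {
		return false
	}
	// Used returns the first instruction in the slice that takes v as an
	// operand (also looking through MakeClosure bindings), or nil.
	return analysisutil.Used(v, b.Instrs[i+1:]) != nil
}
```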
+func Used(v ssa.Value, instrs []ssa.Instruction) ssa.Instruction { + if len(instrs) == 0 || v.Referrers() == nil { + return nil + } + + for _, instr := range instrs { + if used := usedInInstr(v, instr); used != nil { + return used + } + } + + return nil +} + +func usedInInstr(v ssa.Value, instr ssa.Instruction) ssa.Instruction { + switch instr := instr.(type) { + case *ssa.MakeClosure: + return usedInClosure(v, instr) + default: + operands := instr.Operands(nil) + for _, x := range operands { + if x != nil && *x == v { + return instr + } + } + } + + switch v := v.(type) { + case *ssa.UnOp: + return usedInInstr(v.X, instr) + } + + return nil +} + +func usedInClosure(v ssa.Value, instr *ssa.MakeClosure) ssa.Instruction { + fn, _ := instr.Fn.(*ssa.Function) + if fn == nil { + return nil + } + + var fv *ssa.FreeVar + for i := range instr.Bindings { + if instr.Bindings[i] == v { + fv = fn.FreeVars[i] + break + } + } + + if fv == nil { + return nil + } + + for _, b := range fn.Blocks { + if used := Used(fv, b.Instrs); used != nil { + return used + } + } + + return nil +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go b/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go new file mode 100644 index 000000000..2f8a16576 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go @@ -0,0 +1,37 @@ +package analysisutil + +import "golang.org/x/tools/go/ssa" + +// InspectInstr inspects from i-th instruction of start block to succsessor blocks. +func InspectInstr(start *ssa.BasicBlock, i int, f func(i int, instr ssa.Instruction) bool) { + new(instrInspector).block(start, i, f) +} + +type instrInspector struct { + done map[*ssa.BasicBlock]bool +} + +func (ins *instrInspector) block(b *ssa.BasicBlock, i int, f func(i int, instr ssa.Instruction) bool) { + if ins.done == nil { + ins.done = map[*ssa.BasicBlock]bool{} + } + + if b == nil || ins.done[b] || len(b.Instrs) <= i { + return + } + + ins.done[b] = true + ins.instrs(i, b.Instrs[i:], f) + for _, s := range b.Succs { + ins.block(s, 0, f) + } + +} + +func (ins *instrInspector) instrs(offset int, instrs []ssa.Instruction, f func(i int, instr ssa.Instruction) bool) { + for i, instr := range instrs { + if !f(offset+i, instr) { + break + } + } +} diff --git a/vendor/github.com/gostaticanalysis/analysisutil/types.go b/vendor/github.com/gostaticanalysis/analysisutil/types.go new file mode 100644 index 000000000..46b970621 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/analysisutil/types.go @@ -0,0 +1,208 @@ +package analysisutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +// ImplementsError return whether t implements error interface. +func ImplementsError(t types.Type) bool { + return types.Implements(t, errType) +} + +// ObjectOf returns types.Object by given name in the package. +func ObjectOf(pass *analysis.Pass, pkg, name string) types.Object { + obj := LookupFromImports(pass.Pkg.Imports(), pkg, name) + if obj != nil { + return obj + } + if RemoveVendor(pass.Pkg.Name()) != RemoveVendor(pkg) { + return nil + } + return pass.Pkg.Scope().Lookup(name) +} + +// TypeOf returns types.Type by given name in the package. +// TypeOf accepts pointer types such as *T. 
+func TypeOf(pass *analysis.Pass, pkg, name string) types.Type { + if name == "" { + return nil + } + + if name[0] == '*' { + obj := TypeOf(pass, pkg, name[1:]) + if obj == nil { + return nil + } + return types.NewPointer(obj) + } + + obj := ObjectOf(pass, pkg, name) + if obj == nil { + return nil + } + + return obj.Type() +} + +// MethodOf returns a method which has given name in the type. +func MethodOf(typ types.Type, name string) *types.Func { + switch typ := typ.(type) { + case *types.Named: + for i := 0; i < typ.NumMethods(); i++ { + if f := typ.Method(i); f.Name() == name { + return f + } + } + case *types.Pointer: + return MethodOf(typ.Elem(), name) + } + return nil +} + +// see: https://github.com/golang/go/issues/19670 +func identical(x, y types.Type) (ret bool) { + defer func() { + r := recover() + switch r := r.(type) { + case string: + if r == "unreachable" { + ret = false + return + } + case nil: + return + } + panic(r) + }() + return types.Identical(x, y) +} + +// Interfaces returns a map of interfaces which are declared in the package. +func Interfaces(pkg *types.Package) map[string]*types.Interface { + ifs := map[string]*types.Interface{} + + for _, n := range pkg.Scope().Names() { + o := pkg.Scope().Lookup(n) + if o != nil { + i, ok := o.Type().Underlying().(*types.Interface) + if ok { + ifs[n] = i + } + } + } + + return ifs +} + +// Structs returns a map of structs which are declared in the package. +func Structs(pkg *types.Package) map[string]*types.Struct { + structs := map[string]*types.Struct{} + + for _, n := range pkg.Scope().Names() { + o := pkg.Scope().Lookup(n) + if o != nil { + s, ok := o.Type().Underlying().(*types.Struct) + if ok { + structs[n] = s + } + } + } + + return structs +} + +// HasField returns whether the struct has the field. +func HasField(s *types.Struct, f *types.Var) bool { + if s == nil || f == nil { + return false + } + + for i := 0; i < s.NumFields(); i++ { + if s.Field(i) == f { + return true + } + } + + return false +} + +func TypesInfo(info ...*types.Info) *types.Info { + if len(info) == 0 { + return nil + } + + var merged types.Info + for i := range info { + mergeTypesInfo(&merged, info[i]) + } + + return &merged +} + +func mergeTypesInfo(i1, i2 *types.Info) { + // Types + if i1.Types == nil && i2.Types != nil { + i1.Types = map[ast.Expr]types.TypeAndValue{} + } + for expr, tv := range i2.Types { + i1.Types[expr] = tv + } + + // Defs + if i1.Defs == nil && i2.Defs != nil { + i1.Defs = map[*ast.Ident]types.Object{} + } + for ident, obj := range i2.Defs { + i1.Defs[ident] = obj + } + + // Uses + if i1.Uses == nil && i2.Uses != nil { + i1.Uses = map[*ast.Ident]types.Object{} + } + for ident, obj := range i2.Uses { + i1.Uses[ident] = obj + } + + // Implicits + if i1.Implicits == nil && i2.Implicits != nil { + i1.Implicits = map[ast.Node]types.Object{} + } + for n, obj := range i2.Implicits { + i1.Implicits[n] = obj + } + + // Selections + if i1.Selections == nil && i2.Selections != nil { + i1.Selections = map[*ast.SelectorExpr]*types.Selection{} + } + for expr, sel := range i2.Selections { + i1.Selections[expr] = sel + } + + // Scopes + if i1.Scopes == nil && i2.Scopes != nil { + i1.Scopes = map[ast.Node]*types.Scope{} + } + for n, s := range i2.Scopes { + i1.Scopes[n] = s + } + + // InitOrder + i1.InitOrder = append(i1.InitOrder, i2.InitOrder...) +} + +// Under returns the most bottom underlying type. 
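A short example of the kind of check Under enables: classifying a possibly named type by its bottom underlying type. The helper name is invented for illustration.

```go
package example

import (
	"go/types"

	"github.com/gostaticanalysis/analysisutil"
)

// isStringKind reports whether t is, at bottom, the built-in string type,
// even when it is hidden behind a named type such as "type ID string".
func isStringKind(t types.Type) bool {
	b, ok := analysisutil.Under(t).(*types.Basic)
	return ok && b.Kind() == types.String
}
```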
+func Under(t types.Type) types.Type { + switch t := t.(type) { + case *types.Named: + return Under(t.Underlying()) + default: + return t + } +} diff --git a/vendor/github.com/gostaticanalysis/comment/LICENSE b/vendor/github.com/gostaticanalysis/comment/LICENSE new file mode 100644 index 000000000..4f7eeff5b --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Takuya Ueda + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gostaticanalysis/comment/README.md b/vendor/github.com/gostaticanalysis/comment/README.md new file mode 100644 index 000000000..533555313 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/README.md @@ -0,0 +1,10 @@ +# gostaticanalysis/comment + +[![godoc.org][godoc-badge]][godoc] + +`comment` provides utilities for [ast.CommentMap](https://golang.org/pkg/go/ast/#CommentMap). + + +[godoc]: https://godoc.org/github.com/gostaticanalysis/comment +[godoc-badge]: https://img.shields.io/badge/godoc-reference-4F73B3.svg?style=flat-square&label=%20godoc.org + diff --git a/vendor/github.com/gostaticanalysis/comment/comment.go b/vendor/github.com/gostaticanalysis/comment/comment.go new file mode 100644 index 000000000..2fe67fa96 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/comment.go @@ -0,0 +1,147 @@ +package comment + +import ( + "go/ast" + "go/token" + "strings" +) + +// Maps is slice of ast.CommentMap. +type Maps []ast.CommentMap + +// New creates new a CommentMap slice from specified files. +func New(fset *token.FileSet, files []*ast.File) Maps { + maps := make(Maps, len(files)) + for i := range files { + maps[i] = ast.NewCommentMap(fset, files[i], files[i].Comments) + } + return maps +} + +// Comments returns correspond a CommentGroup slice to specified AST node. +func (maps Maps) Comments(n ast.Node) []*ast.CommentGroup { + for i := range maps { + if maps[i][n] != nil { + return maps[i][n] + } + } + return nil +} + +// CommentsByPos returns correspond a CommentGroup slice to specified pos. +func (maps Maps) CommentsByPos(pos token.Pos) []*ast.CommentGroup { + for i := range maps { + for n, cgs := range maps[i] { + if n.Pos() == pos { + return cgs + } + } + } + return nil +} + +// Annotated checks either specified AST node is annotated or not. 
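A sketch of how a checker might consult Maps while walking the AST; the checker name "mycheck" is an assumption for illustration, not part of this patch.

```go
package example

import (
	"go/ast"

	"github.com/gostaticanalysis/comment"
	"golang.org/x/tools/go/analysis"
)

// run is a placeholder Run function for a checker named "mycheck".
func run(pass *analysis.Pass) (interface{}, error) {
	maps := comment.New(pass.Fset, pass.Files)

	for _, f := range pass.Files {
		ast.Inspect(f, func(n ast.Node) bool {
			if n == nil {
				return false
			}
			// Nodes annotated with "//lint:ignore mycheck reason" are skipped.
			if maps.Ignore(n, "mycheck") {
				return true
			}
			// ... inspect n here and report diagnostics ...
			return true
		})
	}
	return nil, nil
}
```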
+func (maps Maps) Annotated(n ast.Node, annotation string) bool { + for _, cg := range maps.Comments(n) { + if strings.HasPrefix(strings.TrimSpace(cg.Text()), annotation) { + return true + } + } + return false +} + +// Ignore checks either specified AST node is ignored by the check. +// It follows staticcheck style as the below. +// //lint:ignore Check1[,Check2,...,CheckN] reason +func (maps Maps) Ignore(n ast.Node, check string) bool { + for _, cg := range maps.Comments(n) { + if hasIgnoreCheck(cg, check) { + return true + } + } + return false +} + +// IgnorePos checks either specified postion of AST node is ignored by the check. +// It follows staticcheck style as the below. +// //lint:ignore Check1[,Check2,...,CheckN] reason +func (maps Maps) IgnorePos(pos token.Pos, check string) bool { + for _, cg := range maps.CommentsByPos(pos) { + if hasIgnoreCheck(cg, check) { + return true + } + } + return false +} + +// Deprecated: This function does not work with multiple files. +// CommentsByPosLine can be used instead of CommentsByLine. +// +// CommentsByLine returns correspond a CommentGroup slice to specified line. +func (maps Maps) CommentsByLine(fset *token.FileSet, line int) []*ast.CommentGroup { + for i := range maps { + for n, cgs := range maps[i] { + l := fset.File(n.Pos()).Line(n.Pos()) + if l == line { + return cgs + } + } + } + return nil +} + +// CommentsByPosLine returns correspond a CommentGroup slice to specified line. +func (maps Maps) CommentsByPosLine(fset *token.FileSet, pos token.Pos) []*ast.CommentGroup { + f1 := fset.File(pos) + for i := range maps { + for n, cgs := range maps[i] { + f2 := fset.File(n.Pos()) + if f1 != f2 { + // different file + continue + } + + if f1.Line(pos) == f2.Line(n.Pos()) { + return cgs + } + } + } + return nil +} + +// IgnoreLine checks either specified lineof AST node is ignored by the check. +// It follows staticcheck style as the below. +// //lint:ignore Check1[,Check2,...,CheckN] reason +func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool { + for _, cg := range maps.CommentsByLine(fset, line) { + if hasIgnoreCheck(cg, check) { + return true + } + } + return false +} + +// hasIgnoreCheck returns true if the provided CommentGroup starts with a comment +// of the form "//lint:ignore Check1[,Check2,...,CheckN] reason" and one of the +// checks matches the provided check. The *ast.CommentGroup is checked directly +// rather than using "cg.Text()" because, starting in Go 1.15, the "cg.Text()" call +// no longer returns directive-style comments (see https://github.com/golang/go/issues/37974). 
+func hasIgnoreCheck(cg *ast.CommentGroup, check string) bool { + if !strings.HasPrefix(cg.List[0].Text, "//") { + return false + } + + s := strings.TrimSpace(cg.List[0].Text[2:]) + txt := strings.Split(s, " ") + if len(txt) < 3 || txt[0] != "lint:ignore" { + return false + } + + checks := strings.Split(txt[1], ",") + for i := range checks { + if check == checks[i] { + return true + } + } + return false +} diff --git a/vendor/github.com/gostaticanalysis/comment/go.mod b/vendor/github.com/gostaticanalysis/comment/go.mod new file mode 100644 index 000000000..275568113 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/go.mod @@ -0,0 +1,8 @@ +module github.com/gostaticanalysis/comment + +go 1.12 + +require ( + github.com/google/go-cmp v0.5.1 + golang.org/x/tools v0.0.0-20200820010801-b793a1359eac +) diff --git a/vendor/github.com/gostaticanalysis/comment/go.sum b/vendor/github.com/gostaticanalysis/comment/go.sum new file mode 100644 index 000000000..425807ce1 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/go.sum @@ -0,0 +1,24 @@ +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac h1:DugppSxw0LSF8lcjaODPJZoDzq0ElTGskTst3ZaBkHI= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git 
a/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go b/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go new file mode 100644 index 000000000..9266d9895 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go @@ -0,0 +1,20 @@ +package commentmap + +import ( + "reflect" + + "github.com/gostaticanalysis/comment" + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "commentmap", + Doc: "create comment map", + Run: run, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(comment.Maps{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + return comment.New(pass.Fset, pass.Files), nil +} diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/.reviewdog.yml b/vendor/github.com/gostaticanalysis/forcetypeassert/.reviewdog.yml new file mode 100644 index 000000000..2e243ff73 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/.reviewdog.yml @@ -0,0 +1,8 @@ +runner: + golint: + cmd: golint ./... + errorformat: + - "%f:%l:%c: %m" + level: warning + govet: + cmd: go vet -all . diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/LICENSE b/vendor/github.com/gostaticanalysis/forcetypeassert/LICENSE new file mode 100644 index 000000000..bf7e33db8 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 GoStaticAnalysis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/README.md b/vendor/github.com/gostaticanalysis/forcetypeassert/README.md new file mode 100644 index 000000000..517f69400 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/README.md @@ -0,0 +1,17 @@ +# forcetypeassert + +[![godoc.org][godoc-badge]][godoc] + +`forcetypeassert` finds type assertions which did forcely such as below. 
+ +```go +func f() { + var a interface{} + _ = a.(int) // type assertion must be checked +} +``` + + +[godoc]: https://godoc.org/github.com/gostaticanalysis/forcetypeassert +[godoc-badge]: https://img.shields.io/badge/godoc-reference-4F73B3.svg?style=flat-square&label=%20godoc.org + diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go b/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go new file mode 100644 index 000000000..cdc49e3b5 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go @@ -0,0 +1,67 @@ +package forcetypeassert + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "forcetypeassert", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +const Doc = "forcetypeassert is finds type assertions which did forcely such as below." + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.AssignStmt)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.AssignStmt: + if !hasTypeAssertion(n.Rhs) { + return + } + // if right hand has 2 or more values, assign statement can't assert boolean value which describes type assertion is succeeded + if len(n.Rhs) > 1 { + pass.Reportf(n.Pos(), "right hand must be only type assertion") + return + } + if len(n.Lhs) == 2 { + return + } + + tae, ok := n.Rhs[0].(*ast.TypeAssertExpr) + if !ok { + pass.Reportf(n.Pos(), "right hand is not TypeAssertion") + return + } + if tae.Type == nil { + return + } + pass.Reportf(n.Pos(), "type assertion must be checked") + } + }) + + return nil, nil +} + +func hasTypeAssertion(exprs []ast.Expr) bool { + for _, node := range exprs { + _, ok := node.(*ast.TypeAssertExpr) + if ok { + return true + } + } + return false +} diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/go.mod b/vendor/github.com/gostaticanalysis/forcetypeassert/go.mod new file mode 100644 index 000000000..32b9c14a8 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/go.mod @@ -0,0 +1,5 @@ +module github.com/gostaticanalysis/forcetypeassert + +go 1.12 + +require golang.org/x/tools v0.0.0-20190321232350-e250d351ecad diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/go.sum b/vendor/github.com/gostaticanalysis/forcetypeassert/go.sum new file mode 100644 index 000000000..7a25fbce3 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/go.sum @@ -0,0 +1,6 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad h1:tYrC3aF7wTeS1noni7wCGu94xeMVu0dxOdFufzx/VM8= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/gostaticanalysis/nilerr/LICENSE b/vendor/github.com/gostaticanalysis/nilerr/LICENSE new file mode 100644 index 000000000..bf7e33db8 --- /dev/null +++ 
b/vendor/github.com/gostaticanalysis/nilerr/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 GoStaticAnalysis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gostaticanalysis/nilerr/README.md b/vendor/github.com/gostaticanalysis/nilerr/README.md new file mode 100644 index 000000000..d6b4acf8b --- /dev/null +++ b/vendor/github.com/gostaticanalysis/nilerr/README.md @@ -0,0 +1,41 @@ +# nilerr + +[![pkg.go.dev][gopkg-badge]][gopkg] + +`nilerr` finds code which returns nil even though it checks that error is not nil. + +```go +func f() error { + err := do() + if err != nil { + return nil // miss + } +} +``` + +`nilerr` also finds code which returns error even though it checks that error is nil. + +```go +func f() error { + err := do() + if err == nil { + return err // miss + } +} +``` + +`nilerr` ignores code which has a miss with ignore comment. 
+ +```go +func f() error { + err := do() + if err != nil { + //lint:ignore nilerr reason + return nil // ignore + } +} +``` + + +[gopkg]: https://pkg.go.dev/github.com/gostaticanalysis/nilerr +[gopkg-badge]: https://pkg.go.dev/badge/github.com/gostaticanalysis/nilerr?status.svg diff --git a/vendor/github.com/gostaticanalysis/nilerr/go.mod b/vendor/github.com/gostaticanalysis/nilerr/go.mod new file mode 100644 index 000000000..d4ed85c98 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/nilerr/go.mod @@ -0,0 +1,8 @@ +module github.com/gostaticanalysis/nilerr + +go 1.15 + +require ( + github.com/gostaticanalysis/comment v1.4.1 + golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6 +) diff --git a/vendor/github.com/gostaticanalysis/nilerr/go.sum b/vendor/github.com/gostaticanalysis/nilerr/go.sum new file mode 100644 index 000000000..93690fa9e --- /dev/null +++ b/vendor/github.com/gostaticanalysis/nilerr/go.sum @@ -0,0 +1,34 @@ +github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6 h1:rbvTkL9AkFts1cgI78+gG6Yu1pwaqX6hjSJAatB78E4= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/gostaticanalysis/nilerr/nilerr.go b/vendor/github.com/gostaticanalysis/nilerr/nilerr.go new file mode 100644 index 000000000..787a9e1e9 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/nilerr/nilerr.go @@ -0,0 +1,291 @@ +package nilerr + +import ( + "fmt" + "go/token" + "go/types" + + "github.com/gostaticanalysis/comment" + "github.com/gostaticanalysis/comment/passes/commentmap" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +var Analyzer = &analysis.Analyzer{ + Name: "nilerr", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + commentmap.Analyzer, + }, +} + +const Doc = "nilerr checks returning nil when err is not nil" + +func run(pass *analysis.Pass) (interface{}, error) { + funcs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + cmaps := pass.ResultOf[commentmap.Analyzer].(comment.Maps) + + reportFail := func(v ssa.Value, ret *ssa.Return, format string) { + pos := ret.Pos() + line := getNodeLineNumber(pass, ret) + errLines := getValueLineNumbers(pass, v) + if !cmaps.IgnoreLine(pass.Fset, line, "nilerr") { + var errLineText string + if len(errLines) == 1 { + errLineText = fmt.Sprintf("line %d", errLines[0]) + } else { + errLineText = fmt.Sprintf("lines %v", errLines) + } + pass.Reportf(pos, format, errLineText) + } + } + + for i := range funcs { + for _, b := range funcs[i].Blocks { + if v := binOpErrNil(b, token.NEQ); v != nil { + if ret := isReturnNil(b.Succs[0]); ret != nil { + if !usesErrorValue(b.Succs[0], v) { + reportFail(v, ret, "error is not nil (%s) but it returns nil") + } + } + } else if v := binOpErrNil(b, token.EQL); v != nil { + if len(b.Succs[0].Preds) == 1 { // if there are multiple conditions, this may be false positive + if ret := isReturnError(b.Succs[0], v); ret != nil { + reportFail(v, ret, "error is nil (%s) but it returns error") + } + } + } + + } + } + + return nil, nil +} + +func getValueLineNumbers(pass *analysis.Pass, v ssa.Value) []int { + if phi, ok := v.(*ssa.Phi); ok { + result := make([]int, 0, len(phi.Edges)) + for _, edge := range phi.Edges { + result = append(result, getValueLineNumbers(pass, edge)...) 
+ } + return result + } + + value := v + if extract, ok := value.(*ssa.Extract); ok { + value = extract.Tuple + } + + pos := value.Pos() + return []int{pass.Fset.File(pos).Line(pos)} +} + +func getNodeLineNumber(pass *analysis.Pass, node ssa.Node) int { + pos := node.Pos() + return pass.Fset.File(pos).Line(pos) +} + +var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +func binOpErrNil(b *ssa.BasicBlock, op token.Token) ssa.Value { + if len(b.Instrs) == 0 { + return nil + } + + ifinst, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If) + if !ok { + return nil + } + + binop, ok := ifinst.Cond.(*ssa.BinOp) + if !ok { + return nil + } + + if binop.Op != op { + return nil + } + + if !types.Implements(binop.X.Type(), errType) { + return nil + } + + if !types.Implements(binop.Y.Type(), errType) { + return nil + } + + xIsConst, yIsConst := isConst(binop.X), isConst(binop.Y) + switch { + case !xIsConst && yIsConst: // err != nil or err == nil + return binop.X + case xIsConst && !yIsConst: // nil != err or nil == err + return binop.Y + } + + return nil +} + +func isConst(v ssa.Value) bool { + _, ok := v.(*ssa.Const) + return ok +} + +func isReturnNil(b *ssa.BasicBlock) *ssa.Return { + if len(b.Instrs) == 0 { + return nil + } + + ret, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return) + if !ok { + return nil + } + + errorReturnValues := 0 + for _, res := range ret.Results { + if !types.Implements(res.Type(), errType) { + continue + } + + errorReturnValues++ + v, ok := res.(*ssa.Const) + if !ok { + return nil + } + + if !v.IsNil() { + return nil + } + } + + if errorReturnValues == 0 { + return nil + } + + return ret +} + +func isReturnError(b *ssa.BasicBlock, errVal ssa.Value) *ssa.Return { + if len(b.Instrs) == 0 { + return nil + } + + ret, ok := b.Instrs[len(b.Instrs)-1].(*ssa.Return) + if !ok { + return nil + } + + for _, v := range ret.Results { + if v == errVal { + return ret + } + } + + return nil +} + +func usesErrorValue(b *ssa.BasicBlock, errVal ssa.Value) bool { + for _, instr := range b.Instrs { + if callInstr, ok := instr.(*ssa.Call); ok { + for _, arg := range callInstr.Call.Args { + if isUsedInValue(arg, errVal) { + return true + } + + sliceArg, ok := arg.(*ssa.Slice) + if ok { + if isUsedInSlice(sliceArg, errVal) { + return true + } + } + } + } + } + return false +} + +type ReferrersHolder interface { + Referrers() *[]ssa.Instruction +} + +var _ ReferrersHolder = (ssa.Node)(nil) +var _ ReferrersHolder = (ssa.Value)(nil) + +func isUsedInSlice(sliceArg *ssa.Slice, errVal ssa.Value) bool { + var valueBuf [10]*ssa.Value + operands := sliceArg.Operands(valueBuf[:0]) + + var valuesToInspect []ssa.Value + addValueForInspection := func(value ssa.Value) { + if value != nil { + valuesToInspect = append(valuesToInspect, value) + } + } + + var nodesToInspect []ssa.Node + visitedNodes := map[ssa.Node]bool{} + addNodeForInspection := func(node ssa.Node) { + if !visitedNodes[node] { + visitedNodes[node] = true + nodesToInspect = append(nodesToInspect, node) + } + } + addReferrersForInspection := func(h ReferrersHolder) { + if h == nil { + return + } + + referrers := h.Referrers() + if referrers == nil { + return + } + + for _, r := range *referrers { + if node, ok := r.(ssa.Node); ok { + addNodeForInspection(node) + } + } + } + + for _, operand := range operands { + addReferrersForInspection(*operand) + addValueForInspection(*operand) + } + + for i := 0; i < len(nodesToInspect); i++ { + switch node := nodesToInspect[i].(type) { + case *ssa.IndexAddr: + 
addReferrersForInspection(node) + case *ssa.Store: + addValueForInspection(node.Val) + } + } + + for _, value := range valuesToInspect { + if isUsedInValue(value, errVal) { + return true + } + } + return false +} + +func isUsedInValue(value, lookedFor ssa.Value) bool { + if value == lookedFor { + return true + } + + switch value := value.(type) { + case *ssa.ChangeInterface: + return isUsedInValue(value.X, lookedFor) + case *ssa.MakeInterface: + return isUsedInValue(value.X, lookedFor) + case *ssa.Call: + if value.Call.IsInvoke() { + return isUsedInValue(value.Call.Value, lookedFor) + } + } + + return false +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore new file mode 100644 index 000000000..826caa390 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore @@ -0,0 +1,204 @@ +# Created by .ignore support plugin (hsz.mobi) +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### Kate template +# Swap Files # +.*.kate-swp +.swp.* +### SublimeText template +# cache files for sublime text +*.tmlanguage.cache +*.tmPreferences.cache +*.stTheme.cache + +# workspace files are user-specific +*.sublime-workspace + +# project files should be checked into the repository, unless a significant +# proportion of contributors will probably not be using SublimeText +# *.sublime-project + +# sftp configuration file +sftp-config.json +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Xcode template +# Xcode +# +# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore + +## Build generated +build/ +DerivedData/ + +## Various settings +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata/ + +## Other +*.moved-aside 
+*.xccheckout +*.xcscmblueprint +### Eclipse template + +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development Tooling) +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + + +coverage.txt + +#vendor +vendor/ + +.envrc \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml new file mode 100644 index 000000000..fc198d882 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.13.x + - 1.14.x + - 1.15.x + +env: + global: + - GO111MODULE=on + +script: + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md new file mode 100644 index 000000000..6eeb7e2dc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md @@ -0,0 +1,51 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +Types of changes: +- `Added` for new features. +- `Changed` for changes in existing functionality. +- `Deprecated` for soon-to-be removed features. +- `Removed` for now removed features. +- `Fixed` for any bug fixes. +- `Security` in case of vulnerabilities. + +## [Unreleased] + +### Added + +- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f) + +## [v1.1.0] - 2019-09-12 +### Added +- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules. 
+- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs) +- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao) +- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad) +- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd) +- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok) +- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled + +### Deprecated +- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42) +- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of . + +### Fixed +- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst) +- Numerious documentation fixes. + +## v1.0.0 - 2018-05-08 +### Added +- grpc_auth +- grpc_ctxtags +- grpc_zap +- grpc_logrus +- grpc_opentracing +- grpc_retry +- grpc_validator +- grpc_recovery + +[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD +[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md new file mode 100644 index 000000000..dd52ab893 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md @@ -0,0 +1,20 @@ +# Contributing + +We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍. + +Fork, then clone the repo: + +```bash +git clone git@github.com:your-username/go-grpc-middleware.git +``` + +Before checking in please run the following: + +```bash +make all +``` + +This will `vet`, `fmt`, regenerate documentation and run all tests. + + +Push to your fork and open a pull request. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE new file mode 100644 index 000000000..b2b065037 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md new file mode 100644 index 000000000..814e15517 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md @@ -0,0 +1,86 @@ +# Go gRPC Middleware + +[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware) +[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware) +[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware) +[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge) +[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware) +[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) +[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status) +[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE) + +[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities. + +## Middleware + +[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for +Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) +that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement +common patterns: auth, logging, message, validation, retries or monitoring. + +These are generic building blocks that make it easy to build multiple microservices easily. +The purpose of this repository is to act as a go-to point for such reusable functionality. It contains +some of them itself, but also will link to useful external repos. 
+ +`grpc_middleware` itself provides support for chaining interceptors, here's an example: + +```go +import "github.com/grpc-ecosystem/go-grpc-middleware" + +myServer := grpc.NewServer( + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + grpc_recovery.StreamServerInterceptor(), + grpc_ctxtags.StreamServerInterceptor(), + grpc_opentracing.StreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor, + grpc_zap.StreamServerInterceptor(zapLogger), + grpc_auth.StreamServerInterceptor(myAuthFunction), + )), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + grpc_recovery.UnaryServerInterceptor(), + grpc_ctxtags.UnaryServerInterceptor(), + grpc_opentracing.UnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor, + grpc_zap.UnaryServerInterceptor(zapLogger), + grpc_auth.UnaryServerInterceptor(myAuthFunction), + )), +) +``` + +## Interceptors + +*Please send a PR to add new interceptors or middleware to this list* + +#### Auth + * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware + +#### Logging + * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body + * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. + * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. + * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers. + * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe). + +#### Monitoring + * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware + * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors + * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags + +#### Client + * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware + +#### Server + * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options + * [`grpc_recovery`](recovery/) - turn panics into gRPC errors + * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter + + +## Status + +This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io). + +Additional tooling will be added, and contributions are welcome. + +## License + +`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/auth.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/auth.go new file mode 100644 index 000000000..a7e2890ee --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/auth.go @@ -0,0 +1,68 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_auth + +import ( + "context" + + "github.com/grpc-ecosystem/go-grpc-middleware" + "google.golang.org/grpc" +) + +// AuthFunc is the pluggable function that performs authentication. 
+// +// The passed in `Context` will contain the gRPC metadata.MD object (for header-based authentication) and +// the peer.Peer information that can contain transport-based credentials (e.g. `credentials.AuthInfo`). +// +// The returned context will be propagated to handlers, allowing user changes to `Context`. However, +// please make sure that the `Context` returned is a child `Context` of the one passed in. +// +// If error is returned, its `grpc.Code()` will be returned to the user as well as the verbatim message. +// Please make sure you use `codes.Unauthenticated` (lacking auth) and `codes.PermissionDenied` +// (authed, but lacking perms) appropriately. +type AuthFunc func(ctx context.Context) (context.Context, error) + +// ServiceAuthFuncOverride allows a given gRPC service implementation to override the global `AuthFunc`. +// +// If a service implements the AuthFuncOverride method, it takes precedence over the `AuthFunc` method, +// and will be called instead of AuthFunc for all method invocations within that service. +type ServiceAuthFuncOverride interface { + AuthFuncOverride(ctx context.Context, fullMethodName string) (context.Context, error) +} + +// UnaryServerInterceptor returns a new unary server interceptors that performs per-request auth. +func UnaryServerInterceptor(authFunc AuthFunc) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + var newCtx context.Context + var err error + if overrideSrv, ok := info.Server.(ServiceAuthFuncOverride); ok { + newCtx, err = overrideSrv.AuthFuncOverride(ctx, info.FullMethod) + } else { + newCtx, err = authFunc(ctx) + } + if err != nil { + return nil, err + } + return handler(newCtx, req) + } +} + +// StreamServerInterceptor returns a new unary server interceptors that performs per-request auth. +func StreamServerInterceptor(authFunc AuthFunc) grpc.StreamServerInterceptor { + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + var newCtx context.Context + var err error + if overrideSrv, ok := srv.(ServiceAuthFuncOverride); ok { + newCtx, err = overrideSrv.AuthFuncOverride(stream.Context(), info.FullMethod) + } else { + newCtx, err = authFunc(stream.Context()) + } + if err != nil { + return err + } + wrapped := grpc_middleware.WrapServerStream(stream) + wrapped.WrappedContext = newCtx + return handler(srv, wrapped) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/doc.go new file mode 100644 index 000000000..0550f023e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/doc.go @@ -0,0 +1,20 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_auth` a generic server-side auth middleware for gRPC. + +Server Side Auth Middleware + +It allows for easy assertion of `:authorization` headers in gRPC calls, be it HTTP Basic auth, or +OAuth2 Bearer tokens. + +The middleware takes a user-customizable `AuthFunc`, which can be customized to verify and extract +auth information from the request. The extracted information can be put in the `context.Context` of +handlers downstream for retrieval. + +It also allows for per-service implementation overrides of `AuthFunc`. See `ServiceAuthFuncOverride`. + +Please see examples for simple examples of use. 
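+
+As a rough, illustrative sketch only (the `parseToken` helper and the "user_claims" key below are
+placeholder assumptions, not part of this package), an `AuthFunc` built on top of `AuthFromMD`
+could look like:
+
+	func exampleAuthFunc(ctx context.Context) (context.Context, error) {
+		// Extract the bearer token from the ":authorization" metadata header.
+		token, err := grpc_auth.AuthFromMD(ctx, "bearer")
+		if err != nil {
+			return nil, err
+		}
+		// parseToken is hypothetical; substitute real credential validation here.
+		claims, err := parseToken(token)
+		if err != nil {
+			return nil, status.Error(codes.Unauthenticated, "invalid auth token")
+		}
+		// Return a child context of ctx so existing context values are preserved.
+		return context.WithValue(ctx, "user_claims", claims), nil
+	}
+
+The resulting function is then passed to `UnaryServerInterceptor` or `StreamServerInterceptor`
+when constructing the server.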
+*/ +package grpc_auth diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/metadata.go new file mode 100644 index 000000000..d386fcaf5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/auth/metadata.go @@ -0,0 +1,37 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_auth + +import ( + "context" + "strings" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + headerAuthorize = "authorization" +) + +// AuthFromMD is a helper function for extracting the :authorization header from the gRPC metadata of the request. +// +// It expects the `:authorization` header to be of a certain scheme (e.g. `basic`, `bearer`), in a +// case-insensitive format (see rfc2617, sec 1.2). If no such authorization is found, or the token +// is of wrong scheme, an error with gRPC status `Unauthenticated` is returned. +func AuthFromMD(ctx context.Context, expectedScheme string) (string, error) { + val := metautils.ExtractIncoming(ctx).Get(headerAuthorize) + if val == "" { + return "", status.Errorf(codes.Unauthenticated, "Request unauthenticated with "+expectedScheme) + } + splits := strings.SplitN(val, " ", 2) + if len(splits) < 2 { + return "", status.Errorf(codes.Unauthenticated, "Bad authorization string") + } + if !strings.EqualFold(splits[0], expectedScheme) { + return "", status.Errorf(codes.Unauthenticated, "Request unauthenticated with "+expectedScheme) + } + return splits[1], nil +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go new file mode 100644 index 000000000..ea3738b89 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go @@ -0,0 +1,120 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Server Interceptor chaining middleware. + +package grpc_middleware + +import ( + "context" + + "google.golang.org/grpc" +) + +// ChainUnaryServer creates a single interceptor out of a chain of many interceptors. +// +// Execution is done in left-to-right order, including passing of context. +// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three +// will see context changes of one and two. +func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { + n := len(interceptors) + + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler { + return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { + return currentInter(currentCtx, currentReq, info, currentHandler) + } + } + + chainedHandler := handler + for i := n - 1; i >= 0; i-- { + chainedHandler = chainer(interceptors[i], chainedHandler) + } + + return chainedHandler(ctx, req) + } +} + +// ChainStreamServer creates a single interceptor out of a chain of many interceptors. +// +// Execution is done in left-to-right order, including passing of context. +// For example ChainUnaryServer(one, two, three) will execute one before two before three. +// If you want to pass context between interceptors, use WrapServerStream. 
+func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor { + n := len(interceptors) + + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler { + return func(currentSrv interface{}, currentStream grpc.ServerStream) error { + return currentInter(currentSrv, currentStream, info, currentHandler) + } + } + + chainedHandler := handler + for i := n - 1; i >= 0; i-- { + chainedHandler = chainer(interceptors[i], chainedHandler) + } + + return chainedHandler(srv, ss) + } +} + +// ChainUnaryClient creates a single interceptor out of a chain of many interceptors. +// +// Execution is done in left-to-right order, including passing of context. +// For example ChainUnaryClient(one, two, three) will execute one before two before three. +func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { + n := len(interceptors) + + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker { + return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...) + } + } + + chainedInvoker := invoker + for i := n - 1; i >= 0; i-- { + chainedInvoker = chainer(interceptors[i], chainedInvoker) + } + + return chainedInvoker(ctx, method, req, reply, cc, opts...) + } +} + +// ChainStreamClient creates a single interceptor out of a chain of many interceptors. +// +// Execution is done in left-to-right order, including passing of context. +// For example ChainStreamClient(one, two, three) will execute one before two before three. +func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor { + n := len(interceptors) + + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer { + return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) { + return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...) + } + } + + chainedStreamer := streamer + for i := n - 1; i >= 0; i-- { + chainedStreamer = chainer(interceptors[i], chainedStreamer) + } + + return chainedStreamer(ctx, desc, cc, method, opts...) + } +} + +// Chain creates a single interceptor out of a chain of many interceptors. +// +// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors. +// Basically syntactic sugar. +func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption { + return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...)) +} + +// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors. +// Basically syntactic sugar. 
+func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption { + return grpc.StreamInterceptor(ChainStreamServer(interceptors...)) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go new file mode 100644 index 000000000..718e10046 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go @@ -0,0 +1,69 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools. + +Middleware + +gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the +upstream gRPC codebase is relatively bare bones. + +This package, and most of its child packages provides commonly needed middleware for gRPC: +client-side interceptors for retires, server-side interceptors for input validation and auth, +functions for chaining said interceptors, metadata convenience methods and more. + +Chaining + +By default, gRPC doesn't allow one to have more than one interceptor either on the client nor on +the server side. `grpc_middleware` provides convenient chaining methods + +Simple way of turning a multiple interceptors into a single interceptor. Here's an example for +server chaining: + + myServer := grpc.NewServer( + grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)), + ) + +These interceptors will be executed from left to right: logging, monitoring and auth. + +Here's an example for client side chaining: + + clientConn, err = grpc.Dial( + address, + grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)), + grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)), + ) + client = pb_testproto.NewTestServiceClient(clientConn) + resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) + +These interceptors will be executed from left to right: monitoring and then retry logic. + +The retry interceptor will call every interceptor that follows it whenever when a retry happens. + +Writing Your Own + +Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting +bit exposing common data to handlers (and other middleware), similarly to HTTP Middleware design. +For example, you may want to pass the identity of the caller from the auth interceptor all the way +to the handling function. + +For example, a client side interceptor example for auth looks like: + + func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + newCtx := context.WithValue(ctx, "user_id", "john@example.com") + return handler(newCtx, req) + } + +Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within +the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is +needed. 
For example: + + func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + newStream := grpc_middleware.WrapServerStream(stream) + newStream.WrappedContext = context.WithValue(ctx, "user_id", "john@example.com") + return handler(srv, newStream) + } +*/ +package grpc_middleware diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod new file mode 100644 index 000000000..7dc62e5f7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod @@ -0,0 +1,22 @@ +module github.com/grpc-ecosystem/go-grpc-middleware + +require ( + github.com/go-kit/kit v0.9.0 + github.com/go-logfmt/logfmt v0.4.0 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.3.3 + github.com/opentracing/opentracing-go v1.1.0 + github.com/pkg/errors v0.8.1 // indirect + github.com/sirupsen/logrus v1.4.2 + github.com/stretchr/testify v1.4.0 + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 // indirect + google.golang.org/grpc v1.29.1 +) + +go 1.14 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum new file mode 100644 index 000000000..ee522cdf6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.sum @@ -0,0 +1,122 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215 h1:0Uz5jLJQioKgVozXa1gzGbzYxbb/rhQEVvSWxzw5oUs= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile new file mode 100644 index 000000000..b18d2d2bb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile @@ -0,0 +1,17 @@ +SHELL=/bin/bash + +GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) + +all: vet fmt test + +fmt: + go fmt $(GOFILES_NOVENDOR) + +vet: + # do not check lostcancel, they are intentional. 
+ go vet -lostcancel=false $(GOFILES_NOVENDOR) + +test: vet + ./scripts/test_all.sh + +.PHONY: all test diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go new file mode 100644 index 000000000..ad35f09a8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go @@ -0,0 +1,44 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return waitBetween + } +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(waitBetween, jitterFraction) + } +} + +// BackoffExponential produces increasing intervals for each attempt. +// +// The scalar is multiplied times 2 raised to the current attempt. So the first +// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. +func BackoffExponential(scalar time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) + } +} + +// BackoffExponentialWithJitter creates an exponential backoff like +// BackoffExponential does, but adds jitter. +func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go new file mode 100644 index 000000000..afd924a14 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -0,0 +1,25 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_retry` provides client-side request retry logic for gRPC. + +Client-Side Request Retry Interceptor + +It allows for automatic retry, inside the generated gRPC code of requests based on the gRPC status +of the reply. It supports unary (1:1), and server stream (1:n) requests. + +By default the interceptors *are disabled*, preventing accidental use of retries. You can easily +override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.: + + myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) + +Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms +linear backoff with 10% jitter. + +For chained interceptors, the retry interceptor will call every interceptor that follows it +whenever when a retry happens. + +Please see examples for more advanced use. 
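As a further, minimal sketch of wiring the interceptors at dial time (the target
address, the use of `grpc.WithInsecure()` and the imports of `time`,
`google.golang.org/grpc` and this package as `grpc_retry` are assumptions of the
example, not part of the package):

	retryOpts := []grpc_retry.CallOption{
		grpc_retry.WithMax(3),                                                         // allow up to 3 attempts per call
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100 * time.Millisecond)), // waits of 100ms, 200ms, 400ms, ...
		grpc_retry.WithPerRetryTimeout(2 * time.Second),                               // each attempt gets its own deadline
	}
	conn, err := grpc.Dial(
		"example-service:8443",
		grpc.WithInsecure(), // sketch only; configure real credentials in production
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)),
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
	)
	if err != nil {
		// handle the dial error; conn is nil in that case
	}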
+*/ +package grpc_retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go new file mode 100644 index 000000000..7a633e293 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go @@ -0,0 +1,142 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // DefaultRetriableCodes is a set of well known types gRPC codes that should be retri-able. + // + // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, have been reached. + // `Unavailable` means that system is currently unavailable and the client should retry again. + DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable} + + defaultOptions = &options{ + max: 0, // disabled + perCallTimeout: 0, // disabled + includeHeader: true, + codes: DefaultRetriableCodes, + backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + }), + } +) + +// BackoffFunc denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type BackoffFunc func(attempt uint) time.Duration + +// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. The context can be used to extract request scoped metadata and context values. +type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration + +// Disable disables the retry behaviour on this call, or this interceptor. +// +// Its semantically the same to `WithMax` +func Disable() CallOption { + return WithMax(0) +} + +// WithMax sets the maximum number of retries on this call, or this interceptor. +func WithMax(maxRetries uint) CallOption { + return CallOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc` used to control time between retries. +func WithBackoff(bf BackoffFunc) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return bf(attempt) + }) + }} +} + +// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. +func WithBackoffContext(bf BackoffFuncContext) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +// WithCodes sets which codes should be retried. +// +// Please *use with care*, as you may be retrying non-idempotent calls. +// +// You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these. 
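//
// A short sketch of widening the retriable codes for a single call (the
// generated client `client`, its `Ping` method, the request value `req` and the
// `codes` package from google.golang.org/grpc/codes are assumptions of the example):
//
//	resp, err := client.Ping(ctx, req,
//		grpc_retry.WithMax(3),
//		grpc_retry.WithCodes(codes.Unavailable, codes.Aborted),
//	)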
+func WithCodes(retryCodes ...codes.Code) CallOption { + return CallOption{applyFunc: func(o *options) { + o.codes = retryCodes + }} +} + +// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor. +// +// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation +// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call. +// +// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Seconds), each +// of the retry calls (including the initial one) will have a deadline of now + 3s. +// +// A value of 0 disables the timeout overrides completely and returns to each retry call using the +// parent `context.Deadline`. +// +// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried. +func WithPerRetryTimeout(timeout time.Duration) CallOption { + return CallOption{applyFunc: func(o *options) { + o.perCallTimeout = timeout + }} +} + +type options struct { + max uint + perCallTimeout time.Duration + includeHeader bool + codes []codes.Code + backoffFunc BackoffFuncContext +} + +// CallOption is a grpc.CallOption that is local to grpc_retry. +type CallOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options { + if len(callOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range callOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) { + for _, opt := range callOptions { + if co, ok := opt.(CallOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go new file mode 100644 index 000000000..62d831201 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -0,0 +1,329 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + AttemptMetadataKey = "x-retry-attempty" +) + +// UnaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. 
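// The merge just above is what lets per-call options override the interceptor
// defaults; a minimal sketch of such an override (the generated client `client`
// and the request value `req` are assumptions of the example):
//
//	client.Ping(ctx, req, grpc_retry.WithMax(5), grpc_retry.WithPerRetryTimeout(time.Second))
//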
+ if callOpts.max == 0 { + return invoker(parentCtx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return err + } + callCtx := perCallContext(parentCtx, callOpts, attempt) + lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + if lastErr == nil { + return nil + } + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. + return lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(parentCtx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + } + + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return nil, err + } + callCtx := perCallContext(parentCtx, callOpts, 0) + + var newStreamer grpc.ClientStream + newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + if lastErr == nil { + retryingStreamer := &serverStreamingRetryingStream{ + ClientStream: newStreamer, + callOpts: callOpts, + parentCtx: parentCtx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } + + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. 
+ return nil, lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return nil, lastErr + } + } + return nil, lastErr + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. +type serverStreamingRetryingStream struct { + grpc.ClientStream + bufferedSends []interface{} // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed + parentCtx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + // We start off from attempt 1, because zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { + return err + } + callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) + newStream, err := s.reestablishStreamAndResendBuffer(callCtx) + if err != nil { + // Retry dial and transport errors of establishing stream as grpc doesn't retry. 
+ if isRetriable(err, s.callOpts) { + continue + } + return err + } + + s.setStream(newStream) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.parentCtx.Err() != nil { + logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) + return false, err + } else if s.callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(s.parentCtx, "grpc_retry context error from retry call") + return true, err + } + } + return isRetriable(err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer( + callCtx context.Context, +) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + logTrace(callCtx, "grpc_retry failed resending message: %v", err) + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { + var waitTime time.Duration = 0 + if attempt > 0 { + waitTime = callOpts.backoffFunc(parentCtx, attempt) + } + if waitTime > 0 { + logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) + timer := time.NewTimer(waitTime) + select { + case <-parentCtx.Done(): + timer.Stop() + return contextErrToGrpcErr(parentCtx.Err()) + case <-timer.C: + } + } + return nil +} + +func isRetriable(err error, callOpts *options) bool { + errCode := status.Code(err) + if isContextError(err) { + // context errors are not retriable based on user settings. 
+ return false + } + for _, code := range callOpts.codes { + if code == errCode { + return true + } + } + return false +} + +func isContextError(err error) bool { + code := status.Code(err) + return code == codes.DeadlineExceeded || code == codes.Canceled +} + +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { + ctx := parentCtx + if callOpts.perCallTimeout != 0 { + ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + } + if attempt > 0 && callOpts.includeHeader { + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + default: + return status.Error(codes.Unknown, err.Error()) + } +} + +func logTrace(ctx context.Context, format string, a ...interface{}) { + tr, ok := trace.FromContext(ctx) + if !ok { + return + } + tr.LazyPrintf(format, a...) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png new file mode 100644 index 0000000000000000000000000000000000000000..cc8f9a68a9368ca56dab09e6902ae41ecb8356b8 GIT binary patch literal 5088 zcmZ|Tc{Ei2`vCAe3x;e-vX2jlNl|9+lk+JWL6qTqDN!GHjk+C(5ghC|Q z89rp+TE@P8@A&Wc$M1E{ooDX7=b1U@eeXQaGxw31i7o>jCmjF)4ElOn=3p!VH&$k9 z@Kt}PY#$78cXeZR0C@NG+>s*{xXd;33>H00c<_z^VfPD82-M zOW4=V*OkE&)J}%FT3`h2YqrFIfflQ09RNm~vkfcI7zMte3Dh^%rkR6tA+FNiG=De_ z04%BcTIx4LCpPTDZn9dAoZbov8v4dCE&J634Y7Qp&X3^i)B1M(ADWDkSV*iN7kd)> zqd4~Y*H}BDdBJ$Wu0|IuL+0RbEjvkFo(wIsbO_aTEJU62)m;xvI%)0jP3x;w2nU<) z@2u*WC30Bt>TuK+>2UK948xvI_!&&wc(kmn$)(p4^0&uYvw!h_KvchE7vw+xNJ63p z3Va*cnVhjFDE_dqa(r-66Cwh$yDuYa9*+6AtDq3Cwa`yfL=H^(#{~0H2xDMeh>qp- z^0}j7#;SeYqT9-`^VAd*dw##F3SPruUt0PBV_h~NfP4F@M5ydvA!>`oqlOxml8Y8r zzfJG|=l*wk%qpd)pteM8&76R;qWwW9TCpcwP@;TaNh#IBX&G4&UHg$vw7J6|@A&?( zsfCrJb_kZ;Db(IIm_invBB-A-P@PNa5ztNMR`CDXx!apYBH{6GbYWa_$-D%2rihp| ziumC^e*@} z5o=w0)5UGOMz$zNTcdDaT%=!s;j^5_|Ht17GT%whqFOCH|R>^DXD zMAt}!?X}qW_^PT`uTGwxI&3RM$cnB1cIFcXO+WPqH??+8`FYhm@jsCz)QXkD-GBF8Qf68r3Zu%CeRqd`Zn=g=d52GkeC(9`Y}Fe(j(-f zIIZf6-pfd_l$3qrk^7gQy|S@j;}~za9XDL8R95EYEh-vYQPI=WXN2p%kf^|&j_&P6 zs)~ui`m@W+6oct&8ffAfV2nJElL&#djZrO?l{L?@_rQ9^QRmL|>*P?_w5tocu)x63 z*$E_QGTTYa&PjQtX+h1>DmTz`cKK#kSm_*o&!q}?e8m02-kDkU;FAs*spv?EP2xh? z8yP_HzSR|dpMmJ@aw0K})!{!4WbUnUoT(Zi1a+<6{YWx1SL@@Fk^-dM^6=o~KPI%( z+~6B&H-LeG)7VG+SsJ;E%XjyYFVTVXHO#B;RyQ&## z@L;7Qc6O3T9k8(m-`{i6?zlq3#Ke`0w}Q48deMR8w10au>Nc*5R%OW{)g;8;Ru>yV zHc3%`ej-l+AtPqGViPJxq=fkKswl4DzQ$wd$zC@yW~v5aR%&DW!a(+hi2)t0bqkwS z4RKmUf#Y+3Rpm6kax2t7c!wMq$lZ9GI&QpTbXUy;yd#Iti1`&2Q@}2sL0_jh?;y!e zPpvrsaqi)ecI$h26^1xR0Ylv5L)`m{%`{36i~jlfIoLtqWoK{)6ya#0RIm`@k(CzZ z!(P@Jt~FtQ7r$o7!J5vX?J0Qq*b+OP{AS?echqR%5)#~uv-8rCjnyc#J2N8|H(>N! 
zI}_f@ypq8c#Y^{TiKc2QVJTVX^aSU-|Awj@a;Qv<$VxI_5iKuo@q>NyQc=lz1w8uH z72gdr$&LOsyL_GRvg_QZuUX;?>+4Wh>|_HMr-31)exkDw&&Dp>H<{H9`}2qO@m4Er zr1HWe@Z!LsjJj9g&CNzdJ3FKe&I;9Ozl~SsVT<$*zG_~q`a2b$2pZ_m?Pe&u-psIvNqrE7IY+BJO(JCuY8Qd0kALDo~MNUruFUps0kp?Xp@ZQ>(NTrg)<2ilGn-s%qyqC*?ACXe<52jCC{+^x<` zudRfOGkjvmR+(W$qRFl}Vz1`ZPs#!6<4FNwIfMI>Dr}mg%z>bzfI6MQy+4x!Tb!qW zY3Wcv|2>1#c{AuCgZ7}fQsdBmkLny-jej`7zIwDyt9Sl)sIt9jg4Fi()+IKI+KoGH z(Z>hdYbuXg<;V-dmoCK;hc1jY`b{;eL;y52DQN}}-lqi^46Z`#(a$~(;^buiU9hnT zU3g??hg)6k@2{1bO+=NIeIFX%dD2xSQOCiDprN@3EQ`ljFmZ8Z_Whx1pOVW|RPt0% z%w5DT2LU%w%<>9;P3itR0@{#(#m1KIZrHg?t$*mU^}ox9zsK)@y&NxF|4OJJc4df1 zC^at5WK=1i7S2MO%M=)BcGL0_ddp*Q-Z%v}_<2m%6msvG0R$(gT~M5)MDSBbDP)MP zOt&IqFqos02t$R^9}W)E|1fs<_hYC76;t(#-$?WHo**>%Br`qC8lbq*)B@>@?%*0I@&nN;MQ%13cLLdrt8;La^(5zc-hS1AWflLodNPhs z$(YuJZm?fFIhpkLWr7!LsB{=@^|E2N!kXB!MNM60H$Id@np?5nSVd7s(hayoo()&Y zXg`XpHDCw3-4gtc<1M8gKTx4D=VL~|p?PQ3A#*x4jnV*FUn&GF_GL;(^jtLzx^sp& zK=HKFUCKm>q9rGp)E0Aka(p;WpZ?`FM-qV8wy}tu! z9i6+IwXSHl#g!rNQRrxuN+|X}C^ItVmqVz0P0-qS-P*`Drn*3*P(%bOWP}Z3perz4 zo1Pjwu&$e$c6JIsDE(kpC?W>$N)8HUMC9$piyg~E;InKoV%Q~<8$(~S(7FkOC4)eB z@{6>{L-AiblXqfN{P=B8erZ~dKNpA_Enb2bX%ZU3b<`GHTUyYPPH>-rPmj>r+J=UX z|6teB4T6ppCH>zTEpC}^5sY7Za4^XP(LJ!MvYKj$G8K4Lym4{I4Jm4zK^X70n`th* zNos7LxXkL`#K?$=(tDUXG_=_6un;~pzi3=on+N>*{rrp0Q|gZ$Goj91^k^|&xn#j{ z|J5IIGxQ7_`eU7)be+TH#JNs;`*KT|t1CDJQUUrClNaD-HV1ot>X4eycMZF%!{GG_ z_&Z!)@u9M^osSJhr)_KNEP!V2NN8w)IlXIB(}xBu!r@|8MDo~aXt+hofi0svEGUG6 zCgqIS%e@%d^}XNo-A|G3OmWRjR{9jQErB!dJ%@^nv&K63o9rYqdLa>KEUL;W5~mN1 zj451cQx%TJTSB%-goxN43Ilvh5h0F-iB~z4+3LN=V1afbj!Et4R|k_C9FG|X2XZ!i z?&fl@sgaHl+f$@z3h_QIFR!{faVdAEEjOL|!{6lG8Svh{e*MPKLhpJ2p}yK8peZSd zD4hFREjNcT^3NCYN_R3uktaLzHo@jbz$SOzphvxFPR$=5AHh;HAWJq|@x^O;%#?h< zqTCFi%$w0W4>53!XwiT~k_VdtL@lAXV&QCcMWq0F%s7u%y}cTH6!ogEgk||f1kSgW zmhRwiW_-X$^<=LvbC(2@;e1|F_#i5Jy7_|e+PwY~H49>Y@_;)dZJ!%hmqbM6 zFn|oF`;zx)ctf?^y_K!{%LLV^g46P;RAJ>D4am`o=_Qn#3y_&PRK%kQ|GYrX*T0=@ z;2PGrTLCtRK~n!x10K3X2w5L%*@Aj{*13cA{>zu>Xk<)Zj+don`S8L5TKj>`ZL81t z8x|v^=xveNRS{OOni)T{tO8iIFK`8@#GI5@3kgA8T{#(y^Z3teLmoWv-v_4^t@}5d zL$B8O*gDLH%s@Dk21_hp{$V##?HJ|d^$L@dk2z>K2fQp})cdmJd`Ta|j*HE*O^SzB z#x941NOBbE34$Xk+L=ON_H5++DDiICtKOy(ZwDRR&3uw(Z6lreFU;-V0rJdrY@g$W zRBNeY=>)@yxJi(8#LH-Qm1gI>)2<&AHa1?801GWaw`jO$LFfXSnM}Tju)8gu@twfP zoNsXsjyCM=NZ@2ZBa?u^!BnBnE~qbIVUw4K;pk-d+BHAF^k^@iYgw`V{mqK0w0SG= zS?pfqJwS^eiIfPLHB4_`eRFehX>A*4#KvjR=5=!MlEnQ4IXPHq+whseVvzZIEPd>n*c0HEB^=b!T)9QYs_=a=?8XoCa14=Hoi@)!d!C3hU}x zNiI;A+Gos$QPiTNS-ek9czB-jawL&S(&Fj^5-F^7v;|A{0`nQ^>2pqTv6T()`~rVs zfozjPsVqoRQCZJ-Un6d*4R4v<&B1xyn8;6V z_@5+XbLzAR)tTtNkV~=sx!;hur~c>cV>Wyz?6{lDo6_rS*%QgIK@+5n!#;|{wB)KyB4U|;DLJPP1C@@FzM@~ctV z`mHaSe&=SWiri9zfq#_FkUp?_PmRjf*3^_S zLE;YeaZZM+`O42Pr<>#nI#GKOILrA)zKm3Cy!7h;|7BwR5gE*}62^niPug*NjKT`} z3JP|)qN41kp6 zRe5nq8F4AOo02k$G75^43gE7yq@<=ga_s*k`1rbbx`qA!2~ym}E?|NrQ|-94SX#jjnJm)CTa(>D^oW*K(PRrb$m?jo2A=xdv3 JRcScf`yb{=X{rDK literal 0 HcmV?d00001 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go new file mode 100644 index 000000000..0da1658bb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go @@ -0,0 +1,78 @@ +package grpc_ctxtags + +import ( + "context" +) + +type ctxMarker struct{} + +var ( + // ctxMarkerKey is the Context value marker used by *all* logging middleware. + // The logging middleware object must interf + ctxMarkerKey = &ctxMarker{} + // NoopTags is a trivial, minimum overhead implementation of Tags for which all operations are no-ops. 
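	// As an illustration, Extract on a context that never passed through the
	// tags interceptors returns these no-op tags, so the following sketch is
	// safe even when the middleware is not installed (the value is simply dropped):
	//
	//	grpc_ctxtags.Extract(ctx).Set("user.id", "42")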
+ NoopTags = &noopTags{} +) + +// Tags is the interface used for storing request tags between Context calls. +// The default implementation is *not* thread safe, and should be handled only in the context of the request. +type Tags interface { + // Set sets the given key in the metadata tags. + Set(key string, value interface{}) Tags + // Has checks if the given key exists. + Has(key string) bool + // Values returns a map of key to values. + // Do not modify the underlying map, please use Set instead. + Values() map[string]interface{} +} + +type mapTags struct { + values map[string]interface{} +} + +func (t *mapTags) Set(key string, value interface{}) Tags { + t.values[key] = value + return t +} + +func (t *mapTags) Has(key string) bool { + _, ok := t.values[key] + return ok +} + +func (t *mapTags) Values() map[string]interface{} { + return t.values +} + +type noopTags struct{} + +func (t *noopTags) Set(key string, value interface{}) Tags { + return t +} + +func (t *noopTags) Has(key string) bool { + return false +} + +func (t *noopTags) Values() map[string]interface{} { + return nil +} + +// Extracts returns a pre-existing Tags object in the Context. +// If the context wasn't set in a tag interceptor, a no-op Tag storage is returned that will *not* be propagated in context. +func Extract(ctx context.Context) Tags { + t, ok := ctx.Value(ctxMarkerKey).(Tags) + if !ok { + return NoopTags + } + + return t +} + +func SetInContext(ctx context.Context, tags Tags) context.Context { + return context.WithValue(ctx, ctxMarkerKey, tags) +} + +func NewTags() Tags { + return &mapTags{values: make(map[string]interface{})} +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go new file mode 100644 index 000000000..960638d0f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go @@ -0,0 +1,22 @@ +/* +`grpc_ctxtags` adds a Tag object to the context that can be used by other middleware to add context about a request. + +Request Context Tags + +Tags describe information about the request, and can be set and used by other middleware, or handlers. Tags are used +for logging and tracing of requests. Tags are populated both upwards, *and* downwards in the interceptor-handler stack. + +You can automatically extract tags (in `grpc.request.`) from request payloads. + +For unary and server-streaming methods, pass in the `WithFieldExtractor` option. For client-streams and bidirectional-streams, you can +use `WithFieldExtractorForInitialReq` which will extract the tags from the first message passed from client to server. +Note the tags will not be modified for subsequent requests, so this option only makes sense when the initial message +establishes the meta-data for the stream. + +If a user doesn't use the interceptors that initialize the `Tags` object, all operations following from an `Extract(ctx)` +will be no-ops. This is to ensure that code doesn't panic if the interceptors weren't used. 
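A minimal wiring sketch (the server construction, the `pingService` handler and its
`pb` message types are assumptions of this example; the interceptors and options are
the ones defined in this package):

	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_ctxtags.UnaryServerInterceptor(
			grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor),
		)),
		grpc.StreamInterceptor(grpc_ctxtags.StreamServerInterceptor(
			grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor),
		)),
	)

Handlers can then attach further tags to the request context:

	func (s *pingService) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
		grpc_ctxtags.Extract(ctx).Set("custom.handler", "ping")
		return &pb.PingResponse{}, nil
	}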
+ +Tags fields are typed, and shallow and should follow the OpenTracing semantics convention: +https://github.com/opentracing/specification/blob/master/semantic_conventions.md +*/ +package grpc_ctxtags diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go new file mode 100644 index 000000000..a4073ab49 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go @@ -0,0 +1,85 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_ctxtags + +import ( + "reflect" +) + +// RequestFieldExtractorFunc is a user-provided function that extracts field information from a gRPC request. +// It is called from tags middleware on arrival of unary request or a server-stream request. +// Keys and values will be added to the context tags of the request. If there are no fields, you should return a nil. +type RequestFieldExtractorFunc func(fullMethod string, req interface{}) map[string]interface{} + +type requestFieldsExtractor interface { + // ExtractRequestFields is a method declared on a Protobuf message that extracts fields from the interface. + // The values from the extracted fields should be set in the appendToMap, in order to avoid allocations. + ExtractRequestFields(appendToMap map[string]interface{}) +} + +// CodeGenRequestFieldExtractor is a function that relies on code-generated functions that export log fields from requests. +// These are usually coming from a protoc-plugin that generates additional information based on custom field options. +func CodeGenRequestFieldExtractor(fullMethod string, req interface{}) map[string]interface{} { + if ext, ok := req.(requestFieldsExtractor); ok { + retMap := make(map[string]interface{}) + ext.ExtractRequestFields(retMap) + if len(retMap) == 0 { + return nil + } + return retMap + } + return nil +} + +// TagBasedRequestFieldExtractor is a function that relies on Go struct tags to export log fields from requests. +// These are usually coming from a protoc-plugin, such as Gogo protobuf. +// +// message Metadata { +// repeated string tags = 1 [ (gogoproto.moretags) = "log_field:\"meta_tags\"" ]; +// } +// +// The tagName is configurable using the tagName variable. Here it would be "log_field". +func TagBasedRequestFieldExtractor(tagName string) RequestFieldExtractorFunc { + return func(fullMethod string, req interface{}) map[string]interface{} { + retMap := make(map[string]interface{}) + reflectMessageTags(req, retMap, tagName) + if len(retMap) == 0 { + return nil + } + return retMap + } +} + +func reflectMessageTags(msg interface{}, existingMap map[string]interface{}, tagName string) { + v := reflect.ValueOf(msg) + // Only deal with pointers to structs. + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return + } + // Deref the pointer get to the struct. + v = v.Elem() + t := v.Type() + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + kind := field.Kind() + // Only recurse down direct pointers, which should only be to nested structs. + if (kind == reflect.Ptr || kind == reflect.Interface) && field.CanInterface() { + reflectMessageTags(field.Interface(), existingMap, tagName) + } + // In case of arrays/slices (repeated fields) go down to the concrete type. 
+ if kind == reflect.Array || kind == reflect.Slice { + if field.Len() == 0 { + continue + } + kind = field.Index(0).Kind() + } + // Only be interested in + if (kind >= reflect.Bool && kind <= reflect.Float64) || kind == reflect.String { + if tag := t.Field(i).Tag.Get(tagName); tag != "" { + existingMap[tag] = field.Interface() + } + } + } + return +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go new file mode 100644 index 000000000..a7ced60f5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go @@ -0,0 +1,85 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_ctxtags + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/peer" + + "github.com/grpc-ecosystem/go-grpc-middleware" +) + +// UnaryServerInterceptor returns a new unary server interceptors that sets the values for request tags. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + o := evaluateOptions(opts) + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + newCtx := newTagsForCtx(ctx) + if o.requestFieldsFunc != nil { + setRequestFieldTags(newCtx, o.requestFieldsFunc, info.FullMethod, req) + } + return handler(newCtx, req) + } +} + +// StreamServerInterceptor returns a new streaming server interceptor that sets the values for request tags. +func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + o := evaluateOptions(opts) + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + newCtx := newTagsForCtx(stream.Context()) + if o.requestFieldsFunc == nil { + // Short-circuit, don't do the expensive bit of allocating a wrappedStream. + wrappedStream := grpc_middleware.WrapServerStream(stream) + wrappedStream.WrappedContext = newCtx + return handler(srv, wrappedStream) + } + wrapped := &wrappedStream{stream, info, o, newCtx, true} + err := handler(srv, wrapped) + return err + } +} + +// wrappedStream is a thin wrapper around grpc.ServerStream that allows modifying context and extracts log fields from the initial message. +type wrappedStream struct { + grpc.ServerStream + info *grpc.StreamServerInfo + opts *options + // WrappedContext is the wrapper's own Context. You can assign it. + WrappedContext context.Context + initial bool +} + +// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() +func (w *wrappedStream) Context() context.Context { + return w.WrappedContext +} + +func (w *wrappedStream) RecvMsg(m interface{}) error { + err := w.ServerStream.RecvMsg(m) + // We only do log fields extraction on the single-request of a server-side stream. 
+ if !w.info.IsClientStream || w.opts.requestFieldsFromInitial && w.initial { + w.initial = false + + setRequestFieldTags(w.Context(), w.opts.requestFieldsFunc, w.info.FullMethod, m) + } + return err +} + +func newTagsForCtx(ctx context.Context) context.Context { + t := NewTags() + if peer, ok := peer.FromContext(ctx); ok { + t.Set("peer.address", peer.Addr.String()) + } + return SetInContext(ctx, t) +} + +func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) { + if valMap := f(fullMethodName, req); valMap != nil { + t := Extract(ctx) + for k, v := range valMap { + t.Set("grpc.request."+k, v) + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go new file mode 100644 index 000000000..952775f88 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go @@ -0,0 +1,44 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_ctxtags + +var ( + defaultOptions = &options{ + requestFieldsFunc: nil, + } +) + +type options struct { + requestFieldsFunc RequestFieldExtractorFunc + requestFieldsFromInitial bool +} + +func evaluateOptions(opts []Option) *options { + optCopy := &options{} + *optCopy = *defaultOptions + for _, o := range opts { + o(optCopy) + } + return optCopy +} + +type Option func(*options) + +// WithFieldExtractor customizes the function for extracting log fields from protobuf messages, for +// unary and server-streamed methods only. +func WithFieldExtractor(f RequestFieldExtractorFunc) Option { + return func(o *options) { + o.requestFieldsFunc = f + } +} + +// WithFieldExtractorForInitialReq customizes the function for extracting log fields from protobuf messages, +// for all unary and streaming methods. For client-streams and bidirectional-streams, the tags will be +// extracted from the first message from the client. +func WithFieldExtractorForInitialReq(f RequestFieldExtractorFunc) Option { + return func(o *options) { + o.requestFieldsFunc = f + o.requestFieldsFromInitial = true + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go new file mode 100644 index 000000000..2e9cafd2b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go @@ -0,0 +1,143 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_opentracing + +import ( + "context" + "io" + "sync" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +// UnaryClientInterceptor returns a new unary client interceptor for OpenTracing. +func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + o := evaluateOptions(opts) + return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if o.filterOutFunc != nil && !o.filterOutFunc(parentCtx, method) { + return invoker(parentCtx, method, req, reply, cc, opts...) 
+ } + newCtx, clientSpan := newClientSpanFromContext(parentCtx, o.tracer, method) + if o.unaryRequestHandlerFunc != nil { + o.unaryRequestHandlerFunc(clientSpan, req) + } + err := invoker(newCtx, method, req, reply, cc, opts...) + finishClientSpan(clientSpan, err) + return err + } +} + +// StreamClientInterceptor returns a new streaming client interceptor for OpenTracing. +func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + o := evaluateOptions(opts) + return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if o.filterOutFunc != nil && !o.filterOutFunc(parentCtx, method) { + return streamer(parentCtx, desc, cc, method, opts...) + } + newCtx, clientSpan := newClientSpanFromContext(parentCtx, o.tracer, method) + clientStream, err := streamer(newCtx, desc, cc, method, opts...) + if err != nil { + finishClientSpan(clientSpan, err) + return nil, err + } + return &tracedClientStream{ClientStream: clientStream, clientSpan: clientSpan}, nil + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. +type tracedClientStream struct { + grpc.ClientStream + mu sync.Mutex + alreadyFinished bool + clientSpan opentracing.Span +} + +func (s *tracedClientStream) Header() (metadata.MD, error) { + h, err := s.ClientStream.Header() + if err != nil { + s.finishClientSpan(err) + } + return h, err +} + +func (s *tracedClientStream) SendMsg(m interface{}) error { + err := s.ClientStream.SendMsg(m) + if err != nil { + s.finishClientSpan(err) + } + return err +} + +func (s *tracedClientStream) CloseSend() error { + err := s.ClientStream.CloseSend() + s.finishClientSpan(err) + return err +} + +func (s *tracedClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err != nil { + s.finishClientSpan(err) + } + return err +} + +func (s *tracedClientStream) finishClientSpan(err error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.alreadyFinished { + finishClientSpan(s.clientSpan, err) + s.alreadyFinished = true + } +} + +// ClientAddContextTags returns a context with specified opentracing tags, which +// are used by UnaryClientInterceptor/StreamClientInterceptor when creating a +// new span. +func ClientAddContextTags(ctx context.Context, tags opentracing.Tags) context.Context { + return context.WithValue(ctx, clientSpanTagKey{}, tags) +} + +type clientSpanTagKey struct{} + +func newClientSpanFromContext(ctx context.Context, tracer opentracing.Tracer, fullMethodName string) (context.Context, opentracing.Span) { + var parentSpanCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentSpanCtx = parent.Context() + } + opts := []opentracing.StartSpanOption{ + opentracing.ChildOf(parentSpanCtx), + ext.SpanKindRPCClient, + grpcTag, + } + if tagx := ctx.Value(clientSpanTagKey{}); tagx != nil { + if opt, ok := tagx.(opentracing.StartSpanOption); ok { + opts = append(opts, opt) + } + } + clientSpan := tracer.StartSpan(fullMethodName, opts...) 
+ // Make sure we add this to the metadata of the call, so it gets propagated: + md := metautils.ExtractOutgoing(ctx).Clone() + if err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, metadataTextMap(md)); err != nil { + grpclog.Infof("grpc_opentracing: failed serializing trace information: %v", err) + } + ctxWithMetadata := md.ToOutgoing(ctx) + return opentracing.ContextWithSpan(ctxWithMetadata, clientSpan), clientSpan +} + +func finishClientSpan(clientSpan opentracing.Span, err error) { + if err != nil && err != io.EOF { + ext.Error.Set(clientSpan, true) + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + clientSpan.Finish() +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go new file mode 100644 index 000000000..7a58efc22 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go @@ -0,0 +1,22 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_opentracing` adds OpenTracing + +OpenTracing Interceptors + +These are both client-side and server-side interceptors for OpenTracing. They are a provider-agnostic, with backends +such as Zipkin, or Google Stackdriver Trace. + +For a service that sends out requests and receives requests, you *need* to use both, otherwise downstream requests will +not have the appropriate requests propagated. + +All server-side spans are tagged with grpc_ctxtags information. + +For more information see: +http://opentracing.io/documentation/ +https://github.com/opentracing/specification/blob/master/semantic_conventions.md + +*/ +package grpc_opentracing diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go new file mode 100644 index 000000000..bc7302e3a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go @@ -0,0 +1,82 @@ +package grpc_opentracing + +import ( + "strings" + + grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" + opentracing "github.com/opentracing/opentracing-go" + "google.golang.org/grpc/grpclog" +) + +const ( + TagTraceId = "trace.traceid" + TagSpanId = "trace.spanid" + TagSampled = "trace.sampled" + jaegerNotSampledFlag = "0" +) + +// injectOpentracingIdsToTags writes trace data to ctxtags. +// This is done in an incredibly hacky way, because the public-facing interface of opentracing doesn't give access to +// the TraceId and SpanId of the SpanContext. Only the Tracer's Inject/Extract methods know what these are. 
+// Most tracers have them encoded as keys with 'traceid' and 'spanid': +// https://github.com/openzipkin/zipkin-go-opentracing/blob/594640b9ef7e5c994e8d9499359d693c032d738c/propagation_ot.go#L29 +// https://github.com/opentracing/basictracer-go/blob/1b32af207119a14b1b231d451df3ed04a72efebf/propagation_ot.go#L26 +// Jaeger from Uber use one-key schema with next format '{trace-id}:{span-id}:{parent-span-id}:{flags}' +// https://www.jaegertracing.io/docs/client-libraries/#trace-span-identity +// Datadog uses keys ending with 'trace-id' and 'parent-id' (for span) by default: +// https://github.com/DataDog/dd-trace-go/blob/v1/ddtrace/tracer/textmap.go#L77 +func injectOpentracingIdsToTags(traceHeaderName string, span opentracing.Span, tags grpc_ctxtags.Tags) { + if err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, + &tagsCarrier{Tags: tags, traceHeaderName: traceHeaderName}); err != nil { + grpclog.Infof("grpc_opentracing: failed extracting trace info into ctx %v", err) + } +} + +// tagsCarrier is a really hacky way of +type tagsCarrier struct { + grpc_ctxtags.Tags + traceHeaderName string +} + +func (t *tagsCarrier) Set(key, val string) { + key = strings.ToLower(key) + + if key == t.traceHeaderName { + parts := strings.Split(val, ":") + if len(parts) == 4 { + t.Tags.Set(TagTraceId, parts[0]) + t.Tags.Set(TagSpanId, parts[1]) + + if parts[3] != jaegerNotSampledFlag { + t.Tags.Set(TagSampled, "true") + } else { + t.Tags.Set(TagSampled, "false") + } + + return + } + } + + if strings.Contains(key, "traceid") { + t.Tags.Set(TagTraceId, val) // this will most likely be base-16 (hex) encoded + } + + if strings.Contains(key, "spanid") && !strings.Contains(strings.ToLower(key), "parent") { + t.Tags.Set(TagSpanId, val) // this will most likely be base-16 (hex) encoded + } + + if strings.Contains(key, "sampled") { + switch val { + case "true", "false": + t.Tags.Set(TagSampled, val) + } + } + + if strings.HasSuffix(key, "trace-id") { + t.Tags.Set(TagTraceId, val) + } + + if strings.HasSuffix(key, "parent-id") { + t.Tags.Set(TagSpanId, val) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go new file mode 100644 index 000000000..3649fb5b8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go @@ -0,0 +1,50 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_opentracing + +import ( + "encoding/base64" + "strings" + + "google.golang.org/grpc/metadata" +) + +const ( + binHdrSuffix = "-bin" +) + +// metadataTextMap extends a metadata.MD to be an opentracing textmap +type metadataTextMap metadata.MD + +// Set is a opentracing.TextMapReader interface that extracts values. +func (m metadataTextMap) Set(key, val string) { + // gRPC allows for complex binary values to be written. + encodedKey, encodedVal := encodeKeyValue(key, val) + // The metadata object is a multimap, and previous values may exist, but for opentracing headers, we do not append + // we just override. + m[encodedKey] = []string{encodedVal} +} + +// ForeachKey is a opentracing.TextMapReader interface that extracts values. 
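//
// A small sketch of walking a carrier built from existing gRPC metadata
// (assumes an `md metadata.MD` value in scope and that `fmt` is imported):
//
//	_ = metadataTextMap(md).ForeachKey(func(key, val string) error {
//		fmt.Printf("%s=%s\n", key, val) // print every metadata pair
//		return nil
//	})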
+func (m metadataTextMap) ForeachKey(callback func(key, val string) error) error { + for k, vv := range m { + for _, v := range vv { + if err := callback(k, v); err != nil { + return err + } + } + } + return nil +} + +// encodeKeyValue encodes key and value qualified for transmission via gRPC. +// note: copy pasted from private values of grpc.metadata +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go new file mode 100644 index 000000000..430fe56a1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go @@ -0,0 +1,89 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_opentracing + +import ( + "context" + + "github.com/opentracing/opentracing-go" +) + +var ( + defaultOptions = &options{ + filterOutFunc: nil, + tracer: nil, + } +) + +// FilterFunc allows users to provide a function that filters out certain methods from being traced. +// +// If it returns false, the given request will not be traced. +type FilterFunc func(ctx context.Context, fullMethodName string) bool + +// UnaryRequestHandlerFunc is a custom request handler +type UnaryRequestHandlerFunc func(span opentracing.Span, req interface{}) + +// OpNameFunc is a func that allows custom operation names instead of the gRPC method. +type OpNameFunc func(method string) string + +type options struct { + filterOutFunc FilterFunc + tracer opentracing.Tracer + traceHeaderName string + unaryRequestHandlerFunc UnaryRequestHandlerFunc + opNameFunc OpNameFunc +} + +func evaluateOptions(opts []Option) *options { + optCopy := &options{} + *optCopy = *defaultOptions + for _, o := range opts { + o(optCopy) + } + if optCopy.tracer == nil { + optCopy.tracer = opentracing.GlobalTracer() + } + if optCopy.traceHeaderName == "" { + optCopy.traceHeaderName = "uber-trace-id" + } + return optCopy +} + +type Option func(*options) + +// WithFilterFunc customizes the function used for deciding whether a given call is traced or not. +func WithFilterFunc(f FilterFunc) Option { + return func(o *options) { + o.filterOutFunc = f + } +} + +// WithTraceHeaderName customizes the trace header name where trace metadata passed with requests. +// Default one is `uber-trace-id` +func WithTraceHeaderName(name string) Option { + return func(o *options) { + o.traceHeaderName = name + } +} + +// WithTracer sets a custom tracer to be used for this middleware, otherwise the opentracing.GlobalTracer is used. 
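//
// A minimal client-side sketch (the `tracer` value, the target address, the use
// of grpc.WithInsecure and the import of this package as `grpc_opentracing` are
// assumptions of the example):
//
//	conn, err := grpc.Dial(
//		"example-service:8443",
//		grpc.WithInsecure(), // sketch only
//		grpc.WithUnaryInterceptor(grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(tracer))),
//		grpc.WithStreamInterceptor(grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(tracer))),
//	)
//	if err != nil {
//		// handle the dial error
//	}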
+func WithTracer(tracer opentracing.Tracer) Option { + return func(o *options) { + o.tracer = tracer + } +} + +// WithUnaryRequestHandlerFunc sets a custom handler for the request +func WithUnaryRequestHandlerFunc(f UnaryRequestHandlerFunc) Option { + return func(o *options) { + o.unaryRequestHandlerFunc = f + } +} + +// WithOpName customizes the trace Operation name +func WithOpName(f OpNameFunc) Option { + return func(o *options) { + o.opNameFunc = f + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go new file mode 100644 index 000000000..186b1084f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go @@ -0,0 +1,98 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_opentracing + +import ( + "context" + + "github.com/grpc-ecosystem/go-grpc-middleware" + "github.com/grpc-ecosystem/go-grpc-middleware/tags" + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +var ( + grpcTag = opentracing.Tag{Key: string(ext.Component), Value: "gRPC"} +) + +// UnaryServerInterceptor returns a new unary server interceptor for OpenTracing. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + o := evaluateOptions(opts) + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if o.filterOutFunc != nil && !o.filterOutFunc(ctx, info.FullMethod) { + return handler(ctx, req) + } + opName := info.FullMethod + if o.opNameFunc != nil { + opName = o.opNameFunc(info.FullMethod) + } + newCtx, serverSpan := newServerSpanFromInbound(ctx, o.tracer, o.traceHeaderName, opName) + if o.unaryRequestHandlerFunc != nil { + o.unaryRequestHandlerFunc(serverSpan, req) + } + resp, err := handler(newCtx, req) + finishServerSpan(ctx, serverSpan, err) + return resp, err + } +} + +// StreamServerInterceptor returns a new streaming server interceptor for OpenTracing. 
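//
// A server-side wiring sketch: the ctxtags interceptors are installed before the
// tracing ones so that the trace identifiers injected by this package land in a
// real Tags object. grpc.ChainUnaryInterceptor/ChainStreamInterceptor from
// google.golang.org/grpc and the `grpc_ctxtags` import are assumptions of the example:
//
//	srv := grpc.NewServer(
//		grpc.ChainUnaryInterceptor(
//			grpc_ctxtags.UnaryServerInterceptor(),
//			grpc_opentracing.UnaryServerInterceptor(),
//		),
//		grpc.ChainStreamInterceptor(
//			grpc_ctxtags.StreamServerInterceptor(),
//			grpc_opentracing.StreamServerInterceptor(),
//		),
//	)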
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + o := evaluateOptions(opts) + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if o.filterOutFunc != nil && !o.filterOutFunc(stream.Context(), info.FullMethod) { + return handler(srv, stream) + } + opName := info.FullMethod + if o.opNameFunc != nil { + opName = o.opNameFunc(info.FullMethod) + } + newCtx, serverSpan := newServerSpanFromInbound(stream.Context(), o.tracer, o.traceHeaderName, opName) + wrappedStream := grpc_middleware.WrapServerStream(stream) + wrappedStream.WrappedContext = newCtx + err := handler(srv, wrappedStream) + finishServerSpan(newCtx, serverSpan, err) + return err + } +} + +func newServerSpanFromInbound(ctx context.Context, tracer opentracing.Tracer, traceHeaderName, opName string) (context.Context, opentracing.Span) { + md := metautils.ExtractIncoming(ctx) + parentSpanContext, err := tracer.Extract(opentracing.HTTPHeaders, metadataTextMap(md)) + if err != nil && err != opentracing.ErrSpanContextNotFound { + grpclog.Infof("grpc_opentracing: failed parsing trace information: %v", err) + } + + serverSpan := tracer.StartSpan( + opName, + // this is magical, it attaches the new span to the parent parentSpanContext, and creates an unparented one if empty. + ext.RPCServerOption(parentSpanContext), + grpcTag, + ) + + injectOpentracingIdsToTags(traceHeaderName, serverSpan, grpc_ctxtags.Extract(ctx)) + return opentracing.ContextWithSpan(ctx, serverSpan), serverSpan +} + +func finishServerSpan(ctx context.Context, serverSpan opentracing.Span, err error) { + // Log context information + tags := grpc_ctxtags.Extract(ctx) + for k, v := range tags.Values() { + // Don't tag errors, log them instead. + if vErr, ok := v.(error); ok { + serverSpan.LogKV(k, vErr.Error()) + } else { + serverSpan.SetTag(k, v) + } + } + if err != nil { + ext.Error.Set(serverSpan, true) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + serverSpan.Finish() +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go new file mode 100644 index 000000000..4e69a6305 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go @@ -0,0 +1,28 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Backoff Helper Utilities + +Implements common backoff features. +*/ +package backoffutils + +import ( + "math/rand" + "time" +) + +// JitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. +// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +func JitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. +func ExponentBase2(a uint) uint { + return (1 << a) >> 1 +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go new file mode 100644 index 000000000..1ed9bb499 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go @@ -0,0 +1,19 @@ +// Copyright 2016 Michal Witkowski. 
All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside +Context handlers. + +While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +they are hard to use. + +The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example +the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context +metadata. + + nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") + clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) +*/ + +package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go new file mode 100644 index 000000000..1c60585dd --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -0,0 +1,126 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "context" + "strings" + + "google.golang.org/grpc/metadata" +) + +// NiceMD is a convenience wrapper definiting extra functions on the metadata. +type NiceMD metadata.MD + +// ExtractIncoming extracts an inbound metadata from the server-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractIncoming(ctx context.Context) NiceMD { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// ExtractOutgoing extracts an outbound metadata from the client-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractOutgoing(ctx context.Context) NiceMD { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// Clone performs a *deep* copy of the metadata.MD. +// +// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// all keys get copied. +func (m NiceMD) Clone(copiedKeys ...string) NiceMD { + newMd := NiceMD(metadata.Pairs()) + for k, vv := range m { + found := false + if len(copiedKeys) == 0 { + found = true + } else { + for _, allowedKey := range copiedKeys { + if strings.EqualFold(allowedKey, k) { + found = true + break + } + } + } + if !found { + continue + } + newMd[k] = make([]string, len(vv)) + copy(newMd[k], vv) + } + return NiceMD(newMd) +} + +// ToOutgoing sets the given NiceMD as a client-side context for dispatching. +func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { + return metadata.NewOutgoingContext(ctx, metadata.MD(m)) +} + +// ToIncoming sets the given NiceMD as a server-side context for dispatching. +// +// This is mostly useful in ServerInterceptors.. +func (m NiceMD) ToIncoming(ctx context.Context) context.Context { + return metadata.NewIncomingContext(ctx, metadata.MD(m)) +} + +// Get retrieves a single value from the metadata. +// +// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, +// an empty string is returned. 
+//
+// The function is binary-key safe.
+func (m NiceMD) Get(key string) string {
+	k := strings.ToLower(key)
+	vv, ok := m[k]
+	if !ok {
+		return ""
+	}
+	return vv[0]
+}
+
+// Del deletes all values associated with the given key from the metadata.
+//
+// It works analogously to http.Header.Del, deleting all values if they exist.
+//
+// The function is binary-key safe.
+
+func (m NiceMD) Del(key string) NiceMD {
+	k := strings.ToLower(key)
+	delete(m, k)
+	return m
+}
+
+// Set sets the given value in the metadata.
+//
+// It works analogously to http.Header.Set, overwriting all previous metadata values.
+//
+// The function is binary-key safe.
+func (m NiceMD) Set(key string, value string) NiceMD {
+	k := strings.ToLower(key)
+	m[k] = []string{value}
+	return m
+}
+
+// Add appends the given value to the metadata for the given key.
+//
+// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
+//
+// The function is binary-key safe.
+func (m NiceMD) Add(key string, value string) NiceMD {
+	k := strings.ToLower(key)
+	m[k] = append(m[k], value)
+	return m
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
new file mode 100644
index 000000000..05ccfb3f2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_middleware
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+)
+
+// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
+type WrappedServerStream struct {
+	grpc.ServerStream
+	// WrappedContext is the wrapper's own Context. You can assign it.
+	WrappedContext context.Context
+}
+
+// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context().
+func (w *WrappedServerStream) Context() context.Context {
+	return w.WrappedContext
+}
+
+// WrapServerStream returns a ServerStream that has the ability to overwrite context.
+func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream { + if existing, ok := stream.(*WrappedServerStream); ok { + return existing + } + return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()} +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore new file mode 100644 index 000000000..2233cff9d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore @@ -0,0 +1,201 @@ +#vendor +vendor/ + +# Created by .ignore support plugin (hsz.mobi) +coverage.txt +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### Kate template +# Swap Files # +.*.kate-swp +.swp.* +### SublimeText template +# cache files for sublime text +*.tmlanguage.cache +*.tmPreferences.cache +*.stTheme.cache + +# workspace files are user-specific +*.sublime-workspace + +# project files should be checked into the repository, unless a significant +# proportion of contributors will probably not be using SublimeText +# *.sublime-project + +# sftp configuration file +sftp-config.json +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Xcode template +# Xcode +# +# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore + +## Build generated +build/ +DerivedData/ + +## Various settings +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata/ + +## Other +*.moved-aside +*.xccheckout +*.xcscmblueprint +### Eclipse template + +.metadata +bin/ +tmp/ +*.tmp +*.bak +*.swp +*~.nib +local.properties +.settings/ +.loadpath +.recommenders + +# Eclipse Core +.project + +# External tool builders +.externalToolBuilders/ + +# Locally stored "Eclipse launch configurations" +*.launch + +# PyDev specific (Python IDE for 
Eclipse) +*.pydevproject + +# CDT-specific (C/C++ Development Tooling) +.cproject + +# JDT-specific (Eclipse Java Development Tools) +.classpath + +# Java annotation processor (APT) +.factorypath + +# PDT-specific (PHP Development Tools) +.buildpath + +# sbteclipse plugin +.target + +# Tern plugin +.tern-project + +# TeXlipse plugin +.texlipse + +# STS (Spring Tool Suite) +.springBeans + +# Code Recommenders +.recommenders/ + diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml new file mode 100644 index 000000000..2a845b96a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml @@ -0,0 +1,25 @@ +sudo: false +language: go +# * github.com/grpc/grpc-go still supports go1.6 +# - When we drop support for go1.6 we can remove golang.org/x/net/context +# below as it is part of the Go std library since go1.7 +# * github.com/prometheus/client_golang already requires at least go1.7 since +# September 2017 +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - master + +install: + - go get github.com/prometheus/client_golang/prometheus + - go get google.golang.org/grpc + - go get golang.org/x/net/context + - go get github.com/stretchr/testify +script: + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md new file mode 100644 index 000000000..19a8059e1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04 + +### Added + +* Provide metrics object as `prometheus.Collector`, for conventional metric registration. +* Support non-default/global Prometheus registry. +* Allow configuring counters with `prometheus.CounterOpts`. + +### Changed + +* Remove usage of deprecated `grpc.Code()`. +* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`. + +--- + +This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases). diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE new file mode 100644 index 000000000..b2b065037 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 000000000..499c58355
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,247 @@
+# Go gRPC Interceptors for Prometheus monitoring
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.
+    http.Handle("/metrics", promhttp.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    clientConn, err = grpc.Dial(
+        address,
+        grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+        grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+    )
+    client = pb_testproto.NewTestServiceClient(clientConn)
+    resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. Both of them have mirror-concepts.
Similarly all methods
+contain the same rich labels:
+
+ * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
+   the `service` section name. E.g. for `package = mwitkow.testproto` and
+   `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+ * `grpc_method` - the name of the method called on the gRPC service. E.g.
+   `grpc_method="Ping"`
+ * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+   Differentiating between them is important especially for latency measurements.
+
+   - `unary` is single request, single response RPC
+   - `client_stream` is a multi-request, single response RPC
+   - `server_stream` is a single request, multi-response RPC
+   - `bidi_stream` is a multi-request, multi-response RPC
+
+
+Additionally for completed RPCs, the following labels are used:
+
+ * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+   The list of all statuses is too long, but here are some common ones:
+
+   - `OK` - means the RPC was successful
+   - `InvalidArgument` - RPC contained bad values
+   - `Internal` - server-side error not disclosed to the clients
+
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+the metrics themselves are exposed via the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call it will increment the
+`grpc_server_started_total` and start the handling time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default.
To enable them, please call the following
+in your server initialization code:
+
+```jsoniq
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system, and
+let the aggregations be handled there. The verbosity of the above metrics makes it possible to have that
+flexibility.
Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% of requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"`, this gives the per-`grpc_service` `10m` average number of messages returned for all
+`server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows
+you to track when clients started to send "wide" queries that return many messages.
+Note the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99%-tile latency of unary requests
+```jsoniq
+histogram_quantile(0.99,
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns a 99th-percentile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate: this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ /
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"`, calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25`
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting the fraction of "fast" requests is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations,
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io).
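+
+As an illustrative sketch building on the Usage section above (the service registration, listener address
+and metrics port below are placeholders, not an official example from this repository), the server-side
+interceptors can also be wired against a dedicated, non-global Prometheus registry via `NewServerMetrics`:
+
+```go
+package main
+
+import (
+	"net"
+	"net/http"
+
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"google.golang.org/grpc"
+)
+
+func main() {
+	// Use a dedicated registry instead of the global prometheus.DefaultRegisterer.
+	reg := prometheus.NewRegistry()
+
+	// ServerMetrics implements prometheus.Collector, so it can be registered directly.
+	srvMetrics := grpc_prometheus.NewServerMetrics()
+	reg.MustRegister(srvMetrics)
+
+	server := grpc.NewServer(
+		grpc.StreamInterceptor(srvMetrics.StreamServerInterceptor()),
+		grpc.UnaryInterceptor(srvMetrics.UnaryServerInterceptor()),
+	)
+	// Register your gRPC service implementations here (placeholder), then
+	// pre-initialize all label combinations so they are exported as zero values.
+	srvMetrics.InitializeMetrics(server)
+
+	// Expose only this registry on /metrics (port is a placeholder).
+	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
+	go func() { _ = http.ListenAndServe(":9092", nil) }()
+
+	lis, err := net.Listen("tcp", ":8080")
+	if err != nil {
+		panic(err)
+	}
+	_ = server.Serve(lis)
+}
+```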
+ +## License + +`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go new file mode 100644 index 000000000..751a4c72d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go @@ -0,0 +1,39 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for client-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +var ( + // DefaultClientMetrics is the default instance of ClientMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultClientMetrics = NewClientMetrics() + + // UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor() + + // StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor() +) + +func init() { + prom.MustRegister(DefaultClientMetrics.clientStartedCounter) + prom.MustRegister(DefaultClientMetrics.clientHandledCounter) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived) + prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent) +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of +// RPCs. Histogram metrics can be very expensive for Prometheus to retain and +// query. This function acts on the DefaultClientMetrics variable and the +// default Prometheus metrics registry. +func EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...) + prom.Register(DefaultClientMetrics.clientHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go new file mode 100644 index 000000000..9b476f983 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go @@ -0,0 +1,170 @@ +package grpc_prometheus + +import ( + "io" + + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ClientMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC client. +type ClientMetrics struct { + clientStartedCounter *prom.CounterVec + clientHandledCounter *prom.CounterVec + clientStreamMsgReceived *prom.CounterVec + clientStreamMsgSent *prom.CounterVec + clientHandledHistogramEnabled bool + clientHandledHistogramOpts prom.HistogramOpts + clientHandledHistogram *prom.HistogramVec +} + +// NewClientMetrics returns a ClientMetrics object. Use a new instance of +// ClientMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. 
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics { + opts := counterOptions(counterOpts) + return &ClientMetrics{ + clientStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_started_total", + Help: "Total number of RPCs started on the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + + clientStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledHistogramEnabled: false, + clientHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_client_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + }, + clientHandledHistogram: nil, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) { + m.clientStartedCounter.Describe(ch) + m.clientHandledCounter.Describe(ch) + m.clientStreamMsgReceived.Describe(ch) + m.clientStreamMsgSent.Describe(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ClientMetrics) Collect(ch chan<- prom.Metric) { + m.clientStartedCounter.Collect(ch) + m.clientHandledCounter.Collect(ch) + m.clientStreamMsgReceived.Collect(ch) + m.clientStreamMsgSent.Collect(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Collect(ch) + } +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientHandledHistogramOpts) + } + if !m.clientHandledHistogramEnabled { + m.clientHandledHistogram = prom.NewHistogramVec( + m.clientHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.clientHandledHistogramEnabled = true +} + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + monitor := newClientReporter(m, Unary, method) + monitor.SentMessage() + err := invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + monitor.ReceivedMessage() + } + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + monitor := newClientReporter(m, clientStreamType(desc), method) + clientStream, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return nil, err + } + return &monitoredClientStream{clientStream, monitor}, nil + } +} + +func clientStreamType(desc *grpc.StreamDesc) grpcType { + if desc.ClientStreams && !desc.ServerStreams { + return ClientStream + } else if !desc.ClientStreams && desc.ServerStreams { + return ServerStream + } + return BidiStream +} + +// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters. +type monitoredClientStream struct { + grpc.ClientStream + monitor *clientReporter +} + +func (s *monitoredClientStream) SendMsg(m interface{}) error { + err := s.ClientStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredClientStream) RecvMsg(m interface{}) error { + err := s.ClientStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } else if err == io.EOF { + s.monitor.Handled(codes.OK) + } else { + st, _ := status.FromError(err) + s.monitor.Handled(st.Code()) + } + return err +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go new file mode 100644 index 000000000..cbf153229 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type clientReporter struct { + metrics *ClientMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter { + r := &clientReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.clientHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *clientReporter) ReceivedMessage() { + r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) SentMessage() { + r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) Handled(code codes.Code) { + r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.clientHandledHistogramEnabled { + r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile new file mode 100644 index 000000000..74c084223 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile @@ -0,0 +1,16 @@ +SHELL="/bin/bash" + +GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) + +all: vet fmt test + +fmt: + go fmt $(GOFILES_NOVENDOR) + +vet: + go vet $(GOFILES_NOVENDOR) + +test: vet + ./scripts/test_all.sh + +.PHONY: all vet test diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go new file mode 100644 index 000000000..9d51aec98 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go @@ -0,0 +1,41 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +// A CounterOption lets you add options to Counter metrics using With* funcs. +type CounterOption func(*prom.CounterOpts) + +type counterOptions []CounterOption + +func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts { + for _, f := range co { + f(&o) + } + return o +} + +// WithConstLabels allows you to add ConstLabels to Counter metrics. +func WithConstLabels(labels prom.Labels) CounterOption { + return func(o *prom.CounterOpts) { + o.ConstLabels = labels + } +} + +// A HistogramOption lets you add options to Histogram metrics using With* +// funcs. +type HistogramOption func(*prom.HistogramOpts) + +// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. +func WithHistogramBuckets(buckets []float64) HistogramOption { + return func(o *prom.HistogramOpts) { o.Buckets = buckets } +} + +// WithHistogramConstLabels allows you to add custom ConstLabels to +// histograms metrics. 
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption { + return func(o *prom.HistogramOpts) { + o.ConstLabels = labels + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go new file mode 100644 index 000000000..322f99046 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go @@ -0,0 +1,48 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for server-side gRPC. + +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +var ( + // DefaultServerMetrics is the default instance of ServerMetrics. It is + // intended to be used in conjunction the default Prometheus metrics + // registry. + DefaultServerMetrics = NewServerMetrics() + + // UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. + UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor() + + // StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. + StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor() +) + +func init() { + prom.MustRegister(DefaultServerMetrics.serverStartedCounter) + prom.MustRegister(DefaultServerMetrics.serverHandledCounter) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived) + prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent) +} + +// Register takes a gRPC server and pre-initializes all counters to 0. This +// allows for easier monitoring in Prometheus (no missing metrics), and should +// be called *after* all services have been registered with the server. This +// function acts on the DefaultServerMetrics variable. +func Register(server *grpc.Server) { + DefaultServerMetrics.InitializeMetrics(server) +} + +// EnableHandlingTimeHistogram turns on recording of handling time +// of RPCs. Histogram metrics can be very expensive for Prometheus +// to retain and query. This function acts on the DefaultServerMetrics +// variable and the default Prometheus metrics registry. +func EnableHandlingTimeHistogram(opts ...HistogramOption) { + DefaultServerMetrics.EnableHandlingTimeHistogram(opts...) + prom.Register(DefaultServerMetrics.serverHandledHistogram) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go new file mode 100644 index 000000000..5b1467e7a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go @@ -0,0 +1,185 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +// ServerMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC server. +type ServerMetrics struct { + serverStartedCounter *prom.CounterVec + serverHandledCounter *prom.CounterVec + serverStreamMsgReceived *prom.CounterVec + serverStreamMsgSent *prom.CounterVec + serverHandledHistogramEnabled bool + serverHandledHistogramOpts prom.HistogramOpts + serverHandledHistogram *prom.HistogramVec +} + +// NewServerMetrics returns a ServerMetrics object. 
Use a new instance of +// ServerMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics { + opts := counterOptions(counterOpts) + return &ServerMetrics{ + serverStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_started_total", + Help: "Total number of RPCs started on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + serverStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_received_total", + Help: "Total number of RPC stream messages received on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledHistogramEnabled: false, + serverHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prom.DefBuckets, + }, + serverHandledHistogram: nil, + } +} + +// EnableHandlingTimeHistogram enables histograms being registered when +// registering the ServerMetrics on a Prometheus registry. Histograms can be +// expensive on Prometheus servers. It takes options to configure histogram +// options such as the defined buckets. +func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.serverHandledHistogramOpts) + } + if !m.serverHandledHistogramEnabled { + m.serverHandledHistogram = prom.NewHistogramVec( + m.serverHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.serverHandledHistogramEnabled = true +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) { + m.serverStartedCounter.Describe(ch) + m.serverHandledCounter.Describe(ch) + m.serverStreamMsgReceived.Describe(ch) + m.serverStreamMsgSent.Describe(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ServerMetrics) Collect(ch chan<- prom.Metric) { + m.serverStartedCounter.Collect(ch) + m.serverHandledCounter.Collect(ch) + m.serverStreamMsgReceived.Collect(ch) + m.serverStreamMsgSent.Collect(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Collect(ch) + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + monitor := newServerReporter(m, Unary, info.FullMethod) + monitor.ReceivedMessage() + resp, err := handler(ctx, req) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + if err == nil { + monitor.SentMessage() + } + return resp, err + } +} + +// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + monitor := newServerReporter(m, streamRPCType(info), info.FullMethod) + err := handler(srv, &monitoredServerStream{ss, monitor}) + st, _ := status.FromError(err) + monitor.Handled(st.Code()) + return err + } +} + +// InitializeMetrics initializes all metrics, with their appropriate null +// value, for all gRPC methods registered on a gRPC server. This is useful, to +// ensure that all metrics exist when collecting and querying. +func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) { + serviceInfo := server.GetServiceInfo() + for serviceName, info := range serviceInfo { + for _, mInfo := range info.Methods { + preRegisterMethod(m, serviceName, &mInfo) + } + } +} + +func streamRPCType(info *grpc.StreamServerInfo) grpcType { + if info.IsClientStream && !info.IsServerStream { + return ClientStream + } else if !info.IsClientStream && info.IsServerStream { + return ServerStream + } + return BidiStream +} + +// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters. +type monitoredServerStream struct { + grpc.ServerStream + monitor *serverReporter +} + +func (s *monitoredServerStream) SendMsg(m interface{}) error { + err := s.ServerStream.SendMsg(m) + if err == nil { + s.monitor.SentMessage() + } + return err +} + +func (s *monitoredServerStream) RecvMsg(m interface{}) error { + err := s.ServerStream.RecvMsg(m) + if err == nil { + s.monitor.ReceivedMessage() + } + return err +} + +// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. +func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) { + methodName := mInfo.Name + methodType := string(typeFromMethodInfo(mInfo)) + // These are just references (no increments), as just referencing will create the labels but not set values. 
+ metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if metrics.serverHandledHistogramEnabled { + metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range allCodes { + metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go new file mode 100644 index 000000000..aa9db5401 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type serverReporter struct { + metrics *ServerMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter { + r := &serverReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.serverHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *serverReporter) ReceivedMessage() { + r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) SentMessage() { + r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) Handled(code codes.Code) { + r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.serverHandledHistogramEnabled { + r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go new file mode 100644 index 000000000..7987de35f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go @@ -0,0 +1,50 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type grpcType string + +const ( + Unary grpcType = "unary" + ClientStream grpcType = "client_stream" + ServerStream grpcType = "server_stream" + BidiStream grpcType = "bidi_stream" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} + +func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { + if !mInfo.IsClientStream && !mInfo.IsServerStream { + return Unary + } + if mInfo.IsClientStream && !mInfo.IsServerStream { + return ClientStream + } + if !mInfo.IsClientStream && mInfo.IsServerStream { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000..444df08f8 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwrap.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError struct { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 000000000..a733bef18 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,169 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors.
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 000000000..c9b84022c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 000000000..b97cd6ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." 
+ @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000..71dd308ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,150 @@ +# go-multierror + +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) + +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. + +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. 
+ +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..3e2589bfd --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,43 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) 
+ } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 000000000..aab8e9abe --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 000000000..47f13c49a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 000000000..141cc4ccb --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-multierror + +go 1.13 + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 000000000..e8238e9ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 000000000..9c29efb7f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. 
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 000000000..f54574326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,121 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. +// +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. 
Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 000000000..5c477abe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 000000000..15586a2b5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 000000000..cb63a3216 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.x + - tip + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 000000000..84fd743f5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -0,0 +1,18 @@ +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 000000000..c8223326d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -0,0 +1,125 @@ +# HCL + +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + +HCL (HashiCorp Configuration Language) is a configuration language built +by HashiCorp. The goal of HCL is to build a structured configuration language +that is both human and machine friendly for use with command-line tools, but +specifically targeted towards DevOps tools, servers, etc. + +HCL is also fully JSON compatible. That is, JSON can be used as completely +valid input to a system expecting HCL. This helps makes systems +interoperable with other systems. + +HCL is heavily inspired by +[libucl](https://github.com/vstakhov/libucl), +nginx configuration, and others similar. + +## Why? + +A common question when viewing HCL is to ask the question: why not +JSON, YAML, etc.? + +Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) +used a variety of configuration languages from full programming languages +such as Ruby to complete data structure languages such as JSON. What we +learned is that some people wanted human-friendly configuration languages +and some people wanted machine-friendly languages. + +JSON fits a nice balance in this, but is fairly verbose and most +importantly doesn't support comments. 
With YAML, we found that beginners +had a really hard time determining what the actual structure was, and +ended up guessing more often than not whether to use a hyphen, colon, etc. +in order to represent some configuration key. + +Full programming languages such as Ruby enable complex behavior +a configuration language shouldn't usually allow, and also forces +people to learn some set of Ruby. + +Because of this, we decided to create our own configuration language +that is JSON-compatible. Our configuration language (HCL) is designed +to be written and modified by humans. The API for HCL allows JSON +as an input so that it is also machine-friendly (machines can generate +JSON instead of trying to generate HCL). + +Our goal with HCL is not to alienate other configuration languages. +It is instead to provide HCL as a specialized language for our tools, +and JSON as the interoperability layer. + +## Syntax + +For a complete grammar, please see the parser itself. A high-level overview +of the syntax and grammar is listed here. + + * Single line comments start with `#` or `//` + + * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments + are not allowed. A multi-line comment (also known as a block comment) + terminates at the first `*/` found. + + * Values are assigned with the syntax `key = value` (whitespace doesn't + matter). The value can be any primitive: a string, number, boolean, + object, or list. + + * Strings are double-quoted and can contain any UTF-8 characters. + Example: `"Hello, World"` + + * Multi-line strings start with `<- + echo %Path% + + go version + + go env + + go get -t ./... + +build_script: +- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 000000000..bed9ebbe1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,729 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. +func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. 
+ if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float32, reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. 
+ testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + fieldName := field.Name + + tagValue := field.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, fieldValue) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + fieldValue.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, fieldValue) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. 
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { + return err + } + } + + decodedFields = append(decodedFields, field.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/go.mod b/vendor/github.com/hashicorp/hcl/go.mod new file mode 100644 index 000000000..4debbbe35 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/hcl + +require github.com/davecgh/go-spew v1.1.1 diff --git a/vendor/github.com/hashicorp/hcl/go.sum b/vendor/github.com/hashicorp/hcl/go.sum new file mode 100644 index 000000000..b5e2922e8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/go.sum @@ -0,0 +1,2 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 000000000..575a20b50 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..6e5ef654b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. 
+type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. 
Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 000000000..ba07ad42b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 000000000..5c99381df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..64c83bcfb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,532 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")), + } + } + + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. 
+ return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), + } + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
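The lead/line bookkeeping here is what ultimately surfaces on ast.ObjectItem as LeadComment and LineComment. A hedged sketch of what that looks like from the caller's side, assuming the vendored import paths and the package-level Parse entry point defined earlier in this file:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        src := []byte(`
    # lead comment, on its own line above the item
    region = "eu-west-1" # line comment, trailing the item
    `)
        f, err := parser.Parse(src)
        if err != nil {
            log.Fatal(err)
        }

        item := f.Node.(*ast.ObjectList).Items[0]
        if item.LeadComment != nil {
            fmt.Println("lead:", item.LeadComment.List[0].Text)
        }
        if item.LineComment != nil {
            fmt.Println("line:", item.LineComment.List[0].Text)
        }
    }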
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go new file mode 100644 index 000000000..7c038d12a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go @@ -0,0 +1,789 @@ +package printer + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +const ( + blank = byte(' ') + newline = byte('\n') + tab = byte('\t') + infinity = 1 << 30 // offset or line +) + +var ( + unindent = []byte("\uE123") // in the private use space +) + +type printer struct { + cfg Config + prev token.Pos + + comments []*ast.CommentGroup // may be nil, contains all comments + standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) + + enableTrace bool + indentTrace int +} + +type ByPosition []*ast.CommentGroup + +func (b ByPosition) Len() int { return len(b) } +func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } + +// collectComments comments all standalone comments which are not lead or line +// comment +func (p *printer) collectComments(node ast.Node) { + // first collect all comments. This is already stored in + // ast.File.(comments) + ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { + switch t := nn.(type) { + case *ast.File: + p.comments = t.Comments + return nn, false + } + return nn, true + }) + + standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) + for _, c := range p.comments { + standaloneComments[c.Pos()] = c + } + + // next remove all lead and line comments from the overall comment map. + // This will give us comments which are standalone, comments which are not + // assigned to any kind of node. 
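collectComments leans on ast.Walk, whose callback returns the (possibly rewritten) node and a bool that controls whether the walk descends further. The same traversal pattern in isolation, here just counting literal values; the import paths are the vendored ones and the input is illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        f, err := parser.Parse([]byte("a = 1\nb = \"two\"\nc { d = true }"))
        if err != nil {
            log.Fatal(err)
        }

        literals := 0
        ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
            if _, ok := n.(*ast.LiteralType); ok {
                literals++
            }
            // Returning true keeps descending, mirroring how
            // collectComments visits every node in the file.
            return n, true
        })

        fmt.Println("literal nodes:", literals)
    }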
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { + switch t := nn.(type) { + case *ast.LiteralType: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + case *ast.ObjectItem: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + if _, ok := standaloneComments[comment.Pos()]; ok { + delete(standaloneComments, comment.Pos()) + } + } + } + } + + return nn, true + }) + + for _, c := range standaloneComments { + p.standaloneComments = append(p.standaloneComments, c) + } + + sort.Sort(ByPosition(p.standaloneComments)) +} + +// output prints creates b printable HCL output and returns it. +func (p *printer) output(n interface{}) []byte { + var buf bytes.Buffer + + switch t := n.(type) { + case *ast.File: + // File doesn't trace so we add the tracing here + defer un(trace(p, "File")) + return p.output(t.Node) + case *ast.ObjectList: + defer un(trace(p, "ObjectList")) + + var index int + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is at "infinity" + var nextItem token.Pos + if index != len(t.Items) { + nextItem = t.Items[index].Pos() + } else { + nextItem = token.Pos{Offset: infinity, Line: infinity} + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + // Go through all the comments in the group. The group + // should be printed together, not separated by double newlines. + printed := false + newlinePrinted := false + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // if we hit the end add newlines so we can print the comment + // we don't do this if prev is invalid which means the + // beginning of the file since the first comment should + // be at the first line. + if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { + buf.Write([]byte{newline, newline}) + newlinePrinted = true + } + + // Write the actual comment. + buf.WriteString(comment.Text) + buf.WriteByte(newline) + + // Set printed to true to note that we printed something + printed = true + } + } + + // If we're not at the last item, write a new line so + // that there is a newline separating this comment from + // the next object. + if printed && index != len(t.Items) { + buf.WriteByte(newline) + } + } + + if index == len(t.Items) { + break + } + + buf.Write(p.output(t.Items[index])) + if index != len(t.Items)-1 { + // Always write a newline to separate us from the next item + buf.WriteByte(newline) + + // Need to determine if we're going to separate the next item + // with a blank line. The logic here is simple, though there + // are a few conditions: + // + // 1. The next object is more than one line away anyways, + // so we need an empty line. + // + // 2. 
The next object is not a "single line" object, so + // we need an empty line. + // + // 3. This current object is not a single line object, + // so we need an empty line. + current := t.Items[index] + next := t.Items[index+1] + if next.Pos().Line != t.Items[index].Pos().Line+1 || + !p.isSingleLineObject(next) || + !p.isSingleLineObject(current) { + buf.WriteByte(newline) + } + } + index++ + } + case *ast.ObjectKey: + buf.WriteString(t.Token.Text) + case *ast.ObjectItem: + p.prev = t.Pos() + buf.Write(p.objectItem(t)) + case *ast.LiteralType: + buf.Write(p.literalType(t)) + case *ast.ListType: + buf.Write(p.list(t)) + case *ast.ObjectType: + buf.Write(p.objectType(t)) + default: + fmt.Printf(" unknown type: %T\n", n) + } + + return buf.Bytes() +} + +func (p *printer) literalType(lit *ast.LiteralType) []byte { + result := []byte(lit.Token.Text) + switch lit.Token.Type { + case token.HEREDOC: + // Clear the trailing newline from heredocs + if result[len(result)-1] == '\n' { + result = result[:len(result)-1] + } + + // Poison lines 2+ so that we don't indent them + result = p.heredocIndent(result) + case token.STRING: + // If this is a multiline string, poison lines 2+ so we don't + // indent them. + if bytes.IndexRune(result, '\n') >= 0 { + result = p.heredocIndent(result) + } + } + + return result +} + +// objectItem returns the printable HCL form of an object item. An object type +// starts with one/multiple keys and has a value. The value might be of any +// type. +func (p *printer) objectItem(o *ast.ObjectItem) []byte { + defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) + var buf bytes.Buffer + + if o.LeadComment != nil { + for _, comment := range o.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + // If key and val are on different lines, treat line comments like lead comments. + if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range o.Keys { + buf.WriteString(k.Token.Text) + buf.WriteByte(blank) + + // reach end of key + if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + buf.Write(p.output(o.Val)) + + if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { + buf.WriteByte(blank) + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + } + } + + return buf.Bytes() +} + +// objectType returns the printable HCL form of an object type. An object type +// begins with a brace and ends with a brace. +func (p *printer) objectType(o *ast.ObjectType) []byte { + defer un(trace(p, "ObjectType")) + var buf bytes.Buffer + buf.WriteString("{") + + var index int + var nextItem token.Pos + var commented, newlinePrinted bool + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is the closing brace + if index != len(o.List.Items) { + nextItem = o.List.Items[index].Pos() + } else { + nextItem = o.Rbrace + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + printed := false + var lastCommentPos token.Pos + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). 
+ // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // If there are standalone comments and the initial newline has not + // been printed yet, do it now. + if !newlinePrinted { + newlinePrinted = true + buf.WriteByte(newline) + } + + // add newline if it's between other printed nodes + if index > 0 { + commented = true + buf.WriteByte(newline) + } + + // Store this position + lastCommentPos = comment.Pos() + + // output the comment itself + buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) + + // Set printed to true to note that we printed something + printed = true + + /* + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + */ + } + } + + // Stuff to do if we had comments + if printed { + // Always write a newline + buf.WriteByte(newline) + + // If there is another item in the object and our comment + // didn't hug it directly, then make sure there is a blank + // line separating them. + if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { + buf.WriteByte(newline) + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // At this point we are sure that it's not a totally empty block: print + // the initial newline if it hasn't been printed yet by the previous + // block about standalone comments. + if !newlinePrinted { + buf.WriteByte(newline) + newlinePrinted = true + } + + // check if we have adjacent one liner items. If yes we'll going to align + // the comments. + var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. 
+ // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. +func (p *printer) list(l *ast.ListType) []byte { + if p.isSingleLineList(l) { + return p.singleLineList(l) + } + + var buf bytes.Buffer + buf.WriteString("[") + buf.WriteByte(newline) + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + haveEmptyLine := false + for i, item := range l.List { + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // Ensure an empty line before every element with a + // lead comment (except the first item in a list). + if !haveEmptyLine && i != 0 { + buf.WriteByte(newline) + } + + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + } + } + + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. 
+ comma := []byte{','} + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + buf.WriteByte(newline) + + // Ensure an empty line after every element with a + // lead comment (except the first item in a list). + haveEmptyLine = leadComment && i != len(l.List)-1 + if haveEmptyLine { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// isSingleLineList returns true if: +// * they were previously formatted entirely on one line +// * they consist entirely of literals +// * there are either no heredoc strings or the list has exactly one element +// * there are no line comments +func (printer) isSingleLineList(l *ast.ListType) bool { + for _, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + return false + } + + lit, ok := item.(*ast.LiteralType) + if !ok { + return false + } + + if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { + return false + } + + if lit.LineComment != nil { + return false + } + } + + return true +} + +// singleLineList prints a simple single line list. +// For a definition of "simple", see isSingleLineList above. +func (p *printer) singleLineList(l *ast.ListType) []byte { + buf := &bytes.Buffer{} + + buf.WriteString("[") + for i, item := range l.List { + if i != 0 { + buf.WriteString(", ") + } + + // Output the item itself + buf.Write(p.output(item)) + + // The heredoc marker needs to be at the end of line. + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". 
+// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! + return len(ot.List.Items) == 0 +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 000000000..6617ab8e7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,66 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. +func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + // Add trailing newline to result + buf.WriteString("\n") + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 000000000..624a18fe3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,652 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. 
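Before the scanner below, note that the printer package just added exposes two public entry points, Format for raw source and Config.Fprint for an already parsed node. A short usage sketch, assuming the vendored import paths; the input string is only an example:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/hashicorp/hcl/hcl/parser"
        "github.com/hashicorp/hcl/hcl/printer"
    )

    func main() {
        src := []byte("region=\"eu-west-1\"\nzones = [ \"a\",\"b\" ]")

        // Format parses and re-prints in one step, using DefaultConfig
        // (two-space indentation) and appending a trailing newline.
        out, err := printer.Format(src)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s", out)

        // Config.Fprint allows a custom indentation width.
        node, err := parser.Parse(src)
        if err != nil {
            log.Fatal(err)
        }
        cfg := printer.Config{SpacesWidth: 4}
        if err := cfg.Fprint(os.Stdout, node); err != nil {
            log.Fatal(err)
        }
    }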
+package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == utf8.RuneError && size == 1 { + s.err("illegal UTF-8 encoding") + return ch + } + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + if ch == '\x00' { + s.err("unexpected null character (0x00)") + return eof + } + + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
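Scan below is the scanner's main entry point; callers loop until token.EOF and may hook the Error callback instead of letting messages go to os.Stderr. A minimal sketch with the vendored import paths:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/scanner"
        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        src := []byte(`port = 8080 # listen here`)

        s := scanner.New(src)
        s.Error = func(pos token.Pos, msg string) {
            fmt.Printf("scan error at %s: %s\n", pos, msg)
        }

        for {
            tok := s.Scan()
            if tok.Type == token.EOF {
                break
            }
            fmt.Printf("%-10v %q (line %d, col %d)\n",
                tok.Type, tok.Text, tok.Pos.Line, tok.Pos.Column)
        }

        if s.ErrorCount > 0 {
            fmt.Println("errors:", s.ErrorCount)
        }
    }

For the input above this yields an IDENT, an ASSIGN, a NUMBER and a COMMENT token before EOF.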
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start && ch != eof { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 000000000..5f981eaa2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does 
not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. 
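Unquote above differs from the standard library's strconv.Unquote chiefly in that ${...} interpolation sequences are copied through verbatim rather than escape-decoded. A small sketch, importing the vendored package under the same alias token.go uses; the inputs are illustrative:

    package main

    import (
        "fmt"

        hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
    )

    func main() {
        for _, in := range []string{
            `"plain \"quoted\" text"`,   // escapes are decoded
            `"addr = ${var.host}:8080"`, // ${...} is copied through verbatim
            `"unterminated`,             // missing closing quote
        } {
            out, err := hclstrconv.Unquote(in)
            if err != nil {
                fmt.Printf("%-30s -> error: %v\n", in, err)
                continue
            }
            fmt.Printf("%-30s -> %s\n", in, out)
        }
    }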
+func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
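The Pos helpers here (IsValid, String, Before and the After method that follows) are small but used throughout the parser's error reporting. A tiny sketch of how they behave; the positions are made up for illustration:

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/token"
    )

    func main() {
        a := token.Pos{Filename: "main.hcl", Offset: 0, Line: 1, Column: 1}
        b := token.Pos{Filename: "main.hcl", Offset: 12, Line: 2, Column: 3}
        var zero token.Pos // Line == 0, so not valid

        fmt.Println(a, "before", b, "=", a.Before(b)) // true
        fmt.Println(b, "after", a, "=", b.After(a))   // true
        fmt.Println("zero valid:", zero.IsValid(), "prints as", zero.String())
    }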
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..e37c0664e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! 
We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 000000000..125a5f072 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. 
If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? 
not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 000000000..fe3f0f095 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given 
rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa beginning from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. 
+type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 000000000..95a0c3eee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. 
+func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 000000000..d9993c292 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. +func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 000000000..1fca53c4c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/jgautheron/goconst/LICENSE b/vendor/github.com/jgautheron/goconst/LICENSE new file mode 100644 index 000000000..e92649543 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jonathan Gautheron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jgautheron/goconst/README.md b/vendor/github.com/jgautheron/goconst/README.md new file mode 100644 index 000000000..8dd093baf --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/README.md @@ -0,0 +1,50 @@ +# goconst + +Find repeated strings that could be replaced by a constant. + +### Motivation + +There are obvious benefits to using constants instead of repeating strings, mostly to ease maintenance. Cannot argue against changing a single constant versus many strings. + +While this could be considered a beginner mistake, across time, multiple packages and large codebases, some repetition could have slipped in. + +### Get Started + + $ go get github.com/jgautheron/goconst/cmd/goconst + $ goconst ./... + +### Usage + +``` +Usage: + + goconst ARGS + +Flags: + + -ignore exclude files matching the given regular expression + -ignore-tests exclude tests from the search (default: true) + -min-occurrences report from how many occurrences (default: 2) + -min-length only report strings with the minimum given length (default: 3) + -match-constant look for existing constants matching the values + -numbers search also for duplicated numbers + -min minimum value, only works with -numbers + -max maximum value, only works with -numbers + -output output formatting (text or json) + -set-exit-status Set exit status to 2 if any issues are found + +Examples: + + goconst ./... + goconst -ignore "yacc|\.pb\." $GOPATH/src/github.com/cockroachdb/cockroach/... + goconst -min-occurrences 3 -output json $GOPATH/src/github.com/cockroachdb/cockroach + goconst -numbers -min 60 -max 512 . +``` + +### Other static analysis tools + +- [gogetimports](https://github.com/jgautheron/gogetimports): Get a JSON-formatted list of imports. +- [usedexports](https://github.com/jgautheron/usedexports): Find exported variables that could be unexported. 
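For a concrete sense of what goconst reports (editor's illustration with a hypothetical file; the default `-min-length 3` and `-min-occurrences 2` thresholds are assumed):

```go
package status

// "pending" occurs twice below, so goconst suggests extracting a constant;
// with -match-constant it would instead point at an already-declared one.
func isOpen(s string) bool { return s == "pending" }

func next(s string) string {
	if s == "pending" {
		return "processing"
	}
	return s
}
```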
+ +### License +MIT diff --git a/vendor/github.com/jgautheron/goconst/api.go b/vendor/github.com/jgautheron/goconst/api.go new file mode 100644 index 000000000..d56fcd6c2 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/api.go @@ -0,0 +1,74 @@ +package goconst + +import ( + "go/ast" + "go/token" + "strings" +) + +type Issue struct { + Pos token.Position + OccurrencesCount int + Str string + MatchingConst string +} + +type Config struct { + IgnoreTests bool + MatchWithConstants bool + MinStringLength int + MinOccurrences int + ParseNumbers bool + NumberMin int + NumberMax int + ExcludeTypes map[Type]bool +} + +func Run(files []*ast.File, fset *token.FileSet, cfg *Config) ([]Issue, error) { + p := New( + "", + "", + cfg.IgnoreTests, + cfg.MatchWithConstants, + cfg.ParseNumbers, + cfg.NumberMin, + cfg.NumberMax, + cfg.MinStringLength, + cfg.MinOccurrences, + cfg.ExcludeTypes, + ) + var issues []Issue + for _, f := range files { + if p.ignoreTests { + if filename := fset.Position(f.Pos()).Filename; strings.HasSuffix(filename, testSuffix) { + continue + } + } + ast.Walk(&treeVisitor{ + fileSet: fset, + packageName: "", + fileName: "", + p: p, + }, f) + } + p.ProcessResults() + + for str, item := range p.strs { + fi := item[0] + i := Issue{ + Pos: fi.Position, + OccurrencesCount: len(item), + Str: str, + } + + if len(p.consts) != 0 { + if cst, ok := p.consts[str]; ok { + // const should be in the same package and exported + i.MatchingConst = cst.Name + } + } + issues = append(issues, i) + } + + return issues, nil +} diff --git a/vendor/github.com/jgautheron/goconst/go.mod b/vendor/github.com/jgautheron/goconst/go.mod new file mode 100644 index 000000000..53dbfbbb9 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/go.mod @@ -0,0 +1,3 @@ +module github.com/jgautheron/goconst + +go 1.13 diff --git a/vendor/github.com/jgautheron/goconst/parser.go b/vendor/github.com/jgautheron/goconst/parser.go new file mode 100644 index 000000000..2ed7a9a90 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/parser.go @@ -0,0 +1,176 @@ +// Package goconst finds repeated strings that could be replaced by a constant. +// +// There are obvious benefits to using constants instead of repeating strings, +// mostly to ease maintenance. Cannot argue against changing a single constant versus many strings. +// While this could be considered a beginner mistake, across time, +// multiple packages and large codebases, some repetition could have slipped in. +package goconst + +import ( + "go/ast" + "go/parser" + "go/token" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +const ( + testSuffix = "_test.go" +) + +type Parser struct { + // Meant to be passed via New() + path, ignore string + ignoreTests, matchConstant bool + minLength, minOccurrences int + numberMin, numberMax int + excludeTypes map[Type]bool + + supportedTokens []token.Token + + // Internals + strs Strings + consts Constants +} + +// New creates a new instance of the parser. +// This is your entry point if you'd like to use goconst as an API. 
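// A minimal usage sketch (editor's note, not part of the upstream package;
// the arguments roughly mirror the CLI flag defaults described in the README):
//
//	p := goconst.New("./...", "", true, false, false, 0, 0, 3, 2, nil)
//	strs, consts, err := p.ParseTree()
//	// strs: repeated literal -> its occurrences
//	// consts: literal value -> existing constant (when matching is enabled)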
+func New(path, ignore string, ignoreTests, matchConstant, numbers bool, numberMin, numberMax, minLength, minOccurrences int, excludeTypes map[Type]bool) *Parser { + supportedTokens := []token.Token{token.STRING} + if numbers { + supportedTokens = append(supportedTokens, token.INT, token.FLOAT) + } + + return &Parser{ + path: path, + ignore: ignore, + ignoreTests: ignoreTests, + matchConstant: matchConstant, + minLength: minLength, + minOccurrences: minOccurrences, + numberMin: numberMin, + numberMax: numberMax, + supportedTokens: supportedTokens, + excludeTypes: excludeTypes, + + // Initialize the maps + strs: Strings{}, + consts: Constants{}, + } +} + +// ParseTree will search the given path for occurrences that could be moved into constants. +// If "..." is appended, the search will be recursive. +func (p *Parser) ParseTree() (Strings, Constants, error) { + pathLen := len(p.path) + // Parse recursively the given path if the recursive notation is found + if pathLen >= 5 && p.path[pathLen-3:] == "..." { + filepath.Walk(p.path[:pathLen-3], func(path string, f os.FileInfo, err error) error { + if err != nil { + log.Println(err) + // resume walking + return nil + } + + if f.IsDir() { + p.parseDir(path) + } + return nil + }) + } else { + p.parseDir(p.path) + } + + p.ProcessResults() + + return p.strs, p.consts, nil +} + +// ProcessResults post-processes the raw results. +func (p *Parser) ProcessResults() { + for str, item := range p.strs { + // Filter out items whose occurrences don't match the min value + if len(item) < p.minOccurrences { + delete(p.strs, str) + } + + // If the value is a number + if i, err := strconv.Atoi(str); err == nil { + if p.numberMin != 0 && i < p.numberMin { + delete(p.strs, str) + } + if p.numberMax != 0 && i > p.numberMax { + delete(p.strs, str) + } + } + } +} + +func (p *Parser) parseDir(dir string) error { + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, dir, func(info os.FileInfo) bool { + valid, name := true, info.Name() + + if p.ignoreTests { + if strings.HasSuffix(name, testSuffix) { + valid = false + } + } + + if len(p.ignore) != 0 { + match, err := regexp.MatchString(p.ignore, dir+name) + if err != nil { + log.Fatal(err) + return true + } + if match { + valid = false + } + } + + return valid + }, 0) + if err != nil { + return err + } + + for _, pkg := range pkgs { + for fn, f := range pkg.Files { + ast.Walk(&treeVisitor{ + fileSet: fset, + packageName: pkg.Name, + fileName: fn, + p: p, + }, f) + } + } + + return nil +} + +type Strings map[string][]ExtendedPos +type Constants map[string]ConstType + +type ConstType struct { + token.Position + Name, packageName string +} + +type ExtendedPos struct { + token.Position + packageName string +} + +type Type int + +const ( + Assignment Type = iota + Binary + Case + Return + Call +) diff --git a/vendor/github.com/jgautheron/goconst/visitor.go b/vendor/github.com/jgautheron/goconst/visitor.go new file mode 100644 index 000000000..c0974da8f --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/visitor.go @@ -0,0 +1,160 @@ +package goconst + +import ( + "go/ast" + "go/token" + "strconv" + "strings" +) + +// treeVisitor carries the package name and file name +// for passing it to the imports map, and the fileSet for +// retrieving the token.Position. +type treeVisitor struct { + p *Parser + fileSet *token.FileSet + packageName, fileName string +} + +// Visit browses the AST tree for strings that could be potentially +// replaced by constants. 
+// A map of existing constants is built as well (-match-constant). +func (v *treeVisitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + return v + } + + // A single case with "ast.BasicLit" would be much easier + // but then we wouldn't be able to tell in which context + // the string is defined (could be a constant definition). + switch t := node.(type) { + // Scan for constants in an attempt to match strings with existing constants + case *ast.GenDecl: + if !v.p.matchConstant { + return v + } + if t.Tok != token.CONST { + return v + } + + for _, spec := range t.Specs { + val := spec.(*ast.ValueSpec) + for i, str := range val.Values { + lit, ok := str.(*ast.BasicLit) + if !ok || !v.isSupported(lit.Kind) { + continue + } + + v.addConst(val.Names[i].Name, lit.Value, val.Names[i].Pos()) + } + } + + // foo := "moo" + case *ast.AssignStmt: + for _, rhs := range t.Rhs { + lit, ok := rhs.(*ast.BasicLit) + if !ok || !v.isSupported(lit.Kind) { + continue + } + + v.addString(lit.Value, rhs.(*ast.BasicLit).Pos(), Assignment) + } + + // if foo == "moo" + case *ast.BinaryExpr: + if t.Op != token.EQL && t.Op != token.NEQ { + return v + } + + var lit *ast.BasicLit + var ok bool + + lit, ok = t.X.(*ast.BasicLit) + if ok && v.isSupported(lit.Kind) { + v.addString(lit.Value, lit.Pos(), Binary) + } + + lit, ok = t.Y.(*ast.BasicLit) + if ok && v.isSupported(lit.Kind) { + v.addString(lit.Value, lit.Pos(), Binary) + } + + // case "foo": + case *ast.CaseClause: + for _, item := range t.List { + lit, ok := item.(*ast.BasicLit) + if ok && v.isSupported(lit.Kind) { + v.addString(lit.Value, lit.Pos(), Case) + } + } + + // return "boo" + case *ast.ReturnStmt: + for _, item := range t.Results { + lit, ok := item.(*ast.BasicLit) + if ok && v.isSupported(lit.Kind) { + v.addString(lit.Value, lit.Pos(), Return) + } + } + + // fn("http://") + case *ast.CallExpr: + for _, item := range t.Args { + lit, ok := item.(*ast.BasicLit) + if ok && v.isSupported(lit.Kind) { + v.addString(lit.Value, lit.Pos(), Call) + } + } + } + + return v +} + +// addString adds a string in the map along with its position in the tree. +func (v *treeVisitor) addString(str string, pos token.Pos, typ Type) { + ok, excluded := v.p.excludeTypes[typ] + if ok && excluded { + return + } + // Drop quotes if any + if strings.HasPrefix(str, `"`) || strings.HasPrefix(str, "`") { + str, _ = strconv.Unquote(str) + } + + // Ignore empty strings + if len(str) == 0 { + return + } + + if len(str) < v.p.minLength { + return + } + + _, ok = v.p.strs[str] + if !ok { + v.p.strs[str] = make([]ExtendedPos, 0) + } + v.p.strs[str] = append(v.p.strs[str], ExtendedPos{ + packageName: v.packageName, + Position: v.fileSet.Position(pos), + }) +} + +// addConst adds a const in the map along with its position in the tree. 
+func (v *treeVisitor) addConst(name string, val string, pos token.Pos) { + val = strings.Replace(val, `"`, "", 2) + v.p.consts[val] = ConstType{ + Name: name, + packageName: v.packageName, + Position: v.fileSet.Position(pos), + } +} + +func (v *treeVisitor) isSupported(tk token.Token) bool { + for _, s := range v.p.supportedTokens { + if tk == s { + return true + } + } + return false +} diff --git a/vendor/github.com/jingyugao/rowserrcheck/LICENSE b/vendor/github.com/jingyugao/rowserrcheck/LICENSE new file mode 100644 index 000000000..6957f1889 --- /dev/null +++ b/vendor/github.com/jingyugao/rowserrcheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Seiji Takahashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jingyugao/rowserrcheck/passes/rowserr/rowserr.go b/vendor/github.com/jingyugao/rowserrcheck/passes/rowserr/rowserr.go new file mode 100644 index 000000000..ac0177f6e --- /dev/null +++ b/vendor/github.com/jingyugao/rowserrcheck/passes/rowserr/rowserr.go @@ -0,0 +1,331 @@ +package rowserr + +import ( + "go/ast" + "go/types" + "strconv" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +func NewAnalyzer(sqlPkgs ...string) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "rowserrcheck", + Doc: Doc, + Run: NewRun(sqlPkgs...), + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, + } +} + +const ( + Doc = "rowserrcheck checks whether Rows.Err is checked" + errMethod = "Err" + rowsName = "Rows" +) + +type runner struct { + pass *analysis.Pass + rowsTyp *types.Pointer + rowsObj types.Object + skipFile map[*ast.File]bool + sqlPkgs []string +} + +func NewRun(pkgs ...string) func(pass *analysis.Pass) (interface{}, error) { + return func(pass *analysis.Pass) (interface{}, error) { + sqlPkgs := append(pkgs, "database/sql") + for _, pkg := range sqlPkgs { + r := new(runner) + r.sqlPkgs = sqlPkgs + r.run(pass, pkg) + } + return nil, nil + } +} + +// run executes an analysis for the pass. The receiver is passed +// by value because this func is called in parallel for different passes. 
+func (r runner) run(pass *analysis.Pass, pkgPath string) { + r.pass = pass + pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + funcs := pssa.SrcFuncs + + pkg := pssa.Pkg.Prog.ImportedPackage(pkgPath) + if pkg == nil { + // skip + return + } + + rowsType := pkg.Type(rowsName) + if rowsType == nil { + // skip checking + return + } + + r.rowsObj = rowsType.Object() + if r.rowsObj == nil { + // skip checking + return + } + + resNamed, ok := r.rowsObj.Type().(*types.Named) + if !ok { + return + } + + r.rowsTyp = types.NewPointer(resNamed) + r.skipFile = map[*ast.File]bool{} + + for _, f := range funcs { + if r.noImportedDBSQL(f) { + // skip this + continue + } + + // skip if the function is just referenced + var isRefFunc bool + + for i := 0; i < f.Signature.Results().Len(); i++ { + if types.Identical(f.Signature.Results().At(i).Type(), r.rowsTyp) { + isRefFunc = true + } + } + + if isRefFunc { + continue + } + + for _, b := range f.Blocks { + for i := range b.Instrs { + if r.errCallMissing(b, i) { + pass.Reportf(b.Instrs[i].Pos(), "rows.Err must be checked") + } + } + } + } +} + +func (r *runner) errCallMissing(b *ssa.BasicBlock, i int) (ret bool) { + call, ok := r.getCallReturnsRow(b.Instrs[i]) + if !ok { + return false + } + + for _, cRef := range *call.Referrers() { + val, ok := r.getRowsVal(cRef) + if !ok { + continue + } + if len(*val.Referrers()) == 0 { + continue + } + resRefs := *val.Referrers() + var errCalled func(resRef ssa.Instruction) bool + errCalled = func(resRef ssa.Instruction) bool { + switch resRef := resRef.(type) { + case *ssa.Phi: + for _, rf := range *resRef.Referrers() { + if errCalled(rf) { + return true + } + } + case *ssa.Store: // Call in Closure function + for _, aref := range *resRef.Addr.Referrers() { + switch c := aref.(type) { + case *ssa.MakeClosure: + f := c.Fn.(*ssa.Function) + if r.noImportedDBSQL(f) { + // skip this + continue + } + called := r.isClosureCalled(c) + if r.calledInFunc(f, called) { + return true + } + case *ssa.UnOp: + for _, rf := range *c.Referrers() { + if errCalled(rf) { + return true + } + } + } + } + case *ssa.Call: // Indirect function call + if r.isErrCall(resRef) { + return true + } + if f, ok := resRef.Call.Value.(*ssa.Function); ok { + for _, b := range f.Blocks { + for i := range b.Instrs { + if !r.errCallMissing(b, i) { + return true + } + } + } + } + case *ssa.FieldAddr: + for _, bRef := range *resRef.Referrers() { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + + for _, ccall := range *bOp.Referrers() { + if r.isErrCall(ccall) { + return true + } + } + } + } + + return false + } + + for _, resRef := range resRefs { + if errCalled(resRef) { + return false + } + } + } + + return true +} + +func (r *runner) getCallReturnsRow(instr ssa.Instruction) (*ssa.Call, bool) { + call, ok := instr.(*ssa.Call) + if !ok { + return nil, false + } + + res := call.Call.Signature().Results() + flag := false + + for i := 0; i < res.Len(); i++ { + flag = flag || types.Identical(res.At(i).Type(), r.rowsTyp) + } + + if !flag { + return nil, false + } + + return call, true +} + +func (r *runner) getRowsVal(instr ssa.Instruction) (ssa.Value, bool) { + switch instr := instr.(type) { + case *ssa.Call: + if len(instr.Call.Args) == 1 && types.Identical(instr.Call.Args[0].Type(), r.rowsTyp) { + return instr.Call.Args[0], true + } + case ssa.Value: + if types.Identical(instr.Type(), r.rowsTyp) { + return instr, true + } + default: + } + + return nil, false +} + +func (r *runner) getBodyOp(instr ssa.Instruction) (*ssa.UnOp, bool) { + op, 
ok := instr.(*ssa.UnOp) + if !ok { + return nil, false + } + // fix: try to check type + // if op.Type() != r.rowsObj.Type() { + // return nil, false + // } + return op, true +} + +func (r *runner) isErrCall(ccall ssa.Instruction) bool { + switch ccall := ccall.(type) { + case *ssa.Defer: + if ccall.Call.Value != nil && ccall.Call.Value.Name() == errMethod { + return true + } + case *ssa.Call: + if ccall.Call.Value != nil && ccall.Call.Value.Name() == errMethod { + return true + } + } + + return false +} + +func (r *runner) isClosureCalled(c *ssa.MakeClosure) bool { + for _, ref := range *c.Referrers() { + switch ref.(type) { + case *ssa.Call, *ssa.Defer: + return true + } + } + + return false +} + +func (r *runner) noImportedDBSQL(f *ssa.Function) (ret bool) { + obj := f.Object() + if obj == nil { + return false + } + + file := analysisutil.File(r.pass, obj.Pos()) + if file == nil { + return false + } + + if skip, has := r.skipFile[file]; has { + return skip + } + defer func() { + r.skipFile[file] = ret + }() + + for _, impt := range file.Imports { + path, err := strconv.Unquote(impt.Path.Value) + if err != nil { + continue + } + path = analysisutil.RemoveVendor(path) + for _, pkg := range r.sqlPkgs { + if pkg == path { + return false + } + } + } + + return true +} + +func (r *runner) calledInFunc(f *ssa.Function, called bool) bool { + for _, b := range f.Blocks { + for i, instr := range b.Instrs { + switch instr := instr.(type) { + case *ssa.UnOp: + for _, ref := range *instr.Referrers() { + if v, ok := ref.(ssa.Value); ok { + if vCall, ok := v.(*ssa.Call); ok { + if vCall.Call.Value != nil && vCall.Call.Value.Name() == errMethod { + if called { + return true + } + } + } + } + } + default: + if r.errCallMissing(b, i) || !called { + return false + } + } + } + } + return false +} diff --git a/vendor/github.com/jirfag/go-printf-func-name/LICENSE b/vendor/github.com/jirfag/go-printf-func-name/LICENSE new file mode 100644 index 000000000..d06a809c2 --- /dev/null +++ b/vendor/github.com/jirfag/go-printf-func-name/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Isaev Denis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
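The rowserrcheck analyzer vendored above reports result sets whose Err method is never consulted. As an editor's sketch of the calling pattern it looks for, using only the standard database/sql API (query string and names are illustrative):

```go
package dbexample

import "database/sql"

// listNames shows the rows.Err check that rowserrcheck expects: iteration can
// stop because of a driver error, and only rows.Err reveals it.
func listNames(db *sql.DB) ([]string, error) {
	rows, err := db.Query("SELECT name FROM users") // hypothetical query
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	// Omitting this check is what triggers "rows.Err must be checked".
	return names, rows.Err()
}
```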
diff --git a/vendor/github.com/jirfag/go-printf-func-name/pkg/analyzer/analyzer.go b/vendor/github.com/jirfag/go-printf-func-name/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..7937dd433 --- /dev/null +++ b/vendor/github.com/jirfag/go-printf-func-name/pkg/analyzer/analyzer.go @@ -0,0 +1,74 @@ +package analyzer + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "goprintffuncname", + Doc: "Checks that printf-like functions are named with `f` at the end.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl := node.(*ast.FuncDecl) + + if res := funcDecl.Type.Results; res != nil && len(res.List) != 0 { + return + } + + params := funcDecl.Type.Params.List + if len(params) < 2 { // [0] must be format (string), [1] must be args (...interface{}) + return + } + + formatParamType, ok := params[len(params)-2].Type.(*ast.Ident) + if !ok { // first param type isn't identificator so it can't be of type "string" + return + } + + if formatParamType.Name != "string" { // first param (format) type is not string + return + } + + if formatParamNames := params[len(params)-2].Names; len(formatParamNames) == 0 || formatParamNames[len(formatParamNames)-1].Name != "format" { + return + } + + argsParamType, ok := params[len(params)-1].Type.(*ast.Ellipsis) + if !ok { // args are not ellipsis (...args) + return + } + + elementType, ok := argsParamType.Elt.(*ast.InterfaceType) + if !ok { // args are not of interface type, but we need interface{} + return + } + + if elementType.Methods != nil && len(elementType.Methods.List) != 0 { + return // has >= 1 method in interface, but we need an empty interface "interface{}" + } + + if strings.HasSuffix(funcDecl.Name.Name, "f") { + return + } + + pass.Reportf(node.Pos(), "printf-like formatting function '%s' should be named '%sf'", + funcDecl.Name.Name, funcDecl.Name.Name) + }) + + return nil, nil +} diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum index d778b5a14..be00a6df9 100644 --- a/vendor/github.com/json-iterator/go/go.sum +++ b/vendor/github.com/json-iterator/go/go.sum @@ -9,6 +9,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLD github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go index b9754638e..8a3d8b6fb 100644 --- a/vendor/github.com/json-iterator/go/iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -288,6 +288,9 @@ non_decimal_loop: return 
iter.readFloat64SlowPath() } value = (value << 3) + (value << 1) + uint64(ind) + if value > maxFloat64 { + return iter.readFloat64SlowPath() + } } } return iter.readFloat64SlowPath() diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go index 214232035..d786a89fe 100644 --- a/vendor/github.com/json-iterator/go/iter_int.go +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -9,6 +9,7 @@ var intDigits []int8 const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 +const maxFloat64 = 1<<53 - 1 func init() { intDigits = make([]int8, 256) @@ -339,7 +340,7 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { } func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + if iter.head < iter.tail && iter.buf[iter.head] == '.' { iter.ReportError("assertInteger", "can not decode float as int") } } diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 74974ba74..39acb320a 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -65,7 +65,7 @@ func (iter *Iterator) ReadVal(obj interface{}) { decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { + if typ == nil || typ.Kind() != reflect.Ptr { iter.ReportError("ReadVal", "can only unmarshal into pointer") return } diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go index f2619936c..eba434f2f 100644 --- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -33,11 +33,19 @@ type jsonRawMessageCodec struct { } func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*json.RawMessage)(ptr)) = nil + } else { + *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + if *((*json.RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) + } } func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { @@ -48,11 +56,19 @@ type jsoniterRawMessageCodec struct { } func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) + if iter.ReadNil() { + *((*RawMessage)(ptr)) = nil + } else { + *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() + } } func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) + if *((*RawMessage)(ptr)) == nil { + stream.WriteNil() + } else { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) + } } func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index d7eb0eb5c..92ae912dc 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -1075,6 +1075,11 @@ type stringModeNumberDecoder struct { } func 
(decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NilValue { + decoder.elemDecoder.Decode(ptr, iter) + return + } + c := iter.nextToken() if c != '"' { iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) diff --git a/vendor/github.com/julz/importas/.gitignore b/vendor/github.com/julz/importas/.gitignore new file mode 100644 index 000000000..c264e642f --- /dev/null +++ b/vendor/github.com/julz/importas/.gitignore @@ -0,0 +1,2 @@ +.idea/ +importas diff --git a/vendor/github.com/julz/importas/LICENSE b/vendor/github.com/julz/importas/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/julz/importas/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/julz/importas/README.md b/vendor/github.com/julz/importas/README.md new file mode 100644 index 000000000..9489fe7d8 --- /dev/null +++ b/vendor/github.com/julz/importas/README.md @@ -0,0 +1,47 @@ +A linter to enforce importing certain packages consistently. + +## What is this for? + +Ideally, go imports should avoid aliasing. Sometimes though, especially with +Kubernetes API code, it becomes unavoidable, because many packages are imported +as e.g. "[package]/v1alpha1" and you end up with lots of collisions if you use +"v1alpha1". + +This linter lets you enforce that whenever (for example) +"pkg/apis/serving/v1alpha1" is aliased, it is aliased as "servingv1alpha1". + +## Usage + +~~~~ +importas \ + -alias knative.dev/serving/pkg/apis/autoscaling/v1alpha1:autoscalingv1alpha1 \ + -alias knative.dev/serving/pkg/apis/serving/v1:servingv1 \ + ./... +~~~~ + +### `-no-unaliased` option + +By default, importas allows non-aliased imports, even when the package is specified by `-alias` flag. +With `-no-unaliased` option, importas does not allow this. + +~~~~ +importas -no-unaliased \ + -alias knative.dev/serving/pkg/apis/autoscaling/v1alpha1:autoscalingv1alpha1 \ + -alias knative.dev/serving/pkg/apis/serving/v1:servingv1 \ + ./... +~~~~ + +### Use regular expression + +You can specify the package path by regular expression, and alias by regular expression replacement syntax like following snippet. + +~~~~ +importas -alias 'knative.dev/serving/pkg/apis/(\w+)/(v[\w\d]+):$1$2' +~~~~ + +`$1` represents the text of the first submatch. See [detail](https://golang.org/pkg/regexp/#Regexp.Expand). 
+ +So it will enforce that + +"knative.dev/serving/pkg/apis/autoscaling/v1alpha1" is aliased by "autoscalingv1alpha1", and +"knative.dev/serving/pkg/apis/serving/v1" is aliased by "servingv1" diff --git a/vendor/github.com/julz/importas/analyzer.go b/vendor/github.com/julz/importas/analyzer.go new file mode 100644 index 000000000..4fbe104e5 --- /dev/null +++ b/vendor/github.com/julz/importas/analyzer.go @@ -0,0 +1,116 @@ +package importas + +import ( + "fmt" + "go/ast" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var config = &Config{ + RequiredAlias: make(map[string]string), +} + +var Analyzer = &analysis.Analyzer{ + Name: "importas", + Doc: "Enforces consistent import aliases", + Run: run, + + Flags: flags(config), + + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + return runWithConfig(config, pass) +} + +func runWithConfig(config *Config, pass *analysis.Pass) (interface{}, error) { + if err := config.CompileRegexp(); err != nil { + return nil, err + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + inspect.Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, func(n ast.Node) { + visitImportSpecNode(config, n.(*ast.ImportSpec), pass) + }) + + return nil, nil +} + +func visitImportSpecNode(config *Config, node *ast.ImportSpec, pass *analysis.Pass) { + if !config.DisallowUnaliased && node.Name == nil { + return + } + + alias := "" + if node.Name != nil { + alias = node.Name.String() + } + + if alias == "." { + return // Dot aliases are generally used in tests, so ignore. + } + + if strings.HasPrefix(alias, "_") { + return // Used by go test and for auto-includes, not a conflict. + } + + path, err := strconv.Unquote(node.Path.Value) + if err != nil { + pass.Reportf(node.Pos(), "import not quoted") + } + + if required, exists := config.AliasFor(path); exists && required != alias { + message := fmt.Sprintf("import %q imported as %q but must be %q according to config", path, alias, required) + if alias == "" { + message = fmt.Sprintf("import %q imported without alias but must be with alias %q according to config", path, required) + } + + pass.Report(analysis.Diagnostic{ + Pos: node.Pos(), + End: node.End(), + Message: message, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Use correct alias", + TextEdits: findEdits(node, pass.TypesInfo.Uses, path, alias, required), + }}, + }) + } +} + +func findEdits(node ast.Node, uses map[*ast.Ident]types.Object, importPath, original, required string) []analysis.TextEdit { + // Edit the actual import line. + result := []analysis.TextEdit{{ + Pos: node.Pos(), + End: node.End(), + NewText: []byte(required + " " + strconv.Quote(importPath)), + }} + + // Edit all the uses of the alias in the code. + for use, pkg := range uses { + pkgName, ok := pkg.(*types.PkgName) + if !ok { + // skip identifiers that aren't pointing at a PkgName. + continue + } + + if pkgName.Pos() != node.Pos() { + // skip identifiers pointing to a different import statement. 
+ continue + } + + result = append(result, analysis.TextEdit{ + Pos: use.Pos(), + End: use.End(), + NewText: []byte(required), + }) + } + + return result +} diff --git a/vendor/github.com/julz/importas/config.go b/vendor/github.com/julz/importas/config.go new file mode 100644 index 000000000..2e1c1d887 --- /dev/null +++ b/vendor/github.com/julz/importas/config.go @@ -0,0 +1,69 @@ +package importas + +import ( + "errors" + "fmt" + "regexp" +) + +type Config struct { + RequiredAlias map[string]string + Rules []*Rule + DisallowUnaliased bool +} + +func (c *Config) CompileRegexp() error { + rules := make([]*Rule, 0, len(c.RequiredAlias)) + for path, alias := range c.RequiredAlias { + reg, err := regexp.Compile(fmt.Sprintf("^%s$", path)) + if err != nil { + return err + } + + rules = append(rules, &Rule{ + Regexp: reg, + Alias: alias, + }) + } + + c.Rules = rules + return nil +} + +func (c *Config) findRule(path string) *Rule { + for _, rule := range c.Rules { + if rule.Regexp.MatchString(path) { + return rule + } + } + + return nil +} + +func (c *Config) AliasFor(path string) (string, bool) { + rule := c.findRule(path) + if rule == nil { + return "", false + } + + alias, err := rule.aliasFor(path) + if err != nil { + return "", false + } + + return alias, true +} + +type Rule struct { + Alias string + Regexp *regexp.Regexp +} + +func (r *Rule) aliasFor(path string) (string, error) { + str := r.Regexp.FindString(path) + if len(str) > 0 { + return r.Regexp.ReplaceAllString(str, r.Alias), nil + } + + return "", errors.New("mismatch rule") +} diff --git a/vendor/github.com/julz/importas/flags.go b/vendor/github.com/julz/importas/flags.go new file mode 100644 index 000000000..22be4af3e --- /dev/null +++ b/vendor/github.com/julz/importas/flags.go @@ -0,0 +1,31 @@ +package importas + +import ( + "errors" + "flag" + "fmt" + "strings" +) + +func flags(config *Config) flag.FlagSet { + fs := flag.FlagSet{} + fs.Var(stringMap(config.RequiredAlias), "alias", "required import alias in form path:alias") + fs.BoolVar(&config.DisallowUnaliased, "no-unaliased", false, "do not allow unaliased imports of aliased packages") + return fs +} + +type stringMap map[string]string + +func (v stringMap) Set(val string) error { + spl := strings.SplitN(val, ":", 2) + if len(spl) != 2 { + return errors.New("import flag must be of form path:alias") + } + + v[spl[0]] = spl[1] + return nil +} + +func (v stringMap) String() string { + return fmt.Sprintf("%v", (map[string]string)(v)) +} diff --git a/vendor/github.com/julz/importas/go.mod b/vendor/github.com/julz/importas/go.mod new file mode 100644 index 000000000..97d8c438b --- /dev/null +++ b/vendor/github.com/julz/importas/go.mod @@ -0,0 +1,5 @@ +module github.com/julz/importas + +go 1.15 + +require golang.org/x/tools v0.1.0 diff --git a/vendor/github.com/julz/importas/go.sum b/vendor/github.com/julz/importas/go.sum new file mode 100644 index 000000000..21d696a65 --- /dev/null +++ b/vendor/github.com/julz/importas/go.sum @@ -0,0 +1,26 @@ +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/kisielk/errcheck/LICENSE b/vendor/github.com/kisielk/errcheck/LICENSE new file mode 100644 index 000000000..a2b16b5bd --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
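For orientation before the implementation files that follow, here is a hedged sketch of how a caller might drive the errcheck library API defined in errcheck.go further below (Checker, Exclusions, DefaultExcludedSymbols, LoadPackages, CheckPackage, Result). It is a minimal illustration relying only on the symbols visible in this hunk, not the errcheck command itself:

~~~~
package main

import (
	"fmt"
	"log"

	"github.com/kisielk/errcheck/errcheck"
)

func main() {
	checker := &errcheck.Checker{}
	// Copy the library's default exclusions (they are not applied implicitly)
	// and skip _test.go files.
	checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errcheck.DefaultExcludedSymbols...)
	checker.Exclusions.TestFiles = true

	pkgs, err := checker.LoadPackages("./...")
	if err != nil {
		log.Fatal(err)
	}

	var result errcheck.Result
	for _, pkg := range pkgs {
		result.Append(checker.CheckPackage(pkg))
	}

	// Deduplicate errors reported for files shared by more than one package.
	for _, e := range result.Unique().UncheckedErrors {
		fmt.Printf("%s: unchecked error returned by %s\n", e.Pos, e.FuncName)
	}
}
~~~~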
diff --git a/vendor/github.com/kisielk/errcheck/errcheck/embedded_walker.go b/vendor/github.com/kisielk/errcheck/errcheck/embedded_walker.go new file mode 100644 index 000000000..3b3192580 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/errcheck/embedded_walker.go @@ -0,0 +1,144 @@ +package errcheck + +import ( + "fmt" + "go/types" +) + +// walkThroughEmbeddedInterfaces returns a slice of Interfaces that +// we need to walk through in order to reach the actual definition, +// in an Interface, of the method selected by the given selection. +// +// false will be returned in the second return value if: +// - the right side of the selection is not a function +// - the actual definition of the function is not in an Interface +// +// The returned slice will contain all the interface types that need +// to be walked through to reach the actual definition. +// +// For example, say we have: +// +// type Inner interface {Method()} +// type Middle interface {Inner} +// type Outer interface {Middle} +// type T struct {Outer} +// type U struct {T} +// type V struct {U} +// +// And then the selector: +// +// V.Method +// +// We'll return [Outer, Middle, Inner] by first walking through the embedded structs +// until we reach the Outer interface, then descending through the embedded interfaces +// until we find the one that actually explicitly defines Method. +func walkThroughEmbeddedInterfaces(sel *types.Selection) ([]types.Type, bool) { + fn, ok := sel.Obj().(*types.Func) + if !ok { + return nil, false + } + + // Start off at the receiver. + currentT := sel.Recv() + + // First, we can walk through any Struct fields provided + // by the selection Index() method. We ignore the last + // index because it would give the method itself. + indexes := sel.Index() + for _, fieldIndex := range indexes[:len(indexes)-1] { + currentT = getTypeAtFieldIndex(currentT, fieldIndex) + } + + // Now currentT is either a type implementing the actual function, + // an Invalid type (if the receiver is a package), or an interface. + // + // If it's not an Interface, then we're done, as this function + // only cares about Interface-defined functions. + // + // If it is an Interface, we potentially need to continue digging until + // we find the Interface that actually explicitly defines the function. + interfaceT, ok := maybeUnname(currentT).(*types.Interface) + if !ok { + return nil, false + } + + // The first interface we pass through is this one we've found. We return the possibly + // wrapping types.Named because it is more useful to work with for callers. + result := []types.Type{currentT} + + // If this interface itself explicitly defines the given method + // then we're done digging. + for !explicitlyDefinesMethod(interfaceT, fn) { + // Otherwise, we find which of the embedded interfaces _does_ + // define the method, add it to our list, and loop. + namedInterfaceT, ok := getEmbeddedInterfaceDefiningMethod(interfaceT, fn) + if !ok { + // This should be impossible as long as we type-checked: either the + // interface or one of its embedded ones must implement the method... 
+ panic(fmt.Sprintf("either %v or one of its embedded interfaces must implement %v", currentT, fn)) + } + result = append(result, namedInterfaceT) + interfaceT = namedInterfaceT.Underlying().(*types.Interface) + } + + return result, true +} + +func getTypeAtFieldIndex(startingAt types.Type, fieldIndex int) types.Type { + t := maybeUnname(maybeDereference(startingAt)) + s, ok := t.(*types.Struct) + if !ok { + panic(fmt.Sprintf("cannot get Field of a type that is not a struct, got a %T", t)) + } + + return s.Field(fieldIndex).Type() +} + +// getEmbeddedInterfaceDefiningMethod searches through any embedded interfaces of the +// passed interface searching for one that defines the given function. If found, the +// types.Named wrapping that interface will be returned along with true in the second value. +// +// If no such embedded interface is found, nil and false are returned. +func getEmbeddedInterfaceDefiningMethod(interfaceT *types.Interface, fn *types.Func) (*types.Named, bool) { + for i := 0; i < interfaceT.NumEmbeddeds(); i++ { + embedded := interfaceT.Embedded(i) + if definesMethod(embedded.Underlying().(*types.Interface), fn) { + return embedded, true + } + } + return nil, false +} + +func explicitlyDefinesMethod(interfaceT *types.Interface, fn *types.Func) bool { + for i := 0; i < interfaceT.NumExplicitMethods(); i++ { + if interfaceT.ExplicitMethod(i) == fn { + return true + } + } + return false +} + +func definesMethod(interfaceT *types.Interface, fn *types.Func) bool { + for i := 0; i < interfaceT.NumMethods(); i++ { + if interfaceT.Method(i) == fn { + return true + } + } + return false +} + +func maybeDereference(t types.Type) types.Type { + p, ok := t.(*types.Pointer) + if ok { + return p.Elem() + } + return t +} + +func maybeUnname(t types.Type) types.Type { + n, ok := t.(*types.Named) + if ok { + return n.Underlying() + } + return t +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go new file mode 100644 index 000000000..724e3e88f --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go @@ -0,0 +1,676 @@ +// Package errcheck is the library used to implement the errcheck command-line tool. +package errcheck + +import ( + "bufio" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +var errorType *types.Interface + +func init() { + errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) +} + +var ( + // ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files + ErrNoGoFiles = errors.New("package contains no go source files") + + // DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default. 
+ // + // Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols + DefaultExcludedSymbols = []string{ + // bytes + "(*bytes.Buffer).Write", + "(*bytes.Buffer).WriteByte", + "(*bytes.Buffer).WriteRune", + "(*bytes.Buffer).WriteString", + + // fmt + "fmt.Errorf", + "fmt.Print", + "fmt.Printf", + "fmt.Println", + "fmt.Fprint(*bytes.Buffer)", + "fmt.Fprintf(*bytes.Buffer)", + "fmt.Fprintln(*bytes.Buffer)", + "fmt.Fprint(*strings.Builder)", + "fmt.Fprintf(*strings.Builder)", + "fmt.Fprintln(*strings.Builder)", + "fmt.Fprint(os.Stderr)", + "fmt.Fprintf(os.Stderr)", + "fmt.Fprintln(os.Stderr)", + + // math/rand + "math/rand.Read", + "(*math/rand.Rand).Read", + + // strings + "(*strings.Builder).Write", + "(*strings.Builder).WriteByte", + "(*strings.Builder).WriteRune", + "(*strings.Builder).WriteString", + + // hash + "(hash.Hash).Write", + } +) + +// UncheckedError indicates the position of an unchecked error return. +type UncheckedError struct { + Pos token.Position + Line string + FuncName string + SelectorName string +} + +// Result is returned from the CheckPackage function, and holds all the errors +// that were found to be unchecked in a package. +// +// Aggregation can be done using the Append method for users that want to +// combine results from multiple packages. +type Result struct { + // UncheckedErrors is a list of all the unchecked errors in the package. + // Printing an error reports its position within the file and the contents of the line. + UncheckedErrors []UncheckedError +} + +type byName []UncheckedError + +// Less reports whether the element with index i should sort before the element with index j. +func (b byName) Less(i, j int) bool { + ei, ej := b[i], b[j] + + pi, pj := ei.Pos, ej.Pos + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return ei.Line < ej.Line +} + +func (b byName) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byName) Len() int { + return len(b) +} + +// Append appends errors to e. Append does not do any duplicate checking. +func (r *Result) Append(other Result) { + r.UncheckedErrors = append(r.UncheckedErrors, other.UncheckedErrors...) +} + +// Returns the unique errors that have been accumulated. Duplicates may occur +// when a file containing an unchecked error belongs to > 1 package. +// +// The method receiver remains unmodified after the call to Unique. +func (r Result) Unique() Result { + result := make([]UncheckedError, len(r.UncheckedErrors)) + copy(result, r.UncheckedErrors) + sort.Sort((byName)(result)) + uniq := result[:0] // compact in-place + for i, err := range result { + if i == 0 || err != result[i-1] { + uniq = append(uniq, err) + } + } + return Result{UncheckedErrors: uniq} +} + +// Exclusions define symbols and language elements that will be not checked +type Exclusions struct { + + // Packages lists paths of excluded packages. + Packages []string + + // SymbolRegexpsByPackage maps individual package paths to regular + // expressions that match symbols to be excluded. + // + // Packages whose paths appear both here and in Packages list will + // be excluded entirely. + // + // This is a legacy input that will be deprecated in errcheck version 2 and + // should not be used. + SymbolRegexpsByPackage map[string]*regexp.Regexp + + // Symbols lists patterns that exclude individual package symbols. 
+ // + // For example: + // + // "fmt.Errorf" // function + // "fmt.Fprintf(os.Stderr)" // function with set argument value + // "(hash.Hash).Write" // method + // + Symbols []string + + // TestFiles excludes _test.go files. + TestFiles bool + + // GeneratedFiles excludes generated source files. + // + // Source file is assumed to be generated if its contents + // match the following regular expression: + // + // ^// Code generated .* DO NOT EDIT\\.$ + // + GeneratedFiles bool + + // BlankAssignments ignores assignments to blank identifier. + BlankAssignments bool + + // TypeAssertions ignores unchecked type assertions. + TypeAssertions bool +} + +// Checker checks that you checked errors. +type Checker struct { + // Exclusions defines code packages, symbols, and other elements that will not be checked. + Exclusions Exclusions + + // Tags are a list of build tags to use. + Tags []string + + // The mod flag for go build. + Mod string +} + +// loadPackages is used for testing. +var loadPackages = func(cfg *packages.Config, paths ...string) ([]*packages.Package, error) { + return packages.Load(cfg, paths...) +} + +// LoadPackages loads all the packages in all the paths provided. It uses the +// exclusions and build tags provided to by the user when loading the packages. +func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) { + buildFlags := []string{fmtTags(c.Tags)} + if c.Mod != "" { + buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod)) + } + cfg := &packages.Config{ + Mode: packages.LoadAllSyntax, + Tests: !c.Exclusions.TestFiles, + BuildFlags: buildFlags, + } + return loadPackages(cfg, paths...) +} + +var generatedCodeRegexp = regexp.MustCompile("^// Code generated .* DO NOT EDIT\\.$") +var dotStar = regexp.MustCompile(".*") + +func (c *Checker) shouldSkipFile(file *ast.File) bool { + if !c.Exclusions.GeneratedFiles { + return false + } + + for _, cg := range file.Comments { + for _, comment := range cg.List { + if generatedCodeRegexp.MatchString(comment.Text) { + return true + } + } + } + + return false +} + +// CheckPackage checks packages for errors that have not been checked. +// +// It will exclude specific errors from analysis if the user has configured +// exclusions. +func (c *Checker) CheckPackage(pkg *packages.Package) Result { + excludedSymbols := map[string]bool{} + for _, sym := range c.Exclusions.Symbols { + excludedSymbols[sym] = true + } + + ignore := map[string]*regexp.Regexp{} + // Apply SymbolRegexpsByPackage first so that if the same path appears in + // Packages, a more narrow regexp will be superceded by dotStar below. + if regexps := c.Exclusions.SymbolRegexpsByPackage; regexps != nil { + for pkg, re := range regexps { + // TODO warn if previous entry overwritten? + ignore[nonVendoredPkgPath(pkg)] = re + } + } + for _, pkg := range c.Exclusions.Packages { + // TODO warn if previous entry overwritten? 
+ ignore[nonVendoredPkgPath(pkg)] = dotStar + } + + v := &visitor{ + pkg: pkg, + ignore: ignore, + blank: !c.Exclusions.BlankAssignments, + asserts: !c.Exclusions.TypeAssertions, + lines: make(map[string][]string), + exclude: excludedSymbols, + errors: []UncheckedError{}, + } + + for _, astFile := range v.pkg.Syntax { + if c.shouldSkipFile(astFile) { + continue + } + ast.Walk(v, astFile) + } + return Result{UncheckedErrors: v.errors} +} + +// visitor implements the errcheck algorithm +type visitor struct { + pkg *packages.Package + ignore map[string]*regexp.Regexp + blank bool + asserts bool + lines map[string][]string + exclude map[string]bool + + errors []UncheckedError +} + +// selectorAndFunc tries to get the selector and function from call expression. +// For example, given the call expression representing "a.b()", the selector +// is "a.b" and the function is "b" itself. +// +// The final return value will be true if it is able to do extract a selector +// from the call and look up the function object it refers to. +// +// If the call does not include a selector (like if it is a plain "f()" function call) +// then the final return value will be false. +func (v *visitor) selectorAndFunc(call *ast.CallExpr) (*ast.SelectorExpr, *types.Func, bool) { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return nil, nil, false + } + + fn, ok := v.pkg.TypesInfo.ObjectOf(sel.Sel).(*types.Func) + if !ok { + // Shouldn't happen, but be paranoid + return nil, nil, false + } + + return sel, fn, true + +} + +// fullName will return a package / receiver-type qualified name for a called function +// if the function is the result of a selector. Otherwise it will return +// the empty string. +// +// The name is fully qualified by the import path, possible type, +// function/method name and pointer receiver. +// +// For example, +// - for "fmt.Printf(...)" it will return "fmt.Printf" +// - for "base64.StdEncoding.Decode(...)" it will return "(*encoding/base64.Encoding).Decode" +// - for "myFunc()" it will return "" +func (v *visitor) fullName(call *ast.CallExpr) string { + _, fn, ok := v.selectorAndFunc(call) + if !ok { + return "" + } + + // TODO(dh): vendored packages will have /vendor/ in their name, + // thus not matching vendored standard library packages. If we + // want to support vendored stdlib packages, we need to implement + // FullName with our own logic. + return fn.FullName() +} + +func getSelectorName(sel *ast.SelectorExpr) string { + if ident, ok := sel.X.(*ast.Ident); ok { + return fmt.Sprintf("%s.%s", ident.Name, sel.Sel.Name) + } + if s, ok := sel.X.(*ast.SelectorExpr); ok { + return fmt.Sprintf("%s.%s", getSelectorName(s), sel.Sel.Name) + } + + return "" +} + +// selectorName will return a name for a called function +// if the function is the result of a selector. Otherwise it will return +// the empty string. +// +// The name is fully qualified by the import path, possible type, +// function/method name and pointer receiver. +// +// For example, +// - for "fmt.Printf(...)" it will return "fmt.Printf" +// - for "base64.StdEncoding.Decode(...)" it will return "base64.StdEncoding.Decode" +// - for "myFunc()" it will return "" +func (v *visitor) selectorName(call *ast.CallExpr) string { + sel, _, ok := v.selectorAndFunc(call) + if !ok { + return "" + } + + return getSelectorName(sel) +} + +// namesForExcludeCheck will return a list of fully-qualified function names +// from a function call that can be used to check against the exclusion list. 
+// +// If a function call is against a local function (like "myFunc()") then no +// names are returned. If the function is package-qualified (like "fmt.Printf()") +// then just that function's fullName is returned. +// +// Otherwise, we walk through all the potentially embeddded interfaces of the receiver +// the collect a list of type-qualified function names that we will check. +func (v *visitor) namesForExcludeCheck(call *ast.CallExpr) []string { + sel, fn, ok := v.selectorAndFunc(call) + if !ok { + return nil + } + + name := v.fullName(call) + if name == "" { + return nil + } + + // This will be missing for functions without a receiver (like fmt.Printf), + // so just fall back to the the function's fullName in that case. + selection, ok := v.pkg.TypesInfo.Selections[sel] + if !ok { + return []string{name} + } + + // This will return with ok false if the function isn't defined + // on an interface, so just fall back to the fullName. + ts, ok := walkThroughEmbeddedInterfaces(selection) + if !ok { + return []string{name} + } + + result := make([]string, len(ts)) + for i, t := range ts { + // Like in fullName, vendored packages will have /vendor/ in their name, + // thus not matching vendored standard library packages. If we + // want to support vendored stdlib packages, we need to implement + // additional logic here. + result[i] = fmt.Sprintf("(%s).%s", t.String(), fn.Name()) + } + return result +} + +// isBufferType checks if the expression type is a known in-memory buffer type. +func (v *visitor) argName(expr ast.Expr) string { + // Special-case literal "os.Stdout" and "os.Stderr" + if sel, ok := expr.(*ast.SelectorExpr); ok { + if obj := v.pkg.TypesInfo.ObjectOf(sel.Sel); obj != nil { + vr, ok := obj.(*types.Var) + if ok && vr.Pkg() != nil && vr.Pkg().Name() == "os" && (vr.Name() == "Stderr" || vr.Name() == "Stdout") { + return "os." + vr.Name() + } + } + } + t := v.pkg.TypesInfo.TypeOf(expr) + if t == nil { + return "" + } + return t.String() +} + +func (v *visitor) excludeCall(call *ast.CallExpr) bool { + var arg0 string + if len(call.Args) > 0 { + arg0 = v.argName(call.Args[0]) + } + for _, name := range v.namesForExcludeCheck(call) { + if v.exclude[name] { + return true + } + if arg0 != "" && v.exclude[name+"("+arg0+")"] { + return true + } + } + return false +} + +func (v *visitor) ignoreCall(call *ast.CallExpr) bool { + if v.excludeCall(call) { + return true + } + + // Try to get an identifier. + // Currently only supports simple expressions: + // 1. f() + // 2. x.y.f() + var id *ast.Ident + switch exp := call.Fun.(type) { + case (*ast.Ident): + id = exp + case (*ast.SelectorExpr): + id = exp.Sel + default: + // eg: *ast.SliceExpr, *ast.IndexExpr + } + + if id == nil { + return false + } + + // If we got an identifier for the function, see if it is ignored + if re, ok := v.ignore[""]; ok && re.MatchString(id.Name) { + return true + } + + if obj := v.pkg.TypesInfo.Uses[id]; obj != nil { + if pkg := obj.Pkg(); pkg != nil { + if re, ok := v.ignore[nonVendoredPkgPath(pkg.Path())]; ok { + return re.MatchString(id.Name) + } + } + } + + return false +} + +// nonVendoredPkgPath returns the unvendored version of the provided package +// path (or returns the provided path if it does not represent a vendored +// path). 
+func nonVendoredPkgPath(pkgPath string) string { + lastVendorIndex := strings.LastIndex(pkgPath, "/vendor/") + if lastVendorIndex == -1 { + return pkgPath + } + return pkgPath[lastVendorIndex+len("/vendor/"):] +} + +// errorsByArg returns a slice s such that +// len(s) == number of return types of call +// s[i] == true iff return type at position i from left is an error type +func (v *visitor) errorsByArg(call *ast.CallExpr) []bool { + switch t := v.pkg.TypesInfo.Types[call].Type.(type) { + case *types.Named: + // Single return + return []bool{isErrorType(t)} + case *types.Pointer: + // Single return via pointer + return []bool{isErrorType(t)} + case *types.Tuple: + // Multiple returns + s := make([]bool, t.Len()) + for i := 0; i < t.Len(); i++ { + switch et := t.At(i).Type().(type) { + case *types.Named: + // Single return + s[i] = isErrorType(et) + case *types.Pointer: + // Single return via pointer + s[i] = isErrorType(et) + default: + s[i] = false + } + } + return s + } + return []bool{false} +} + +func (v *visitor) callReturnsError(call *ast.CallExpr) bool { + if v.isRecover(call) { + return true + } + for _, isError := range v.errorsByArg(call) { + if isError { + return true + } + } + return false +} + +// isRecover returns true if the given CallExpr is a call to the built-in recover() function. +func (v *visitor) isRecover(call *ast.CallExpr) bool { + if fun, ok := call.Fun.(*ast.Ident); ok { + if _, ok := v.pkg.TypesInfo.Uses[fun].(*types.Builtin); ok { + return fun.Name == "recover" + } + } + return false +} + +func (v *visitor) addErrorAtPosition(position token.Pos, call *ast.CallExpr) { + pos := v.pkg.Fset.Position(position) + lines, ok := v.lines[pos.Filename] + if !ok { + lines = readfile(pos.Filename) + v.lines[pos.Filename] = lines + } + + line := "??" + if pos.Line-1 < len(lines) { + line = strings.TrimSpace(lines[pos.Line-1]) + } + + var name string + var sel string + if call != nil { + name = v.fullName(call) + sel = v.selectorName(call) + } + + v.errors = append(v.errors, UncheckedError{pos, line, name, sel}) +} + +func readfile(filename string) []string { + var f, err = os.Open(filename) + if err != nil { + return nil + } + + var lines []string + var scanner = bufio.NewScanner(f) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + switch stmt := node.(type) { + case *ast.ExprStmt: + if call, ok := stmt.X.(*ast.CallExpr); ok { + if !v.ignoreCall(call) && v.callReturnsError(call) { + v.addErrorAtPosition(call.Lparen, call) + } + } + case *ast.GoStmt: + if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) { + v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call) + } + case *ast.DeferStmt: + if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) { + v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call) + } + case *ast.AssignStmt: + if len(stmt.Rhs) == 1 { + // single value on rhs; check against lhs identifiers + if call, ok := stmt.Rhs[0].(*ast.CallExpr); ok { + if !v.blank { + break + } + if v.ignoreCall(call) { + break + } + isError := v.errorsByArg(call) + for i := 0; i < len(stmt.Lhs); i++ { + if id, ok := stmt.Lhs[i].(*ast.Ident); ok { + // We shortcut calls to recover() because errorsByArg can't + // check its return types for errors since it returns interface{}. 
+ if id.Name == "_" && (v.isRecover(call) || isError[i]) { + v.addErrorAtPosition(id.NamePos, call) + } + } + } + } else if assert, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok { + if !v.asserts { + break + } + if assert.Type == nil { + // type switch + break + } + if len(stmt.Lhs) < 2 { + // assertion result not read + v.addErrorAtPosition(stmt.Rhs[0].Pos(), nil) + } else if id, ok := stmt.Lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" { + // assertion result ignored + v.addErrorAtPosition(id.NamePos, nil) + } + } + } else { + // multiple value on rhs; in this case a call can't return + // multiple values. Assume len(stmt.Lhs) == len(stmt.Rhs) + for i := 0; i < len(stmt.Lhs); i++ { + if id, ok := stmt.Lhs[i].(*ast.Ident); ok { + if call, ok := stmt.Rhs[i].(*ast.CallExpr); ok { + if !v.blank { + continue + } + if v.ignoreCall(call) { + continue + } + if id.Name == "_" && v.callReturnsError(call) { + v.addErrorAtPosition(id.NamePos, call) + } + } else if assert, ok := stmt.Rhs[i].(*ast.TypeAssertExpr); ok { + if !v.asserts { + continue + } + if assert.Type == nil { + // Shouldn't happen anyway, no multi assignment in type switches + continue + } + v.addErrorAtPosition(id.NamePos, nil) + } + } + } + } + default: + } + return v +} + +func isErrorType(t types.Type) bool { + return types.Implements(t, errorType) +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/tags.go b/vendor/github.com/kisielk/errcheck/errcheck/tags.go new file mode 100644 index 000000000..7b423ca69 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/errcheck/tags.go @@ -0,0 +1,12 @@ +// +build go1.13 + +package errcheck + +import ( + "fmt" + "strings" +) + +func fmtTags(tags []string) string { + return fmt.Sprintf("-tags=%s", strings.Join(tags, ",")) +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go b/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go new file mode 100644 index 000000000..2f534f40a --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go @@ -0,0 +1,13 @@ +// +build go1.11 +// +build !go1.13 + +package errcheck + +import ( + "fmt" + "strings" +) + +func fmtTags(tags []string) string { + return fmt.Sprintf("-tags=%s", strings.Join(tags, " ")) +} diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/kisielk/gotool/.travis.yml similarity index 53% rename from vendor/github.com/gorilla/mux/.travis.yml rename to vendor/github.com/kisielk/gotool/.travis.yml index ad0935dbd..d1784e1e2 100644 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ b/vendor/github.com/kisielk/gotool/.travis.yml @@ -1,21 +1,21 @@ -language: go sudo: false - +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - master matrix: - include: - - go: 1.5.x - - go: 1.6.x - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: tip allow_failures: - - go: tip - + - go: master + fast_finish: true install: - - # Skip - + - # Skip. script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) diff --git a/vendor/github.com/kisielk/gotool/LEGAL b/vendor/github.com/kisielk/gotool/LEGAL new file mode 100644 index 000000000..72b859cd6 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/LEGAL @@ -0,0 +1,32 @@ +All the files in this distribution are covered under either the MIT +license (see the file LICENSE) except some files mentioned below. + +match.go, match_test.go: + + Copyright (c) 2009 The Go Authors. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kisielk/gotool/LICENSE b/vendor/github.com/kisielk/gotool/LICENSE new file mode 100644 index 000000000..1cbf651e2 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kisielk/gotool/README.md b/vendor/github.com/kisielk/gotool/README.md new file mode 100644 index 000000000..6e4e92b2f --- /dev/null +++ b/vendor/github.com/kisielk/gotool/README.md @@ -0,0 +1,6 @@ +gotool +====== +[![GoDoc](https://godoc.org/github.com/kisielk/gotool?status.svg)](https://godoc.org/github.com/kisielk/gotool) +[![Build Status](https://travis-ci.org/kisielk/gotool.svg?branch=master)](https://travis-ci.org/kisielk/gotool) + +Package gotool contains utility functions used to implement the standard "cmd/go" tool, provided as a convenience to developers who want to write tools with similar semantics. 
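To make the package's purpose concrete, a minimal sketch of how a tool would expand command-line package patterns with gotool. The package-level ImportPaths helper assumed here is exported by gotool but lives outside the files included in this hunk, so treat the exact entry point as an assumption:

~~~~
package main

import (
	"fmt"

	"github.com/kisielk/gotool"
)

func main() {
	// Expands "go build"-style arguments ("./...", "std", explicit import
	// paths) into the concrete list of matching import paths.
	for _, pkg := range gotool.ImportPaths([]string{"./..."}) {
		fmt.Println(pkg)
	}
}
~~~~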
diff --git a/vendor/github.com/kisielk/gotool/go.mod b/vendor/github.com/kisielk/gotool/go.mod new file mode 100644 index 000000000..503b37c6f --- /dev/null +++ b/vendor/github.com/kisielk/gotool/go.mod @@ -0,0 +1 @@ +module "github.com/kisielk/gotool" diff --git a/vendor/github.com/kisielk/gotool/go13.go b/vendor/github.com/kisielk/gotool/go13.go new file mode 100644 index 000000000..2dd9b3fdf --- /dev/null +++ b/vendor/github.com/kisielk/gotool/go13.go @@ -0,0 +1,15 @@ +// +build !go1.4 + +package gotool + +import ( + "go/build" + "path/filepath" + "runtime" +) + +var gorootSrc = filepath.Join(runtime.GOROOT(), "src", "pkg") + +func shouldIgnoreImport(p *build.Package) bool { + return true +} diff --git a/vendor/github.com/kisielk/gotool/go14-15.go b/vendor/github.com/kisielk/gotool/go14-15.go new file mode 100644 index 000000000..aa99a3227 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/go14-15.go @@ -0,0 +1,15 @@ +// +build go1.4,!go1.6 + +package gotool + +import ( + "go/build" + "path/filepath" + "runtime" +) + +var gorootSrc = filepath.Join(runtime.GOROOT(), "src") + +func shouldIgnoreImport(p *build.Package) bool { + return true +} diff --git a/vendor/github.com/kisielk/gotool/go16-18.go b/vendor/github.com/kisielk/gotool/go16-18.go new file mode 100644 index 000000000..f25cec14a --- /dev/null +++ b/vendor/github.com/kisielk/gotool/go16-18.go @@ -0,0 +1,15 @@ +// +build go1.6,!go1.9 + +package gotool + +import ( + "go/build" + "path/filepath" + "runtime" +) + +var gorootSrc = filepath.Join(runtime.GOROOT(), "src") + +func shouldIgnoreImport(p *build.Package) bool { + return p == nil || len(p.InvalidGoFiles) == 0 +} diff --git a/vendor/github.com/kisielk/gotool/internal/load/path.go b/vendor/github.com/kisielk/gotool/internal/load/path.go new file mode 100644 index 000000000..74e15b9d3 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/internal/load/path.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package load + +import ( + "strings" +) + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} diff --git a/vendor/github.com/kisielk/gotool/internal/load/pkg.go b/vendor/github.com/kisielk/gotool/internal/load/pkg.go new file mode 100644 index 000000000..b937ede75 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/internal/load/pkg.go @@ -0,0 +1,25 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +// Package load loads packages. +package load + +import ( + "strings" +) + +// isStandardImportPath reports whether $GOROOT/src/path should be considered +// part of the standard distribution. For historical reasons we allow people to add +// their own code to $GOROOT instead of using $GOPATH, but we assume that +// code will start with a domain name (dot in the first element). 
+func isStandardImportPath(path string) bool { + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + elem := path[:i] + return !strings.Contains(elem, ".") +} diff --git a/vendor/github.com/kisielk/gotool/internal/load/search.go b/vendor/github.com/kisielk/gotool/internal/load/search.go new file mode 100644 index 000000000..17ed62dda --- /dev/null +++ b/vendor/github.com/kisielk/gotool/internal/load/search.go @@ -0,0 +1,354 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package load + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "strings" +) + +// Context specifies values for operation of ImportPaths that would +// otherwise come from cmd/go/internal/cfg package. +// +// This is a construct added for gotool purposes and doesn't have +// an equivalent upstream in cmd/go. +type Context struct { + // BuildContext is the build context to use. + BuildContext build.Context + + // GOROOTsrc is the location of the src directory in GOROOT. + // At this time, it's used only in MatchPackages to skip + // GOOROOT/src entry from BuildContext.SrcDirs output. + GOROOTsrc string +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages), +// "cmd" (standard commands), or a path including "...". +func (c *Context) allPackages(pattern string) []string { + pkgs := c.MatchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func (c *Context) allPackagesInFS(pattern string) []string { + pkgs := c.MatchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +// MatchPackages returns a list of package paths matching pattern +// (see go help packages for pattern syntax). +func (c *Context) MatchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if !IsMetaPackage(pattern) { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !c.BuildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + for _, src := range c.BuildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != c.GOROOTsrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || path == src { + return nil + } + + want := true + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + want = false + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { + // The name "std" is only the standard library. 
+ // If the name is cmd, it's the root of the command tree. + want = false + } + if !treeCanMatch(name) { + want = false + } + + if !fi.IsDir() { + if fi.Mode()&os.ModeSymlink != 0 && want { + if target, err := os.Stat(path); err == nil && target.IsDir() { + fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) + } + } + return nil + } + if !want { + return filepath.SkipDir + } + + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + pkg, err := c.BuildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + + // If we are expanding "cmd", skip main + // packages under cmd/vendor. At least as of + // March, 2017, there is one there for the + // vendored pprof tool. + if pattern == "cmd" && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" { + return nil + } + + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// MatchPackagesInFS returns a list of package paths matching pattern, +// which must begin with ./ or ../ +// (see go help packages for pattern syntax). +func (c *Context) MatchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + + // We keep the directory if we can import it, or if we can't import it + // due to invalid Go source files. This means that directories containing + // parse errors will be built (and fail) instead of being silently skipped + // as not matching the pattern. Go 1.5 and earlier skipped, but that + // behavior means people miss serious mistakes. + // See golang.org/issue/11407. + if p, err := c.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. 
+// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separted pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. + + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} + +// ImportPaths returns the import paths to use for the given command line. +func (c *Context) ImportPaths(args []string) []string { + args = c.ImportPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, c.allPackagesInFS(a)...) + } else { + out = append(out, c.allPackages(a)...) 
+ } + continue + } + out = append(out, a) + } + return out +} + +// ImportPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. +func (c *Context) ImportPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if IsMetaPackage(a) { + out = append(out, c.allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. +func IsMetaPackage(name string) bool { + return name == "std" || name == "cmd" || name == "all" +} diff --git a/vendor/github.com/kisielk/gotool/match.go b/vendor/github.com/kisielk/gotool/match.go new file mode 100644 index 000000000..4dbdbff47 --- /dev/null +++ b/vendor/github.com/kisielk/gotool/match.go @@ -0,0 +1,56 @@ +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.9 + +package gotool + +import ( + "path/filepath" + + "github.com/kisielk/gotool/internal/load" +) + +// importPaths returns the import paths to use for the given command line. +func (c *Context) importPaths(args []string) []string { + lctx := load.Context{ + BuildContext: c.BuildContext, + GOROOTsrc: c.joinPath(c.BuildContext.GOROOT, "src"), + } + return lctx.ImportPaths(args) +} + +// joinPath calls c.BuildContext.JoinPath (if not nil) or else filepath.Join. +// +// It's a copy of the unexported build.Context.joinPath helper. 
+func (c *Context) joinPath(elem ...string) string { + if f := c.BuildContext.JoinPath; f != nil { + return f(elem...) + } + return filepath.Join(elem...) +} diff --git a/vendor/github.com/kisielk/gotool/match18.go b/vendor/github.com/kisielk/gotool/match18.go new file mode 100644 index 000000000..6d6b1368c --- /dev/null +++ b/vendor/github.com/kisielk/gotool/match18.go @@ -0,0 +1,317 @@ +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !go1.9 + +package gotool + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "strings" +) + +// This file contains code from the Go distribution. + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return reg.MatchString +} + +// matchPackages returns a list of package paths matching pattern +// (see go help packages for pattern syntax). 
+func (c *Context) matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if !isMetaPackage(pattern) { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !c.BuildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + for _, src := range c.BuildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (!isStandardImportPath(name) || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = c.BuildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. +func (c *Context) importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if isMetaPackage(a) { + out = append(out, c.allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func (c *Context) importPaths(args []string) []string { + args = c.importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, c.allPackagesInFS(a)...) + } else { + out = append(out, c.allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages), +// "cmd" (standard commands), or a path including "...". 
+func (c *Context) allPackages(pattern string) []string { + pkgs := c.matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func (c *Context) allPackagesInFS(pattern string) []string { + pkgs := c.matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +// matchPackagesInFS returns a list of package paths matching pattern, +// which must begin with ./ or ../ +// (see go help packages for pattern syntax). +func (c *Context) matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + + // We keep the directory if we can import it, or if we can't import it + // due to invalid Go source files. This means that directories containing + // parse errors will be built (and fail) instead of being silently skipped + // as not matching the pattern. Go 1.5 and earlier skipped, but that + // behavior means people miss serious mistakes. + // See golang.org/issue/11407. + if p, err := c.BuildContext.ImportDir(path, 0); err != nil && shouldIgnoreImport(p) { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} + +// isMetaPackage checks if name is a reserved package name that expands to multiple packages. +func isMetaPackage(name string) bool { + return name == "std" || name == "cmd" || name == "all" +} + +// isStandardImportPath reports whether $GOROOT/src/path should be considered +// part of the standard distribution. For historical reasons we allow people to add +// their own code to $GOROOT instead of using $GOPATH, but we assume that +// code will start with a domain name (dot in the first element). 
+func isStandardImportPath(path string) bool { + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + elem := path[:i] + return !strings.Contains(elem, ".") +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} diff --git a/vendor/github.com/kisielk/gotool/tool.go b/vendor/github.com/kisielk/gotool/tool.go new file mode 100644 index 000000000..c7409e11e --- /dev/null +++ b/vendor/github.com/kisielk/gotool/tool.go @@ -0,0 +1,48 @@ +// Package gotool contains utility functions used to implement the standard +// "cmd/go" tool, provided as a convenience to developers who want to write +// tools with similar semantics. +package gotool + +import "go/build" + +// Export functions here to make it easier to keep the implementations up to date with upstream. + +// DefaultContext is the default context that uses build.Default. +var DefaultContext = Context{ + BuildContext: build.Default, +} + +// A Context specifies the supporting context. +type Context struct { + // BuildContext is the build.Context that is used when computing import paths. + BuildContext build.Context +} + +// ImportPaths returns the import paths to use for the given command line. +// +// The path "all" is expanded to all packages in $GOPATH and $GOROOT. +// The path "std" is expanded to all packages in the Go standard library. +// The path "cmd" is expanded to all Go standard commands. +// The string "..." is treated as a wildcard within a path. +// When matching recursively, directories are ignored if they are prefixed with +// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". +// Relative import paths are not converted to full import paths. +// If args is empty, a single element "." is returned. +func (c *Context) ImportPaths(args []string) []string { + return c.importPaths(args) +} + +// ImportPaths returns the import paths to use for the given command line +// using default context. +// +// The path "all" is expanded to all packages in $GOPATH and $GOROOT. +// The path "std" is expanded to all packages in the Go standard library. +// The path "cmd" is expanded to all Go standard commands. +// The string "..." is treated as a wildcard within a path. +// When matching recursively, directories are ignored if they are prefixed with +// a dot or an underscore (such as ".foo" or "_foo"), or are named "testdata". +// Relative import paths are not converted to full import paths. +// If args is empty, a single element "." is returned. 
+func ImportPaths(args []string) []string {
+	return DefaultContext.importPaths(args)
+}
diff --git a/vendor/github.com/kulti/thelper/LICENSE b/vendor/github.com/kulti/thelper/LICENSE
new file mode 100644
index 000000000..e070215fe
--- /dev/null
+++ b/vendor/github.com/kulti/thelper/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Aleksey Bakin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/kulti/thelper/pkg/analyzer/analyzer.go b/vendor/github.com/kulti/thelper/pkg/analyzer/analyzer.go
new file mode 100644
index 000000000..2f8dba957
--- /dev/null
+++ b/vendor/github.com/kulti/thelper/pkg/analyzer/analyzer.go
@@ -0,0 +1,416 @@
+package analyzer
+
+import (
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"sort"
+	"strings"
+
+	"github.com/gostaticanalysis/analysisutil"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	doc = "thelper detects test helpers that do not start with a t.Helper() call."
+ checksDoc = `coma separated list of enabled checks + +Available checks + +` + checkTBegin + ` - check t.Helper() begins helper function +` + checkTFirst + ` - check *testing.T is first param of helper function +` + checkTName + ` - check *testing.T param has t name + +Also available similar checks for benchmark and TB helpers: ` + + checkBBegin + `, ` + checkBFirst + `, ` + checkBName + `,` + + checkTBBegin + `, ` + checkTBFirst + `, ` + checkTBName + ` + +` +) + +type enabledChecksValue map[string]struct{} + +func (m enabledChecksValue) Enabled(c string) bool { + _, ok := m[c] + return ok +} + +func (m enabledChecksValue) String() string { + ss := make([]string, 0, len(m)) + for s := range m { + ss = append(ss, s) + } + sort.Strings(ss) + return strings.Join(ss, ",") +} + +func (m enabledChecksValue) Set(s string) error { + ss := strings.FieldsFunc(s, func(c rune) bool { return c == ',' }) + if len(ss) == 0 { + return nil + } + + for k := range m { + delete(m, k) + } + for _, v := range ss { + switch v { + case checkTBegin, checkTFirst, checkTName, + checkBBegin, checkBFirst, checkBName, + checkTBBegin, checkTBFirst, checkTBName: + m[v] = struct{}{} + default: + return fmt.Errorf("unknown check name %q (see help for full list)", v) + } + } + return nil +} + +const ( + checkTBegin = "t_begin" + checkTFirst = "t_first" + checkTName = "t_name" + checkBBegin = "b_begin" + checkBFirst = "b_first" + checkBName = "b_name" + checkTBBegin = "tb_begin" + checkTBFirst = "tb_first" + checkTBName = "tb_name" +) + +type thelper struct { + enabledChecks enabledChecksValue +} + +// NewAnalyzer return a new thelper analyzer. +// thelper analyzes Go test codes how they use t.Helper() method. +func NewAnalyzer() *analysis.Analyzer { + thelper := thelper{} + thelper.enabledChecks = enabledChecksValue{ + checkTBegin: struct{}{}, + checkTFirst: struct{}{}, + checkTName: struct{}{}, + checkBBegin: struct{}{}, + checkBFirst: struct{}{}, + checkBName: struct{}{}, + checkTBBegin: struct{}{}, + checkTBFirst: struct{}{}, + checkTBName: struct{}{}, + } + + a := &analysis.Analyzer{ + Name: "thelper", + Doc: doc, + Run: thelper.run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + } + + a.Flags.Init("thelper", flag.ExitOnError) + a.Flags.Var(&thelper.enabledChecks, "checks", checksDoc) + + return a +} + +func (t thelper) run(pass *analysis.Pass) (interface{}, error) { + tCheckOpts, bCheckOpts, tbCheckOpts, ok := t.buildCheckFuncOpts(pass) + if !ok { + return nil, nil + } + + var reports reports + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + var fd funcDecl + switch n := node.(type) { + case *ast.FuncLit: + fd.Pos = n.Pos() + fd.Type = n.Type + fd.Body = n.Body + fd.Name = ast.NewIdent("") + case *ast.FuncDecl: + fd.Pos = n.Name.NamePos + fd.Type = n.Type + fd.Body = n.Body + fd.Name = n.Name + case *ast.CallExpr: + tbRunSubtestExpr := extractSubtestExp(pass, n, tCheckOpts.tbRun) + if tbRunSubtestExpr == nil { + tbRunSubtestExpr = extractSubtestExp(pass, n, bCheckOpts.tbRun) + } + + if tbRunSubtestExpr != nil { + reports.Filter(funcDefPosition(pass, tbRunSubtestExpr)) + } else { + reports.NoFilter(funcDefPosition(pass, n.Fun)) + } + return + default: + return + } + + checkFunc(pass, &reports, fd, tCheckOpts) + checkFunc(pass, &reports, fd, bCheckOpts) + checkFunc(pass, &reports, fd, tbCheckOpts) + }) + + reports.Flush(pass) + + 
return nil, nil +} + +type checkFuncOpts struct { + skipPrefix string + varName string + tbHelper types.Object + tbRun types.Object + tbType types.Type + ctxType types.Type + checkBegin bool + checkFirst bool + checkName bool +} + +func (t thelper) buildCheckFuncOpts(pass *analysis.Pass) (checkFuncOpts, checkFuncOpts, checkFuncOpts, bool) { + var ctxType types.Type + ctxObj := analysisutil.ObjectOf(pass, "context", "Context") + if ctxObj != nil { + ctxType = ctxObj.Type() + } + + tCheckOpts, ok := t.buildTestCheckFuncOpts(pass, ctxType) + if !ok { + return checkFuncOpts{}, checkFuncOpts{}, checkFuncOpts{}, false + } + + bCheckOpts, ok := t.buildBenchmarkCheckFuncOpts(pass, ctxType) + if !ok { + return checkFuncOpts{}, checkFuncOpts{}, checkFuncOpts{}, false + } + + tbCheckOpts, ok := t.buildTBCheckFuncOpts(pass, ctxType) + if !ok { + return checkFuncOpts{}, checkFuncOpts{}, checkFuncOpts{}, false + } + + return tCheckOpts, bCheckOpts, tbCheckOpts, true +} + +func (t thelper) buildTestCheckFuncOpts(pass *analysis.Pass, ctxType types.Type) (checkFuncOpts, bool) { + tObj := analysisutil.ObjectOf(pass, "testing", "T") + if tObj == nil { + return checkFuncOpts{}, false + } + + tHelper, _, _ := types.LookupFieldOrMethod(tObj.Type(), true, tObj.Pkg(), "Helper") + if tHelper == nil { + return checkFuncOpts{}, false + } + + tRun, _, _ := types.LookupFieldOrMethod(tObj.Type(), true, tObj.Pkg(), "Run") + if tRun == nil { + return checkFuncOpts{}, false + } + + return checkFuncOpts{ + skipPrefix: "Test", + varName: "t", + tbHelper: tHelper, + tbRun: tRun, + tbType: types.NewPointer(tObj.Type()), + ctxType: ctxType, + checkBegin: t.enabledChecks.Enabled(checkTBegin), + checkFirst: t.enabledChecks.Enabled(checkTFirst), + checkName: t.enabledChecks.Enabled(checkTName), + }, true +} + +func (t thelper) buildBenchmarkCheckFuncOpts(pass *analysis.Pass, ctxType types.Type) (checkFuncOpts, bool) { + bObj := analysisutil.ObjectOf(pass, "testing", "B") + if bObj == nil { + return checkFuncOpts{}, false + } + + bHelper, _, _ := types.LookupFieldOrMethod(bObj.Type(), true, bObj.Pkg(), "Helper") + if bHelper == nil { + return checkFuncOpts{}, false + } + + bRun, _, _ := types.LookupFieldOrMethod(bObj.Type(), true, bObj.Pkg(), "Run") + if bRun == nil { + return checkFuncOpts{}, false + } + + return checkFuncOpts{ + skipPrefix: "Benchmark", + varName: "b", + tbHelper: bHelper, + tbRun: bRun, + tbType: types.NewPointer(bObj.Type()), + ctxType: ctxType, + checkBegin: t.enabledChecks.Enabled(checkBBegin), + checkFirst: t.enabledChecks.Enabled(checkBFirst), + checkName: t.enabledChecks.Enabled(checkBName), + }, true +} + +func (t thelper) buildTBCheckFuncOpts(pass *analysis.Pass, ctxType types.Type) (checkFuncOpts, bool) { + tbObj := analysisutil.ObjectOf(pass, "testing", "TB") + if tbObj == nil { + return checkFuncOpts{}, false + } + + tbHelper, _, _ := types.LookupFieldOrMethod(tbObj.Type(), true, tbObj.Pkg(), "Helper") + if tbHelper == nil { + return checkFuncOpts{}, false + } + + return checkFuncOpts{ + skipPrefix: "", + varName: "tb", + tbHelper: tbHelper, + tbType: tbObj.Type(), + ctxType: ctxType, + checkBegin: t.enabledChecks.Enabled(checkTBBegin), + checkFirst: t.enabledChecks.Enabled(checkTBFirst), + checkName: t.enabledChecks.Enabled(checkTBName), + }, true +} + +type funcDecl struct { + Pos token.Pos + Name *ast.Ident + Type *ast.FuncType + Body *ast.BlockStmt +} + +func checkFunc(pass *analysis.Pass, reports *reports, funcDecl funcDecl, opts checkFuncOpts) { + if opts.skipPrefix != "" && 
strings.HasPrefix(funcDecl.Name.Name, opts.skipPrefix) { + return + } + + p, pos, ok := searchFuncParam(pass, funcDecl, opts.tbType) + if !ok { + return + } + + if opts.checkFirst { + if pos != 0 { + checkFirstPassed := false + if pos == 1 && opts.ctxType != nil { + _, pos, ok := searchFuncParam(pass, funcDecl, opts.ctxType) + checkFirstPassed = ok && (pos == 0) + } + + if !checkFirstPassed { + reports.Reportf(funcDecl.Pos, "parameter %s should be the first or after context.Context", opts.tbType) + } + } + } + + if len(p.Names) > 0 && p.Names[0].Name != "_" { + if opts.checkName { + if p.Names[0].Name != opts.varName { + reports.Reportf(funcDecl.Pos, "parameter %s should have name %s", opts.tbType, opts.varName) + } + } + + if opts.checkBegin { + if len(funcDecl.Body.List) == 0 || !isTHelperCall(pass, funcDecl.Body.List[0], opts.tbHelper) { + reports.Reportf(funcDecl.Pos, "test helper function should start from %s.Helper()", opts.varName) + } + } + } +} + +func searchFuncParam(pass *analysis.Pass, f funcDecl, p types.Type) (*ast.Field, int, bool) { + for i, f := range f.Type.Params.List { + typeInfo, ok := pass.TypesInfo.Types[f.Type] + if !ok { + continue + } + + if types.Identical(typeInfo.Type, p) { + return f, i, true + } + } + return nil, 0, false +} + +func isTHelperCall(pass *analysis.Pass, s ast.Stmt, tHelper types.Object) bool { + exprStmt, ok := s.(*ast.ExprStmt) + if !ok { + return false + } + + callExpr, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + return false + } + + selExpr, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + + return isSelectorCall(pass, selExpr, tHelper) +} + +func extractSubtestExp(pass *analysis.Pass, e *ast.CallExpr, tbRun types.Object) ast.Expr { + selExpr, ok := e.Fun.(*ast.SelectorExpr) + if !ok { + return nil + } + + if !isSelectorCall(pass, selExpr, tbRun) { + return nil + } + + if len(e.Args) != 2 { + return nil + } + + return e.Args[1] +} + +func funcDefPosition(pass *analysis.Pass, e ast.Expr) token.Pos { + anonFunLit, ok := e.(*ast.FuncLit) + if ok { + return anonFunLit.Pos() + } + + funIdent, ok := e.(*ast.Ident) + if !ok { + selExpr, ok := e.(*ast.SelectorExpr) + if !ok { + return token.NoPos + } + funIdent = selExpr.Sel + } + + funDef, ok := pass.TypesInfo.Uses[funIdent] + if !ok { + return token.NoPos + } + + return funDef.Pos() +} + +func isSelectorCall(pass *analysis.Pass, selExpr *ast.SelectorExpr, callObj types.Object) bool { + sel, ok := pass.TypesInfo.Selections[selExpr] + if !ok { + return false + } + + return sel.Obj() == callObj +} diff --git a/vendor/github.com/kulti/thelper/pkg/analyzer/report.go b/vendor/github.com/kulti/thelper/pkg/analyzer/report.go new file mode 100644 index 000000000..4a23e36d5 --- /dev/null +++ b/vendor/github.com/kulti/thelper/pkg/analyzer/report.go @@ -0,0 +1,56 @@ +package analyzer + +import ( + "go/token" + + "golang.org/x/tools/go/analysis" +) + +type reports struct { + reports []report + filter map[token.Pos]struct{} + nofilter map[token.Pos]struct{} +} + +type report struct { + pos token.Pos + format string + args []interface{} +} + +func (rr *reports) Reportf(pos token.Pos, format string, args ...interface{}) { + rr.reports = append(rr.reports, report{ + pos: pos, + format: format, + args: args, + }) +} + +func (rr *reports) Filter(pos token.Pos) { + if pos.IsValid() { + if rr.filter == nil { + rr.filter = make(map[token.Pos]struct{}) + } + rr.filter[pos] = struct{}{} + } +} + +func (rr *reports) NoFilter(pos token.Pos) { + if pos.IsValid() { + if rr.nofilter == nil { + 
rr.nofilter = make(map[token.Pos]struct{}) + } + rr.nofilter[pos] = struct{}{} + } +} + +func (rr reports) Flush(pass *analysis.Pass) { + for _, r := range rr.reports { + if _, ok := rr.filter[r.pos]; ok { + if _, ok := rr.nofilter[r.pos]; !ok { + continue + } + } + pass.Reportf(r.pos, r.format, r.args...) + } +} diff --git a/vendor/github.com/kunwardeep/paralleltest/LICENSE b/vendor/github.com/kunwardeep/paralleltest/LICENSE new file mode 100644 index 000000000..d06a809c2 --- /dev/null +++ b/vendor/github.com/kunwardeep/paralleltest/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Isaev Denis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go new file mode 100644 index 000000000..31f6f2946 --- /dev/null +++ b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go @@ -0,0 +1,256 @@ +package paralleltest + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check that tests use t.Parallel() method +It also checks that the t.Parallel is used if multiple tests cases are run as part of single test. 
+As part of ensuring parallel tests works as expected it checks for reinitialising of the range value +over the test cases.(https://tinyurl.com/y6555cy6)` + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "paralleltest", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := inspector.New(pass.Files) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl := node.(*ast.FuncDecl) + var funcHasParallelMethod, + rangeStatementOverTestCasesExists, + rangeStatementHasParallelMethod, + testLoopVariableReinitialised bool + var testRunLoopIdentifier string + var numberOfTestRun int + var positionOfTestRunNode []ast.Node + var rangeNode ast.Node + + // Check runs for test functions only + if !isTestFunction(funcDecl) { + return + } + + for _, l := range funcDecl.Body.List { + switch v := l.(type) { + + case *ast.ExprStmt: + ast.Inspect(v, func(n ast.Node) bool { + // Check if the test method is calling t.parallel + if !funcHasParallelMethod { + funcHasParallelMethod = methodParallelIsCalledInTestFunction(n) + } + + // Check if the t.Run within the test function is calling t.parallel + if methodRunIsCalledInTestFunction(n) { + hasParallel := false + numberOfTestRun++ + ast.Inspect(v, func(p ast.Node) bool { + if !hasParallel { + hasParallel = methodParallelIsCalledInTestFunction(p) + } + return true + }) + if !hasParallel { + positionOfTestRunNode = append(positionOfTestRunNode, n) + } + } + return true + }) + + // Check if the range over testcases is calling t.parallel + case *ast.RangeStmt: + rangeNode = v + + ast.Inspect(v, func(n ast.Node) bool { + // nolint: gocritic + switch r := n.(type) { + case *ast.ExprStmt: + if methodRunIsCalledInRangeStatement(r.X) { + rangeStatementOverTestCasesExists = true + testRunLoopIdentifier = methodRunFirstArgumentObjectName(r.X) + + if !rangeStatementHasParallelMethod { + rangeStatementHasParallelMethod = methodParallelIsCalledInMethodRun(r.X) + } + } + } + return true + }) + + // Check for the range loop value identifier re assignment + // More info here https://gist.github.com/kunwardeep/80c2e9f3d3256c894898bae82d9f75d0 + if rangeStatementOverTestCasesExists { + var rangeValueIdentifier string + if i, ok := v.Value.(*ast.Ident); ok { + rangeValueIdentifier = i.Name + } + + testLoopVariableReinitialised = testCaseLoopVariableReinitialised(v.Body.List, rangeValueIdentifier, testRunLoopIdentifier) + } + } + } + + if !funcHasParallelMethod { + pass.Reportf(node.Pos(), "Function %s missing the call to method parallel\n", funcDecl.Name.Name) + } + + if rangeStatementOverTestCasesExists && rangeNode != nil { + if !rangeStatementHasParallelMethod { + pass.Reportf(rangeNode.Pos(), "Range statement for test %s missing the call to method parallel in test Run\n", funcDecl.Name.Name) + } else { + if testRunLoopIdentifier == "" { + pass.Reportf(rangeNode.Pos(), "Range statement for test %s does not use range value in test Run\n", funcDecl.Name.Name) + } else if !testLoopVariableReinitialised { + pass.Reportf(rangeNode.Pos(), "Range statement for test %s does not reinitialise the variable %s\n", funcDecl.Name.Name, testRunLoopIdentifier) + } + } + } + + // Check if the t.Run is more than one as there is no point making one test parallel + if numberOfTestRun > 1 && len(positionOfTestRunNode) > 0 { + for _, n := range positionOfTestRunNode { + pass.Reportf(n.Pos(), "Function %s 
has missing the call to method parallel in the test run\n", funcDecl.Name.Name) + } + } + }) + + return nil, nil +} + +func testCaseLoopVariableReinitialised(statements []ast.Stmt, rangeValueIdentifier string, testRunLoopIdentifier string) bool { + if len(statements) > 1 { + for _, s := range statements { + leftIdentifier, rightIdentifier := getLeftAndRightIdentifier(s) + if leftIdentifier == testRunLoopIdentifier && rightIdentifier == rangeValueIdentifier { + return true + } + } + } + return false +} + +// Return the left hand side and the right hand side identifiers name +func getLeftAndRightIdentifier(s ast.Stmt) (string, string) { + var leftIdentifier, rightIdentifier string + // nolint: gocritic + switch v := s.(type) { + case *ast.AssignStmt: + if len(v.Rhs) == 1 { + if i, ok := v.Rhs[0].(*ast.Ident); ok { + rightIdentifier = i.Name + } + } + if len(v.Lhs) == 1 { + if i, ok := v.Lhs[0].(*ast.Ident); ok { + leftIdentifier = i.Name + } + } + } + return leftIdentifier, rightIdentifier +} + +func methodParallelIsCalledInMethodRun(node ast.Node) bool { + var methodParallelCalled bool + // nolint: gocritic + switch callExp := node.(type) { + case *ast.CallExpr: + for _, arg := range callExp.Args { + if !methodParallelCalled { + ast.Inspect(arg, func(n ast.Node) bool { + if !methodParallelCalled { + methodParallelCalled = methodParallelIsCalledInRunMethod(n) + return true + } + return false + }) + } + } + } + return methodParallelCalled +} + +func methodParallelIsCalledInRunMethod(node ast.Node) bool { + return exprCallHasMethod(node, "Parallel") +} + +func methodParallelIsCalledInTestFunction(node ast.Node) bool { + return exprCallHasMethod(node, "Parallel") +} + +func methodRunIsCalledInRangeStatement(node ast.Node) bool { + return exprCallHasMethod(node, "Run") +} + +func methodRunIsCalledInTestFunction(node ast.Node) bool { + return exprCallHasMethod(node, "Run") +} +func exprCallHasMethod(node ast.Node, methodName string) bool { + // nolint: gocritic + switch n := node.(type) { + case *ast.CallExpr: + if fun, ok := n.Fun.(*ast.SelectorExpr); ok { + return fun.Sel.Name == methodName + } + } + return false +} + +// Gets the object name `tc` from method t.Run(tc.Foo, func(t *testing.T) +func methodRunFirstArgumentObjectName(node ast.Node) string { + // nolint: gocritic + switch n := node.(type) { + case *ast.CallExpr: + for _, arg := range n.Args { + if s, ok := arg.(*ast.SelectorExpr); ok { + if i, ok := s.X.(*ast.Ident); ok { + return i.Name + } + } + } + } + return "" +} + +// Checks if the function has the param type *testing.T) +func isTestFunction(funcDecl *ast.FuncDecl) bool { + testMethodPackageType := "testing" + testMethodStruct := "T" + testPrefix := "Test" + + if !strings.HasPrefix(funcDecl.Name.Name, testPrefix) { + return false + } + + if funcDecl.Type.Params != nil && len(funcDecl.Type.Params.List) != 1 { + return false + } + + param := funcDecl.Type.Params.List[0] + if starExp, ok := param.Type.(*ast.StarExpr); ok { + if selectExpr, ok := starExp.X.(*ast.SelectorExpr); ok { + if selectExpr.Sel.Name == testMethodStruct { + if s, ok := selectExpr.X.(*ast.Ident); ok { + return s.Name == testMethodPackageType + } + } + } + } + + return false +} diff --git a/vendor/github.com/kyoh86/exportloopref/.golangci.yml b/vendor/github.com/kyoh86/exportloopref/.golangci.yml new file mode 100644 index 000000000..e876057f3 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/.golangci.yml @@ -0,0 +1,4 @@ +linters: + enable: + - unparam + - exportloopref diff --git 
a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml new file mode 100644 index 000000000..22ff44040 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml @@ -0,0 +1,43 @@ +project_name: exportloopref +release: + github: + owner: kyoh86 + name: exportloopref +brews: +- install: | + bin.install "exportloopref" + github: + owner: kyoh86 + name: homebrew-tap + folder: Formula + homepage: https://github.com/kyoh86/exportloopref + description: An analyzer that finds exporting pointers for loop variables. +builds: +- goos: + - linux + - darwin + - windows + goarch: + - amd64 + - "386" + main: ./cmd/exportloopref + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} + binary: exportloopref +archives: +- id: gzip + format: tar.gz + format_overrides: + - goos: windows + format: zip + name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + files: + - licence* + - LICENCE* + - license* + - LICENSE* + - readme* + - README* + - changelog* + - CHANGELOG* +snapshot: + name_template: SNAPSHOT-{{ .Commit }} diff --git a/vendor/github.com/kyoh86/exportloopref/LICENSE b/vendor/github.com/kyoh86/exportloopref/LICENSE new file mode 100644 index 000000000..7ac9dba4a --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 kyoh86 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kyoh86/exportloopref/Makefile b/vendor/github.com/kyoh86/exportloopref/Makefile new file mode 100644 index 000000000..4d3ef22f7 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/Makefile @@ -0,0 +1,16 @@ +.PHONY: gen lint test install man + +VERSION := `git vertag get` +COMMIT := `git rev-parse HEAD` + +gen: + go generate ./... + +lint: gen + golangci-lint run + +test: lint + go test -v --race ./... + +install: test + go install -a -ldflags "-X=main.version=$(VERSION) -X=main.commit=$(COMMIT)" ./... diff --git a/vendor/github.com/kyoh86/exportloopref/README.md b/vendor/github.com/kyoh86/exportloopref/README.md new file mode 100644 index 000000000..5c019c738 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/README.md @@ -0,0 +1,221 @@ +# exportloopref + +An analyzer that finds exporting pointers for loop variables. 
+ +[![PkgGoDev](https://pkg.go.dev/badge/kyoh86/exportloopref)](https://pkg.go.dev/kyoh86/exportloopref) +[![Go Report Card](https://goreportcard.com/badge/github.com/kyoh86/exportloopref)](https://goreportcard.com/report/github.com/kyoh86/exportloopref) +[![Coverage Status](https://img.shields.io/codecov/c/github/kyoh86/exportloopref.svg)](https://codecov.io/gh/kyoh86/exportloopref) +[![Release](https://github.com/kyoh86/exportloopref/workflows/Release/badge.svg)](https://github.com/kyoh86/exportloopref/releases) + +## What's this? + +Sample problem code from: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/simple/simple.go + +```go +package main + +func main() { + var intArray [4]*int + var intSlice []*int + var intRef *int + var intStr struct{ x *int } + + println("loop expecting 10, 11, 12, 13") + for i, p := range []int{10, 11, 12, 13} { + printp(&p) // not a diagnostic + intSlice = append(intSlice, &p) // want "exporting a pointer for the loop variable p" + intArray[i] = &p // want "exporting a pointer for the loop variable p" + if i%2 == 0 { + intRef = &p // want "exporting a pointer for the loop variable p" + intStr.x = &p // want "exporting a pointer for the loop variable p" + } + var vStr struct{ x *int } + var vArray [4]*int + var v *int + if i%2 == 0 { + v = &p // not a diagnostic (x is local variable) + vArray[1] = &p // not a diagnostic (x is local variable) + vStr.x = &p + } + _ = v + } + + println(`slice expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) + for _, p := range intSlice { + printp(p) + } + println(`array expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) + for _, p := range intArray { + printp(p) + } + println(`captured value expecting "12" but "13"`) + printp(intRef) +} + +func printp(p *int) { + println(*p) +} +``` + +In Go, the `p` variable in the above loops is actually a single variable. +So in many case (like the above), using it makes for us annoying bugs. + +You can find them with `exportloopref`, and fix it. + +```go +package main + +func main() { + var intArray [4]*int + var intSlice []*int + var intRef *int + var intStr struct{ x *int } + + println("loop expecting 10, 11, 12, 13") + for i, p := range []int{10, 11, 12, 13} { + p := p // FIX variable into the local variable + printp(&p) + intSlice = append(intSlice, &p) + intArray[i] = &p + if i%2 == 0 { + intRef = &p + intStr.x = &p + } + var vStr struct{ x *int } + var vArray [4]*int + var v *int + if i%2 == 0 { + v = &p + vArray[1] = &p + vStr.x = &p + } + _ = v + } + + println(`slice expecting "10, 11, 12, 13"`) + for _, p := range intSlice { + printp(p) + } + println(`array expecting "10, 11, 12, 13"`) + for _, p := range intArray { + printp(p) + } + println(`captured value expecting "12"`) + printp(intRef) +} + +func printp(p *int) { + println(*p) +} +``` + +ref: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/fixed/fixed.go + +## Sensing policy + +I want to make exportloopref as accurately as possible. +So some cases of lints will be false-negative. + +e.g. + +```go +var s Foo +for _, p := []int{10, 11, 12, 13} { + s.Bar(&p) // If s stores the pointer, it will be bug. +} +``` + +If you want to report all of lints (with some false-positives), +you should use [looppointer](https://github.com/kyoh86/looppointer). + +### Known false negatives + +Case 1: pass the pointer to function to export. + +Case 2: pass the pointer to local variable, and export it. 
+ +```go +package main + +type List []*int + +func (l *List) AppendP(p *int) { + *l = append(*l, p) +} + +func main() { + var slice []*int + list := List{} + + println("loop expect exporting 10, 11, 12, 13") + for _, v := range []int{10, 11, 12, 13} { + list.AppendP(&v) // Case 1: wanted "exporting a pointer for the loop variable v", but cannot be found + + p := &v // p is the local variable + slice = append(slice, p) // Case 2: wanted "exporting a pointer for the loop variable v", but cannot be found + } + + println(`slice expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) + for _, p := range slice { + printp(p) + } + println(`array expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) + for _, p := range ([]*int)(list) { + printp(p) + } +} + +func printp(p *int) { + println(*p) +} +``` + +## Install + +go: + +```console +$ go get github.com/kyoh86/exportloopref/cmd/exportloopref +``` + +[homebrew](https://brew.sh/): + +```console +$ brew install kyoh86/tap/exportloopref +``` + +[gordon](https://github.com/kyoh86/gordon): + +```console +$ gordon install kyoh86/exportloopref +``` + +## Usage + +``` +exportloopref [-flag] [package] +``` + +### Flags + +| Flag | Description | +| --- | --- | +| -V | print version and exit | +| -all | no effect (deprecated) | +| -c int | display offending line with this many lines of context (default -1) | +| -cpuprofile string | write CPU profile to this file | +| -debug string | debug flags, any subset of "fpstv" | +| -fix | apply all suggested fixes | +| -flags | print analyzer flags in JSON | +| -json | emit JSON output | +| -memprofile string | write memory profile to this file | +| -source | no effect (deprecated) | +| -tags string | no effect (deprecated) | +| -trace string | write trace log to this file | +| -v | no effect (deprecated) | + +# LICENSE + +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg)](http://www.opensource.org/licenses/MIT) + +This is distributed under the [MIT License](http://www.opensource.org/licenses/MIT). 
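The README above covers the prebuilt `exportloopref` binary. For completeness, the sketch below (not part of the vendored files or of this patch) shows how the package's exported `Analyzer` value can be wired into a standalone driver using the standard `singlechecker` package from golang.org/x/tools; the file path in the comment is hypothetical.

```go
// cmd/exportloopref/main.go - hypothetical standalone driver; a minimal
// sketch assuming the vendored import path shown in this patch.
package main

import (
	"github.com/kyoh86/exportloopref"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker.Main registers the standard analysis flags (the ones
	// listed in the table above), loads the requested packages, runs this
	// single analyzer, and exits with a non-zero status on findings.
	singlechecker.Main(exportloopref.Analyzer)
}
```

Built this way, the driver is invoked with package patterns, e.g. `exportloopref -fix ./...`; a meta-linter such as golangci-lint drives the same `Analyzer` value through its own runner instead of this binary.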
diff --git a/vendor/github.com/kyoh86/exportloopref/exportloopref.go b/vendor/github.com/kyoh86/exportloopref/exportloopref.go new file mode 100644 index 000000000..4d1671a06 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/exportloopref.go @@ -0,0 +1,305 @@ +package exportloopref + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "exportloopref", + Doc: "checks for pointers to enclosing loop variables", + Run: run, + RunDespiteErrors: true, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + // ResultType reflect.Type + // FactTypes []Fact +} + +func init() { + // Analyzer.Flags.StringVar(&v, "name", "default", "description") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + search := &Searcher{ + Stats: map[token.Pos]struct{}{}, + Vars: map[token.Pos]map[token.Pos]struct{}{}, + Types: pass.TypesInfo.Types, + } + + nodeFilter := []ast.Node{ + (*ast.RangeStmt)(nil), + (*ast.ForStmt)(nil), + (*ast.DeclStmt)(nil), + (*ast.AssignStmt)(nil), + (*ast.UnaryExpr)(nil), + } + + inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool { + id, insert, digg := search.Check(n, stack) + if id != nil { + dMsg := fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name) + fMsg := fmt.Sprintf("loop variable %s should be pinned", id.Name) + var suggest []analysis.SuggestedFix + if insert != token.NoPos { + suggest = []analysis.SuggestedFix{{ + Message: fMsg, + TextEdits: []analysis.TextEdit{{ + Pos: insert, + End: insert, + NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), + }}, + }} + } + d := analysis.Diagnostic{Pos: id.Pos(), + End: id.End(), + Message: dMsg, + Category: "exportloopref", + SuggestedFixes: suggest, + } + pass.Report(d) + } + return digg + }) + + return nil, nil +} + +type Searcher struct { + // Statement variables : map to collect positions that + // variables are declared like below. + // - for , := range ... + // - var int + // - D := ... + Stats map[token.Pos]struct{} + // Local variables maps loop-position, decl-location to ignore + // safe pointers for variable which declared in the loop. + Vars map[token.Pos]map[token.Pos]struct{} + Types map[ast.Expr]types.TypeAndValue +} + +func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, token.Pos, bool) { + switch typed := n.(type) { + case *ast.RangeStmt: + s.parseRangeStmt(typed) + case *ast.ForStmt: + s.parseForStmt(typed) + case *ast.DeclStmt: + s.parseDeclStmt(typed, stack) + case *ast.AssignStmt: + s.parseAssignStmt(typed, stack) + + case *ast.UnaryExpr: + return s.checkUnaryExpr(typed, stack) + } + return nil, token.NoPos, true +} + +func (s *Searcher) parseRangeStmt(n *ast.RangeStmt) { + s.addStat(n.Key) + s.addStat(n.Value) +} + +func (s *Searcher) parseForStmt(n *ast.ForStmt) { + switch post := n.Post.(type) { + case *ast.AssignStmt: + // e.g. for p = head; p != nil; p = p.next + for _, lhs := range post.Lhs { + s.addStat(lhs) + } + case *ast.IncDecStmt: + // e.g. 
for i := 0; i < n; i++ + s.addStat(post.X) + } +} + +func (s *Searcher) addStat(expr ast.Expr) { + if id, ok := expr.(*ast.Ident); ok { + s.Stats[id.Pos()] = struct{}{} + } +} + +func (s *Searcher) parseDeclStmt(n *ast.DeclStmt, stack []ast.Node) { + loop, _ := s.innermostLoop(stack) + if loop == nil { + return + } + + // Register declaring variables + if genDecl, ok := n.Decl.(*ast.GenDecl); ok && genDecl.Tok == token.VAR { + for _, spec := range genDecl.Specs { + for _, name := range spec.(*ast.ValueSpec).Names { + s.addVar(loop, name) + } + } + } +} + +func (s *Searcher) parseAssignStmt(n *ast.AssignStmt, stack []ast.Node) { + loop, _ := s.innermostLoop(stack) + if loop == nil { + return + } + + // Find statements declaring local variable + if n.Tok == token.DEFINE { + for _, h := range n.Lhs { + s.addVar(loop, h) + } + } +} + +func (s *Searcher) addVar(loop ast.Node, expr ast.Expr) { + loopPos := loop.Pos() + id, ok := expr.(*ast.Ident) + if !ok { + return + } + vars, ok := s.Vars[loopPos] + if !ok { + vars = map[token.Pos]struct{}{} + } + vars[id.Obj.Pos()] = struct{}{} + s.Vars[loopPos] = vars +} + +func insertionPosition(block *ast.BlockStmt) token.Pos { + if len(block.List) > 0 { + return block.List[0].Pos() + } + return token.NoPos +} + +func (s *Searcher) innermostLoop(stack []ast.Node) (ast.Node, token.Pos) { + for i := len(stack) - 1; i >= 0; i-- { + switch typed := stack[i].(type) { + case *ast.RangeStmt: + return typed, insertionPosition(typed.Body) + case *ast.ForStmt: + return typed, insertionPosition(typed.Body) + } + } + return nil, token.NoPos +} + +func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Ident, token.Pos, bool) { + loop, insert := s.innermostLoop(stack) + if loop == nil { + return nil, token.NoPos, true + } + + if n.Op != token.AND { + return nil, token.NoPos, true + } + + // Get identity of the referred item + id := s.getIdentity(n.X) + if id == nil { + return nil, token.NoPos, true + } + + // If the identity is not the loop statement variable, + // it will not be reported. + if _, isStat := s.Stats[id.Obj.Pos()]; !isStat { + return nil, token.NoPos, true + } + + // check stack append(), []X{}, map[Type]X{}, Struct{}, &Struct{}, X.(Type), (X) + // in the = + var mayRHPos token.Pos + for i := len(stack) - 2; i >= 0; i-- { + switch typed := stack[i].(type) { + case (*ast.UnaryExpr): + // noop + case (*ast.CompositeLit): + // noop + case (*ast.KeyValueExpr): + // noop + case (*ast.CallExpr): + fun, ok := typed.Fun.(*ast.Ident) + if !ok { + return nil, token.NoPos, false // it's calling a function other of `append`. It cannot be checked + } + + if fun.Name != "append" { + return nil, token.NoPos, false // it's calling a function other of `append`. It cannot be checked + } + + case (*ast.AssignStmt): + if len(typed.Rhs) != len(typed.Lhs) { + return nil, token.NoPos, false // dead logic + } + + // search x where Rhs[x].Pos() == mayRHPos + var index int + for ri, rh := range typed.Rhs { + if rh.Pos() == mayRHPos { + index = ri + break + } + } + + // check Lhs[x] is not local variable + lh := typed.Lhs[index] + isVar := s.isVar(loop, lh) + if !isVar { + return id, insert, false + } + + return nil, token.NoPos, true + default: + // Other statement is not able to be checked. 
+ return nil, token.NoPos, false + } + + // memory an expr that may be right-hand in the AssignStmt + mayRHPos = stack[i].Pos() + } + return nil, token.NoPos, true +} + +func (s *Searcher) isVar(loop ast.Node, expr ast.Expr) bool { + vars := s.Vars[loop.Pos()] // map[token.Pos]struct{} + if vars == nil { + return false + } + switch typed := expr.(type) { + case (*ast.Ident): + _, isVar := vars[typed.Obj.Pos()] + return isVar + case (*ast.IndexExpr): // like X[Y], check X + return s.isVar(loop, typed.X) + case (*ast.SelectorExpr): // like X.Y, check X + return s.isVar(loop, typed.X) + } + return false +} + +// Get variable identity +func (s *Searcher) getIdentity(expr ast.Expr) *ast.Ident { + switch typed := expr.(type) { + case *ast.SelectorExpr: + // Ignore if the parent is pointer ref (fix for #2) + if _, ok := s.Types[typed.X].Type.(*types.Pointer); ok { + return nil + } + + // Get parent identity; i.e. `a.b` of the `a.b.c`. + return s.getIdentity(typed.X) + + case *ast.Ident: + // Get simple identity; i.e. `a` of the `a`. + if typed.Obj == nil { + return nil + } + return typed + } + return nil +} diff --git a/vendor/github.com/kyoh86/exportloopref/go.mod b/vendor/github.com/kyoh86/exportloopref/go.mod new file mode 100644 index 000000000..34a53987a --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/go.mod @@ -0,0 +1,5 @@ +module github.com/kyoh86/exportloopref + +go 1.14 + +require golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa diff --git a/vendor/github.com/kyoh86/exportloopref/go.sum b/vendor/github.com/kyoh86/exportloopref/go.sum new file mode 100644 index 000000000..3b199f006 --- /dev/null +++ b/vendor/github.com/kyoh86/exportloopref/go.sum @@ -0,0 +1,20 @@ +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa h1:mMXQKlWCw9mIWgVLLfiycDZjMHMMYqiuakI4E/l2xcA= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/ldez/gomoddirectives/.gitignore b/vendor/github.com/ldez/gomoddirectives/.gitignore new file mode 100644 index 000000000..9da3e0da9 --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/.gitignore @@ -0,0 +1,2 @@ +.idea/ +/gomoddirectives diff --git a/vendor/github.com/ldez/gomoddirectives/.golangci.yml b/vendor/github.com/ldez/gomoddirectives/.golangci.yml new file mode 100644 index 000000000..cc0a1fcae --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/.golangci.yml @@ -0,0 +1,87 @@ +run: + deadline: 2m + skip-files: [] + skip-dirs: [] + +linters-settings: + govet: + enable-all: true + gocyclo: + min-complexity: 12 + goconst: + min-len: 3 + min-occurrences: 3 + misspell: + locale: US + gofumpt: + extra-rules: true + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/pkg/errors + godox: + keywords: + - FIXME + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + forbidigo: + forbid: + - '^print(ln)?$' + - '^fmt\.Print(f|ln)?$' + - '^panic$' + - '^spew\.Print(f|ln)?$' + - '^spew\.Dump$' + tagliatelle: + case: + rules: + json: pascal + +linters: + enable-all: true + disable: + - maligned # deprecated + - interfacer # deprecated + - golint # deprecated + - scopelint # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - prealloc + - bodyclose + - wsl + - nlreturn + - gomnd + - testpackage + - paralleltest + - tparallel + - goerr113 + - wrapcheck + - exhaustive + - exhaustivestruct + +issues: + exclude-use-default: false + max-per-linter: 0 + max-same-issues: 0 + exclude: [] + exclude-rules: + - path: "(.+)_test.go" + linters: + - funlen + - goconst + - path: cmd/gomoddirectives/gomoddirectives.go + text: 'use of `fmt.Println` forbidden' diff --git a/vendor/github.com/ldez/gomoddirectives/LICENSE b/vendor/github.com/ldez/gomoddirectives/LICENSE new file mode 100644 index 000000000..caed523b4 --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2021 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ldez/gomoddirectives/Makefile b/vendor/github.com/ldez/gomoddirectives/Makefile new file mode 100644 index 000000000..dd3b335c7 --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... 
+ +check: + golangci-lint run + +build: + go build -v -ldflags "-s -w" -trimpath ./cmd/gomoddirectives/ diff --git a/vendor/github.com/ldez/gomoddirectives/go.mod b/vendor/github.com/ldez/gomoddirectives/go.mod new file mode 100644 index 000000000..fb65d2ddc --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/go.mod @@ -0,0 +1,8 @@ +module github.com/ldez/gomoddirectives + +go 1.16 + +require ( + github.com/stretchr/testify v1.7.0 + golang.org/x/mod v0.4.2 +) diff --git a/vendor/github.com/ldez/gomoddirectives/go.sum b/vendor/github.com/ldez/gomoddirectives/go.sum new file mode 100644 index 000000000..4e4ac3ecc --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/go.sum @@ -0,0 +1,25 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go new file mode 100644 index 000000000..2a4c90474 --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go @@ -0,0 +1,125 @@ +// Package gomoddirectives a linter that handle `replace`, `retract`, `exclude` directives into 
`go.mod`. +package gomoddirectives + +import ( + "fmt" + "go/token" + "strings" + + "golang.org/x/mod/modfile" +) + +const ( + reasonRetract = "a comment is mandatory to explain why the version has been retracted" + reasonExclude = "exclude directive is not allowed" + reasonReplaceLocal = "local replacement are not allowed" + reasonReplace = "replacement are not allowed" + reasonReplaceIdentical = "the original module and the replacement are identical" + reasonReplaceDuplicate = "multiple replacement of the same module" +) + +// Result the analysis result. +type Result struct { + Reason string + Start token.Position + End token.Position +} + +// NewResult creates a new Result. +func NewResult(file *modfile.File, line *modfile.Line, reason string) Result { + return Result{ + Start: token.Position{Filename: file.Syntax.Name, Line: line.Start.Line, Column: line.Start.LineRune}, + End: token.Position{Filename: file.Syntax.Name, Line: line.End.Line, Column: line.End.LineRune}, + Reason: reason, + } +} + +func (r Result) String() string { + return fmt.Sprintf("%s: %s", r.Start, r.Reason) +} + +// Options the analyzer options. +type Options struct { + ReplaceAllowList []string + ReplaceAllowLocal bool + ExcludeForbidden bool + RetractAllowNoExplanation bool +} + +// Analyze analyzes a project. +func Analyze(opts Options) ([]Result, error) { + f, err := GetModuleFile() + if err != nil { + return nil, fmt.Errorf("failed to get module file: %w", err) + } + + return AnalyzeFile(f, opts), nil +} + +// AnalyzeFile analyzes a mod file. +func AnalyzeFile(file *modfile.File, opts Options) []Result { + var results []Result + + if !opts.RetractAllowNoExplanation { + for _, r := range file.Retract { + if r.Rationale != "" { + continue + } + + results = append(results, NewResult(file, r.Syntax, reasonRetract)) + } + } + + if opts.ExcludeForbidden { + for _, e := range file.Exclude { + results = append(results, NewResult(file, e.Syntax, reasonExclude)) + } + } + + uniqReplace := map[string]struct{}{} + + for _, r := range file.Replace { + reason := check(opts, r) + if reason != "" { + results = append(results, NewResult(file, r.Syntax, reason)) + continue + } + + if r.Old.Path == r.New.Path && r.Old.Version == r.New.Version { + results = append(results, NewResult(file, r.Syntax, reasonReplaceIdentical)) + continue + } + + if _, ok := uniqReplace[r.Old.Path+r.Old.Version]; ok { + results = append(results, NewResult(file, r.Syntax, reasonReplaceDuplicate)) + } + + uniqReplace[r.Old.Path+r.Old.Version] = struct{}{} + } + + return results +} + +func check(o Options, r *modfile.Replace) string { + if isLocal(r) { + if o.ReplaceAllowLocal { + return "" + } + + return fmt.Sprintf("%s: %s", reasonReplaceLocal, r.Old.Path) + } + + for _, v := range o.ReplaceAllowList { + if r.Old.Path == v { + return "" + } + } + + return fmt.Sprintf("%s: %s", reasonReplace, r.Old.Path) +} + +// Filesystem paths found in "replace" directives are represented by a path with an empty version. 
+// https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 +func isLocal(r *modfile.Replace) bool { + return strings.TrimSpace(r.New.Version) == "" +} diff --git a/vendor/github.com/ldez/gomoddirectives/module.go b/vendor/github.com/ldez/gomoddirectives/module.go new file mode 100644 index 000000000..379ae07fa --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/module.go @@ -0,0 +1,47 @@ +package gomoddirectives + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os/exec" + + "golang.org/x/mod/modfile" +) + +type modInfo struct { + Path string `json:"Path"` + Dir string `json:"Dir"` + GoMod string `json:"GoMod"` + GoVersion string `json:"GoVersion"` + Main bool `json:"Main"` +} + +// GetModuleFile gets module file. +func GetModuleFile() (*modfile.File, error) { + // https://github.com/golang/go/issues/44753#issuecomment-790089020 + cmd := exec.Command("go", "list", "-m", "-json") + + raw, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("command go list: %w: %s", err, string(raw)) + } + + var v modInfo + err = json.Unmarshal(raw, &v) + if err != nil { + return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(raw)) + } + + if v.GoMod == "" { + return nil, errors.New("working directory is not part of a module") + } + + raw, err = ioutil.ReadFile(v.GoMod) + if err != nil { + return nil, fmt.Errorf("reading go.mod file: %w", err) + } + + return modfile.Parse("go.mod", raw, nil) +} diff --git a/vendor/github.com/ldez/gomoddirectives/readme.md b/vendor/github.com/ldez/gomoddirectives/readme.md new file mode 100644 index 000000000..510c8502e --- /dev/null +++ b/vendor/github.com/ldez/gomoddirectives/readme.md @@ -0,0 +1,16 @@ +# gomoddirectives + +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) +[![Build Status](https://github.com/ldez/gomoddirectives/workflows/Main/badge.svg?branch=master)](https://github.com/ldez/gomoddirectives/actions) + +A linter that handle [`replace`](https://golang.org/ref/mod#go-mod-file-replace), [`retract`](https://golang.org/ref/mod#go-mod-file-retract), [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives into `go.mod`. 
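
For orientation, the exported API shown above (`Options`, `Analyze`, `AnalyzeFile`) can also be driven outside of golangci-lint. Below is a minimal, hypothetical sketch (a standalone `main` package, not part of the vendored module) that checks the current module with a typical option set:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ldez/gomoddirectives"
)

func main() {
	// Analyze locates go.mod via `go list -m -json` and runs all checks.
	results, err := gomoddirectives.Analyze(gomoddirectives.Options{
		ReplaceAllowLocal:         true,  // tolerate filesystem replace directives
		ReplaceAllowList:          nil,   // forbid all remote replace directives
		ExcludeForbidden:          true,  // report any exclude directive
		RetractAllowNoExplanation: false, // retract directives must carry a rationale comment
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, r := range results {
		fmt.Println(r) // e.g. "go.mod:12:1: exclude directive is not allowed"
	}
}
```

These options roughly correspond to the `gomoddirectives` settings exposed by golangci-lint when the linter runs as part of a lint step.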
+ +Features: + +- ban all [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +- allow only local [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +- allow only some [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +- force explanation for [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives +- ban all [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives +- detect duplicated [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +- detect identical [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives diff --git a/vendor/github.com/ldez/tagliatelle/.gitignore b/vendor/github.com/ldez/tagliatelle/.gitignore new file mode 100644 index 000000000..74c84ce62 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/.gitignore @@ -0,0 +1,3 @@ +.idea/ +/tagliatelle +notes.md diff --git a/vendor/github.com/ldez/tagliatelle/.golangci.yml b/vendor/github.com/ldez/tagliatelle/.golangci.yml new file mode 100644 index 000000000..b897103e9 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -0,0 +1,77 @@ +run: + timeout: 5m + skip-files: [ ] + skip-dirs: [ ] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 15 + maligned: + suggest-new: true + goconst: + min-len: 5 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/sirupsen/logrus + - github.com/pkg/errors + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + +linters: + enable-all: true + disable: + - maligned # deprecated + - interfacer # deprecated + - scopelint # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - wsl + - nlreturn + - gomnd + - goerr113 + - wrapcheck + - exhaustive + - exhaustivestruct + - testpackage + - tparallel + - paralleltest + - prealloc + - ifshort + - forcetypeassert + +issues: + exclude-use-default: false + max-per-linter: 0 + max-same-issues: 0 + exclude: [] diff --git a/vendor/github.com/ldez/tagliatelle/LICENSE b/vendor/github.com/ldez/tagliatelle/LICENSE new file mode 100644 index 000000000..caed523b4 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2021 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ldez/tagliatelle/Makefile b/vendor/github.com/ldez/tagliatelle/Makefile new file mode 100644 index 000000000..f66a39804 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... 
+ +check: + golangci-lint run + +build: + go build -v -ldflags "-s -w" -trimpath ./cmd/tagliatelle/ diff --git a/vendor/github.com/ldez/tagliatelle/go.mod b/vendor/github.com/ldez/tagliatelle/go.mod new file mode 100644 index 000000000..159e907fb --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/go.mod @@ -0,0 +1,8 @@ +module github.com/ldez/tagliatelle + +go 1.16 + +require ( + github.com/ettle/strcase v0.1.1 + golang.org/x/tools v0.1.0 +) diff --git a/vendor/github.com/ldez/tagliatelle/go.sum b/vendor/github.com/ldez/tagliatelle/go.sum new file mode 100644 index 000000000..ae1c76f51 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/go.sum @@ -0,0 +1,38 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.0 
h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ldez/tagliatelle/readme.md b/vendor/github.com/ldez/tagliatelle/readme.md new file mode 100644 index 000000000..846767b2c --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/readme.md @@ -0,0 +1,31 @@ +# Tagliatelle + +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) +[![Build Status](https://github.com/ldez/tagliatelle/workflows/Main/badge.svg?branch=master)](https://github.com/ldez/tagliatelle/actions) + +A linter that handles struct tags. + +Supported string casing: + +- `camel` +- `pascal` +- `kebab` +- `smake` +- `goCamel` +- `goPascal` +- `goKebab` +- `goSmake` +- `upper` +- `lower` + +## Examples + +```go +// json and camel case +type Foo struct { + ID string `json:"ID"` // must be "id" + UserID string `json:"UserID"`// must be "userId" + Name string `json:"name"` + Value string `json:"val,omitempty"`// must be "value" +} +``` diff --git a/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/vendor/github.com/ldez/tagliatelle/tagliatelle.go new file mode 100644 index 000000000..dfb302b12 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -0,0 +1,192 @@ +// Package tagliatelle a linter that handle struct tags. +package tagliatelle + +import ( + "encoding/json" + "errors" + "fmt" + "go/ast" + "reflect" + "strings" + + "github.com/ettle/strcase" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// Config the tagliatelle configuration. +type Config struct { + Rules map[string]string + UseFieldName bool +} + +// New creates an analyzer. 
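// For illustration only (not part of the upstream package): a Config that
// enforces camel case on `json` tags and kebab case on `yaml` tags, compared
// against the Go field name, could be wired up like this:
//
//	cfg := Config{
//		Rules: map[string]string{
//			"json": "camel",
//			"yaml": "kebab",
//		},
//		UseFieldName: true, // derive the expected tag value from the field name
//	}
//	analyzer := New(cfg)
//	_ = analyzer // hand the analyzer to an analysis driver (e.g. singlechecker)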
+func New(config Config) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "tagliatelle", + Doc: "Checks the struct tags.", + Run: func(pass *analysis.Pass) (interface{}, error) { + if len(config.Rules) == 0 { + return nil, nil + } + + return run(pass, config) + }, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + } +} + +func run(pass *analysis.Pass, config Config) (interface{}, error) { + isp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errors.New("missing inspect analyser") + } + + nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + + isp.Preorder(nodeFilter, func(n ast.Node) { + node, ok := n.(*ast.StructType) + if !ok { + return + } + + for _, field := range node.Fields.List { + analyze(pass, config, node, field) + } + }) + + return nil, nil +} + +func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.Field) { + if n.Fields == nil || n.Fields.NumFields() < 1 { + // skip empty structs + return + } + + if field.Tag == nil { + // skip when no struct tag + return + } + + fieldName, err := getFieldName(field) + if err != nil { + pass.Reportf(n.Pos(), "unable to get field name: %v", err) + return + } + + for key, convName := range config.Rules { + if convName == "" { + continue + } + + value, ok := lookupTagValue(field.Tag, key) + if !ok { + // skip when no struct tag for the key + continue + } + + if value == "-" { + // skip when skipped :) + continue + } + + if value == "" { + // skip empty value, it can change in the future + continue + } + + converter, err := getConverter(convName) + if err != nil { + pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) + continue + } + + expected := value + if config.UseFieldName { + expected = fieldName + } + + if value != converter(expected) { + pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) + } + } +} + +func getFieldName(field *ast.Field) (string, error) { + var name string + for _, n := range field.Names { + if n.Name != "" { + name = n.Name + } + } + + if name != "" { + return name, nil + } + + return getTypeName(field.Type) +} + +func getTypeName(exp ast.Expr) (string, error) { + switch typ := exp.(type) { + case *ast.Ident: + return typ.Name, nil + case *ast.StarExpr: + return getTypeName(typ.X) + case *ast.SelectorExpr: + return getTypeName(typ.Sel) + default: + bytes, _ := json.Marshal(exp) + return "", fmt.Errorf("unexpected eror: type %T: %s", typ, string(bytes)) + } +} + +func lookupTagValue(tag *ast.BasicLit, key string) (string, bool) { + raw := strings.Trim(tag.Value, "`") + + value, ok := reflect.StructTag(raw).Lookup(key) + if !ok { + return value, ok + } + + values := strings.Split(value, ",") + + if len(values) < 1 { + return "", true + } + + return values[0], true +} + +func getConverter(c string) (func(s string) string, error) { + switch c { + case "camel": + return strcase.ToCamel, nil + case "pascal": + return strcase.ToPascal, nil + case "kebab": + return strcase.ToKebab, nil + case "snake": + return strcase.ToSnake, nil + case "goCamel": + return strcase.ToGoCamel, nil + case "goPascal": + return strcase.ToGoPascal, nil + case "goKebab": + return strcase.ToGoKebab, nil + case "goSnake": + return strcase.ToGoSnake, nil + case "upper": + return strings.ToUpper, nil + case "lower": + return strings.ToLower, nil + default: + return nil, fmt.Errorf("unsupported case: %s", c) + } +} diff --git a/vendor/github.com/magiconair/properties/.gitignore 
b/vendor/github.com/magiconair/properties/.gitignore new file mode 100644 index 000000000..e7081ff52 --- /dev/null +++ b/vendor/github.com/magiconair/properties/.gitignore @@ -0,0 +1,6 @@ +*.sublime-project +*.sublime-workspace +*.un~ +*.swp +.idea/ +*.iml diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml new file mode 100644 index 000000000..baf9031df --- /dev/null +++ b/vendor/github.com/magiconair/properties/.travis.yml @@ -0,0 +1,17 @@ +language: go +go: + - 1.3.x + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "1.15.x" + - "1.16.x" + - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md new file mode 100644 index 000000000..ff8d02535 --- /dev/null +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -0,0 +1,160 @@ +## Changelog + +### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 + + * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write + + This patch ensures that backslashes are escaped on write. Existing applications which + rely on the old behavior may need to be updated. + + Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. + + * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() + + Thanks to [@aliras1](https://github.com/aliras1) for the patch. + + * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + + * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + +### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 + + * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request + + This patch ensures that in `LoadURL` the response body is always closed. + + Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. + +### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 + + * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading + + This adds the option to disable property expansion during loading. + + Thanks to [@kmala](https://github.com/kmala) for the patch. + +### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 + + * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. + + See PR for an example. + + Thanks to [@yobert](https://github.com/yobert) for the fix. + +### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 + + * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value + + Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail + with a `circular reference error`. + + Thanks to [@yobert](https://github.com/yobert) for the fix. 
+ +### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 + + * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces + + * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled + + Thanks to [@mgurov](https://github.com/mgurov) for the fix. + +### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 + + * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically + * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map + +### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 + + * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency + * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) + +### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 + + * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` + * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs + * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy + * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function + +### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 + + * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. + * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. + * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) + +### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 + + * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. + +### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 + + * Vendored in gopkg.in/check.v1 + +### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 + + * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) + +### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 + + * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
+ +### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 + + * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) + +### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 + + * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty + * Add clickable links to README + +### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 + + * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with + [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). + +### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 + + * Added support for single and multi-line comments (reading, writing and updating) + * The order of keys is now preserved + * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry + * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method + * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) + +### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 + + * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one + +### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 + + * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string + +### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 + + * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys + * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties + +### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 + +* Added support for time.Duration +* Made MustXXX() failure beha[ior configurable (log.Fatal, panic](https://github.com/magiconair/properties/tree/vior configurable (log.Fatal, panic) - custom) +* Changed default of MustXXX() failure from panic to log.Fatal + +### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 + +* Added MustGet... 
functions +* Added support for int and uint with range checks on 32 bit platforms + +### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 + +* Renamed from goproperties to properties +* Added support for expansion of environment vars in + filenames and value expressions +* Fixed bug where value expressions were not at the + start of the string + +### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 + +* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md new file mode 100644 index 000000000..79c87e3e6 --- /dev/null +++ b/vendor/github.com/magiconair/properties/LICENSE.md @@ -0,0 +1,24 @@ +Copyright (c) 2013-2020, Frank Schroeder + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md new file mode 100644 index 000000000..e2edda025 --- /dev/null +++ b/vendor/github.com/magiconair/properties/README.md @@ -0,0 +1,128 @@ +[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) +[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) +[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) +[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) + +# Overview + +#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. + +properties is a Go library for reading and writing properties files. + +It supports reading from multiple files or URLs and Spring style recursive +property expansion of expressions like `${key}` to their corresponding value. +Value expressions can refer to other keys like in `${key}` or to environment +variables like in `${USER}`. Filenames can also contain environment variables +like in `/home/${USER}/myapp.properties`. 
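
As a quick, hypothetical illustration of that expansion behaviour (the key names below are invented for the example):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// ${base} is expanded recursively; ${USER} is taken from the environment.
	p := properties.MustLoadString(
		"base = /opt/app\n" +
			"logdir = ${base}/logs\n" +
			"home = /home/${USER}\n")

	fmt.Println(p.MustGetString("logdir")) // /opt/app/logs
	fmt.Println(p.MustGetString("home"))   // e.g. /home/alice
}
```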
+ +Properties can be decoded into structs, maps, arrays and values through +struct tags. + +Comments and the order of keys are preserved. Comments can be modified +and can be written to the output. + +The properties library supports both ISO-8859-1 and UTF-8 encoded data. + +Starting from version 1.3.0 the behavior of the MustXXX() functions is +configurable by providing a custom `ErrorHandler` function. The default has +changed from `panic` to `log.Fatal` but this is configurable and custom +error handling functions can be provided. See the package documentation for +details. + +Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) + +## Getting Started + +```go +import ( + "flag" + "github.com/magiconair/properties" +) + +func main() { + // init from a file + p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) + + // or multiple files + p = properties.MustLoadFiles([]string{ + "${HOME}/config.properties", + "${HOME}/config-${USER}.properties", + }, properties.UTF8, true) + + // or from a map + p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) + + // or from a string + p = properties.MustLoadString("key=value\nabc=def") + + // or from a URL + p = properties.MustLoadURL("http://host/path") + + // or from multiple URLs + p = properties.MustLoadURL([]string{ + "http://host/config", + "http://host/config-${USER}", + }, true) + + // or from flags + p.MustFlag(flag.CommandLine) + + // get values through getters + host := p.MustGetString("host") + port := p.GetInt("port", 8080) + + // or through Decode + type Config struct { + Host string `properties:"host"` + Port int `properties:"port,default=9000"` + Accept []string `properties:"accept,default=image/png;image;gif"` + Timeout time.Duration `properties:"timeout,default=5s"` + } + var cfg Config + if err := p.Decode(&cfg); err != nil { + log.Fatal(err) + } +} + +``` + +## Installation and Upgrade + +``` +$ go get -u github.com/magiconair/properties +``` + +## License + +2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. + +## ToDo + +* Dump contents with passwords and secrets obscured + +## Updated Git tags + +#### 13 Feb 2018 + +I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags +and I've only recently learned that this doesn't play well with `git describe` 😞 + +I have replaced all lightweight tags with signed tags using this script which should +retain the commit date, name and email address. Please run `git pull --tags` to update them. + +Worst case you have to reclone the repo. + +```shell +#!/bin/bash +tag=$1 +echo "Updating $tag" +date=$(git show ${tag}^0 --format=%aD | head -1) +email=$(git show ${tag}^0 --format=%aE | head -1) +name=$(git show ${tag}^0 --format=%aN | head -1) +GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} +``` + +I apologize for the inconvenience. + +Frank + diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go new file mode 100644 index 000000000..3ebf8049c --- /dev/null +++ b/vendor/github.com/magiconair/properties/decode.go @@ -0,0 +1,289 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Decode assigns property values to exported fields of a struct. +// +// Decode traverses v recursively and returns an error if a value cannot be +// converted to the field type or a required value is missing for a field. +// +// The following type dependent decodings are used: +// +// String, boolean, numeric fields have the value of the property key assigned. +// The property key name is the name of the field. A different key and a default +// value can be set in the field's tag. Fields without default value are +// required. If the value cannot be converted to the field type an error is +// returned. +// +// time.Duration fields have the result of time.ParseDuration() assigned. +// +// time.Time fields have the vaule of time.Parse() assigned. The default layout +// is time.RFC3339 but can be set in the field's tag. +// +// Arrays and slices of string, boolean, numeric, time.Duration and time.Time +// fields have the value interpreted as a comma separated list of values. The +// individual values are trimmed of whitespace and empty values are ignored. A +// default value can be provided as a semicolon separated list in the field's +// tag. +// +// Struct fields are decoded recursively using the field name plus "." as +// prefix. The prefix (without dot) can be overridden in the field's tag. +// Default values are not supported in the field's tag. Specify them on the +// fields of the inner struct instead. +// +// Map fields must have a key of type string and are decoded recursively by +// using the field's name plus ".' as prefix and the next element of the key +// name as map key. The prefix (without dot) can be overridden in the field's +// tag. Default values are not supported. +// +// Examples: +// +// // Field is ignored. +// Field int `properties:"-"` +// +// // Field is assigned value of 'Field'. +// Field int +// +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` +// +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` +// +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` +// +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` +// +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct +// +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` +// +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string +// +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. 
+// Field map[string]string `properties:"myName"` +func (p *Properties) Decode(x interface{}) error { + t, v := reflect.TypeOf(x), reflect.ValueOf(x) + if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { + return fmt.Errorf("not a pointer to struct: %s", t) + } + if err := dec(p, "", nil, nil, v); err != nil { + return err + } + return nil +} + +func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { + t := v.Type() + + // value returns the property value for key or the default if provided. + value := func() (string, error) { + if val, ok := p.Get(key); ok { + return val, nil + } + if def != nil { + return *def, nil + } + return "", fmt.Errorf("missing required key %s", key) + } + + // conv converts a string to a value of the given type. + conv := func(s string, t reflect.Type) (val reflect.Value, err error) { + var v interface{} + + switch { + case isDuration(t): + v, err = time.ParseDuration(s) + + case isTime(t): + layout := opts["layout"] + if layout == "" { + layout = time.RFC3339 + } + v, err = time.Parse(layout, s) + + case isBool(t): + v, err = boolVal(s), nil + + case isString(t): + v, err = s, nil + + case isFloat(t): + v, err = strconv.ParseFloat(s, 64) + + case isInt(t): + v, err = strconv.ParseInt(s, 10, 64) + + case isUint(t): + v, err = strconv.ParseUint(s, 10, 64) + + default: + return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) + } + if err != nil { + return reflect.Zero(t), err + } + return reflect.ValueOf(v).Convert(t), nil + } + + // keydef returns the property key and the default value based on the + // name of the struct field and the options in the tag. + keydef := func(f reflect.StructField) (string, *string, map[string]string) { + _key, _opts := parseTag(f.Tag.Get("properties")) + + var _def *string + if d, ok := _opts["default"]; ok { + _def = &d + } + if _key != "" { + return _key, _def, _opts + } + return f.Name, _def, _opts + } + + switch { + case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): + s, err := value() + if err != nil { + return err + } + val, err := conv(s, t) + if err != nil { + return err + } + v.Set(val) + + case isPtr(t): + return dec(p, key, def, opts, v.Elem()) + + case isStruct(t): + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + fk, def, opts := keydef(t.Field(i)) + if !fv.CanSet() { + return fmt.Errorf("cannot set %s", t.Field(i).Name) + } + if fk == "-" { + continue + } + if key != "" { + fk = key + "." 
+ fk + } + if err := dec(p, fk, def, opts, fv); err != nil { + return err + } + } + return nil + + case isArray(t): + val, err := value() + if err != nil { + return err + } + vals := split(val, ";") + a := reflect.MakeSlice(t, 0, len(vals)) + for _, s := range vals { + val, err := conv(s, t.Elem()) + if err != nil { + return err + } + a = reflect.Append(a, val) + } + v.Set(a) + + case isMap(t): + valT := t.Elem() + m := reflect.MakeMap(t) + for postfix := range p.FilterStripPrefix(key + ".").m { + pp := strings.SplitN(postfix, ".", 2) + mk, mv := pp[0], reflect.New(valT) + if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) + } + v.Set(m) + + default: + return fmt.Errorf("unsupported type %s", t) + } + return nil +} + +// split splits a string on sep, trims whitespace of elements +// and omits empty elements +func split(s string, sep string) []string { + var a []string + for _, v := range strings.Split(s, sep) { + if v = strings.TrimSpace(v); v != "" { + a = append(a, v) + } + } + return a +} + +// parseTag parses a "key,k=v,k=v,..." +func parseTag(tag string) (key string, opts map[string]string) { + opts = map[string]string{} + for i, s := range strings.Split(tag, ",") { + if i == 0 { + key = s + continue + } + + pp := strings.SplitN(s, "=", 2) + if len(pp) == 1 { + opts[pp[0]] = "" + } else { + opts[pp[0]] = pp[1] + } + } + return key, opts +} + +func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } +func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } +func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } +func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } +func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } +func isString(t reflect.Type) bool { return t.Kind() == reflect.String } +func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } +func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } +func isFloat(t reflect.Type) bool { + return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 +} +func isInt(t reflect.Type) bool { + return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 +} +func isUint(t reflect.Type) bool { + return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 +} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go new file mode 100644 index 000000000..f8822da2b --- /dev/null +++ b/vendor/github.com/magiconair/properties/doc.go @@ -0,0 +1,156 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package properties provides functions for reading and writing +// ISO-8859-1 and UTF-8 encoded .properties files and has +// support for recursive property expansion. +// +// Java properties files are ISO-8859-1 encoded and use Unicode +// literals for characters outside the ISO character set. Unicode +// literals can be used in UTF-8 encoded properties files but +// aren't necessary. 
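As a sketch of the struct-tag decoding documented in decode.go above, under made-up key and field names (the `default=` and `layout=` options follow the Decode doc comment):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/magiconair/properties"
)

type config struct {
	Host    string        `properties:"host"`
	Port    int           `properties:"port,default=8080"`
	Tags    []string      `properties:"tags,default=a;b;c"`
	Timeout time.Duration `properties:"timeout,default=5s"`
	Start   time.Time     `properties:"start,layout=2006-01-02,default=2021-01-01"`
}

func main() {
	p := properties.MustLoadString("host=example.com\ntimeout=10s")

	var cfg config
	if err := p.Decode(&cfg); err != nil {
		log.Fatal(err)
	}
	// Missing keys fall back to their tag defaults (port, tags, start).
	fmt.Printf("%+v\n", cfg)
}
```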
+// +// To load a single properties file use MustLoadFile(): +// +// p := properties.MustLoadFile(filename, properties.UTF8) +// +// To load multiple properties files use MustLoadFiles() +// which loads the files in the given order and merges the +// result. Missing properties files can be ignored if the +// 'ignoreMissing' flag is set to true. +// +// Filenames can contain environment variables which are expanded +// before loading. +// +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// +// All of the different key/value delimiters ' ', ':' and '=' are +// supported as well as the comment characters '!' and '#' and +// multi-line values. +// +// ! this is a comment +// # and so is this +// +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue +// +// Properties stores all comments preceding a key and provides +// GetComments() and SetComments() methods to retrieve and +// update them. The convenience functions GetComment() and +// SetComment() allow access to the last comment. The +// WriteComment() method writes properties files including +// the comments and with the keys in the original order. +// This can be used for sanitizing properties files. +// +// Property expansion is recursive and circular references +// and malformed expressions are not allowed and cause an +// error. Expansion of environment variables is supported. +// +// # standard property +// key = value +// +// # property expansion: key2 = value +// key2 = ${key} +// +// # recursive expansion: key3 = value +// key3 = ${key2} +// +// # circular reference (error) +// key = ${key} +// +// # malformed expression (error) +// key = ${ke +// +// # refers to the users' home dir +// home = ${HOME} +// +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} +// +// The default property expansion format is ${key} but can be +// changed by setting different pre- and postfix values on the +// Properties object. +// +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" +// +// Properties provides convenience functions for getting typed +// values with default values if the key does not exist or the +// type conversion failed. +// +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) +// +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) +// +// As an alternative properties may be applied with the standard +// library's flag implementation at any time. +// +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() +// +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) +// +// Properties provides several MustXXX() convenience functions +// which will terminate the app if an error occurs. The behavior +// of the failure is configurable and the default is to call +// log.Fatal(err). To have the MustXXX() functions panic instead +// of logging the error set a different ErrorHandler before +// you use the Properties package. 
+// +// properties.ErrorHandler = properties.PanicHandler +// +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") +// +// You can also provide your own ErrorHandler function. The only requirement +// is that the error handler function must exit after handling the error. +// +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } +// +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") +// +// Properties can also be loaded into a struct via the `Decode` +// method, e.g. +// +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } +// +// See `Decode()` method for the full documentation. +// +// The following documents provide a description of the properties +// file format. +// +// http://en.wikipedia.org/wiki/.properties +// +// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 +// +package properties diff --git a/vendor/github.com/magiconair/properties/go.mod b/vendor/github.com/magiconair/properties/go.mod new file mode 100644 index 000000000..4ff090bdc --- /dev/null +++ b/vendor/github.com/magiconair/properties/go.mod @@ -0,0 +1,3 @@ +module github.com/magiconair/properties + +go 1.13 diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go new file mode 100644 index 000000000..74d38dc67 --- /dev/null +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -0,0 +1,34 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import "flag" + +// MustFlag sets flags that are skipped by dst.Parse when p contains +// the respective key for flag.Flag.Name. +// +// It's use is recommended with command line arguments as in: +// flag.Parse() +// p.MustFlag(flag.CommandLine) +func (p *Properties) MustFlag(dst *flag.FlagSet) { + m := make(map[string]*flag.Flag) + dst.VisitAll(func(f *flag.Flag) { + m[f.Name] = f + }) + dst.Visit(func(f *flag.Flag) { + delete(m, f.Name) // overridden + }) + + for name, f := range m { + v, ok := p.Get(name) + if !ok { + continue + } + + if err := f.Value.Set(v); err != nil { + ErrorHandler(err) + } + } +} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go new file mode 100644 index 000000000..367166d58 --- /dev/null +++ b/vendor/github.com/magiconair/properties/lex.go @@ -0,0 +1,407 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Parts of the lexer are from the template/text/parser package +// For these parts the following applies: +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of the go 1.2 +// distribution. + +package properties + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos int // The starting position, in bytes, of this item in the input string. + val string // The value of this item. 
+} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemEOF + itemKey // a key + itemValue // a value + itemComment // a comment +) + +// defines a constant for EOF +const eof = -1 + +// permitted whitespace characters space, FF and TAB +const whitespace = " \f\t" + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + input string // the string being scanned + state stateFn // the next lexing function to enter + pos int // current position in the input + start int // start position of this item + width int // width of last rune read from input + lastPos int // position of most recent item returned by nextItem + runes []rune // scanned runes for this item + items chan item // channel of scanned items +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + i := item{t, l.start, string(l.runes)} + l.items <- i + l.start = l.pos + l.runes = l.runes[:0] +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// appends the rune to the current value +func (l *lexer) appendRune(r rune) { + l.runes = append(l.runes, r) +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +// acceptRunUntil consumes a run of runes up to a terminator. +func (l *lexer) acceptRunUntil(term rune) { + for term != l.next() { + } + l.backup() +} + +// hasText returns true if the current parsed text is not empty. +func (l *lexer) isNotEmpty() bool { + return l.pos > l.start +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + i := <-l.items + l.lastPos = i.pos + return i +} + +// lex creates a new scanner for the input string. 
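The lexer above uses the familiar "state function returns the next state, items flow over a channel" design from the text/template lexer. A self-contained illustrative sketch of that pattern only (none of these names exist in the vendored package):

```go
package main

import "fmt"

type tok struct {
	eof bool
	val string
}

type miniLexer struct {
	input string
	pos   int
	out   chan tok
}

// state mirrors stateFn: each state returns the next state, nil terminates.
type state func(*miniLexer) state

func lexWords(l *miniLexer) state {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	if l.pos > start {
		l.out <- tok{val: l.input[start:l.pos]}
	}
	if l.pos >= len(l.input) {
		l.out <- tok{eof: true}
		return nil
	}
	l.pos++ // skip the separator
	return lexWords
}

func main() {
	l := &miniLexer{input: "key value", out: make(chan tok)}
	go func() {
		for st := lexWords; st != nil; {
			st = st(l)
		}
	}()
	for t := range l.out {
		if t.eof {
			break
		}
		fmt.Printf("%q\n", t.val)
	}
}
```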
+func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + runes: make([]rune, 0, 32), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexBeforeKey(l); l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +// lexBeforeKey scans until a key begins. +func lexBeforeKey(l *lexer) stateFn { + switch r := l.next(); { + case isEOF(r): + l.emit(itemEOF) + return nil + + case isEOL(r): + l.ignore() + return lexBeforeKey + + case isComment(r): + return lexComment + + case isWhitespace(r): + l.ignore() + return lexBeforeKey + + default: + l.backup() + return lexKey + } +} + +// lexComment scans a comment line. The comment character has already been scanned. +func lexComment(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + for { + switch r := l.next(); { + case isEOF(r): + l.ignore() + l.emit(itemEOF) + return nil + case isEOL(r): + l.emit(itemComment) + return lexBeforeKey + default: + l.appendRune(r) + } + } +} + +// lexKey scans the key up to a delimiter +func lexKey(l *lexer) stateFn { + var r rune + +Loop: + for { + switch r = l.next(); { + + case isEscape(r): + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + + case isEndOfKey(r): + l.backup() + break Loop + + case isEOF(r): + break Loop + + default: + l.appendRune(r) + } + } + + if len(l.runes) > 0 { + l.emit(itemKey) + } + + if isEOF(r) { + l.emit(itemEOF) + return nil + } + + return lexBeforeValue +} + +// lexBeforeValue scans the delimiter between key and value. +// Leading and trailing whitespace is ignored. +// We expect to be just after the key. +func lexBeforeValue(l *lexer) stateFn { + l.acceptRun(whitespace) + l.accept(":=") + l.acceptRun(whitespace) + l.ignore() + return lexValue +} + +// lexValue scans text until the end of the line. We expect to be just after the delimiter. +func lexValue(l *lexer) stateFn { + for { + switch r := l.next(); { + case isEscape(r): + if isEOL(l.peek()) { + l.next() + l.acceptRun(whitespace) + } else { + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + } + + case isEOL(r): + l.emit(itemValue) + l.ignore() + return lexBeforeKey + + case isEOF(r): + l.emit(itemValue) + l.emit(itemEOF) + return nil + + default: + l.appendRune(r) + } + } +} + +// scanEscapeSequence scans either one of the escaped characters +// or a unicode literal. We expect to be after the escape character. +func (l *lexer) scanEscapeSequence() error { + switch r := l.next(); { + + case isEscapedCharacter(r): + l.appendRune(decodeEscapedCharacter(r)) + return nil + + case atUnicodeLiteral(r): + return l.scanUnicodeLiteral() + + case isEOF(r): + return fmt.Errorf("premature EOF") + + // silently drop the escape character and append the rune as is + default: + l.appendRune(r) + return nil + } +} + +// scans a unicode literal in the form \uXXXX. We expect to be after the \u. +func (l *lexer) scanUnicodeLiteral() error { + // scan the digits + d := make([]rune, 4) + for i := 0; i < 4; i++ { + d[i] = l.next() + if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { + return fmt.Errorf("invalid unicode literal") + } + } + + // decode the digits into a rune + r, err := strconv.ParseInt(string(d), 16, 0) + if err != nil { + return err + } + + l.appendRune(rune(r)) + return nil +} + +// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. 
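To illustrate the escape handling above, a small sketch: a trailing backslash continues the value on the next line (lexValue) and `\uXXXX` is decoded as a unicode literal (scanEscapeSequence / scanUnicodeLiteral):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// "\" at end of line continues the value; "\u00fc" is a unicode literal.
	input := "key = val\\\n    ue\ncity = Z\\u00fcrich"
	p := properties.MustLoadString(input)

	fmt.Println(p.MustGetString("key"))  // value
	fmt.Println(p.MustGetString("city")) // Zürich
}
```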
+func decodeEscapedCharacter(r rune) rune { + switch r { + case 'f': + return '\f' + case 'n': + return '\n' + case 'r': + return '\r' + case 't': + return '\t' + default: + return r + } +} + +// atUnicodeLiteral reports whether we are at a unicode literal. +// The escape character has already been consumed. +func atUnicodeLiteral(r rune) bool { + return r == 'u' +} + +// isComment reports whether we are at the start of a comment. +func isComment(r rune) bool { + return r == '#' || r == '!' +} + +// isEndOfKey reports whether the rune terminates the current key. +func isEndOfKey(r rune) bool { + return strings.ContainsRune(" \f\t\r\n:=", r) +} + +// isEOF reports whether we are at EOF. +func isEOF(r rune) bool { + return r == eof +} + +// isEOL reports whether we are at a new line character. +func isEOL(r rune) bool { + return r == '\n' || r == '\r' +} + +// isEscape reports whether the rune is the escape character which +// prefixes unicode literals and other escaped characters. +func isEscape(r rune) bool { + return r == '\\' +} + +// isEscapedCharacter reports whether we are at one of the characters that need escaping. +// The escape character has already been consumed. +func isEscapedCharacter(r rune) bool { + return strings.ContainsRune(" :=fnrt", r) +} + +// isWhitespace reports whether the rune is a whitespace character. +func isWhitespace(r rune) bool { + return strings.ContainsRune(whitespace, r) +} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go new file mode 100644 index 000000000..c83c2dadd --- /dev/null +++ b/vendor/github.com/magiconair/properties/load.go @@ -0,0 +1,293 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// Encoding specifies encoding of the input data. +type Encoding uint + +const ( + // utf8Default is a private placeholder for the zero value of Encoding to + // ensure that it has the correct meaning. UTF8 is the default encoding but + // was assigned a non-zero value which cannot be changed without breaking + // existing code. Clients should continue to use the public constants. + utf8Default Encoding = iota + + // UTF8 interprets the input data as UTF-8. + UTF8 + + // ISO_8859_1 interprets the input data as ISO-8859-1. + ISO_8859_1 +) + +type Loader struct { + // Encoding determines how the data from files and byte buffers + // is interpreted. For URLs the Content-Type header is used + // to determine the encoding of the data. + Encoding Encoding + + // DisableExpansion configures the property expansion of the + // returned property object. When set to true, the property values + // will not be expanded and the Property object will not be checked + // for invalid expansion expressions. + DisableExpansion bool + + // IgnoreMissing configures whether missing files or URLs which return + // 404 are reported as errors. When set to true, missing files and 404 + // status codes are not reported as errors. + IgnoreMissing bool +} + +// Load reads a buffer into a Properties struct. +func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { + return l.loadBytes(buf, l.Encoding) +} + +// LoadAll reads the content of multiple URLs or files in the given order into +// a Properties struct. If IgnoreMissing is true then a 404 status code or +// missing file will not be reported as error. 
Encoding sets the encoding for +// files. For the URLs see LoadURL for the Content-Type header and the +// encoding. +func (l *Loader) LoadAll(names []string) (*Properties, error) { + all := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + + var p *Properties + switch { + case strings.HasPrefix(n, "http://"): + p, err = l.LoadURL(n) + case strings.HasPrefix(n, "https://"): + p, err = l.LoadURL(n) + default: + p, err = l.LoadFile(n) + } + if err != nil { + return nil, err + } + all.Merge(p) + } + + all.DisableExpansion = l.DisableExpansion + if all.DisableExpansion { + return all, nil + } + return all, all.check() +} + +// LoadFile reads a file into a Properties struct. +// If IgnoreMissing is true then a missing file will not be +// reported as error. +func (l *Loader) LoadFile(filename string) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if l.IgnoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + return l.loadBytes(data, l.Encoding) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func (l *Loader) LoadURL(url string) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 && l.IgnoreMissing { + LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) + return NewProperties(), nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + + ct := resp.Header.Get("Content-Type") + ct = strings.Join(strings.Fields(ct), "") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": + enc = ISO_8859_1 + case "", "text/plain;charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + return l.loadBytes(body, enc) +} + +func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + p.DisableExpansion = l.DisableExpansion + if p.DisableExpansion { + return p, nil + } + return p, p.check() +} + +// Load reads a buffer into a Properties struct. +func Load(buf []byte, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadBytes(buf) +} + +// LoadString reads an UTF8 string into a properties struct. +func LoadString(s string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadBytes([]byte(s)) +} + +// LoadMap creates a new Properties struct from a string map. +func LoadMap(m map[string]string) *Properties { + p := NewProperties() + for k, v := range m { + p.Set(k, v) + } + return p +} + +// LoadFile reads a file into a Properties struct. 
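A sketch of driving the Loader type directly; the file paths and URL here are placeholders, and with IgnoreMissing set the missing ones are simply skipped:

```go
package main

import (
	"log"

	"github.com/magiconair/properties"
)

func main() {
	l := &properties.Loader{
		Encoding:      properties.UTF8,
		IgnoreMissing: true, // missing files and 404s are skipped, not errors
	}

	// Files and URLs can be mixed; they are merged in the given order.
	p, err := l.LoadAll([]string{
		"/etc/myapp/defaults.properties",
		"${HOME}/myapp.properties",
		"http://config.example.com/myapp",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(p.GetString("host", "localhost"))
}
```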
+func LoadFile(filename string, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadAll([]string{filename}) +} + +// LoadFiles reads multiple files in the given order into +// a Properties struct. If 'ignoreMissing' is true then +// non-existent files will not be reported as error. +func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(filenames) +} + +// LoadURL reads the content of the URL into a Properties struct. +// See Loader#LoadURL for details. +func LoadURL(url string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadAll([]string{url}) +} + +// LoadURLs reads the content of multiple URLs in the given order into a +// Properties struct. If IgnoreMissing is true then a 404 status code will +// not be reported as error. See Loader#LoadURL for the Content-Type header +// and the encoding. +func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} + return l.LoadAll(urls) +} + +// LoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. +func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(names) +} + +// MustLoadString reads an UTF8 string into a Properties struct and +// panics on error. +func MustLoadString(s string) *Properties { + return must(LoadString(s)) +} + +// MustLoadFile reads a file into a Properties struct and +// panics on error. +func MustLoadFile(filename string, enc Encoding) *Properties { + return must(LoadFile(filename, enc)) +} + +// MustLoadFiles reads multiple files in the given order into +// a Properties struct and panics on error. If 'ignoreMissing' +// is true then non-existent files will not be reported as error. +func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadFiles(filenames, enc, ignoreMissing)) +} + +// MustLoadURL reads the content of a URL into a Properties struct and +// panics on error. +func MustLoadURL(url string) *Properties { + return must(LoadURL(url)) +} + +// MustLoadURLs reads the content of multiple URLs in the given order into a +// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 +// status code will not be reported as error. +func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { + return must(LoadURLs(urls, ignoreMissing)) +} + +// MustLoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. It panics on error. +func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadAll(names, enc, ignoreMissing)) +} + +func must(p *Properties, err error) *Properties { + if err != nil { + ErrorHandler(err) + } + return p +} + +// expandName expands ${ENV_VAR} expressions in a name. 
+// If the environment variable does not exist then it will be replaced +// with an empty string. Malformed expressions like "${ENV_VAR" will +// be reported as error. +func expandName(name string) (string, error) { + return expand(name, []string{}, "${", "}", make(map[string]string)) +} + +// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. +// For ISO-8859-1 we can convert each byte straight into a rune since the +// first 256 unicode code points cover ISO-8859-1. +func convert(buf []byte, enc Encoding) string { + switch enc { + case utf8Default, UTF8: + return string(buf) + case ISO_8859_1: + runes := make([]rune, len(buf)) + for i, b := range buf { + runes[i] = rune(b) + } + return string(runes) + default: + ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) + } + panic("ErrorHandler should exit") +} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go new file mode 100644 index 000000000..cdc4a8034 --- /dev/null +++ b/vendor/github.com/magiconair/properties/parser.go @@ -0,0 +1,95 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "runtime" +) + +type parser struct { + lex *lexer +} + +func parse(input string) (properties *Properties, err error) { + p := &parser{lex: lex(input)} + defer p.recover(&err) + + properties = NewProperties() + key := "" + comments := []string{} + + for { + token := p.expectOneOf(itemComment, itemKey, itemEOF) + switch token.typ { + case itemEOF: + goto done + case itemComment: + comments = append(comments, token.val) + continue + case itemKey: + key = token.val + if _, ok := properties.m[key]; !ok { + properties.k = append(properties.k, key) + } + } + + token = p.expectOneOf(itemValue, itemEOF) + if len(comments) > 0 { + properties.c[key] = comments + comments = []string{} + } + switch token.typ { + case itemEOF: + properties.m[key] = "" + goto done + case itemValue: + properties.m[key] = token.val + } + } + +done: + return properties, nil +} + +func (p *parser) errorf(format string, args ...interface{}) { + format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) expect(expected itemType) (token item) { + token = p.lex.nextItem() + if token.typ != expected { + p.unexpected(token) + } + return token +} + +func (p *parser) expectOneOf(expected ...itemType) (token item) { + token = p.lex.nextItem() + for _, v := range expected { + if token.typ == v { + return token + } + } + p.unexpected(token) + panic("unexpected token") +} + +func (p *parser) unexpected(token item) { + p.errorf(token.String()) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (p *parser) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + *errp = e.(error) + } + return +} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go new file mode 100644 index 000000000..1529e7223 --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties.go @@ -0,0 +1,854 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
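Lexer and parser failures surface as ordinary errors from the Load functions, since the parser's recover turns its internal panics into a returned error. A small sketch with a deliberately invalid unicode literal:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// "\uZZZZ" is not a valid unicode literal, so lexing fails and
	// Load returns the error instead of panicking.
	_, err := properties.Load([]byte(`key = \uZZZZ`), properties.UTF8)
	fmt.Println(err)
}
```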
+ +package properties + +// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. +// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +const maxExpansionDepth = 64 + +// ErrorHandlerFunc defines the type of function which handles failures +// of the MustXXX() functions. An error handler function must exit +// the application after handling the error. +type ErrorHandlerFunc func(error) + +// ErrorHandler is the function which handles failures of the MustXXX() +// functions. The default is LogFatalHandler. +var ErrorHandler ErrorHandlerFunc = LogFatalHandler + +// LogHandlerFunc defines the function prototype for logging errors. +type LogHandlerFunc func(fmt string, args ...interface{}) + +// LogPrintf defines a log handler which uses log.Printf. +var LogPrintf LogHandlerFunc = log.Printf + +// LogFatalHandler handles the error by logging a fatal error and exiting. +func LogFatalHandler(err error) { + log.Fatal(err) +} + +// PanicHandler handles the error by panicking. +func PanicHandler(err error) { + panic(err) +} + +// ----------------------------------------------------------------------------- + +// A Properties contains the key/value pairs from the properties input. +// All values are stored in unexpanded form and are expanded at runtime +type Properties struct { + // Pre-/Postfix for property expansion. + Prefix string + Postfix string + + // DisableExpansion controls the expansion of properties on Get() + // and the check for circular references on Set(). When set to + // true Properties behaves like a simple key/value store and does + // not check for circular references on Get() or on Set(). + DisableExpansion bool + + // Stores the key/value pairs + m map[string]string + + // Stores the comments per key. + c map[string][]string + + // Stores the keys in order of appearance. + k []string + + // WriteSeparator specifies the separator of key and value while writing the properties. + WriteSeparator string +} + +// NewProperties creates a new Properties struct with the default +// configuration for "${key}" expressions. +func NewProperties() *Properties { + return &Properties{ + Prefix: "${", + Postfix: "}", + m: map[string]string{}, + c: map[string][]string{}, + k: []string{}, + } +} + +// Load reads a buffer into the given Properties struct. +func (p *Properties) Load(buf []byte, enc Encoding) error { + l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} + newProperties, err := l.LoadBytes(buf) + if err != nil { + return err + } + p.Merge(newProperties) + return nil +} + +// Get returns the expanded value for the given key if exists. +// Otherwise, ok is false. +func (p *Properties) Get(key string) (value string, ok bool) { + v, ok := p.m[key] + if p.DisableExpansion { + return v, ok + } + if !ok { + return "", false + } + + expanded, err := p.expand(key, v) + + // we guarantee that the expanded value is free of + // circular references and malformed expressions + // so we panic if we still get an error here. + if err != nil { + ErrorHandler(err) + } + + return expanded, true +} + +// MustGet returns the expanded value for the given key if exists. +// Otherwise, it panics. 
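A sketch of building a Properties value programmatically and switching the expansion delimiters, as described by the Prefix/Postfix fields above (the `#[...]#` delimiters are just an example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.Prefix, p.Postfix = "#[", "]#" // use #[key]# instead of ${key}

	if _, _, err := p.Set("name", "world"); err != nil {
		log.Fatal(err)
	}
	if _, _, err := p.Set("greeting", "hello #[name]#"); err != nil {
		log.Fatal(err)
	}

	v, ok := p.Get("greeting")
	fmt.Println(v, ok) // hello world true
}
```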
+func (p *Properties) MustGet(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// ClearComments removes the comments for all keys. +func (p *Properties) ClearComments() { + p.c = map[string][]string{} +} + +// ---------------------------------------------------------------------------- + +// GetComment returns the last comment before the given key or an empty string. +func (p *Properties) GetComment(key string) string { + comments, ok := p.c[key] + if !ok || len(comments) == 0 { + return "" + } + return comments[len(comments)-1] +} + +// ---------------------------------------------------------------------------- + +// GetComments returns all comments that appeared before the given key or nil. +func (p *Properties) GetComments(key string) []string { + if comments, ok := p.c[key]; ok { + return comments + } + return nil +} + +// ---------------------------------------------------------------------------- + +// SetComment sets the comment for the key. +func (p *Properties) SetComment(key, comment string) { + p.c[key] = []string{comment} +} + +// ---------------------------------------------------------------------------- + +// SetComments sets the comments for the key. If the comments are nil then +// all comments for this key are deleted. +func (p *Properties) SetComments(key string, comments []string) { + if comments == nil { + delete(p.c, key) + return + } + p.c[key] = comments +} + +// ---------------------------------------------------------------------------- + +// GetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the default value is returned. +func (p *Properties) GetBool(key string, def bool) bool { + v, err := p.getBool(key) + if err != nil { + return def + } + return v +} + +// MustGetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the function panics. +func (p *Properties) MustGetBool(key string) bool { + v, err := p.getBool(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getBool(key string) (value bool, err error) { + if v, ok := p.Get(key); ok { + return boolVal(v), nil + } + return false, invalidKeyError(key) +} + +func boolVal(v string) bool { + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +// ---------------------------------------------------------------------------- + +// GetDuration parses the expanded value as an time.Duration (in ns) if the +// key exists. If key does not exist or the value cannot be parsed the default +// value is returned. In almost all cases you want to use GetParsedDuration(). +func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { + v, err := p.getInt64(key) + if err != nil { + return def + } + return time.Duration(v) +} + +// MustGetDuration parses the expanded value as an time.Duration (in ns) if +// the key exists. If key does not exist or the value cannot be parsed the +// function panics. In almost all cases you want to use MustGetParsedDuration(). 
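A quick sketch of the boolean getter semantics above: per boolVal only "1", "true", "yes" and "on" (case-insensitive) count as true, and the default is used only when the key is missing:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("a=1\nb=YES\nc=off")

	fmt.Println(p.GetBool("a", false))      // true
	fmt.Println(p.GetBool("b", false))      // true
	fmt.Println(p.GetBool("c", true))       // false ("off" is not a true value)
	fmt.Println(p.GetBool("missing", true)) // true (default)
}
```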
+func (p *Properties) MustGetDuration(key string) time.Duration { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return time.Duration(v) +} + +// ---------------------------------------------------------------------------- + +// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { + s, ok := p.Get(key) + if !ok { + return def + } + v, err := time.ParseDuration(s) + if err != nil { + return def + } + return v +} + +// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetParsedDuration(key string) time.Duration { + s, ok := p.Get(key) + if !ok { + ErrorHandler(invalidKeyError(key)) + } + v, err := time.ParseDuration(s) + if err != nil { + ErrorHandler(err) + } + return v +} + +// ---------------------------------------------------------------------------- + +// GetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetFloat64(key string, def float64) float64 { + v, err := p.getFloat64(key) + if err != nil { + return def + } + return v +} + +// MustGetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetFloat64(key string) float64 { + v, err := p.getFloat64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getFloat64(key string) (value float64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetInt(key string, def int) int { + v, err := p.getInt64(key) + if err != nil { + return def + } + return intRangeCheck(key, v) +} + +// MustGetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetInt(key string) int { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return intRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetInt64(key string, def int64) int64 { + v, err := p.getInt64(key) + if err != nil { + return def + } + return v +} + +// MustGetInt64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. 
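The difference between the two duration getters above in a sketch: GetDuration reads a plain integer as nanoseconds, while GetParsedDuration goes through time.ParseDuration:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("raw=1500000000\nparsed=1.5s")

	fmt.Println(p.GetDuration("raw", 0))          // 1.5s (integer nanoseconds)
	fmt.Println(p.GetParsedDuration("parsed", 0)) // 1.5s (time.ParseDuration)
}
```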
+func (p *Properties) MustGetInt64(key string) int64 { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getInt64(key string) (value int64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetUint parses the expanded value as an uint if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetUint(key string, def uint) uint { + v, err := p.getUint64(key) + if err != nil { + return def + } + return uintRangeCheck(key, v) +} + +// MustGetUint parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetUint(key string) uint { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return uintRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetUint64 parses the expanded value as an uint64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetUint64(key string, def uint64) uint64 { + v, err := p.getUint64(key) + if err != nil { + return def + } + return v +} + +// MustGetUint64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetUint64(key string) uint64 { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getUint64(key string) (value uint64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetString returns the expanded value for the given key if exists or +// the default value otherwise. +func (p *Properties) GetString(key, def string) string { + if v, ok := p.Get(key); ok { + return v + } + return def +} + +// MustGetString returns the expanded value for the given key if exists or +// panics otherwise. +func (p *Properties) MustGetString(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// Filter returns a new properties object which contains all properties +// for which the key matches the pattern. +func (p *Properties) Filter(pattern string) (*Properties, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return p.FilterRegexp(re), nil +} + +// FilterRegexp returns a new properties object which contains all properties +// for which the key matches the regular expression. +func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { + pp := NewProperties() + for _, k := range p.k { + if re.MatchString(k) { + // TODO(fs): we are ignoring the error which flags a circular reference. 
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterPrefix returns a new properties object with a subset of all keys +// with the given prefix. +func (p *Properties) FilterPrefix(prefix string) *Properties { + pp := NewProperties() + for _, k := range p.k { + if strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterStripPrefix returns a new properties object with a subset of all keys +// with the given prefix and the prefix removed from the keys. +func (p *Properties) FilterStripPrefix(prefix string) *Properties { + pp := NewProperties() + n := len(prefix) + for _, k := range p.k { + if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference + // TODO(fs): this function should probably return an error but the signature is fixed + pp.Set(k[n:], p.m[k]) + } + } + return pp +} + +// Len returns the number of keys. +func (p *Properties) Len() int { + return len(p.m) +} + +// Keys returns all keys in the same order as in the input. +func (p *Properties) Keys() []string { + keys := make([]string, len(p.k)) + copy(keys, p.k) + return keys +} + +// Set sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. If the value contains a +// circular reference or a malformed expression then +// an error is returned. +// An empty key is silently ignored. +func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { + if key == "" { + return "", false, nil + } + + // if expansion is disabled we allow circular references + if p.DisableExpansion { + prev, ok = p.Get(key) + p.m[key] = value + if !ok { + p.k = append(p.k, key) + } + return prev, ok, nil + } + + // to check for a circular reference we temporarily need + // to set the new value. If there is an error then revert + // to the previous state. Only if all tests are successful + // then we add the key to the p.k list. + prev, ok = p.Get(key) + p.m[key] = value + + // now check for a circular reference + _, err = p.expand(key, value) + if err != nil { + + // revert to the previous state + if ok { + p.m[key] = prev + } else { + delete(p.m, key) + } + + return "", false, err + } + + if !ok { + p.k = append(p.k, key) + } + + return prev, ok, nil +} + +// SetValue sets property key to the default string value +// as defined by fmt.Sprintf("%v"). +func (p *Properties) SetValue(key string, value interface{}) error { + _, _, err := p.Set(key, fmt.Sprintf("%v", value)) + return err +} + +// MustSet sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. An empty key is silently ignored. +func (p *Properties) MustSet(key, value string) (prev string, ok bool) { + prev, ok, err := p.Set(key, value) + if err != nil { + ErrorHandler(err) + } + return prev, ok +} + +// String returns a string of all expanded 'key = value' pairs. 
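A sketch of the prefix filtering and of Set rejecting a circular reference, both documented above (the key names are made up):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("db.host=localhost\ndb.port=5432\napp.name=demo")

	// Keys "db.host" and "db.port" become "host" and "port".
	db := p.FilterStripPrefix("db.")
	fmt.Println(db.Keys())

	// A self-referencing value is rejected while expansion is enabled.
	if _, _, err := p.Set("loop", "${loop}"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```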
+func (p *Properties) String() string { + var s string + for _, key := range p.k { + value, _ := p.Get(key) + s = fmt.Sprintf("%s%s = %s\n", s, key, value) + } + return s +} + +// Sort sorts the properties keys in alphabetical order. +// This is helpfully before writing the properties. +func (p *Properties) Sort() { + sort.Strings(p.k) +} + +// Write writes all unexpanded 'key = value' pairs to the given writer. +// Write returns the number of bytes written and any write error encountered. +func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { + return p.WriteComment(w, "", enc) +} + +// WriteComment writes all unexpanced 'key = value' pairs to the given writer. +// If prefix is not empty then comments are written with a blank line and the +// given prefix. The prefix should be either "# " or "! " to be compatible with +// the properties file format. Otherwise, the properties parser will not be +// able to read the file back in. It returns the number of bytes written and +// any write error encountered. +func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { + var x int + + for _, key := range p.k { + value := p.m[key] + + if prefix != "" { + if comments, ok := p.c[key]; ok { + // don't print comments if they are all empty + allEmpty := true + for _, c := range comments { + if c != "" { + allEmpty = false + break + } + } + + if !allEmpty { + // add a blank line between entries but not at the top + if len(comments) > 0 && n > 0 { + x, err = fmt.Fprintln(w) + if err != nil { + return + } + n += x + } + + for _, c := range comments { + x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) + if err != nil { + return + } + n += x + } + } + } + } + sep := " = " + if p.WriteSeparator != "" { + sep = p.WriteSeparator + } + x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) + if err != nil { + return + } + n += x + } + return +} + +// Map returns a copy of the properties as a map. +func (p *Properties) Map() map[string]string { + m := make(map[string]string) + for k, v := range p.m { + m[k] = v + } + return m +} + +// FilterFunc returns a copy of the properties which includes the values which passed all filters. +func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { + pp := NewProperties() +outer: + for k, v := range p.m { + for _, f := range filters { + if !f(k, v) { + continue outer + } + pp.Set(k, v) + } + } + return pp +} + +// ---------------------------------------------------------------------------- + +// Delete removes the key and its comments. +func (p *Properties) Delete(key string) { + delete(p.m, key) + delete(p.c, key) + newKeys := []string{} + for _, k := range p.k { + if k != key { + newKeys = append(newKeys, k) + } + } + p.k = newKeys +} + +// Merge merges properties, comments and keys from other *Properties into p +func (p *Properties) Merge(other *Properties) { + for k, v := range other.m { + p.m[k] = v + } + for k, v := range other.c { + p.c[k] = v + } + +outer: + for _, otherKey := range other.k { + for _, key := range p.k { + if otherKey == key { + continue outer + } + } + p.k = append(p.k, otherKey) + } +} + +// ---------------------------------------------------------------------------- + +// check expands all values and returns an error if a circular reference or +// a malformed expression was found. 
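A sketch of writing properties back out with their comments, per WriteComment above; the prefix should be "# " or "! " so the output parses again:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("# listen address\nhost=localhost\nport=8080")
	p.SetComment("port", "listen port")

	var buf bytes.Buffer
	if _, err := p.WriteComment(&buf, "# ", properties.UTF8); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// # listen address
	// host = localhost
	//
	// # listen port
	// port = 8080
}
```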
+func (p *Properties) check() error { + for key, value := range p.m { + if _, err := p.expand(key, value); err != nil { + return err + } + } + return nil +} + +func (p *Properties) expand(key, input string) (string, error) { + // no pre/postfix -> nothing to expand + if p.Prefix == "" && p.Postfix == "" { + return input, nil + } + + return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) +} + +// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. +// The function keeps track of the keys that were already expanded and stops if it +// detects a circular reference or a malformed expression of the form '(prefix)key'. +func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { + if len(keys) > maxExpansionDepth { + return "", fmt.Errorf("expansion too deep") + } + + for { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } + + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } + + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] + + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + + for _, k := range keys { + if key == k { + var b bytes.Buffer + b.WriteString("circular reference in:\n") + for _, k1 := range keys { + fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) + } + return "", fmt.Errorf(b.String()) + } + } + + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + new_val, err := expand(val, append(keys, key), prefix, postfix, values) + if err != nil { + return "", err + } + s = s[:start] + new_val + s[end+1:] + } + return s, nil +} + +// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. +func encode(s string, special string, enc Encoding) string { + switch enc { + case UTF8: + return encodeUtf8(s, special) + case ISO_8859_1: + return encodeIso(s, special) + default: + panic(fmt.Sprintf("unsupported encoding %v", enc)) + } +} + +func encodeUtf8(s string, special string) string { + v := "" + for pos := 0; pos < len(s); { + r, w := utf8.DecodeRuneInString(s[pos:]) + pos += w + v += escape(r, special) + } + return v +} + +func encodeIso(s string, special string) string { + var r rune + var w int + var v string + for pos := 0; pos < len(s); { + switch r, w = utf8.DecodeRuneInString(s[pos:]); { + case r < 1<<8: // single byte rune -> escape special chars only + v += escape(r, special) + case r < 1<<16: // two byte rune -> unicode literal + v += fmt.Sprintf("\\u%04x", r) + default: // more than two bytes per rune -> can't encode + v += "?" + } + pos += w + } + return v +} + +func escape(r rune, special string) string { + switch r { + case '\f': + return "\\f" + case '\n': + return "\\n" + case '\r': + return "\\r" + case '\t': + return "\\t" + case '\\': + return "\\\\" + default: + if strings.ContainsRune(special, r) { + return "\\" + string(r) + } + return string(r) + } +} + +func invalidKeyError(key string) error { + return fmt.Errorf("unknown property: %s", key) +} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go new file mode 100644 index 000000000..b013a2e5e --- /dev/null +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -0,0 +1,31 @@ +// Copyright 2018 Frank Schroeder. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "math" +) + +// make this a var to overwrite it in a test +var is32Bit = ^uint(0) == math.MaxUint32 + +// intRangeCheck checks if the value fits into the int type and +// panics if it does not. +func intRangeCheck(key string, v int64) int { + if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return int(v) +} + +// uintRangeCheck checks if the value fits into the uint type and +// panics if it does not. +func uintRangeCheck(key string, v uint64) uint { + if is32Bit && v > math.MaxUint32 { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return uint(v) +} diff --git a/vendor/github.com/maratori/testpackage/LICENSE b/vendor/github.com/maratori/testpackage/LICENSE new file mode 100644 index 000000000..644d0b1c8 --- /dev/null +++ b/vendor/github.com/maratori/testpackage/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Marat Reymers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/maratori/testpackage/pkg/testpackage/testpackage.go b/vendor/github.com/maratori/testpackage/pkg/testpackage/testpackage.go new file mode 100644 index 000000000..cad24e1a5 --- /dev/null +++ b/vendor/github.com/maratori/testpackage/pkg/testpackage/testpackage.go @@ -0,0 +1,53 @@ +package testpackage + +import ( + "flag" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" +) + +const ( + SkipRegexpFlagName = "skip-regexp" + SkipRegexpFlagUsage = `regexp pattern to skip file by name. 
To not skip files use -skip-regexp="^$"` + SkipRegexpFlagDefault = `(export|internal)_test\.go` +) + +// NewAnalyzer returns Analyzer that makes you use a separate _test package +func NewAnalyzer() *analysis.Analyzer { + var ( + skipFileRegexp = SkipRegexpFlagDefault + fs flag.FlagSet + ) + + fs.StringVar(&skipFileRegexp, SkipRegexpFlagName, skipFileRegexp, SkipRegexpFlagUsage) + + return &analysis.Analyzer{ + Name: "testpackage", + Doc: "linter that makes you use a separate _test package", + Flags: fs, + Run: func(pass *analysis.Pass) (interface{}, error) { + skipFile, err := regexp.Compile(skipFileRegexp) + if err != nil { + return nil, err + } + + for _, f := range pass.Files { + fileName := pass.Fset.Position(f.Pos()).Filename + if skipFile.MatchString(fileName) { + continue + } + + if strings.HasSuffix(fileName, "_test.go") { + packageName := f.Name.Name + if !strings.HasSuffix(packageName, "_test") { + pass.Reportf(f.Name.Pos(), "package should be `%s_test` instead of `%s`", packageName, packageName) + } + } + } + + return nil, nil + }, + } +} diff --git a/vendor/github.com/matoous/godox/.gitignore b/vendor/github.com/matoous/godox/.gitignore new file mode 100644 index 000000000..30d94b102 --- /dev/null +++ b/vendor/github.com/matoous/godox/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +.idea + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +.vscode/ +debug +debug.test diff --git a/vendor/github.com/matoous/godox/.golangci.yml b/vendor/github.com/matoous/godox/.golangci.yml new file mode 100644 index 000000000..3f0fcdb19 --- /dev/null +++ b/vendor/github.com/matoous/godox/.golangci.yml @@ -0,0 +1,71 @@ +linters-settings: + depguard: + list-type: blacklist + include-go-root: true + packages: + # we are using "github.com/json-iterator/go" instead of json encoder from stdlib + - "encoding/json" + dupl: + threshold: 100 + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. + # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - performance + - diagnostic + - style + disabled-checks: + - emptyStringTest + - unnamedResult # it is experimental currently and doesn't handle typed channels correctly + gocyclo: + min-complexity: 14 # TODO go lower + golint: + min-confidence: 0 + govet: + check-shadowing: true + goconst: + min-len: 2 + min-occurrences: 3 + goimports: + local-prefixes: gitlab.skypicker.com/search-team/gonuts/conveyance-store + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + +linters: + enable-all: true + disable: + # prealloc is not recommended by `golangci-lint` developers. 
+ - prealloc + - gochecknoglobals + +issues: + exclude-rules: + - path: _test\.go + linters: + - goconst + - dupl + + - path: fixtures + linters: + - gocritic + - varcheck + - deadcode + - unused + +run: + modules-download-mode: readonly + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: tab + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true diff --git a/vendor/github.com/matoous/godox/.revive.toml b/vendor/github.com/matoous/godox/.revive.toml new file mode 100644 index 000000000..db0e4edb6 --- /dev/null +++ b/vendor/github.com/matoous/godox/.revive.toml @@ -0,0 +1,135 @@ +ignoreGeneratedHeader = false +severity = "warning" + +# confidence <= 0.2 generate a lot of errors from package-comments rule. It marks files that do not contain +# package-level comments as a warning irrespective of existing package-level coment in one file. +confidence = 0.25 +errorCode = 1 +warningCode = 1 + +# Rules block. +# ⚠ Make sure to sort rules alpabetically for readability! ⚠ + +# argument-limit rule is setting up a maximum number of parameters that can be passed to the functions/methods. +[rule.argument-limit] + arguments = [5] + +# atomic rule checks for commonly mistaken usages of the sync/atomic package. +[rule.atomic] + +# blank-imports rule disallows blank imports. +[rule.blank-imports] + +# bool-literal-in-expr suggests removing boolean literals from logic expressions like `bar == true`, `arg == false`, +# `r != true`, `false && boolExpr` and `boolExpr || true`. +[rule.bool-literal-in-expr] + +# constant-logical-expr rule warns on constant logical expressions, like `name == name`. +[rule.constant-logical-expr] + +# context-as-argument rule makes sure that context.Context is the first argument of a function. +[rule.context-as-argument] + +# context-keys-type rule disallows the usage of basic types in context.WithValue +[rule.context-keys-type] + +# confusing-naming rule warns on methods with names that differ only by capitalization. +[rule.confusing-naming] + +# confusing-results rule suggests to name potentially confusing function results. +[rule.confusing-results] + +# cyclomatic rule sets restriction for maximum Cyclomatic complexity. +[rule.cyclomatic] + arguments = [15] + +# deep-exit rule looks for program exits in funcs other than `main()` or `init()`. +[rule.deep-exit] + +# dot-imports rule forbids `.` imports. +[rule.dot-imports] + +# empty-block warns on empty code blocks. +[rule.empty-block] + +# error-return rule ensure that the error return parameter is the last. +[rule.error-return] + +# error-strings rule ensure conventions around error strings. +[rule.error-strings] + +# error-naming rule ensure naming of error variables (has `Err` or `err` prefix). +[rule.error-naming] + +# errorf rule warns on usage errors.New(fmt.Sprintf()) instead of fmt.Errorf() +[rule.errorf] + +# exported rule ensure naming and commenting conventions on exported symbols. +[rule.exported] + +# flag-parameter rule warns on boolean parameters that create a control coupling. +[rule.flag-parameter] + +# get-return rule warns on getters that do not yield any result. +[rule.get-return] + +# if-return rule warns redundant if when returning an error. +[rule.if-return] + +# increment-decrement rule forces to use `i++` and `i--` instead of `i += 1` and `i -= 1`. 
+[rule.increment-decrement] + +# indent-error-flow rule prevents redundant else statements. +[rule.indent-error-flow] + +# modifies-value-receiver warns on assignments to value-passed method receivers. +[rule.modifies-value-receiver] + +# package-comments rule ensures package commenting conventions. +[rule.package-comments] + +# range rule prevents redundant variables when iterating over a collection. +[rule.range] + +# range-val-in-closure warns if range value is used in a closure dispatched as goroutine. +[rule.range-val-in-closure] + +# receiver-naming ensures conventions around the naming of receivers. +[rule.receiver-naming] + +# redefines-builtin-id warns on redefinitions of built-in (constants, variables, function and types) identifiers, +# like `true := "false"` etc. +[rule.redefines-builtin-id] + +# rule.superfluous-else prevents redundant else statements (extends indent-error-flow). Checks for `if-then-else`where +# the then block ends with branching statement like `continue`, `break`, or `goto`. +[rule.superfluous-else] + +# rule.struct-tag checks common struct tags like `json`, `xml`, `yaml`. +[rule.struct-tag] + +# time-naming rule conventions around the naming of time variables. Like not to use unit suffixes (sec, min etc.) in +# naming variables of type `time.Time` or `time.Duration`. +[rule.time-naming] + +# unexported-return rule warns when a public return is from unexported type. +[rule.unexported-return] + +# unnecessary-stmt suggests removing or simplifying unnecessary statements like breaks at the end of cases or return at +# the end of bodies of functions returning nothing. +[rule.unnecessary-stmt] + +# unreachable-code rule warns on the unreachable code. +[rule.unreachable-code] + +# unused-parameter rule suggests to rename or remove unused function parameters. +[rule.unused-parameter] + +# var-declaration rule reduces redundancies around variable declaration. +[rule.var-declaration] + +# var-naming checks naming rules. +[rule.var-naming] + +# waitgroup-by-value rule warns on functions taking `sync.WaitGroup` as a by-value parameter. +[rule.waitgroup-by-value] diff --git a/vendor/github.com/matoous/godox/LICENSE b/vendor/github.com/matoous/godox/LICENSE new file mode 100644 index 000000000..49e1b1e3a --- /dev/null +++ b/vendor/github.com/matoous/godox/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Matous Dzivjak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
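To make the vendored revive rule list above more concrete, here is a hedged, illustrative sketch (not part of the vendored files; package and function names are placeholders) of code that a few of the configured rules would flag:

```go
package example

import "sync"

// bool-literal-in-expr: the `== true` comparison is redundant.
func isReady(ok bool) bool {
	if ok == true {
		return true
	}
	return false
}

// increment-decrement: revive prefers i++ over i += 1.
func count(n int) int {
	i := 0
	for i < n {
		i += 1
	}
	return i
}

// waitgroup-by-value: sync.WaitGroup must be passed by pointer, not by value.
func wait(wg sync.WaitGroup) {
	wg.Wait()
}
```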
diff --git a/vendor/github.com/matoous/godox/README.md b/vendor/github.com/matoous/godox/README.md new file mode 100644 index 000000000..9c58e28ce --- /dev/null +++ b/vendor/github.com/matoous/godox/README.md @@ -0,0 +1,23 @@ +GoDoX +=== + +[![Tests](https://github.com/matoous/godox/actions/workflows/test.yml/badge.svg)](https://github.com/matoous/godox/actions/workflows/test.yml) +[![Lint](https://github.com/matoous/godox/actions/workflows/lint.yml/badge.svg)](https://github.com/matoous/godox/actions/workflows/lint.yml) +[![GoDoc](https://godoc.org/github.com/matoous/godox?status.svg)](https://godoc.org/github.com/matoous/godox) +[![Go Report Card](https://goreportcard.com/badge/github.com/matoous/godox)](https://goreportcard.com/report/github.com/matoous/godox) +[![GitHub issues](https://img.shields.io/github/issues/matoous/godox.svg)](https://github.com/matoous/godox/issues) +[![License](https://img.shields.io/badge/license-MIT%20License-blue.svg)](https://github.com/matoous/godox/LICENSE) + +GoDoX extracts comments from Go code based on keywords. This repository is fork of https://github.com/766b/godox +but a lot of code has changed, this repository is updated and the code was adjusted for better integration with +https://github.com/golangci/golangci-lint. + +Installation +--- + + go get github.com/matoous/godox + +The main idea +--- + +The main idea of godox is the keywords like TODO, FIX, OPTIMIZE is temporary and for development purpose only. You should create tasks if some TODOs cannot be fixed in the current merge request. diff --git a/vendor/github.com/matoous/godox/go.mod b/vendor/github.com/matoous/godox/go.mod new file mode 100644 index 000000000..69b34f0e9 --- /dev/null +++ b/vendor/github.com/matoous/godox/go.mod @@ -0,0 +1,5 @@ +module github.com/matoous/godox + +go 1.13 + +require golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578 diff --git a/vendor/github.com/matoous/godox/go.sum b/vendor/github.com/matoous/godox/go.sum new file mode 100644 index 000000000..970cb25dd --- /dev/null +++ b/vendor/github.com/matoous/godox/go.sum @@ -0,0 +1,8 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578 h1:f0Gfd654rnnfXT1+BK1YHPTS1qQdKrPIaGQwWxNE44k= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/matoous/godox/godox.go b/vendor/github.com/matoous/godox/godox.go new file mode 100644 index 000000000..6d7104b09 --- /dev/null +++ b/vendor/github.com/matoous/godox/godox.go @@ -0,0 +1,84 @@ +package godox + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/token" + "path/filepath" + "strings" +) + +var ( + defaultKeywords = []string{"TODO", "BUG", "FIXME"} +) + +// Message contains a message and position +type Message struct { + Pos token.Position + Message string +} + +func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Message { + commentText 
:= c.Text + switch commentText[1] { + case '/': + commentText = commentText[2:] + if len(commentText) > 0 && commentText[0] == ' ' { + commentText = commentText[1:] + } + case '*': + commentText = commentText[2 : len(commentText)-2] + } + + b := bufio.NewReader(bytes.NewBufferString(commentText)) + var comments []Message + + for lineNum := 0; ; lineNum++ { + line, _, err := b.ReadLine() + if err != nil { + break + } + sComment := bytes.TrimSpace(line) + if len(sComment) < 4 { + continue + } + for _, kw := range keywords { + if bytes.EqualFold([]byte(kw), sComment[0:len(kw)]) { + pos := fset.Position(c.Pos()) + // trim the comment + if len(sComment) > 40 { + sComment = []byte(fmt.Sprintf("%.40s...", sComment)) + } + comments = append(comments, Message{ + Pos: pos, + Message: fmt.Sprintf( + "%s:%d: Line contains %s: \"%s\"", + filepath.Join(pos.Filename), + pos.Line+lineNum, + strings.Join(keywords, "/"), + sComment, + ), + }) + break + } + } + } + return comments +} + +// Run runs the godox linter on given file. +// Godox searches for comments starting with given keywords and reports them. +func Run(file *ast.File, fset *token.FileSet, keywords ...string) []Message { + if len(keywords) == 0 { + keywords = defaultKeywords + } + var messages []Message + for _, c := range file.Comments { + for _, ci := range c.List { + messages = append(messages, getMessages(ci, fset, keywords)...) + } + } + return messages +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 000000000..7942c565c --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
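For context on the vendored godox package above, this is a hedged sketch of how its exported Run function can be driven; the file name and keyword handling are illustrative assumptions, not taken from this patch:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/matoous/godox"
)

func main() {
	fset := token.NewFileSet()
	// parser.ParseComments is required so the AST retains the comments godox scans.
	file, err := parser.ParseFile(fset, "main.go", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	// An empty keyword list falls back to the package defaults (TODO, BUG, FIXME).
	for _, msg := range godox.Run(file, fset) {
		fmt.Println(msg.Message)
	}
}
```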
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..e055952b6 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..1f7806fe1 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,37 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..08cbd1e0f --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..41215d7fc --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1043 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. 
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 
157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + 
w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := 
strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n 
&& n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) 
float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 000000000..1e590b819 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,8 @@ +module github.com/mattn/go-colorable + +require ( + github.com/mattn/go-isatty v0.0.12 + golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 000000000..cf5b95d97 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,5 @@ +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for 
d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..95f2c6be2 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml index 5597e026d..604314dd4 100644 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -1,13 +1,14 @@ language: go +sudo: false go: + - 1.13.x - tip -os: - - linux - - osx - before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... 
+ script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 1e69004bb..38418353e 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index 53d84a672..605c4c221 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -2,4 +2,4 @@ module github.com/mattn/go-isatty go 1.12 -require golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index 5e0752bdf..912e29cbc 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go deleted file mode 100644 index d3567cb5b..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_android.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build android - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. 
-func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 07e93039d..711f28808 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -3,18 +3,12 @@ package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 453b025d0..31a1ca973 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,5 @@ // +build linux aix // +build !appengine -// +build !android package isatty diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 000000000..5ae9d96b7 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml new file mode 100644 index 000000000..6a21813a3 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - go generate + - git diff --cached --exit-code + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
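The go-isatty change above replaces raw syscalls with golang.org/x/sys/unix; as a hedged, illustrative sketch (not part of the vendored sources), callers typically use the package like this:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	// IsTerminal reports whether the file descriptor refers to a terminal;
	// IsCygwinTerminal additionally covers Cygwin/MSYS2 pseudo-terminals on Windows.
	if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		fmt.Println("stdout is a terminal")
	} else {
		fmt.Println("stdout is a pipe or file")
	}
}
```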
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md new file mode 100644 index 000000000..aa56ab96c --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/README.md @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. + +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/go.mod b/vendor/github.com/mattn/go-runewidth/go.mod new file mode 100644 index 000000000..fa7f4d864 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-runewidth + +go 1.9 diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 000000000..19f8e0449 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,257 @@ +package runewidth + +import ( + "os" +) + +//go:generate go run script/generate.go + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth bool + + // ZeroWidthJoiner is flag to set to use UTR#51 ZWJ + ZeroWidthJoiner bool + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{} +) + +func init() { + handleEnv() +} + +func handleEnv() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } + // update DefaultCondition + DefaultCondition.EastAsianWidth = EastAsianWidth + DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner +} + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + top) >> 1 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x202E}, {0x206A, 0x206F}, 
{0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool + ZeroWidthJoiner bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{ + EastAsianWidth: EastAsianWidth, + ZeroWidthJoiner: ZeroWidthJoiner, + } +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + switch { + case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned): + return 0 + case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth): + return 2 + default: + return 1 + } +} + +func (c *Condition) stringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +func (c *Condition) stringWidthZeroJoiner(s string) (width int) { + r1, r2 := rune(0), rune(0) + for _, r := range []rune(s) { + if r == 0xFE0E || r == 0xFE0F { + continue + } + w := c.RuneWidth(r) + if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) { + if width < w { + width = w + } + } else { + width += w + } + r1, r2 = r2, r + } + return width +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + if c.ZeroWidthJoiner { + return c.stringWidthZeroJoiner(s) + } + return c.stringWidth(s) +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return inTables(r, private, ambiguous) +} + +// IsNeutralWidth returns whether is neutral width or not. 
+func IsNeutralWidth(r rune) bool { + return inTable(r, neutral) +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go new file mode 100644 index 000000000..7d99f6e52 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go @@ -0,0 +1,8 @@ +// +build appengine + +package runewidth + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 000000000..c5fdf40ba --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,9 @@ +// +build js +// +build !appengine + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 000000000..480ad7485 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,82 @@ +// +build !windows +// +build !js +// +build !appengine + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_ALL") + if locale == "" { + locale = os.Getenv("LC_CTYPE") + } + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go new file mode 100644 index 000000000..b27d77d89 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -0,0 +1,437 @@ +// Code generated by script/generate.go. DO NOT EDIT. + +package runewidth + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, + {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, + {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, + {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, + {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, + {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, + {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, + {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, + {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, + {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, + {0x1E8D0, 0x1E8D6}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, + {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, + {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, + {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, + {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, + {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, + {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, + {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, + {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, + {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, + {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, + {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, + {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, + {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, + {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, + {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, + {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, + {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, + {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, + {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 
0x1FAD6}, + {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} +var notassigned = table{ + {0x27E6, 0x27ED}, {0x2985, 0x2986}, +} + +var neutral = table{ + {0x0000, 
0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, + {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, + {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, + {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, + {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, + {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, + {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, + {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, + {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, + {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, + {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, + {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, + {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, + {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, + {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, + {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, + {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, + {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, + {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, + {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, + {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, + {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, + {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, + {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, + {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, + {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, + {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, + {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, + {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, + {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, + {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, + {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, + {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, + {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, + {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, + {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, + {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, + {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, + {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, + {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, + {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, + {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, + {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, + {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, 
{0x0DC0, 0x0DC6}, + {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, + {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, + {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, + {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, + {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, + {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, + {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, + {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, + {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, + {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, + {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, + {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, + {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, + {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, + {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, + {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, + {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, + {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, + {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, + {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, + {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, + {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, + {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, + {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, + {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, + {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, + {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, + {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, + {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, + {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, + {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, + {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, + {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, + {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, + {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, + {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, + {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, + {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, + {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, + {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, + {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, + {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, + {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, + {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, + {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, + {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, + 
{0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, + {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, + {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, + {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, + {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, + {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641}, + {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, + {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, + {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, + {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, + {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, + {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, + {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D}, + {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775}, + {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, + {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, + {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, + {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, + {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, + {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, + {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, + {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, + {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, + {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, + {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, + {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, + {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, + {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, + {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, + {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, + {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, + {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, + {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, + {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, + {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, + {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, + {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, + {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, + {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, + {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 
0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, + {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, + {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, + {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, + {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, + {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, + {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, + {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, + {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, + {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, + {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, + {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, + {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, + {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A}, + {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, + {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, + {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, + {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, + {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, + {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, + {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, + {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, + {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, + {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, + {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, + {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, + {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, + {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, + {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, + {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, + {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, + {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, + {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, + {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, + {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, + {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, + {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, + {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, + {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, + {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, + {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, + {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, + {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, + {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, + {0x1E900, 0x1E94B}, {0x1E950, 
0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, + {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, + {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, + {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, + {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, + {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, + {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, + {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, + {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, + {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, + {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, + {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, + {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, + {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, + {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, + {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, + {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, + {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, + {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, + {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, + {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, + {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, +} + +var emoji = table{ + {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, + {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, + {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, + {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, + {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, + {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, + {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, + {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, + {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, + {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, + {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, + {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, + {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, + {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, + {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, + {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, + {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, + {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F}, + {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, + {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, + {0x1FC00, 0x1FFFD}, +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 000000000..d6a61777d 
--- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,28 @@ +// +build windows +// +build !appengine + +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mbilski/exhaustivestruct/LICENSE b/vendor/github.com/mbilski/exhaustivestruct/LICENSE new file mode 100644 index 000000000..893eb73b9 --- /dev/null +++ b/vendor/github.com/mbilski/exhaustivestruct/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Mateusz Bilski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go b/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..0dfb713c5 --- /dev/null +++ b/vendor/github.com/mbilski/exhaustivestruct/pkg/analyzer/analyzer.go @@ -0,0 +1,187 @@ +package analyzer + +import ( + "flag" + "fmt" + "go/ast" + "go/types" + "path" + "strings" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +// Analyzer that checks if all struct's fields are initialized +var Analyzer = &analysis.Analyzer{ + Name: "exhaustivestruct", + Doc: "Checks if all struct's fields are initialized", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: newFlagSet(), +} + +// StructPatternList is a comma separated list of expressions to match struct packages and names +// The struct packages have the form example.com/package.ExampleStruct +// The matching patterns can use matching syntax from https://pkg.go.dev/path#Match +// If this list is empty, all structs are tested. 
+var StructPatternList string + +func newFlagSet() flag.FlagSet { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.StringVar(&StructPatternList, "struct_patterns", "", "This is a comma separated list of expressions to match struct packages and names") + return *fs +} + +func run(pass *analysis.Pass) (interface{}, error) { + splitFn := func(c rune) bool { return c == ',' } + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + structPatterns := strings.FieldsFunc(StructPatternList, splitFn) + // validate the pattern syntax + for _, pattern := range structPatterns { + _, err := path.Match(pattern, "") + if err != nil { + return nil, fmt.Errorf("invalid struct pattern %s: %w", pattern, err) + } + } + + nodeFilter := []ast.Node{ + (*ast.CompositeLit)(nil), + (*ast.ReturnStmt)(nil), + } + + var returnStmt *ast.ReturnStmt + + inspector.Preorder(nodeFilter, func(node ast.Node) { + var name string + + compositeLit, ok := node.(*ast.CompositeLit) + if !ok { + // Keep track of the last return statement whilte iterating + retLit, ok := node.(*ast.ReturnStmt) + if ok { + returnStmt = retLit + } + return + } + + i, ok := compositeLit.Type.(*ast.Ident) + + if ok { + name = i.Name + } else { + s, ok := compositeLit.Type.(*ast.SelectorExpr) + + if !ok { + return + } + + name = s.Sel.Name + } + + if compositeLit.Type == nil { + return + } + + t := pass.TypesInfo.TypeOf(compositeLit.Type) + + if t == nil { + return + } + + if len(structPatterns) > 0 { + shouldLint := false + for _, pattern := range structPatterns { + // We check the patterns for vailidy ahead of time, so we don't need to check the error here + if match, _ := path.Match(pattern, t.String()); match { + shouldLint = true + break + } + } + if !shouldLint { + return + } + } + + str, ok := t.Underlying().(*types.Struct) + + if !ok { + return + } + + // Don't report an error if: + // 1. This composite literal contains no fields and + // 2. It's in a return statement and + // 3. 
The return statement contains a non-nil error + if len(compositeLit.Elts) == 0 { + // Check if this composite is one of the results the last return statement + isInResults := false + if returnStmt != nil { + for _, result := range returnStmt.Results { + compareComposite, ok := result.(*ast.CompositeLit) + if ok { + if compareComposite == compositeLit { + isInResults = true + } + } + } + } + nonNilError := false + if isInResults { + // Check if any of the results has an error type and if that error is set to non-nil (if it's set to nil, the type would be "untyped nil") + for _, result := range returnStmt.Results { + if pass.TypesInfo.TypeOf(result).String() == "error" { + nonNilError = true + } + } + } + + if nonNilError { + return + } + } + + samePackage := strings.HasPrefix(t.String(), pass.Pkg.Path()+".") + + missing := []string{} + + for i := 0; i < str.NumFields(); i++ { + fieldName := str.Field(i).Name() + exists := false + + if !samePackage && !str.Field(i).Exported() { + continue + } + + for eIndex, e := range compositeLit.Elts { + if k, ok := e.(*ast.KeyValueExpr); ok { + if i, ok := k.Key.(*ast.Ident); ok { + if i.Name == fieldName { + exists = true + break + } + } + } else { + if eIndex == i { + exists = true + break + } + } + } + + if !exists { + missing = append(missing, fieldName) + } + } + + if len(missing) == 1 { + pass.Reportf(node.Pos(), "%s is missing in %s", missing[0], name) + } else if len(missing) > 1 { + pass.Reportf(node.Pos(), "%s are missing in %s", strings.Join(missing, ", "), name) + } + }) + + return nil, nil +} diff --git a/vendor/github.com/mgechev/dots/.travis.yml b/vendor/github.com/mgechev/dots/.travis.yml new file mode 100644 index 000000000..f4a4a7363 --- /dev/null +++ b/vendor/github.com/mgechev/dots/.travis.yml @@ -0,0 +1,2 @@ +language: go +go: master diff --git a/vendor/github.com/mgechev/dots/LICENSE b/vendor/github.com/mgechev/dots/LICENSE new file mode 100644 index 000000000..c617c7e01 --- /dev/null +++ b/vendor/github.com/mgechev/dots/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Minko Gechev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mgechev/dots/README.md b/vendor/github.com/mgechev/dots/README.md new file mode 100644 index 000000000..1203aef5f --- /dev/null +++ b/vendor/github.com/mgechev/dots/README.md @@ -0,0 +1,100 @@ +[![Build Status](https://travis-ci.org/mgechev/dots.svg?branch=master)](https://travis-ci.org/mgechev/dots) + +# Dots + +Implements the wildcard file matching in Go used by golint, go test etc. 
+ +## Usage + +```go +import "github.com/mgechev/dots" + +func main() { + result, err := dots.Resolve([]string{"./fixtures/..."}, []string{"./fixtures/foo"}) + for _, f := range result { + fmt.Println(f); + } +} +``` + +If we suppose that we have the following directory structure: + +```text +├── README.md +├── fixtures +│   ├── bar +│   │   ├── bar1.go +│   │   └── bar2.go +│   ├── baz +│   │   ├── baz1.go +│   │   ├── baz2.go +│   │   └── baz3.go +│   └── foo +│   ├── foo1.go +│   ├── foo2.go +│   └── foo3.go +└── main.go +``` + +The result will be: + +```text +fixtures/bar/bar1.go +fixtures/bar/bar2.go +fixtures/baz/baz1.go +fixtures/baz/baz2.go +fixtures/baz/baz3.go +``` + +`dots` supports wildcard in both - the first and the last argument of `Resolve`, which means that you can ignore files based on a wildcard: + +```go +dots.Resolve([]string{"github.com/mgechev/dots"}, []string{"./..."}) // empty list +dots.Resolve([]string{"./fixtures/bar/..."}, []string{"./fixture/foo/...", "./fixtures/baz/..."}) // bar1.go, bar2.go +``` + +## Preserve package structure + +`dots` allow you to receive a slice of slices where each nested slice represents an individual package: + +```go +dots.ResolvePackages([]string{"github.com/mgechev/dots/..."}, []string{}) +``` + +So we will get the result: + +```text +[ + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar1.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar2.go" + ], + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz1.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz2.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz3.go" + ], + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo1.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo2.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo3.go" + ], + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz1.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz2.go" + ], + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo1.go", + "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo2.go" + ], + [ + "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/bar/bar1.go" + ] +] +``` + +This method is especially useful, when you want to perform type checking over given package from the result. + +## License + +MIT diff --git a/vendor/github.com/mgechev/dots/resolve.go b/vendor/github.com/mgechev/dots/resolve.go new file mode 100644 index 000000000..309ba18ad --- /dev/null +++ b/vendor/github.com/mgechev/dots/resolve.go @@ -0,0 +1,456 @@ +package dots + +import ( + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var ( + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +func flatten(arr [][]string) []string { + var res []string + for _, e := range arr { + res = append(res, e...) + } + return res +} + +// Resolve accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. +// The final result is the set of all files from the selected directories subtracted with +// the files in the skip slice. 
+func Resolve(includePatterns, skipPatterns []string) ([]string, error) { + skip, err := resolvePatterns(skipPatterns) + filter := newPathFilter(flatten(skip)) + if err != nil { + return nil, err + } + + pathSet := map[string]bool{} + includePackages, err := resolvePatterns(includePatterns) + include := flatten(includePackages) + if err != nil { + return nil, err + } + + var result []string + for _, i := range include { + if _, ok := pathSet[i]; !ok && !filter(i) { + pathSet[i] = true + result = append(result, i) + } + } + return result, err +} + +// ResolvePackages accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. +// The final result is the set of all files from the selected directories subtracted with +// the files in the skip slice. The difference between `Resolve` and `ResolvePackages` +// is that `ResolvePackages` preserves the package structure in the nested slices. +func ResolvePackages(includePatterns, skipPatterns []string) ([][]string, error) { + skip, err := resolvePatterns(skipPatterns) + filter := newPathFilter(flatten(skip)) + if err != nil { + return nil, err + } + + pathSet := map[string]bool{} + include, err := resolvePatterns(includePatterns) + if err != nil { + return nil, err + } + + var result [][]string + for _, p := range include { + var packageFiles []string + for _, f := range p { + if _, ok := pathSet[f]; !ok && !filter(f) { + pathSet[f] = true + packageFiles = append(packageFiles, f) + } + } + result = append(result, packageFiles) + } + return result, err +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func resolveDir(dirname string) ([]string, error) { + pkg, err := build.ImportDir(dirname, 0) + return resolveImportedPackage(pkg, err) +} + +func resolvePackage(pkgname string) ([]string, error) { + pkg, err := build.Import(pkgname, ".", 0) + return resolveImportedPackage(pkg, err) +} + +func resolveImportedPackage(pkg *build.Package, err error) ([]string, error) { + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + // Don't complain if the failure is due to no Go source files. + return nil, nil + } + return nil, err + } + + var files []string + files = append(files, pkg.GoFiles...) + files = append(files, pkg.CgoFiles...) + files = append(files, pkg.TestGoFiles...) + if pkg.Dir != "." { + for i, f := range files { + files[i] = filepath.Join(pkg.Dir, f) + } + } + return files, nil +} + +func resolvePatterns(patterns []string) ([][]string, error) { + var files [][]string + for _, pattern := range patterns { + f, err := resolvePattern(pattern) + if err != nil { + return nil, err + } + files = append(files, f...) + } + return files, nil +} + +func resolvePattern(pattern string) ([][]string, error) { + // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to + // directory, file or package targets. The distinction affects which + // checks are run. It is no valid to mix target types. 
+ var dirsRun, filesRun, pkgsRun int + var matches []string + + if strings.HasSuffix(pattern, "/...") && isDir(pattern[:len(pattern)-len("/...")]) { + dirsRun = 1 + for _, dirname := range matchPackagesInFS(pattern) { + matches = append(matches, dirname) + } + } else if isDir(pattern) { + dirsRun = 1 + matches = append(matches, pattern) + } else if exists(pattern) { + filesRun = 1 + matches = append(matches, pattern) + } else { + pkgsRun = 1 + matches = append(matches, pattern) + } + + result := [][]string{} + switch { + case dirsRun == 1: + for _, dir := range matches { + res, err := resolveDir(dir) + if err != nil { + return nil, err + } + result = append(result, res) + } + case filesRun == 1: + return [][]string{matches}, nil + case pkgsRun == 1: + for _, pkg := range importPaths(matches) { + res, err := resolvePackage(pkg) + if err != nil { + return nil, err + } + result = append(result, res) + } + } + return result, nil +} + +func newPathFilter(skip []string) func(string) bool { + filter := map[string]bool{} + for _, name := range skip { + filter[name] = true + } + + return func(path string) bool { + base := filepath.Base(path) + if filter[base] || filter[path] { + return true + } + return base != "." && base != ".." && strings.ContainsAny(base[0:1], "_.") + } +} + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. +func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, matchPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, matchPackagesInFS(a)...) + } else { + out = append(out, matchPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. 
+func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. 
+ // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/github.com/mgechev/revive/LICENSE b/vendor/github.com/mgechev/revive/LICENSE new file mode 100644 index 000000000..c617c7e01 --- /dev/null +++ b/vendor/github.com/mgechev/revive/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Minko Gechev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/mgechev/revive/config/config.go b/vendor/github.com/mgechev/revive/config/config.go new file mode 100644 index 000000000..298c0bdf5 --- /dev/null +++ b/vendor/github.com/mgechev/revive/config/config.go @@ -0,0 +1,226 @@ +package config + +import ( + "errors" + "fmt" + "io/ioutil" + + "github.com/mgechev/revive/formatter" + + "github.com/BurntSushi/toml" + "github.com/mgechev/revive/lint" + "github.com/mgechev/revive/rule" +) + +var defaultRules = []lint.Rule{ + &rule.VarDeclarationsRule{}, + &rule.PackageCommentsRule{}, + &rule.DotImportsRule{}, + &rule.BlankImportsRule{}, + &rule.ExportedRule{}, + &rule.VarNamingRule{}, + &rule.IndentErrorFlowRule{}, + &rule.RangeRule{}, + &rule.ErrorfRule{}, + &rule.ErrorNamingRule{}, + &rule.ErrorStringsRule{}, + &rule.ReceiverNamingRule{}, + &rule.IncrementDecrementRule{}, + &rule.ErrorReturnRule{}, + &rule.UnexportedReturnRule{}, + &rule.TimeNamingRule{}, + &rule.ContextKeysType{}, + &rule.ContextAsArgumentRule{}, +} + +var allRules = append([]lint.Rule{ + &rule.ArgumentsLimitRule{}, + &rule.CyclomaticRule{}, + &rule.FileHeaderRule{}, + &rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + &rule.ConfusingNamingRule{}, + &rule.GetReturnRule{}, + &rule.ModifiesParamRule{}, + &rule.ConfusingResultsRule{}, + &rule.DeepExitRule{}, + &rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + &rule.AddConstantRule{}, + &rule.FlagParamRule{}, + &rule.UnnecessaryStmtRule{}, + &rule.StructTagRule{}, + &rule.ModifiesValRecRule{}, + &rule.ConstantLogicalExprRule{}, + &rule.BoolLiteralRule{}, + &rule.RedefinesBuiltinIDRule{}, + &rule.ImportsBlacklistRule{}, + &rule.FunctionResultsLimitRule{}, + &rule.MaxPublicStructsRule{}, + &rule.RangeValInClosureRule{}, + &rule.RangeValAddress{}, + &rule.WaitGroupByValueRule{}, + &rule.AtomicRule{}, + &rule.EmptyLinesRule{}, + &rule.LineLengthLimitRule{}, + &rule.CallToGCRule{}, + &rule.DuplicatedImportsRule{}, + &rule.ImportShadowingRule{}, + &rule.BareReturnRule{}, + &rule.UnusedReceiverRule{}, + &rule.UnhandledErrorRule{}, + &rule.CognitiveComplexityRule{}, + &rule.StringOfIntRule{}, + &rule.StringFormatRule{}, + &rule.EarlyReturnRule{}, + &rule.UnconditionalRecursionRule{}, + &rule.IdenticalBranchesRule{}, + &rule.DeferRule{}, + &rule.UnexportedNamingRule{}, + &rule.FunctionLength{}, + &rule.NestedStructs{}, + &rule.IfReturnRule{}, + &rule.UselessBreak{}, +}, defaultRules...) 
+ +var allFormatters = []lint.Formatter{ + &formatter.Stylish{}, + &formatter.Friendly{}, + &formatter.JSON{}, + &formatter.NDJSON{}, + &formatter.Default{}, + &formatter.Unix{}, + &formatter.Checkstyle{}, + &formatter.Plain{}, + &formatter.Sarif{}, +} + +func getFormatters() map[string]lint.Formatter { + result := map[string]lint.Formatter{} + for _, f := range allFormatters { + result[f.Name()] = f + } + return result +} + +// GetLintingRules yields the linting rules that must be applied by the linter +func GetLintingRules(config *lint.Config) ([]lint.Rule, error) { + if config.EnableAllRules { + return getAllRules(config) + } + + return getEnabledRules(config) +} + +// getAllRules yields the list of all available rules except those disabled by configuration +func getAllRules(config *lint.Config) ([]lint.Rule, error) { + lintingRules := []lint.Rule{} + for _, r := range allRules { + ruleConf := config.Rules[r.Name()] + if ruleConf.Disabled { + continue // skip disabled rules + } + + lintingRules = append(lintingRules, r) + } + + return lintingRules, nil +} + +// getEnabledRules yields the list of rules that are enabled by configuration +func getEnabledRules(config *lint.Config) ([]lint.Rule, error) { + rulesMap := map[string]lint.Rule{} + for _, r := range allRules { + rulesMap[r.Name()] = r + } + + lintingRules := []lint.Rule{} + for name, ruleConfig := range config.Rules { + rule, ok := rulesMap[name] + if !ok { + return nil, fmt.Errorf("cannot find rule: %s", name) + } + + if ruleConfig.Disabled { + continue // skip disabled rules + } + + lintingRules = append(lintingRules, rule) + } + + return lintingRules, nil +} + +func parseConfig(path string) (*lint.Config, error) { + config := &lint.Config{} + file, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.New("cannot read the config file") + } + _, err = toml.Decode(string(file), config) + if err != nil { + return nil, fmt.Errorf("cannot parse the config file: %v", err) + } + return config, nil +} + +func normalizeConfig(config *lint.Config) { + if config.Confidence == 0 { + config.Confidence = 0.8 + } + severity := config.Severity + if severity != "" { + for k, v := range config.Rules { + if v.Severity == "" { + v.Severity = severity + } + config.Rules[k] = v + } + for k, v := range config.Directives { + if v.Severity == "" { + v.Severity = severity + } + config.Directives[k] = v + } + } +} + +// GetConfig yields the configuration +func GetConfig(configPath string) (*lint.Config, error) { + config := defaultConfig() + if configPath != "" { + var err error + config, err = parseConfig(configPath) + if err != nil { + return nil, err + } + } + normalizeConfig(config) + return config, nil +} + +// GetFormatter yields the formatter for lint failures +func GetFormatter(formatterName string) (lint.Formatter, error) { + formatters := getFormatters() + formatter := formatters["default"] + if formatterName != "" { + f, ok := formatters[formatterName] + if !ok { + return nil, fmt.Errorf("unknown formatter %v", formatterName) + } + formatter = f + } + return formatter, nil +} + +func defaultConfig() *lint.Config { + defaultConfig := lint.Config{ + Confidence: 0.0, + Severity: lint.SeverityWarning, + Rules: map[string]lint.RuleConfig{}, + } + for _, r := range defaultRules { + defaultConfig.Rules[r.Name()] = lint.RuleConfig{} + } + return &defaultConfig +} diff --git a/vendor/github.com/mgechev/revive/formatter/checkstyle.go b/vendor/github.com/mgechev/revive/formatter/checkstyle.go new file mode 100644 index 000000000..bd20da888 
--- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/checkstyle.go @@ -0,0 +1,76 @@ +package formatter + +import ( + "bytes" + "encoding/xml" + "github.com/mgechev/revive/lint" + plainTemplate "text/template" +) + +// Checkstyle is an implementation of the Formatter interface +// which formats the errors to Checkstyle-like format. +type Checkstyle struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Checkstyle) Name() string { + return "checkstyle" +} + +type issue struct { + Line int + Col int + What string + Confidence float64 + Severity lint.Severity + RuleName string +} + +// Format formats the failures gotten from the lint. +func (f *Checkstyle) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + var issues = map[string][]issue{} + for failure := range failures { + buf := new(bytes.Buffer) + xml.Escape(buf, []byte(failure.Failure)) + what := buf.String() + iss := issue{ + Line: failure.Position.Start.Line, + Col: failure.Position.Start.Column, + What: what, + Confidence: failure.Confidence, + Severity: severity(config, failure), + RuleName: failure.RuleName, + } + fn := failure.GetFilename() + if issues[fn] == nil { + issues[fn] = make([]issue, 0) + } + issues[fn] = append(issues[fn], iss) + } + + t, err := plainTemplate.New("revive").Parse(checkstyleTemplate) + if err != nil { + return "", err + } + + buf := new(bytes.Buffer) + + err = t.Execute(buf, issues) + if err != nil { + return "", err + } + + return buf.String(), nil +} + +const checkstyleTemplate = ` + +{{- range $k, $v := . }} + + {{- range $i, $issue := $v }} + + {{- end }} + +{{- end }} +` diff --git a/vendor/github.com/mgechev/revive/formatter/default.go b/vendor/github.com/mgechev/revive/formatter/default.go new file mode 100644 index 000000000..145e6d548 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/default.go @@ -0,0 +1,26 @@ +package formatter + +import ( + "fmt" + + "github.com/mgechev/revive/lint" +) + +// Default is an implementation of the Formatter interface +// which formats the errors to text. +type Default struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Default) Name() string { + return "default" +} + +// Format formats the failures gotten from the lint. +func (f *Default) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + for failure := range failures { + fmt.Printf("%v: %s\n", failure.Position.Start, failure.Failure) + } + return "", nil +} diff --git a/vendor/github.com/mgechev/revive/formatter/friendly.go b/vendor/github.com/mgechev/revive/formatter/friendly.go new file mode 100644 index 000000000..d0a3099f8 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/friendly.go @@ -0,0 +1,149 @@ +package formatter + +import ( + "bytes" + "fmt" + "sort" + + "github.com/fatih/color" + "github.com/mgechev/revive/lint" + "github.com/olekukonko/tablewriter" +) + +var newLines = map[rune]bool{ + 0x000A: true, + 0x000B: true, + 0x000C: true, + 0x000D: true, + 0x0085: true, + 0x2028: true, + 0x2029: true, +} + +func getErrorEmoji() string { + return color.RedString("✘") +} + +func getWarningEmoji() string { + return color.YellowString("⚠") +} + +// Friendly is an implementation of the Formatter interface +// which formats the errors to JSON. 
+type Friendly struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Friendly) Name() string { + return "friendly" +} + +// Format formats the failures gotten from the lint. +func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + errorMap := map[string]int{} + warningMap := map[string]int{} + totalErrors := 0 + totalWarnings := 0 + for failure := range failures { + sev := severity(config, failure) + f.printFriendlyFailure(failure, sev) + if sev == lint.SeverityWarning { + warningMap[failure.RuleName] = warningMap[failure.RuleName] + 1 + totalWarnings++ + } + if sev == lint.SeverityError { + errorMap[failure.RuleName] = errorMap[failure.RuleName] + 1 + totalErrors++ + } + } + f.printSummary(totalErrors, totalWarnings) + f.printStatistics(color.RedString("Errors:"), errorMap) + f.printStatistics(color.YellowString("Warnings:"), warningMap) + return "", nil +} + +func (f *Friendly) printFriendlyFailure(failure lint.Failure, severity lint.Severity) { + f.printHeaderRow(failure, severity) + f.printFilePosition(failure) + fmt.Println() + fmt.Println() +} + +func (f *Friendly) printHeaderRow(failure lint.Failure, severity lint.Severity) { + emoji := getWarningEmoji() + if severity == lint.SeverityError { + emoji = getErrorEmoji() + } + fmt.Print(f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) +} + +func (f *Friendly) printFilePosition(failure lint.Failure) { + fmt.Printf(" %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) +} + +type statEntry struct { + name string + failures int +} + +func (f *Friendly) printSummary(errors, warnings int) { + emoji := getWarningEmoji() + if errors > 0 { + emoji = getErrorEmoji() + } + problemsLabel := "problems" + if errors+warnings == 1 { + problemsLabel = "problem" + } + warningsLabel := "warnings" + if warnings == 1 { + warningsLabel = "warning" + } + errorsLabel := "errors" + if errors == 1 { + errorsLabel = "error" + } + str := fmt.Sprintf("%d %s (%d %s, %d %s)", errors+warnings, problemsLabel, errors, errorsLabel, warnings, warningsLabel) + if errors > 0 { + fmt.Printf("%s %s\n", emoji, color.RedString(str)) + fmt.Println() + return + } + if warnings > 0 { + fmt.Printf("%s %s\n", emoji, color.YellowString(str)) + fmt.Println() + return + } +} + +func (f *Friendly) printStatistics(header string, stats map[string]int) { + if len(stats) == 0 { + return + } + var data []statEntry + for name, total := range stats { + data = append(data, statEntry{name, total}) + } + sort.Slice(data, func(i, j int) bool { + return data[i].failures > data[j].failures + }) + formatted := [][]string{} + for _, entry := range data { + formatted = append(formatted, []string{color.GreenString(fmt.Sprintf("%d", entry.failures)), entry.name}) + } + fmt.Println(header) + fmt.Println(f.table(formatted)) +} + +func (f *Friendly) table(rows [][]string) string { + buf := new(bytes.Buffer) + table := tablewriter.NewWriter(buf) + table.SetBorder(false) + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAutoWrapText(false) + table.AppendBulk(rows) + table.Render() + return buf.String() +} diff --git a/vendor/github.com/mgechev/revive/formatter/json.go b/vendor/github.com/mgechev/revive/formatter/json.go new file mode 100644 index 000000000..9c939face --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/json.go @@ -0,0 +1,40 @@ +package formatter + +import ( + "encoding/json" + 
+ "github.com/mgechev/revive/lint" +) + +// JSON is an implementation of the Formatter interface +// which formats the errors to JSON. +type JSON struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *JSON) Name() string { + return "json" +} + +// jsonObject defines a JSON object of an failure +type jsonObject struct { + Severity lint.Severity + lint.Failure `json:",inline"` +} + +// Format formats the failures gotten from the lint. +func (f *JSON) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + var slice []jsonObject + for failure := range failures { + obj := jsonObject{} + obj.Severity = severity(config, failure) + obj.Failure = failure + slice = append(slice, obj) + } + result, err := json.Marshal(slice) + if err != nil { + return "", err + } + return string(result), err +} diff --git a/vendor/github.com/mgechev/revive/formatter/ndjson.go b/vendor/github.com/mgechev/revive/formatter/ndjson.go new file mode 100644 index 000000000..aa2b1d636 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/ndjson.go @@ -0,0 +1,34 @@ +package formatter + +import ( + "encoding/json" + "os" + + "github.com/mgechev/revive/lint" +) + +// NDJSON is an implementation of the Formatter interface +// which formats the errors to NDJSON stream. +type NDJSON struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *NDJSON) Name() string { + return "ndjson" +} + +// Format formats the failures gotten from the lint. +func (f *NDJSON) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + enc := json.NewEncoder(os.Stdout) + for failure := range failures { + obj := jsonObject{} + obj.Severity = severity(config, failure) + obj.Failure = failure + err := enc.Encode(obj) + if err != nil { + return "", err + } + } + return "", nil +} diff --git a/vendor/github.com/mgechev/revive/formatter/plain.go b/vendor/github.com/mgechev/revive/formatter/plain.go new file mode 100644 index 000000000..a854d2562 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/plain.go @@ -0,0 +1,26 @@ +package formatter + +import ( + "fmt" + + "github.com/mgechev/revive/lint" +) + +// Plain is an implementation of the Formatter interface +// which formats the errors to JSON. +type Plain struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Plain) Name() string { + return "plain" +} + +// Format formats the failures gotten from the lint. +func (f *Plain) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + for failure := range failures { + fmt.Printf("%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) + } + return "", nil +} diff --git a/vendor/github.com/mgechev/revive/formatter/sarif.go b/vendor/github.com/mgechev/revive/formatter/sarif.go new file mode 100644 index 000000000..8968c3ffb --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/sarif.go @@ -0,0 +1,107 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + + "github.com/chavacava/garif" + "github.com/mgechev/revive/lint" +) + +// Sarif is an implementation of the Formatter interface +// which formats revive failures into SARIF format. +type Sarif struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Sarif) Name() string { + return "sarif" +} + +const reviveSite = "https://revive.run" + +// Format formats the failures gotten from the lint. 
+func (f *Sarif) Format(failures <-chan lint.Failure, cfg lint.Config) (string, error) { + sarifLog := newReviveRunLog(cfg) + + for failure := range failures { + sarifLog.AddResult(failure) + } + + buf := new(bytes.Buffer) + sarifLog.PrettyWrite(buf) + + return buf.String(), nil +} + +type reviveRunLog struct { + *garif.LogFile + run *garif.Run + rules map[string]lint.RuleConfig +} + +func newReviveRunLog(cfg lint.Config) *reviveRunLog { + run := garif.NewRun(garif.NewTool(garif.NewDriver("revive").WithInformationUri(reviveSite))) + log := garif.NewLogFile([]*garif.Run{run}, garif.Version210) + + reviveLog := &reviveRunLog{ + log, + run, + cfg.Rules, + } + + reviveLog.addRules(cfg.Rules) + + return reviveLog +} + +func (l *reviveRunLog) addRules(cfg map[string]lint.RuleConfig) { + for name, ruleCfg := range cfg { + rule := garif.NewRule(name).WithHelpUri(reviveSite + "/r#" + name) + setRuleProperties(rule, ruleCfg) + driver := l.run.Tool.Driver + + if driver.Rules == nil { + driver.Rules = []*garif.ReportingDescriptor{rule} + return + } + + driver.Rules = append(driver.Rules, rule) + } +} + +func (l *reviveRunLog) AddResult(failure lint.Failure) { + positiveOrZero := func(x int) int { + if x > 0 { + return x + } + return 0 + } + position := failure.Position + filename := position.Start.Filename + line := positiveOrZero(position.Start.Line - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_line + column := positiveOrZero(position.Start.Column - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_column + + result := garif.NewResult(garif.NewMessageFromText(failure.Failure)) + location := garif.NewLocation().WithURI(filename).WithLineColumn(line, column) + result.Locations = append(result.Locations, location) + result.RuleId = failure.RuleName + result.Level = l.rules[failure.RuleName].Severity + + l.run.Results = append(l.run.Results, result) +} + +func setRuleProperties(sarifRule *garif.ReportingDescriptor, lintRule lint.RuleConfig) { + arguments := make([]string, len(lintRule.Arguments)) + for i, arg := range lintRule.Arguments { + arguments[i] = fmt.Sprintf("%+v", arg) + } + + if len(arguments) > 0 { + sarifRule.WithProperties("arguments", strings.Join(arguments, ",")) + } + + sarifRule.WithProperties("severity", string(lintRule.Severity)) +} diff --git a/vendor/github.com/mgechev/revive/formatter/severity.go b/vendor/github.com/mgechev/revive/formatter/severity.go new file mode 100644 index 000000000..a43bf3192 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/severity.go @@ -0,0 +1,13 @@ +package formatter + +import "github.com/mgechev/revive/lint" + +func severity(config lint.Config, failure lint.Failure) lint.Severity { + if config, ok := config.Rules[failure.RuleName]; ok && config.Severity == lint.SeverityError { + return lint.SeverityError + } + if config, ok := config.Directives[failure.RuleName]; ok && config.Severity == lint.SeverityError { + return lint.SeverityError + } + return lint.SeverityWarning +} diff --git a/vendor/github.com/mgechev/revive/formatter/stylish.go b/vendor/github.com/mgechev/revive/formatter/stylish.go new file mode 100644 index 000000000..cd81fdae7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/stylish.go @@ -0,0 +1,89 @@ +package formatter + +import ( + "bytes" + "fmt" + + "github.com/fatih/color" + "github.com/mgechev/revive/lint" + "github.com/olekukonko/tablewriter" +) + +// Stylish is an implementation of the Formatter interface +// which 
formats the errors to JSON. +type Stylish struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Stylish) Name() string { + return "stylish" +} + +func formatFailure(failure lint.Failure, severity lint.Severity) []string { + fString := color.CyanString(failure.Failure) + fName := color.RedString("https://revive.run/r#" + failure.RuleName) + lineColumn := failure.Position + pos := fmt.Sprintf("(%d, %d)", lineColumn.Start.Line, lineColumn.Start.Column) + if severity == lint.SeverityWarning { + fName = color.YellowString("https://revive.run/r#" + failure.RuleName) + } + return []string{failure.GetFilename(), pos, fName, fString} +} + +// Format formats the failures gotten from the lint. +func (f *Stylish) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + var result [][]string + var totalErrors = 0 + var total = 0 + + for f := range failures { + total++ + currentType := severity(config, f) + if currentType == lint.SeverityError { + totalErrors++ + } + result = append(result, formatFailure(f, lint.Severity(currentType))) + } + ps := "problems" + if total == 1 { + ps = "problem" + } + + fileReport := make(map[string][][]string) + + for _, row := range result { + if _, ok := fileReport[row[0]]; !ok { + fileReport[row[0]] = [][]string{} + } + + fileReport[row[0]] = append(fileReport[row[0]], []string{row[1], row[2], row[3]}) + } + + output := "" + for filename, val := range fileReport { + buf := new(bytes.Buffer) + table := tablewriter.NewWriter(buf) + table.SetBorder(false) + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAutoWrapText(false) + table.AppendBulk(val) + table.Render() + c := color.New(color.Underline) + output += c.SprintfFunc()(filename + "\n") + output += buf.String() + "\n" + } + + suffix := fmt.Sprintf(" %d %s (%d errors) (%d warnings)", total, ps, totalErrors, total-totalErrors) + + if total > 0 && totalErrors > 0 { + suffix = color.RedString("\n ✖" + suffix) + } else if total > 0 && totalErrors == 0 { + suffix = color.YellowString("\n ✖" + suffix) + } else { + suffix, output = "", "" + } + + return output + suffix, nil +} diff --git a/vendor/github.com/mgechev/revive/formatter/unix.go b/vendor/github.com/mgechev/revive/formatter/unix.go new file mode 100644 index 000000000..b9ae62d38 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/unix.go @@ -0,0 +1,27 @@ +package formatter + +import ( + "fmt" + + "github.com/mgechev/revive/lint" +) + +// Unix is an implementation of the Formatter interface +// which formats the errors to a simple line based error format +// main.go:24:9: [errorf] should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...) +type Unix struct { + Metadata lint.FormatterMetadata +} + +// Name returns the name of the formatter +func (f *Unix) Name() string { + return "unix" +} + +// Format formats the failures gotten from the lint. +func (f *Unix) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + for failure := range failures { + fmt.Printf("%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) + } + return "", nil +} diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go new file mode 100644 index 000000000..276305804 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/config.go @@ -0,0 +1,35 @@ +package lint + +// Arguments is type used for the arguments of a rule. 
+type Arguments = []interface{} + +// RuleConfig is type used for the rule configuration. +type RuleConfig struct { + Arguments Arguments + Severity Severity + Disabled bool +} + +// RulesConfig defines the config for all rules. +type RulesConfig = map[string]RuleConfig + +// DirectiveConfig is type used for the linter directive configuration. +type DirectiveConfig struct { + Severity Severity +} + +// DirectivesConfig defines the config for all directives. +type DirectivesConfig = map[string]DirectiveConfig + +// Config defines the config of the linter. +type Config struct { + IgnoreGeneratedHeader bool `toml:"ignoreGeneratedHeader"` + Confidence float64 + Severity Severity + EnableAllRules bool `toml:"enableAllRules"` + Rules RulesConfig `toml:"rule"` + ErrorCode int `toml:"errorCode"` + WarningCode int `toml:"warningCode"` + Directives DirectivesConfig `toml:"directive"` + Exclude []string `toml:"exclude"` +} diff --git a/vendor/github.com/mgechev/revive/lint/failure.go b/vendor/github.com/mgechev/revive/lint/failure.go new file mode 100644 index 000000000..479b0cb48 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/failure.go @@ -0,0 +1,39 @@ +package lint + +import ( + "go/ast" + "go/token" +) + +const ( + // SeverityWarning declares failures of type warning + SeverityWarning = "warning" + // SeverityError declares failures of type error. + SeverityError = "error" +) + +// Severity is the type for the failure types. +type Severity string + +// FailurePosition returns the failure position +type FailurePosition struct { + Start token.Position + End token.Position +} + +// Failure defines a struct for a linting failure. +type Failure struct { + Failure string + RuleName string + Category string + Position FailurePosition + Node ast.Node `json:"-"` + Confidence float64 + // For future use + ReplacementLine string +} + +// GetFilename returns the filename. +func (f *Failure) GetFilename() string { + return f.Position.Start.Filename +} diff --git a/vendor/github.com/mgechev/revive/lint/file.go b/vendor/github.com/mgechev/revive/lint/file.go new file mode 100644 index 000000000..ee29c1dae --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/file.go @@ -0,0 +1,278 @@ +package lint + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "math" + "regexp" + "strings" +) + +// File abstraction used for representing files. +type File struct { + Name string + Pkg *Package + content []byte + AST *ast.File +} + +// IsTest returns if the file contains tests. +func (f *File) IsTest() bool { return strings.HasSuffix(f.Name, "_test.go") } + +// Content returns the file's content. +func (f *File) Content() []byte { + return f.content +} + +// NewFile creates a new file +func NewFile(name string, content []byte, pkg *Package) (*File, error) { + f, err := parser.ParseFile(pkg.fset, name, content, parser.ParseComments) + if err != nil { + return nil, err + } + return &File{ + Name: name, + content: content, + Pkg: pkg, + AST: f, + }, nil +} + +// ToPosition returns line and column for given position. +func (f *File) ToPosition(pos token.Pos) token.Position { + return f.Pkg.fset.Position(pos) +} + +// Render renters a node. +func (f *File) Render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.Pkg.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +// CommentMap builds a comment map for the file. 
+func (f *File) CommentMap() ast.CommentMap { + return ast.NewCommentMap(f.Pkg.fset, f.AST, f.AST.Comments) +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// IsUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *File) IsUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.Render(expr) + tv, err := types.Eval(f.Pkg.fset, f.Pkg.TypesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +func (f *File) isMain() bool { + if f.AST.Name.Name == "main" { + return true + } + return false +} + +const directiveSpecifyDisableReason = "specify-disable-reason" + +func (f *File) lint(rules []Rule, config Config, failures chan Failure) { + rulesConfig := config.Rules + _, mustSpecifyDisableReason := config.Directives[directiveSpecifyDisableReason] + disabledIntervals := f.disabledIntervals(rules, mustSpecifyDisableReason, failures) + for _, currentRule := range rules { + ruleConfig := rulesConfig[currentRule.Name()] + currentFailures := currentRule.Apply(f, ruleConfig.Arguments) + for idx, failure := range currentFailures { + if failure.RuleName == "" { + failure.RuleName = currentRule.Name() + } + if failure.Node != nil { + failure.Position = ToFailurePosition(failure.Node.Pos(), failure.Node.End(), f) + } + currentFailures[idx] = failure + } + currentFailures = f.filterFailures(currentFailures, disabledIntervals) + for _, failure := range currentFailures { + if failure.Confidence >= config.Confidence { + failures <- failure + } + } + } +} + +type enableDisableConfig struct { + enabled bool + position int +} + +const directiveRE = `^//[\s]*revive:(enable|disable)(?:-(line|next-line))?(?::([^\s]+))?[\s]*(?: (.+))?$` +const directivePos = 1 +const modifierPos = 2 +const rulesPos = 3 +const reasonPos = 4 + +var re = regexp.MustCompile(directiveRE) + +func (f *File) disabledIntervals(rules []Rule, mustSpecifyDisableReason bool, failures chan Failure) disabledIntervalsMap { + enabledDisabledRulesMap := make(map[string][]enableDisableConfig) + + getEnabledDisabledIntervals := func() disabledIntervalsMap { + result := make(disabledIntervalsMap) + + for ruleName, disabledArr := range enabledDisabledRulesMap { + ruleResult := []DisabledInterval{} + for i := 0; i < len(disabledArr); i++ { + interval := DisabledInterval{ + RuleName: ruleName, + From: token.Position{ + Filename: f.Name, + Line: disabledArr[i].position, + }, + To: token.Position{ + Filename: f.Name, + Line: math.MaxInt32, + }, + } + if i%2 == 0 { + ruleResult = append(ruleResult, interval) + } else { + ruleResult[len(ruleResult)-1].To.Line = disabledArr[i].position + } + } + result[ruleName] = ruleResult + } + + return result + } + + handleConfig := func(isEnabled bool, line int, name string) { + existing, ok := enabledDisabledRulesMap[name] + if !ok { + existing = []enableDisableConfig{} + enabledDisabledRulesMap[name] = existing + } + if (len(existing) > 1 && existing[len(existing)-1].enabled == isEnabled) || + (len(existing) == 0 && 
isEnabled) { + return + } + existing = append(existing, enableDisableConfig{ + enabled: isEnabled, + position: line, + }) + enabledDisabledRulesMap[name] = existing + } + + handleRules := func(filename, modifier string, isEnabled bool, line int, ruleNames []string) []DisabledInterval { + var result []DisabledInterval + for _, name := range ruleNames { + if modifier == "line" { + handleConfig(isEnabled, line, name) + handleConfig(!isEnabled, line, name) + } else if modifier == "next-line" { + handleConfig(isEnabled, line+1, name) + handleConfig(!isEnabled, line+1, name) + } else { + handleConfig(isEnabled, line, name) + } + } + return result + } + + handleComment := func(filename string, c *ast.CommentGroup, line int) { + comments := c.List + for _, c := range comments { + match := re.FindStringSubmatch(c.Text) + if len(match) == 0 { + continue + } + + ruleNames := []string{} + tempNames := strings.Split(match[rulesPos], ",") + for _, name := range tempNames { + name = strings.Trim(name, "\n") + if len(name) > 0 { + ruleNames = append(ruleNames, name) + } + } + + mustCheckDisablingReason := mustSpecifyDisableReason && match[directivePos] == "disable" + if mustCheckDisablingReason && strings.Trim(match[reasonPos], " ") == "" { + failures <- Failure{ + Confidence: 1, + RuleName: directiveSpecifyDisableReason, + Failure: "reason of lint disabling not found", + Position: ToFailurePosition(c.Pos(), c.End(), f), + Node: c, + } + continue // skip this linter disabling directive + } + + // TODO: optimize + if len(ruleNames) == 0 { + for _, rule := range rules { + ruleNames = append(ruleNames, rule.Name()) + } + } + + handleRules(filename, match[modifierPos], match[directivePos] == "enable", line, ruleNames) + } + } + + comments := f.AST.Comments + for _, c := range comments { + handleComment(f.Name, c, f.ToPosition(c.End()).Line) + } + + return getEnabledDisabledIntervals() +} + +func (f *File) filterFailures(failures []Failure, disabledIntervals disabledIntervalsMap) []Failure { + result := []Failure{} + for _, failure := range failures { + fStart := failure.Position.Start.Line + fEnd := failure.Position.End.Line + intervals, ok := disabledIntervals[failure.RuleName] + if !ok { + result = append(result, failure) + } else { + include := true + for _, interval := range intervals { + intStart := interval.From.Line + intEnd := interval.To.Line + if (fStart >= intStart && fStart <= intEnd) || + (fEnd >= intStart && fEnd <= intEnd) { + include = false + break + } + } + if include { + result = append(result, failure) + } + } + } + return result +} diff --git a/vendor/github.com/mgechev/revive/lint/formatter.go b/vendor/github.com/mgechev/revive/lint/formatter.go new file mode 100644 index 000000000..7c19af278 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/formatter.go @@ -0,0 +1,14 @@ +package lint + +// FormatterMetadata configuration of a formatter +type FormatterMetadata struct { + Name string + Description string + Sample string +} + +// Formatter defines an interface for failure formatters +type Formatter interface { + Format(<-chan Failure, Config) (string, error) + Name() string +} diff --git a/vendor/github.com/mgechev/revive/lint/linter.go b/vendor/github.com/mgechev/revive/lint/linter.go new file mode 100644 index 000000000..cdca84fb5 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/linter.go @@ -0,0 +1,99 @@ +package lint + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "os" + "sync" +) + +// ReadFile defines an abstraction for reading files. 
+type ReadFile func(path string) (result []byte, err error) + +type disabledIntervalsMap = map[string][]DisabledInterval + +// Linter is used for linting set of files. +type Linter struct { + reader ReadFile +} + +// New creates a new Linter +func New(reader ReadFile) Linter { + return Linter{reader: reader} +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// Lint lints a set of files with the specified rule. +func (l *Linter) Lint(packages [][]string, ruleSet []Rule, config Config) (<-chan Failure, error) { + failures := make(chan Failure) + + var wg sync.WaitGroup + for _, pkg := range packages { + wg.Add(1) + go func(pkg []string) { + if err := l.lintPackage(pkg, ruleSet, config, failures); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer wg.Done() + }(pkg) + } + + go func() { + wg.Wait() + close(failures) + }() + + return failures, nil +} + +func (l *Linter) lintPackage(filenames []string, ruleSet []Rule, config Config, failures chan Failure) error { + pkg := &Package{ + fset: token.NewFileSet(), + files: map[string]*File{}, + mu: sync.Mutex{}, + } + for _, filename := range filenames { + content, err := l.reader(filename) + if err != nil { + return err + } + if isGenerated(content) && !config.IgnoreGeneratedHeader { + continue + } + + file, err := NewFile(filename, content, pkg) + if err != nil { + return err + } + pkg.files[filename] = file + } + + if len(pkg.files) == 0 { + return nil + } + + pkg.lint(ruleSet, config, failures) + + return nil +} + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +// This is inherited from the original go lint. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/lint/package.go b/vendor/github.com/mgechev/revive/lint/package.go new file mode 100644 index 000000000..7b6046fd7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/package.go @@ -0,0 +1,178 @@ +package lint + +import ( + "go/ast" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// Package represents a package in the project. +type Package struct { + fset *token.FileSet + files map[string]*File + + TypesPkg *types.Package + TypesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + Sortable map[string]bool + // main is whether this is a "main" package. + main int + mu sync.Mutex +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +var ( + trueValue = 1 + falseValue = 2 + notSet = 3 +) + +// IsMain returns if that's the main package. +func (p *Package) IsMain() bool { + if p.main == trueValue { + return true + } else if p.main == falseValue { + return false + } + for _, f := range p.files { + if f.isMain() { + p.main = trueValue + return true + } + } + p.main = falseValue + return false +} + +// TypeCheck performs type checking for given package. +func (p *Package) TypeCheck() error { + p.mu.Lock() + // If type checking has already been performed + // skip it. 
+ if p.TypesInfo != nil || p.TypesPkg != nil { + p.mu.Unlock() + return nil + } + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *File + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.AST) + } + + typesPkg, err := check(config, anyFile.AST.Name.Name, p.fset, astFiles, info) + + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.TypesPkg = typesPkg + p.TypesInfo = info + p.mu.Unlock() + return err +} + +// check function encapsulates the call to go/types.Config.Check method and +// recovers if the called method panics (see issue #59) +func check(config *types.Config, n string, fset *token.FileSet, astFiles []*ast.File, info *types.Info) (p *types.Package, err error) { + defer func() { + if r := recover(); r != nil { + err, _ = r.(error) + p = nil + return + } + }() + + return config.Check(n, fset, astFiles, info) +} + +// TypeOf returns the type of an expression. +func (p *Package) TypeOf(expr ast.Expr) types.Type { + if p.TypesInfo == nil { + return nil + } + return p.TypesInfo.TypeOf(expr) +} + +type walker struct { + nmap map[string]int + has map[string]int +} + +func (w *walker) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return w + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := w.nmap[fn.Name.Name]; ok { + w.has[recv] |= i + } + return w +} + +func (p *Package) scanSortable() { + p.Sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + ast.Walk(&walker{nmap, has}, f.AST) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.Sortable[typ] = true + } + } +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (p *Package) lint(rules []Rule, config Config, failures chan Failure) { + p.scanSortable() + var wg sync.WaitGroup + for _, file := range p.files { + wg.Add(1) + go (func(file *File) { + file.lint(rules, config, failures) + defer wg.Done() + })(file) + } + wg.Wait() +} diff --git a/vendor/github.com/mgechev/revive/lint/rule.go b/vendor/github.com/mgechev/revive/lint/rule.go new file mode 100644 index 000000000..815abfdd8 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/rule.go @@ -0,0 +1,31 @@ +package lint + +import ( + "go/token" +) + +// DisabledInterval contains a single disabled interval and the associated rule name. 
+type DisabledInterval struct { + From token.Position + To token.Position + RuleName string +} + +// Rule defines an abstract rule interaface +type Rule interface { + Name() string + Apply(*File, Arguments) []Failure +} + +// AbstractRule defines an abstract rule. +type AbstractRule struct { + Failures []Failure +} + +// ToFailurePosition returns the failure position. +func ToFailurePosition(start token.Pos, end token.Pos, file *File) FailurePosition { + return FailurePosition{ + Start: file.ToPosition(start), + End: file.ToPosition(end), + } +} diff --git a/vendor/github.com/mgechev/revive/lint/utils.go b/vendor/github.com/mgechev/revive/lint/utils.go new file mode 100644 index 000000000..28657c6df --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/utils.go @@ -0,0 +1,128 @@ +package lint + +import ( + "strings" + "unicode" +) + +// Name returns a different name if it should be different. +func Name(name string, whitelist, blacklist []string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + ignoreInitWarnings := map[string]bool{} + for _, i := range whitelist { + ignoreInitWarnings[i] = true + } + + extraInits := map[string]bool{} + for _, i := range blacklist { + extraInits[i] = true + } + + if u := strings.ToUpper(word); (commonInitialisms[u] || extraInits[u]) && !ignoreInitWarnings[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} diff --git a/vendor/github.com/mgechev/revive/rule/add-constant.go b/vendor/github.com/mgechev/revive/rule/add-constant.go new file mode 100644 index 000000000..bc6268ee1 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/add-constant.go @@ -0,0 +1,152 @@ +package rule + +import ( + "fmt" + "go/ast" + "strconv" + "strings" + + "github.com/mgechev/revive/lint" +) + +const ( + defaultStrLitLimit = 2 + kindFLOAT = "FLOAT" + kindINT = "INT" + kindSTRING = "STRING" +) + +type whiteList map[string]map[string]bool + +func newWhiteList() whiteList { + return map[string]map[string]bool{kindINT: {}, kindFLOAT: {}, kindSTRING: {}} +} + +func (wl whiteList) add(kind string, list string) { + elems := strings.Split(list, ",") + for _, e := range elems { + wl[kind][e] = true + } +} + +// AddConstantRule lints unused params in functions. +type AddConstantRule struct{} + +// Apply applies the rule to given file. +func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + strLitLimit := defaultStrLitLimit + var whiteList = newWhiteList() + if len(arguments) > 0 { + args, ok := arguments[0].(map[string]interface{}) + if !ok { + panic(fmt.Sprintf("Invalid argument to the add-constant rule. Expecting a k,v map, got %T", arguments[0])) + } + for k, v := range args { + kind := "" + switch k { + case "allowFloats": + kind = kindFLOAT + fallthrough + case "allowInts": + if kind == "" { + kind = kindINT + } + fallthrough + case "allowStrs": + if kind == "" { + kind = kindSTRING + } + list, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the add-constant rule, string expected. Got '%v' (%T)", v, v)) + } + whiteList.add(kind, list) + case "maxLitCount": + sl, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v' (%T)", v, v)) + } + + limit, err := strconv.Atoi(sl) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v'", v)) + } + strLitLimit = limit + } + } + } + + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintAddConstantRule{onFailure: onFailure, strLits: make(map[string]int, 0), strLitLimit: strLitLimit, whiteLst: whiteList} + + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (r *AddConstantRule) Name() string { + return "add-constant" +} + +type lintAddConstantRule struct { + onFailure func(lint.Failure) + strLits map[string]int + strLitLimit int + whiteLst whiteList +} + +func (w lintAddConstantRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.GenDecl: + return nil // skip declarations + case *ast.BasicLit: + switch kind := n.Kind.String(); kind { + case kindFLOAT, kindINT: + w.checkNumLit(kind, n) + case kindSTRING: + w.checkStrLit(n) + } + } + + return w + +} + +func (w lintAddConstantRule) checkStrLit(n *ast.BasicLit) { + if w.whiteLst[kindSTRING][n.Value] { + return + } + + count := w.strLits[n.Value] + if count >= 0 { + w.strLits[n.Value] = count + 1 + if w.strLits[n.Value] > w.strLitLimit { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "style", + Failure: fmt.Sprintf("string literal %s appears, at least, %d times, create a named constant for it", n.Value, w.strLits[n.Value]), + }) + w.strLits[n.Value] = -1 // mark it to avoid failing again on the same literal + } + } +} + +func (w lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { + if w.whiteLst[kind][n.Value] { + return + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "style", + Failure: fmt.Sprintf("avoid magic numbers like '%s', create a named constant for it", n.Value), + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/argument-limit.go b/vendor/github.com/mgechev/revive/rule/argument-limit.go new file mode 100644 index 000000000..2b11d4982 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/argument-limit.go @@ -0,0 +1,67 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ArgumentsLimitRule lints given else constructs. +type ArgumentsLimitRule struct{} + +// Apply applies the rule to given file. +func (r *ArgumentsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + if len(arguments) != 1 { + panic(`invalid configuration for "argument-limit"`) + } + + total, ok := arguments[0].(int64) // Alt. non panicking version + if !ok { + panic(`invalid value passed as argument number to the "argument-list" rule`) + } + + var failures []lint.Failure + + walker := lintArgsNum{ + total: int(total), + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *ArgumentsLimitRule) Name() string { + return "argument-limit" +} + +type lintArgsNum struct { + total int + onFailure func(lint.Failure) +} + +func (w lintArgsNum) Visit(n ast.Node) ast.Visitor { + node, ok := n.(*ast.FuncDecl) + if ok { + num := 0 + for _, l := range node.Type.Params.List { + for range l.Names { + num++ + } + } + if num > w.total { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of arguments per function exceeded; max %d but got %d", w.total, num), + Node: node.Type, + }) + return w + } + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/atomic.go b/vendor/github.com/mgechev/revive/rule/atomic.go new file mode 100644 index 000000000..572e141da --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/atomic.go @@ -0,0 +1,94 @@ +package rule + +import ( + "go/ast" + "go/token" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// AtomicRule lints given else constructs. +type AtomicRule struct{} + +// Apply applies the rule to given file. 
+func (r *AtomicRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + walker := atomic{ + pkgTypesInfo: file.Pkg.TypesInfo, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *AtomicRule) Name() string { + return "atomic" +} + +type atomic struct { + pkgTypesInfo *types.Info + onFailure func(lint.Failure) +} + +func (w atomic) Visit(node ast.Node) ast.Visitor { + n, ok := node.(*ast.AssignStmt) + if !ok { + return w + } + + if len(n.Lhs) != len(n.Rhs) { + return nil // skip assignment sub-tree + } + if len(n.Lhs) == 1 && n.Tok == token.DEFINE { + return nil // skip assignment sub-tree + } + + for i, right := range n.Rhs { + call, ok := right.(*ast.CallExpr) + if !ok { + continue + } + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + pkgIdent, _ := sel.X.(*ast.Ident) + if w.pkgTypesInfo != nil { + pkgName, ok := w.pkgTypesInfo.Uses[pkgIdent].(*types.PkgName) + if !ok || pkgName.Imported().Path() != "sync/atomic" { + continue + } + } + + switch sel.Sel.Name { + case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr": + left := n.Lhs[i] + if len(call.Args) != 2 { + continue + } + arg := call.Args[0] + broken := false + + if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND { + broken = gofmt(left) == gofmt(uarg.X) + } else if star, ok := left.(*ast.StarExpr); ok { + broken = gofmt(star.X) == gofmt(arg) + } + + if broken { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: "direct assignment to atomic value", + Node: n, + }) + } + } + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/bare-return.go b/vendor/github.com/mgechev/revive/rule/bare-return.go new file mode 100644 index 000000000..3ee4c4adc --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/bare-return.go @@ -0,0 +1,84 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// BareReturnRule lints given else constructs. +type BareReturnRule struct{} + +// Apply applies the rule to given file. +func (r *BareReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintBareReturnRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *BareReturnRule) Name() string { + return "bare-return" +} + +type lintBareReturnRule struct { + onFailure func(lint.Failure) +} + +func (w lintBareReturnRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + w.checkFunc(n.Type.Results, n.Body) + case *ast.FuncLit: // to cope with deferred functions and go-routines + w.checkFunc(n.Type.Results, n.Body) + } + + return w +} + +// checkFunc will verify if the given function has named result and bare returns +func (w lintBareReturnRule) checkFunc(results *ast.FieldList, body *ast.BlockStmt) { + hasNamedResults := results != nil && len(results.List) > 0 && results.List[0].Names != nil + if !hasNamedResults || body == nil { + return // nothing to do + } + + brf := bareReturnFinder{w.onFailure} + ast.Walk(brf, body) +} + +type bareReturnFinder struct { + onFailure func(lint.Failure) +} + +func (w bareReturnFinder) Visit(node ast.Node) ast.Visitor { + _, ok := node.(*ast.FuncLit) + if ok { + // skip analysing function literals + // they will analyzed by the lintBareReturnRule.Visit method + return nil + } + + rs, ok := node.(*ast.ReturnStmt) + if !ok { + return w + } + + if len(rs.Results) > 0 { + return w + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: rs, + Failure: "avoid using bare returns, please add return expressions", + }) + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/blank-imports.go b/vendor/github.com/mgechev/revive/rule/blank-imports.go new file mode 100644 index 000000000..9e8b8fc00 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/blank-imports.go @@ -0,0 +1,75 @@ +package rule + +import ( + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// BlankImportsRule lints given else constructs. +type BlankImportsRule struct{} + +// Name returns the rule name. +func (r *BlankImportsRule) Name() string { + return "blank-imports" +} + +// Apply applies the rule to given file. +func (r *BlankImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + if file.Pkg.IsMain() || file.IsTest() { + return nil + } + + const ( + message = "a blank import should be only in a main or test package, or have a comment justifying it" + category = "imports" + + embedImportPath = `"embed"` + ) + + var failures []lint.Failure + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range file.AST.Imports { + pos := file.ToPosition(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + + if i > 0 { + prev := file.AST.Imports[i-1] + prevPos := file.ToPosition(prev.Pos()) + + isSubsequentBlancInAGroup := isBlank(prev.Name) && prevPos.Line+1 == pos.Line && prev.Path.Value != embedImportPath + if isSubsequentBlancInAGroup { + continue + } + } + + if imp.Path.Value == embedImportPath && r.fileHasValidEmbedComment(file.AST) { + continue + } + + // This is the first blank import of a group. 
+ if imp.Doc == nil && imp.Comment == nil { + failures = append(failures, lint.Failure{Failure: message, Category: category, Node: imp, Confidence: 1}) + } + } + + return failures +} + +func (r *BlankImportsRule) fileHasValidEmbedComment(fileAst *ast.File) bool { + for _, commentGroup := range fileAst.Comments { + for _, comment := range commentGroup.List { + if strings.HasPrefix(comment.Text, "//go:embed ") { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go b/vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go new file mode 100644 index 000000000..0a4e696c6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go @@ -0,0 +1,73 @@ +package rule + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// BoolLiteralRule warns when logic expressions contains Boolean literals. +type BoolLiteralRule struct{} + +// Apply applies the rule to given file. +func (r *BoolLiteralRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintBoolLiteral{astFile, onFailure} + ast.Walk(w, astFile) + + return failures +} + +// Name returns the rule name. +func (r *BoolLiteralRule) Name() string { + return "bool-literal-in-expr" +} + +type lintBoolLiteral struct { + file *ast.File + onFailure func(lint.Failure) +} + +func (w *lintBoolLiteral) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.BinaryExpr: + if !isBoolOp(n.Op) { + return w + } + + lexeme, ok := isExprABooleanLit(n.X) + if !ok { + lexeme, ok = isExprABooleanLit(n.Y) + + if !ok { + return w + } + } + + isConstant := (n.Op == token.LAND && lexeme == "false") || (n.Op == token.LOR && lexeme == "true") + + if isConstant { + w.addFailure(n, "Boolean expression seems to always evaluate to "+lexeme, "logic") + } else { + w.addFailure(n, "omit Boolean literal in expression", "style") + } + } + + return w +} + +func (w lintBoolLiteral) addFailure(node ast.Node, msg string, cat string) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: cat, + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/call-to-gc.go b/vendor/github.com/mgechev/revive/rule/call-to-gc.go new file mode 100644 index 000000000..e05fa6924 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/call-to-gc.go @@ -0,0 +1,70 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// CallToGCRule lints calls to the garbage collector. +type CallToGCRule struct{} + +// Apply applies the rule to given file. +func (r *CallToGCRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + var gcTriggeringFunctions = map[string]map[string]bool{ + "runtime": {"GC": true}, + } + + w := lintCallToGC{onFailure, gcTriggeringFunctions} + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (r *CallToGCRule) Name() string { + return "call-to-gc" +} + +type lintCallToGC struct { + onFailure func(lint.Failure) + gcTriggeringFunctions map[string]map[string]bool +} + +func (w lintCallToGC) Visit(node ast.Node) ast.Visitor { + ce, ok := node.(*ast.CallExpr) + if !ok { + return w // nothing to do, the node is not a call + } + + fc, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return nil // nothing to do, the call is not of the form pkg.func(...) + } + + id, ok := fc.X.(*ast.Ident) + + if !ok { + return nil // in case X is not an id (it should be!) + } + + fn := fc.Sel.Name + pkg := id.Name + if !w.gcTriggeringFunctions[pkg][fn] { + return nil // it isn't a call to a GC triggering function + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: "bad practice", + Failure: "explicit call to the garbage collector", + }) + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go new file mode 100644 index 000000000..ccd36bd09 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go @@ -0,0 +1,195 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" + "golang.org/x/tools/go/ast/astutil" +) + +// CognitiveComplexityRule lints given else constructs. +type CognitiveComplexityRule struct{} + +// Apply applies the rule to given file. +func (r *CognitiveComplexityRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + const expectedArgumentsCount = 1 + if len(arguments) < expectedArgumentsCount { + panic(fmt.Sprintf("not enough arguments for cognitive-complexity, expected %d, got %d", expectedArgumentsCount, len(arguments))) + } + complexity, ok := arguments[0].(int64) + if !ok { + panic(fmt.Sprintf("invalid argument type for cognitive-complexity, expected int64, got %T", arguments[0])) + } + + linter := cognitiveComplexityLinter{ + file: file, + maxComplexity: int(complexity), + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + linter.lint() + + return failures +} + +// Name returns the rule name. +func (r *CognitiveComplexityRule) Name() string { + return "cognitive-complexity" +} + +type cognitiveComplexityLinter struct { + file *lint.File + maxComplexity int + onFailure func(lint.Failure) +} + +func (w cognitiveComplexityLinter) lint() { + f := w.file + for _, decl := range f.AST.Decls { + if fn, ok := decl.(*ast.FuncDecl); ok && fn.Body != nil { + v := cognitiveComplexityVisitor{} + c := v.subTreeComplexity(fn.Body) + if c > w.maxComplexity { + w.onFailure(lint.Failure{ + Confidence: 1, + Category: "maintenance", + Failure: fmt.Sprintf("function %s has cognitive complexity %d (> max enabled %d)", funcName(fn), c, w.maxComplexity), + Node: fn, + }) + } + } + } +} + +type cognitiveComplexityVisitor struct { + complexity int + nestingLevel int +} + +// subTreeComplexity calculates the cognitive complexity of an AST-subtree. +func (v cognitiveComplexityVisitor) subTreeComplexity(n ast.Node) int { + ast.Walk(&v, n) + return v.complexity +} + +// Visit implements the ast.Visitor interface. +func (v *cognitiveComplexityVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.IfStmt: + targets := []ast.Node{n.Cond, n.Body, n.Else} + v.walk(1, targets...) + return nil + case *ast.ForStmt: + targets := []ast.Node{n.Cond, n.Body} + v.walk(1, targets...) 
+ return nil + case *ast.RangeStmt: + v.walk(1, n.Body) + return nil + case *ast.SelectStmt: + v.walk(1, n.Body) + return nil + case *ast.SwitchStmt: + v.walk(1, n.Body) + return nil + case *ast.TypeSwitchStmt: + v.walk(1, n.Body) + return nil + case *ast.FuncLit: + v.walk(0, n.Body) // do not increment the complexity, just do the nesting + return nil + case *ast.BinaryExpr: + v.complexity += v.binExpComplexity(n) + return nil // skip visiting binexp sub-tree (already visited by binExpComplexity) + case *ast.BranchStmt: + if n.Label != nil { + v.complexity++ + } + } + // TODO handle (at least) direct recursion + + return v +} + +func (v *cognitiveComplexityVisitor) walk(complexityIncrement int, targets ...ast.Node) { + v.complexity += complexityIncrement + v.nestingLevel + nesting := v.nestingLevel + v.nestingLevel++ + + for _, t := range targets { + if t == nil { + continue + } + + ast.Walk(v, t) + } + + v.nestingLevel = nesting +} + +func (cognitiveComplexityVisitor) binExpComplexity(n *ast.BinaryExpr) int { + calculator := binExprComplexityCalculator{opsStack: []token.Token{}} + + astutil.Apply(n, calculator.pre, calculator.post) + + return calculator.complexity +} + +type binExprComplexityCalculator struct { + complexity int + opsStack []token.Token // stack of bool operators + subexpStarted bool +} + +func (becc *binExprComplexityCalculator) pre(c *astutil.Cursor) bool { + switch n := c.Node().(type) { + case *ast.BinaryExpr: + isBoolOp := n.Op == token.LAND || n.Op == token.LOR + if !isBoolOp { + break + } + + ops := len(becc.opsStack) + // if + // is the first boolop in the expression OR + // is the first boolop inside a subexpression (...) OR + // is not the same to the previous one + // then + // increment complexity + if ops == 0 || becc.subexpStarted || n.Op != becc.opsStack[ops-1] { + becc.complexity++ + becc.subexpStarted = false + } + + becc.opsStack = append(becc.opsStack, n.Op) + case *ast.ParenExpr: + becc.subexpStarted = true + } + + return true +} + +func (becc *binExprComplexityCalculator) post(c *astutil.Cursor) bool { + switch n := c.Node().(type) { + case *ast.BinaryExpr: + isBoolOp := n.Op == token.LAND || n.Op == token.LOR + if !isBoolOp { + break + } + + ops := len(becc.opsStack) + if ops > 0 { + becc.opsStack = becc.opsStack[:ops-1] + } + case *ast.ParenExpr: + becc.subexpStarted = false + } + + return true +} diff --git a/vendor/github.com/mgechev/revive/rule/confusing-naming.go b/vendor/github.com/mgechev/revive/rule/confusing-naming.go new file mode 100644 index 000000000..143bb18c3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/confusing-naming.go @@ -0,0 +1,190 @@ +package rule + +import ( + "fmt" + "go/ast" + + "strings" + "sync" + + "github.com/mgechev/revive/lint" +) + +type referenceMethod struct { + fileName string + id *ast.Ident +} + +type pkgMethods struct { + pkg *lint.Package + methods map[string]map[string]*referenceMethod + mu *sync.Mutex +} + +type packages struct { + pkgs []pkgMethods + mu sync.Mutex +} + +func (ps *packages) methodNames(lp *lint.Package) pkgMethods { + ps.mu.Lock() + + for _, pkg := range ps.pkgs { + if pkg.pkg == lp { + ps.mu.Unlock() + return pkg + } + } + + pkgm := pkgMethods{pkg: lp, methods: make(map[string]map[string]*referenceMethod), mu: &sync.Mutex{}} + ps.pkgs = append(ps.pkgs, pkgm) + + ps.mu.Unlock() + return pkgm +} + +var allPkgs = packages{pkgs: make([]pkgMethods, 1)} + +// ConfusingNamingRule lints method names that differ only by capitalization +type ConfusingNamingRule struct{} + +// Apply applies 
the rule to given file. +func (r *ConfusingNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + fileAst := file.AST + pkgm := allPkgs.methodNames(file.Pkg) + walker := lintConfusingNames{ + fileName: file.Name, + pkgm: pkgm, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(&walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ConfusingNamingRule) Name() string { + return "confusing-naming" +} + +//checkMethodName checks if a given method/function name is similar (just case differences) to other method/function of the same struct/file. +func checkMethodName(holder string, id *ast.Ident, w *lintConfusingNames) { + if id.Name == "init" && holder == defaultStructName { + // ignore init functions + return + } + + pkgm := w.pkgm + name := strings.ToUpper(id.Name) + + pkgm.mu.Lock() + defer pkgm.mu.Unlock() + + if pkgm.methods[holder] != nil { + if pkgm.methods[holder][name] != nil { + refMethod := pkgm.methods[holder][name] + // confusing names + var kind string + if holder == defaultStructName { + kind = "function" + } else { + kind = "method" + } + var fileName string + if w.fileName == refMethod.fileName { + fileName = "the same source file" + } else { + fileName = refMethod.fileName + } + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("Method '%s' differs only by capitalization to %s '%s' in %s", id.Name, kind, refMethod.id.Name, fileName), + Confidence: 1, + Node: id, + Category: "naming", + }) + + return + } + } else { + pkgm.methods[holder] = make(map[string]*referenceMethod, 1) + } + + // update the black list + if pkgm.methods[holder] == nil { + println("no entry for '", holder, "'") + } + pkgm.methods[holder][name] = &referenceMethod{fileName: w.fileName, id: id} +} + +type lintConfusingNames struct { + fileName string + pkgm pkgMethods + onFailure func(lint.Failure) +} + +const defaultStructName = "_" // used to map functions + +//getStructName of a function receiver. Defaults to defaultStructName +func getStructName(r *ast.FieldList) string { + result := defaultStructName + + if r == nil || len(r.List) < 1 { + return result + } + + t := r.List[0].Type + + if p, _ := t.(*ast.StarExpr); p != nil { // if a pointer receiver => dereference pointer receiver types + t = p.X + } + + if p, _ := t.(*ast.Ident); p != nil { + result = p.Name + } + + return result +} + +func checkStructFields(fields *ast.FieldList, structName string, w *lintConfusingNames) { + bl := make(map[string]bool, len(fields.List)) + for _, f := range fields.List { + for _, id := range f.Names { + normName := strings.ToUpper(id.Name) + if bl[normName] { + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("Field '%s' differs only by capitalization to other field in the struct type %s", id.Name, structName), + Confidence: 1, + Node: id, + Category: "naming", + }) + } else { + bl[normName] = true + } + } + } +} + +func (w *lintConfusingNames) Visit(n ast.Node) ast.Visitor { + switch v := n.(type) { + case *ast.FuncDecl: + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. + if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + checkMethodName(getStructName(v.Recv), v.Name, w) + } + case *ast.TypeSpec: + if s, ok := v.Type.(*ast.StructType); ok { + checkStructFields(s.Fields, v.Name.Name, w) + } + + default: + // will add other checks like field names, struct names, etc. 
+ } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/confusing-results.go b/vendor/github.com/mgechev/revive/rule/confusing-results.go new file mode 100644 index 000000000..1d386b3db --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/confusing-results.go @@ -0,0 +1,67 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ConfusingResultsRule lints given function declarations +type ConfusingResultsRule struct{} + +// Apply applies the rule to given file. +func (r *ConfusingResultsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintConfusingResults{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ConfusingResultsRule) Name() string { + return "confusing-results" +} + +type lintConfusingResults struct { + onFailure func(lint.Failure) +} + +func (w lintConfusingResults) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil || len(fn.Type.Results.List) < 2 { + return w + } + lastType := "" + for _, result := range fn.Type.Results.List { + if len(result.Names) > 0 { + return w + } + + t, ok := result.Type.(*ast.Ident) + if !ok { + return w + } + + if t.Name == lastType { + w.onFailure(lint.Failure{ + Node: n, + Confidence: 1, + Category: "naming", + Failure: "unnamed results of the same type may be confusing, consider using named results", + }) + break + } + lastType = t.Name + + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go new file mode 100644 index 000000000..6a9156111 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go @@ -0,0 +1,88 @@ +package rule + +import ( + "github.com/mgechev/revive/lint" + "go/ast" + "go/token" +) + +// ConstantLogicalExprRule warns on constant logical expressions. +type ConstantLogicalExprRule struct{} + +// Apply applies the rule to given file. +func (r *ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintConstantLogicalExpr{astFile, onFailure} + ast.Walk(w, astFile) + return failures +} + +// Name returns the rule name. 
+func (r *ConstantLogicalExprRule) Name() string { + return "constant-logical-expr" +} + +type lintConstantLogicalExpr struct { + file *ast.File + onFailure func(lint.Failure) +} + +func (w *lintConstantLogicalExpr) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.BinaryExpr: + if !w.isOperatorWithLogicalResult(n.Op) { + return w + } + + if gofmt(n.X) != gofmt(n.Y) { // check if subexpressions are the same + return w + } + + if n.Op == token.EQL { + w.newFailure(n, "expression always evaluates to true") + return w + } + + if w.isInequalityOperator(n.Op) { + w.newFailure(n, "expression always evaluates to false") + return w + } + + w.newFailure(n, "left and right hand-side sub-expressions are the same") + } + + return w +} + +func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool { + switch t { + case token.LAND, token.LOR, token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ: + return true + } + + return false +} + +func (w *lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { + switch t { + case token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ: + return true + } + + return false +} + +func (w lintConstantLogicalExpr) newFailure(node ast.Node, msg string) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: "logic", + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/context-as-argument.go b/vendor/github.com/mgechev/revive/rule/context-as-argument.go new file mode 100644 index 000000000..6502a07be --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/context-as-argument.go @@ -0,0 +1,63 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ContextAsArgumentRule lints given else constructs. +type ContextAsArgumentRule struct{} + +// Apply applies the rule to given file. +func (r *ContextAsArgumentRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintContextArguments{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ContextAsArgumentRule) Name() string { + return "context-as-argument" +} + +type lintContextArguments struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintContextArguments) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return w + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. 
+ previousArgIsCtx := isPkgDot(fn.Type.Params.List[0].Type, "context", "Context") + for _, arg := range fn.Type.Params.List[1:] { + argIsCtx := isPkgDot(arg.Type, "context", "Context") + if argIsCtx && !previousArgIsCtx { + w.onFailure(lint.Failure{ + Node: arg, + Category: "arg-order", + Failure: "context.Context should be the first parameter of a function", + Confidence: 0.9, + }) + break // only flag one + } + previousArgIsCtx = argIsCtx + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/context-keys-type.go b/vendor/github.com/mgechev/revive/rule/context-keys-type.go new file mode 100644 index 000000000..9c2f0bbd7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/context-keys-type.go @@ -0,0 +1,81 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// ContextKeysType lints given else constructs. +type ContextKeysType struct{} + +// Apply applies the rule to given file. +func (r *ContextKeysType) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintContextKeyTypes{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + file.Pkg.TypeCheck() + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ContextKeysType) Name() string { + return "context-keys-type" +} + +type lintContextKeyTypes struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintContextKeyTypes) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.CallExpr: + checkContextKeyType(w, n) + } + + return w +} + +func checkContextKeyType(w lintContextKeyTypes, x *ast.CallExpr) { + f := w.file + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.Pkg.TypesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: x, + Category: "content", + Failure: fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type), + }) + } +} diff --git a/vendor/github.com/mgechev/revive/rule/cyclomatic.go b/vendor/github.com/mgechev/revive/rule/cyclomatic.go new file mode 100644 index 000000000..f3af2900e --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/cyclomatic.go @@ -0,0 +1,118 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// Based on https://github.com/fzipp/gocyclo + +// CyclomaticRule lints given else constructs. +type CyclomaticRule struct{} + +// Apply applies the rule to given file. +func (r *CyclomaticRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if len(arguments) == 0 { + panic("not enough arguments for " + r.Name()) + } + complexity, ok := arguments[0].(int64) // Alt. non panicking version + if !ok { + panic("invalid argument for cyclomatic complexity") + } + + fileAst := file.AST + walker := lintCyclomatic{ + file: file, + complexity: int(complexity), + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. 
+func (r *CyclomaticRule) Name() string { + return "cyclomatic" +} + +type lintCyclomatic struct { + file *lint.File + complexity int + onFailure func(lint.Failure) +} + +func (w lintCyclomatic) Visit(_ ast.Node) ast.Visitor { + f := w.file + for _, decl := range f.AST.Decls { + if fn, ok := decl.(*ast.FuncDecl); ok { + c := complexity(fn) + if c > w.complexity { + w.onFailure(lint.Failure{ + Confidence: 1, + Category: "maintenance", + Failure: fmt.Sprintf("function %s has cyclomatic complexity %d", funcName(fn), c), + Node: fn, + }) + } + } + } + return nil +} + +// funcName returns the name representation of a function or method: +// "(Type).Name" for methods or simply "Name" for functions. +func funcName(fn *ast.FuncDecl) string { + if fn.Recv != nil { + if fn.Recv.NumFields() > 0 { + typ := fn.Recv.List[0].Type + return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) + } + } + return fn.Name.Name +} + +// recvString returns a string representation of recv of the +// form "T", "*T", or "BADRECV" (if not a proper receiver type). +func recvString(recv ast.Expr) string { + switch t := recv.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return "*" + recvString(t.X) + } + return "BADRECV" +} + +// complexity calculates the cyclomatic complexity of a function. +func complexity(fn *ast.FuncDecl) int { + v := complexityVisitor{} + ast.Walk(&v, fn) + return v.Complexity +} + +type complexityVisitor struct { + // Complexity is the cyclomatic complexity + Complexity int +} + +// Visit implements the ast.Visitor interface. +func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.FuncDecl, *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.CaseClause, *ast.CommClause: + v.Complexity++ + case *ast.BinaryExpr: + if n.Op == token.LAND || n.Op == token.LOR { + v.Complexity++ + } + } + return v +} diff --git a/vendor/github.com/mgechev/revive/rule/deep-exit.go b/vendor/github.com/mgechev/revive/rule/deep-exit.go new file mode 100644 index 000000000..0cdec005a --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/deep-exit.go @@ -0,0 +1,94 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// DeepExitRule lints program exit at functions other than main or init. +type DeepExitRule struct{} + +// Apply applies the rule to given file. +func (r *DeepExitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + var exitFunctions = map[string]map[string]bool{ + "os": {"Exit": true}, + "syscall": {"Exit": true}, + "log": { + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + }, + } + + w := lintDeepExit{onFailure, exitFunctions, file.IsTest()} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *DeepExitRule) Name() string { + return "deep-exit" +} + +type lintDeepExit struct { + onFailure func(lint.Failure) + exitFunctions map[string]map[string]bool + isTestFile bool +} + +func (w lintDeepExit) Visit(node ast.Node) ast.Visitor { + if fd, ok := node.(*ast.FuncDecl); ok { + if w.mustIgnore(fd) { + return nil // skip analysis of this function + } + + return w + } + + se, ok := node.(*ast.ExprStmt) + if !ok { + return w + } + ce, ok := se.X.(*ast.CallExpr) + if !ok { + return w + } + + fc, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return w + } + id, ok := fc.X.(*ast.Ident) + if !ok { + return w + } + + fn := fc.Sel.Name + pkg := id.Name + if w.exitFunctions[pkg] != nil && w.exitFunctions[pkg][fn] { // it's a call to an exit function + w.onFailure(lint.Failure{ + Confidence: 1, + Node: ce, + Category: "bad practice", + Failure: fmt.Sprintf("calls to %s.%s only in main() or init() functions", pkg, fn), + }) + } + + return w +} + +func (w *lintDeepExit) mustIgnore(fd *ast.FuncDecl) bool { + fn := fd.Name.Name + + return fn == "init" || fn == "main" || (w.isTestFile && fn == "TestMain") +} diff --git a/vendor/github.com/mgechev/revive/rule/defer.go b/vendor/github.com/mgechev/revive/rule/defer.go new file mode 100644 index 000000000..2ec7ef47c --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/defer.go @@ -0,0 +1,137 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// DeferRule lints unused params in functions. +type DeferRule struct{} + +// Apply applies the rule to given file. +func (r *DeferRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + allow := r.allowFromArgs(arguments) + + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintDeferRule{onFailure: onFailure, allow: allow} + + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *DeferRule) Name() string { + return "defer" +} + +func (r *DeferRule) allowFromArgs(args lint.Arguments) map[string]bool { + if len(args) < 1 { + allow := map[string]bool{ + "loop": true, + "call-chain": true, + "method-call": true, + "return": true, + "recover": true, + } + + return allow + } + + aa, ok := args[0].([]interface{}) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. Expecting []string, got %T", args[0], args[0])) + } + + allow := make(map[string]bool, len(aa)) + for _, subcase := range aa { + sc, ok := subcase.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. 
Expecting string, got %T", subcase, subcase)) + } + allow[sc] = true + } + + return allow +} + +type lintDeferRule struct { + onFailure func(lint.Failure) + inALoop bool + inADefer bool + inAFuncLit bool + allow map[string]bool +} + +func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.ForStmt: + w.visitSubtree(n.Body, w.inADefer, true, w.inAFuncLit) + return nil + case *ast.RangeStmt: + w.visitSubtree(n.Body, w.inADefer, true, w.inAFuncLit) + return nil + case *ast.FuncLit: + w.visitSubtree(n.Body, w.inADefer, false, true) + return nil + case *ast.ReturnStmt: + if len(n.Results) != 0 && w.inADefer && w.inAFuncLit { + w.newFailure("return in a defer function has no effect", n, 1.0, "logic", "return") + } + case *ast.CallExpr: + if isIdent(n.Fun, "recover") && !w.inADefer { + // confidence is not 1 because recover can be in a function that is deferred elsewhere + w.newFailure("recover must be called inside a deferred function", n, 0.8, "logic", "recover") + } + case *ast.DeferStmt: + w.visitSubtree(n.Call.Fun, true, false, false) + + if w.inALoop { + w.newFailure("prefer not to defer inside loops", n, 1.0, "bad practice", "loop") + } + + switch fn := n.Call.Fun.(type) { + case *ast.CallExpr: + w.newFailure("prefer not to defer chains of function calls", fn, 1.0, "bad practice", "call-chain") + case *ast.SelectorExpr: + if id, ok := fn.X.(*ast.Ident); ok { + isMethodCall := id != nil && id.Obj != nil && id.Obj.Kind == ast.Typ + if isMethodCall { + w.newFailure("be careful when deferring calls to methods without pointer receiver", fn, 0.8, "bad practice", "method-call") + } + } + } + return nil + } + + return w +} + +func (w lintDeferRule) visitSubtree(n ast.Node, inADefer, inALoop, inAFuncLit bool) { + nw := &lintDeferRule{ + onFailure: w.onFailure, + inADefer: inADefer, + inALoop: inALoop, + inAFuncLit: inAFuncLit, + allow: w.allow} + ast.Walk(nw, n) +} + +func (w lintDeferRule) newFailure(msg string, node ast.Node, confidence float64, cat string, subcase string) { + if !w.allow[subcase] { + return + } + + w.onFailure(lint.Failure{ + Confidence: confidence, + Node: node, + Category: cat, + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/dot-imports.go b/vendor/github.com/mgechev/revive/rule/dot-imports.go new file mode 100644 index 000000000..78419d7d6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/dot-imports.go @@ -0,0 +1,54 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// DotImportsRule lints given else constructs. +type DotImportsRule struct{} + +// Apply applies the rule to given file. +func (r *DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintImports{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *DotImportsRule) Name() string { + return "dot-imports" +} + +type lintImports struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintImports) Visit(_ ast.Node) ast.Visitor { + for i, is := range w.fileAst.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." 
&& !w.file.IsTest() { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: "should not use dot imports", + Node: is, + Category: "imports", + }) + } + } + return nil +} diff --git a/vendor/github.com/mgechev/revive/rule/duplicated-imports.go b/vendor/github.com/mgechev/revive/rule/duplicated-imports.go new file mode 100644 index 000000000..485b6a2ea --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/duplicated-imports.go @@ -0,0 +1,39 @@ +package rule + +import ( + "fmt" + + "github.com/mgechev/revive/lint" +) + +// DuplicatedImportsRule lints given else constructs. +type DuplicatedImportsRule struct{} + +// Apply applies the rule to given file. +func (r *DuplicatedImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + impPaths := map[string]struct{}{} + for _, imp := range file.AST.Imports { + path := imp.Path.Value + _, ok := impPaths[path] + if ok { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("Package %s already imported", path), + Node: imp, + Category: "imports", + }) + continue + } + + impPaths[path] = struct{}{} + } + + return failures +} + +// Name returns the rule name. +func (r *DuplicatedImportsRule) Name() string { + return "duplicated-imports" +} diff --git a/vendor/github.com/mgechev/revive/rule/early-return.go b/vendor/github.com/mgechev/revive/rule/early-return.go new file mode 100644 index 000000000..ffb568a86 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/early-return.go @@ -0,0 +1,78 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// EarlyReturnRule lints given else constructs. +type EarlyReturnRule struct{} + +// Apply applies the rule to given file. +func (r *EarlyReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintEarlyReturnRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *EarlyReturnRule) Name() string { + return "early-return" +} + +type lintEarlyReturnRule struct { + onFailure func(lint.Failure) +} + +func (w lintEarlyReturnRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.IfStmt: + if n.Else == nil { + // no else branch + return w + } + + elseBlock, ok := n.Else.(*ast.BlockStmt) + if !ok { + // is if-else-if + return w + } + + lenElseBlock := len(elseBlock.List) + if lenElseBlock < 1 { + // empty else block, continue (there is another rule that warns on empty blocks) + return w + } + + lenThenBlock := len(n.Body.List) + if lenThenBlock < 1 { + // then block is empty thus the stmt can be simplified + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Failure: "if c { } else {... return} can be simplified to if !c { ... return }", + }) + + return w + } + + _, lastThenStmtIsReturn := n.Body.List[lenThenBlock-1].(*ast.ReturnStmt) + _, lastElseStmtIsReturn := elseBlock.List[lenElseBlock-1].(*ast.ReturnStmt) + if lastElseStmtIsReturn && !lastThenStmtIsReturn { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Failure: "if c {...} else {... return } can be simplified to if !c { ... 
return } ...", + }) + } + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/empty-block.go b/vendor/github.com/mgechev/revive/rule/empty-block.go new file mode 100644 index 000000000..fbec4d93c --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/empty-block.go @@ -0,0 +1,65 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// EmptyBlockRule lints given else constructs. +type EmptyBlockRule struct{} + +// Apply applies the rule to given file. +func (r *EmptyBlockRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintEmptyBlock{make(map[*ast.BlockStmt]bool, 0), onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *EmptyBlockRule) Name() string { + return "empty-block" +} + +type lintEmptyBlock struct { + ignore map[*ast.BlockStmt]bool + onFailure func(lint.Failure) +} + +func (w lintEmptyBlock) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + w.ignore[n.Body] = true + return w + case *ast.FuncLit: + w.ignore[n.Body] = true + return w + case *ast.RangeStmt: + if len(n.Body.List) == 0 { + w.onFailure(lint.Failure{ + Confidence: 0.9, + Node: n, + Category: "logic", + Failure: "this block is empty, you can remove it", + }) + return nil // skip visiting the range subtree (it will produce a duplicated failure) + } + case *ast.BlockStmt: + if !w.ignore[n] && len(n.List) == 0 { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "logic", + Failure: "this block is empty, you can remove it", + }) + } + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/empty-lines.go b/vendor/github.com/mgechev/revive/rule/empty-lines.go new file mode 100644 index 000000000..61d9281bf --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/empty-lines.go @@ -0,0 +1,113 @@ +package rule + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// EmptyLinesRule lints empty lines in blocks. +type EmptyLinesRule struct{} + +// Apply applies the rule to given file. +func (r *EmptyLinesRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintEmptyLines{file, file.CommentMap(), onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *EmptyLinesRule) Name() string { + return "empty-lines" +} + +type lintEmptyLines struct { + file *lint.File + cmap ast.CommentMap + onFailure func(lint.Failure) +} + +func (w lintEmptyLines) Visit(node ast.Node) ast.Visitor { + block, ok := node.(*ast.BlockStmt) + if !ok { + return w + } + + w.checkStart(block) + w.checkEnd(block) + + return w +} + +func (w lintEmptyLines) checkStart(block *ast.BlockStmt) { + if len(block.List) == 0 { + return + } + + start := w.position(block.Lbrace) + firstNode := block.List[0] + + if w.commentBetween(start, firstNode) { + return + } + + first := w.position(firstNode.Pos()) + if first.Line-start.Line > 1 { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: block, + Category: "style", + Failure: "extra empty line at the start of a block", + }) + } +} + +func (w lintEmptyLines) checkEnd(block *ast.BlockStmt) { + if len(block.List) < 1 { + return + } + + end := w.position(block.Rbrace) + lastNode := block.List[len(block.List)-1] + + if w.commentBetween(end, lastNode) { + return + } + + last := w.position(lastNode.End()) + if end.Line-last.Line > 1 { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: lastNode, + Category: "style", + Failure: "extra empty line at the end of a block", + }) + } +} + +func (w lintEmptyLines) commentBetween(position token.Position, node ast.Node) bool { + comments := w.cmap.Filter(node).Comments() + if len(comments) == 0 { + return false + } + + for _, comment := range comments { + start, end := w.position(comment.Pos()), w.position(comment.End()) + if start.Line-position.Line == 1 || position.Line-end.Line == 1 { + return true + } + } + + return false +} + +func (w lintEmptyLines) position(pos token.Pos) token.Position { + return w.file.ToPosition(pos) +} diff --git a/vendor/github.com/mgechev/revive/rule/error-naming.go b/vendor/github.com/mgechev/revive/rule/error-naming.go new file mode 100644 index 000000000..3a1080625 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/error-naming.go @@ -0,0 +1,79 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/lint" +) + +// ErrorNamingRule lints given else constructs. +type ErrorNamingRule struct{} + +// Apply applies the rule to given file. +func (r *ErrorNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintErrors{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. 
+func (r *ErrorNamingRule) Name() string { + return "error-naming" +} + +type lintErrors struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintErrors) Visit(_ ast.Node) ast.Visitor { + for _, decl := range w.fileAst.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + w.onFailure(lint.Failure{ + Node: id, + Confidence: 0.9, + Category: "naming", + Failure: fmt.Sprintf("error var %s should have name of the form %sFoo", id.Name, prefix), + }) + } + } + } + return nil +} diff --git a/vendor/github.com/mgechev/revive/rule/error-return.go b/vendor/github.com/mgechev/revive/rule/error-return.go new file mode 100644 index 000000000..737d8c66f --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/error-return.go @@ -0,0 +1,67 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ErrorReturnRule lints given else constructs. +type ErrorReturnRule struct{} + +// Apply applies the rule to given file. +func (r *ErrorReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintErrorReturn{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ErrorReturnRule) Name() string { + return "error-return" +} + +type lintErrorReturn struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintErrorReturn) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return w + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return w + } + if isIdent(ret[len(ret)-1].Type, "error") { + return nil + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + w.onFailure(lint.Failure{ + Category: "arg-order", + Confidence: 0.9, + Node: fn, + Failure: "error should be the last type when returning multiple items", + }) + break // only flag one + } + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/error-strings.go b/vendor/github.com/mgechev/revive/rule/error-strings.go new file mode 100644 index 000000000..b8a5b7ed7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/error-strings.go @@ -0,0 +1,98 @@ +package rule + +import ( + "go/ast" + "go/token" + "strconv" + "unicode" + "unicode/utf8" + + "github.com/mgechev/revive/lint" +) + +// ErrorStringsRule lints given else constructs. +type ErrorStringsRule struct{} + +// Apply applies the rule to given file. 
+func (r *ErrorStringsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintErrorStrings{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ErrorStringsRule) Name() string { + return "error-strings" +} + +type lintErrorStrings struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintErrorStrings) Visit(n ast.Node) ast.Visitor { + ce, ok := n.(*ast.CallExpr) + if !ok { + return w + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return w + } + if len(ce.Args) < 1 { + return w + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return w + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return w + } + clean, conf := lintErrorString(s) + if clean { + return w + } + + w.onFailure(lint.Failure{ + Node: str, + Confidence: conf, + Category: "errors", + Failure: "error strings should not be capitalized or end with punctuation or a newline", + }) + return w +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} diff --git a/vendor/github.com/mgechev/revive/rule/errorf.go b/vendor/github.com/mgechev/revive/rule/errorf.go new file mode 100644 index 000000000..1bffbab5b --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/errorf.go @@ -0,0 +1,93 @@ +package rule + +import ( + "fmt" + "go/ast" + "regexp" + "strings" + + "github.com/mgechev/revive/lint" +) + +// ErrorfRule lints given else constructs. +type ErrorfRule struct{} + +// Apply applies the rule to given file. +func (r *ErrorfRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintErrorf{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + file.Pkg.TypeCheck() + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. 
+func (r *ErrorfRule) Name() string { + return "errorf" +} + +type lintErrorf struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintErrorf) Visit(n ast.Node) ast.Visitor { + ce, ok := n.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return w + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := w.file.Pkg.TypeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return w + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return w + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = w.file.Render(se.X) + } + + failure := lint.Failure{ + Category: "errors", + Node: n, + Confidence: 1, + Failure: fmt.Sprintf("should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", w.file.Render(se), errorfPrefix), + } + + m := srcLineWithMatch(w.file, ce, `^(.*)`+w.file.Render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + failure.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + w.onFailure(failure) + + return w +} + +func srcLineWithMatch(file *lint.File, node ast.Node, pattern string) (m []string) { + line := srcLine(file.Content(), file.ToPosition(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} diff --git a/vendor/github.com/mgechev/revive/rule/exported.go b/vendor/github.com/mgechev/revive/rule/exported.go new file mode 100644 index 000000000..b68f2bacc --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/exported.go @@ -0,0 +1,272 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "github.com/mgechev/revive/lint" +) + +// ExportedRule lints given else constructs. +type ExportedRule struct{} + +// Apply applies the rule to given file. +func (r *ExportedRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if isTest(file) { + return failures + } + + fileAst := file.AST + walker := lintExported{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + genDeclMissingComments: make(map[*ast.GenDecl]bool), + } + + ast.Walk(&walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ExportedRule) Name() string { + return "exported" +} + +type lintExported struct { + file *lint.File + fileAst *ast.File + lastGen *ast.GenDecl + genDeclMissingComments map[*ast.GenDecl]bool + onFailure func(lint.Failure) +} + +func (w *lintExported) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if w.file.Pkg.Sortable[recv] { + return + } + } + name = recv + "." 
+ name + } + if fn.Doc == nil { + w.onFailure(lint.Failure{ + Node: fn, + Confidence: 1, + Category: "comments", + Failure: fmt.Sprintf("exported %s %s should have comment or be unexported", kind, name), + }) + return + } + s := normalizeText(fn.Doc.Text()) + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + w.onFailure(lint.Failure{ + Node: fn.Doc, + Confidence: 0.8, + Category: "comments", + Failure: fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, name, prefix), + }) + } +} + +func (w *lintExported) checkStutter(id *ast.Ident, thing string) { + pkg, name := w.fileAst.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + w.onFailure(lint.Failure{ + Node: id, + Confidence: 0.8, + Category: "naming", + Failure: fmt.Sprintf("%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem), + }) + } +} + +func (w *lintExported) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + w.onFailure(lint.Failure{ + Node: t, + Confidence: 1, + Category: "comments", + Failure: fmt.Sprintf("exported type %v should have comment or be unexported", t.Name), + }) + return + } + + s := normalizeText(doc.Text()) + articles := [...]string{"A", "An", "The", "This"} + for _, a := range articles { + if t.Name.Name == a { + continue + } + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + w.onFailure(lint.Failure{ + Node: doc, + Confidence: 1, + Category: "comments", + Failure: fmt.Sprintf(`comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name), + }) + } +} + +func (w *lintExported) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + w.onFailure(lint.Failure{ + Category: "comments", + Confidence: 1, + Failure: fmt.Sprintf("exported %s %s should have its own declaration", kind, n.Name), + Node: vs, + }) + return + } + } + } + + // Only one name. + name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + w.onFailure(lint.Failure{ + Confidence: 1, + Node: vs, + Category: "comments", + Failure: fmt.Sprintf("exported %s %s should have comment%s or be unexported", kind, name, block), + }) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. 
+ if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + s := normalizeText(doc.Text()) + if !strings.HasPrefix(s, prefix) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: doc, + Category: "comments", + Failure: fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, name, prefix), + }) + } +} + +// normalizeText is a helper function that normalizes comment strings by: +// * removing one leading space +// +// This function is needed because ast.CommentGroup.Text() does not handle //-style and /*-style comments uniformly +func normalizeText(t string) string { + return strings.TrimPrefix(t, " ") +} + +func (w *lintExported) Visit(n ast.Node) ast.Visitor { + switch v := n.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return nil + } + // token.CONST, token.TYPE or token.VAR + w.lastGen = v + return w + case *ast.FuncDecl: + w.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + w.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return nil + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = w.lastGen.Doc + } + w.lintTypeDoc(v, doc) + w.checkStutter(v.Name, "type") + // Don't proceed inside types. + return nil + case *ast.ValueSpec: + w.lintValueSpecDoc(v, w.lastGen, w.genDeclMissingComments) + return nil + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/file-header.go b/vendor/github.com/mgechev/revive/rule/file-header.go new file mode 100644 index 000000000..6df974e91 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/file-header.go @@ -0,0 +1,69 @@ +package rule + +import ( + "regexp" + + "github.com/mgechev/revive/lint" +) + +// FileHeaderRule lints given else constructs. +type FileHeaderRule struct{} + +var ( + multiRegexp = regexp.MustCompile("^/\\*") + singleRegexp = regexp.MustCompile("^//") +) + +// Apply applies the rule to given file. +func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + if len(arguments) != 1 { + panic(`invalid configuration for "file-header" rule`) + } + + header, ok := arguments[0].(string) + if !ok { + panic(`invalid argument for "file-header" rule: first argument should be a string`) + } + + failure := []lint.Failure{ + { + Node: file.AST, + Confidence: 1, + Failure: "the file doesn't have an appropriate header", + }, + } + + if len(file.AST.Comments) == 0 { + return failure + } + + g := file.AST.Comments[0] + if g == nil { + return failure + } + comment := "" + for _, c := range g.List { + text := c.Text + if multiRegexp.Match([]byte(text)) { + text = text[2 : len(text)-2] + } else if singleRegexp.Match([]byte(text)) { + text = text[2:] + } + comment += text + } + + regex, err := regexp.Compile(header) + if err != nil { + panic(err.Error()) + } + + if !regex.Match([]byte(comment)) { + return failure + } + return nil +} + +// Name returns the rule name. 
+func (r *FileHeaderRule) Name() string { + return "file-header" +} diff --git a/vendor/github.com/mgechev/revive/rule/flag-param.go b/vendor/github.com/mgechev/revive/rule/flag-param.go new file mode 100644 index 000000000..6cb6daea9 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/flag-param.go @@ -0,0 +1,104 @@ +package rule + +import ( + "fmt" + "github.com/mgechev/revive/lint" + "go/ast" +) + +// FlagParamRule lints given else constructs. +type FlagParamRule struct{} + +// Apply applies the rule to given file. +func (r *FlagParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintFlagParamRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *FlagParamRule) Name() string { + return "flag-parameter" +} + +type lintFlagParamRule struct { + onFailure func(lint.Failure) +} + +func (w lintFlagParamRule) Visit(node ast.Node) ast.Visitor { + fd, ok := node.(*ast.FuncDecl) + if !ok { + return w + } + + if fd.Body == nil { + return nil // skip whole function declaration + } + + for _, p := range fd.Type.Params.List { + t := p.Type + + id, ok := t.(*ast.Ident) + if !ok { + continue + } + + if id.Name != "bool" { + continue + } + + cv := conditionVisitor{p.Names, fd, w} + ast.Walk(cv, fd.Body) + } + + return w +} + +type conditionVisitor struct { + ids []*ast.Ident + fd *ast.FuncDecl + linter lintFlagParamRule +} + +func (w conditionVisitor) Visit(node ast.Node) ast.Visitor { + ifStmt, ok := node.(*ast.IfStmt) + if !ok { + return w + } + + fselect := func(n ast.Node) bool { + ident, ok := n.(*ast.Ident) + if !ok { + return false + } + + for _, id := range w.ids { + if ident.Name == id.Name { + return true + } + } + + return false + } + + uses := pick(ifStmt.Cond, fselect, nil) + + if len(uses) < 1 { + return w + } + + w.linter.onFailure(lint.Failure{ + Confidence: 1, + Node: w.fd.Type.Params, + Category: "bad practice", + Failure: fmt.Sprintf("parameter '%s' seems to be a control flag, avoid control coupling", uses[0]), + }) + + return nil +} diff --git a/vendor/github.com/mgechev/revive/rule/function-length.go b/vendor/github.com/mgechev/revive/rule/function-length.go new file mode 100644 index 000000000..e1cee21cf --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/function-length.go @@ -0,0 +1,153 @@ +package rule + +import ( + "fmt" + "go/ast" + "reflect" + + "github.com/mgechev/revive/lint" +) + +// FunctionLength lint. +type FunctionLength struct{} + +// Apply applies the rule to given file. +func (r *FunctionLength) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + maxStmt, maxLines := r.parseArguments(arguments) + + var failures []lint.Failure + + walker := lintFuncLength{ + file: file, + maxStmt: int(maxStmt), + maxLines: int(maxLines), + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (r *FunctionLength) Name() string { + return "function-length" +} + +func (r *FunctionLength) parseArguments(arguments lint.Arguments) (maxStmt int64, maxLines int64) { + if len(arguments) != 2 { + panic(fmt.Sprintf(`invalid configuration for "function-length" rule, expected 2 arguments but got %d`, len(arguments))) + } + + maxStmt, maxStmtOk := arguments[0].(int64) + if !maxStmtOk { + panic(fmt.Sprintf(`invalid configuration value for max statements in "function-length" rule; need int64 but got %T`, arguments[0])) + } + if maxStmt < 0 { + panic(fmt.Sprintf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxStmt)) + } + + maxLines, maxLinesOk := arguments[1].(int64) + if !maxLinesOk { + panic(fmt.Sprintf(`invalid configuration value for max lines in "function-length" rule; need int64 but got %T`, arguments[1])) + } + if maxLines < 0 { + panic(fmt.Sprintf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxLines)) + } + + return +} + +type lintFuncLength struct { + file *lint.File + maxStmt int + maxLines int + onFailure func(lint.Failure) +} + +func (w lintFuncLength) Visit(n ast.Node) ast.Visitor { + node, ok := n.(*ast.FuncDecl) + if !ok { + return w + } + + body := node.Body + if body == nil || len(node.Body.List) == 0 { + return nil + } + + if w.maxStmt > 0 { + stmtCount := w.countStmts(node.Body.List) + if stmtCount > w.maxStmt { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of statements per function exceeded; max %d but got %d", w.maxStmt, stmtCount), + Node: node, + }) + } + } + + if w.maxLines > 0 { + lineCount := w.countLines(node.Body) + if lineCount > w.maxLines { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of lines per function exceeded; max %d but got %d", w.maxLines, lineCount), + Node: node, + }) + } + } + + return nil +} + +func (w lintFuncLength) countLines(b *ast.BlockStmt) int { + return w.file.ToPosition(b.End()).Line - w.file.ToPosition(b.Pos()).Line - 1 +} + +func (w lintFuncLength) countStmts(b []ast.Stmt) int { + count := 0 + for _, s := range b { + switch stmt := s.(type) { + case *ast.BlockStmt: + count += w.countStmts(stmt.List) + case *ast.IfStmt: + count += 1 + w.countBodyListStmts(stmt) + if stmt.Else != nil { + elseBody, ok := stmt.Else.(*ast.BlockStmt) + if ok { + count += w.countStmts(elseBody.List) + } + } + case *ast.ForStmt, *ast.RangeStmt, + *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + count += 1 + w.countBodyListStmts(stmt) + case *ast.CaseClause: + count += w.countStmts(stmt.Body) + case *ast.AssignStmt: + count += 1 + w.countFuncLitStmts(stmt.Rhs[0]) + case *ast.GoStmt: + count += 1 + w.countFuncLitStmts(stmt.Call.Fun) + case *ast.DeferStmt: + count += 1 + w.countFuncLitStmts(stmt.Call.Fun) + default: + count++ + } + } + + return count +} + +func (w lintFuncLength) countFuncLitStmts(stmt ast.Expr) int { + if block, ok := stmt.(*ast.FuncLit); ok { + return w.countStmts(block.Body.List) + } + return 0 +} + +func (w lintFuncLength) countBodyListStmts(t interface{}) int { + i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() + return w.countStmts(i.([]ast.Stmt)) +} diff --git a/vendor/github.com/mgechev/revive/rule/function-result-limit.go b/vendor/github.com/mgechev/revive/rule/function-result-limit.go new file mode 100644 index 000000000..1850fc419 --- /dev/null +++ 
b/vendor/github.com/mgechev/revive/rule/function-result-limit.go @@ -0,0 +1,68 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// FunctionResultsLimitRule lints given else constructs. +type FunctionResultsLimitRule struct{} + +// Apply applies the rule to given file. +func (r *FunctionResultsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + if len(arguments) != 1 { + panic(`invalid configuration for "function-result-limit"`) + } + + max, ok := arguments[0].(int64) // Alt. non panicking version + if !ok { + panic(fmt.Sprintf(`invalid value passed as return results number to the "function-result-limit" rule; need int64 but got %T`, arguments[0])) + } + if max < 0 { + panic(`the value passed as return results number to the "function-result-limit" rule cannot be negative`) + } + + var failures []lint.Failure + + walker := lintFunctionResultsNum{ + max: int(max), + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *FunctionResultsLimitRule) Name() string { + return "function-result-limit" +} + +type lintFunctionResultsNum struct { + max int + onFailure func(lint.Failure) +} + +func (w lintFunctionResultsNum) Visit(n ast.Node) ast.Visitor { + node, ok := n.(*ast.FuncDecl) + if ok { + num := 0 + if node.Type.Results != nil { + num = node.Type.Results.NumFields() + } + if num > w.max { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of return results per function exceeded; max %d but got %d", w.max, num), + Node: node.Type, + }) + return w + } + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/get-return.go b/vendor/github.com/mgechev/revive/rule/get-return.go new file mode 100644 index 000000000..494ab6669 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/get-return.go @@ -0,0 +1,70 @@ +package rule + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// GetReturnRule lints given else constructs. +type GetReturnRule struct{} + +// Apply applies the rule to given file. +func (r *GetReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintReturnRule{onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *GetReturnRule) Name() string { + return "get-return" +} + +type lintReturnRule struct { + onFailure func(lint.Failure) +} + +func isGetter(name string) bool { + if strings.HasPrefix(strings.ToUpper(name), "GET") { + if len(name) > 3 { + c := name[3] + return !(c >= 'a' && c <= 'z') + } + } + + return false +} + +func hasResults(rs *ast.FieldList) bool { + return rs != nil && len(rs.List) > 0 +} + +func (w lintReturnRule) Visit(node ast.Node) ast.Visitor { + fd, ok := node.(*ast.FuncDecl) + if !ok { + return w + } + + if !isGetter(fd.Name.Name) { + return w + } + if !hasResults(fd.Type.Results) { + w.onFailure(lint.Failure{ + Confidence: 0.8, + Node: fd, + Category: "logic", + Failure: fmt.Sprintf("function '%s' seems to be a getter but it does not return any result", fd.Name.Name), + }) + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/identical-branches.go b/vendor/github.com/mgechev/revive/rule/identical-branches.go new file mode 100644 index 000000000..094a79147 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/identical-branches.go @@ -0,0 +1,82 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// IdenticalBranchesRule warns on constant logical expressions. +type IdenticalBranchesRule struct{} + +// Apply applies the rule to given file. +func (r *IdenticalBranchesRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintIdenticalBranches{astFile, onFailure} + ast.Walk(w, astFile) + return failures +} + +// Name returns the rule name. +func (r *IdenticalBranchesRule) Name() string { + return "identical-branches" +} + +type lintIdenticalBranches struct { + file *ast.File + onFailure func(lint.Failure) +} + +func (w *lintIdenticalBranches) Visit(node ast.Node) ast.Visitor { + n, ok := node.(*ast.IfStmt) + if !ok { + return w + } + + if n.Else == nil { + return w + } + branches := []*ast.BlockStmt{n.Body} + + elseBranch, ok := n.Else.(*ast.BlockStmt) + if !ok { // if-else-if construction + return w + } + branches = append(branches, elseBranch) + + if w.identicalBranches(branches) { + w.newFailure(n, "both branches of the if are identical") + } + + return w +} + +func (w *lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { + if len(branches) < 2 { + return false + } + + ref := gofmt(branches[0]) + for i := 1; i < len(branches); i++ { + if gofmt(branches[i]) != ref { + return false + } + } + + return true +} + +func (w lintIdenticalBranches) newFailure(node ast.Node, msg string) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: "logic", + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/if-return.go b/vendor/github.com/mgechev/revive/rule/if-return.go new file mode 100644 index 000000000..c275d2766 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/if-return.go @@ -0,0 +1,115 @@ +package rule + +import ( + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/lint" +) + +// IfReturnRule lints given else constructs. +type IfReturnRule struct{} + +// Apply applies the rule to given file. 
+func (r *IfReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintElseError{astFile, onFailure} + ast.Walk(w, astFile) + return failures +} + +// Name returns the rule name. +func (r *IfReturnRule) Name() string { + return "if-return" +} + +type lintElseError struct { + file *ast.File + onFailure func(lint.Failure) +} + +func (w *lintElseError) Visit(node ast.Node) ast.Visitor { + switch v := node.(type) { + case *ast.BlockStmt: + for i := 0; i < len(v.List)-1; i++ { + // if var := whatever; var != nil { return var } + s, ok := v.List[i].(*ast.IfStmt) + if !ok || s.Body == nil || len(s.Body.List) != 1 || s.Else != nil { + continue + } + assign, ok := s.Init.(*ast.AssignStmt) + if !ok || len(assign.Lhs) != 1 || !(assign.Tok == token.DEFINE || assign.Tok == token.ASSIGN) { + continue + } + id, ok := assign.Lhs[0].(*ast.Ident) + if !ok { + continue + } + expr, ok := s.Cond.(*ast.BinaryExpr) + if !ok || expr.Op != token.NEQ { + continue + } + if lhs, ok := expr.X.(*ast.Ident); !ok || lhs.Name != id.Name { + continue + } + if rhs, ok := expr.Y.(*ast.Ident); !ok || rhs.Name != "nil" { + continue + } + r, ok := s.Body.List[0].(*ast.ReturnStmt) + if !ok || len(r.Results) != 1 { + continue + } + if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != id.Name { + continue + } + + // return nil + r, ok = v.List[i+1].(*ast.ReturnStmt) + if !ok || len(r.Results) != 1 { + continue + } + if r, ok := r.Results[0].(*ast.Ident); !ok || r.Name != "nil" { + continue + } + + // check if there are any comments explaining the construct, don't emit an error if there are some. + if containsComments(s.Pos(), r.Pos(), w.file) { + continue + } + + w.onFailure(lint.Failure{ + Confidence: .9, + Node: v.List[i], + Failure: "redundant if ...; err != nil check, just return error instead.", + }) + } + } + return w +} + +func containsComments(start, end token.Pos, f *ast.File) bool { + for _, cgroup := range f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/import-shadowing.go b/vendor/github.com/mgechev/revive/rule/import-shadowing.go new file mode 100644 index 000000000..7b34c90f3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/import-shadowing.go @@ -0,0 +1,108 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/lint" +) + +// ImportShadowingRule lints given else constructs. +type ImportShadowingRule struct{} + +// Apply applies the rule to given file. 
+func (r *ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + importNames := map[string]struct{}{} + for _, imp := range file.AST.Imports { + importNames[getName(imp)] = struct{}{} + } + + fileAst := file.AST + walker := importShadowing{ + packageNameIdent: fileAst.Name, + importNames: importNames, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + alreadySeen: map[*ast.Object]struct{}{}, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ImportShadowingRule) Name() string { + return "import-shadowing" +} + +func getName(imp *ast.ImportSpec) string { + const pathSep = "/" + const strDelim = `"` + if imp.Name != nil { + return imp.Name.Name + } + + path := imp.Path.Value + i := strings.LastIndex(path, pathSep) + if i == -1 { + return strings.Trim(path, strDelim) + } + + return strings.Trim(path[i+1:], strDelim) +} + +type importShadowing struct { + packageNameIdent *ast.Ident + importNames map[string]struct{} + onFailure func(lint.Failure) + alreadySeen map[*ast.Object]struct{} +} + +// Visit visits AST nodes and checks if id nodes (ast.Ident) shadow an import name +func (w importShadowing) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.AssignStmt: + if n.Tok == token.DEFINE { + return w // analyze variable declarations of the form id := expr + } + + return nil // skip assigns of the form id = expr (not an id declaration) + case *ast.CallExpr, // skip call expressions (not an id declaration) + *ast.ImportSpec, // skip import section subtree because we already have the list of imports + *ast.KeyValueExpr, // skip analysis of key-val expressions ({key:value}): ids of such expressions, even the same of an import name, do not shadow the import name + *ast.ReturnStmt, // skip skipping analysis of returns, ids in expression were already analyzed + *ast.SelectorExpr, // skip analysis of selector expressions (anId.otherId): because if anId shadows an import name, it was already detected, and otherId does not shadows the import name + *ast.StructType: // skip analysis of struct type because struct fields can not shadow an import name + return nil + case *ast.Ident: + if n == w.packageNameIdent { + return nil // skip the ident corresponding to the package name of this file + } + + id := n.Name + if id == "_" { + return w // skip _ id + } + + _, isImportName := w.importNames[id] + _, alreadySeen := w.alreadySeen[n.Obj] + if isImportName && !alreadySeen { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "namming", + Failure: fmt.Sprintf("The name '%s' shadows an import name", id), + }) + + w.alreadySeen[n.Obj] = struct{}{} + } + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/imports-blacklist.go b/vendor/github.com/mgechev/revive/rule/imports-blacklist.go new file mode 100644 index 000000000..31ef901e5 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/imports-blacklist.go @@ -0,0 +1,52 @@ +package rule + +import ( + "fmt" + + "github.com/mgechev/revive/lint" +) + +// ImportsBlacklistRule lints given else constructs. +type ImportsBlacklistRule struct{} + +// Apply applies the rule to given file. 
+func (r *ImportsBlacklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if file.IsTest() { + return failures // skip, test file + } + + blacklist := make(map[string]bool, len(arguments)) + + for _, arg := range arguments { + argStr, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the imports-blacklist rule. Expecting a string, got %T", arg)) + } + // we add quotes if not present, because when parsed, the value of the AST node, will be quoted + if len(argStr) > 2 && argStr[0] != '"' && argStr[len(argStr)-1] != '"' { + argStr = fmt.Sprintf(`"%s"`, argStr) + } + blacklist[argStr] = true + } + + for _, is := range file.AST.Imports { + path := is.Path + if path != nil && blacklist[path.Value] { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: "should not use the following blacklisted import: " + path.Value, + Node: is, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (r *ImportsBlacklistRule) Name() string { + return "imports-blacklist" +} diff --git a/vendor/github.com/mgechev/revive/rule/increment-decrement.go b/vendor/github.com/mgechev/revive/rule/increment-decrement.go new file mode 100644 index 000000000..5d6b17671 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/increment-decrement.go @@ -0,0 +1,74 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// IncrementDecrementRule lints given else constructs. +type IncrementDecrementRule struct{} + +// Apply applies the rule to given file. +func (r *IncrementDecrementRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintIncrementDecrement{ + file: file, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *IncrementDecrementRule) Name() string { + return "increment-decrement" +} + +type lintIncrementDecrement struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintIncrementDecrement) Visit(n ast.Node) ast.Visitor { + as, ok := n.(*ast.AssignStmt) + if !ok { + return w + } + if len(as.Lhs) != 1 { + return w + } + if !isOne(as.Rhs[0]) { + return w + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return w + } + w.onFailure(lint.Failure{ + Confidence: 0.8, + Node: as, + Category: "unary-op", + Failure: fmt.Sprintf("should replace %s with %s%s", w.file.Render(as), w.file.Render(as.Lhs[0]), suffix), + }) + return w +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} diff --git a/vendor/github.com/mgechev/revive/rule/indent-error-flow.go b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go new file mode 100644 index 000000000..4c9799b2a --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go @@ -0,0 +1,78 @@ +package rule + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// IndentErrorFlowRule lints given else constructs. +type IndentErrorFlowRule struct{} + +// Apply applies the rule to given file. 
+func (r *IndentErrorFlowRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintElse{make(map[*ast.IfStmt]bool), onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *IndentErrorFlowRule) Name() string { + return "indent-error-flow" +} + +type lintElse struct { + ignore map[*ast.IfStmt]bool + onFailure func(lint.Failure) +} + +func (w lintElse) Visit(node ast.Node) ast.Visitor { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return w + } + if w.ignore[ifStmt] { + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + w.ignore[elseif] = true + } + return w + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + w.ignore[elseif] = true + return w + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return w + } + if len(ifStmt.Body.List) == 0 { + return w + } + shortDecl := false // does the if statement have a ":=" initialization statement? + if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + w.onFailure(lint.Failure{ + Confidence: 1, + Node: ifStmt.Else, + Category: "indent", + Failure: "if block ends with a return statement, so drop this else and outdent its block" + extra, + }) + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/line-length-limit.go b/vendor/github.com/mgechev/revive/rule/line-length-limit.go new file mode 100644 index 000000000..5ee057079 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/line-length-limit.go @@ -0,0 +1,84 @@ +package rule + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "strings" + "unicode/utf8" + + "github.com/mgechev/revive/lint" +) + +// LineLengthLimitRule lints given else constructs. +type LineLengthLimitRule struct{} + +// Apply applies the rule to given file. +func (r *LineLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + if len(arguments) != 1 { + panic(`invalid configuration for "line-length-limit"`) + } + + max, ok := arguments[0].(int64) // Alt. non panicking version + if !ok || max < 0 { + panic(`invalid value passed as argument number to the "line-length-limit" rule`) + } + + var failures []lint.Failure + checker := lintLineLengthNum{ + max: int(max), + file: file, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + checker.check() + + return failures +} + +// Name returns the rule name. +func (r *LineLengthLimitRule) Name() string { + return "line-length-limit" +} + +type lintLineLengthNum struct { + max int + file *lint.File + onFailure func(lint.Failure) +} + +func (r lintLineLengthNum) check() { + f := bytes.NewReader(r.file.Content()) + spaces := strings.Repeat(" ", 4) // tab width = 4 + l := 1 + s := bufio.NewScanner(f) + for s.Scan() { + t := s.Text() + t = strings.Replace(t, "\t", spaces, -1) + c := utf8.RuneCountInString(t) + if c > r.max { + r.onFailure(lint.Failure{ + Category: "code-style", + Position: lint.FailurePosition{ + // Offset not set; it is non-trivial, and doesn't appear to be needed. 
+ Start: token.Position{ + Filename: r.file.Name, + Line: l, + Column: 0, + }, + End: token.Position{ + Filename: r.file.Name, + Line: l, + Column: c, + }, + }, + Confidence: 1, + Failure: fmt.Sprintf("line is %d characters, out of limit %d", c, r.max), + }) + } + l++ + } +} diff --git a/vendor/github.com/mgechev/revive/rule/max-public-structs.go b/vendor/github.com/mgechev/revive/rule/max-public-structs.go new file mode 100644 index 000000000..551b370a4 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/max-public-structs.go @@ -0,0 +1,70 @@ +package rule + +import ( + "go/ast" + + "strings" + + "github.com/mgechev/revive/lint" +) + +// MaxPublicStructsRule lints given else constructs. +type MaxPublicStructsRule struct{} + +// Apply applies the rule to given file. +func (r *MaxPublicStructsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + if len(arguments) == 0 { + panic("not enough arguments for " + r.Name()) + } + + fileAst := file.AST + walker := &lintMaxPublicStructs{ + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, fileAst) + + max, ok := arguments[0].(int64) // Alt. non panicking version + if !ok { + panic(`invalid value passed as argument number to the "max-public-structs" rule`) + } + + if walker.current > max { + walker.onFailure(lint.Failure{ + Failure: "you have exceeded the maximum number of public struct declarations", + Confidence: 1, + Node: fileAst, + Category: "style", + }) + } + + return failures +} + +// Name returns the rule name. +func (r *MaxPublicStructsRule) Name() string { + return "max-public-structs" +} + +type lintMaxPublicStructs struct { + current int64 + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w *lintMaxPublicStructs) Visit(n ast.Node) ast.Visitor { + switch v := n.(type) { + case *ast.TypeSpec: + name := v.Name.Name + first := string(name[0]) + if strings.ToUpper(first) == first { + w.current++ + } + break + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/modifies-param.go b/vendor/github.com/mgechev/revive/rule/modifies-param.go new file mode 100644 index 000000000..55136e6c8 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/modifies-param.go @@ -0,0 +1,80 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ModifiesParamRule lints given else constructs. +type ModifiesParamRule struct{} + +// Apply applies the rule to given file. +func (r *ModifiesParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintModifiesParamRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *ModifiesParamRule) Name() string { + return "modifies-parameter" +} + +type lintModifiesParamRule struct { + params map[string]bool + onFailure func(lint.Failure) +} + +func retrieveParamNames(pl []*ast.Field) map[string]bool { + result := make(map[string]bool, len(pl)) + for _, p := range pl { + for _, n := range p.Names { + if n.Name == "_" { + continue + } + + result[n.Name] = true + } + } + return result +} + +func (w lintModifiesParamRule) Visit(node ast.Node) ast.Visitor { + switch v := node.(type) { + case *ast.FuncDecl: + w.params = retrieveParamNames(v.Type.Params.List) + case *ast.IncDecStmt: + if id, ok := v.X.(*ast.Ident); ok { + checkParam(id, &w) + } + case *ast.AssignStmt: + lhs := v.Lhs + for _, e := range lhs { + id, ok := e.(*ast.Ident) + if ok { + checkParam(id, &w) + } + } + } + + return w +} + +func checkParam(id *ast.Ident, w *lintModifiesParamRule) { + if w.params[id.Name] { + w.onFailure(lint.Failure{ + Confidence: 0.5, // confidence is low because of shadow variables + Node: id, + Category: "bad practice", + Failure: fmt.Sprintf("parameter '%s' seems to be modified", id), + }) + } +} diff --git a/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go new file mode 100644 index 000000000..4fe22ddf3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go @@ -0,0 +1,134 @@ +package rule + +import ( + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// ModifiesValRecRule lints assignments to value method-receivers. +type ModifiesValRecRule struct{} + +// Apply applies the rule to given file. +func (r *ModifiesValRecRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintModifiesValRecRule{file: file, onFailure: onFailure} + file.Pkg.TypeCheck() + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *ModifiesValRecRule) Name() string { + return "modifies-value-receiver" +} + +type lintModifiesValRecRule struct { + file *lint.File + onFailure func(lint.Failure) +} + +func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + if n.Recv == nil { + return nil // skip, not a method + } + + receiver := n.Recv.List[0] + if _, ok := receiver.Type.(*ast.StarExpr); ok { + return nil // skip, method with pointer receiver + } + + if w.skipType(receiver.Type) { + return nil // skip, receiver is a map or array + } + + if len(receiver.Names) < 1 { + return nil // skip, anonymous receiver + } + + receiverName := receiver.Names[0].Name + if receiverName == "_" { + return nil // skip, anonymous receiver + } + + fselect := func(n ast.Node) bool { + // look for assignments with the receiver in the right hand + asgmt, ok := n.(*ast.AssignStmt) + if !ok { + return false + } + + for _, exp := range asgmt.Lhs { + switch e := exp.(type) { + case *ast.IndexExpr: // receiver...[] = ... + continue + case *ast.StarExpr: // *receiver = ... + continue + case *ast.SelectorExpr: // receiver.field = ... + name := w.getNameFromExpr(e.X) + if name == "" || name != receiverName { + continue + } + + if w.skipType(ast.Expr(e.Sel)) { + continue + } + + case *ast.Ident: // receiver := ... 
+ if e.Name != receiverName { + continue + } + default: + continue + } + + return true + } + + return false + } + + assignmentsToReceiver := pick(n.Body, fselect, nil) + + for _, assignment := range assignmentsToReceiver { + w.onFailure(lint.Failure{ + Node: assignment, + Confidence: 1, + Failure: "suspicious assignment to a by-value method receiver", + }) + } + } + + return w +} + +func (w lintModifiesValRecRule) skipType(t ast.Expr) bool { + rt := w.file.Pkg.TypeOf(t) + if rt == nil { + return false + } + + rt = rt.Underlying() + rtName := rt.String() + + // skip when receiver is a map or array + return strings.HasPrefix(rtName, "[]") || strings.HasPrefix(rtName, "map[") +} + +func (lintModifiesValRecRule) getNameFromExpr(ie ast.Expr) string { + ident, ok := ie.(*ast.Ident) + if !ok { + return "" + } + + return ident.Name +} diff --git a/vendor/github.com/mgechev/revive/rule/nested-structs.go b/vendor/github.com/mgechev/revive/rule/nested-structs.go new file mode 100644 index 000000000..cfe9648b2 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/nested-structs.go @@ -0,0 +1,61 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// NestedStructs lints nested structs. +type NestedStructs struct{} + +// Apply applies the rule to given file. +func (r *NestedStructs) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if len(arguments) > 0 { + panic(r.Name() + " doesn't take any arguments") + } + + walker := &lintNestedStructs{ + fileAST: file.AST, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *NestedStructs) Name() string { + return "nested-structs" +} + +type lintNestedStructs struct { + fileAST *ast.File + onFailure func(lint.Failure) +} + +func (l *lintNestedStructs) Visit(n ast.Node) ast.Visitor { + switch v := n.(type) { + case *ast.FuncDecl: + if v.Body != nil { + ast.Walk(l, v.Body) + } + return nil + case *ast.Field: + if _, ok := v.Type.(*ast.StructType); ok { + l.onFailure(lint.Failure{ + Failure: "no nested structs are allowed", + Category: "style", + Node: v, + Confidence: 1, + }) + break + } + } + return l +} diff --git a/vendor/github.com/mgechev/revive/rule/package-comments.go b/vendor/github.com/mgechev/revive/rule/package-comments.go new file mode 100644 index 000000000..00fc5bb91 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/package-comments.go @@ -0,0 +1,121 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/lint" +) + +// PackageCommentsRule lints the package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +type PackageCommentsRule struct{} + +// Apply applies the rule to given file. +func (r *PackageCommentsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if isTest(file) { + return failures + } + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + fileAst := file.AST + w := &lintPackageComments{fileAst, file, onFailure} + ast.Walk(w, fileAst) + return failures +} + +// Name returns the rule name. 
+func (r *PackageCommentsRule) Name() string { + return "package-comments" +} + +type lintPackageComments struct { + fileAst *ast.File + file *lint.File + onFailure func(lint.Failure) +} + +func (l *lintPackageComments) Visit(_ ast.Node) ast.Visitor { + if l.file.IsTest() { + return nil + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + l.fileAst.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range l.fileAst.Comments { + if cg.Pos() > l.fileAst.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := l.file.ToPosition(lastCG.End()) + pkgPos := l.file.ToPosition(l.fileAst.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + l.onFailure(lint.Failure{ + Category: "comments", + Position: lint.FailurePosition{ + Start: pos, + End: pos, + }, + Confidence: 0.9, + Failure: "package comment is detached; there should be no blank lines between it and the package statement", + }) + return nil + } + } + + if l.fileAst.Doc == nil { + l.onFailure(lint.Failure{ + Category: "comments", + Node: l.fileAst, + Confidence: 0.2, + Failure: "should have a package comment, unless it's in another file for this package", + }) + return nil + } + s := l.fileAst.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + l.onFailure(lint.Failure{ + Category: "comments", + Node: l.fileAst.Doc, + Confidence: 1, + Failure: "package comment should not have leading space", + }) + s = ts + } + // Only non-main packages need to keep to this form. + if !l.file.Pkg.IsMain() && !strings.HasPrefix(s, prefix) { + l.onFailure(lint.Failure{ + Category: "comments", + Node: l.fileAst.Doc, + Confidence: 1, + Failure: fmt.Sprintf(`package comment should be of the form "%s..."`, prefix), + }) + } + return nil +} diff --git a/vendor/github.com/mgechev/revive/rule/range-val-address.go b/vendor/github.com/mgechev/revive/rule/range-val-address.go new file mode 100644 index 000000000..ece01ddf6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/range-val-address.go @@ -0,0 +1,126 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// RangeValAddress lints +type RangeValAddress struct{} + +// Apply applies the rule to given file. +func (r *RangeValAddress) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + walker := rangeValAddress{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (r *RangeValAddress) Name() string { + return "range-val-address" +} + +type rangeValAddress struct { + onFailure func(lint.Failure) +} + +func (w rangeValAddress) Visit(node ast.Node) ast.Visitor { + n, ok := node.(*ast.RangeStmt) + if !ok { + return w + } + + value, ok := n.Value.(*ast.Ident) + if !ok { + return w + } + + ast.Walk(rangeBodyVisitor{ + valueID: value.Obj, + onFailure: w.onFailure, + }, n.Body) + + return w +} + +type rangeBodyVisitor struct { + valueID *ast.Object + onFailure func(lint.Failure) +} + +func (bw rangeBodyVisitor) Visit(node ast.Node) ast.Visitor { + asgmt, ok := node.(*ast.AssignStmt) + if !ok { + return bw + } + + for _, exp := range asgmt.Lhs { + e, ok := exp.(*ast.IndexExpr) + if !ok { + continue + } + if bw.isAccessingRangeValueAddress(e.Index) { // e.g. a[&value]... + bw.onFailure(bw.newFailure(e.Index)) + } + } + + for _, exp := range asgmt.Rhs { + switch e := exp.(type) { + case *ast.UnaryExpr: // e.g. ...&value, ...&value.id + if bw.isAccessingRangeValueAddress(e) { + bw.onFailure(bw.newFailure(e)) + } + case *ast.CallExpr: + if fun, ok := e.Fun.(*ast.Ident); ok && fun.Name == "append" { // e.g. ...append(arr, &value) + for _, v := range e.Args { + if bw.isAccessingRangeValueAddress(v) { + bw.onFailure(bw.newFailure(e)) + } + } + } + } + } + return bw +} + +func (bw rangeBodyVisitor) isAccessingRangeValueAddress(exp ast.Expr) bool { + u, ok := exp.(*ast.UnaryExpr) + if !ok { + return false + } + + if u.Op != token.AND { + return false + } + + v, ok := u.X.(*ast.Ident) + if !ok { + var s *ast.SelectorExpr + s, ok = u.X.(*ast.SelectorExpr) + if !ok { + return false + } + v, ok = s.X.(*ast.Ident) + } + + return ok && v.Obj == bw.valueID +} + +func (bw rangeBodyVisitor) newFailure(node ast.Node) lint.Failure { + return lint.Failure{ + Node: node, + Confidence: 1, + Failure: fmt.Sprintf("suspicious assignment of '%s'. range-loop variables always have the same address", bw.valueID.Name), + } +} diff --git a/vendor/github.com/mgechev/revive/rule/range-val-in-closure.go b/vendor/github.com/mgechev/revive/rule/range-val-in-closure.go new file mode 100644 index 000000000..857787be3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/range-val-in-closure.go @@ -0,0 +1,111 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// RangeValInClosureRule lints given else constructs. +type RangeValInClosureRule struct{} + +// Apply applies the rule to given file. +func (r *RangeValInClosureRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + walker := rangeValInClosure{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *RangeValInClosureRule) Name() string { + return "range-val-in-closure" +} + +type rangeValInClosure struct { + onFailure func(lint.Failure) +} + +func (w rangeValInClosure) Visit(node ast.Node) ast.Visitor { + + // Find the variables updated by the loop statement. + var vars []*ast.Ident + addVar := func(expr ast.Expr) { + if id, ok := expr.(*ast.Ident); ok { + vars = append(vars, id) + } + } + var body *ast.BlockStmt + switch n := node.(type) { + case *ast.RangeStmt: + body = n.Body + addVar(n.Key) + addVar(n.Value) + case *ast.ForStmt: + body = n.Body + switch post := n.Post.(type) { + case *ast.AssignStmt: + // e.g. 
for p = head; p != nil; p = p.next + for _, lhs := range post.Lhs { + addVar(lhs) + } + case *ast.IncDecStmt: + // e.g. for i := 0; i < n; i++ + addVar(post.X) + } + } + if vars == nil { + return w + } + + // Inspect a go or defer statement + // if it's the last one in the loop body. + // (We give up if there are following statements, + // because it's hard to prove go isn't followed by wait, + // or defer by return.) + if len(body.List) == 0 { + return w + } + var last *ast.CallExpr + switch s := body.List[len(body.List)-1].(type) { + case *ast.GoStmt: + last = s.Call + case *ast.DeferStmt: + last = s.Call + default: + return w + } + lit, ok := last.Fun.(*ast.FuncLit) + if !ok { + return w + } + if lit.Type == nil { + // Not referring to a variable (e.g. struct field name) + return w + } + ast.Inspect(lit.Body, func(n ast.Node) bool { + id, ok := n.(*ast.Ident) + if !ok || id.Obj == nil { + return true + } + for _, v := range vars { + if v.Obj == id.Obj { + w.onFailure(lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("loop variable %v captured by func literal", id.Name), + Node: n, + }) + } + } + return true + }) + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/range.go b/vendor/github.com/mgechev/revive/rule/range.go new file mode 100644 index 000000000..d18492c71 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/range.go @@ -0,0 +1,82 @@ +package rule + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// RangeRule lints given else constructs. +type RangeRule struct{} + +// Apply applies the rule to given file. +func (r *RangeRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintRanges{file, onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *RangeRule) Name() string { + return "range" +} + +type lintRanges struct { + file *lint.File + onFailure func(lint.Failure) +} + +func (w *lintRanges) Visit(node ast.Node) ast.Visitor { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return w + } + if rs.Value == nil { + // for x = range m { ... } + return w // single var form + } + if !isIdent(rs.Value, "_") { + // for ?, y = range m { ... } + return w + } + + newRS := *rs // shallow copy + newRS.Value = nil + + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", w.file.Render(rs.Key), rs.Tok), + Confidence: 1, + Node: rs.Value, + ReplacementLine: firstLineOf(w.file, &newRS, rs), + }) + + return w +} + +func firstLineOf(f *lint.File, node, match ast.Node) string { + line := f.Render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return indentOf(f, match) + line +} + +func indentOf(f *lint.File, node ast.Node) string { + line := srcLine(f.Content(), f.ToPosition(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} diff --git a/vendor/github.com/mgechev/revive/rule/receiver-naming.go b/vendor/github.com/mgechev/revive/rule/receiver-naming.go new file mode 100644 index 000000000..589d5f0ef --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/receiver-naming.go @@ -0,0 +1,81 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// ReceiverNamingRule lints given else constructs. 
+type ReceiverNamingRule struct{} + +// Apply applies the rule to given file. +func (r *ReceiverNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintReceiverName{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + typeReceiver: map[string]string{}, + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *ReceiverNamingRule) Name() string { + return "receiver-naming" +} + +type lintReceiverName struct { + onFailure func(lint.Failure) + typeReceiver map[string]string +} + +func (w lintReceiverName) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return w + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return w + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + w.onFailure(lint.Failure{ + Node: n, + Confidence: 1, + Category: "naming", + Failure: "receiver name should not be an underscore, omit the name if it is unused", + }) + return w + } + if name == "this" || name == "self" { + w.onFailure(lint.Failure{ + Node: n, + Confidence: 1, + Category: "naming", + Failure: `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`, + }) + return w + } + recv := receiverType(fn) + if prev, ok := w.typeReceiver[recv]; ok && prev != name { + w.onFailure(lint.Failure{ + Node: n, + Confidence: 1, + Category: "naming", + Failure: fmt.Sprintf("receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv), + }) + return w + } + w.typeReceiver[recv] = name + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go new file mode 100644 index 000000000..947b8aac7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go @@ -0,0 +1,145 @@ +package rule + +import ( + "fmt" + "github.com/mgechev/revive/lint" + "go/ast" + "go/token" +) + +// RedefinesBuiltinIDRule warns when a builtin identifier is shadowed. +type RedefinesBuiltinIDRule struct{} + +// Apply applies the rule to given file. 
+func (r *RedefinesBuiltinIDRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + var builtInConstAndVars = map[string]bool{ + "true": true, + "false": true, + "iota": true, + "nil": true, + } + + var builtFunctions = map[string]bool{ + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "imag": true, + "len": true, + "make": true, + "new": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + } + + var builtInTypes = map[string]bool{ + "ComplexType": true, + "FloatType": true, + "IntegerType": true, + "Type": true, + "Type1": true, + "bool": true, + "byte": true, + "complex128": true, + "complex64": true, + "error": true, + "float32": true, + "float64": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "rune": true, + "string": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, + } + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintRedefinesBuiltinID{builtInConstAndVars, builtFunctions, builtInTypes, onFailure} + ast.Walk(w, astFile) + + return failures +} + +// Name returns the rule name. +func (r *RedefinesBuiltinIDRule) Name() string { + return "redefines-builtin-id" +} + +type lintRedefinesBuiltinID struct { + constsAndVars map[string]bool + funcs map[string]bool + types map[string]bool + onFailure func(lint.Failure) +} + +func (w *lintRedefinesBuiltinID) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.GenDecl: + if n.Tok != token.TYPE { + return nil // skip if not type declaration + } + typeSpec, ok := n.Specs[0].(*ast.TypeSpec) + if !ok { + return nil + } + id := typeSpec.Name.Name + if w.types[id] { + w.addFailure(n, fmt.Sprintf("redefinition of the built-in type %s", id)) + } + case *ast.FuncDecl: + if n.Recv != nil { + return w // skip methods + } + + id := n.Name.Name + if w.funcs[id] { + w.addFailure(n, fmt.Sprintf("redefinition of the built-in function %s", id)) + } + case *ast.AssignStmt: + for _, e := range n.Lhs { + id, ok := e.(*ast.Ident) + if !ok { + continue + } + + if w.constsAndVars[id.Name] { + var msg string + if n.Tok == token.DEFINE { + msg = fmt.Sprintf("assignment creates a shadow of built-in identifier %s", id.Name) + } else { + msg = fmt.Sprintf("assignment modifies built-in identifier %s", id.Name) + } + w.addFailure(n, msg) + } + } + } + + return w +} + +func (w lintRedefinesBuiltinID) addFailure(node ast.Node, msg string) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: "logic", + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/string-format.go b/vendor/github.com/mgechev/revive/rule/string-format.go new file mode 100644 index 000000000..6017c4180 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/string-format.go @@ -0,0 +1,282 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + "strconv" + + "github.com/mgechev/revive/lint" +) + +// #region Revive API + +// StringFormatRule lints strings and/or comments according to a set of regular expressions given as Arguments +type StringFormatRule struct{} + +// Apply applies the rule to the given file. 
+func (r *StringFormatRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintStringFormatRule{onFailure: onFailure} + w.parseArguments(arguments) + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *StringFormatRule) Name() string { + return "string-format" +} + +// ParseArgumentsTest is a public wrapper around w.parseArguments used for testing. Returns the error message provided to panic, or nil if no error was encountered +func (r *StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { + w := lintStringFormatRule{} + c := make(chan interface{}) + // Parse the arguments in a goroutine, defer a recover() call, return the error encountered (or nil if there was no error) + go func() { + defer func() { + err := recover() + c <- err + }() + w.parseArguments(arguments) + }() + err := <-c + if err != nil { + e := fmt.Sprintf("%s", err) + return &e + } + return nil +} + +// #endregion + +// #region Internal structure + +type lintStringFormatRule struct { + onFailure func(lint.Failure) + + rules []stringFormatSubrule + stringDeclarations map[string]string +} + +type stringFormatSubrule struct { + parent *lintStringFormatRule + scope stringFormatSubruleScope + regexp *regexp.Regexp + errorMessage string +} + +type stringFormatSubruleScope struct { + funcName string // Function name the rule is scoped to + argument int // (optional) Which argument in calls to the function is checked against the rule (the first argument is checked by default) + field string // (optional) If the argument to be checked is a struct, which member of the struct is checked against the rule (top level members only) +} + +// Regex inserted to match valid function/struct field identifiers +const identRegex = "[_A-Za-z][_A-Za-z0-9]*" + +var parseStringFormatScope = regexp.MustCompile( + fmt.Sprintf("^(%s(?:\\.%s)?)(?:\\[([0-9]+)\\](?:\\.(%s))?)?$", identRegex, identRegex, identRegex)) + +// #endregion + +// #region Argument parsing + +func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) { + for i, argument := range arguments { + scope, regex, errorMessage := w.parseArgument(argument, i) + w.rules = append(w.rules, stringFormatSubrule{ + parent: w, + scope: scope, + regexp: regex, + errorMessage: errorMessage, + }) + } +} + +func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) (scope stringFormatSubruleScope, regex *regexp.Regexp, errorMessage string) { + g, ok := argument.([]interface{}) // Cast to generic slice first + if !ok { + w.configError("argument is not a slice", ruleNum, 0) + } + if len(g) < 2 { + w.configError("less than two slices found in argument, scope and regex are required", ruleNum, len(g)-1) + } + rule := make([]string, len(g)) + for i, obj := range g { + val, ok := obj.(string) + if !ok { + w.configError("unexpected value, string was expected", ruleNum, i) + } + rule[i] = val + } + + // Validate scope and regex length + if len(rule[0]) == 0 { + w.configError("empty scope provided", ruleNum, 0) + } else if len(rule[1]) < 2 { + w.configError("regex is too small (regexes should begin and end with '/')", ruleNum, 1) + } + + // Parse rule scope + scope = stringFormatSubruleScope{} + matches := parseStringFormatScope.FindStringSubmatch(rule[0]) + if matches == nil { + // The rule's scope didn't match the parsing regex at all, probably a configuration error + 
w.parseError("unable to parse rule scope", ruleNum, 0) + } else if len(matches) != 4 { + // The rule's scope matched the parsing regex, but an unexpected number of submatches was returned, probably a bug + w.parseError(fmt.Sprintf("unexpected number of submatches when parsing scope: %d, expected 4", len(matches)), ruleNum, 0) + } + scope.funcName = matches[1] + if len(matches[2]) > 0 { + var err error + scope.argument, err = strconv.Atoi(matches[2]) + if err != nil { + w.parseError("unable to parse argument number in rule scope", ruleNum, 0) + } + } + if len(matches[3]) > 0 { + scope.field = matches[3] + } + + // Strip / characters from the beginning and end of rule[1] before compiling + regex, err := regexp.Compile(rule[1][1 : len(rule[1])-1]) + if err != nil { + w.parseError(fmt.Sprintf("unable to compile %s as regexp", rule[1]), ruleNum, 1) + } + + // Use custom error message if provided + if len(rule) == 3 { + errorMessage = rule[2] + } + return scope, regex, errorMessage +} + +// Report an invalid config, this is specifically the user's fault +func (w lintStringFormatRule) configError(msg string, ruleNum, option int) { + panic(fmt.Sprintf("invalid configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option)) +} + +// Report a general config parsing failure, this may be the user's fault, but it isn't known for certain +func (w lintStringFormatRule) parseError(msg string, ruleNum, option int) { + panic(fmt.Sprintf("failed to parse configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option)) +} + +// #endregion + +// #region Node traversal + +func (w lintStringFormatRule) Visit(node ast.Node) ast.Visitor { + // First, check if node is a call expression + call, ok := node.(*ast.CallExpr) + if !ok { + return w + } + + // Get the name of the call expression to check against rule scope + callName, ok := w.getCallName(call) + if !ok { + return w + } + + for _, rule := range w.rules { + if rule.scope.funcName == callName { + rule.Apply(call) + } + } + + return w +} + +// Return the name of a call expression in the form of package.Func or Func +func (w lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok bool) { + if ident, ok := call.Fun.(*ast.Ident); ok { + // Local function call + return ident.Name, true + } + + if selector, ok := call.Fun.(*ast.SelectorExpr); ok { + // Scoped function call + scope, ok := selector.X.(*ast.Ident) + if !ok { + return "", false + } + return scope.Name + "." 
+ selector.Sel.Name, true + } + + return "", false +} + +// #endregion + +// #region Linting logic + +// Apply a single format rule to a call expression (should be done after verifying the that the call expression matches the rule's scope) +func (rule stringFormatSubrule) Apply(call *ast.CallExpr) { + if len(call.Args) <= rule.scope.argument { + return + } + + arg := call.Args[rule.scope.argument] + var lit *ast.BasicLit + if len(rule.scope.field) > 0 { + // Try finding the scope's Field, treating arg as a composite literal + composite, ok := arg.(*ast.CompositeLit) + if !ok { + return + } + for _, el := range composite.Elts { + kv, ok := el.(*ast.KeyValueExpr) + if !ok { + continue + } + key, ok := kv.Key.(*ast.Ident) + if !ok || key.Name != rule.scope.field { + continue + } + + // We're now dealing with the exact field in the rule's scope, so if anything fails, we can safely return instead of continuing the loop + lit, ok = kv.Value.(*ast.BasicLit) + if !ok || lit.Kind != token.STRING { + return + } + } + } else { + var ok bool + // Treat arg as a string literal + lit, ok = arg.(*ast.BasicLit) + if !ok || lit.Kind != token.STRING { + return + } + } + // Unquote the string literal before linting + unquoted := lit.Value[1 : len(lit.Value)-1] + rule.lintMessage(unquoted, lit) +} + +func (rule stringFormatSubrule) lintMessage(s string, node ast.Node) { + // Fail if the string doesn't match the user's regex + if rule.regexp.MatchString(s) { + return + } + var failure string + if len(rule.errorMessage) > 0 { + failure = rule.errorMessage + } else { + failure = fmt.Sprintf("string literal doesn't match user defined regex /%s/", rule.regexp.String()) + } + rule.parent.onFailure(lint.Failure{ + Confidence: 1, + Failure: failure, + Node: node}) +} + +// #endregion diff --git a/vendor/github.com/mgechev/revive/rule/string-of-int.go b/vendor/github.com/mgechev/revive/rule/string-of-int.go new file mode 100644 index 000000000..38f453a4a --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/string-of-int.go @@ -0,0 +1,95 @@ +package rule + +import ( + "go/ast" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// StringOfIntRule warns when logic expressions contains Boolean literals. +type StringOfIntRule struct{} + +// Apply applies the rule to given file. +func (r *StringOfIntRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + file.Pkg.TypeCheck() + + w := &lintStringInt{file, onFailure} + ast.Walk(w, astFile) + + return failures +} + +// Name returns the rule name. 
+func (r *StringOfIntRule) Name() string { + return "string-of-int" +} + +type lintStringInt struct { + file *lint.File + onFailure func(lint.Failure) +} + +func (w *lintStringInt) Visit(node ast.Node) ast.Visitor { + ce, ok := node.(*ast.CallExpr) + if !ok { + return w + } + + if !w.isCallStringCast(ce.Fun) { + return w + } + + if !w.isIntExpression(ce.Args) { + return w + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: ce, + Failure: "dubious convertion of an integer into a string, use strconv.Itoa", + }) + + return w +} + +func (w *lintStringInt) isCallStringCast(e ast.Expr) bool { + t := w.file.Pkg.TypeOf(e) + if t == nil { + return false + } + + tb, _ := t.Underlying().(*types.Basic) + + return tb != nil && tb.Kind() == types.String +} + +func (w *lintStringInt) isIntExpression(es []ast.Expr) bool { + if len(es) != 1 { + return false + } + + t := w.file.Pkg.TypeOf(es[0]) + if t == nil { + return false + } + + ut, _ := t.Underlying().(*types.Basic) + if ut == nil || ut.Info()&types.IsInteger == 0 { + return false + } + + switch ut.Kind() { + case types.Byte, types.Rune, types.UntypedRune: + return false + } + + return true +} diff --git a/vendor/github.com/mgechev/revive/rule/struct-tag.go b/vendor/github.com/mgechev/revive/rule/struct-tag.go new file mode 100644 index 000000000..cb3818e92 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/struct-tag.go @@ -0,0 +1,236 @@ +package rule + +import ( + "fmt" + "go/ast" + "strconv" + "strings" + + "github.com/fatih/structtag" + "github.com/mgechev/revive/lint" +) + +// StructTagRule lints struct tags. +type StructTagRule struct{} + +// Apply applies the rule to given file. +func (r *StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintStructTagRule{onFailure: onFailure} + + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *StructTagRule) Name() string { + return "struct-tag" +} + +type lintStructTagRule struct { + onFailure func(lint.Failure) + usedTagNbr map[string]bool // list of used tag numbers +} + +func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.StructType: + if n.Fields == nil || n.Fields.NumFields() < 1 { + return nil // skip empty structs + } + w.usedTagNbr = map[string]bool{} // init + for _, f := range n.Fields.List { + if f.Tag != nil { + w.checkTaggedField(f) + } + } + } + + return w + +} + +// checkTaggedField checks the tag of the given field. 
+// precondition: the field has a tag +func (w lintStructTagRule) checkTaggedField(f *ast.Field) { + if len(f.Names) > 0 && !f.Names[0].IsExported() { + w.addFailure(f, "tag on not-exported field "+f.Names[0].Name) + } + + tags, err := structtag.Parse(strings.Trim(f.Tag.Value, "`")) + if err != nil || tags == nil { + w.addFailure(f.Tag, "malformed tag") + return + } + + for _, tag := range tags.Tags() { + switch key := tag.Key; key { + case "asn1": + msg, ok := w.checkASN1Tag(f.Type, tag) + if !ok { + w.addFailure(f.Tag, msg) + } + case "bson": + msg, ok := w.checkBSONTag(tag.Options) + if !ok { + w.addFailure(f.Tag, msg) + } + case "default": + if !w.typeValueMatch(f.Type, tag.Name) { + w.addFailure(f.Tag, "field's type and default value's type mismatch") + } + case "json": + msg, ok := w.checkJSONTag(tag.Name, tag.Options) + if !ok { + w.addFailure(f.Tag, msg) + } + case "protobuf": + // Not implemented yet + case "required": + if tag.Name != "true" && tag.Name != "false" { + w.addFailure(f.Tag, "required should be 'true' or 'false'") + } + case "xml": + msg, ok := w.checkXMLTag(tag.Options) + if !ok { + w.addFailure(f.Tag, msg) + } + case "yaml": + msg, ok := w.checkYAMLTag(tag.Options) + if !ok { + w.addFailure(f.Tag, msg) + } + default: + // unknown key + } + } +} + +func (w lintStructTagRule) checkASN1Tag(t ast.Expr, tag *structtag.Tag) (string, bool) { + checkList := append(tag.Options, tag.Name) + for _, opt := range checkList { + switch opt { + case "application", "explicit", "generalized", "ia5", "omitempty", "optional", "set", "utf8": + + default: + if strings.HasPrefix(opt, "tag:") { + parts := strings.Split(opt, ":") + tagNumber := parts[1] + if w.usedTagNbr[tagNumber] { + return fmt.Sprintf("duplicated tag number %s", tagNumber), false + } + w.usedTagNbr[tagNumber] = true + + continue + } + + if strings.HasPrefix(opt, "default:") { + parts := strings.Split(opt, ":") + if len(parts) < 2 { + return "malformed default for ASN1 tag", false + } + if !w.typeValueMatch(t, parts[1]) { + return "field's type and default value's type mismatch", false + } + + continue + } + + return fmt.Sprintf("unknown option '%s' in ASN1 tag", opt), false + } + } + + return "", true +} + +func (w lintStructTagRule) checkBSONTag(options []string) (string, bool) { + for _, opt := range options { + switch opt { + case "inline", "minsize", "omitempty": + default: + return fmt.Sprintf("unknown option '%s' in BSON tag", opt), false + } + } + + return "", true +} + +func (w lintStructTagRule) checkJSONTag(name string, options []string) (string, bool) { + for _, opt := range options { + switch opt { + case "omitempty", "string": + case "": + // special case for JSON key "-" + if name != "-" { + return "option can not be empty in JSON tag", false + } + default: + return fmt.Sprintf("unknown option '%s' in JSON tag", opt), false + } + } + + return "", true +} + +func (w lintStructTagRule) checkXMLTag(options []string) (string, bool) { + for _, opt := range options { + switch opt { + case "any", "attr", "cdata", "chardata", "comment", "innerxml", "omitempty", "typeattr": + default: + return fmt.Sprintf("unknown option '%s' in XML tag", opt), false + } + } + + return "", true +} + +func (w lintStructTagRule) checkYAMLTag(options []string) (string, bool) { + for _, opt := range options { + switch opt { + case "flow", "inline", "omitempty": + default: + return fmt.Sprintf("unknown option '%s' in YAML tag", opt), false + } + } + + return "", true +} + +func (w lintStructTagRule) typeValueMatch(t ast.Expr, val 
string) bool { + tID, ok := t.(*ast.Ident) + if !ok { + return true + } + + typeMatches := true + switch tID.Name { + case "bool": + typeMatches = val == "true" || val == "false" + case "float64": + _, err := strconv.ParseFloat(val, 64) + typeMatches = err == nil + case "int": + _, err := strconv.ParseInt(val, 10, 64) + typeMatches = err == nil + case "string": + case "nil": + default: + // unchecked type + } + + return typeMatches +} + +func (w lintStructTagRule) addFailure(n ast.Node, msg string) { + w.onFailure(lint.Failure{ + Node: n, + Failure: msg, + Confidence: 1, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/superfluous-else.go b/vendor/github.com/mgechev/revive/rule/superfluous-else.go new file mode 100644 index 000000000..b9ce39606 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/superfluous-else.go @@ -0,0 +1,114 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// SuperfluousElseRule lints given else constructs. +type SuperfluousElseRule struct{} + +// Apply applies the rule to given file. +func (r *SuperfluousElseRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + var branchingFunctions = map[string]map[string]bool{ + "os": {"Exit": true}, + "log": { + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + }, + } + + w := lintSuperfluousElse{make(map[*ast.IfStmt]bool), onFailure, branchingFunctions} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *SuperfluousElseRule) Name() string { + return "superfluous-else" +} + +type lintSuperfluousElse struct { + ignore map[*ast.IfStmt]bool + onFailure func(lint.Failure) + branchingFunctions map[string]map[string]bool +} + +func (w lintSuperfluousElse) Visit(node ast.Node) ast.Visitor { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return w + } + if w.ignore[ifStmt] { + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + w.ignore[elseif] = true + } + return w + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + w.ignore[elseif] = true + return w + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return w + } + if len(ifStmt.Body.List) == 0 { + return w + } + shortDecl := false // does the if statement have a ":=" initialization statement? 
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + switch stmt := lastStmt.(type) { + case *ast.BranchStmt: + token := stmt.Tok.String() + if token != "fallthrough" { + w.onFailure(newFailure(ifStmt.Else, "if block ends with a "+token+" statement, so drop this else and outdent its block"+extra)) + } + case *ast.ExprStmt: + if ce, ok := stmt.X.(*ast.CallExpr); ok { // it's a function call + if fc, ok := ce.Fun.(*ast.SelectorExpr); ok { + if id, ok := fc.X.(*ast.Ident); ok { + fn := fc.Sel.Name + pkg := id.Name + if w.branchingFunctions[pkg][fn] { // it's a call to a branching function + w.onFailure( + newFailure(ifStmt.Else, fmt.Sprintf("if block ends with call to %s.%s function, so drop this else and outdent its block%s", pkg, fn, extra))) + } + } + } + } + } + + return w +} + +func newFailure(node ast.Node, msg string) lint.Failure { + return lint.Failure{ + Confidence: 1, + Node: node, + Category: "indent", + Failure: msg, + } +} diff --git a/vendor/github.com/mgechev/revive/rule/time-naming.go b/vendor/github.com/mgechev/revive/rule/time-naming.go new file mode 100644 index 000000000..a93f4b5ae --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/time-naming.go @@ -0,0 +1,93 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + "strings" + + "github.com/mgechev/revive/lint" +) + +// TimeNamingRule lints given else constructs. +type TimeNamingRule struct{} + +// Apply applies the rule to given file. +func (r *TimeNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintTimeNames{file, onFailure} + + file.Pkg.TypeCheck() + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *TimeNamingRule) Name() string { + return "time-naming" +} + +type lintTimeNames struct { + file *lint.File + onFailure func(lint.Failure) +} + +func (w *lintTimeNames) Visit(node ast.Node) ast.Visitor { + v, ok := node.(*ast.ValueSpec) + if !ok { + return w + } + for _, name := range v.Names { + origTyp := w.file.Pkg.TypeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + w.onFailure(lint.Failure{ + Category: "time", + Confidence: 0.9, + Node: v, + Failure: fmt.Sprintf("var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix), + }) + } + return w +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. 
+var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} diff --git a/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go new file mode 100644 index 000000000..d4da01574 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go @@ -0,0 +1,183 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// UnconditionalRecursionRule lints given else constructs. +type UnconditionalRecursionRule struct{} + +// Apply applies the rule to given file. +func (r *UnconditionalRecursionRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintUnconditionalRecursionRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *UnconditionalRecursionRule) Name() string { + return "unconditional-recursion" +} + +type funcDesc struct { + reciverID *ast.Ident + id *ast.Ident +} + +func (fd *funcDesc) equal(other *funcDesc) bool { + receiversAreEqual := (fd.reciverID == nil && other.reciverID == nil) || fd.reciverID != nil && other.reciverID != nil && fd.reciverID.Name == other.reciverID.Name + idsAreEqual := (fd.id == nil && other.id == nil) || fd.id.Name == other.id.Name + + return receiversAreEqual && idsAreEqual +} + +type funcStatus struct { + funcDesc *funcDesc + seenConditionalExit bool +} + +type lintUnconditionalRecursionRule struct { + onFailure func(lint.Failure) + currentFunc *funcStatus +} + +// Visit will traverse the file AST. +// The rule is based in the following algorithm: inside each function body we search for calls to the function itself. +// We do not search inside conditional control structures (if, for, switch, ...) because any recursive call inside them is conditioned +// We do search inside conditional control structures are statements that will take the control out of the function (return, exit, panic) +// If we find conditional control exits, it means the function is NOT unconditionally-recursive +// If we find a recursive call before finding any conditional exit, a failure is generated +// In resume: if we found a recursive call control-dependant from the entry point of the function then we raise a failure. 
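// Illustrative sketch, not part of the vendored upstream file: per the algorithm
// described in the comment above, the rule would flag countdownBad, whose recursive
// call is reachable from the function entry point before any conditional exit, but
// not countdownOK, where a conditional return is seen first:
//
//	func countdownBad(n int) {
//		fmt.Println(n)
//		countdownBad(n - 1) // unconditional recursive call -> reported
//	}
//
//	func countdownOK(n int) {
//		if n <= 0 {
//			return // conditional exit, so the recursion below is not reported
//		}
//		fmt.Println(n)
//		countdownOK(n - 1)
//	}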
+func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + var rec *ast.Ident + switch { + case n.Recv == nil || n.Recv.NumFields() < 1 || len(n.Recv.List[0].Names) < 1: + rec = nil + default: + rec = n.Recv.List[0].Names[0] + } + + w.currentFunc = &funcStatus{&funcDesc{rec, n.Name}, false} + case *ast.CallExpr: + var funcID *ast.Ident + var selector *ast.Ident + switch c := n.Fun.(type) { + case *ast.Ident: + selector = nil + funcID = c + case *ast.SelectorExpr: + var ok bool + selector, ok = c.X.(*ast.Ident) + if !ok { // a.b....Foo() + return nil + } + funcID = c.Sel + default: + return w + } + + if w.currentFunc != nil && // not in a func body + !w.currentFunc.seenConditionalExit && // there is a conditional exit in the function + w.currentFunc.funcDesc.equal(&funcDesc{selector, funcID}) { + w.onFailure(lint.Failure{ + Category: "logic", + Confidence: 1, + Node: n, + Failure: "unconditional recursive call", + }) + } + case *ast.IfStmt: + w.updateFuncStatus(n.Body) + w.updateFuncStatus(n.Else) + return nil + case *ast.SelectStmt: + w.updateFuncStatus(n.Body) + return nil + case *ast.RangeStmt: + w.updateFuncStatus(n.Body) + return nil + case *ast.TypeSwitchStmt: + w.updateFuncStatus(n.Body) + return nil + case *ast.SwitchStmt: + w.updateFuncStatus(n.Body) + return nil + case *ast.GoStmt: + for _, a := range n.Call.Args { + ast.Walk(w, a) // check if arguments have a recursive call + } + return nil // recursive async call is not an issue + case *ast.ForStmt: + if n.Cond != nil { + return nil + } + // unconditional loop + return w + } + + return w +} + +func (w *lintUnconditionalRecursionRule) updateFuncStatus(node ast.Node) { + if node == nil || w.currentFunc == nil || w.currentFunc.seenConditionalExit { + return + } + + w.currentFunc.seenConditionalExit = w.hasControlExit(node) +} + +var exitFunctions = map[string]map[string]bool{ + "os": {"Exit": true}, + "syscall": {"Exit": true}, + "log": { + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + }, +} + +func (w *lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { + // isExit returns true if the given node makes control exit the function + isExit := func(node ast.Node) bool { + switch n := node.(type) { + case *ast.ReturnStmt: + return true + case *ast.CallExpr: + if isIdent(n.Fun, "panic") { + return true + } + se, ok := n.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + + id, ok := se.X.(*ast.Ident) + if !ok { + return false + } + + fn := se.Sel.Name + pkg := id.Name + if exitFunctions[pkg] != nil && exitFunctions[pkg][fn] { // it's a call to an exit function + return true + } + } + + return false + } + + return len(pick(node, isExit, nil)) != 0 +} diff --git a/vendor/github.com/mgechev/revive/rule/unexported-naming.go b/vendor/github.com/mgechev/revive/rule/unexported-naming.go new file mode 100644 index 000000000..96cec3e46 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unexported-naming.go @@ -0,0 +1,115 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// UnexportedNamingRule lints wrongly named unexported symbols. +type UnexportedNamingRule struct{} + +// Apply applies the rule to given file. 
+func (r *UnexportedNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + ba := &unexportablenamingLinter{onFailure} + ast.Walk(ba, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *UnexportedNamingRule) Name() string { + return "unexported-naming" +} + +type unexportablenamingLinter struct { + onFailure func(lint.Failure) +} + +func (unl unexportablenamingLinter) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + unl.lintFunction(n.Type, n.Body) + return nil + case *ast.FuncLit: + unl.lintFunction(n.Type, n.Body) + + return nil + case *ast.AssignStmt: + if n.Tok != token.DEFINE { + return nil + } + + ids := []*ast.Ident{} + for _, e := range n.Lhs { + id, ok := e.(*ast.Ident) + if !ok { + continue + } + ids = append(ids, id) + } + + unl.lintIDs(ids) + + case *ast.DeclStmt: + gd, ok := n.Decl.(*ast.GenDecl) + if !ok { + return nil + } + + if len(gd.Specs) < 1 { + return nil + } + + vs, ok := gd.Specs[0].(*ast.ValueSpec) + if !ok { + return nil + } + + unl.lintIDs(vs.Names) + } + + return unl +} + +func (unl unexportablenamingLinter) lintFunction(ft *ast.FuncType, body *ast.BlockStmt) { + unl.lintFields(ft.Params) + unl.lintFields(ft.Results) + + if body != nil { + ast.Walk(unl, body) + } +} + +func (unl unexportablenamingLinter) lintFields(fields *ast.FieldList) { + if fields == nil { + return + } + + ids := []*ast.Ident{} + for _, field := range fields.List { + ids = append(ids, field.Names...) + } + + unl.lintIDs(ids) +} + +func (unl unexportablenamingLinter) lintIDs(ids []*ast.Ident) { + for _, id := range ids { + if id.IsExported() { + unl.onFailure(lint.Failure{ + Node: id, + Confidence: 1, + Category: "naming", + Failure: fmt.Sprintf("the symbol %s is local, its name should start with a lowercase letter", id.String()), + }) + } + } +} diff --git a/vendor/github.com/mgechev/revive/rule/unexported-return.go b/vendor/github.com/mgechev/revive/rule/unexported-return.go new file mode 100644 index 000000000..c9c8a41d3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unexported-return.go @@ -0,0 +1,106 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// UnexportedReturnRule lints given else constructs. +type UnexportedReturnRule struct{} + +// Apply applies the rule to given file. +func (r *UnexportedReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := lintUnexportedReturn{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + file.Pkg.TypeCheck() + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *UnexportedReturnRule) Name() string { + return "unexported-return" +} + +type lintUnexportedReturn struct { + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) +} + +func (w lintUnexportedReturn) Visit(n ast.Node) ast.Visitor { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return w + } + if fn.Type.Results == nil { + return nil + } + if !fn.Name.IsExported() { + return nil + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. 
+ return nil + } + } + for _, ret := range fn.Type.Results.List { + typ := w.file.Pkg.TypeOf(ret.Type) + if exportedType(typ) { + continue + } + w.onFailure(lint.Failure{ + Category: "unexported-type-in-api", + Node: ret.Type, + Confidence: 0.8, + Failure: fmt.Sprintf("exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ), + }) + break // only flag one + } + return nil +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. +func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + obj := T.Obj() + switch { + // Builtin types have no package. + case obj.Pkg() == nil: + case obj.Exported(): + default: + _, ok := T.Underlying().(*types.Interface) + return ok + } + return true + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} diff --git a/vendor/github.com/mgechev/revive/rule/unhandled-error.go b/vendor/github.com/mgechev/revive/rule/unhandled-error.go new file mode 100644 index 000000000..0e2f62875 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unhandled-error.go @@ -0,0 +1,120 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// UnhandledErrorRule lints given else constructs. +type UnhandledErrorRule struct{} + +type ignoreListType map[string]struct{} + +// Apply applies the rule to given file. +func (r *UnhandledErrorRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + var failures []lint.Failure + + ignoreList := make(ignoreListType, len(args)) + + for _, arg := range args { + argStr, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the unhandled-error rule. Expecting a string, got %T", arg)) + } + + ignoreList[argStr] = struct{}{} + } + + walker := &lintUnhandledErrors{ + ignoreList: ignoreList, + pkg: file.Pkg, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + file.Pkg.TypeCheck() + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (r *UnhandledErrorRule) Name() string { + return "unhandled-error" +} + +type lintUnhandledErrors struct { + ignoreList ignoreListType + pkg *lint.Package + onFailure func(lint.Failure) +} + +// Visit looks for statements that are function calls. +// If the called function returns a value of type error a failure will be created. 
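// Illustrative sketch, not part of the vendored upstream file: this rule reports
// expression statements whose call returns an error value that is silently dropped,
// unless the called function appears in the rule's configured ignore list:
//
//	f, _ := os.Create("out.txt")
//	f.Close()            // flagged: (*os.File).Close returns an error that is ignored
//	_ = f.Close()        // an assignment, not an *ast.ExprStmt, so not reported here
//	fmt.Println("done")  // also flagged, unless "fmt.Println" is in the ignore list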
+func (w *lintUnhandledErrors) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.ExprStmt: + fCall, ok := n.X.(*ast.CallExpr) + if !ok { + return nil // not a function call + } + + funcType := w.pkg.TypeOf(fCall) + if funcType == nil { + return nil // skip, type info not available + } + + switch t := funcType.(type) { + case *types.Named: + if !w.isTypeError(t) { + return nil // func call does not return an error + } + + w.addFailure(fCall) + default: + retTypes, ok := funcType.Underlying().(*types.Tuple) + if !ok { + return nil // skip, unable to retrieve return type of the called function + } + + if w.returnsAnError(retTypes) { + w.addFailure(fCall) + } + } + } + return w +} + +func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { + funcName := gofmt(n.Fun) + if _, mustIgnore := w.ignoreList[funcName]; mustIgnore { + return + } + + w.onFailure(lint.Failure{ + Category: "bad practice", + Confidence: 1, + Node: n, + Failure: fmt.Sprintf("Unhandled error in call to function %v", funcName), + }) +} + +func (*lintUnhandledErrors) isTypeError(t *types.Named) bool { + const errorTypeName = "_.error" + + return t.Obj().Id() == errorTypeName +} + +func (w *lintUnhandledErrors) returnsAnError(tt *types.Tuple) bool { + for i := 0; i < tt.Len(); i++ { + nt, ok := tt.At(i).Type().(*types.Named) + if ok && w.isTypeError(nt) { + return true + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go b/vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go new file mode 100644 index 000000000..732d8a8bb --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go @@ -0,0 +1,107 @@ +package rule + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// UnnecessaryStmtRule warns on unnecessary statements. +type UnnecessaryStmtRule struct{} + +// Apply applies the rule to given file. +func (r *UnnecessaryStmtRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintUnnecessaryStmtRule{onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *UnnecessaryStmtRule) Name() string { + return "unnecessary-stmt" +} + +type lintUnnecessaryStmtRule struct { + onFailure func(lint.Failure) +} + +func (w lintUnnecessaryStmtRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + if n.Body == nil || n.Type.Results != nil { + return w + } + stmts := n.Body.List + if len(stmts) == 0 { + return w + } + + lastStmt := stmts[len(stmts)-1] + rs, ok := lastStmt.(*ast.ReturnStmt) + if !ok { + return w + } + + if len(rs.Results) == 0 { + w.newFailure(lastStmt, "omit unnecessary return statement") + } + + case *ast.SwitchStmt: + w.checkSwitchBody(n.Body) + case *ast.TypeSwitchStmt: + w.checkSwitchBody(n.Body) + case *ast.CaseClause: + if n.Body == nil { + return w + } + stmts := n.Body + if len(stmts) == 0 { + return w + } + + lastStmt := stmts[len(stmts)-1] + rs, ok := lastStmt.(*ast.BranchStmt) + if !ok { + return w + } + + if rs.Tok == token.BREAK && rs.Label == nil { + w.newFailure(lastStmt, "omit unnecessary break at the end of case clause") + } + } + + return w +} + +func (w lintUnnecessaryStmtRule) checkSwitchBody(b *ast.BlockStmt) { + cases := b.List + if len(cases) != 1 { + return + } + + cc, ok := cases[0].(*ast.CaseClause) + if !ok { + return + } + + if len(cc.List) > 1 { // skip cases with multiple expressions + return + } + + w.newFailure(b, "switch with only one case can be replaced by an if-then") +} + +func (w lintUnnecessaryStmtRule) newFailure(node ast.Node, msg string) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: node, + Category: "style", + Failure: msg, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/unreachable-code.go b/vendor/github.com/mgechev/revive/rule/unreachable-code.go new file mode 100644 index 000000000..5472feaa9 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unreachable-code.go @@ -0,0 +1,114 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// UnreachableCodeRule lints unreachable code. +type UnreachableCodeRule struct{} + +// Apply applies the rule to given file. +func (r *UnreachableCodeRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + var branchingFunctions = map[string]map[string]bool{ + "os": {"Exit": true}, + "log": { + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + }, + } + + w := lintUnreachableCode{onFailure, branchingFunctions} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (r *UnreachableCodeRule) Name() string { + return "unreachable-code" +} + +type lintUnreachableCode struct { + onFailure func(lint.Failure) + branchingFunctions map[string]map[string]bool +} + +func (w lintUnreachableCode) Visit(node ast.Node) ast.Visitor { + blk, ok := node.(*ast.BlockStmt) + if !ok { + return w + } + + if len(blk.List) < 2 { + return w + } +loop: + for i, stmt := range blk.List[:len(blk.List)-1] { + // println("iterating ", len(blk.List)) + next := blk.List[i+1] + if _, ok := next.(*ast.LabeledStmt); ok { + continue // skip if next statement is labeled + } + + switch s := stmt.(type) { + case *ast.ReturnStmt: + w.onFailure(newUnreachableCodeFailure(s)) + break loop + case *ast.BranchStmt: + token := s.Tok.String() + if token != "fallthrough" { + w.onFailure(newUnreachableCodeFailure(s)) + break loop + } + case *ast.ExprStmt: + ce, ok := s.X.(*ast.CallExpr) + if !ok { + continue + } + // it's a function call + fc, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + + id, ok := fc.X.(*ast.Ident) + + if !ok { + continue + } + fn := fc.Sel.Name + pkg := id.Name + if !w.branchingFunctions[pkg][fn] { // it isn't a call to a branching function + continue + } + + if _, ok := next.(*ast.ReturnStmt); ok { // return statement needed to satisfy function signature + continue + } + + w.onFailure(newUnreachableCodeFailure(s)) + break loop + } + } + + return w +} + +func newUnreachableCodeFailure(node ast.Node) lint.Failure { + return lint.Failure{ + Confidence: 1, + Node: node, + Category: "logic", + Failure: "unreachable code after this statement", + } +} diff --git a/vendor/github.com/mgechev/revive/rule/unused-param.go b/vendor/github.com/mgechev/revive/rule/unused-param.go new file mode 100644 index 000000000..60df908d3 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unused-param.go @@ -0,0 +1,102 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// UnusedParamRule lints unused params in functions. +type UnusedParamRule struct{} + +// Apply applies the rule to given file. +func (r *UnusedParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintUnusedParamRule{onFailure: onFailure} + + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (r *UnusedParamRule) Name() string { + return "unused-parameter" +} + +type lintUnusedParamRule struct { + onFailure func(lint.Failure) +} + +func (w lintUnusedParamRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + params := retrieveNamedParams(n.Type.Params) + if len(params) < 1 { + return nil // skip, func without parameters + } + + if n.Body == nil { + return nil // skip, is a function prototype + } + + // inspect the func body looking for references to parameters + fselect := func(n ast.Node) bool { + ident, isAnID := n.(*ast.Ident) + + if !isAnID { + return false + } + + _, isAParam := params[ident.Obj] + if isAParam { + params[ident.Obj] = false // mark as used + } + + return false + } + _ = pick(n.Body, fselect, nil) + + for _, p := range n.Type.Params.List { + for _, n := range p.Names { + if params[n.Obj] { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "bad practice", + Failure: fmt.Sprintf("parameter '%s' seems to be unused, consider removing or renaming it as _", n.Name), + }) + } + } + } + + return nil // full method body already inspected + } + + return w +} + +func retrieveNamedParams(params *ast.FieldList) map[*ast.Object]bool { + result := map[*ast.Object]bool{} + if params.List == nil { + return result + } + + for _, p := range params.List { + for _, n := range p.Names { + if n.Name == "_" { + continue + } + + result[n.Obj] = true + } + } + + return result +} diff --git a/vendor/github.com/mgechev/revive/rule/unused-receiver.go b/vendor/github.com/mgechev/revive/rule/unused-receiver.go new file mode 100644 index 000000000..2289a517e --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unused-receiver.go @@ -0,0 +1,77 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// UnusedReceiverRule lints unused params in functions. +type UnusedReceiverRule struct{} + +// Apply applies the rule to given file. +func (*UnusedReceiverRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintUnusedReceiverRule{onFailure: onFailure} + + ast.Walk(w, file.AST) + + return failures +} + +// Name returns the rule name. +func (*UnusedReceiverRule) Name() string { + return "unused-receiver" +} + +type lintUnusedReceiverRule struct { + onFailure func(lint.Failure) +} + +func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + if n.Recv == nil { + return nil // skip this func decl, not a method + } + + rec := n.Recv.List[0] // safe to access only the first (unique) element of the list + if len(rec.Names) < 1 { + return nil // the receiver is anonymous: func (aType) Foo(...) ... 
+ } + + recID := rec.Names[0] + if recID.Name == "_" { + return nil // the receiver is already named _ + } + + // inspect the func body looking for references to the receiver id + fselect := func(n ast.Node) bool { + ident, isAnID := n.(*ast.Ident) + + return isAnID && ident.Obj == recID.Obj + } + refs2recID := pick(n.Body, fselect, nil) + + if len(refs2recID) > 0 { + return nil // the receiver is referenced in the func body + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: recID, + Category: "bad practice", + Failure: fmt.Sprintf("method receiver '%s' is not referenced in method's body, consider removing or renaming it as _", recID.Name), + }) + + return nil // full method body already inspected + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/useless-break.go b/vendor/github.com/mgechev/revive/rule/useless-break.go new file mode 100644 index 000000000..9e9c829c7 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/useless-break.go @@ -0,0 +1,77 @@ +package rule + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// UselessBreak lint rule. +type UselessBreak struct{} + +// Apply applies the rule to given file. +func (r *UselessBreak) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + astFile := file.AST + w := &lintUselessBreak{onFailure, false} + ast.Walk(w, astFile) + return failures +} + +// Name returns the rule name. +func (r *UselessBreak) Name() string { + return "useless-break" +} + +type lintUselessBreak struct { + onFailure func(lint.Failure) + inLoopBody bool +} + +func (w *lintUselessBreak) Visit(node ast.Node) ast.Visitor { + switch v := node.(type) { + case *ast.ForStmt: + w.inLoopBody = true + ast.Walk(w, v.Body) + w.inLoopBody = false + return nil + case *ast.CommClause: + for _, n := range v.Body { + w.inspectCaseStatement(n) + } + return nil + case *ast.CaseClause: + for _, n := range v.Body { + w.inspectCaseStatement(n) + } + return nil + } + return w +} + +func (w *lintUselessBreak) inspectCaseStatement(n ast.Stmt) { + switch s := n.(type) { + case *ast.BranchStmt: + if s.Tok != token.BREAK { + return // not a break statement + } + if s.Label != nil { + return // labeled break statement, usually affects a nesting loop + } + msg := "useless break in case clause" + if w.inLoopBody { + msg += " (WARN: this break statement affects this switch or select statement and not the loop enclosing it)" + } + w.onFailure(lint.Failure{ + Confidence: 1, + Node: s, + Failure: msg, + }) + } +} diff --git a/vendor/github.com/mgechev/revive/rule/utils.go b/vendor/github.com/mgechev/revive/rule/utils.go new file mode 100644 index 000000000..d2b764f9f --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/utils.go @@ -0,0 +1,192 @@ +package rule + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "regexp" + "strings" + + "github.com/mgechev/revive/lint" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. 
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isTest(f *lint.File) bool { + return strings.HasSuffix(f.Name, "_test.go") +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, + "Unwrap": true, +} + +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +func validType(T types.Type) bool { + return T != nil && + T != types.Typ[types.Invalid] && + !strings.Contains(T.String(), "invalid type") // good but not foolproof +} + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} + +// pick yields a list of nodes by picking them from a sub-ast with root node n. +// Nodes are selected by applying the fselect function +// f function is applied to each selected node before inseting it in the final result. +// If f==nil then it defaults to the identity function (ie it returns the node itself) +func pick(n ast.Node, fselect func(n ast.Node) bool, f func(n ast.Node) []ast.Node) []ast.Node { + var result []ast.Node + + if n == nil { + return result + } + + if f == nil { + f = func(n ast.Node) []ast.Node { return []ast.Node{n} } + } + + onSelect := func(n ast.Node) { + result = append(result, f(n)...) + } + p := picker{fselect: fselect, onSelect: onSelect} + ast.Walk(p, n) + return result +} + +func pickFromExpList(l []ast.Expr, fselect func(n ast.Node) bool, f func(n ast.Node) []ast.Node) []ast.Node { + result := make([]ast.Node, 0) + for _, e := range l { + result = append(result, pick(e, fselect, f)...) 
+ } + return result +} + +type picker struct { + fselect func(n ast.Node) bool + onSelect func(n ast.Node) +} + +func (p picker) Visit(node ast.Node) ast.Visitor { + if p.fselect == nil { + return nil + } + + if p.fselect(node) { + p.onSelect(node) + } + + return p +} + +// isBoolOp returns true if the given token corresponds to +// a bool operator +func isBoolOp(t token.Token) bool { + switch t { + case token.LAND, token.LOR, token.EQL, token.NEQ: + return true + } + + return false +} + +const ( + trueName = "true" + falseName = "false" +) + +func isExprABooleanLit(n ast.Node) (lexeme string, ok bool) { + oper, ok := n.(*ast.Ident) + + if !ok { + return "", false + } + + return oper.Name, (oper.Name == trueName || oper.Name == falseName) +} + +// gofmt returns a string representation of an AST subtree. +func gofmt(x interface{}) string { + buf := bytes.Buffer{} + fs := token.NewFileSet() + printer.Fprint(&buf, fs, x) + return buf.String() +} diff --git a/vendor/github.com/mgechev/revive/rule/var-declarations.go b/vendor/github.com/mgechev/revive/rule/var-declarations.go new file mode 100644 index 000000000..441132115 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/var-declarations.go @@ -0,0 +1,120 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "github.com/mgechev/revive/lint" +) + +// VarDeclarationsRule lints given else constructs. +type VarDeclarationsRule struct{} + +// Apply applies the rule to given file. +func (r *VarDeclarationsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + fileAst := file.AST + walker := &lintVarDeclarations{ + file: file, + fileAst: fileAst, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + file.Pkg.TypeCheck() + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *VarDeclarationsRule) Name() string { + return "var-declaration" +} + +type lintVarDeclarations struct { + fileAst *ast.File + file *lint.File + lastGen *ast.GenDecl + onFailure func(lint.Failure) +} + +func (w *lintVarDeclarations) Visit(node ast.Node) ast.Visitor { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok != token.CONST && v.Tok != token.VAR { + return nil + } + w.lastGen = v + return w + case *ast.ValueSpec: + if w.lastGen.Tok == token.CONST { + return nil + } + if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 { + return nil + } + rhs := v.Values[0] + // An underscore var appears in a common idiom for compile-time interface satisfaction, + // as in "var _ Interface = (*Concrete)(nil)". + if isIdent(v.Names[0], "_") { + return nil + } + // If the RHS is a zero value, suggest dropping it. + zero := false + if lit, ok := rhs.(*ast.BasicLit); ok { + zero = zeroLiteral[lit.Value] + } else if isIdent(rhs, "nil") { + zero = true + } + if zero { + w.onFailure(lint.Failure{ + Confidence: 0.9, + Node: rhs, + Category: "zero-value", + Failure: fmt.Sprintf("should drop = %s from declaration of var %s; it is the zero value", w.file.Render(rhs), v.Names[0]), + }) + return nil + } + lhsTyp := w.file.Pkg.TypeOf(v.Type) + rhsTyp := w.file.Pkg.TypeOf(rhs) + + if !validType(lhsTyp) || !validType(rhsTyp) { + // Type checking failed (often due to missing imports). + return nil + } + + if !types.Identical(lhsTyp, rhsTyp) { + // Assignment to a different type is not redundant. + return nil + } + + // The next three conditions are for suppressing the warning in situations + // where we were unable to typecheck. 
+ + // If the LHS type is an interface, don't warn, since it is probably a + // concrete type on the RHS. Note that our feeble lexical check here + // will only pick up interface{} and other literal interface types; + // that covers most of the cases we care to exclude right now. + if _, ok := v.Type.(*ast.InterfaceType); ok { + return nil + } + // If the RHS is an untyped const, only warn if the LHS type is its default type. + if defType, ok := w.file.IsUntypedConst(rhs); ok && !isIdent(v.Type, defType) { + return nil + } + + w.onFailure(lint.Failure{ + Category: "type-inference", + Confidence: 0.8, + Node: v.Type, + Failure: fmt.Sprintf("should omit type %s from declaration of var %s; it will be inferred from the right-hand side", w.file.Render(v.Type), v.Names[0]), + }) + return nil + } + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/var-naming.go b/vendor/github.com/mgechev/revive/rule/var-naming.go new file mode 100644 index 000000000..768f65b96 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/var-naming.go @@ -0,0 +1,230 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/mgechev/revive/lint" +) + +// VarNamingRule lints given else constructs. +type VarNamingRule struct{} + +// Apply applies the rule to given file. +func (r *VarNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + var failures []lint.Failure + + var whitelist []string + var blacklist []string + + if len(arguments) >= 1 { + whitelist = getList(arguments[0], "whitelist") + } + + if len(arguments) >= 2 { + blacklist = getList(arguments[1], "blacklist") + } + + fileAst := file.AST + walker := lintNames{ + file: file, + fileAst: fileAst, + whitelist: whitelist, + blacklist: blacklist, + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + // Package names need slightly different handling than other names. + if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { + walker.onFailure(lint.Failure{ + Failure: "don't use an underscore in package name", + Confidence: 1, + Node: walker.fileAst, + Category: "naming", + }) + } + + ast.Walk(&walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (r *VarNamingRule) Name() string { + return "var-naming" +} + +func checkList(fl *ast.FieldList, thing string, w *lintNames) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing, w) + } + } +} + +func check(id *ast.Ident, thing string, w *lintNames) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. 
+ if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + w.onFailure(lint.Failure{ + Failure: "don't use ALL_CAPS in Go names; use CamelCase", + Confidence: 0.8, + Node: id, + Category: "naming", + }) + return + } + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("don't use leading k in Go names; %s %s should be %s", thing, id.Name, should), + Confidence: 0.8, + Node: id, + Category: "naming", + }) + } + + should := lint.Name(id.Name, w.whitelist, w.blacklist) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("don't use underscores in Go names; %s %s should be %s", thing, id.Name, should), + Confidence: 0.9, + Node: id, + Category: "naming", + }) + return + } + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("%s %s should be %s", thing, id.Name, should), + Confidence: 0.8, + Node: id, + Category: "naming", + }) +} + +type lintNames struct { + file *lint.File + fileAst *ast.File + lastGen *ast.GenDecl + genDeclMissingComments map[*ast.GenDecl]bool + onFailure func(lint.Failure) + whitelist []string + blacklist []string +} + +func (w *lintNames) Visit(n ast.Node) ast.Visitor { + switch v := n.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return w + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var", w) + } + } + case *ast.FuncDecl: + if w.file.IsTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return w + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. + if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing, w) + } + + checkList(v.Type.Params, thing+" parameter", w) + checkList(v.Type.Results, thing+" result", w) + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return w + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing, w) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing, w) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter", w) + checkList(ft.Results, "interface method result", w) + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return w + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var", w) + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var", w) + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field", w) + } + } + } + return w +} + +func getList(arg interface{}, argName string) []string { + temp, ok := arg.([]interface{}) + if !ok { + panic(fmt.Sprintf("Invalid argument to the var-naming rule. 
Expecting a %s of type slice with initialisms, got %T", argName, arg)) + } + var list []string + for _, v := range temp { + if val, ok := v.(string); ok { + list = append(list, val) + } else { + panic(fmt.Sprintf("Invalid %s values of the var-naming rule. Expecting slice of strings but got element of type %T", val, arg)) + } + } + return list +} diff --git a/vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go b/vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go new file mode 100644 index 000000000..b86929136 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go @@ -0,0 +1,66 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// WaitGroupByValueRule lints sync.WaitGroup passed by copy in functions. +type WaitGroupByValueRule struct{} + +// Apply applies the rule to given file. +func (r *WaitGroupByValueRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := lintWaitGroupByValueRule{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. +func (r *WaitGroupByValueRule) Name() string { + return "waitgroup-by-value" +} + +type lintWaitGroupByValueRule struct { + onFailure func(lint.Failure) +} + +func (w lintWaitGroupByValueRule) Visit(node ast.Node) ast.Visitor { + // look for function declarations + fd, ok := node.(*ast.FuncDecl) + if !ok { + return w + } + + // Check all function's parameters + for _, field := range fd.Type.Params.List { + if !w.isWaitGroup(field.Type) { + continue + } + + w.onFailure(lint.Failure{ + Confidence: 1, + Node: field, + Failure: "sync.WaitGroup passed by value, the function will get a copy of the original one", + }) + } + + return nil +} + +func (lintWaitGroupByValueRule) isWaitGroup(ft ast.Expr) bool { + se, ok := ft.(*ast.SelectorExpr) + if !ok { + return false + } + + x, _ := se.X.(*ast.Ident) + sel := se.Sel.Name + return x.Name == "sync" && sel == "WaitGroup" +} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 000000000..1955f2878 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,73 @@ +## unreleased + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. 
+* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..0018dc7d9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. 
+The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..92e6f76ff --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,256 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + var data interface{} + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. 
+func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
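// Illustrative sketch, not part of the vendored upstream file: hooks such as the ones
// defined in this file are typically composed and handed to a decoder through
// DecoderConfig.DecodeHook. The wiring below assumes this package's
// DecoderConfig/NewDecoder API, with input and target as caller-provided placeholders:
//
//	cfg := &mapstructure.DecoderConfig{
//		DecodeHook: mapstructure.ComposeDecodeHookFunc(
//			mapstructure.StringToTimeDurationHookFunc(),
//			mapstructure.StringToSliceHookFunc(","),
//		),
//		Result: &target,
//	}
//	dec, err := mapstructure.NewDecoder(cfg)
//	if err == nil {
//		err = dec.Decode(input)
//	}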
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) 
+ default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod new file mode 100644 index 000000000..a03ae9730 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/mapstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..3643901f5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1462 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. 
+// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. 
+type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. 
+ outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + 
return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
+ v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + keyName = tagValue[:index] + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
+ + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/moricho/tparallel/.gitignore b/vendor/github.com/moricho/tparallel/.gitignore new file mode 100644 index 000000000..71342280e --- /dev/null +++ b/vendor/github.com/moricho/tparallel/.gitignore @@ -0,0 +1,3 @@ +/tparallel +.envrc +/dist diff --git a/vendor/github.com/moricho/tparallel/.goreleaser.yml b/vendor/github.com/moricho/tparallel/.goreleaser.yml new file mode 100644 index 000000000..e9f6d727e --- /dev/null +++ b/vendor/github.com/moricho/tparallel/.goreleaser.yml @@ -0,0 +1,38 @@ +project_name: tparallel +env: + - GO111MODULE=on +before: + hooks: + - go mod tidy +builds: + - main: ./cmd/tparallel + binary: tparallel + ldflags: + - -s -w + - -X main.Version={{.Version}} + - -X main.Revision={{.ShortCommit}} + env: + - CGO_ENABLED=0 +archives: + - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + replacements: + darwin: darwin + linux: linux + windows: windows + 386: i386 + amd64: x86_64 + format_overrides: + - goos: windows + format: zip +release: + prerelease: auto +brews: + - tap: + owner: moricho + name: homebrew-tparallel + homepage: https://github.com/moricho/tparallel + description: tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + install: | + bin.install "tparallel" + test: | + system "#{bin}/goreleaser -v" diff --git a/vendor/github.com/moricho/tparallel/LICENSE b/vendor/github.com/moricho/tparallel/LICENSE new file mode 100644 index 000000000..4f029982f --- /dev/null +++ b/vendor/github.com/moricho/tparallel/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 moricho + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/moricho/tparallel/Makefile b/vendor/github.com/moricho/tparallel/Makefile new file mode 100644 index 000000000..fb3588069 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/Makefile @@ -0,0 +1,13 @@ +all: build + +.PHONY: build +build: + go build -o tparallel ./cmd/tparallel + +.PHONY: build_race +build_race: + go build -race -o tparallel ./cmd/tparallel + +.PHONY: test +test: build_race + go test -v ./... diff --git a/vendor/github.com/moricho/tparallel/README.md b/vendor/github.com/moricho/tparallel/README.md new file mode 100644 index 000000000..cd358d155 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/README.md @@ -0,0 +1,100 @@ +# tparallel +[![tparallel](https://github.com/moricho/tparallel/workflows/tparallel/badge.svg?branch=master)](https://github.com/moricho/tparallel/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/moricho/tparallel)](https://goreportcard.com/report/github.com/moricho/tparallel) +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) + +`tparallel` finds inappropriate usage of `t.Parallel()` method in your Go test codes. +It detects the following: +- `t.Parallel()` is called in either a top-level test function or a sub-test function only +- Although `t.Parallel()` is called in the sub-test function, it is post-processed by `defer` instead of `t.Cleanup()` + +This tool was inspired by this blog: [Go言語でのテストの並列化 〜t.Parallel()メソッドを理解する〜](https://engineering.mercari.com/blog/entry/how_to_use_t_parallel/) + +## Installation + +### From GitHub Releases +Please see [GitHub Releases](https://github.com/moricho/tparallel/releases). 
+Available binaries are: +- macOS +- Linux +- Windows + +### macOS +``` sh +$ brew tap moricho/tparallel +$ brew install tparallel +``` + +### go get +```sh +$ go get -u github.com/moricho/tparallel/cmd/tparallel +``` + +## Usage + +```sh +$ go vet -vettool=`which tparallel` +``` + +## Example + +```go +package sample + +import ( + "testing" +) + +func Test_Table1(t *testing.T) { + teardown := setup("Test_Table1") + defer teardown() + + tests := []struct { + name string + }{ + { + name: "Table1_Sub1", + }, + { + name: "Table1_Sub2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + call(tt.name) + }) + } +} + +func Test_Table2(t *testing.T) { + teardown := setup("Test_Table2") + t.Cleanup(teardown) + t.Parallel() + + tests := []struct { + name string + }{ + { + name: "Table2_Sub1", + }, + { + name: "Table2_Sub2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + call(tt.name) + }) + } +} +``` + +```console +# github.com/moricho/tparallel/testdata/src/sample +testdata/src/sample/table_test.go:7:6: Test_Table1 should use t.Cleanup +testdata/src/sample/table_test.go:7:6: Test_Table1 should call t.Parallel on the top level as well as its subtests +testdata/src/sample/table_test.go:30:6: Test_Table2's subtests should call t.Parallel +``` diff --git a/vendor/github.com/moricho/tparallel/go.mod b/vendor/github.com/moricho/tparallel/go.mod new file mode 100644 index 000000000..9947ccb60 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/go.mod @@ -0,0 +1,8 @@ +module github.com/moricho/tparallel + +go 1.15 + +require ( + github.com/gostaticanalysis/analysisutil v0.1.0 + golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65 +) diff --git a/vendor/github.com/moricho/tparallel/go.sum b/vendor/github.com/moricho/tparallel/go.sum new file mode 100644 index 000000000..bcc4158da --- /dev/null +++ b/vendor/github.com/moricho/tparallel/go.sum @@ -0,0 +1,34 @@ +github.com/gostaticanalysis/analysisutil v0.1.0 h1:E4c8Y1EQURbBEAHoXc/jBTK7Np14ArT8NPUiSFOl9yc= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65 h1:DajXNh69ob79PCQz1N7OHxmqq6ASZC5xAnJJWIQGR6I= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/moricho/tparallel/pkg/ssafunc/ssafunc.go b/vendor/github.com/moricho/tparallel/pkg/ssafunc/ssafunc.go new file mode 100644 index 000000000..5a8e637bd --- /dev/null +++ b/vendor/github.com/moricho/tparallel/pkg/ssafunc/ssafunc.go @@ -0,0 +1,34 @@ +package ssafunc + +import ( + "go/types" + + "github.com/gostaticanalysis/analysisutil" + "github.com/moricho/tparallel/pkg/ssainstr" + "golang.org/x/tools/go/ssa" +) + +// IsDeferCalled returns whether the given ssa.Function calls `defer` +func IsDeferCalled(f *ssa.Function) bool { + for _, block := range f.Blocks { + for _, instr := range block.Instrs { + switch instr.(type) { + case *ssa.Defer: + return true + } + } + } + return false +} + +// IsCalled returns whether the given ssa.Function calls `fn` func +func IsCalled(f *ssa.Function, fn *types.Func) bool { + block := f.Blocks[0] + for _, instr := range block.Instrs { + called := analysisutil.Called(instr, nil, fn) + if _, ok := ssainstr.LookupCalled(instr, fn); ok || called { + return true + } + } + return false +} diff --git a/vendor/github.com/moricho/tparallel/pkg/ssainstr/ssainstr.go b/vendor/github.com/moricho/tparallel/pkg/ssainstr/ssainstr.go new file mode 100644 index 000000000..374553f5e --- /dev/null +++ b/vendor/github.com/moricho/tparallel/pkg/ssainstr/ssainstr.go @@ -0,0 +1,63 @@ +package ssainstr + +import ( + "go/types" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/ssa" +) + +// LookupCalled looks up ssa.Instruction that call the `fn` func in the given instr +func LookupCalled(instr ssa.Instruction, fn *types.Func) ([]ssa.Instruction, 
bool) { + instrs := []ssa.Instruction{} + + call, ok := instr.(ssa.CallInstruction) + if !ok { + return instrs, false + } + + ssaCall := call.Value() + if ssaCall == nil { + return instrs, false + } + common := ssaCall.Common() + if common == nil { + return instrs, false + } + val := common.Value + + called := false + switch fnval := val.(type) { + case *ssa.Function: + for _, block := range fnval.Blocks { + for _, instr := range block.Instrs { + if analysisutil.Called(instr, nil, fn) { + called = true + instrs = append(instrs, instr) + } + } + } + } + + return instrs, called +} + +// HasArgs returns whether the given ssa.Instruction has `typ` type args +func HasArgs(instr ssa.Instruction, typ types.Type) bool { + call, ok := instr.(ssa.CallInstruction) + if !ok { + return false + } + + ssaCall := call.Value() + if ssaCall == nil { + return false + } + + for _, arg := range ssaCall.Call.Args { + if types.Identical(arg.Type(), typ) { + return true + } + } + return false +} diff --git a/vendor/github.com/moricho/tparallel/testmap.go b/vendor/github.com/moricho/tparallel/testmap.go new file mode 100644 index 000000000..fa9bed708 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/testmap.go @@ -0,0 +1,63 @@ +package tparallel + +import ( + "go/types" + "strings" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" + + "github.com/moricho/tparallel/pkg/ssainstr" +) + +// getTestMap gets a set of a top-level test and its sub-tests +func getTestMap(ssaanalyzer *buildssa.SSA, testTyp types.Type) map[*ssa.Function][]*ssa.Function { + testMap := map[*ssa.Function][]*ssa.Function{} + + trun := analysisutil.MethodOf(testTyp, "Run") + for _, f := range ssaanalyzer.SrcFuncs { + if !strings.HasPrefix(f.Name(), "Test") || !(f.Parent() == (*ssa.Function)(nil)) { + continue + } + testMap[f] = []*ssa.Function{} + for _, block := range f.Blocks { + for _, instr := range block.Instrs { + called := analysisutil.Called(instr, nil, trun) + + if !called && ssainstr.HasArgs(instr, types.NewPointer(testTyp)) { + if instrs, ok := ssainstr.LookupCalled(instr, trun); ok { + for _, v := range instrs { + testMap[f] = appendTestMap(testMap[f], v) + } + } + } else if called { + testMap[f] = appendTestMap(testMap[f], instr) + } + } + } + } + + return testMap +} + +// appendTestMap converts ssa.Instruction to ssa.Function and append it to a given sub-test slice +func appendTestMap(subtests []*ssa.Function, instr ssa.Instruction) []*ssa.Function { + call, ok := instr.(ssa.CallInstruction) + if !ok { + return subtests + } + + ssaCall := call.Value() + for _, arg := range ssaCall.Call.Args { + switch arg := arg.(type) { + case *ssa.Function: + subtests = append(subtests, arg) + case *ssa.MakeClosure: + fn, _ := arg.Fn.(*ssa.Function) + subtests = append(subtests, fn) + } + } + + return subtests +} diff --git a/vendor/github.com/moricho/tparallel/tparallel.go b/vendor/github.com/moricho/tparallel/tparallel.go new file mode 100644 index 000000000..3139e0425 --- /dev/null +++ b/vendor/github.com/moricho/tparallel/tparallel.go @@ -0,0 +1,72 @@ +package tparallel + +import ( + "go/types" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + + "github.com/moricho/tparallel/pkg/ssafunc" +) + +const doc = "tparallel detects inappropriate usage of t.Parallel() method in your Go test codes." 
+ +// Analyzer analyzes Go test codes whether they use t.Parallel() appropriately +// by using SSA (Single Static Assignment) +var Analyzer = &analysis.Analyzer{ + Name: "tparallel", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ssaanalyzer := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + + obj := analysisutil.ObjectOf(pass, "testing", "T") + if obj == nil { + // skip checking + return nil, nil + } + testTyp, testPkg := obj.Type(), obj.Pkg() + + p, _, _ := types.LookupFieldOrMethod(testTyp, true, testPkg, "Parallel") + parallel, _ := p.(*types.Func) + c, _, _ := types.LookupFieldOrMethod(testTyp, true, testPkg, "Cleanup") + cleanup, _ := c.(*types.Func) + + testMap := getTestMap(ssaanalyzer, testTyp) // ex. {Test1: [TestSub1, TestSub2], Test2: [TestSub1, TestSub2, TestSub3], ...} + for top, subs := range testMap { + if len(subs) == 0 { + continue + } + isParallelTop := ssafunc.IsCalled(top, parallel) + isPararellSub := false + for _, sub := range subs { + isPararellSub = ssafunc.IsCalled(sub, parallel) + if isPararellSub { + break + } + } + + if ssafunc.IsDeferCalled(top) { + useCleanup := ssafunc.IsCalled(top, cleanup) + if isPararellSub && !useCleanup { + pass.Reportf(top.Pos(), "%s should use t.Cleanup instead of defer", top.Name()) + } + } + + if isParallelTop == isPararellSub { + continue + } else if isPararellSub { + pass.Reportf(top.Pos(), "%s should call t.Parallel on the top level as well as its subtests", top.Name()) + } else if isParallelTop { + pass.Reportf(top.Pos(), "%s's subtests should call t.Parallel", top.Name()) + } + } + + return nil, nil +} diff --git a/vendor/github.com/nakabonne/nestif/.gitignore b/vendor/github.com/nakabonne/nestif/.gitignore new file mode 100644 index 000000000..df71a2ac7 --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +/nestif + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/nakabonne/nestif/LICENSE b/vendor/github.com/nakabonne/nestif/LICENSE new file mode 100644 index 000000000..ddf4d71ed --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2020, Ryo Nakao +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/nakabonne/nestif/README.md b/vendor/github.com/nakabonne/nestif/README.md new file mode 100644 index 000000000..ede411f73 --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/README.md @@ -0,0 +1,122 @@ +# nestif + +[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](http://godoc.org/github.com/nakabonne/nestif) + +Reports deeply nested if statements in Go code, by calculating its complexities based on the rules defined by the [Cognitive Complexity white paper by G. Ann Campbell](https://www.sonarsource.com/docs/CognitiveComplexity.pdf). + +It helps you find if statements that make your code hard to read, and clarifies which parts to refactor. + +## Installation + +``` +go get github.com/nakabonne/nestif/cmd/nestif +``` + +## Usage + +### Quick Start + +```bash +nestif +``` + +The `...` glob operator is supported, and the above is an equivalent of: + +```bash +nestif ./... +``` + +One or more files and directories can be specified in a single command: + +```bash +nestif dir/foo.go dir2 dir3/... +``` + +Packages can be specified as well: + +```bash +nestif github.com/foo/bar example.com/bar/baz +``` + +### Options + +``` +usage: nestif [ ...] ... + -e, --exclude-dirs strings regexps of directories to be excluded for checking; comma-separated list + --json emit json format + --min int minimum complexity to show (default 1) + --top int show only the top N most complex if statements (default 10) + -v, --verbose verbose output +``` + +### Example + +Let's say you write: + +```go +package main + +func _() { + if foo { + if bar { + } + } + + if baz == "baz" { + if qux { + if quux { + } + } + } +} +``` + +And give it to nestif: + +```console +$ nestif foo.go +foo.go:9:2: `if baz == "baz"` is nested (complexity: 3) +foo.go:4:2: `if foo` is nested (complexity: 1) +``` + +Note that the results are sorted in descending order of complexity. In addition, it shows only the top 10 most complex if statements by default, and you can specify how many to show with `-top` flag. + +### Rules + +It calculates the complexities of if statements according to the nesting rules of Cognitive Complexity. 
+Since the more deeply-nested your code gets, the harder it can be to reason about, it assesses a nesting increment for it: + +```go +if condition1 { + if condition2 { // +1 + if condition3 { // +2 + if condition4 { // +3 + } + } + } +} +``` + +`else` and `else if` increase complexity by one wherever they are because the mental cost has already been paid when reading the if: + +```go +if condition1 { + if condition2 { // +1 + if condition3 { // +2 + } else if condition4 { // +1 + } else { // +1 + if condition5 { // +3 + } + } + } +} +``` + +## Inspired by + +- [uudashr/gocognit](https://github.com/uudashr/gocognit) +- [fzipp/gocyclo](https://github.com/fzipp/gocyclo) + +## Further reading + +Please see the [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell for more detail on Cognitive Complexity. diff --git a/vendor/github.com/nakabonne/nestif/go.mod b/vendor/github.com/nakabonne/nestif/go.mod new file mode 100644 index 000000000..325901d59 --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/go.mod @@ -0,0 +1,8 @@ +module github.com/nakabonne/nestif + +go 1.13 + +require ( + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.4.0 +) diff --git a/vendor/github.com/nakabonne/nestif/go.sum b/vendor/github.com/nakabonne/nestif/go.sum new file mode 100644 index 000000000..6d790ef35 --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/nakabonne/nestif/nestif.go b/vendor/github.com/nakabonne/nestif/nestif.go new file mode 100644 index 000000000..d458022fb --- /dev/null +++ b/vendor/github.com/nakabonne/nestif/nestif.go @@ -0,0 +1,148 @@ +// Copyright 2020 Ryo Nakao . +// +// All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nestif provides an API to detect deeply nested if statements. +package nestif + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "io" +) + +// Issue represents an issue of root if statement that has nested ifs. +type Issue struct { + Pos token.Position + Complexity int + Message string +} + +// Checker represents a checker that finds nested if statements. +type Checker struct { + // Minimum complexity to report. + MinComplexity int + + // For debug mode. + debugWriter io.Writer + issues []Issue +} + +// Check inspects a single file and returns found issues. 
+func (c *Checker) Check(f *ast.File, fset *token.FileSet) []Issue { + c.issues = []Issue{} // refresh + ast.Inspect(f, func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Body == nil { + return true + } + for _, stmt := range fn.Body.List { + c.checkFunc(&stmt, fset) + } + return true + }) + + return c.issues +} + +// checkFunc inspects a function and sets a list of issues if there are. +func (c *Checker) checkFunc(stmt *ast.Stmt, fset *token.FileSet) { + ast.Inspect(*stmt, func(n ast.Node) bool { + ifStmt, ok := n.(*ast.IfStmt) + if !ok { + return true + } + + c.checkIf(ifStmt, fset) + return false + }) +} + +// checkIf inspects a if statement and sets an issue if there is. +func (c *Checker) checkIf(stmt *ast.IfStmt, fset *token.FileSet) { + v := newVisitor() + ast.Walk(v, stmt) + if v.complexity < c.MinComplexity { + return + } + pos := fset.Position(stmt.Pos()) + c.issues = append(c.issues, Issue{ + Pos: pos, + Complexity: v.complexity, + Message: c.makeMessage(v.complexity, stmt.Cond, fset), + }) +} + +type visitor struct { + complexity int + nesting int + // To avoid adding complexity including nesting level to `else if`. + elseifs map[*ast.IfStmt]bool +} + +func newVisitor() *visitor { + return &visitor{ + elseifs: make(map[*ast.IfStmt]bool), + } +} + +// Visit traverses an AST in depth-first order by calling itself +// recursively, and calculates the complexities of if statements. +func (v *visitor) Visit(n ast.Node) ast.Visitor { + ifStmt, ok := n.(*ast.IfStmt) + if !ok { + return v + } + + v.incComplexity(ifStmt) + v.nesting++ + ast.Walk(v, ifStmt.Body) + v.nesting-- + + switch t := ifStmt.Else.(type) { + case *ast.BlockStmt: + v.complexity++ + v.nesting++ + ast.Walk(v, t) + v.nesting-- + case *ast.IfStmt: + v.elseifs[t] = true + ast.Walk(v, t) + } + + return nil +} + +func (v *visitor) incComplexity(n *ast.IfStmt) { + // In case of `else if`, increase by 1. + if v.elseifs[n] { + v.complexity++ + } else { + v.complexity += v.nesting + } +} + +func (c *Checker) makeMessage(complexity int, cond ast.Expr, fset *token.FileSet) string { + p := &printer.Config{} + b := new(bytes.Buffer) + if err := p.Fprint(b, fset, cond); err != nil { + c.debug("failed to convert condition into string: %v", err) + } + return fmt.Sprintf("`if %s` is deeply nested (complexity: %d)", b.String(), complexity) +} + +// DebugMode makes it possible to emit debug logs. +func (c *Checker) DebugMode(w io.Writer) { + c.debugWriter = w +} + +func (c *Checker) debug(format string, a ...interface{}) { + if c.debugWriter != nil { + fmt.Fprintf(c.debugWriter, format, a...) 
+ } +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/.gitignore b/vendor/github.com/nbutton23/zxcvbn-go/.gitignore new file mode 100644 index 000000000..4bff1a28e --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/.gitignore @@ -0,0 +1,2 @@ +zxcvbn +debug.test diff --git a/vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt b/vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt new file mode 100644 index 000000000..e8f59e06d --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) Nathan Button + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nbutton23/zxcvbn-go/Makefile b/vendor/github.com/nbutton23/zxcvbn-go/Makefile new file mode 100644 index 000000000..6aa13e006 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/Makefile @@ -0,0 +1,15 @@ +PKG_LIST = $$( go list ./... | grep -v /vendor/ | grep -v "zxcvbn-go/data" ) + +.DEFAULT_GOAL := help + +.PHONY: help +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: test +test: ## Run `go test {Package list}` on the packages + go test $(PKG_LIST) + +.PHONY: lint +lint: ## Run `golint {Package list}` + golint $(PKG_LIST) \ No newline at end of file diff --git a/vendor/github.com/nbutton23/zxcvbn-go/README.md b/vendor/github.com/nbutton23/zxcvbn-go/README.md new file mode 100644 index 000000000..3f742a9da --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/README.md @@ -0,0 +1,78 @@ +This is a Go port of python-zxcvbn and [zxcvbn](https://github.com/dropbox/zxcvbn), which are Python and JavaScript password strength +generators. zxcvbn attempts to give sound password advice through pattern +matching and conservative entropy calculations. It finds 10k common passwords, +common American names and surnames, common English words, and common patterns +like dates, repeats (aaa), sequences (abcd), and QWERTY patterns. + +Please refer to https://dropbox.tech/security/zxcvbn-realistic-password-strength-estimation for the full details and +motivation behind zxcvbn.
The source code for the original JavaScript (well, actually CoffeeScript) implementation can be found at: + +https://github.com/lowe/zxcvbn + +Python at: + +https://github.com/dropbox/python-zxcvbn + +For full motivation, see: + +https://dropbox.tech/security/zxcvbn-realistic-password-strength-estimation + +------------------------------------------------------------------------ +Use +------------------------------------------------------------------------ + +The zxcvbn module provides the public PasswordStrength() function. Import zxcvbn, and +call PasswordStrength(password string, userInputs []string). The function returns a +result struct with the following fields: + +Entropy # bits + +CrackTime # estimation of actual crack time, in seconds. + +CrackTimeDisplay # same crack time, as a friendlier string: + # "instant", "6 minutes", "centuries", etc. + +Score # [0,1,2,3,4] if crack time is less than + # [10^2, 10^4, 10^6, 10^8, Infinity]. + # (useful for implementing a strength bar.) + +MatchSequence # the list of patterns that zxcvbn based the + # entropy calculation on. + +CalcTime # how long it took to calculate an answer, + # in milliseconds. usually only a few ms. + +The userInputs argument is a slice of strings that zxcvbn +will add to its internal dictionary. This can be whatever list of +strings you like, but is meant for user inputs from other fields of the +form, like name and email. That way a password that includes the user's +personal info can be heavily penalized. This list is also good for +site-specific vocabulary. + +Bug reports and pull requests welcome! + +------------------------------------------------------------------------ +Project Status +------------------------------------------------------------------------ + +Use zxcvbn_test.go to check how close the project is to feature parity. + +------------------------------------------------------------------------ +Acknowledgment +------------------------------------------------------------------------ + +Thanks to Dan Wheeler (https://github.com/lowe) for the CoffeeScript implementation +(see above.)
To repeat his outside acknowledgements (which remain useful, as always): + +Many thanks to Mark Burnett for releasing his 10k top passwords list: +https://xato.net/passwords/more-top-worst-passwords +and for his 2006 book, +"Perfect Passwords: Selection, Protection, Authentication" + +Huge thanks to Wiktionary contributors for building a frequency list +of English as used in television and movies: +https://en.wiktionary.org/wiki/Wiktionary:Frequency_lists + +Last but not least, big thanks to xkcd :) +https://xkcd.com/936/ diff --git a/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go b/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go new file mode 100644 index 000000000..66ad30b82 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go @@ -0,0 +1,108 @@ +package adjacency + +import ( + "encoding/json" + "log" + + "github.com/nbutton23/zxcvbn-go/data" +) + +// Graph holds information about different graphs +type Graph struct { + Graph map[string][]string + averageDegree float64 + Name string +} + +// GraphMap is a map of all graphs +var GraphMap = make(map[string]Graph) + +func init() { + GraphMap["qwerty"] = BuildQwerty() + GraphMap["dvorak"] = BuildDvorak() + GraphMap["keypad"] = BuildKeypad() + GraphMap["macKeypad"] = BuildMacKeypad() + GraphMap["l33t"] = BuildLeet() +} + +//BuildQwerty builds the Qwerty Graph +func BuildQwerty() Graph { + data, err := data.Asset("data/Qwerty.json") + if err != nil { + panic("Can't find asset") + } + return getAdjancencyGraphFromFile(data, "qwerty") +} + +//BuildDvorak builds the Dvorak Graph +func BuildDvorak() Graph { + data, err := data.Asset("data/Dvorak.json") + if err != nil { + panic("Can't find asset") + } + return getAdjancencyGraphFromFile(data, "dvorak") +} + +//BuildKeypad builds the Keypad Graph +func BuildKeypad() Graph { + data, err := data.Asset("data/Keypad.json") + if err != nil { + panic("Can't find asset") + } + return getAdjancencyGraphFromFile(data, "keypad") +} + +//BuildMacKeypad builds the Mac Keypad Graph +func BuildMacKeypad() Graph { + data, err := data.Asset("data/MacKeypad.json") + if err != nil { + panic("Can't find asset") + } + return getAdjancencyGraphFromFile(data, "mac_keypad") +} + +//BuildLeet builds the L33T Graph +func BuildLeet() Graph { + data, err := data.Asset("data/L33t.json") + if err != nil { + panic("Can't find asset") + } + return getAdjancencyGraphFromFile(data, "keypad") +} + +func getAdjancencyGraphFromFile(data []byte, name string) Graph { + + var graph Graph + err := json.Unmarshal(data, &graph) + if err != nil { + log.Fatal(err) + } + graph.Name = name + return graph +} + +// CalculateAvgDegree calclates the average degree between nodes in the graph +//on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1. +//this calculates the average over all keys. 
+//TODO double check that i ported this correctly scoring.coffee ln 5 +func (adjGrp Graph) CalculateAvgDegree() float64 { + if adjGrp.averageDegree != float64(0) { + return adjGrp.averageDegree + } + var avg float64 + var count float64 + for _, value := range adjGrp.Graph { + + for _, char := range value { + if len(char) != 0 || char != " " { + avg += float64(len(char)) + count++ + } + } + + } + + adjGrp.averageDegree = avg / count + + return adjGrp.averageDegree +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go b/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go new file mode 100644 index 000000000..f3a0c010c --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go @@ -0,0 +1,444 @@ +// Code generated by go-bindata. +// sources: +// data/Dvorak.json +// data/English.json +// data/FemaleNames.json +// data/Keypad.json +// data/L33t.json +// data/MacKeypad.json +// data/MaleNames.json +// data/Passwords.json +// data/Qwerty.json +// data/Surnames.json +// DO NOT EDIT! + +package data + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dataDvorakJson = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xb4\x98\x57\x57\x1b\x41\x0c\x85\xdf\xf9\x15\xb0\x74\x30\xbd\xf7\xde\x7b\x6f\xa6\x77\x30\xbd\x17\xf3\xdb\x33\x26\x39\x99\xef\x9e\x78\x96\x7d\x88\x5e\x72\xc6\x61\xf9\xa4\x95\xae\x34\xd7\x7c\x14\x14\x16\x46\x63\xf7\xfb\xb7\x67\x51\x67\x61\xee\x83\xfb\x58\xef\x8e\x5b\xdf\x47\xf7\xa1\xa3\x22\x4a\xfd\x39\x5f\x3f\x65\x32\xf9\xce\xd1\xd6\xc7\xdf\x67\xa2\xcc\xb4\x3f\xdf\x2f\x46\xdf\xc7\xed\xdf\xff\x13\x35\x10\xbc\xf7\xf5\x33\xb8\xb1\xdf\xc3\xca\xd3\x91\xfc\x82\x90\x1b\x49\x6e\x28\xfa\x99\xdc\x54\xec\xc9\xa9\x6e\x8d\x22\xe4\x26\x92\x91\x4f\x90\xdc\x5c\xe2\x69\xb5\xbd\x12\x45\xc0\xcd\x04\x23\x9d\x20\xb8\xa5\xd4\xc3\x6e\xe7\x25\x88\x80\x5b\x08\x46\x36\x41\x70\xeb\x8e\x87\xbd\x6d\x48\x10\x01\xb7\x12\x8c\x6c\x82\xe0\xb6\x32\x0f\x3b\x19\x95\x20\x02\x6e\x23\x18\xd9\x04\xc1\xed\x55\x1e\x76\x3a\x26\x41\x04\xdc\x4e\x30\xb2\x09\x82\xa1\xf6\xe8\x70\x48\x82\x08\xb8\x83\x60\x64\x13\x04\xd7\x57\xca\x58\x30\x88\x80\x8b\xcc\x46\xc4\xfd\xcc\xa3\x05\x81\x79\x11\x1c\xe7\x62\x7f\x20\x4c\x2e\xb6\x1a\x91\x12\xab\x11\x29\xb5\x1a\x91\x32\x2b\x25\x97\x5b\x35\xaf\xc2\x4a\xc9\x95\x56\xb7\x48\x95\xd5\x50\x57\x13\x5c\xd7\xe7\x1f\xdc\xce\xe6\x0d\x12\xa5\xd3\x9f\xf9\x7f\x50\xb3\xab\xe4\x14\xc9\x9c\x52\x69\x19\xef\x24\x8e\xc5\xcd\x9c\xb4\x52\xc8\x35\x24\x3f\x2c\xf9\x07\x99\x7f\x4f\xf5\xcf\x45\x7a\xdf\x54\x70\x2d\xc1\x14\x13\xb3\xe4\x20\x73\xde\x8e\x47\x24\x7b\x01\xd7\x11\xcc\x3e\xb3\xff\xa8\x38\xb3\xcf\x15\x36\x85\xb7\x15\x70\x67\x68\x44\x20\x7f\xa9\xe5\xdd\x42\xb0\x2c\x02\xee\xb2\x02\x77\x9b\xc9\xa2\xc7\x4c\xca\xbd\x56\xba\xe8\xb3\xd2\x45\xbf\x99\x37\x1c\x08\x09\x43\x7a\x49\x04\x7b\xd6\xd5\x19\xde\xca\x83\xcc\xf9\x75\xdd\xff\xd2\xd1\xb0\x3f\x9f\x8d\xfb\xf3\xd5\x4c\x32\xc9\x0d\x11\xcc\x0b\x87\x17\x11\x17\x26\x57\xfc\xe3\xb2\x04\x17\xf0\x30\xc1\xe7\x13\xf9\x8d\x1f\x03\x32\xfb\x83\x41\x7f\x76\x6f\x2b\xe0\x11\x82\x59\x3f\xce\x02\x45\xf6\xb4\xe2\xcf\x17\x93\x32\x95\x02\x1e\x25\x98\x97\x3a\x2f\x7b\x5a\x58\x66\xcf\x3e\xb8\xb7\x15\xf0\x18\xc1\x7c\x7d\xc2\x58\x6f\x5e\x4a\x2c\x8b\x0b\x22\xe0\x71\x82\x99\x01\x33\x23\x8c\x0d\x83\x42\x72\xf5\x16\xf0\x04\xc1\xac\x1f\xcb\xc2\x37\x61\x70\xca\xf3\x72\x4a\xc1\x93\x04\x63\x2d\xca\x26\x60\x40\x07\x48\xa4\xe3\x29\x82\xd9\x65\xc2\x28\x43\x64\x19\x0b\x9e\x26\x98\xe2\xa7\xef\xe1\x4a\xe2\x76\xe5\x05\x7c\x3d\xab\xa5\x98\x21\x98\x82\x67\xc7\xd9\xb0\x97\xb5\x64\x19\xcf\x12\x4c\x00\xb3\xe7\x42\x65\x96\xcf\xab\x12\x50\xc0\x73\x04\xf3\xae\xe3\x46\xe3\x14\xb2\xa9\x6c\xb6\xdb\x74\x02\x9e\x27\x98\x00\xde\x1b\xf4\xf2\x94\x21\x1b\xec\x02\x0a\x78\x81\x60\xec\x57\xd9\x1b\xcc\x12\xca\x89\xad\xf1\x22\xc1\x9c\x30\xae\x4a\x2a\x84\xf5\x76\x4a\x60\x83\x05\xbc\x44\x30\x1f\x24\x80\x12\xe3\x4d\xe7\x3c\x1b\x1b\x29\xe0\x65\x82\x29\x31\x66\x4f\x85\x30\x38\xa4\x97\xdb\x1b\x02\x5e\x21\x98\xb5\xe4\x0e\x66\xc3\x38\x85\x18\xef\x5c\xed\x05\xbc\x4a\x30\x33\x60\x66\xd4\x2e\x5e\x3f\xb6\x79\x6b\x04\x73\x0d\x72\x58\x18\x04\x43\x11\x0b\x5e\x27\x98\xaf\xc6\x57\xe6\xaa\xc4\xd8\xc7\x82\x37\x08\x66\x5d\x39\x14\xbc\xa6\xb8\x9b\x19\xdc\x0d\x8b\x80\x37\x09\xe6\xec\xb3\xae\x90\x58\xc8\x23\xfd\x93\xf1\x16\xc1\x18\x84\xa0\xc9\xa2\x93\xa3\xbe\x9d\xee\x05\x9c\x4e\x93\x9c\xe0\x9b\x4c\xe2\x94\xb7\x09\xc6\x46\x4f\x02\x13\xf3\xe9\xd2\x17\xf0\x8e\xd5\xdf\xc9\x76\xad\xbe\xec\xed\x05\x8c\x6c\x10\x10\x63\xa3\x05\xbc\x6f\xe6\x90\x0f\xac\x1c\xf2\xa1\x95\x43\x3e\xb2\x72\xc8\xc7\x56\x0e\xf9\xc4\xca\x21\x9f\x5a\x39\xe4\x33\x2b\x87\x7c\x6e\xe5\x90\x2f\xac\x1c\xf2\xa5\x95\x43\xce\x58\x39\xe4\x2b\x2b\x87\x7c\x6d\xe5\x90\x6f\xac\x1c\xf2\xad\x95\x43\xbe\xb3\x72\xc8\xf7\x56\x0e\xf9\xc1\xca\x21\x3f\x5a\x39\xe4\x27\x2b\x87\xf
c\x6c\xe5\x90\x5f\xac\x1c\xf2\xab\x95\x43\x7e\xb3\x72\xc8\xef\x56\x0e\xf9\xc3\xca\x21\x7f\x5a\x19\xe4\xac\x95\x41\xfe\xfa\xef\x76\xd3\xfd\x9b\x2d\xc8\x16\xfc\x0a\x00\x00\xff\xff\xd5\xc4\xca\x21\xce\x20\x00\x00") + +func dataDvorakJsonBytes() ([]byte, error) { + return bindataRead( + _dataDvorakJson, + "data/Dvorak.json", + ) +} + +func dataDvorakJson() (*asset, error) { + bytes, err := dataDvorakJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/Dvorak.json", size: 8398, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataEnglishJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x5c\xbd\x4b\x9a\xe3\xca\x0e\x34\xb6\x15\x7f\x9a\xf4\xe4\xae\xc0\x6b\xf0\x0e\x3c\x4a\x92\x29\x31\x4b\x24\x93\x87\x0f\xa9\xd4\xde\xbc\x81\x88\x40\x52\xfd\x0f\xee\xed\xea\x3e\x55\x2a\x32\x1f\x78\x04\x02\x81\xff\xef\xf6\xff\x94\xfd\xb8\xfd\xdf\xff\xef\xff\x75\xfb\xd4\xf3\xf6\xbf\x5b\xb1\xff\x1d\xd5\xff\x6f\xcc\xf6\xff\xc9\xff\xb7\x0c\xf8\x7b\x3a\xfc\xbf\xfb\xff\xd5\xbb\xfd\xdf\xec\xff\xfd\xad\x7f\xdd\xfd\xff\x16\x7c\x1b\xbe\x7e\x2e\xf5\xed\xff\xf4\x67\xb6\xff\xbf\xd7\xcd\xfe\x7f\xf1\x4f\x1d\xd3\xcb\x7f\x6c\xfe\xd8\xff\x0d\x75\xf9\xe3\x3f\xfc\x73\xee\x07\xfe\xfb\x81\x7f\xb4\xff\xeb\xfc\x7b\xaa\x7f\x9c\x3d\x94\xff\xec\x3b\xf9\x87\xbe\x33\x1e\xe0\x0f\xbe\x2e\xc7\x68\x7f\xec\xf8\xf6\xd3\x7f\x32\x4d\x13\xbe\x07\x7f\xa4\xcd\xbf\x17\xaf\x50\xfd\xfb\x52\x57\xf1\x4d\x5b\x79\x8c\x07\x3f\xf7\x0f\xbe\xe5\x91\x0f\x7c\x23\xfe\xc2\xef\x79\xd4\xb2\x3c\xec\xcf\xa9\x3c\xfd\x1f\x3f\x39\xf9\x27\x94\x3b\xbf\xcf\x7f\x29\x3e\xb7\x4f\xfe\x80\xe7\x8a\xc7\x5b\x0e\xbe\xfb\xf2\xd4\x52\xe1\x21\xb9\x08\x0f\xbc\x78\xf1\x95\xc0\x62\x8d\xfa\x57\xfe\x88\x7e\xf3\xe2\xff\x3f\x94\x01\x6b\xea\xab\xb3\x67\xfc\x92\x8a\x65\x7e\xd4\xca\x3d\xc8\xfe\x9f\xb6\x6c\xef\xea\x5f\x70\x55\xea\x39\xf9\x7f\x9c\x6a\x7d\xe2\xa7\x33\xf6\xa1\x70\x7f\x0a\x96\xa3\x3e\x93\x7f\x7f\x97\xfa\x27\x9f\x1c\x0b\x3f\x67\xbc\xc2\xc1\x25\x2b\x7f\xf0\xc7\x7d\xab\x33\xde\xf4\x83\xd5\xd4\x3a\xe2\x75\x7a\xfd\x26\x7b\x4c\x7e\xc0\x27\xef\x78\xb5\x1d\x7b\x86\xdf\x8b\xad\xde\xed\xa9\x7d\x31\x1e\x78\x1e\xee\x68\x9f\xce\x3d\xeb\xbf\xe1\x20\xe8\x85\xfc\x87\x76\x3c\x1d\x1e\xff\x48\x58\x74\x3c\xd8\x1b\xff\x7c\xee\xd8\x8b\xe3\x98\x70\x72\xf8\xdf\x97\x9c\x07\xac\xcb\xb2\x24\xfc\xf5\x85\x9d\x79\x67\xee\xea\x51\x2b\xf7\x09\xcf\x5d\xfe\xe0\xd0\xed\x27\xff\xdb\x98\xfd\x05\xe7\xca\x85\xe7\x0f\xf2\x98\xed\x75\xdb\x3e\x5c\xc3\x2d\x4e\x37\x3e\x61\xca\xb1\xbf\xfe\x4a\x09\x3f\x9f\x3e\x38\xa6\x43\x7d\x2f\xf8\xeb\x82\xb7\xf2\xef\x3e\xfd\xbc\xd8\xe7\x62\xc9\x3f\xb1\xcb\xf8\xa0\x7d\xd4\x1a\xa6\xe5\x13\x1f\xb7\x27\x6c\xfb\x7c\xf6\x23\xff\x03\xde\xf7\xee\x1f\x6e\xaf\x85\x45\xbd\xdf\xf1\x9b\xf8\xfd\x76\xbc\x70\xcc\x1e\xe5\xc5\xa3\x33\xf1\x77\xd4\x93\x67\x7b\xcc\x93\x1f\xca\xe3\x8d\x6b\x9c\x26\xff\xde\x35\xd7\x75\xe2\x49\xf2\xdf\xb5\x1f\x3c\x17\xef\x84\xfb\x5c\x16\x5c\xf9\x7b\xc1\x4d\xb7\x8b\x18\x2f\xfa\x48\x71\xa9\x97\x87\xd6\x01\xaf\x31\xd4\xbc\xf3\x08\xf4\xbc\x76\x47\xc5\x5b\x3d\xec\x68\x1e\xd8\xee\x3b\x97\xb7\xcb\xc7\x81\x05\xd6\x06\x2d\xba\x7f\xfe\x0e\xfe\x0b\xb8\xc5\xf7\xb2\xed\xfc\xb1\xa9\x64\xbe\xd4\xc1\xab\x76\xcf\x79\xd2\x4f\xb7\xf5\x7a\xd7\xcd\x5f\xc9\xaf\x30\xbe\xd7\x1e\x1b\x27\x8a\x07\x2b\xdd\xf9\x0b\xa7\xb4\xeb\x82\xe1\xd8\x0d\xf8\x45\xcf\x9c\x57\x3d\xbe\x2f\x0a\xed\xc6\x56\xcf\x85\x8b\x52\x57\x5d\x32\x9e\xa2\xf2\x07\xef\x74\xfa\x4f\x16\xbd\x6f\x9a\xec\x91\x79\x22\xf7\x23\xf3\x94\x2e\x07\x0e\xe3\xbc\xf1\xbb\x71\x27\x70\x06\x6c\x4f\xf0\xcb\xbb\x02\x7b\x82\x1b\x3f\xa6\x75\xcd\x4b\x1e\x62\x
23\x79\x85\xf9\xe1\xc7\xf6\xe1\x2b\x3e\xb9\x13\xef\xad\x6a\xc7\x37\xdf\x5c\x6d\x27\xbf\x65\x4e\x03\x2f\xc2\x1b\x0b\xc7\x7f\x7c\x9c\x79\xe7\x95\xc4\xd6\x70\x0b\x70\xd3\x66\xdc\xea\x2d\xcf\x79\xee\xb0\x3e\x66\xf8\x0e\xad\x67\xfe\xa3\x3d\xb4\x7f\xe3\xb2\x0f\xf8\x99\x29\xd3\x5a\xaf\x53\xea\xfd\x4f\x5b\xa6\x6c\x1b\x45\x7f\x00\xb3\xa3\x35\xe9\x8f\x53\x16\x69\xcc\x69\xc3\xef\xc4\xb9\x5f\x0a\x7e\xee\x9e\xf4\xb1\x79\xa2\x09\x38\x92\xec\x3f\x2e\x59\xd2\xd2\xda\x37\x15\xff\x2e\x33\x33\x1b\xbe\x6f\xd6\x61\x99\xb9\x1a\x59\x57\x2a\x2f\xb1\x18\x1b\x7f\x25\x9e\xbe\x4f\x73\x98\x17\x7e\xae\x9f\x0f\x1a\x3f\x5c\xd0\x7b\x9a\xcb\xc4\xcb\x5d\x27\x9a\x98\x38\x64\x76\x73\x76\xfd\x84\xfd\xe2\xc9\xef\x5a\x19\x32\x5c\xdf\xfe\xc4\xea\xe2\x28\xcd\xf4\x53\x66\x92\xb9\x6e\xb5\x99\xdd\x38\x97\x34\x02\xef\xb1\xe0\x26\x9b\xe3\xd8\x7c\x33\xb6\x8a\xb5\x9f\xf2\xdd\x7f\xfc\xc9\x1d\x3b\x6a\x5c\x05\xb7\xe6\x78\x70\x18\x90\xba\x62\x57\xf5\x32\xfc\xe3\x9d\xe1\x5e\x4e\xd8\xe5\x79\xd6\x21\xf2\x77\x59\x37\xdb\x45\x78\x8b\x04\x9f\x52\x36\x7c\x54\xa1\x97\x82\x9f\xb9\x6f\x25\x63\xf9\xd2\x64\xbf\x69\xe0\x37\xeb\x98\x2d\xf9\x97\x0b\xbf\xc1\xd9\xfc\xd4\x0e\x9f\x59\xbb\x89\x26\xb2\x2c\xe7\x81\x0d\xd4\x0d\xb1\x9b\x37\xf1\xe0\x9a\xa3\xe3\x47\xb8\x43\xe7\xf6\xf9\xd6\x0f\x78\x85\x05\xbe\x63\x4e\xba\xfa\xf3\x47\x8b\xda\x5f\x47\x26\xff\xda\xa1\xe1\x89\x49\x2f\x19\x9b\x51\xbf\x3c\x75\xfa\x0f\x7e\x51\x78\x3e\x71\x0c\xc7\x73\x83\x91\xa8\xb0\xab\x15\xfe\xdf\xd6\x1a\x9b\x69\xdb\x45\x7f\x70\x1c\xd8\xb7\x89\x67\x60\x2f\x0b\x4e\x60\xfe\xed\xcf\x38\x7a\xf8\x90\x27\xed\xdf\xc8\x47\xf6\xa3\x82\xf7\xaa\x34\x13\x3d\x0e\x71\x2c\xd7\xb9\x98\xb5\xbc\x31\xe4\x60\x84\xe0\x57\x1e\xd7\x8c\x47\xc0\xfe\x0a\x2f\xf4\x81\x97\xd8\xe9\x08\x07\x6e\xeb\x41\xaf\x5c\xf9\x1c\x0f\x3e\x96\x1b\x4d\x9a\x8c\xba\x2d\x72\x02\xe7\xba\x9a\xb1\x18\xda\x8e\xed\x5c\x52\x1a\xa6\x13\x0e\x60\xae\x38\x80\x27\xbf\xed\x2d\x67\xb5\xe7\xbe\x62\x77\x56\xbe\xda\x44\xb7\x70\x6c\x27\x23\xa4\x7e\xac\x15\x8e\x9d\x97\xd8\x0c\x33\x63\x1e\xfb\x06\x98\x87\xd3\x16\x89\x56\x23\x27\x9c\xdb\xcb\x53\xe3\x48\x7a\x28\xe7\xff\xf5\xce\x73\x6b\xb7\x4d\xc7\x9d\x2e\xca\xae\x13\x5c\x02\x7f\xa6\x37\x93\x86\x5f\xb3\x9d\xf0\x8a\xf5\xc5\xab\xf6\xe1\x8b\xaf\x66\x3f\xf0\xa1\xdd\x27\x2e\x6b\x57\xb1\xc8\xc3\xa6\x23\x84\x25\xa4\x2b\x9c\x0b\x1e\x6b\x4e\x9b\xad\x08\xde\xd0\x3c\x20\x5e\x31\xe9\x64\xa5\x30\x86\x19\xaf\x64\xbf\x87\x0e\x65\xa6\x17\x5d\x47\xfe\x5a\xdb\x49\xfe\xda\x21\xcd\x0b\xee\x2c\x16\xd2\xaf\x2e\x42\x97\x8d\x9f\xd2\x27\xbc\xc3\x71\x6e\x38\x76\x65\x87\x91\xa9\xb6\x49\x1b\xae\xd2\xb3\x60\x57\x6c\xe1\x68\x08\xc6\xcc\x70\xca\xde\xf9\x81\x53\x86\x6d\xf0\x87\xe3\xb3\x28\xc4\xb8\x73\x43\xcc\x9a\xd2\x23\xd1\x1e\x99\x71\xd6\xbd\x49\xdc\x4c\xbd\xca\x87\x16\xd4\xe3\x1c\xfc\xb2\xeb\x15\xd3\xc2\x87\x0d\xb3\x65\x66\x99\xf7\x84\xbf\x0f\x61\xe6\x4e\xe7\xd4\xe5\x74\x1e\xe5\x7e\xfa\xbe\x3f\x68\xc4\x6d\xfb\xf1\xd0\x5b\xfa\xfb\xd1\x61\xc1\xe9\x61\x40\xcf\x9b\x96\xee\x1b\x63\x11\x73\xdb\x6f\x1e\xe3\x32\xaf\x75\x3b\x18\xdf\x6e\xb4\x83\x77\x6c\xed\xb3\xe8\x10\xf2\x06\x1c\xd8\xb1\xc7\x94\x9a\xa5\x56\x48\xcb\x07\xda\xdd\x5b\xe2\xbd\x60\x53\xf6\xf8\x26\xed\x7e\x57\x68\x59\x4f\x06\x29\x76\xa6\x7c\x51\x72\xd1\x9b\xce\x1b\x0f\x60\xd6\xd1\x1b\x92\x87\x3a\x8c\xf8\x18\x4c\x67\xdc\x47\x33\xd7\x38\x27\x70\x54\x58\xab\x9c\x10\xfd\x58\x4c\x5b\x18\x06\x54\x06\xa8\x38\x2e\xfd\x44\xf7\x3c\xd2\xa3\xd9\xfe\xf8\xb3\xfc\x67\x3e\xf4\x28\x38\x2e\x87\xce\xdd\x3d\x31\x09\x41\x28\xe5\xa1\xe7\xce\x37\x61\x38\x19\xde\xc8\x22\x6e\xc4\x46\xe5\x7e\xb7\x70\x6f\x09\x33\x93\x69\x14\xf7\xb5\x98\xf3\xe6\xc1\xd0\x5b\x9b\x45\x96\x0f\x78\xe7\x61\x90\x1d\x18\x19\x94\x74\x34\x6b\
x45\xb7\xe6\x9e\xb1\x7b\xe1\x7c\x3d\x2d\xa0\x9f\xe7\xbf\x56\x58\x82\x8f\x2f\x1a\xfc\x83\xc7\x75\x5a\xaa\xb2\x2c\x58\x0b\xc4\x72\xb1\x7f\xb8\x1d\xe3\xb9\x77\x49\xae\x41\xa6\x65\xc5\x89\xb2\x83\xf6\x66\xf4\xc5\xb0\xd8\xc2\x4f\x3a\xf2\xcc\x80\xdf\x3c\x18\xd2\x01\xb3\xbd\x30\x4f\xe3\x65\xd7\xc7\x04\x4b\xbf\x97\xa1\x39\xd4\x1b\x82\x0f\xae\xc7\x3e\x31\x00\xdb\x68\xd8\xde\x57\x5c\xec\x76\x99\x26\x91\x19\x81\x7b\x1c\xfc\x87\xca\x87\x5d\x0b\xee\x1a\x32\x0b\x2d\x34\x7c\x54\x97\xf9\x02\x48\x04\x07\x5e\x85\x89\x3b\x63\xf7\x83\x2e\x95\x0f\xf1\xb5\xd9\xf8\xe0\xbc\x95\x7a\xf2\xb2\x8c\x0c\x30\x6c\x7d\xf8\xdc\x63\xc1\x8e\x26\xd9\x5f\xbb\x0a\xc8\xac\xaa\x1f\x2b\x5e\xaa\xbb\x36\x6e\xe5\x9d\xee\xb1\x69\xff\x9d\x05\xbf\x7f\x39\x15\x63\x8d\x8a\xac\x76\x3e\xbc\xa5\xae\x13\x6f\x24\xdc\x3a\x43\xe5\xa5\xea\x34\x3c\x18\x94\xba\xeb\x62\x56\xc0\xa4\x25\xe7\x19\x87\xbf\x2c\x0a\xb1\xdc\xbe\x33\x68\x0c\xb3\x78\xae\x3a\xcf\x69\xc3\x77\xcc\x99\x7e\x12\x9e\xa7\xdb\x22\x2f\xf0\x8f\xc2\x12\xd2\xc6\xa6\x3b\xd7\x4a\xee\x17\xbb\xb9\xc9\xe1\x86\x6d\xf2\x08\x90\xff\xd9\xd2\x42\xee\xe0\x58\x69\x3a\x5f\x5c\xf8\x5d\xe6\xc9\xec\xbd\x3c\x59\x3f\xd1\x82\xd9\x12\x31\xe1\xe2\x4f\xdc\xf3\x84\x9f\x2f\xbf\x58\x36\xbf\x20\xb8\x2d\x1b\x73\xe6\xd4\xed\x75\x32\xcb\x40\xd7\x5f\xdf\x4c\x33\xd2\xa0\x8b\x52\xb4\x18\x0b\x03\xc3\x4c\x83\xc4\x10\x01\xbb\xb9\xe6\xbe\xe0\x7a\x75\x7c\xf9\x89\x19\xb3\x9d\xf9\x3f\xbc\x88\x71\xc1\x26\x5c\x29\xdb\xc5\xe9\x46\xbf\xa1\x7b\xc2\x87\xb6\x35\xd2\x3d\x54\xc8\x73\x4f\xe7\x44\x97\x4f\x7b\xa3\x98\xbf\xf2\x98\x76\xc8\x0d\x66\x5b\x9a\x71\xd7\x51\xe3\xc3\xc0\xec\x4c\x8c\x1f\x7e\x72\xe4\x50\x4f\x5e\x9b\xcd\x8c\xe1\x8c\x4c\x51\x5b\x6f\x46\x54\x59\xe8\x11\x27\x30\xe1\x29\x2c\x24\xd8\x0b\x4d\x81\x6d\xf7\x2e\x44\x60\xa5\x77\x33\x93\xa6\x6c\xb9\x02\xe3\xf0\x93\x8d\x6f\xb5\xdf\x4f\xc8\xa6\x1e\x3a\x33\x8f\xb6\xcb\xf6\xa8\x1b\xc2\x81\x76\xf5\xe0\x5e\xd3\x43\x7e\x72\x2a\x7f\x71\x77\xcc\xd6\xdb\x93\x30\xee\x5c\x26\xc6\x07\x7b\xfe\xe5\x26\x20\x4b\x34\x33\xc8\x2c\xc6\x9e\x01\xbe\x95\x87\x71\xf7\xb4\x63\xe0\x8d\x3d\x94\x71\x9b\xe1\xd4\x69\x61\x6e\xb4\x0c\xfc\x16\x7a\x4e\x19\x34\x33\xfd\xda\xfe\xfc\x6b\x57\x17\x89\xa7\xbb\x38\x3d\x78\x36\x2f\x54\x98\xea\xee\xe5\x81\xc5\xaf\x70\x2d\x5b\x36\x8f\x6b\xf6\x7a\x1f\xcb\xaa\x93\x89\xdf\x92\x10\x05\x4f\xf1\xf3\x63\xa5\x0d\xf3\x18\x41\xe9\xfd\xfd\xd4\x03\xbe\x2d\xf1\x80\xeb\x3e\x69\x64\xe2\xde\x7a\x24\x80\xc7\xf3\x18\x08\x9b\x58\x19\x99\x3c\x19\x99\x98\x33\x85\xa5\x09\x97\xe1\xff\x56\xbb\x97\xdb\x15\xa6\x1a\x55\x8e\x68\xe5\xeb\x78\xdc\x17\xc7\x6b\xce\xf1\x18\x9e\xdd\xb9\x13\xdb\x0f\xf3\xbe\xbc\xaa\x3d\x03\xcb\x3b\xad\x8b\x3b\x59\xd8\x00\x9e\xe9\x45\x9e\x75\xe3\xa5\x90\x5b\x44\xb0\xe0\xa9\x2b\x13\x7b\x8b\x27\x68\x4e\x13\xa3\xc4\x49\x9f\x3c\x6c\x48\x72\x7b\x0b\x59\x2b\x8d\x02\xf6\xb8\x2f\x07\xa3\xb0\x0c\x70\xc3\x1c\xe6\x41\x8f\x73\x78\xf2\x81\x98\x78\xe2\xee\xed\xe7\xb6\x6e\x74\xa6\x05\x37\xd8\x7c\x85\xa7\x8d\x8c\xdc\x56\xfa\x57\x8f\xac\xf4\xeb\xe1\x59\x7f\xfb\xbc\x02\x5a\x43\xe8\xf0\xf9\x23\x78\x70\x50\xb0\x78\x28\xd8\xb1\xeb\xbc\xf9\x3d\x86\x15\xcd\xba\xd3\xda\x72\x4b\x31\xe5\x95\x3b\x1c\x4d\x26\xf0\x8c\xe2\xcb\xac\x0c\x63\x8c\x5c\x92\xc1\x8e\xad\x02\x63\x60\x5b\xe9\x32\x10\x71\x98\xd3\x5f\xc1\x28\x58\x08\x07\x51\x16\x9e\x39\xbe\xf0\x16\xc9\x6a\x59\x5e\x75\xa2\x95\xb3\x57\x84\x75\x58\x4b\xee\x15\x33\x63\x4d\x91\x74\x0d\xf6\x94\xbc\x09\x34\x47\x0a\xd7\xea\xab\xf0\x68\x21\xb3\xe8\x15\x20\x59\x88\xb3\x30\x6d\xd5\x9d\x32\xdf\x6e\x3f\x46\x17\x08\xd7\x78\x17\x8e\xe9\x3f\x20\x30\x80\x6b\x37\x98\x75\x97\xad\xaa\x8c\xbf\xf1\x3e\x85\x19\xec\x82\x75\x67\x7c\x9d\x26\x3d\x00\xdf\x93\xa9\x4e\x1a\x66\x04\x0b\x4a
\xef\x70\xf3\x4f\xd9\x9d\x91\x0f\x99\x5f\xc8\x73\x86\x6c\xce\xa6\xe8\x2e\xfa\xe9\x22\xb8\x6b\x89\xc3\x1c\x5b\xe0\x71\x35\x8e\x66\x46\x1c\x6f\x3e\xe6\xc9\x84\x7d\x67\x6e\xe2\x3e\x8c\x40\x42\x9d\x26\x66\xc1\x47\xe1\x9d\x77\x48\x4b\xa9\xe3\xc9\x7c\x19\xbe\x5f\x5e\x84\xdf\xb2\xf8\x37\xdf\x10\x7f\xe1\xb0\x4e\xa7\x27\xa6\x8b\xa5\x7a\x3c\x81\x48\x95\x78\x9f\xfc\xb4\x96\x39\x3d\xe8\xf2\xef\x5c\x8e\x3e\xc9\xe5\x75\x93\x12\xe9\x63\x63\x86\x60\xe7\x48\xbf\xea\x9e\x5e\x58\xb2\xe4\x91\xb0\xc2\x13\x47\x1e\x70\xe8\xb3\x99\x44\x9a\x5d\xf7\x66\x48\x19\xec\x04\x2c\xba\x58\xb8\x8b\x5b\xda\x4b\x66\x9a\x38\xa5\x5f\xbc\xbc\x5d\x6e\xac\x4e\xea\xfb\xb0\x9d\x6f\x1a\x68\x5b\x74\x3a\xb0\x99\xd7\xc4\x02\x98\x9d\x26\xd7\x97\x4c\x0f\x64\x89\x59\x15\x1a\x97\x3d\xd4\x96\x75\xe5\x73\x2c\xdd\xbe\xe2\xec\x0b\x11\x9e\x2b\xd3\x3b\xbb\x30\x8f\x85\xde\x70\x36\x1f\x87\xef\x6d\xe9\xcc\x96\x19\xad\xec\xa3\x50\x3a\xde\xee\x8d\x3e\x86\x0e\x9a\x01\x4e\x12\x62\x40\x9b\x70\x30\x5e\xb9\xdb\xe6\xe1\x9e\x9a\x95\xc6\x99\xf5\x00\x52\x21\xd1\xc6\x50\xc7\x31\xb3\x95\x9b\xa6\xe0\x63\x13\x74\xbb\xdd\x14\xae\x5e\x6f\x47\x7b\xf5\x93\x98\x4d\xdb\x0f\x5c\x60\x97\x47\xae\xd3\xb9\xf0\x8e\xb8\x7f\x2c\xf8\x98\x2c\xab\xf5\xb0\xed\x33\x2f\xc4\x14\xfa\xa0\x0f\x1e\xeb\xca\x9f\xb7\x2c\x2f\x77\x11\x8a\xef\x8c\x68\xed\x15\xb1\xbc\x6b\x5a\xe9\xb6\x1b\x7e\xb6\x0a\x06\x38\x72\x5c\xc7\x1b\x5c\x49\xc3\x49\x6c\xaf\x4e\x38\xab\x07\x51\x91\xae\x98\x11\x21\x2e\xb0\x0b\x8f\x85\x9d\x25\x26\x51\x22\xf8\xb6\x35\x26\xa0\xec\xd1\x3f\x4e\x37\x0d\xef\x9d\x07\x8f\xae\x20\x62\x8d\xbe\xec\xb2\x65\xa7\x45\x5d\x82\x93\x96\xac\x40\x8f\x89\xe7\x4b\x0e\xcb\xac\x91\xac\x1b\x20\x37\xfe\x5e\x47\x1f\x75\x8e\xf0\x81\x16\xcd\xbe\x08\x18\xfb\xc9\xc3\xcf\xf5\xc4\x6e\xcc\xa4\xef\xfb\xd7\xa9\xd2\xdd\x33\xfb\xee\xbb\x60\x21\x2b\x43\x47\x07\x28\xe4\xec\xb6\x2a\xd4\x21\x7c\x6f\xc7\x48\x15\x86\xbe\x30\xfe\x5a\x64\x08\x2c\x85\x72\x84\xb5\x3d\x37\xd3\x25\x22\x72\xcb\x95\xbf\x21\x29\xb9\x23\xf4\xdb\x60\xd7\xf1\x79\x2b\xef\x88\xbb\x13\xde\x1b\xdf\xd9\x66\x61\x62\x5d\xdc\x69\x32\xeb\xe0\x99\x92\xff\x41\x9a\x8a\x7f\x71\x50\x57\xf0\x67\xb9\x33\x2c\x54\x36\xb2\x9e\x71\xdb\xcd\x78\xc2\xb3\x73\x21\x2b\x2a\x49\xa7\x2e\x5e\x2c\xf3\xa4\x4f\x59\xdc\xd0\xc1\x64\x9e\x42\x1b\xde\x9f\xdc\x4c\xc5\xad\xc5\x22\x37\xa6\x18\x3b\xce\xf4\xef\x8d\xf0\x5b\x3b\x46\xf7\x89\xae\xd0\xbc\x7c\xa0\x9f\x0d\xc4\x46\xdc\xe5\x21\x16\x17\xc0\x7d\xe7\x88\x53\xf8\x8b\xad\x20\x54\xfe\x73\xf2\x08\x5b\x74\x6f\xbf\x0d\x36\x74\xa7\x35\xb1\x74\x0a\x05\x34\x45\x87\x8c\x34\x88\xa0\xfb\xc1\x0e\x23\xa4\x57\x37\x6b\x44\x07\xd2\x4d\x2d\xcc\x20\x5e\xe1\x29\x06\x83\xaf\xc5\x7e\xe5\x1e\x3f\xe9\x2f\x85\x5f\x63\xbe\xf2\x86\x22\x8f\x9d\x1c\x5e\x80\x89\x31\x9e\x9d\x9a\x9e\x01\xae\xbc\xc4\x7e\x30\x72\xda\xf2\xca\xec\x22\x92\x60\x62\x41\x45\xf0\x3a\x2e\xc8\x2e\x6f\xd3\x25\x9c\xad\xc8\x4e\x86\x7a\x76\xc7\x2d\x70\x75\x41\x4c\x0d\xed\xee\xeb\xca\x83\x4e\xfc\xde\x5f\x60\x53\xf6\x76\xde\xef\x78\x84\xa9\x1d\x83\x7e\x9c\x2a\x7e\x2f\x53\x9c\x7d\x1c\xf1\x68\x3d\xb1\x8a\x89\x7f\xcc\xca\x60\x7e\xaa\x42\x33\xfc\xab\xd9\x1e\x16\xdc\x6c\x67\xcc\x4b\x32\xd8\x2f\x17\xe4\xb5\x0c\x58\xc3\x2d\x07\x48\x14\xa1\xed\xc3\x2e\xf2\xc1\x25\x7a\xea\x3e\xd8\x7b\x45\x0e\xee\x09\x34\xf3\x08\xcb\x2b\x26\xfc\xd6\x27\xfd\x95\x39\x21\x5a\x52\x33\x7e\x93\x5c\xc5\xac\xe3\xbd\x9e\xdd\x54\xfa\x1b\xc1\x4e\xc6\x24\xfb\x44\xfb\xa6\xec\x15\x8b\xdf\xf3\xba\x4d\xb4\xcc\x1e\x5a\xe9\xda\xba\x57\xc6\x51\x18\x5e\x3c\xe3\x1e\x75\x12\x15\x5e\x89\xf3\x58\x4e\x2c\x1b\x7a\xc2\xe6\xf8\x25\x60\xc0\xdb\xde\xf9\xdc\x03\xdb\x77\x0b\x4f\x97\xd7\xd5\x9d\x89\xb3\xb9\xdd\xe5\x2a\x96\x32\x87\x5e\x6a\xcf\x63\x3d\x54\x7f\xf
6\xed\xa4\xd7\xee\x11\x6e\xc1\x8d\xaa\xf8\x51\x88\x40\xd9\x65\xc6\x59\x6a\xf5\x08\xa4\x4f\xb2\x89\x40\x1e\x36\x9e\x21\x5f\x42\xee\x39\x22\x00\x9c\x33\x54\x7b\x16\x02\x95\x5d\xc5\x96\xa4\xb5\x4e\xf5\xc1\x34\x66\x57\x8e\x6d\xd6\x91\x3f\x99\xfa\x8d\xcf\xbe\x57\x1d\x95\xb4\xc9\x51\x98\x69\x93\x2f\x88\x2a\x2d\x8e\xe9\x48\x67\x39\x64\x0f\x88\x4b\xd4\x43\xe2\xb5\x17\xd5\x9e\xce\x07\xe3\x20\x7b\x72\x05\xed\x63\x6d\x31\x04\x3d\x7d\x7a\xd3\x19\xaf\x0e\x04\x32\xef\xf0\x10\x3e\x60\xf4\xe5\xa1\x9d\xb0\xab\xc5\xa0\x93\xdb\xea\xa0\x02\xde\x8a\x39\x33\x76\x62\x08\xfc\xbe\x1e\xdc\x58\xbb\xc1\x0f\x0b\x5c\x4f\x65\x42\x78\xb1\x92\x61\xea\x16\x65\x79\x9e\xa3\xda\x9f\xaf\xb2\x07\xd6\x16\xbe\xa4\xff\x33\xd7\x45\x3b\xc0\xa0\x0d\x11\xad\xee\x24\x33\xca\x44\xcf\x48\x57\x62\xbe\xfc\xcd\xeb\x72\x27\xb4\xec\xa5\x06\x3e\xf3\xcc\x87\x76\xa8\x81\xff\xb0\x66\x25\xda\x33\xdd\xa4\xb2\x42\x6c\x81\xec\x11\xf0\x6b\x25\x86\x8c\x33\x6c\x0b\x36\xe2\xbd\xf6\xb0\xf2\x07\x2d\x5c\xd8\xea\x5f\x1c\x50\x8b\x8f\x33\x2b\xaf\x7f\x92\x58\x06\x04\xb8\xb9\x44\xe7\x16\xa7\x89\xb8\x37\x51\x2a\xbe\xb1\xf2\x5c\xa6\x26\x16\x20\x14\x19\xaa\x27\xcd\x1a\x6e\x9b\x3f\x90\xad\x69\x87\xb5\xf3\x7f\x20\x22\x51\xf6\xfe\xdc\x77\x9d\x5f\x45\xf6\xb8\xed\x77\x1e\x66\xf3\x6e\xb6\x01\xda\xf8\x7b\x52\x81\x47\xae\xcc\xce\x70\x65\xdd\x7c\xc5\x29\x1e\xcc\x57\xb3\x90\xff\xd0\x95\x31\x5b\xbb\xb1\xbe\x61\x06\x95\x60\x73\x52\x4e\x4a\x78\x68\xb7\xcb\x45\x4c\x90\xaf\x95\xb6\x99\x86\x51\x7b\x88\x25\x9d\xca\x8b\xd9\x47\x5f\x01\x69\x31\x05\x5d\xf9\x52\xb6\x31\xc5\x7c\x2a\xee\x6a\x6f\xf1\x5f\xe0\x0b\x85\x19\xca\xb0\x9d\x04\x64\x0e\xb3\xd7\xac\xe4\x38\xe6\x2f\x52\x07\x9e\x45\x15\xd0\x87\x80\xa8\x21\x7f\x85\xd0\x0b\x23\xf3\xfd\xf9\x09\x28\x1d\x3f\x69\xb6\x19\x7e\xd2\x16\x9e\x17\xd1\x7e\x0d\xf3\xe0\x4a\x78\xd2\x83\xd5\x9d\xb7\x83\xf9\x66\xfe\xb5\x57\xc7\x2f\x27\x3e\xae\x00\x81\xab\xf1\x88\x9a\xed\x5b\x15\x56\x8b\x86\x51\xf8\x71\xa8\x8e\xfe\x97\xe5\x27\x00\x65\x9e\xcc\xfa\x27\x7d\x56\x6d\x4a\xec\xe6\xa3\xc2\xea\x55\x58\x93\x73\x61\x0e\xec\x88\x77\x43\x5d\x3a\x95\x08\xde\x76\xa9\x85\x8b\x1c\xc2\x3f\x12\x4f\xf8\xcf\x39\xfb\x66\x9a\xa1\xd1\xfd\xdd\x6a\x65\xb1\x8b\x88\x06\x0e\x3a\x1d\xb0\xbf\x23\x03\x89\x4d\x50\xba\x1b\x0c\x12\x6d\x84\xf1\xd2\x99\xdb\xa6\xde\x98\xdd\xd2\x54\xbe\x05\xbf\x46\xca\xf1\xa0\xcf\x1a\xe2\x77\xee\xb4\x11\x9b\x17\x20\x0f\x78\x0c\x5f\x5e\x5a\xdd\xf9\xee\xf8\xbd\xce\x63\xe1\x45\x43\xe5\x82\xa1\x5a\xa1\xb7\x7e\xe9\xf2\xc0\xa4\x63\x3d\x3d\xe8\xe4\xfe\x02\x26\x87\x0d\x3e\x99\x72\x4e\x0d\xf3\xde\x67\x96\xdf\xa6\x14\x15\x59\x1a\xdc\x47\x1d\xfe\x10\xb2\x27\x72\xee\x3c\x0a\xd5\xd2\xf1\xef\xb8\x50\x6f\xa2\x08\xf9\xb7\x2f\x82\x7e\x99\x19\x98\xc5\x0d\xa7\xaf\x98\x86\x05\x5b\xbf\xc8\xa8\x2a\xc3\xea\xf1\xe9\x95\x30\xfe\xba\x0f\xe4\x01\x74\xa8\x10\xe1\x85\xdf\x7f\x6d\x09\x5c\x8f\x39\xd1\x03\x85\xd8\x85\xc7\x81\xd7\xcf\xff\x43\x72\x04\x06\xff\xe9\x1e\x30\x09\x13\x5c\x41\x0f\x4a\x60\x70\x75\x1d\xbb\x25\xea\xa4\xaa\x74\x47\x32\x06\x1d\x89\x98\x49\xaa\xa7\xf9\x72\xeb\x09\xbc\x82\xce\xeb\x9c\x59\x15\x21\x32\x6a\x66\x4e\xe4\x8d\x24\x6c\x38\xc2\x74\xbc\xb7\x2a\xc3\x85\xfe\xd7\xb1\x3b\xe2\x1a\x87\x2a\x75\x66\x14\x56\x7c\xfa\xc9\xbd\xf6\xaa\x27\x39\x3c\x99\xb5\xab\xad\xe8\xa2\xbf\xb4\xc4\x5b\xc5\x6f\xd9\xcc\x22\x55\xbc\x05\x3f\x68\xcc\x48\xbc\x2c\xff\xe3\x96\xcc\xd9\x5e\x66\xe9\x09\x09\x45\x86\x07\x63\x81\x6a\x11\x21\x2e\xfc\x13\x42\x9e\xaa\xd3\x34\x05\xe6\x40\xc3\xfd\x22\x18\xce\xd2\x8c\x67\x53\x74\x22\x27\xd3\xb8\x3a\xb0\x08\xe8\xd6\x3f\xac\xfc\xe3\x94\x5f\x31\xeb\x87\xb3\xf6\x20\x34\xd6\x47\xe0\x0b\x00\x6c\x48\x3c\x5d\xc0\x37\xf1\x14\x69\xe5\xc3\x44\xbe\x22\x0c\x9d\x05\x32\xb7\x4e\x
34\x32\xb6\x1e\x3d\xeb\x41\xb6\x41\x74\xc1\x15\xc1\x21\x2f\x1c\x52\x25\xbe\x2a\x68\x5d\x16\xed\x30\xc2\x64\x21\x8e\x16\x60\xc9\x2a\x44\xa8\xbc\x1a\x54\x27\x12\xbe\xc4\x5c\xd8\x12\xcf\xb6\x65\x47\x3b\x2f\x3e\x16\xca\xcd\x16\xf8\x62\x66\xcc\x19\xe4\x03\xfe\x0e\x5a\x8f\x25\x95\x83\x8c\x82\xfe\xed\xae\xeb\x25\xbb\xcb\x60\x03\xf7\x31\x92\xf0\x70\x72\x55\xe5\x1b\xfd\xfd\x38\x09\x9c\xdb\x6b\x92\x15\x13\x95\x17\xb3\x10\x5c\xbb\x33\x2a\xcf\x53\x7e\x60\x15\x5f\xe5\x59\xb0\x78\x83\x2e\x03\xbc\x3a\x49\x3b\xcf\x3a\x25\xbe\x88\x9d\x8c\xc4\x07\x9b\x6e\x51\x58\x55\x4c\x70\xb0\x92\xf7\x20\xf2\xb3\x90\xc9\x40\x0b\x77\x43\x34\x68\x0e\x05\x26\x5e\xd5\xf3\xc9\xd6\x20\x90\x2a\x2c\xf4\x23\xd3\xa3\xc1\xe2\xdf\xeb\x04\xcb\x4b\xc3\xe2\x67\xbf\x2c\x38\x39\xf7\xda\x9f\xbc\x67\xe4\x22\x58\x80\x89\x73\x9a\x11\x46\x7a\x2e\xa3\xa2\x7f\x3f\x29\xa2\x73\x7e\xc3\x9b\x57\x15\x11\xdd\xb9\x1d\xaa\x19\xc4\xfd\x4c\x02\x3a\xcc\xe1\xb3\x20\x86\x0a\x18\xbe\xc9\xc3\x54\xde\x6f\x40\x4b\xf8\x4e\x5d\x25\x3e\xda\x40\x63\x6c\x21\x0f\x5d\x8a\x6d\xec\x22\x67\x18\xb9\x4d\x9e\x57\x46\x07\xf2\x5a\x24\x4d\xbe\x69\x40\x7c\x99\x71\x43\xde\x34\x0a\x4e\x1c\xd0\x2d\xa2\xb3\xf3\x6a\x47\x20\x66\x01\x94\x09\xec\xde\xcf\xee\x87\xc9\xb4\x33\x7f\x80\x08\xd4\x99\xd9\x25\xf6\x74\x16\xec\x48\x87\xc3\x88\x2c\x91\xdc\x61\x0b\x75\x17\x93\xc0\xae\xc2\x43\xd4\x1a\xa0\xa0\xad\x22\xd2\xe3\x52\x9e\x0e\xfd\x99\x7b\x8c\x64\xcf\x8c\x7b\xd4\xd1\x11\x1e\xad\x49\xb7\xc6\x2d\x2c\xbd\x66\xfe\x54\xb9\xe7\x1c\xd4\xbc\x95\xa1\xa2\x83\x36\x11\xd1\x4d\x42\xfe\x19\xba\x00\x06\x64\x90\xa0\x5c\xdd\x0c\x0e\xfe\xe1\xde\x60\x73\xaf\x07\x64\x56\x1f\x7a\x87\xd0\x18\x2d\xda\xba\xf1\x08\xa4\xa5\xd7\x42\x65\x77\x52\xbe\xf5\xf6\xda\xca\x3e\x68\x42\x3d\xe9\xbf\x29\x84\xc7\x27\x0d\xf8\x8c\x44\x52\xe0\xf1\x76\xa7\xe7\xbf\xdc\xa2\x00\x5a\x66\x95\x3e\x68\x8e\xea\x9d\xd8\x93\x87\x7b\xac\x8f\xdb\x21\xca\x58\xe2\x45\x8e\xa1\xd8\x95\x83\x61\x71\x6c\x15\x36\x01\xc4\x16\xd4\xef\x70\x63\x76\x4b\x04\xf9\x48\x03\xd7\x0a\x9c\xb7\x56\x04\xb1\xd8\x8c\x1b\x75\xce\x64\x6c\x59\x0e\xb3\xf1\x37\xd4\xa8\x18\xe4\xbd\x27\x8e\x85\xdb\xb6\x47\x6d\x3e\x9d\x1b\x91\x40\xcf\x05\x04\xfd\x21\x7d\xf3\xef\x64\xbe\x9a\x86\xc0\x83\x9c\xcd\xc2\x83\x6b\x16\x2c\x18\x5e\x7b\xcf\x9b\x02\xa8\xa2\xd5\x27\x36\x82\x1d\x76\xe7\xf9\x44\x66\xca\xe9\xe4\xeb\x20\x4f\xa7\x52\x14\xa3\xc1\xc3\xdc\x38\x5d\xa4\x62\xe1\x32\x31\xb2\xda\xcf\x80\x7e\x7a\x0f\x2d\x89\x3c\x6f\x28\xdc\x9e\x73\x77\x13\xee\x7a\x47\x00\x73\xb7\xa3\xce\x9c\xf7\x41\x96\xeb\x08\x4a\xa7\xb8\x5b\x48\x27\x68\x34\xea\x9f\x5e\xe6\x91\x70\x25\x0f\x80\xdd\x6c\x9e\x13\x54\x57\x69\xf1\xbc\x4c\x71\x63\x14\xa3\xdc\x64\x62\x6a\x55\x36\x65\x93\x5d\x80\x9a\x79\xe9\x68\xe6\xeb\xc4\xe8\x79\x2a\x81\x90\xbe\x5b\x98\xa7\x9b\xe7\xc8\x81\x40\x38\x0b\xe2\x88\x2b\x3a\xc7\xb4\xc5\x4f\xab\x87\x78\x0a\xcb\xd2\xcc\x64\xd0\x56\xe6\xc9\xca\x91\x79\x24\x86\x19\xa2\xeb\xcd\x69\xc3\x81\x49\xf7\x3b\x81\x8a\x97\xc2\x95\x1c\x0c\x58\x62\x64\x3c\xbc\xa7\xd0\x4c\xde\x0a\xcf\xb5\x6f\x0a\xef\x02\xfc\x27\x4a\xb9\xa9\x50\x1c\xb9\x14\xc3\x40\x82\x7e\xfe\xe2\x13\xb6\xdd\xae\xb8\x1d\x21\x40\x5f\x53\x9f\xfe\xe2\x95\xcd\xf0\xcd\x6b\x7a\x2c\x72\xd5\x8b\xdb\x07\x92\x23\x3c\x51\x25\x0d\xca\x92\xd3\xe0\x10\x79\x95\x00\x56\xe6\x15\xe0\x6e\xe0\xdb\xa3\xce\x75\xd4\x28\xf8\xd6\x67\x1c\x60\xf3\xc9\xab\x2a\x87\x00\x45\x6f\xf0\x4c\xbe\x05\x24\xa7\x82\x8c\x88\x75\xae\xef\xa5\x21\x62\x7c\x88\x1b\x6a\xeb\x84\xcb\x65\x72\x27\x85\x32\x76\x60\x08\x50\x25\x50\xa8\x76\xf8\x96\xe1\x3c\x14\x50\xc1\x57\x10\x8e\xd8\x62\x6b\x87\x7c\x57\x9d\xf9\xd9\xea\x19\xf9\x41\xf4\xc0\x31\x24\xb9\x4d\xa2\xc4\xeb\xe6\x40\x1b\x2f\x8f\xaf\xde\x20\xa2\x75\xc7\xd3\
x69\x81\x45\x23\x14\xe3\x01\xcd\xc8\x47\x85\xa8\xd8\x8f\x72\xbf\x3c\x3c\x90\xc3\x8f\x7a\x65\x37\xd1\x3b\x9c\x0f\x3e\xbf\x45\x7a\xeb\xb5\x8e\xbb\xee\x6d\x64\xf7\xbb\xc2\xd6\x4e\xd0\x7e\x4f\x0c\xc3\x9f\x97\x18\x26\x00\x6f\x06\xf3\x01\xd4\x5b\xc8\x9c\x3b\x9d\x57\xfb\x39\x33\xb1\x49\x25\x8b\x25\x90\xc3\x81\x39\x3e\x9d\x07\xaf\x01\xd2\x38\x73\xc0\x07\x2b\xd7\x0e\xf5\x90\x13\xef\x96\x43\xbb\x17\x41\x08\x31\x98\xa3\x06\x13\x17\xee\x90\xa5\xae\x53\xf5\x11\x07\x05\xb9\x52\x1e\x6c\xc8\x53\x6c\xf5\x6e\x87\xa9\xd4\xb0\x98\x49\xf6\x03\x48\x3c\x76\xe3\x17\xc7\xda\x32\xbc\xe2\x9f\x84\x47\x7d\xd9\x63\xf3\x6e\xc1\x89\x37\xce\xe9\x9e\xe6\x82\xd5\xc2\x91\xaa\xc1\xbd\xc5\x37\x01\xdf\xec\xb2\x80\xa0\xe5\xd1\xd6\x95\xbb\x68\xdf\x83\x0f\x57\x46\x27\xe6\x95\xd6\x9b\x1f\x93\x10\x3e\xce\xd7\x06\xae\x1e\x33\x89\x78\xb5\xb1\x68\xcc\x5f\xd5\x18\xcd\x2a\xd9\xb7\x4a\x21\x7d\x6e\xab\x8d\x35\x2a\xad\xd3\x4d\x09\x57\x38\x19\x75\xbf\x29\x53\x8a\xd4\xc8\xd6\x4a\xdc\x24\xcb\xc1\x68\x9d\xba\x0a\x2b\xaa\x56\x07\xf3\xd6\xf9\x15\x75\x06\xcb\x34\xcc\xcb\xf2\xd0\x4f\x55\x5c\xbb\x17\xed\xce\x98\x07\xda\x0d\x42\xa4\x8c\xf3\x56\xa5\x8c\x3a\xdf\x7a\x26\xbb\x2d\xd3\xd9\xb2\x33\x3b\x17\xb4\x80\x93\x02\xb3\xad\x15\x67\xca\xec\x30\x43\x44\x0c\x66\x1f\x9c\xb9\x17\x24\xa0\x2a\xaa\x06\xcc\x0d\x43\x1e\xd4\x2f\x08\xab\x04\x85\xdc\x19\x1c\xcc\x2e\x16\x7e\x0a\x3c\x2a\x2d\xf7\xc8\xb2\x9e\x19\x7e\x7c\xd0\x4f\x92\x15\x18\xc9\x79\x11\x8c\xd2\xf8\x08\x7c\xf4\x97\x57\x3f\x1e\x0d\x1f\xef\x58\x92\x63\x6e\x74\x53\xf5\x2b\x36\x91\x4c\x4a\x3b\x85\xac\x72\x6c\x65\x96\xff\x7e\x34\x33\x01\x9a\x24\x0c\x74\xb1\x15\x9a\xaf\x83\x5b\x49\x9d\x6a\x81\xe2\xfd\x34\x5f\xc7\x17\xae\x57\xaa\x10\xd0\x8c\xae\x7b\x0b\xc7\xbd\x54\xb9\x3c\x78\xce\x09\x55\x5a\x74\x4c\x72\x6f\xc2\xc9\xdb\x05\x6b\x34\xfe\x1c\x2d\x80\xff\x15\xbf\xc4\xa1\xdc\x96\x1e\x1c\x39\x5c\xfc\xf5\x9d\x23\xdd\xa5\x7f\x5f\x8b\x96\x18\xe2\x56\x3b\x1c\x34\x08\x20\xf2\x2b\xa2\xb4\x54\x66\xd3\x69\xb0\x3d\xb7\x7d\x1c\x2b\xe2\x1a\xb7\x39\xb3\x38\xe6\x9b\x18\x08\x7f\x93\x18\x0f\x9e\x0f\xdb\xc9\x21\x5c\x9c\x45\xce\xb6\xb3\xca\xbc\xd1\x2e\x21\xec\xa2\x65\x6e\xfb\x78\x0b\xda\x28\xdf\x23\x62\x72\x3f\x8d\x8f\x85\x36\x84\xf1\x1c\xa1\x41\xad\xd8\x3d\xab\x56\xfc\x48\x8f\xf0\x2b\x02\x9e\xdd\xbf\xd1\x6c\x97\x25\xb2\xee\x70\x3e\xe7\xa6\xda\xf1\x73\x21\x7f\x15\x14\x3c\x22\x52\x89\xbc\x2b\xb2\xc5\x89\x0c\x30\x3d\x1f\x59\x8e\x76\x16\x07\xd3\xac\x6d\x68\xd5\x3b\x9c\xd5\xec\x2c\x66\xec\x3b\x92\x33\x9a\x51\x78\xd6\x87\xd3\x6d\xf9\xd2\x76\x4f\x11\xd7\x9e\xdb\x4b\xcc\x0d\x07\x5d\x64\x95\xb6\x38\x97\xa8\x1f\xea\xd5\x18\x57\x10\x60\x12\xa3\x30\x18\xfc\x7a\xbf\x7c\xda\x0e\xab\xfb\x48\xd6\x8a\x5d\x05\x8d\xf1\xa3\xe0\x6d\x73\xdb\x42\xa4\x7b\xfc\xec\x02\x00\xd3\xcb\x42\xb5\x76\xd1\xab\x19\x2a\x1e\xe3\xcc\x12\xa6\xb3\xf8\x79\x64\x56\x6e\xb3\x19\x03\xc4\xd9\x65\x77\xa6\x83\xbc\x75\x12\x65\x09\x2e\xa6\x5d\x71\xd2\xf4\x6e\xc8\x5e\x0e\x2e\xa8\x67\xd0\x2c\x67\x20\x27\x94\x1b\x75\x87\x2d\x74\xf4\x50\x79\xee\x60\x6e\x73\x30\xdb\xb7\x23\xc6\xcb\x6d\x1e\x9c\x39\x98\x08\x7d\x43\x5e\xb5\x44\xb3\xaa\x71\x64\xad\xf0\xd3\x7e\x4e\x5c\xfb\x05\xeb\x8a\xb7\x35\x67\xc7\xda\xa9\x20\x7f\xb7\x1a\x7f\x5a\x69\x54\xb7\xcc\xe2\xf4\x87\x70\xd2\xb3\xd9\x1b\x10\x92\x96\x56\x1f\xdb\x6a\x2f\xf2\x46\x89\x38\xab\x4b\x4a\xa5\x8e\x0f\x57\xd3\x99\x22\x00\x0f\x26\x46\x73\xbb\x5f\xdc\x24\x9f\xb7\x05\x3d\x22\x91\xf4\xce\x20\xcd\x89\x89\x2c\xee\x9f\x0b\xb3\x02\x6d\x8d\xf7\x0a\x71\x31\x97\x70\xde\x66\x01\xe1\x46\x82\x69\x4b\x54\x2d\x3d\x87\x14\x11\x05\x83\xe6\xbc\xc9\xa9\x46\x81\xd8\x41\xc5\x30\x72\x85\x79\x38\x00\xe1\xe0\x4f\xf4\x07\xe1\x4e\xe1\x45\x8e\xd6\xe2\xa2\x96\xad\x3f\x67\x7f
\x03\x5d\x53\xf9\x5a\x1e\x05\xff\xa4\x20\x12\xa0\x89\xcb\x19\xd2\x3a\xa1\xbd\x62\x29\xda\xbe\x1b\x13\xbe\xc0\x5a\x0f\xdd\xff\xab\xb3\x44\x46\x70\x22\xf1\x03\xf9\x25\xa8\x78\x3c\x6a\xa2\x53\x88\x21\x3f\xc6\x39\x99\x89\x62\xd9\x6f\x67\x80\x4b\x27\x22\x4f\x3f\x7f\xdc\x62\x7e\x6e\xaa\x26\xd1\xf5\x58\x12\x73\x4e\x64\xf9\x17\x2e\xd0\x2b\x33\xd1\xd0\xa3\xf0\xec\x81\x2e\x97\xc4\x0b\x22\x40\x9a\xf6\xe6\x0d\xf7\xc0\xcc\xef\xe6\xe1\x19\xc5\x23\xfb\x13\xc4\x9a\x88\x21\xdd\xe3\x26\x58\xc8\xf3\x09\x2b\x22\xd7\x9f\x64\x32\x08\xef\x34\xb6\x43\xda\x3a\x2e\xef\x7b\x24\x24\x74\xf0\x3b\xd5\x50\x32\xa0\x9d\x68\x6f\x4c\xfc\xbd\x95\xce\x2c\x66\xcc\x17\xb3\x2e\xc2\xb0\xf8\xca\xbe\x29\xf0\x30\xaf\xa5\x25\x02\x50\x41\x62\xe2\xcd\x4d\xaf\xca\x6e\xb4\x80\xc2\xee\x41\x61\x06\xf7\x4d\x2e\x95\x95\x5c\x36\x95\x45\x5d\x84\x26\xe1\xa6\xde\x0e\x1c\x6b\x95\x0d\xa2\xa2\x35\x12\x66\xdb\xf0\xbb\xc8\x2e\x02\xb0\xf0\x05\xac\x37\x9b\xb5\xab\x9a\xb8\x7b\x4e\xab\x2e\xa2\x06\x48\xbe\x74\xc6\x19\x6f\x02\xed\x6b\x1e\x89\x2e\x8e\xe9\xb1\x45\x3a\x79\x4a\x45\xf7\x93\xbb\xa1\xa8\x53\x20\xde\xb2\xd0\xc2\xdd\xed\x1c\x46\x5a\xa3\xf6\x1e\xc7\x22\xbd\xb5\x4b\x19\xd5\x1e\xf1\x35\x37\x86\xe9\x9b\x1d\x4d\xe2\xe6\x2a\x1f\x32\x82\xc9\x2c\x75\x99\xa7\x7a\x28\x6a\xe8\x82\x48\x08\x5a\x17\x17\xa3\x55\x85\xdc\xea\x91\xf2\x51\xc9\xbf\xda\xc4\x14\x35\x73\x37\x85\x6b\x2b\x44\x3e\x09\xdf\xfa\x15\x72\xdf\x0e\xdb\x64\x51\x5e\x3a\x18\x73\x6c\x39\xff\xe5\x5d\x0b\x68\x9d\x34\x91\xbd\x8f\x0a\xd7\x4a\x0b\x9a\xf5\x16\x29\xf8\x20\x7b\x12\xb1\x21\x2b\x4c\x5c\x71\xc6\xce\xe5\xdf\x02\xc2\x9a\xb9\x32\x8b\x6c\xc0\x76\xc1\x94\x30\x76\xec\x98\x7d\xe7\x08\xbe\x1d\x14\x51\xf9\x31\xd0\xf4\x63\x6b\xc4\x37\x70\x9f\x18\x8c\x6d\x9b\x60\x7b\xe4\x61\xb0\xb2\x28\xc2\x04\x74\x60\x67\x6a\x5e\x55\x91\x51\x37\x86\x6d\x6d\xe6\xae\x32\x64\x71\x58\x9f\x89\x04\xd2\xc5\x62\xe6\x9e\x15\xc7\xcd\xdd\x2b\x3e\x65\xc9\x1e\xfe\xa9\x1d\xa2\x8b\x6a\x32\xa2\xee\x1b\x7b\x03\x58\x62\x73\x16\x24\xdf\xec\x37\xe2\x19\x33\x21\x20\xd6\x3b\x50\xbf\xc9\xaf\x01\x6a\xcf\xc1\x91\x42\xd9\x99\xf4\xf9\x87\xda\xca\x96\xfc\x99\x59\x9b\x7e\x93\x95\xcc\x02\x87\x5d\xc0\x67\x14\x70\xbf\xfd\x56\x25\xca\xf2\xfb\x15\xd7\xcb\x96\x78\x1c\xc3\x33\x6f\xb9\x38\xd9\xaf\x73\x62\xe7\x31\x0e\x0e\x4a\x24\x44\x21\x97\x86\xbc\xa3\x7e\xd0\xd8\xaf\x3c\xff\xf9\xf8\xaa\x0a\xdd\xd4\x44\x24\x72\xc3\x08\xd4\x77\xad\x8a\xa5\x67\x5b\x56\xb4\xee\x4e\x66\x52\x2e\x58\x59\xa7\x9c\xe5\x54\x92\x12\x1e\x8b\x1a\x5d\x9c\x3e\x07\xa4\xca\x2d\x99\x6d\xd4\x48\x2b\x7a\xef\xd0\x55\x9e\x54\xa1\x39\x48\x22\x31\x2b\x17\x3e\x24\xb7\x12\x66\x04\x3c\xbc\x2c\xa8\xf1\xaa\x0a\x5a\x96\x20\x9a\x33\xc3\x55\x5d\x74\xae\x03\x40\x64\x52\x24\xc2\x7c\x0c\x30\x44\x0f\x82\x59\x76\xa6\x93\x90\xae\x3b\x49\x53\x9e\xe1\x92\xe6\xe1\x79\xca\xad\x01\x1b\xa2\x3e\xa8\x48\xc6\xb4\x75\x7f\x06\xab\x83\x16\x4b\x09\xf3\x29\xee\xfd\x78\x8a\x6a\xd1\x8a\x8a\x40\x55\xdc\x6d\xa8\x60\x88\x55\x15\xe1\x0f\xc7\x8d\x9f\xeb\x2e\xf0\xc6\xd8\x63\x51\x75\x05\xc7\x08\xb9\x0d\xac\xac\x2a\x0b\xd8\x8a\x45\xa9\xad\x02\x4e\x7a\xba\x5d\xe7\xb2\xc5\x30\xdb\x29\x96\x2d\x0d\x91\x99\xc5\x53\x4e\xfa\x90\x11\x75\xd4\xb7\xf1\x72\x10\x16\x47\xc4\x36\x56\xb9\xb7\x16\xc3\x79\x6a\x2f\xab\xa1\xb2\xc6\xc3\x39\xc0\x3c\x92\xa9\x2b\xc4\x86\xdd\x7d\xb2\x6d\xaa\xbe\x85\x84\x6d\x16\x98\x25\xa6\x0e\x76\x57\xd9\xa5\x43\xe0\xfb\xf0\x10\x8c\xc0\x4d\x8a\x02\x42\x1c\xfc\x3a\x5c\xf1\xfc\x1c\xc0\xbd\xc5\xc5\x2b\xd9\xb2\x51\xcd\xb5\x53\xd2\x22\xba\x8d\x86\x68\x4e\x0f\x27\xf5\x22\xa6\x33\x0b\x78\xbb\x18\x76\xcc\xe7\x4f\x72\x86\xd8\x33\xf4\x47\x59\x66\xb8\x35\xcf\x46\xf3\xf1\x69\xab\xcc\x28\x8e\x71\x86\x9d\x3e\xf6\xdd\x31\x37\xf5\x92\xb4\x3d\x1
a\xc9\x30\xe0\xa8\x32\x51\x47\x77\x0b\xbf\xc1\xdb\x11\x78\xfb\x80\xbc\x04\x58\xcf\x32\xd0\xb3\x0c\x4b\x52\x31\x6d\xff\xef\x64\x4c\x6d\x3e\xe8\x4f\x00\x33\x4b\x03\xf7\x71\x50\x57\x25\x63\x65\x5e\x5b\x65\x0a\x0c\x0b\x5d\x09\x7d\xd4\xcc\x98\xef\x60\xf3\x41\x12\xd1\x19\xc5\xfa\x88\x05\x36\x99\xb3\x74\x20\x9c\xe1\x13\x5a\x18\x7c\x72\x77\x19\xf3\x89\x3f\x6f\xe6\xeb\xe4\x69\xb3\x44\xc3\x0c\x73\xa0\xfb\x04\x20\xe1\x8e\xec\xea\xf1\x3b\x26\xd2\x88\xec\xc1\xed\x21\x60\x7c\x1b\xbf\xd6\x0e\x9d\xcc\x73\x77\x06\x35\x8d\x98\x5d\x9a\xa2\xbe\x8d\x05\x58\x68\x14\x17\x05\xa8\x8d\x94\xc0\x4a\x38\x8b\x40\x03\x8b\x30\xf6\x11\xc8\x85\x98\x04\x63\xd9\x40\x88\x55\x78\x43\xd6\x2e\x6e\x29\x02\x44\xb1\x96\xf6\xa2\xea\xc0\xbc\x88\xf1\xd5\xe1\x5c\x9c\x9e\x69\xb5\x87\x7d\x9c\xcd\x17\x1e\x0d\x6b\x33\xdb\xa2\xf0\x70\x8b\xa2\xb4\x53\xa4\xe0\x9c\xf3\xa0\x84\xdb\x63\x1f\xe1\x63\x9d\x02\x34\xb4\x83\x82\x6a\x44\x5c\x56\x6c\x53\x44\xa5\xda\x3b\x6c\x79\x02\x6e\x60\xf1\x64\xb9\x7b\x27\x68\xe2\xc5\xc0\x2b\xe7\x07\x62\x9a\x7c\x57\xeb\x97\x39\x81\x9e\xe2\x12\x8c\x6d\x5b\xe1\x8f\x1c\xaf\xe2\x7d\x1d\xc3\x79\x5d\xf2\x70\xad\x6d\x13\x23\x51\xc1\xc7\xea\x48\xa9\xea\xca\xa7\xf2\xce\xc3\x3f\xba\xbf\xe7\xc5\x23\x04\xe7\x82\xe1\xf7\xcc\xb2\xaa\x53\xcc\x26\x85\x10\x62\x31\xd2\x04\xe1\xf7\x9d\x1b\x2b\x7f\x3d\xab\x5c\x5c\x66\x1d\xb7\xb8\xd7\xe0\xf5\x71\x89\xde\xb0\xa2\xde\x0e\xc1\xb6\xfe\x7b\x14\xcd\x7a\xcb\x2a\xfa\x8b\xcd\xea\x06\x0c\x39\x2b\xe8\x8c\x5c\x56\x1c\xd8\x55\x98\xe7\x5e\x59\xba\xf1\x16\x6a\x6f\xef\x21\x43\x52\x6d\x64\x6b\x11\xa5\x91\x17\x6a\x8a\xa2\xe7\xc3\x92\x04\x11\x09\xc9\x11\x97\x05\xc7\x79\x37\x3f\xcd\xef\xaf\xe7\x41\xa2\xc4\xac\xf6\x48\x96\x34\x57\x11\x96\xe7\xf4\x89\xf8\x2b\x3d\x15\x93\x3e\x1e\x0d\xc5\xa0\xa5\xcb\xcb\x4f\x0d\x6c\x17\x3e\x03\xed\xe2\xc1\x04\x8d\x04\x5b\x25\x30\x4b\xdc\x32\x4b\xff\x8f\x45\x67\x6c\x6b\x11\x02\xf0\xe9\x41\xff\x95\x05\x86\xb2\x10\x72\xfc\x7c\xf3\x36\x36\xda\x60\x0b\xd2\x76\x6e\xde\x29\xa3\x2a\x32\x86\x97\x63\x2c\x1f\xe2\xdb\xdf\x27\x46\x80\xad\x9b\xbd\x3b\x8f\xa3\x8a\xca\xad\x7a\x49\xf9\x3d\x18\x1a\x1e\x2d\x8d\xa0\x33\xb0\xdf\x50\xb8\x70\x4c\xc4\x18\xa0\x7d\x5a\x82\x32\xa9\xe0\x90\xa2\x5d\x9b\xed\xa6\x7e\x4e\xdb\x06\xd7\xeb\x36\xde\xc5\xf8\x2d\x2a\x96\xdc\x65\x2f\x10\xef\xd1\x00\x2d\x00\x12\x82\xa6\x85\xfc\x07\xbf\x57\x38\xe6\x9d\xdd\xc8\x4e\xda\x73\xd3\xa7\x80\x7b\x9a\xc8\xfa\x71\x3c\x76\x4d\x37\x61\x1e\x0c\x47\x1d\xec\x2a\x77\xbe\x57\xdc\x15\x0f\xc0\x0e\x06\x91\x6c\x1f\x42\x7e\x08\xc4\xf7\xf1\xc1\xc5\xdd\xce\xcc\xce\x70\x26\x20\x5d\x55\x2c\xe3\xe7\x3c\xce\x97\xf3\x48\x93\x12\x5f\x98\x85\x46\x4a\x46\xdc\x87\xd5\x47\xf5\x16\x5f\x94\x8d\x0f\x09\x5e\x1a\xb1\xa5\x25\xf3\xf0\xdd\xc5\x21\xf1\x36\xe5\x1d\xab\x0a\x6b\x16\xd1\xd2\x61\xd9\x83\x1a\xd0\xbd\x70\xca\xe7\xde\x17\x66\xe4\x33\xb8\xd7\xf7\xb2\x51\x3d\x00\xd8\x24\xb9\xa1\xce\xcb\x98\x1b\x11\xfb\x95\x98\x63\xa9\x5a\xe0\x3c\x47\x50\x83\xf9\x61\x73\x1c\x0e\xb2\x5f\x70\xf6\x40\x3c\xe8\x1b\xc0\x76\xd8\x5a\xe0\x36\xbd\x83\x9a\xd4\x3b\x58\xa2\x3e\x84\xe8\xc3\x66\x88\x7d\xbb\x50\x50\xb9\xe2\xa9\x10\x0e\x6b\x2c\x46\x24\x3a\x57\x40\x42\x8c\x8d\xdc\x03\x17\x93\xa0\xe7\xdc\x48\x5e\x4d\xb3\x85\xf4\x8a\x73\xde\xde\x61\x4f\x6a\xd3\x7e\x61\xe5\x01\x00\xb0\xb2\x0b\xb7\xa3\x56\x26\x10\x61\x5a\x3e\x45\xd1\x95\x55\xf7\x6d\x4a\xde\xb0\xa6\x74\x75\xa6\x2d\x0b\xfa\x88\x97\xde\xf0\xba\xcf\xc2\xe6\x9d\x22\x1b\x15\x4d\xfa\xaa\xc7\xf8\x92\x07\xfe\x56\x57\xe5\xde\x20\xc3\x36\x07\xe4\xbc\xc2\x20\xd5\x17\x9e\x6f\x47\x9d\xc0\x1f\xf8\x6d\x3d\xe6\x5d\x36\xe7\xf1\x11\xc3\x09\x1d\xdf\x47\x03\xa1\x14\x8c\xf1\x7a\x12\x26\xc4\x39\x11\x8d\xc3\x02\x72\xc5\x0b\x8b\x7e\xd1\x5e\xd1\x78\x81\x
f6\x28\x96\xc7\xd4\xc1\xca\x56\x42\x56\x73\x82\x37\x90\x17\xa5\x99\x13\xfb\x03\x50\xf5\xc3\x07\x9b\xd5\x51\xca\x03\xde\x49\xd0\x34\x67\xed\x05\xf0\xb2\x41\x74\x9b\x9e\xed\x34\x7b\xb3\x66\xf6\x55\xdb\x68\xae\xac\x59\xb5\xd8\xf1\xb3\x88\x89\x1a\xc7\xcd\x8e\x95\x1f\x9b\x86\x14\x74\xe9\x90\xa5\xf1\x07\xb3\xe5\x12\x8c\x67\x4b\xcb\xe0\xc6\x16\x6e\x8f\x34\x3b\x1d\xb4\xb6\x95\x3d\x21\xfe\x3b\x49\xf7\x50\x0d\x1e\x01\xcc\x39\xb3\x3c\x62\xdf\x04\xbb\x68\xa1\xd8\xc8\x2b\x1e\x2e\xd7\xfb\x54\x75\x32\x1c\x68\x46\x76\xe8\x40\xdb\x9e\x08\x96\x26\x51\xe2\xbc\x6b\x00\xb0\x44\x65\x43\xbf\xd9\x22\x82\x4f\x53\x40\xeb\xde\x67\xcc\x20\x05\x05\x7d\xf8\xd8\x73\xb8\x72\x75\x61\xbe\xa0\x2d\xd1\xc3\xee\x4c\xe8\x41\xca\x8a\xd4\x8e\x9c\x42\xd2\x3a\x83\x2d\x6b\x1f\xcc\x6a\xd3\x44\x26\x98\xa7\x00\xf6\x29\x64\x09\xda\xe3\xb3\x9c\xeb\xd5\xa6\xab\x79\x85\xec\x90\x65\x78\x53\xbe\xc4\xcf\x80\x32\x51\xca\xb6\x74\x8d\xe3\xd8\xa0\x9e\xb7\x87\x16\x0b\x8d\x93\xcb\xab\xfc\xa1\x95\x0f\xf7\x6b\x21\x3d\xfe\xc1\xf1\xa6\xa9\x65\x1c\x41\x77\x62\x4e\xc6\x02\x70\x60\xfa\x02\x2b\xdf\xea\x38\x29\x4a\x19\xd1\x0a\x48\x62\x40\x3f\x7d\x7f\x93\x3a\xb0\xfe\x3b\xa3\x88\xcc\x7a\xc1\xac\x7e\x27\xd5\x16\xc5\x52\x8c\xc7\xca\xd1\xfe\x73\x8a\x8a\xe2\xaf\xaf\x37\x73\xc2\x54\x00\xc8\xc3\x43\xe7\x10\x4c\x87\x68\xa0\x24\x57\x12\x68\x4c\xf6\xa6\x7e\x98\x81\x73\x8b\x46\x88\x20\xba\xd2\x9b\x22\x10\xe3\xee\xa3\xa2\x83\xe6\x86\x20\x7b\x90\x4d\x09\xaa\x11\x0b\x49\xe7\xb4\xb4\x52\x4b\xa7\x06\xc1\xe1\x02\x23\x9c\x97\x25\x3c\xc0\x8d\xcd\x5f\x72\x27\xcc\x02\x7b\x32\x82\x93\xb5\x46\xc1\x7a\xe1\x0b\x3e\x05\xc1\x1d\x42\x0e\x9f\x4b\x0e\x44\x41\xa2\x39\x9b\x36\xad\xf3\x0c\x1b\x9b\x15\xc4\x3f\x5b\x93\x17\xd9\x78\x43\x56\x2b\xd4\xb9\x04\x9d\xc4\xc5\x3f\xf0\x73\x3f\x35\x6a\x22\x0a\x12\xf1\xbc\x22\xd1\x6d\x8c\x46\x5f\xe1\xd8\x5d\x38\x20\x3a\xb2\xa8\x9b\x83\xff\xce\x84\xf8\x21\x78\xde\x42\xa7\x5b\x04\x3c\x3c\xdf\x71\x64\x40\x8c\xf3\x5f\xea\xb9\x53\xc8\x49\xfc\x0a\xa7\xb6\x5f\xcf\xb6\x2b\x88\x1a\xd0\xd1\xdb\xdd\xe5\x81\x7c\x8a\xaf\x7e\x54\xb2\x2d\x2d\x4a\x62\x89\x77\x53\xdc\x65\x9e\x07\x2f\x6d\x29\x9a\x5e\xab\x35\x9e\x98\x7b\x3d\x4a\x6b\x77\x8a\x04\x98\xcc\x3a\x44\xa1\x70\x6a\x21\xd8\x50\xff\x32\xb4\x51\xc6\xac\x27\x75\x00\x82\x66\x0c\x3d\x82\x84\x3b\xec\x93\x36\x5e\xc8\x88\x93\xc8\x9d\xb9\xa1\xe1\x5d\x70\xce\xf6\xd2\x51\x9b\x5a\xa8\xd6\xb7\xae\x8d\x97\xa5\x43\x65\x13\x1f\xb5\x45\x47\xce\x82\x52\xf7\x21\x71\x99\x57\x62\x81\x56\x1f\x6e\x16\x57\xce\x2b\x51\xae\x43\x95\x0e\x2c\x5f\x11\x74\xa0\x05\xf3\x03\x7e\xa3\xbf\x0e\x25\x32\xe1\xf9\x43\xa8\x21\x2d\x17\xba\xd8\x49\xd7\x81\x67\x3a\x48\x04\x0d\xaa\x54\x33\x26\x2a\xae\x61\xa9\x1e\x1b\xfb\x14\x9c\x49\x1c\xac\x37\x12\x2b\x0f\x09\x35\x74\x96\x6c\xdc\x5a\xa3\x23\x8f\xa6\xad\xd8\xe7\x4d\x70\x89\xdd\x63\xea\x5b\x40\xb6\x86\xd2\xfd\x29\x2c\xe2\x55\x36\x46\xf7\xa3\x84\x06\x2c\x54\xb6\xd5\x10\xa4\xec\x80\x69\xec\xb3\x73\x61\xb0\x68\x1b\xa9\x3f\xa2\x8d\xf2\xf6\x9a\x57\x9a\xeb\x8d\x34\x40\x54\xfe\x0a\x3f\x7e\x8a\x7b\xf0\x56\xc7\x6a\xee\xc7\xa5\x15\x08\x98\x1c\x71\x69\x26\xc9\xbe\x2c\xbf\xb1\x8d\x3c\xee\xf0\xd8\xd1\xf2\x56\x24\x97\x02\x96\x86\x02\x88\x7d\x54\xd7\x2b\xca\x86\x03\x6f\x6d\x94\xa0\xdd\x4d\x73\x91\x08\xb0\xa0\x19\xfe\xca\xfa\xed\xcf\x0f\xd0\x0f\x0b\x39\xb0\x6b\x1e\x2d\xca\xbc\x55\xf5\x29\x6d\xdc\x95\x8e\xa4\x4f\x75\x0a\x39\x1d\xdd\xdb\xe0\x14\x9b\xf7\x16\x73\xca\x54\xba\x49\xe6\x3f\xae\xa4\x82\x2e\x76\x02\x74\x26\xee\x28\xa6\xca\xc0\x39\x4f\x27\xb4\x6e\x2c\x6a\xb2\x45\x61\x60\x38\x15\xe1\x16\x43\x11\x99\xb4\x6b\xb4\x2d\x08\x4c\xa9\x9e\x1d\xd1\xd0\x77\x7f\xff\x17\x8a\xa2\xd3\x05\xe9\x31\x1e\x54\xdc\xbb\xc2\x1e\xe6\xad\xfe\x7e\x64\x95\
x54\x6a\xb1\xdc\xb8\x35\x90\x4f\x61\x51\xdf\x4c\xc4\x6b\xb7\xb7\x72\xc7\x46\x2e\xfd\xe1\x95\x09\x55\x39\xc8\x57\x62\x58\xe5\x94\x03\x7d\x4c\xbe\x0b\x44\x85\x4c\xc5\x9f\x88\x57\x1b\x62\xbc\x65\x31\x1a\x9c\xfb\x13\x4a\x64\xe2\x80\x14\x42\xd3\x54\x7d\x68\xd0\x61\xb0\x1a\x8b\xf8\xde\xe7\x12\x3c\x25\x05\x0c\x68\x40\x65\xf6\xb5\xd1\xee\xce\x6a\xe4\xbc\x23\xc0\xbc\x85\xe1\xc4\x07\xa0\x94\xa5\xde\x5e\xaf\xed\x32\xaf\x18\x53\x84\x74\x0c\x7b\xe2\xd6\x45\x51\x48\x00\x4a\xe3\x20\xab\x8d\x78\x52\xbd\xae\x17\xd6\x14\xdd\xe9\x8e\x6c\x36\x53\x89\xab\xce\x78\x1a\x66\x9f\xc4\x8b\xab\xab\xd3\xad\x85\x3b\x6b\x55\x11\x3b\x69\x82\xa4\x1e\xb4\x3f\xc1\x66\x82\x85\xd5\xfb\x85\x87\xf6\xdc\x59\x26\xa4\x57\x73\x0a\xd8\x05\x30\x36\x08\x46\xf0\x71\x30\xe9\x1f\x5a\x2e\xb6\xe8\xc9\x42\x4b\x87\x6a\x39\x45\xd8\x13\x78\x0b\xbd\xaf\x3f\x2d\xfd\x53\xe9\x03\x0f\x4c\xbc\x73\x0b\x6d\xbf\x57\xa9\x71\xee\x47\x02\xff\xcc\xc2\x1b\xac\xe6\x14\x5f\x2c\xb9\x6a\xd4\xdb\x45\xeb\x74\xe7\x72\xa8\xe2\x3e\xd5\x0f\xfe\x74\xa5\x03\x25\xcb\xde\x06\x45\x6b\x31\xf1\xf2\xb3\xff\x74\x62\x17\xd0\x5f\x87\x33\xd3\xd4\xd7\xb1\x32\xb3\x60\x0c\x1e\x5d\xb0\x08\x68\xf7\x8f\x6d\x46\x3a\xb6\x12\x44\x3a\x7e\x58\x11\x7c\x83\xb0\xc9\x65\xba\x06\x3c\x60\x11\x45\x4f\x64\x4b\x65\x5e\x04\x12\x24\x8b\x64\x31\x3a\xfe\xf9\x2f\x9f\x30\x20\xa7\x9b\x3a\x0f\xb4\xf1\x27\x65\x34\xa3\x6e\x61\x31\x38\x44\x9b\x16\xcb\x0d\x05\xe8\xcb\xc1\xf5\x66\xae\x5b\xbc\x2f\x50\x87\x31\x63\x99\xc2\x13\x1d\x65\xd7\xb3\xb0\x9b\xff\x52\x1a\x90\x92\x84\xd7\xcb\x3c\x47\xbb\x85\x84\x59\x63\x42\x9d\xb6\x59\xe4\x34\x79\xfd\x38\x73\x95\xc4\xab\x40\x23\x02\x76\xcd\x23\x62\x4a\x16\xfc\x77\x96\x55\x81\x5d\x97\x88\x62\xb9\x34\x13\x3b\xce\xd1\x1f\xa7\x68\x85\x6c\x48\xdd\x87\x70\xec\x7e\xc8\x61\x9a\xc4\x2e\xa3\x15\x9d\x52\x27\x35\x01\x4f\x2f\xb6\x12\x38\x9f\x7a\xa9\xb6\x59\xc5\x2d\x36\xa8\xd3\x61\xf7\x59\xf6\x7c\x38\x17\x88\xe3\x7a\xbf\x0b\xe9\xfa\x7e\x9c\xaa\xea\x57\x0e\xbb\x34\x2d\x2e\x80\x76\x38\xcb\x7f\xa9\x02\xf2\x6c\xd9\xdf\x0f\x7f\xd7\x28\xb7\x9a\x3a\x4a\x23\x9d\x8b\x74\x6c\xaf\x1a\xbb\x36\xc7\x2e\x1d\xda\xe6\xc9\x93\xdf\x69\x9f\x10\xdb\x04\xe6\xa8\x56\x96\xa8\xf8\x6f\xaf\xa8\x32\x7b\xc0\xde\x5f\xdc\xcf\x49\xac\x10\xa4\xb0\x8a\x64\x5a\xe8\xa2\x7e\x63\xa6\xa2\x57\x2b\xdc\x02\x25\xb9\x47\x5c\x2a\xb6\xfa\xa4\xd3\x96\x68\x0b\xb6\xb0\x7d\x40\x99\xa6\x56\xb9\x78\x6b\xf9\xf3\xa6\x22\xbe\x87\x16\x1b\xa3\x2c\xea\xaf\x28\x8b\xa0\x2b\x74\x3e\x0f\xe1\xfc\x8e\x54\x9c\x1a\x5c\x79\xef\x22\x26\x2f\x0d\x68\xf2\xbb\x21\xe9\xe8\xf2\x62\xbd\x9d\x06\xc0\x73\x25\x9a\x33\xa7\x84\xe1\x7d\x2b\xbf\x15\xc7\x1d\xb6\xe9\x3a\xda\x1e\x9f\x0b\x61\x9c\xbd\x66\x4a\xbb\x6c\xe1\xd1\x12\x11\x2c\xea\x70\x24\xbb\xb3\xa1\xd9\x09\xc5\x2a\x80\x86\x14\x4e\xf2\xf8\xb5\x06\xe4\x36\xe8\x2a\x21\xb2\x91\xd5\x0e\xf5\x2f\xfd\x50\x54\x1f\x3c\x72\x1f\xc4\x68\xa4\xb0\xef\x18\x19\x92\xb7\x0b\xf3\x9c\xda\x6d\xd8\x5a\x74\x52\xd7\x20\xc9\x7a\xb7\x53\x73\x19\x8b\x5a\x36\xc0\x4f\xa2\x33\xde\x5e\x82\xbf\xf6\x74\x46\x52\x3f\x97\xc0\xa1\x42\xe8\x07\x04\x06\xf8\x09\xb6\xe2\x82\x6a\x26\x13\x3c\x31\xce\x93\xb6\x8f\x52\xa0\x53\xec\x51\xa7\x97\x20\x7e\x26\x97\x4e\xf5\x8d\x65\xbf\x2b\xbf\x03\x91\x5c\xe9\xfc\xc2\x1e\x27\x18\x11\xa9\xd6\x84\xe2\x1a\xdd\xa6\xed\x2a\x43\x3e\x54\x8e\xbc\x43\xf7\x08\xb3\xbe\x05\xcc\x76\x46\x57\x2f\xdc\x49\x57\xd0\x32\x4c\x13\xbd\x5a\x42\x8d\x02\x81\xca\x17\x1e\xfa\xe2\x98\x14\xe1\x87\x4e\x8e\xd9\x5a\x5b\x40\x24\x58\x53\x6e\x6a\x83\x11\xe0\x38\x8d\x41\x98\x41\xbf\xa9\x13\x23\x88\x72\xf0\xdf\xfc\x34\xca\x35\x8e\x39\x6a\x32\x51\x53\x7f\xa8\x4f\xd6\x3e\x98\xc4\xac\xb4\x3f\xc3\x72\x04\x36\xc5\xa8\xa7\xaa\x3b\xbe\x48\xea\xcd\xcc\x89\x50\x18\x34
\x96\xe2\x01\xfd\x32\x6c\x4c\x66\x13\x5b\x43\xf0\x06\xe9\x62\xe7\x7b\xcf\x17\x93\xf9\x9f\xd3\xad\xe1\xc5\x8d\xfa\x22\x51\x3d\x0a\xd9\xc9\xcc\xd9\xc0\x86\xe1\x66\xb5\x1e\x2e\x8b\xf9\xa8\xd9\x1b\xb8\xf1\x76\x52\x56\x54\x8a\x52\x7f\x68\x36\x24\xf9\xe7\x4c\xe2\x99\xc6\x01\x20\xe2\x29\xd9\xe0\x6d\x3e\x71\x96\x59\x1e\xc4\x5a\x2f\x49\xd2\x71\xbd\x52\xfd\x59\x28\x49\x5a\xaa\x84\xb3\xd3\x4d\xd8\x55\xf0\x6b\x8e\xd0\xeb\xb0\x3b\x5e\x42\xbe\x35\xd4\x5e\xd0\xb2\x87\xeb\x0e\xf9\x86\x9d\x09\x02\xef\xba\x88\x8b\x01\x19\x95\xb0\x4a\xad\xce\x2a\x92\x88\x5a\x23\x64\x3e\xb6\x1c\x5a\xb2\xfb\xc8\xf8\xd3\xcb\x96\x6c\xcc\x35\xc7\xaf\x4f\x40\x17\x44\x95\x24\x04\xb7\x07\xd8\xb6\x64\x5c\x9c\x38\x7b\x0b\xcd\x52\x46\x90\x5b\x3c\x88\xa7\x92\x8b\x40\x89\xb3\x45\x5e\xde\x5c\xb2\xc8\x28\x0e\x59\xa0\x96\xed\xaf\xb4\xcb\x8e\x10\x36\x89\xf6\x30\x77\xda\xa4\xc2\x81\xc9\xe9\x2c\x0f\xcf\xe2\x98\x97\xa0\xd6\x4e\xa0\xd0\x7e\x3f\x9c\x3c\x9a\x90\x59\x52\x20\x0a\xc9\xad\x20\xb3\x4a\x3f\x76\x2e\x21\x4a\x7b\x48\x7b\x95\xe5\x7b\x5e\x84\xd5\xd2\x7e\x75\xec\x47\x66\xe9\xf9\x3f\x3e\x4b\x64\x81\x1b\x91\x45\x72\x03\x4e\x95\xbc\xa6\x41\x1c\x02\x67\xdb\xea\x2e\x29\xab\x5d\xcb\xb7\x9a\x18\xe3\xbf\x70\x16\x2e\x74\xde\x7a\x84\x0f\xd2\x68\x67\xa8\xdb\x82\xcd\xea\x4f\xf9\x02\x1d\xa1\x88\x3a\x77\xa8\x5d\x7e\x37\x57\x42\x0e\xcd\x54\xbb\x8e\x30\xc8\xc8\x4b\xf3\x87\xc2\xdd\x0f\x56\x98\xf3\xaf\xbb\x21\xb2\x6c\xa0\xbc\x69\xee\x66\xa6\xf4\x2c\x09\x45\x92\x9e\xba\x27\xb5\x24\x75\x11\xc8\x74\xd4\x26\x17\x67\xc9\xc3\x81\xee\xdc\x70\x5a\x91\x3c\x31\x88\xff\x43\x35\xfe\xee\x8c\x3c\x41\x41\x1b\xf8\x7e\x83\x58\x98\x51\x63\x82\x85\x62\xa2\x30\x47\x0f\x05\xf5\x66\xb2\x8a\xee\xc8\x89\xa2\x94\x39\x52\x8e\xc5\x99\x5f\x2c\x27\x29\xe7\xf3\xb8\x8b\x26\xf0\xa7\x9e\x6a\xfd\xbe\x13\x0f\x90\x6f\xa7\x4f\x3a\xd7\x55\x75\xef\x2d\x51\x49\x6e\x5a\x89\xef\xa3\x56\xc8\x2d\x37\x2b\x3e\x07\x33\xd9\xfb\x97\xa2\xbd\x21\x93\x6d\x76\x15\xb4\xcd\x46\x4e\x2d\x3c\x69\x7b\xb6\xb2\x2d\x48\x35\x7b\x5a\x07\x95\xb1\xb0\x40\x2f\xf6\xcc\xd1\x85\x25\x42\x5f\xc7\xd7\x61\xc2\x43\xd3\xed\xf7\xe1\x1e\xb7\x38\xbd\x66\x1f\xcf\x49\x46\x40\x57\xcc\xfe\x08\x91\xbb\xff\xce\x44\x8f\x75\x09\xb4\xac\x28\xad\xc2\x32\x56\xdd\x0d\x08\x4f\x04\xe1\x21\x0d\x20\xee\x6f\x0c\x15\xf6\x88\xc3\xc1\xea\x21\x1c\x87\xc0\x1a\xfc\x18\x6e\x70\x34\x5f\x28\xf9\x32\x6b\x5b\x61\x1a\x48\x27\x9c\x12\x63\x28\x08\xa2\x30\xf5\x60\xd9\x54\x04\x65\x5e\x11\x6c\x62\x12\x6e\xee\x5c\x9f\x3f\xa8\x89\x91\x39\x19\x9f\x2f\xc5\xb4\x49\x94\x29\xc7\xd1\xe9\x7b\xbd\xf6\x4c\xa8\x07\xc8\x66\x14\x7e\xd5\x08\x06\x5f\x86\x15\x76\xb8\x4e\xa5\x59\x51\x2f\xf0\x53\x9f\x55\x67\xc5\x42\xd6\x4e\xca\x1b\x7b\xd0\xc4\x42\x6f\x8d\xd5\xcd\xd8\xd2\x84\x9a\xdb\x04\xd3\x61\x61\x4d\xb4\x50\xf4\xe7\x26\x9c\xbb\xa3\xb1\xc9\x51\x9e\x38\xd8\x5a\xcd\x0a\xb2\x62\x49\x69\x32\xbb\xa0\x16\x5d\x38\xfa\x2d\x43\x28\xdc\xfc\x57\x94\x18\x7b\xc1\x05\x5d\xc6\xc5\x8e\xb2\xd4\xc9\x03\xdd\xd5\x5f\x85\x33\x13\x53\x42\x33\x96\x83\x50\x71\x66\x2b\xf8\x26\x25\x87\x9e\x91\x0e\x44\x0e\xed\x41\x24\x6d\x6f\x31\x49\x89\xfe\x94\x60\xff\x87\x18\xf5\x43\xb9\xff\xbb\xac\x82\x87\x1f\x59\x4d\x8e\x96\x97\x32\xed\x3b\xb2\xc2\x84\xc5\x1c\x3c\xdb\xaf\x88\x7a\x4f\x82\x9a\x54\x1d\x85\xd1\x34\x4b\x45\x3b\x00\xdf\x41\xe3\xad\x35\x24\x85\xb4\xb0\x81\xae\xb0\x47\x4c\xa8\xba\x28\xef\x2e\xcc\xa4\xe3\xee\x5a\xf9\xcc\x82\x9c\x26\x2e\x72\xb7\xc7\x24\x96\x7c\xb1\x41\xa6\xc7\x7e\x20\xe3\x22\xc2\x70\xf7\xd6\x84\xf0\xcb\x66\x60\xc9\x0e\x3b\xde\xa2\x16\xda\x77\x37\x7e\xae\xfd\x45\x2a\xd3\xa8\xf9\x88\x01\x8c\xc2\x98\xe2\x4f\x8a\x96\xf2\xac\x45\xfb\xe9\x22\x55\xbd\x47\x48\x37\xee\x35\x91\xe5\xf1\xb8\x3a\x44\x1a\xcf\x18\xa6\x2
0\xbd\x49\x83\x5c\x3c\x5e\x64\xaa\x5a\xd9\xcf\xbe\xd3\xe5\xad\x22\x5b\xb1\xc3\xef\x2e\x3a\x31\x53\xc5\x9b\xc4\x86\x64\x55\x14\x4b\x7b\xc9\x2e\x88\x70\x59\x2b\x33\xa6\xab\xde\x89\x72\x9a\xae\xeb\x98\x24\x25\x3b\x4d\x6f\x69\x30\x4f\x77\x7e\x45\x1e\x1a\x33\xaf\x5d\x3d\x98\xae\xe5\x7c\xb2\xf9\x7a\x40\x20\xf7\x08\x7e\xa4\x3a\xf0\xf0\x24\x70\xfa\x6c\xb0\xde\xb9\xd4\xfd\x53\x8a\x08\xfb\x67\x5e\x0f\x62\xff\x22\x57\xa0\x0d\x4d\x8d\x2d\x54\xbf\x1b\xe2\xe3\x28\x35\x51\x1e\xc9\xb5\xe1\x79\x18\xa0\x6f\xc1\x43\xc3\xce\x49\xdc\x5c\x0f\xc5\xab\xae\xda\x07\x0f\xff\xc6\x22\xda\x71\x3f\xe1\xca\x24\x76\x39\x65\x2a\x7c\xdb\x41\x08\xe8\x52\x1d\x23\x20\x0b\xb7\x45\x83\x3c\xb8\x58\x2a\x95\x92\xa4\x10\x72\x69\x35\x23\x11\x77\xfd\x32\x88\x73\x36\x4a\xe6\xfb\xff\xe4\x7f\xcf\x2e\x26\x26\x31\x1a\xdb\x6c\xe6\x30\xe4\xf8\x8f\x04\x69\xdc\x10\x2b\xef\xc5\x92\x22\x2e\xbb\xb3\x61\x95\x3d\xd8\x55\x6d\x28\xab\x52\x8c\x4b\x50\xd1\xf2\xc1\x18\x6c\xb1\xb9\xcf\xf3\x0f\x87\x4a\x3a\xcf\xa5\x27\x1b\xd7\xc5\xc1\xb5\x5b\x8e\x88\xeb\x46\xcd\x88\x80\x7b\x26\xb3\x90\x81\x11\x96\xaa\xf2\x8d\x9c\xb6\x3c\x91\x52\xea\x81\x1e\x0b\x2c\x14\xe4\xa0\x78\xe5\x8d\x14\x11\xe5\x7b\x54\xbe\xa5\x75\x28\x7d\xf0\x64\xf3\x9d\x4e\x61\xaf\x8c\xbd\x00\xb2\xc2\x57\x31\x73\x5c\x1a\x42\xf7\x41\x88\x82\x24\x4d\xa7\xf5\x1d\xfa\x39\x13\xd1\x0f\x74\x84\x32\x0d\x20\xff\x42\x3d\x20\x6a\xea\xb2\xdf\x7b\xb6\x38\xdb\x35\x7d\x18\xa8\x4c\x74\xb2\x66\xd5\xbb\x16\xfa\x8a\xcf\x5c\x24\x15\x94\x86\x1a\x05\xab\xcf\x9f\x10\x19\x8c\x8a\xb1\x4a\x7b\x61\xf8\xb4\x5d\xd9\xd2\x03\x1c\x5c\xf3\x6f\xa1\xc2\x87\xec\x46\x38\xc5\x1a\xa4\x7a\x04\xe5\x28\x5c\x06\x9e\x63\x7e\x65\x52\x23\x22\x5a\xb3\x14\x68\x65\x32\x62\x9d\x5a\x48\xe8\xf0\xa0\x36\x26\x59\x60\x52\x12\x9e\x97\x12\xf5\x8a\x70\x53\x68\x6a\xc0\xe5\x3f\xaf\xec\x85\x74\xf4\xa0\xb7\x22\x70\x3c\x59\x35\x87\xbe\x80\x07\x3b\x8a\x14\xee\x8d\x17\xab\xa4\x96\xbf\x28\x84\x36\xba\x4e\x5d\x07\x80\x1d\xc2\x11\x6b\x00\x92\x97\x64\x9a\xb3\xb4\x45\xa4\xa0\xb6\xc7\x07\x12\x5f\x60\x7b\xbe\x4a\xa5\x48\xa0\xc8\x9f\x0d\x48\x45\x4a\xb5\xf3\x55\x08\xdb\x43\x88\xd7\xc1\x2c\x44\x83\xca\xad\xb0\x5f\x7f\x5d\x02\x00\x07\x3d\x8a\x07\x2b\xf1\x22\x36\x26\xba\xe0\x86\x48\xcb\x93\xb4\x66\x83\xc2\x60\xbf\x96\x81\xbb\xfb\x1d\x26\x41\xaf\xdc\xdb\x5d\xa2\xc1\x75\x58\x82\xa2\x06\xe6\x84\x35\xce\xc5\x4e\x10\x6f\x0e\x3b\xda\x1b\xd8\xe6\x18\x12\x03\xa6\x43\x39\x89\x77\x1f\x52\xa0\x22\xe4\xe4\xf9\x18\x20\x40\x85\xe6\x42\xf0\xa3\xd9\xc0\x52\x33\x05\x54\x49\xad\xc1\x67\x58\xc6\x7d\xc4\xc3\xec\x3c\x43\x3d\x19\x14\x5e\xf0\xbc\x05\xcb\x36\x6c\x64\x3b\xee\x30\x0b\x64\xc2\x67\x6f\x65\x67\x60\xfd\x0a\xaa\xcf\x21\xc9\xa5\x83\x89\xe1\xde\x34\x8a\xc8\xb4\x24\x35\xaf\xd9\x43\x21\xca\x34\x56\xce\x2f\xfe\xa3\x72\x07\x7b\x48\xcf\x46\x60\x8e\x56\x0b\xbb\x09\x5c\xa5\x7b\x13\x5b\xcb\xbf\x3a\xa4\x87\xfa\x6b\xfa\xea\xbd\xe9\xaa\x56\xaf\xd1\x69\xe5\x11\x38\xbd\x28\x6c\x2a\xe3\xb4\x2d\x04\xd8\x1a\xd9\x97\xd1\x34\xe3\x77\xd1\x98\x7c\x3f\x1a\x95\xda\xb3\x77\x3a\xe5\x70\xe0\x5f\xea\x84\x04\xad\xf7\x5b\xab\x62\x33\x98\x60\x40\x63\xa7\x4a\xdc\xd7\x14\x42\xa6\x76\xf0\xe1\xc0\x68\x20\x3b\x17\x2f\x54\xc7\x04\x13\xab\x95\x15\x67\x4f\x42\xe5\xfa\x08\xc5\x90\xd0\x72\x93\xf4\xd0\xff\x01\x6b\xdd\xd4\x8a\xc4\x0e\x63\x8a\x97\x32\x13\xbe\x9b\x93\x02\x0e\x37\x21\x4f\x0a\x0a\xd0\xd2\x9a\xa1\xec\x52\xad\x5f\x12\xaa\xd8\x0b\x73\xa3\x89\x6d\x84\xf1\x34\x99\x9b\x8f\x36\x23\x24\xe3\x0c\xcf\xa7\xe0\x3e\xd8\xcd\xbe\x78\x92\x60\x75\x6b\xa0\x88\xad\x63\xfd\x50\xcb\xc7\x1c\x54\x54\x4a\xf0\xea\x5e\x23\x21\xf9\xc8\x23\xab\xbe\x50\x8a\x63\x25\xb8\xff\xa3\xd8\xc2\xcb\x04\x22\x94\xf7\x81\xc0\x0c\x9b\xbf\x8b\xa3\x93\xa1\x12\xc5\x
23\x6f\x7e\xe8\x3f\x16\x70\xb7\x56\xcf\x6a\xcc\x03\x65\xd8\x99\xb8\x06\x42\x38\x4f\x9a\x50\x99\x4b\xd4\xce\x5f\x52\xf8\xfe\xe5\xbc\xa9\xad\x99\xa7\x42\x2c\x05\x4a\x43\x08\x7b\x2d\x82\xfc\xc8\x7e\xa4\x2d\xdb\x43\xc8\x80\x12\x61\xd1\xcf\xf4\x4a\x60\x52\x88\x0b\x81\x6b\x18\x91\xbb\xd4\x49\x78\x43\xd9\x36\x4c\x93\x7b\x94\xbf\x92\x4e\x9f\xd4\x8d\xd0\x69\xb2\xc5\x46\xce\x53\x28\x4a\x01\xfa\xfa\xcd\xfd\x79\x04\xa1\x7a\x11\x7f\xed\xa8\xe1\x2c\x85\xa5\x25\xb5\x3e\x8e\x69\xeb\xc2\xed\x3b\x5e\x43\xc7\x8b\xd4\x87\x6d\x23\xba\xa3\xa4\x6c\x49\x6e\x5b\x78\x2c\x79\x72\xfe\x3e\x27\xc3\x80\xb9\x86\xe2\xd7\x6c\x29\x52\xe0\x6d\x35\x93\x8a\x18\x85\x21\x28\x2f\xe3\xab\x4f\x28\x02\x89\xab\x12\x6d\xdc\x94\x98\xe7\xdb\x04\xe7\xcd\x35\x90\x83\xdb\x63\x0e\x41\xb7\x1f\x6a\x2d\x2a\x27\x7c\x1a\xd4\x5e\xf6\x56\xe7\x52\x55\x4f\x35\x7a\x2f\x35\x47\xcf\xc9\x40\x4b\x62\x77\xa5\x92\x15\xc6\x29\x7c\x45\x24\x68\x50\x1e\x35\x02\x45\x52\xd1\x5e\x62\x50\x67\x4b\x76\xde\xfb\xa6\x25\x39\x32\x3e\xd0\x36\xc3\xd5\xa9\xfe\x34\x23\x03\xcf\xb2\x89\x80\xba\x30\x82\xe4\xc0\x0e\x42\x2f\xfd\x33\xb2\xe1\x43\xd1\x9d\x46\xf1\x49\x9d\x91\x74\x31\xe6\x1e\x6b\xd9\x29\x7e\x02\x0d\x73\xe1\x2f\x0b\xe9\x20\x76\xb0\x37\xbe\xe0\x7a\x95\x96\x78\xe9\xd8\x00\xca\x68\x5d\x34\x59\x33\x06\x21\xa5\xb4\x65\xd7\x08\x14\x18\xb0\x49\xdd\xd3\x23\xc1\x18\x4c\xb4\x9c\x31\x9a\x60\xca\x6c\xfb\xf0\xaa\x43\x50\x4c\xfb\xcb\x3b\x10\x87\xeb\x32\xb3\x71\x0c\xe5\xc2\x99\xfe\x85\x90\xc0\x28\x29\x67\x1e\x5e\x8b\xa6\x49\x05\x2b\x33\x9f\x5a\x45\x1a\x9a\x8b\x22\xc6\xbc\x8b\xe5\xf3\x58\x17\xa6\xeb\x24\x5e\x5c\x45\x10\xb6\x82\xc4\xaf\x76\xb0\x47\xb0\x4b\xa8\x10\xc3\x09\xf1\x96\x68\xda\x90\x4a\xac\x38\xff\x27\x41\x27\x08\xb5\xed\xd8\x6f\x46\x11\x1e\x7f\x45\x8b\xbd\x1d\x33\xd5\x2e\xe2\x25\x59\x62\x9c\x14\x5f\x76\x1e\x4a\x44\xed\xfa\x14\xca\x2e\xc9\x16\xe1\x38\xd3\x57\x39\x15\xb9\x9d\xb3\xa8\x19\x43\x4e\x93\x04\x40\xc6\x68\x52\xdc\xa2\x72\x94\x3e\xe4\x39\x8c\x41\x02\x83\x05\x46\x25\x31\x34\x96\xa8\x91\xac\x92\xd8\x2d\x4a\x9a\x6e\x0e\x2e\x70\xc2\x4f\x1d\x7e\x2b\x3a\x1b\x2e\x8e\x96\x7c\x59\x08\xf6\x65\x8d\x20\x09\x11\x6e\x3d\xe0\x3b\x82\x65\x92\x9a\xcd\x85\x3a\x72\xc9\xda\x08\xa6\x7b\x6c\x22\xfe\x59\xca\xe9\xec\x1b\x8d\x46\x58\xc4\x00\x71\x82\xba\xc2\xbe\x31\x54\xb1\x9b\xe0\x0b\xa3\xad\xad\x15\x79\x0f\xc5\x34\x2e\x36\xad\x54\xd7\x6b\xad\x8c\xba\xc0\x26\x50\x09\xe3\xc8\xdc\xa4\x05\xe3\x6e\x1e\x31\xf9\xe7\x47\xc5\x98\x3b\xed\xa2\xf9\x20\x09\x7e\xf5\xad\x6e\x1d\x43\x2f\x58\x7d\x20\x3e\xa5\xb1\x77\x22\xa5\x47\x91\xee\xa6\x3a\xbf\xaf\xc9\x7f\x67\x55\x75\xca\xcc\x44\xff\x91\xfa\x08\x55\x50\xbe\x98\x7d\xac\xad\xbd\x5a\x14\x11\x55\x24\x35\x49\x0a\xb6\x1b\x94\x36\x23\xc4\x93\x1b\x82\x32\xb1\x48\xd6\x85\x71\x00\x4a\x6c\x98\x50\x72\x34\xfe\xa4\x39\x1e\x75\xda\x51\x7b\x62\x68\xef\xa0\x1b\xbf\xb4\xd0\xda\xc9\x42\x4d\x4e\x93\x1a\xe2\xa1\x34\x93\x25\x62\xe3\x8d\x81\xbf\xb7\x86\x7e\xde\x22\x09\x67\x08\x57\xa5\x03\x65\x1b\x10\x4a\xdd\xa9\x17\x11\x16\xbf\xeb\xc7\xfd\x7b\x9f\x54\xed\x78\x95\xd6\x4a\x4b\x68\x45\xba\xef\xfe\xe1\x6d\x8a\x50\x6f\x07\x50\x64\x2a\xb3\x98\x78\x43\x9a\xd3\x80\x2b\x1c\xba\x10\xf5\xbd\xe5\xb4\x3f\xa9\x0b\x79\xf7\xa2\x2a\xe1\xe0\xdf\x46\xd7\x9f\x53\x93\x07\xe6\x83\x80\x6b\x0c\xa0\xaf\x0e\xb2\x26\x94\x9d\x56\x5c\x84\xfb\xa5\xb8\x23\x92\x7d\xe2\x06\xed\xdf\xbb\x4b\x91\xde\x89\x87\x0c\xa6\x2d\x76\xd4\xec\x2c\xbd\x20\x24\x0f\x9b\x16\x5c\x62\x92\x83\xa9\x6e\xcc\x03\x34\x70\x68\x43\x1f\x5c\x9a\x54\x0e\x92\x0d\x54\xc7\x09\x74\xb9\xb0\xe0\xb5\x69\x20\x79\x67\x1e\x4b\x1f\x16\xbd\xb0\x3f\xa2\x78\x1d\xb8\x55\xbe\xdc\xbe\xa6\xa6\x0a\xe2\xfd\x06\x3a\x15\x3e\xee\x4c\x79\x4e\x7a\x6d\x95\xb0\x07\x05\
x24\xd4\x10\xdb\x9a\xc0\x54\x79\x6e\x3a\x27\x4d\x4c\x10\x89\xf2\xa8\xe2\xc5\xb2\x07\x93\xfa\x20\x28\xb8\x06\x8d\xaf\xfe\x7e\x1e\x70\x6d\x3e\xaa\x90\xe3\x75\x5c\xe8\x8c\xdd\x05\x85\x65\x64\x86\x8a\x44\xdb\xda\x89\x13\xbb\x62\x59\x2c\x76\xee\x23\x56\x05\x79\x80\xe0\xf3\x97\x68\x9a\xbf\x08\x51\x4f\x06\xbf\x78\x80\x09\x3d\x20\x76\xa0\xd3\x62\xb9\x59\xb4\xc0\x69\x9a\x4d\x65\xfd\xb5\xfe\x59\x32\x7f\xf9\x7c\x8a\xe6\xe4\xb1\xdb\x19\x59\x33\xe2\xa1\xa0\x82\xc3\xa1\x7c\xcc\xa6\x92\x2f\x4b\x0f\x71\x91\xc0\xef\x32\x1d\x90\x55\xfd\x42\x8f\xba\x14\xf2\xc2\x39\x45\x70\x9b\x3a\x62\x1d\xe6\xd0\x65\x2d\xdf\x0d\xe0\x15\x5d\xe3\xce\xc5\xda\x72\x50\x68\x57\x4e\xb7\xec\x4e\xea\x90\x27\x94\xe2\x30\xc0\x91\x1a\x36\xd4\xd9\x8d\xb3\x1b\x11\xdd\x7c\xb5\x00\xf7\xb2\x65\x29\xc4\x7f\x49\xe0\x29\xad\xaf\xdd\xc5\x1e\xa4\x97\xfe\xab\xba\x75\xb6\xeb\x6a\xee\xb7\x9d\xa0\x41\xed\x17\xd0\x1f\x90\xb5\x71\x2a\x0d\x0f\xbc\x57\x9b\x95\xa5\x77\xb5\x52\x1f\xfa\xcd\xda\xee\x2e\x48\xb7\x77\x4d\x50\xa5\x9c\xf6\xbc\x93\xfa\x5c\x24\x3a\x96\x96\xd1\x2b\x1c\xe2\x7e\x5d\x38\x6c\x28\xe7\x42\x36\x19\x27\x04\xcd\x4a\xda\x19\x8e\x50\x76\x98\xbd\x0a\x6f\x15\x8b\x75\xf7\x36\x96\x9d\xda\xe4\x8d\x55\xf3\xa0\x6f\xc1\xc4\xc5\x38\x16\x98\x36\xc4\xd2\xb3\x9c\xab\x8b\x0f\xd3\x47\xec\x4f\xce\x40\xb9\xd4\x4e\x07\x56\x8a\x86\x96\xe5\x31\x2b\x38\x97\x2f\xa1\xa1\xdd\x2b\x77\x33\xb1\x56\x4a\xf8\x2d\xbd\x98\x2c\xb3\x07\x7e\x79\x52\x95\x76\xdb\x55\x90\xf0\xcb\xa9\x03\x04\x0e\x33\x0f\x0a\xe1\xae\x63\x83\x4e\xb4\x3c\x51\x1b\x63\x18\xdd\xf8\x2e\x72\x7e\x4a\xe5\x5c\x75\x4c\xc4\x7d\x61\x56\x4f\xdd\x7b\x81\x37\xd3\xf9\x54\x5f\x47\x52\x71\xa6\x8b\xf9\x48\x80\x09\x23\xea\x01\xfe\xce\xb7\x99\x6a\x12\x6a\x43\x08\x9f\xda\xc2\x64\x08\xdc\x69\x5e\x21\x08\x14\xdc\x94\xf5\x94\xd1\x9d\x4a\xf7\xe5\xc1\xcd\x6a\xac\x2e\xca\x85\xbf\x4c\x6d\x0a\xae\x84\xab\xd4\xc2\x62\x01\x1c\xbb\x37\x82\x2a\x2f\xd9\x62\x75\xcc\x2c\x12\xbf\x93\xba\x97\x9c\x8a\xfd\xfa\x00\xe0\x43\x41\x60\xd8\x24\x1c\x0b\x89\x09\xde\xc5\xdc\xc6\x6a\xf9\x99\x25\x32\xe9\x1d\xde\x61\x0c\x43\x0d\x7f\x6f\x83\x69\xd4\x04\x5d\x64\xce\x9c\x9d\x12\xe5\xc7\x36\xd9\x66\x0b\xef\xd6\x9f\x7f\x79\x1e\x74\xe3\x2d\x48\x4e\xca\xc9\xfb\x88\x5d\x64\x91\xd4\x28\xd2\xab\x64\xa8\x59\x38\xbc\x89\x76\xb7\xd8\x0d\xc3\xe6\x27\xf0\x6c\x82\x8b\xa5\x1a\xa7\x37\xef\xb7\xb4\x19\xb0\xbd\xea\x7f\x54\xbf\x77\x1d\x59\x78\x84\x81\x87\xd7\x7b\x9c\x68\xf2\x04\xdb\xbb\x78\xba\x54\xaa\x38\x7e\xe6\xb1\x91\xae\x70\x8f\x30\xce\x1c\x26\x60\x85\x83\x4d\x6d\x0e\x70\x6f\x34\x65\xd0\xd7\xc0\x53\x48\x6d\xfa\xa3\x85\x69\xb0\x1d\xef\x50\x7f\x46\xb3\x9f\x25\xa7\x7a\x54\x7b\x4b\xa7\xa4\x08\x6f\xb0\xbc\x68\x2b\x9c\xaa\x88\x38\xbd\xe9\xbf\xaa\x48\x16\xb7\x12\x2a\x46\x41\xba\x5d\xf3\xd5\x01\x74\xa3\x2e\xe3\xd5\xbc\xfe\x93\xa2\xdd\x17\xc3\x6f\x14\x59\x46\x93\xcb\xe8\x24\xef\x2a\xbc\xfa\x1e\x53\x28\xcc\xc8\xb7\x36\xe2\x4d\xe0\x70\xba\x8b\xf0\xec\xe2\x80\x38\x30\x2d\x74\xf2\xb2\x58\xd5\x98\x93\x98\xb8\xe0\x7f\x32\x10\x75\x90\xb8\x69\xc4\xaf\xa4\x8d\xb8\xed\x3f\x4a\x0c\xad\x6c\x3a\x51\xef\x00\x8f\x2c\x10\xfc\xef\xa4\xa1\xda\x3f\x73\x57\xc5\xc6\x96\x6a\xc2\x9a\x34\x50\xcc\x2d\x31\xa5\x1b\x65\xc2\x52\x04\x61\x54\x78\x9e\xa3\x94\xf5\xe1\x08\x82\xa3\x29\xba\xef\x92\xf1\xd9\x13\xb5\x4b\x37\xa9\x77\x78\xf5\x97\xa6\xc4\x7c\x6c\x63\x90\xb2\xa9\x60\x0c\x4e\x9d\xe7\x80\x9e\x01\xd2\xb6\x02\x29\x9d\xce\xfe\xa3\x94\x21\x4d\x02\x9b\xae\xb8\xa4\xf3\xbc\x43\x7c\xc1\x16\xbd\xde\x38\xaa\x47\xb8\xc0\xf9\x78\x68\x90\x5a\x15\x75\x7d\x17\xfd\xeb\x6b\x05\x6f\x68\xd6\xc7\x8f\x7c\x82\x6c\x2e\x09\x0a\x68\x05\xf3\x79\x38\xd5\x95\x91\x05\xe5\x07\x1a\x1f\x76\xd7\x5f\x1e\xa2\x96\xbc\x23\x8a\x06\x83\x86\x3f
\xbe\x5f\x26\x1b\x71\x1a\x3e\xe8\xbf\xd3\x1b\x86\x8b\x1a\x1f\x1e\xb7\x7f\xc4\xcd\xe4\x2f\xff\x25\x22\x36\x39\xb6\x9b\xc2\xe4\xa5\xb4\xb4\x9d\x17\x16\x5e\xcd\xf9\xc9\xaa\xd9\xc5\x0c\xa0\x89\x24\x01\xfc\x50\x73\xd5\x76\xfc\xd0\xcc\x74\x0b\xf5\x15\x95\xc4\xef\xed\x61\xfb\xb4\xdd\x79\x4c\x9c\x24\xc1\x66\xa0\xee\x32\x09\x6c\xb5\xd7\x89\x98\x04\x2f\x4d\x0d\x8d\xf9\x49\x72\x01\x16\x06\x37\xec\x1c\x18\xc6\xd7\xe4\x4f\x94\xaf\xd4\x5d\x97\x1d\xfe\x56\x3c\x33\xf5\xac\x10\x99\xf9\xde\x35\x7e\xd1\xce\x7f\xc8\x6e\x86\x4c\x1f\x46\x6b\x84\xb6\x48\x21\x14\x19\xa0\x1e\x8f\x8e\x86\xe8\xf5\x6c\x23\xdb\x62\xa5\xbe\xa9\x32\xe9\x0d\xc9\x06\x0d\xe3\xa7\x4b\xa4\xe4\x22\x23\x99\xfd\xa4\x82\x67\x6b\xdd\x38\x54\x64\xa2\x04\x94\xe6\x25\xad\xda\x40\x59\x2d\xbb\xf8\x1d\x23\xcf\xe8\xf9\xe5\xf3\x38\xdd\x58\xc8\x04\xa3\xb0\x08\x3e\xf1\x08\x90\xeb\xe4\x1d\x70\x25\x09\x5d\x2c\x3b\xfe\x3b\x9b\x4f\x51\x8f\x60\xce\x4e\x5d\x22\x5b\x62\xf5\xca\x59\x48\x94\x42\xfb\x7e\x91\x6c\x96\xf7\x45\x4b\xb2\xb4\xf6\xd1\xb6\xed\x6a\x44\xbc\x04\x2f\x26\x89\xce\xea\x7b\xe8\xd5\x2e\xbd\x16\x1e\x2d\xd2\xf4\xfd\xa3\xc8\x70\x44\xfe\x12\x2c\x48\xc1\x38\x7e\xe2\xa3\xe7\xde\x52\x27\xc1\x2a\x28\x95\xee\xd1\x2c\xc7\x2a\x1c\x31\x46\xa7\xd8\xe8\xa9\xca\xde\x86\x8d\x85\x3a\x1f\x41\x0a\x5f\x47\xc6\x54\x0f\xc9\x07\x6f\x6a\xad\x53\x2f\xce\x57\xc2\x8c\x75\x98\xc9\x47\xb0\x2b\xb9\xcb\x3e\x69\xbe\x41\x5a\xaa\xcf\xf0\xe5\x4e\x0f\xaa\x70\xdb\xa1\x55\x61\xd2\xe7\x96\x70\x35\x1e\xad\xc8\x69\x17\x97\xb8\x01\x05\x29\xf8\x35\xe5\x5b\x60\x97\x02\x07\xfc\x02\xe1\xd1\xd9\xa3\x9b\xe9\x7c\x27\x31\x0e\x42\x33\xc9\x05\x68\x39\xac\xb8\xbc\xfc\x47\x53\xcc\x55\xab\x6a\xb2\xe2\x8c\x1b\x8b\xbb\x73\x3a\x6f\x82\xbd\x63\x7a\xad\x48\x79\x89\x48\x8e\xd3\x2c\x06\xe1\x7c\x14\xd6\xb2\x5f\xa2\x64\xda\x22\x5f\xcc\x28\xaf\x95\xa7\x31\xc2\x6d\x22\x00\xde\x24\x1f\x36\xcf\xac\x7b\x58\x19\x0b\xbf\x81\x29\xc5\xee\xea\x7e\xcc\xdd\x45\xdf\x76\xf5\xd8\x4e\x83\x7a\x91\x42\xbc\x52\xcc\xc1\xd9\xb2\x1c\x49\x2b\xcd\x98\xb5\x4d\xaa\xb9\x38\x1f\xee\x86\x6a\xef\xe1\xb2\x74\x2b\xca\xb0\xe9\xd3\xb5\x78\xe7\x1f\x99\x2e\x9f\xee\x09\x75\xe0\x4b\xf0\x26\x99\x09\xa2\x71\x70\x10\x86\xfe\xd5\x11\xd4\xab\x47\xd2\x17\xa1\x0b\x40\x2b\x6d\xcf\x28\x0a\xba\xb2\x89\x4a\x17\x49\xfd\x40\xfb\x11\x4a\x5b\xe5\x57\x43\xb5\x55\x47\xf5\x01\x2e\x53\xd6\x04\x19\x6a\x96\x2a\x4a\x90\x9e\xa5\xd9\x4f\x4d\x67\xf0\x0b\x91\x85\xd5\x5a\x66\xd3\xe9\xec\xd6\xab\x7d\x02\x83\x99\xed\x53\x6a\x8c\x40\xd4\x18\x3c\x68\x90\xf3\xa8\x99\x23\xde\x22\xc2\x3a\x02\xe1\x84\xe9\xde\x9a\x96\xe4\xcc\x9c\x11\x24\x2e\x6c\xfd\x03\xe1\xa4\x22\x5e\x0e\x38\x9f\xce\x6e\xd7\x19\x46\x85\x20\xc4\xc3\x97\x50\x97\x04\xae\xad\x25\x79\x5c\x0c\x0f\xc6\xf2\xfe\x09\xb6\x8d\xea\xef\x83\xfd\xe6\x82\xb9\x89\x52\x3b\xa1\x25\x39\x83\xb8\xcd\xec\x8e\x08\xe4\x8a\x36\x04\x44\x23\xd9\xd3\xfb\xc4\xce\x54\xb3\xdd\xf0\x23\x7d\xd0\x6b\x45\x85\xbe\x11\x9f\x9f\xb9\xf0\x48\xcf\x98\x91\x0d\x27\x8b\x8c\x13\x23\x72\x8c\x7b\x0c\x1a\x97\x93\x09\x9a\x1e\x0d\x75\xba\xf6\x7f\x4e\x94\xdc\x2f\x2d\x01\xd1\xd8\x1f\xcd\xc3\x53\x96\x1d\x55\x1f\x8b\xc4\x34\xb4\x84\x3b\xd7\x5a\xb6\xfd\x2f\xb7\xab\x46\x7d\x63\xc4\xf1\x6f\x54\xc9\xb8\x60\x78\x52\xbc\x53\x53\x2c\xbf\x36\x13\x1b\x86\x68\xfb\x1a\x0b\x14\x3d\x5f\x20\xba\xb4\xfe\xb4\x90\x2a\x88\x49\xbd\x5a\xe8\x92\xef\x1a\x5c\xee\x6c\x23\x86\x44\xf6\x40\x0c\x60\x88\x49\x6f\x6a\xb1\xdc\xc2\xc7\xac\xe5\xc1\xa8\x9a\xd8\xa7\x6d\x40\x08\xcb\x33\xbd\x63\x45\x07\xcd\x13\xb7\xff\x49\xb5\x1a\xd9\x2c\x14\xde\x15\x14\x7a\xab\x12\xad\xd5\x67\x55\xbd\x74\x85\x71\xde\x38\xe5\xc6\xf3\x34\xea\xfb\x34\x4e\xc7\x2c\x2b\x13\x44\xc3\x62\x51\x05\xd6\xcd\x91\x78\x16\x7e\x9a\x5e\x09\x6f\xb
1\xd4\xcb\x43\x8b\x3e\xcd\x51\x14\x4f\xd3\x81\x4d\x47\x73\x23\xd7\x53\x7a\xa1\x4c\x5d\xca\xa2\x19\x6d\xde\xbc\xcb\x2d\x91\xf4\x50\x6f\x81\x25\x83\x9f\x43\xc2\x80\xec\xad\xc3\x6f\xda\x9f\x92\x9a\x51\xa1\x3e\xda\x0b\xe9\xe2\x28\xc4\x2e\x5c\x6e\x54\x05\x13\x43\xbc\x98\xd5\x06\x0f\xfb\xef\xdf\x8f\x36\x8a\x23\x15\x38\xf7\xb4\x0b\x3e\xb2\x0f\xe4\xda\x55\xd0\x03\xc9\x3d\xd8\x6d\x9b\x86\x87\x6d\x79\x84\x85\xa6\x38\xdf\x75\x66\x89\x4d\xf8\xe8\x1f\x66\xa3\x53\xe6\x0b\x10\xb0\x53\xe2\xf7\x25\xd5\x8f\x4a\x7c\x50\x14\x2c\x98\x68\x87\xcf\x5c\xe9\x9d\x3c\x21\xd4\xe3\x84\x2d\x34\x6f\xf0\x64\xa5\x83\x60\x99\xc3\x42\x1c\x35\x89\x34\x31\xba\x7f\x42\x44\xd4\x8b\x01\x73\x54\x30\x1c\x8a\x1b\xb8\x67\xa3\xba\x14\x58\xc3\x13\x76\x90\x34\x35\x3d\xa2\x0a\x9d\xf2\x45\x23\xc2\xa6\x13\x73\x05\x3a\x75\xa6\x1c\xe4\x5c\xa0\xa2\x26\xba\x1e\x29\x77\xa7\xa4\xf1\x49\x29\xf6\x82\xcb\x57\xf1\x51\x6a\xaa\x78\xcc\x91\x75\xe1\xbb\xf3\x7c\x02\x43\x76\x7e\x61\x2f\x3f\x1c\xcd\xac\x1e\xf2\xf3\xd4\xb7\xc1\x75\x7c\x22\x11\x33\x16\xf1\x3f\xbd\x6b\x1b\x12\xa6\x81\x56\xd9\xb2\x23\x6e\xa1\x4c\x4e\x6b\x22\x0f\x72\xa3\x10\x41\x9c\xb1\x72\xe8\x9e\x5e\xd6\xc8\x5f\x00\x86\xa1\x47\x90\x66\x1e\x4e\x2b\x09\x84\x5e\x97\x06\xc7\xd9\xe7\xfa\x30\xf5\x29\x40\x29\xa2\xd6\x1f\xbd\xbc\x64\x81\xe9\xa5\xcd\xee\xd1\x22\xf7\x13\x67\x29\x7c\x38\x44\x49\x1d\x62\xef\x36\x6d\xe6\x38\x3b\x26\x88\x2d\x32\xda\x29\x52\xbb\x12\xb7\x5f\xe9\xe6\x16\x0d\x61\x1b\x13\x87\x9f\x4f\x4d\xb1\xad\xf5\xe8\xe2\xc6\x4b\xbd\x26\xdd\xd1\xed\x79\x8b\xde\x47\xa6\x14\x73\x89\x8a\xe9\x9b\x0a\x7a\x39\x08\x36\x97\xac\xbc\xff\x0d\x23\x29\x89\xbc\x9d\xd2\x81\xf1\xb1\x59\xbc\x69\xc1\x9b\xdf\x92\x04\x5e\x96\x4f\x6b\xc1\x13\x47\xdd\xa5\x38\xb1\x23\xb3\x9d\xaa\xc2\x1e\x02\x34\x4c\x30\x42\xab\x0a\x14\xb6\x43\x63\x2d\x29\x04\xe4\xc5\xec\xd0\xb8\x8f\x59\xe7\x5d\xdb\x51\xef\xc0\xe0\x9c\xa9\x50\xea\x25\xc6\xb3\x50\xd8\x57\xba\x41\x16\x08\x94\x86\xed\xfb\xfc\x4f\x5f\x05\xc2\xd6\xbf\x57\x8b\xe4\xb9\x5c\x31\x88\xcf\x9a\xc3\x33\x9f\xd0\xb8\x37\xc3\x57\x1f\x12\x89\x0d\x9c\xde\x25\x35\x74\xf8\x99\x20\x91\x76\x29\x3a\x6b\xfa\x48\xdd\xb7\x27\x31\x43\x8a\x1c\xab\x57\x6c\x14\xd3\x8f\x27\x04\xc2\xa3\x61\xc1\xce\xab\x78\xae\x96\x9f\x57\xce\x17\xdc\x17\xc6\x55\xdb\xd5\xe7\x71\x69\xba\x36\xca\x30\x1a\x0e\xc2\x3f\xf6\x4d\xf7\x73\x9d\x44\x47\x27\xf9\xc1\x21\x20\x11\x30\x50\x41\x82\x43\xca\x98\xbe\xee\xff\x7a\x2e\x40\x0c\x55\x6c\x73\xaf\x4e\x7b\x87\x9e\x8b\xe1\xeb\x71\x83\x55\xf0\xcd\xd5\xa9\x6b\xb9\x26\x52\xb1\x7a\x49\xfd\x31\x14\x8d\x61\x59\x3d\xea\xa3\xe4\x7e\x21\x76\x87\x31\x2d\x54\x44\xdf\xa5\x5f\x0c\x8e\xeb\x4d\x7e\xe8\x86\x21\xc6\xb0\x4e\xff\xd1\x70\x0d\xae\x40\xd9\xbc\xee\xa1\x5a\x33\x14\x77\x77\xa8\x1e\xfb\x03\x17\x01\xf2\x4e\xcd\x67\xcf\xde\xd3\x4c\x02\x63\x9b\x31\x10\x9b\xf1\x6b\xd5\x5b\xdb\x55\x16\xb4\x85\xc6\x31\xbe\xa5\x2a\xb0\xd1\x00\xb3\xcb\x70\x88\x6a\xed\x7a\xce\x81\x44\xda\x97\xc2\x21\x6b\x0c\xdc\x8c\x40\x0a\x8d\x4e\x39\x8a\x19\xcf\x80\x01\x89\x31\xce\xe4\x2f\x5b\xda\x3d\x7d\xc8\x0d\xf8\x48\x35\xce\xf5\x36\xf1\x6b\x44\x03\xd8\xdb\x99\x93\x4b\x6a\xca\x73\x13\xdb\x04\x5c\xae\xe0\x2b\x7e\x89\x68\x1d\x97\x3c\x09\x3e\x7f\x4a\xb0\xa4\x4d\x4d\xc4\x81\x80\x68\x0e\xe1\xcd\x12\x38\xd8\xbb\x70\xa2\xe6\x51\x67\x30\x34\x18\x50\xc4\x10\x15\xc0\x6e\xec\x24\x8b\x11\x3d\xff\x4c\x15\x09\x2a\xf1\x4a\x31\x87\xfc\x13\x4b\xda\x1a\x65\x34\xfe\x3e\x06\xb3\x0a\x15\x1a\x04\xf8\x42\x06\x53\x5b\x9d\x43\x80\xa0\xab\xbc\xd6\x16\x2e\x0c\x0c\x4c\x11\x4d\xb8\xce\x36\x16\xe0\x27\x11\x12\xfd\xd6\xda\xa4\x65\xc2\x12\x17\x16\x9d\xca\x2a\x08\x29\x84\x06\x35\xf8\xb6\xd1\x75\xa2\x9f\xcf\x1b\xa8\xff\xe9\x2c\xe2\x35\xf0\x99\x37\x62\x50\xf4\x
ca\xec\xa1\xc4\x79\x6b\x92\x50\x58\x3e\x33\xf5\x96\xda\xd3\x59\x8a\x40\xe1\x45\x45\xe7\x9f\x70\x04\x11\xd9\x87\xe4\xff\x9c\x31\x93\x66\xa8\x12\x33\xd8\xc2\xce\xe1\xc3\xc0\xbe\xe3\xc7\x72\xcb\x90\x65\xa1\x35\x3a\x12\x12\x2d\xf8\x03\xf4\x5c\x3e\xbf\x65\x02\x7a\xd9\x9f\x33\x44\xb4\x1c\x9e\x0b\xbc\xd5\x8b\x31\x41\x25\xba\x73\x0c\x43\x5e\xfe\x8a\xe2\xfe\xfa\xea\x0d\x0f\xdb\xc8\x84\x36\x40\xe5\xd0\x2c\x94\x50\x2c\x59\x2c\xe6\xc2\x48\x80\x74\x31\x41\xec\xbd\x5b\x14\x05\xfb\x4d\x53\xcc\x9b\xda\x35\x70\xae\x9b\x2e\xe9\x7f\xc7\xc2\x18\x88\x96\x67\x54\x69\xcf\x2d\x09\x32\x44\x4e\x1b\x85\x2a\xfe\x8d\x87\x46\x63\xd9\x86\xf4\x2a\x41\x2a\x0d\xf9\xe0\xaf\xf9\x9b\xe4\x3a\x2c\xde\x4a\xc7\xa9\x20\xef\x72\xfc\xcd\x5b\x9b\xdb\xfa\x22\x47\x59\xf2\xa5\xeb\xf9\xf7\x2f\x33\xc9\x33\x6f\xf0\x01\xde\x9a\x18\x8a\xc7\xba\x5e\xe0\x85\x66\xea\x70\x47\x35\x0b\x93\xcd\x99\x51\xad\x71\x07\x50\xdf\xb5\x4b\xe9\x28\xbf\x6b\xca\xdd\x30\x33\x28\x0a\xdb\x55\xb2\x70\xde\x48\xfd\x05\x85\xf4\xce\xea\x96\x66\x94\x8f\x24\xc2\xbf\xa1\x1d\x93\x31\xb2\x25\xaa\xbd\xc8\x67\x6a\x33\xb4\xdc\x7b\xf9\x0c\x29\x8e\x77\xd3\x45\xea\x85\x32\xbb\x5a\xe8\xd9\x58\x1c\x95\x80\x4d\xaf\x69\x59\xe8\x3b\xa1\x11\x70\x88\xbc\xf9\x52\x28\x10\x69\xff\x22\x04\xb2\x74\x99\xb4\x16\x07\xfd\x90\xaf\xa4\x46\xae\xc7\x88\x3b\x42\x5b\x79\x79\x91\x3d\xc7\x74\x33\xc6\x87\xff\xd1\xf3\x1f\xe4\xe2\x45\xc5\x77\xaf\x77\x46\x5c\xa5\x41\x3e\xc8\xa5\x71\xa7\x31\x5e\x97\x26\xfc\x31\x2a\x18\x1e\x68\x69\x35\x56\x96\x5d\x0c\x79\xba\xd3\x0f\x78\xff\x41\x0d\x4d\xd1\xac\x29\xcd\x07\xc5\x8b\x24\x71\xea\x20\x32\x7f\xf1\x9c\xdf\xd4\x44\xdc\x3e\x7a\x3c\x0d\x22\xdd\x63\xb1\x6e\x0c\xd9\x71\x6d\x30\x4f\x3b\x72\x1d\xb6\xc6\xbc\xa5\xc2\x13\x64\x79\xe5\xe7\x67\xa8\xcf\x1c\xd1\x27\x88\xce\x5f\x62\x07\xae\xbf\x26\x53\x1e\xc2\x04\xe9\x57\x39\x12\x0d\x86\x77\x1d\x65\xa6\x83\xa9\x0d\x54\xb9\xa7\x72\x8c\x74\x35\x9d\x3c\xab\x68\xab\x7f\x0b\xbb\x61\xc5\x00\x7f\xa7\x6b\xb0\x39\xa1\x30\xdd\x7b\x52\x16\xa1\x97\xc1\xb0\x35\xbb\xec\x4b\x20\x0a\x99\xb2\x15\x21\x57\xbe\xc5\xd0\x0a\x94\xc3\x9a\x20\xce\x91\xd4\xa6\xe3\xaa\xa7\x02\xe0\x0e\x0e\x66\xc6\x3a\xd2\xf2\x39\x25\xf8\xa6\x6e\x7f\xbd\x99\xc8\xb0\x31\x37\x3e\xa6\x7d\xee\x78\xf6\x47\x9a\xbb\x4b\x71\x87\x0a\x81\xb8\x09\x1d\x18\xac\x75\xf7\xe1\x05\xa4\xbe\xaa\x66\x21\x2e\x11\x4f\x3f\x94\x17\x02\xb6\x94\x6e\x9a\x8f\x11\x0a\xac\x86\x8d\x34\xed\x8d\xbc\x27\x65\x66\x29\x68\x6d\xde\x66\xcb\xae\x95\xe2\xb1\x64\xe5\xa6\x5c\xaa\xbc\x54\x83\x94\x04\x41\xcc\xd8\x60\xb8\xf9\x83\x26\x31\x97\x5b\x8e\xec\x46\x0d\x63\xf5\x21\x73\xe1\x26\x7c\x8f\xcb\x10\x08\x93\x10\xfc\xef\x29\x13\x9d\x32\x6c\xbf\x84\xaa\x8b\x38\x6a\xb0\xc6\x4e\xee\x75\x75\x10\xa1\x0d\x23\x13\xd2\x0b\x3a\x87\x59\x66\xa2\xe9\x57\xd5\x01\x01\x2f\xe9\xfb\x80\xe5\x19\xdf\x37\x6d\x88\x5e\x38\x03\x0a\x65\xff\x44\x56\x01\x19\x36\x60\x8e\xab\x8e\x60\xb8\xe7\xd8\xf0\xb1\x11\xa7\x6b\x78\x52\xc4\xc1\xf5\x8c\xac\xbf\xcd\xb8\x65\x6a\xd4\xa4\x85\x34\x40\xe6\x21\x90\x43\xe2\x34\xaa\x44\xfa\x93\x33\xe5\x77\x95\xe5\x5e\xf1\x17\xd5\xbb\x60\x70\xa3\xe3\xc3\xfb\xc9\xaf\xbc\xae\x91\xab\x59\xed\x88\xb2\x21\x06\x19\xcf\x1f\xcd\x72\x75\xbe\x3a\x41\xdb\xca\x62\xaf\x03\xde\xfd\x29\x6a\x9f\x32\x68\x87\x4f\x88\x12\xd8\x7f\x9c\x35\x6f\x44\x42\xab\x31\xc2\x9a\x29\x64\xa7\x42\x62\x30\xf1\xc0\x93\x5b\x19\x98\x60\xb8\x91\xf8\xb5\x30\x5c\x49\x4a\xc4\x80\x9f\xa2\x4f\x8c\x41\xb4\xfe\x76\x57\x0a\x6e\x67\x8e\x5d\x3c\x03\xf7\x0b\xd0\xf1\x8d\x55\x2c\x78\x96\x56\xf3\xa0\x0c\x29\xcd\x74\x14\xc2\x2c\xda\x91\x09\x9b\xcb\x35\xb2\x65\x0a\x6a\xd0\x52\x08\xc3\x63\x0c\x3e\xcd\xa8\x1c\x00\x24\xe0\x42\xde\x91\x53\x1d\x89\xf5\x2c\x7d\x5c\xf2\x5e\x54\x29\x47\x82\
x58\x4e\x1d\x54\x1a\xdc\x17\x35\x3e\xc4\xa0\xd6\xe8\xd6\x96\x35\x25\x77\xa3\x31\xa0\x96\xca\x11\x93\x4d\x78\x0c\xf7\x28\x31\x40\x92\x9b\xf1\xcf\x66\x25\x00\xa0\xcd\xc3\x4e\x6f\x68\xa2\x16\x7f\x26\x9e\x01\x7b\xc5\xa1\xcd\xca\x92\x68\x80\x52\x37\x8f\x6e\xa6\x12\xcd\xea\xaf\x88\x4a\x91\xfa\x37\xb0\xd9\x1c\xe1\xe4\x6c\x0a\x8e\xac\x48\x31\xf3\x20\x06\xa9\x3a\x53\x57\xfe\xa6\xbc\xda\x9c\x9e\x73\x1a\xc8\xe3\xd9\xcd\x05\xd3\xb0\x1d\x81\x8d\xba\xae\x6b\x12\xeb\xf3\x64\x5f\xab\x4f\x37\x60\x8d\x27\x75\x59\xf0\x9a\x03\x0c\x11\x05\x4f\x64\xae\x02\xab\x6f\xea\x31\xa0\x62\x6c\x5f\x43\x1c\xbe\x26\x35\x39\xee\x81\x38\xcd\x0c\xae\xa0\xb2\x2e\x2b\x91\xa3\xae\x97\x1a\x90\x62\xe3\xde\x31\xf6\x0d\x82\x61\xec\x73\x84\xb4\x4b\x0b\xe0\xa4\xd5\x27\xa3\x42\x59\x58\x90\xb3\xfc\x8b\x05\x30\xd9\x12\x63\x0f\x42\xa3\x84\x5f\x0f\xcd\x18\xb9\x50\x17\x22\x5c\x56\x4d\xd0\x7a\x5b\x42\xaf\x54\xfa\x74\xd2\xb8\x29\xca\x92\xf6\x26\xb9\x6a\xa1\x2c\x7b\xaa\xa3\x47\xc5\xfe\xee\x41\x1e\x77\x9c\xb3\xbd\xef\xd1\x36\x81\x49\x70\x41\x56\xb2\x48\xc2\x8e\x81\x50\xa0\x35\xab\x64\x13\x92\xb4\x6c\x48\x25\x34\x1f\x83\xf1\x26\x1f\x63\xcb\xda\xaf\x67\x2b\xea\xf5\x68\x2c\x94\xab\xb3\x00\x81\xf0\xb6\x7f\x5f\x0c\xd8\xb4\xf4\xa5\x9b\x84\x1a\xa2\xce\x08\x74\xb2\x18\xd7\x61\xdc\x7e\x2f\x6a\x46\xac\xc9\xd7\xc8\xb1\xc8\x03\x9d\x1d\x8d\xe4\x81\x52\x12\x5f\xf3\x7b\x1e\x8f\x4b\x55\x06\xbd\xe2\xc4\x1d\x26\xb6\x5b\x64\x2f\x42\xe8\x3c\xf8\x2c\xfa\xb6\x90\x6a\x4c\x8f\x44\x6e\x49\x9a\xcb\x80\xb1\xae\x38\xd3\xd2\x56\x22\x97\x11\x07\xef\x52\x77\x1a\xce\x28\x7e\xb9\x51\xc6\x0e\xa7\x4f\x5c\xea\x21\x80\x03\xf5\x06\x07\x3d\x87\xfd\xb0\xac\x0b\x3a\xb9\x51\x04\x7f\x41\xe1\xe9\x0c\x45\x8d\x70\x32\x2f\x8a\x53\x84\x0c\x37\x0c\x28\xf8\x58\x5b\x24\xc8\x98\x42\x41\x33\x10\x8f\xaf\x5e\x4a\x0c\x00\x6d\x69\x7c\xd4\x05\x24\x3d\x0b\xe5\x2f\x6e\xd4\xb6\xd2\x4b\x08\x85\xde\x6b\x97\x23\xcc\x8a\xc4\xd7\x89\xa3\xf5\x6b\xbc\x93\xf0\x14\x4b\x58\xa8\x51\x8a\x81\x44\xf4\x5f\xad\x82\x0e\x7e\x88\xea\xd3\xa8\x32\xe7\xa6\xaf\x09\xe0\x60\x6b\xd5\x6b\xfd\x2d\x49\x8a\xfc\x2b\x59\xea\x95\xde\x39\x39\x50\xd3\x0c\xde\x42\x0e\xbd\xa9\xbc\xd9\xa6\x13\x93\x70\xe3\x18\xcf\xa5\x0d\x6c\xf4\x20\x21\x24\x30\x29\x21\xc8\x08\x5c\x27\xf8\x8f\xb7\xe9\x4c\x95\xfb\xb4\xff\x13\xbe\xc9\x8c\x4f\x5e\x10\x42\xba\xb2\x9d\xd4\xc3\xf1\x30\x3c\x32\xd7\x60\x9e\xbf\xd3\x43\x83\x66\xbf\x64\x30\x70\xa1\xe2\xce\x5e\x7d\x29\xce\x22\xb8\x08\x18\x3e\x23\x6f\xf9\x03\x4f\x2b\xb5\x96\x87\xe0\x8c\xbb\xc4\x06\x07\x47\xa1\x5b\xcd\xf8\x1f\x9a\x63\xdc\x40\x41\x2b\xda\xdb\x3d\x02\x5b\xb3\x3c\xbb\x36\x4c\x73\x5c\xc1\x6a\x99\x88\x28\xfb\xc8\xf1\x00\x9c\x73\xda\xa2\x15\x05\xd8\x42\xf4\x91\xa3\x75\x8b\x7d\x12\xd3\x94\x56\xae\xd0\xb9\xca\x8f\xec\xd7\x2c\x80\x99\xc9\xa0\x5d\x6a\xb5\xc2\x50\xfb\x0c\x66\x9e\x12\x69\x50\x83\x69\x05\xe3\xb3\x0d\x47\xec\xc3\x47\xf8\x84\x19\x75\xe9\xd1\xdf\x3a\x02\x41\x5c\x27\x8a\xbe\x2b\x47\x98\x00\x4d\x07\x21\xf8\xd6\xd2\x20\x4a\xab\x22\x1b\x61\xff\x1c\x71\xa3\x35\x46\x7f\xbb\x85\x66\x55\x3f\xbf\x6e\x01\x55\xd4\x96\x50\x83\x6b\x7e\x9c\x97\xc2\x97\x47\x23\xc8\xf3\xcd\xd2\x6b\xda\xf0\x17\xf0\x89\x8b\xe3\xb3\xc6\x83\x0e\x50\x8b\xd8\x7d\xd0\x33\x55\x16\x19\x32\xc7\xf6\xbb\x79\x4d\xfc\x42\xac\x91\x62\xbb\xf0\x62\x0c\xdb\x62\xbb\xad\x0e\x6d\x5d\x58\xa0\xf4\xc8\x7e\x69\x8a\xe9\xb0\x2c\x4c\x67\x7a\xdf\xd1\xa6\xd3\xe2\xa9\x0f\x8d\x88\xb7\xa1\xf1\x88\x2a\xfa\x52\xd0\x37\xfc\x50\xe9\x12\xba\x90\xf4\x72\x55\x74\x88\x26\xf4\x23\xde\xa7\xf4\x15\x10\x84\x20\xf7\xfd\x52\x15\x62\x2b\x18\x89\xa4\x6e\xdd\xf3\xc9\x1b\xff\x13\xbe\xeb\x1e\x33\xf2\x4a\xf8\xde\x7b\xd0\xd2\xfa\xd1\x73\x5a\x75\x33\x45\xa0\x78\xe3\x38\x30\x06\x9a\xcf\xca\xa9
\xd9\xa2\xe7\x95\xaf\xca\x8b\x96\xa0\xb4\xc9\x3b\x88\x00\xcb\x15\x9a\x37\x71\x2a\x0e\x74\x9c\xbe\x1b\x05\x61\x3f\xda\x7c\x77\xbb\xda\xeb\x28\x7d\xa7\xe3\x6c\x45\xab\x33\xf4\x9f\x36\x44\x6d\x5e\x54\xd6\xcd\xf5\x58\xa2\xb5\x4d\xd8\x82\x8f\x84\x7f\x3c\x53\xbe\x51\xa9\x47\xb1\xb0\x6f\x0f\x38\xb4\x4d\x9f\xb2\x67\x9b\x73\x3f\x4e\x55\x78\xf2\x54\x30\x47\x26\x75\xa1\xa9\xe9\xf9\x24\xaa\x18\xf5\xf9\x41\xe3\xfa\x18\x42\xfe\x4e\xe0\x0a\xd2\x1c\x87\x5e\xf9\x43\x29\xe8\x20\x77\x6e\x3e\x67\xe0\xc4\x33\x5f\xf2\xa7\xe6\xcf\x37\x52\x22\xb3\x37\x86\x40\x16\x5b\xa0\xbf\x39\xdd\xf7\x29\x17\x0b\xeb\x18\xb9\xdf\x9b\x63\x34\x53\xa8\xc2\xa3\xe4\x06\xe2\x18\xb5\x16\xe3\x77\x51\xe3\x97\x65\x2b\x2c\x60\x35\xd5\x50\xac\xe1\x40\x3c\x68\x5d\x5b\x56\xeb\xbf\x34\x0c\x20\x06\xde\xa8\x06\x97\x21\x14\x51\xb6\x86\x89\x4e\xac\x3c\x08\xf0\x97\x14\xf4\x07\xe3\xd4\x29\xe8\xe8\x5f\x41\xbf\xe6\xee\xbc\x2d\xa6\x1f\x1c\x65\xd5\xf4\x06\xae\x76\xfd\xdc\x07\x0e\xef\x79\x3b\x35\x96\x62\x68\x80\xaa\x0e\x97\xb6\xef\x16\xaa\x62\x71\xf0\xf1\x97\x36\x26\xc2\x15\xfb\xb9\x01\x98\x91\xbf\x91\x10\x77\xf2\xb2\xfc\xc8\xec\x5d\x09\x25\x75\xde\x49\xe0\xd5\xa0\x1d\x33\xe4\x6d\xd4\xc7\x68\x5e\x6f\x61\xf4\x3b\xb6\xa9\x69\xa2\xc6\x13\x1d\x22\xe6\xa8\x5c\x2f\x75\x5b\x78\x80\x68\xee\x3a\x41\xf1\xe9\x1a\x4f\xbc\x3b\x91\xc8\xbf\xa9\x04\x35\xe5\xac\xea\x2f\x9a\x52\x60\x8f\xc6\x32\xd5\x5d\xe7\xdf\xb9\x8e\xa4\x95\x7a\x1f\x26\x53\xa3\x81\x4c\x98\x21\xf7\x93\xf8\x59\xa7\x63\x73\xd4\x55\xd8\xb8\xf6\x87\x2a\xc8\xfb\xd9\xd1\x62\xc2\x11\x78\x76\x11\x46\x82\xa4\x3b\xaa\x79\x4c\xe4\xb8\x4e\x49\x8c\xcc\x91\x7a\x4b\x3c\x27\xee\xe8\x0e\x4a\x13\x0d\xad\xbb\xfd\x9e\xaf\x0e\xcd\x50\x51\x09\x89\x4b\xcc\xda\xa0\x5f\x96\x1c\x86\xff\x9a\x06\x79\xff\x2b\xd3\xe0\xdf\xf6\xc9\xa0\x45\x56\xa8\x0b\xb8\x81\x9d\xa3\xac\x0c\x7e\x3d\x0d\x9e\x73\xf4\xe1\x1d\xf3\xc3\x42\x1c\x75\x7e\x38\xe4\xbd\x2c\xc4\x58\xe7\xf4\x13\xda\x59\x93\x8b\x8e\xb0\x6c\xad\x09\xa7\x25\x84\xb4\x25\xe0\xc4\x44\xd4\xe7\x4e\x36\x37\x1c\xda\x7e\x24\xbb\x9c\xd0\x62\xd1\x3c\xcd\xcc\x55\x7b\x16\xcb\x84\x9e\x85\x07\xc8\x3d\x81\x62\xb5\x93\x47\x2f\xa3\xdf\xff\xe2\x90\x59\x50\xc6\x36\x75\xea\x53\xea\x01\x60\x12\x99\xea\xad\x4d\x84\x4c\xba\x6f\x9b\xb8\x9c\x21\x67\x80\xbf\x41\xd5\x88\x0f\xc5\xb6\xd2\x18\xf7\x86\x15\xf2\xd3\xbe\x28\xaf\xf4\xf3\x19\xed\x35\x52\x26\x3c\x17\xb6\xea\xb3\x82\x1a\xe6\xd7\x0f\xe7\x5f\x42\x50\x10\x36\x69\x14\x2a\x4e\x52\xe5\x81\x8d\xd9\x58\x7e\xec\x71\xa2\xce\x98\x91\xd5\x48\x36\xbb\xea\x5d\x5e\x20\x27\x6b\x9b\xb2\xc0\x37\x58\xe4\x5d\x6b\xb9\x72\x7a\xca\x58\x73\x97\x15\x0b\x1f\x9a\x23\x29\xb9\xdf\xe8\x60\xad\x11\x69\xcf\x62\xb7\xb8\xa0\x89\x68\xe1\xe5\xe2\x14\x8c\x1e\x80\xf3\xfa\x9d\x64\x30\xd7\x98\x12\x09\x36\x59\x1c\xb5\x87\x34\x17\x2e\x66\x57\x6a\x9d\xf4\x3e\xc7\x4c\x78\x14\x04\x72\xa2\xd9\xd9\xd1\xa2\x45\xfb\xee\xb7\x73\x55\x5c\xdd\x5d\x43\x39\x81\x1b\xb7\xbf\x52\x31\x4b\x49\x33\xdb\xa0\x69\x4e\x78\x7b\xee\x12\x83\x53\x0f\x5c\x23\x5d\xe1\xc9\xd2\xa6\x9c\xc6\x5d\x66\x0a\x06\xc2\x91\x62\x66\xe2\x1e\xf2\x6d\x53\xe8\x45\x5a\x64\x99\x7a\xb9\x0f\xf7\xa8\x72\x1a\xe8\x2e\xa5\x91\xda\x6a\x61\x17\xec\x4b\x7f\x0a\x3e\x88\x9c\x28\x9c\xc7\x14\x22\x15\xfc\xca\xa5\xc1\xb4\xf3\xf5\x73\x95\xb0\xba\x48\x91\xbb\x14\x60\x5c\xbc\x82\x7c\x4c\x1b\x98\xfb\x6e\x4d\xc5\x9e\x61\x4a\xe1\x0d\xfd\x1a\xac\x1e\xb8\x23\x57\x08\x9c\xdb\xa8\x84\x77\xce\x51\x68\xbe\xf6\x97\xe0\xe9\x3e\xa6\xb7\xc0\x6b\xc8\x51\x81\x8c\x2c\xdf\xe4\x40\xfa\x4a\xf5\xb7\x15\xc9\xa6\xbb\x87\xb7\x19\xeb\xd8\x88\xe9\x69\x41\x3d\xdb\x13\xe7\x90\xa4\x1e\x85\x8b\x4d\x92\x1b\x81\xc6\xba\x80\xb1\x2d\xba\x3b\x7d\x22\xa6\x2a\x34\x48\x3a\xf8\xd5\x9c\xfe\x8a\x6e\x9c\x1
9\x9e\x36\x60\x96\x14\x55\x39\x7c\x9d\x8d\x88\x49\xbc\x8e\x49\xaf\x4d\x35\xf3\x28\x8e\xc1\xb1\x7c\x2c\x7b\x57\xaf\x4a\xab\x44\xdf\xd0\x0b\x96\x03\x14\x12\x14\xc6\x8e\xa6\x60\x8a\x0c\x7f\x3e\xbc\xb9\x67\x24\x86\xd0\x72\xdd\x2f\x8b\x82\xed\x9a\xcc\xcd\x44\x5b\x4e\x44\x0c\x0e\xa9\xe0\x8f\x41\x53\xe9\x24\xf7\x0b\x97\xf5\xca\x8f\x7c\x34\x78\x3f\x91\xad\x24\x17\x8c\x4d\x59\xb7\x6b\x90\xe4\xea\x06\xa6\x34\x13\x10\xb0\x69\x95\x88\x61\x54\x72\x97\xaa\x11\x89\x7f\xb9\xa2\x3f\xa2\x01\xab\x71\xdb\x25\xd9\x5f\xd1\x8d\xa4\x52\xe9\x3d\x68\x44\x13\xb8\xb6\xea\x5e\xdc\xa3\x31\x07\xaf\x1d\xe5\x1a\x47\xc2\x71\xc9\x28\x60\xc0\x2b\x05\x27\xf2\x91\xc0\x97\x47\x3b\x2c\x0f\xb4\xa1\xc1\xcc\x9d\x97\x7a\xf7\x8b\xd1\x87\x9e\x4d\x12\xcc\x9f\x62\xae\x1c\x68\xdb\xe2\xca\x58\xc4\xf1\xf9\x2b\xd3\x0b\x77\xe9\xaa\x19\x37\xce\x02\xf0\x6f\x6f\x03\x3c\x91\x89\x60\x2c\x15\x7e\x72\x50\xe4\xe7\xa2\x76\x16\x91\x07\x1a\x6a\x8b\x1a\x9c\x12\x86\x95\xa1\xbd\xd1\x92\x69\x85\xd5\xa0\xdc\x69\xeb\x26\x86\x0d\xa4\x92\x09\x83\x3a\x66\x8b\x0a\x46\x96\x03\xde\xe8\x18\xf3\x97\x3e\x11\x58\x38\x91\x32\x9a\x1e\xf0\x7a\x3c\xb5\x77\xb1\x49\xa3\x54\xe0\xdc\x6c\xf5\x89\x78\x97\xd1\x16\x4d\xa9\xae\xdd\x89\x57\x63\x24\xd7\x3a\x5d\xf2\xd4\x16\x71\x50\xa3\x5f\xcf\x26\x0a\xb3\xe1\x9b\x02\x24\x54\xd0\xe5\xe5\xfb\xef\x19\x5e\x28\xa6\xca\x1c\xfc\x92\x10\xf2\x4c\xad\x50\x61\x57\x80\xc7\xef\xbf\xb3\x48\x24\xc6\xac\xb9\xc4\x2c\x2d\xae\x03\xd2\xd8\x2e\xba\x70\x57\xf3\x8e\x1d\x71\xaf\xe8\xe0\x13\x14\x49\xb5\xdf\x74\x0d\x24\xbb\x27\xae\x76\x5e\xa2\x70\xe4\x83\x7d\xde\x5a\xbf\x41\x64\x03\x79\x44\x5f\x6a\xa6\x10\x5d\x0c\xf8\xf1\x15\xee\xd1\x6d\x48\xa5\x63\x36\xc8\x40\x44\x40\xca\xf0\x63\x1b\xb8\x24\x51\x8b\x2b\x7a\x0d\xde\x36\x98\x8e\x76\x56\xd9\xa8\xf0\xf2\x26\x33\xce\x87\x44\x6c\xc5\xa5\x9e\x1a\x6c\xaf\xf4\x63\x60\x6e\x30\x94\xf4\x58\xaa\x06\x60\xe9\x9e\xa3\xca\x19\x81\xa5\xda\x03\xca\xa6\xa3\x96\x94\x05\xf9\xbc\xb5\x8e\x03\xeb\x5f\x29\x30\x67\x78\xf1\x48\x5d\x38\x0e\x7b\x2b\xec\x58\xd8\x44\x24\x5e\x5d\x8f\xa3\x25\x76\x2e\x46\xd3\x5e\xa2\xc9\xd8\xc7\xf0\x02\xe2\xcd\x73\xb1\x0c\x5a\x12\x01\x3e\x48\x27\x22\x42\xef\x54\x68\xc2\x6c\x8b\xb8\x01\xc7\xce\x31\x73\x75\x13\x62\xbf\x6b\x78\xda\xa6\xa9\x67\x4d\x6e\xdc\x4f\xca\xcc\xa9\x85\xda\x3a\xf4\x09\xde\xa0\xc9\x45\x93\xcf\xd9\xa0\x88\x09\x4a\x48\xa7\x78\xf7\xa6\xf4\x82\x7f\xc7\x42\x1d\xd8\xfc\x82\x77\x66\x3b\xef\x19\x73\xc5\x5c\xf7\x9e\xba\xea\x57\xfb\xb5\xce\x76\x12\xfc\xa5\xa9\x02\xe6\xb0\x9e\xf2\x8d\x27\xa7\x7b\x1f\x63\xe6\xac\xdf\xb3\x25\x82\x51\xd5\xb7\x6b\xef\x53\xfa\x69\x52\xa0\xf6\xd5\x8a\x72\xea\x38\x64\x1b\x63\x24\x6c\x18\xa7\xcb\xb8\xa7\x69\x88\xb1\xae\x89\x57\x94\xec\xf6\x36\x4b\x32\x71\x04\x53\x7f\x24\x55\x11\x48\x6c\x30\x76\x79\xa2\xee\xa1\xfe\xf6\x88\x37\x6e\x21\xea\x4d\x6c\xc8\x20\x15\x00\xcc\xdb\xf5\xfe\xd2\x51\xf4\x4c\x25\x3a\xb4\xb6\x69\xd0\x84\xa9\x7d\x6e\x7d\x82\xf9\xf7\x8c\x01\xd5\x19\x83\x4e\x65\xcf\xf6\x4c\xe5\x0f\x14\xea\x79\x48\x30\xb4\x98\xf0\xad\xe3\x4d\x02\x42\x9f\x39\x28\xbb\x0d\x71\x19\xb0\xa9\xe1\x26\xb6\x45\x96\xa9\xb7\x67\x81\x53\xdc\xc4\x46\xc8\xdb\xa6\x0a\x7a\xeb\x57\x6f\x6c\xe9\x36\x11\xc0\x41\xfd\x9b\x2c\x2a\x83\x97\x4e\x23\xd9\xd3\xfb\x1f\x81\x28\x19\xab\xb9\x83\x3c\x23\x1e\x1e\xed\x6d\x38\xa6\x6c\x61\x88\x11\xf0\x18\x2b\x11\xb7\x3c\x8a\x96\x67\xd7\x35\x2b\x76\x11\x84\x6e\x5f\x72\x39\x5f\xc6\xe0\xc9\x2f\x52\xa8\xb5\x84\xe0\x3c\xd1\xc4\x8a\x52\x59\x9e\x3b\x4d\x40\x70\xd1\x1c\x71\x65\x5a\x81\x12\x13\x89\xa2\x7e\x4a\x7a\xc4\xb3\x41\x44\x79\x57\x37\x1e\x55\x2e\xe2\xca\x62\x79\x23\x66\xc5\x5d\xc5\xdc\x50\xc2\x44\x01\x20\xcf\x16\xe9\xe8\xf6\x7a\xef\x2d\x80\x38\x07\x7c\xb0\x0d\x16\x5a\x06\x
af\x63\x3b\x94\xe9\xd9\xb7\x55\x96\x80\x04\xc8\x12\x4a\xc9\x79\x6b\x23\xab\xcc\x80\xee\xd0\x08\x71\xa9\x0f\x8e\x9d\xa0\xee\xdc\xb9\x5c\x63\x48\xdb\x92\xaa\x58\xe6\xd8\x2c\x9f\xe4\x7b\xb0\x8d\xe0\xba\x80\x0e\x77\x61\x5c\x47\x7e\xc0\x1d\x9f\xbf\xa7\xd2\x7d\x89\x9f\x3f\x3f\x9a\x1a\x6e\xde\xa4\xb4\x12\xc1\x0d\x11\xd7\x4d\x79\x04\xaf\x4e\xf4\x4d\x3d\x8a\x6a\xa7\x8f\x58\x39\xf3\x98\x30\xff\x3c\x70\xd8\x1a\x0b\xc7\x28\xfe\xa3\x46\x3e\x50\x64\x70\x98\x3a\xaf\xbb\xab\x95\x3a\x6c\x22\xc6\x74\x97\xe8\xf8\xa3\x6f\xd9\x9f\x21\xce\x39\x6a\x74\x90\xb7\xb5\x47\xe0\x08\xcd\x3c\x4e\x09\x60\x2f\xfb\x21\xae\x65\x48\x60\xad\xca\x8e\xe8\xba\xe6\xab\x67\x7a\x72\x8d\x55\xfc\x20\x9a\xe5\x70\xcf\x2c\xe8\xda\x6f\x5f\xba\x85\x32\x8e\x9f\x2c\xd1\x7d\x5e\xa1\xf1\xdc\x4b\xda\x5b\xf1\x44\x72\xe9\xe6\x72\xd6\x50\xc5\x4d\x85\x82\x5d\x3e\xbf\x83\x49\x39\x9b\xcb\x14\xbe\x8f\x12\x85\xdb\xa3\x21\x05\x43\x2b\x03\xeb\xff\x51\xda\xb5\x86\x5c\xbd\xeb\xdd\x53\x05\xe5\x6f\xc1\x56\x4c\x32\x45\x13\x27\xaa\x31\x9e\x8d\x8c\xa2\x6b\x2d\x9c\x4e\x21\xd3\xe0\x0d\x52\x30\xd9\xbb\xac\xa9\x6d\x40\x8b\xb7\x78\x3c\x2f\xee\x6a\xc1\x80\x59\xa2\xe5\x98\x18\x02\x59\x8a\x6e\xb1\xc8\x7e\x66\x4b\x44\x3b\xf9\x58\x4b\x6a\x7d\x30\xee\xc2\xb8\x97\x4b\x3f\xc1\xb5\xed\xba\x8e\xf7\x42\x43\xc7\x70\x43\x3d\xa0\xa7\xc1\xac\x00\x79\xc6\x4a\xfa\xf3\xab\x3c\x79\xfe\x5e\x89\x93\x07\x7d\x1c\x94\xba\xf2\x3a\x80\x8d\xab\xba\xb4\x95\x4f\x8a\x43\xdc\x2d\x35\xa6\xf8\xcc\x4a\x1a\x5d\xf6\x87\xe5\x55\x8f\x5c\x10\xfb\x3c\xa4\x9b\x78\x57\x55\xdf\xcb\xdb\xec\x53\x19\xed\x79\x58\xf8\x82\x49\x0b\x64\xad\x6b\xe4\x02\xce\x4c\xa0\x09\x21\x9c\x0d\xa5\x79\xec\xa3\xa2\x6f\x8b\x07\x7a\xe9\x90\x36\x3d\x8e\x19\xd9\xc5\xf7\xb1\x0a\x50\x1c\x40\x51\x59\x82\xe3\x36\xab\x55\xd7\xf6\xb5\x46\xc4\xd2\xf6\x92\x8b\x4a\x42\xc2\x69\x8e\xab\xc4\x70\x4b\xf3\xcb\x8c\xda\x3b\x16\x3c\x8e\xab\xcd\x4c\x09\xe4\x26\xeb\xe8\x93\x22\xe5\x8c\x2e\xc6\x06\x2d\xf0\x94\x2e\xa2\x77\x70\xe0\x2e\x78\x66\xdd\x94\x61\xa8\xbc\x78\x64\x46\x99\xb8\x9e\xf6\x00\x5f\x1a\xa1\xd5\x6c\x93\x3e\xc9\x82\x59\x80\xe0\x7e\x7b\xd1\x3d\x58\xa4\x6e\xa0\x86\x03\x80\x2e\x51\xfd\x69\x60\xf7\x40\xa0\x5b\x73\x83\xb6\xfe\x6c\x03\x08\xd9\x08\x38\x50\x85\xb4\x05\x8c\x6f\x4a\xb4\xb8\xf6\x3b\x03\xea\x6b\xf2\x22\x67\xa9\x7c\xae\x3c\x1a\x11\x88\xa7\x32\xa3\x86\x02\x7e\xb0\x1a\x6f\xcd\x2f\xc6\x84\xce\x2f\x97\x14\x4e\xe8\xfe\x35\x4c\x02\xd4\xb0\xc0\x56\x3f\x04\x68\x96\x65\xff\x4c\x96\x36\x80\x6d\xe0\x31\xde\x14\xf7\x25\xb0\x1a\xc7\x2e\x10\x81\x59\x54\xb8\x0f\x0d\x44\x32\x07\xbd\x97\x28\xf3\xbb\xf8\xad\x60\xaf\xab\x88\xd5\x6f\x67\x1f\xaa\x5e\x6c\x35\xd2\xd4\x52\xf3\x1a\x48\xc7\x91\xcd\x25\x5e\x9b\xbd\x84\xc4\xe7\x7a\x71\x9a\x40\x78\x6b\x5c\x40\x76\xd7\xef\x3d\x3b\x0a\xfd\x5e\xea\xb5\x76\x0d\x7a\xde\x28\xf9\xb8\xd4\xab\x6c\xc4\xa0\xbb\x78\xa9\xe8\xaa\x62\x73\x01\xd9\xbf\x82\xaf\x1a\x90\x3e\xa0\xac\x1f\x75\xc7\xef\x12\x14\x30\x04\x69\x60\x75\xac\x37\x36\x40\x5b\x44\xd2\xf5\x62\xb8\x47\x1b\x78\x2f\x1a\x54\xcb\x64\xdf\x31\xcb\x09\xa2\x5f\x3c\xcf\x12\xa3\xed\xbd\xca\x79\x8b\xfa\xf8\x4d\xb2\x03\xba\x2a\x8e\x0b\x21\x6c\x38\x1f\x8d\x20\xec\xa3\xc6\x64\xbc\xf7\x5e\xa3\x78\x3d\xd6\x7b\x7e\xe2\x8b\xb8\x85\xa1\x61\x5f\x87\x0f\xc6\x98\xf9\x13\x2e\xbf\x1a\x52\xae\xfe\x62\x26\xc6\x6e\x60\xf8\xa0\xa3\xb8\x99\x4d\xc0\x80\xe9\x5c\xd8\xdd\x1b\xe5\x43\xf7\x5b\xc8\x88\xd2\x4f\x94\xa5\xe9\x1b\x37\x49\x09\x0a\xdf\xc5\x80\x90\xa3\xf6\x55\xda\xf1\x2f\x89\x62\x3d\xd2\x46\x68\xc2\x4f\x0f\xe7\xb1\xf9\x65\xc2\x4a\x33\x76\x53\x5b\x58\x72\x20\xec\x86\x4e\xad\xfa\xc8\x8a\x79\x35\x7f\x88\x30\x94\x27\xad\x43\xf0\xb8\xec\xf8\x9c\x5b\x64\xc4\xb2\xba\x5b\xbd\x7f\xc9\x48\x44\x34\xe2\x50\xbc\
xf9\x98\xc9\xfe\xa4\x4c\x31\x32\x6c\x56\x86\x81\xb8\x9e\x22\xf5\x6e\xf2\x82\x3e\x3c\x15\xfb\x91\x33\xe9\xe4\x9c\xae\x4b\x1f\x8b\x29\x27\xca\x0c\x1a\x43\x01\x11\x85\x1d\xaa\x4a\xa4\x40\xe4\xaa\xa1\xb6\xc6\x56\x4c\x0c\x2f\x91\x00\x46\xc4\xce\x7e\xc0\xa0\x35\x05\xb7\x0d\xf3\x03\xf1\x64\x9d\x77\xaf\xb5\xab\xd5\x95\x98\xb2\x1b\x86\xd5\x85\xe4\xdd\x8f\x47\xcd\x08\xcb\xe4\x12\x56\x21\x70\x69\xc7\x6d\x23\x6b\xab\xc1\x5a\x7b\x18\x0b\x95\xb9\xa1\x43\x1e\x88\x8f\x08\xec\xde\xcb\xa9\x00\x92\xfc\x20\x4b\xb6\xb9\x67\xa3\xbd\x2f\xc6\xfa\xfb\xd7\x19\x8c\x25\x1e\x57\xdb\x01\x51\x47\x9a\x30\x36\x1a\x24\x9a\x54\xca\x45\x20\x68\xd0\xa4\x28\xb7\x5d\xb0\x2b\xb5\x90\x2e\xbd\x22\x5e\x7f\x54\xbc\xdd\xbf\x0d\x51\xfc\xde\x02\x6b\xf4\x3a\x8d\x7c\xd4\x36\x7e\x8e\x91\x04\x14\x74\x38\x04\xe3\x33\xaf\x2d\xc6\x5e\x4f\xea\x05\xef\x24\xf1\xa6\x0f\x54\x8e\x6f\x5f\x8d\x87\x42\x1d\xfb\x11\x70\x17\x22\x5f\x45\x7f\x9e\x2c\x26\xcd\x73\xf9\x39\x5d\x8e\x91\x04\xf6\x3a\x6a\xec\xc9\x2c\xe4\xf6\xe1\x42\xe6\x94\x3b\x71\xc6\xb1\x46\xac\xed\x31\x75\x20\x12\x20\x0c\x36\xe3\x9d\x76\x30\x5b\x89\xbe\x26\x9e\x63\x8f\x43\x12\xde\x9c\xc2\xc3\x52\x43\x85\x1d\xad\xb7\xfd\x5c\x48\xf8\x15\x0c\xd3\xf9\x3d\xc7\x8a\x21\x78\xfb\x2c\x83\x14\xb0\xb6\x16\x93\xaf\x9a\x16\x00\x09\x25\x29\xcd\x60\x53\x98\x6e\x53\xb2\x47\x1b\xc7\x46\x37\x1c\xe1\x12\x92\x76\x7d\xfd\x27\xb2\xf3\xaf\x4b\xaa\x38\xa4\x90\xee\xce\x93\x39\xaf\xbf\x95\x73\x50\x26\x45\xec\xf6\xa1\x9e\xf8\xb0\x3f\xde\x3b\xbb\x43\x2a\x84\x2c\x04\xc0\x36\xa9\x93\xf1\x7f\x27\x35\x13\x14\xbe\x06\xfc\x06\x6f\x3e\x9b\xef\x8f\x36\x2a\x87\x53\xe3\x8e\xc0\xd3\x2d\x0b\xc1\xff\x53\x89\x65\x3b\xa7\x56\x4f\xde\x63\xca\x93\x97\x11\xff\x8f\x2c\x95\x77\xac\xf8\x0c\x26\xe2\x0b\x1a\x06\xca\xb3\x9d\x5b\x3a\x3e\x24\x3a\x3f\x0c\xc4\x08\x52\x85\x25\xe5\xda\xc8\x81\x4a\xeb\x5e\xa0\x80\xdc\x40\xe8\x3b\xd8\x72\xb3\x69\xa9\xc9\xbd\x1c\x68\xb1\x8e\xa1\xf6\xa3\x5c\xef\x25\xf7\xe2\xf5\x3c\x29\xa7\xee\x54\x64\x85\x0e\xe4\xce\xd3\x78\x69\xae\x09\x79\x71\x35\x43\xee\xe3\x10\x33\xe6\x8e\x28\x35\xf4\xae\xca\xcd\x9a\x45\x6a\x35\x4b\xe9\xb1\xa2\x66\xf7\x7b\x95\xe7\xbe\x48\x7c\xb4\xbf\x73\x84\x5f\xae\x45\x90\x35\x76\x52\xb8\x32\x3c\xc1\x49\x62\x48\xda\xa8\x84\x18\xc0\xf9\xeb\x1a\x75\xdc\xf8\x0b\x0c\x44\xb6\x8b\xfc\xba\x4a\x53\xcb\xb5\xff\x20\xfa\x55\x45\xca\x38\x5c\x90\x3c\xb2\x9c\xe2\xc5\x41\x6e\x50\xa8\x3f\xf8\x03\xb0\x12\x47\xe1\x8d\xd6\x73\xc1\x70\xa4\x35\xdf\x31\xef\x09\x63\x4a\x39\x69\x66\x1f\x02\xd2\x5e\x59\x90\x8f\x03\x56\x4d\x7e\x19\x7f\x06\xc5\xe6\x48\x61\x78\x42\xe5\x85\x2c\x18\x6a\x68\x2d\xb5\x46\xa2\x3b\x0a\x2d\xdf\x33\x98\xb5\x90\x4c\xf8\xfb\x92\xc4\x85\xc7\x59\x0f\xbd\xb4\xc3\x80\x0f\x69\x91\x37\xe1\xf1\x9f\x5c\x57\x17\x83\xf8\x4b\x10\x6c\x51\xef\xa7\x26\xce\x6f\x7a\x70\x2a\x7c\xf0\x3d\x19\xa1\xa6\x7f\x7b\x60\xd4\xbb\x79\x86\x64\x51\x1f\x05\x16\xff\x31\x99\x56\x8f\xc6\x2e\x6a\x01\xbe\x38\xd1\x84\xa2\xd6\xe0\x73\x61\xdb\x63\xd4\x0e\x4e\xb5\xa1\xd6\x17\x55\x7f\xf7\x31\x96\x24\xb3\x9d\x76\x97\x62\xd1\x0e\x0e\x27\x53\x45\x6a\x83\xfd\xa4\x39\x71\x87\x4f\x5e\x12\xe7\x20\x10\x2c\x32\xb7\xce\xee\x6a\x0d\xf2\x2b\xdb\x57\x0f\xe1\x60\xbf\x9e\x90\x56\xb4\xed\x79\x70\xad\x7a\x7e\x57\xe7\x4e\x39\xa4\x9d\xb1\x18\x30\xe9\x84\xdf\xfd\xf8\x2a\x45\x37\x61\xb4\x97\x43\x9b\x88\x3e\x2d\x84\x67\x2c\x04\xc5\x27\xbe\x15\x95\x9d\x25\x59\xb2\xb3\x96\x03\x48\x8c\x7b\xc8\xe4\xd6\x81\xd3\x56\x75\xff\xd2\xf5\xc5\x2b\x15\x1c\x25\x9f\x8f\xca\x03\xfb\x42\x12\xca\xc6\x2c\x01\xd8\x51\x18\xc7\x76\x12\xc5\xfb\xee\x19\x92\x25\xf1\xf0\x28\xf6\xf2\x62\x85\x58\xd4\xd5\xc9\x3d\x86\x50\x9b\xc5\x49\xee\x8a\xdc\x7e\x0c\x73\x1b\xd8\x0b\xdb\x43\x7a\x1e\x99\x7a\x4d\x27\xf8
\x0c\xf1\x13\x4c\xe1\xa4\xfd\x01\xef\xfc\xcd\x49\x48\x80\x9f\x3e\xdc\xda\xfb\x3f\x28\x57\x8e\x5a\x5c\x68\xeb\x71\x0c\x9c\xac\xa4\x6b\x0c\x33\xa1\x19\xa3\x66\xe6\x72\x5b\x18\xdb\x31\x9d\x08\x18\x5a\xad\x4b\x6f\xb3\x69\xd4\x93\x7d\xa1\x80\x07\x0d\xbc\xbd\xc6\x36\x51\x59\xbe\xe7\x14\x79\xae\x51\x7a\x53\x2b\x07\xca\xe5\x28\x60\x6b\xb6\x20\x03\x04\x17\x41\x8e\x0b\x1a\x0d\x7f\xa2\xee\x86\x1f\xb7\xe5\x6f\x7a\x37\x0a\x7d\x44\xb7\x15\xe8\xee\x6d\x9d\x6b\x11\xb5\x72\x66\x72\x4e\x84\x11\xc6\x35\x75\x20\xbe\x90\x63\x42\x4a\x89\x6f\x23\x2e\xcb\x03\x2a\x78\x8f\x44\x64\x29\x9c\x92\x37\x54\xc4\x40\x1c\x46\x8c\x1a\xf8\x14\x12\x05\x4d\xce\x70\xbf\x86\x90\xba\xf2\x43\x9b\xe4\x6f\x61\x9b\xa0\xe3\x6e\x93\xe1\x49\xcb\xe7\x52\x59\x77\xd8\x9a\x91\x91\x8f\x53\x51\xfa\x28\x40\xf7\xcd\x49\x1e\x66\x9b\xa9\x53\xba\xdb\xca\x5d\x85\x8c\xa3\x49\x92\x44\x23\x93\xa2\x68\x49\xaa\xde\xa2\x1b\x07\xf7\x78\x5b\xa5\xb1\x54\x29\xdb\x6f\x49\x97\x62\x87\x10\x5a\xe2\xe9\x6e\xac\xa7\xca\x52\x23\xc6\x48\x46\x0c\x21\xc9\xfe\xfc\xeb\x6a\x60\x25\x4a\x80\xea\x4b\xb8\x7d\x71\xc6\x14\x57\xa8\x82\x95\x36\xf6\xfa\xbe\x6a\x9b\xfc\x6e\x21\x84\x5a\x0f\xf7\x13\x1b\xde\x82\x42\x9c\xe7\xa9\x11\x23\x7d\x24\xab\xc8\xf0\x2d\x74\x1c\x2f\x5c\x6d\x3b\x9b\xde\x87\x39\x2b\x05\x8e\xf9\x5e\x94\x99\x79\x5a\x22\xa0\xfb\xa3\x74\x6d\x39\xc1\xac\x99\x6b\x93\x96\x74\x1e\xfd\xcf\x99\x20\xa9\x6c\x91\x04\xac\xd0\x8f\xc5\x8f\xdf\x6c\x30\xa1\x28\x3c\xd6\x0d\xd8\xbe\x81\xf9\x79\x8c\x16\x15\x23\x10\x1d\xb6\x82\x6b\xe7\x13\xd0\x75\xae\x06\x6f\x7e\x38\x32\x87\xcf\x40\xae\xf3\x12\xc6\x61\xc2\xd6\x41\x0f\x90\xb8\xed\xc5\xb9\xe2\x41\xe1\x71\x87\x11\x50\x84\x76\x04\x17\x05\x5c\x3e\x75\x02\x53\x45\xd8\xd6\xf1\x12\x0a\xa4\x58\x9b\xc7\x7c\x02\x55\xbe\xee\x86\xb2\x02\xf9\x05\x4f\xbf\x43\xd2\x41\xb5\x5e\x6f\x3d\x85\x7f\x3e\x17\x73\x87\x82\xbd\x55\x93\xc7\x7c\xf3\x2b\x8e\x8c\x3a\xc5\x5d\x64\x4a\x4c\x8e\xe5\x35\x18\x43\x94\xdd\xbe\x82\xa4\x40\x6f\xaf\x77\xf6\xf6\xbb\x10\x63\x6e\x6a\x65\xad\xa7\x65\x3e\xc8\x65\xe7\x53\x8d\x7a\xc3\x05\x5b\x38\x52\x87\x98\xc1\x42\xc9\x39\x9c\xdc\xc4\x86\x3c\x10\xe8\x02\x0a\x60\x97\x01\xb5\xf1\x34\x4c\x98\x21\xcc\x94\x7e\xa3\x60\xb3\xd0\xd8\xad\x27\x53\x42\x33\xf4\x23\x53\xf7\xab\x3d\x58\xfc\x6a\x07\x8c\x9b\xc8\xaa\x0f\xe8\x89\x40\xc4\xa5\x86\xdb\x78\x6d\x57\x71\x16\x1e\x9a\x1f\x74\x15\x24\x97\x80\x30\x19\x05\x62\x5e\x1a\x0d\x4e\x62\xd8\x7c\x29\x32\xf2\x5d\xc9\xb6\x62\x63\x8b\x37\xdf\x84\x44\x95\x72\x5f\xa2\xca\x51\x8f\xa2\xff\x5d\xe2\x13\x66\xd2\x05\x19\x60\x4d\x3c\xa5\x0f\x89\x35\xcc\x64\x8f\xdc\xd8\x07\x17\x25\xe4\x8d\xc0\xb9\x8f\xdb\xd7\x15\x9f\x75\x68\x67\x1a\xb3\xfe\x8b\xc6\xc6\xa8\x8b\xe3\x7b\xe5\xc8\xce\xab\x85\x4c\xbc\x34\x07\x7f\x57\x0d\x66\xf6\x8f\xe7\x77\x02\x75\x55\x85\x44\xbd\xe6\x4d\xd6\x43\x4d\xac\x50\xe1\x7e\x39\x8d\xe0\x7b\xe3\x08\x00\x0c\x0d\xaa\x59\xc1\x56\xd0\x8c\x2c\x0b\x41\x04\x13\x85\x2f\x50\xfe\x73\x43\x34\x22\x45\xe3\xa7\x8f\xd6\x50\x3c\x62\x8e\x45\x3c\x02\xf7\xdb\x37\x51\x1f\xf1\x67\x78\x40\x28\xb8\x28\x19\xbf\xfb\x95\x96\xdc\xf5\xd0\x2c\x7f\x0b\x3a\xa1\x3c\x01\x33\xe8\x80\xda\x2e\xf3\x2f\x21\x4f\xca\xfa\xf7\xed\xe0\xc4\xbc\xf6\x26\x9e\x17\x3a\x5f\x18\xd6\xfd\x50\x2d\x13\xe7\xd2\x0e\x48\xd9\xda\x6a\x86\x2c\xbd\x47\x9b\x08\x8b\x06\xf5\xd6\xbe\x32\x83\xed\xe3\xcd\xa6\xb6\x03\xf1\x3c\x63\xf8\xed\xd1\x46\xb2\x61\x80\x46\xcc\x11\x3a\xf6\x28\x71\x95\x16\x07\xad\x41\xbe\x0e\xd9\x6d\x0d\x88\x40\x52\x12\x53\xab\xda\xd8\x3e\x20\x72\x52\x93\xb0\x17\x99\xae\xd2\xf8\x22\xba\x65\x93\xee\xa3\x68\x9d\x5a\xbe\xb2\xa6\x89\x6b\xe6\x26\x63\xc0\x28\xfe\xf6\x82\x49\xed\x98\x5e\xcc\xd8\x1d\x9d\x2f\x6c\x06\xb8\xc6\xc9\x39\x1d\x29\xa4\x69\xd2\xf4\xe2\xd
[binary payload omitted: a long run of hex-escaped bytes embedded in the patch; no human-readable content]
xb0\x8f\xdb\xf5\x6f\x50\x78\x2b\x20\x19\x12\xc5\xfd\x35\x4b\x73\x22\x73\xae\x21\x61\x7a\x62\x3e\x18\xa1\x04\x90\xc5\x0a\xcf\x94\x21\xca\x51\x4d\x7d\xd5\x4b\x10\xaa\x0b\x7a\x2b\xe5\xe0\xe5\x10\xbf\x7c\x5c\xe3\x52\x12\x9a\xf0\x66\x46\xdf\x8f\x81\x62\x35\x2f\x6d\x27\x64\x01\x5e\xac\xe1\xfb\x35\x55\x44\x66\xae\x4c\xf3\xe8\x67\x94\xed\x36\x59\xa6\x1d\xfd\x55\x93\x50\xa6\x20\x97\x83\x85\x66\xed\xc9\x6f\x7b\x0f\xfd\x74\x1d\xb6\xe4\xa8\xb2\x86\xb6\x04\x68\x9d\x25\x91\xae\x98\xe7\xae\xef\x2b\x50\xc4\x54\x96\x5a\x26\x95\x83\x76\x92\x22\x01\xea\x0f\xb5\x27\x3e\x81\x81\xc6\xe9\xdd\x44\xd3\x62\xac\xb8\xb2\xfd\xcb\xdb\x88\x8e\x88\xed\xc7\x77\x28\x21\xa3\xb5\x7b\xc7\x6b\x55\x65\x33\x89\xd2\xd1\x0c\xc8\x17\xb5\x93\x6a\x04\x33\xb8\x72\x55\xb1\x23\x8f\xa4\xce\x8d\x4a\xb6\xc1\x20\xd6\x82\xd9\x7b\xeb\x06\xa5\xc5\x2c\x43\xa2\x50\x49\xc6\x09\xd0\x8e\x40\xa2\xd4\x5c\xed\x18\x3b\x96\x8c\xf6\x9a\xc1\xa1\xf8\x7e\x1f\xea\xfe\xcd\x9f\x83\xc2\xd1\x02\xaa\xa8\x4b\x66\xbd\x1a\x85\xc0\xf2\x4f\xe6\x40\x66\x57\x02\xd8\x9c\x00\x5e\x1e\xd9\x2f\xba\x99\xc8\x60\xfc\x24\xa2\x21\x00\xbd\x66\x32\x8f\x97\xdc\x3f\x36\x9c\x71\xdc\x73\xa3\xce\x0a\x63\xa6\x36\x4a\xf4\x2d\xb7\xaa\x9d\xb8\xec\x11\x0d\x69\xbd\xed\xd2\x85\xd3\x2d\x0f\x00\x87\xcb\x32\x46\xfa\x6d\xc8\x8e\xca\x11\xc4\x5a\xf8\x21\x42\xf7\xc3\xf8\xbb\xc7\x20\x70\xd0\x43\x55\x95\x0c\xff\x9d\x9e\x94\xeb\xe5\x57\xfd\x94\x8f\xfb\x26\xb9\x33\xd1\x0d\xd0\x7d\x2d\x80\x64\xe3\xaa\x9c\x7c\x55\x32\xe7\x6f\x82\x38\x94\xa7\x17\xf8\x1a\x2a\x4d\xab\x7a\x63\x7b\x96\x5f\xc3\x01\xf2\xec\xc9\xb0\xf0\xc7\x68\xb7\x06\xb7\x39\xba\xfe\x95\x0d\xb5\xb9\x57\x94\x78\x5b\x22\x9c\x9a\x55\xdd\x9f\xdc\x9f\x2f\x7b\x8e\x12\x0a\xd0\xe6\x52\x6a\x54\x63\x10\xea\xdf\xa7\x2a\x6c\xb2\x07\x54\x96\x22\x27\x0e\x86\x53\xcc\xb8\xe0\xd2\x54\x62\x99\x9a\x24\xe8\xf3\x72\x51\x12\x97\x2e\x1e\x8c\xe6\x5a\x73\x56\x9b\xf2\xec\x4a\x1b\x04\x3b\x5b\x0b\x25\xab\x10\xb0\xc8\x6a\x52\xf3\xd9\x47\x4f\x37\x46\x52\x33\x18\xf7\x10\x8f\x63\xf8\x19\x26\xde\xa7\x7d\x87\x56\x8e\x5e\xac\xec\x68\xe4\x7a\x59\x94\xbc\x5e\xdc\x53\xfc\x4f\x75\x8d\x1d\x37\x68\x51\x4c\x56\xdc\x7b\x18\xad\xe0\x38\xee\xcf\xbc\xe2\x1d\xbc\xd3\xa4\xc1\xde\xeb\xf2\xda\x7d\xb1\xab\x8b\x17\x03\x15\xf5\x0a\x88\x15\x49\xbb\x40\x5f\xd1\x21\xd1\x32\x66\x62\x25\xd5\xd9\x07\xe8\xa3\x31\xe6\x6c\x9f\x94\x1a\x56\x89\x03\x4d\x94\xa0\x1d\x2d\x3a\x3f\x19\xda\xf8\x3e\x0c\x2d\xd8\x4e\x13\xf1\x36\x75\xaa\x76\x4b\x11\x93\x99\x57\x55\xaa\x7a\x28\xfa\x64\xf7\x42\x66\x25\x4c\x5d\xd7\xbb\x8d\x91\x27\xf7\x31\xe0\x76\xe3\x28\xf7\x38\xbf\x86\x9b\xe8\xbd\x56\x49\xc8\x10\x92\xfb\xef\xcc\xc7\x77\xa7\x55\xd1\xc4\xa0\x03\x45\x1b\x50\x56\x2b\xe3\x0e\xcd\x1e\x96\xe5\x8c\x24\x46\xf8\xe7\x79\x7c\xb0\x66\xc8\x92\x68\x59\x50\x12\xe7\x35\x22\xc9\x5d\x7c\x8f\x25\xa9\x81\x81\x3c\x9b\x83\x91\x9e\x56\x76\x9e\x62\x70\x7f\xa3\xa5\x20\x29\xf0\xb1\x69\x42\x9e\xf7\x25\xe0\x5b\x0c\xd6\x26\xe8\x48\xee\x8a\x0b\xb9\x5a\x16\x3c\xc3\xa1\x56\x2e\xb7\x5e\x85\x71\x8f\x07\xa1\x4b\x6e\xeb\x00\x04\x49\x0a\xe5\x5c\x91\x34\xed\x55\xd2\x57\xce\x68\xe3\xfc\x8f\xce\xf2\x70\x06\x3a\xa4\xb6\x38\xf8\x70\x32\x7a\x13\xed\x0c\x7e\x5f\x0f\x9b\x37\x3e\x90\x0a\x39\x8f\xea\x89\xe8\x0b\xc2\xec\x22\x3e\x4b\xc9\x99\x0e\xdd\xaa\xfd\x1a\x8a\x12\xf0\x91\xf5\x7c\xe2\x3e\x32\x43\xbc\xf7\xce\xb2\x4a\xd6\xcb\x7c\x09\x2a\x80\x6b\x38\x88\x75\xdc\x4a\xdd\xc7\xc3\x90\x77\x58\xf9\x71\x1e\x32\xdf\xf5\xd7\x40\x94\x61\xb9\xbc\x63\x8f\x1c\x6d\x5b\xc9\xbb\xf9\x70\xef\x7a\xd9\xc1\x80\x01\xca\x05\xd1\x83\x4a\x6f\x49\xae\xdd\xdb\x2b\x72\x70\x44\xea\xee\x47\x39\xf5\x3c\x42\xa9\x1f\x9a\xee\xce\xf9\x6b\x5d\xd1\x14\xe5\x92\x94\x5c\x66\x49\x8e\x77\xed\x60\x69\x9f\x4b\xa2
\x33\x59\xba\x97\x72\x0d\xea\x76\x94\x04\x83\x88\x5b\x32\x46\xa4\xf9\xba\xcd\x56\x4c\x7a\xb3\x46\x85\xf9\x5e\x66\x82\x21\x92\x56\x4c\x50\xf8\x26\xba\xfa\x3d\x7e\xce\x27\xdf\xcf\xc7\x49\xc8\x3b\x84\x95\x06\x05\x27\xb5\x65\xdd\x4d\x2d\xdf\x95\xf2\x2f\xea\x13\xaa\xb3\xa9\x4d\x74\xfb\xd6\x75\x60\x3e\x34\x6c\xee\x83\x00\x00\x23\x24\x12\x2d\x78\x88\xab\x40\x19\x7e\x44\xff\xe7\xc0\x71\xa9\x96\x28\xfd\x6c\x0c\x9e\xfd\xd8\x72\xcc\xde\xdb\xe1\x97\xbe\x29\xd4\x48\x0d\xd9\xc1\xa2\xac\x93\x3c\xb5\xd6\xab\x79\x52\x72\x21\x62\x24\xf7\x3a\x2b\x90\xe5\x75\xd9\x34\x5f\x00\xa6\xa3\x96\x98\xe5\x8e\x84\x56\x77\x87\x67\x8c\x56\xdf\xbc\xc0\xb6\x5c\xc9\x19\x20\xbe\x45\x74\x79\x3a\xa4\x94\x74\xe6\x54\x9c\xd1\x25\x0f\x58\xa0\x73\x2f\x99\x3c\x56\x3e\x0a\x42\x4b\x64\x41\xa6\x74\x85\xf0\xac\x03\x0e\xe6\x20\x61\x50\x41\xa7\x01\x77\x55\x26\x09\x99\xac\x56\x0b\x1a\x6b\xdf\x70\x05\x88\xc3\xf8\x04\x18\x61\x1e\x6e\xb1\x22\xb2\x29\x54\xc9\xe4\xc2\x79\x52\x39\x62\x42\x4f\x92\x1c\x05\x56\xa3\x8b\xb8\x95\x4b\x15\xc3\x5c\xd2\x6b\x34\x29\x69\x29\x51\x06\x56\x1d\x5a\xcf\xbd\xe1\xb9\x7b\xaf\x43\x87\x52\x69\xbd\x82\xd8\x3b\x9d\x4f\xee\x13\x24\x40\xf5\x13\x9b\x71\x3e\xb2\x01\xab\x2c\x68\x0b\x12\x79\xf9\xfb\x0f\x51\xf9\x83\x01\x30\xa5\x4a\xe9\x5c\x57\x5e\xe0\x50\x29\x1a\x23\x8a\x49\xe1\xf1\xe6\x4c\xa7\xc2\x61\x81\xe6\xb8\xdf\xab\x6e\xad\x70\x1c\xc6\x43\x96\xa3\xbd\x1a\xa7\x61\xaa\x0b\x6c\x91\xfa\x30\x8c\xa9\xa5\x10\x14\x09\xb4\xd4\x01\x2f\x77\x6e\x61\xd4\x1b\xf9\x51\x12\xb7\x15\x15\x62\x85\xad\x95\x6b\x86\xb2\xbc\x49\x77\x7c\x71\x63\x10\x6c\x40\x10\x84\xc4\x0f\xed\xae\x07\xd9\x68\x37\x26\x56\x02\x84\x60\x0e\xf5\xc7\x05\x23\x82\xa0\x96\xed\x33\x82\x61\x05\x7b\xb9\xc5\xa5\x69\x93\x39\x73\x01\x1b\x2a\xd5\xe2\x76\xb6\xd5\xc9\x95\x8e\x69\xfe\xeb\xfb\x65\x88\x3e\xa8\x97\xb7\x4b\xd6\x1a\x4b\x18\x85\x8d\xce\xbe\xbb\x60\x4e\x76\x91\xbe\x83\x75\x87\xe2\x25\x09\x3e\x49\xae\x81\x9e\x01\x86\x41\x0d\x8b\x77\x29\x0a\x2a\x78\x36\x9b\x1c\x18\x3d\xc4\x68\xa9\xdf\xca\xc0\x76\xb6\xf1\x0f\xb4\x67\xa3\x51\x73\x39\x0c\x20\x08\x70\x53\xee\xd5\xef\x36\xdd\x08\xed\x61\xc5\x4f\x58\x59\x0c\x42\xc0\x40\xb8\xe3\x8f\xfb\x4e\x81\x2c\x81\x5e\xc7\x1c\x5a\xdb\xa4\x55\x6d\x4e\xce\x6c\x36\x6c\x02\x37\x1c\xb6\xf5\xbb\xe5\x80\xee\x75\xdb\x4d\x7d\xf4\xe3\x09\x53\x8b\xd3\xb6\xd9\xce\xe8\x11\x68\xa9\x6b\x86\x85\x73\x74\x9a\x07\xdb\x12\x41\x13\x21\xd4\x0e\x42\x11\x8c\xeb\x6e\xab\x72\x9d\xa1\xcd\xdd\x1b\x94\x0c\x82\xb2\xbe\xed\x92\x8a\xc4\x0e\x4c\xa5\x80\x6e\xac\xf6\x49\x0b\xe5\x12\x81\x42\x57\xe3\xcd\x12\x3b\xcf\x4e\xd9\x58\xf5\xbc\xfa\xa0\x26\xbe\x38\x6a\xc4\x02\x42\x34\xd6\xd5\x41\x39\xa4\x88\x5a\xa2\xa0\x54\xb7\xbf\x7c\x16\x8f\x1c\x7e\xa5\xe3\x31\x79\x10\x5e\x22\x8f\x43\xb0\xd8\x98\xa8\x49\x15\xab\x71\xb5\x2b\x81\xa6\xb1\x18\x6f\xc3\xcb\xf4\x34\x07\x9f\x91\x59\x8b\x10\xd5\x83\x39\xc6\xd0\x57\xd3\xd3\x71\x6b\x55\x68\xca\x63\x41\xc9\x5f\xb9\xa2\x85\x7c\x61\x97\x60\x47\xe9\xe1\x51\x33\xd7\x7d\xf2\x3a\xc7\xce\x17\xaf\x57\xcb\xf1\xab\xf9\x6d\x14\x2f\x2e\x1c\x34\xde\xfd\x0e\xfa\x49\xa6\x5a\x25\x7c\x9f\xd5\x74\x15\x32\xe1\x5e\xc2\xa1\x41\x9f\x98\x98\x9e\xa6\xb4\xeb\xf8\x13\x0e\x5c\x9f\xe2\x26\xfc\xca\x36\x76\x06\x84\x7f\xcb\xce\x6c\x3d\x8d\xef\xa3\x76\xbe\x6f\x81\x1a\x6f\xf9\xdf\x54\x65\x6f\x36\x29\x63\x73\x49\x74\x84\x45\x51\x8e\xcb\x83\x7b\xcb\x3f\x01\xaa\x00\xe5\x82\x91\xbc\x24\x43\xd9\x87\x6d\xa9\x7b\xff\x63\xd8\x5b\xc7\x29\xb9\x8f\x6e\xb4\x2e\x25\xa5\x9d\x85\x4a\x9e\xf7\x4f\x79\x63\x93\x79\xf4\x59\x7e\x49\x33\xca\xd8\xf6\x63\x67\xc1\x4b\x5e\x3a\x7a\x78\x7c\x25\xb4\xf2\x42\xf1\x92\xbc\xac\x24\x81\xa6\x29\x78\x02\x93\x54\xbf\xa6\x51\xae\x45\xa1\x16\x59\xea\x52\xe5\xa2\xe5\x0f\xc
6\xe2\xe7\xd2\x8c\xf9\x29\x05\xb8\x57\xa5\xc6\xba\x82\xb8\x32\xcc\xfe\x45\xa0\xef\x77\x60\xf6\xb9\x37\x7c\xd6\x30\x02\x1b\x02\xa3\x82\xb8\xdc\x63\xdc\xa2\x12\xb6\xb1\x85\x02\xb4\xbc\xd4\xcf\x67\xac\xbe\xf7\x93\x06\x6d\xdb\x64\x4c\xc0\xb8\x49\x4b\x81\xd5\xec\x46\xf2\x05\x9a\x2b\x82\x9c\x03\x45\x00\xde\xda\x2d\x4a\x5b\x05\xe3\xb0\xe2\x08\x2d\x97\x9b\x66\x6d\x15\x96\x03\x94\x06\x53\x5a\x62\xd4\x95\xa3\xf6\xc7\xa9\x92\xd7\x6d\x33\x29\x06\x9c\x6e\x12\xf4\xff\x55\x05\xd4\x1e\x1e\xa6\xa1\x2a\xcc\xfa\xd8\x02\x47\xfb\x85\xd1\xc2\xfd\x3e\x2a\x4e\x44\xcc\x86\x3b\x05\x2e\x4b\x87\x6e\x1f\x02\xf1\xa9\xcb\xde\x66\x04\x23\x9b\x93\xa2\xbf\x17\x32\x30\xa8\xc3\x8f\xba\x6c\xbf\x44\xb3\xb1\x77\x0c\x24\x90\x35\x66\x2d\x79\x8d\xcd\xb5\x25\xd4\xd5\x1d\xb4\xa0\x76\xa9\x66\x8c\x16\x60\x2b\xd0\x3f\xa7\x8d\x81\x01\x4c\xf6\xae\x35\xd3\x84\xe8\x02\xc7\x60\x9b\xe6\xfd\xb1\x56\x2d\x4c\xa5\xdc\x2e\x0c\x42\x71\x03\xb6\x8d\x7e\x9a\x77\x3b\xe3\xd9\xa1\x77\x2c\x85\x0a\x78\x04\xaa\x9b\x31\xd2\xc9\x9b\x89\x75\x28\x39\x1c\x75\x79\x09\x8a\x6d\x76\x53\xcc\x05\x88\x90\xe9\x30\x1d\xa0\xdd\x31\xec\x8d\x71\x7f\xe7\xcc\x1e\x46\x60\x01\x6f\xe6\x96\xa9\x11\x78\x4e\x8f\x24\x2a\xb6\x8a\xfb\xf3\x30\x6e\xe4\x5c\xce\x9a\x9c\x9f\x0b\x7a\xec\x83\x1b\x83\xe5\x45\xcc\x1f\xe5\x90\x4e\x6f\x83\x1a\x1e\x1a\x1a\x18\x86\xe0\x67\x57\xed\x54\xbe\x3c\xc4\x95\x69\x18\xb3\xf9\x0e\x0b\xeb\xb7\x49\x23\x34\x20\x52\x8e\x6c\x94\x71\x17\x77\x4a\x3a\xdf\x2b\x50\x37\xa7\x98\xc3\x65\xb5\x02\xb3\xe0\x90\x8c\x04\xd7\xc1\x6f\xf9\x54\xd7\x9a\x85\x65\xcc\xfe\x04\xdd\x49\xe7\x3d\xb5\x04\x2b\x02\x11\x69\xb2\x23\x2a\x04\x70\x8c\x36\x84\x03\xf5\x6e\x37\xf3\x9c\xa7\xc7\xc8\xf1\xd6\x96\x67\x72\x71\xfd\xa1\x6c\x71\x1c\x20\x4b\x1e\x45\xab\xa1\xd9\x92\xf3\x65\x4b\xfc\xaa\xec\x86\xf0\xc7\x4d\x7c\xb5\xaa\x08\x7c\x9f\x7c\x1d\x6e\xb5\xa7\x03\x49\x7d\xb7\x3f\x56\x7e\xcd\xab\x69\x79\xa6\xd3\x38\x7f\xd6\xd1\xb2\xe6\x5c\xc5\x34\xfd\xe5\xac\xca\x71\x65\x60\x1c\x7d\x64\x80\x19\xdb\xa0\xad\x94\x03\x9a\x66\x11\x9e\x4a\x6e\x47\x08\x33\x2f\xe7\x56\x59\x6d\x9a\x87\x9b\xaf\xba\xf5\x7f\x18\x3b\xf3\xc2\xe4\x90\xe1\x73\x1e\x08\xee\x27\x5a\xd1\x93\xc5\xed\x69\x33\x41\xe2\xd7\xf4\xbc\x29\xdb\xcb\x87\x93\x10\xe5\x4b\x3f\xb9\xea\xde\xfe\xc8\x0b\x0e\xd0\x92\x7b\xe8\xaa\xde\xc7\x29\xa0\x56\x32\xda\x61\x40\x25\xf6\xf5\x08\xfc\xed\x70\x6a\xe8\x4c\x04\x82\x0d\xc4\x61\xec\x44\x3f\x89\x5e\x02\x64\x8f\xd0\xa7\xda\x82\xa1\x77\xdf\x20\xf9\xc5\xc7\x72\x0c\x07\xee\x79\xa2\xf3\x81\x03\x9d\xdc\x39\xca\x82\x5f\xe7\x9d\x82\x68\x0a\x7d\xb0\x53\xe5\xeb\xf6\x32\x35\x53\xe8\xe3\x57\xd4\xaf\x23\xd4\xe2\x15\xf7\xa8\xd6\xc8\xbc\x74\xa2\xa6\xd1\x70\x33\x4d\xe1\xc6\xac\xb4\x6f\xab\x51\x64\x27\x85\x9f\x71\x2f\x11\x7b\x39\xae\xb9\x87\x65\x87\x2e\xb5\x6d\x59\x87\xab\xe3\x37\x41\xb7\xab\xba\x7e\x8f\x9e\x6d\x95\x2c\xb5\x82\x9f\xc9\xbc\x3b\x1b\x87\xd5\x52\xa2\x84\xcf\xc5\x7a\x0d\x3d\x2a\xdd\xa0\x5d\x34\xf6\x94\xe1\x31\x9d\xfb\x95\x6e\x96\xec\xc0\x57\x14\x54\xb8\x77\x66\x5d\xca\x2d\xd1\x75\x17\xca\xa7\x39\xa7\xa7\xc2\x99\x52\xcb\x14\x9a\x35\x40\x3d\x03\x46\x48\x2a\x11\x6c\x06\x62\xbc\xd0\x48\x10\x22\x59\x36\xe5\xcd\xb9\x4a\x9a\xc5\xaa\x4c\xd3\x39\xd3\xb4\x2a\xe9\x63\xf2\x14\x94\x86\x2d\x41\x7d\x67\xe2\x88\x87\x7f\xfb\xa0\x2f\xc3\x1d\x43\x9b\x9c\x14\x78\xfd\x91\xaf\x72\xa9\x45\x58\xeb\x0f\x14\x74\xbb\xc6\xa2\xac\xd3\x2b\x39\x62\xef\x7b\xf1\x66\xe5\x25\xef\x80\x05\xb6\x73\xee\x38\xa6\x60\x50\x1b\xc2\x53\x04\xb1\x2c\x57\xc6\x44\x15\x03\x65\x1e\xaa\x4c\xb3\x52\x58\x0f\x7b\xdf\xb0\x1b\x90\x63\x4a\x5f\xee\x21\x9e\x5f\xc9\xea\x75\xff\xa6\xf4\x34\xaa\xe2\xa3\xc2\x1f\x8e\x64\x38\xe0\x91\x58\xd7\x7f\x16\xa9\x2d\x68\x5d\x35\xd3\xf6\xb7\x54\xe1\x
61\x69\x3a\xb9\xcd\x40\x09\x3e\x33\x72\x6b\x5a\xea\xfc\x31\xe3\x83\xe1\x43\x39\x08\x15\x14\xc2\xeb\x15\xf4\xa0\xf8\x15\x49\x65\x32\x98\x60\x4f\xe0\xf3\xdd\x94\x15\xf2\x7c\x0f\xc8\x9e\x21\x54\xc6\x4e\x2d\x96\x71\x76\x95\xbb\xc2\xdf\x22\xf2\xc1\x9f\x6a\x1f\x00\x55\x8b\x4a\x84\x5d\xf3\xbb\xab\xaa\x5f\x46\x60\x67\x69\x2c\x2b\x9a\xb5\x21\x31\x8f\xce\xab\x46\x21\x4b\x1f\x71\x62\xbd\xe0\x59\x98\x81\x64\xeb\xf2\xa7\xca\xab\x2e\xd7\x3e\x03\xe1\x62\x63\xb5\x92\x78\x95\xe7\x99\xac\xaf\x22\xa3\xe3\xfa\xdc\xbe\xba\x08\xc2\x6f\x2b\x06\xbe\x6f\x41\xa2\x12\x59\x03\xa1\x5f\x91\x10\x4a\x96\x86\x60\x04\xab\x83\xed\x8d\x50\x56\x07\x48\x42\x83\xbb\x19\x26\xac\x4e\x33\xf3\xa9\x54\x66\xaa\x58\x25\x21\xca\x45\xae\xe2\x95\xfb\x63\x6a\x1b\xd5\xfc\x07\x37\x2f\xbb\xe0\x04\x8e\x5f\x94\x58\x48\x70\x88\x59\xcb\xf7\x5d\x2e\xbd\x24\x91\xa9\x21\x4f\x71\x3c\x4b\xea\xc1\x59\x64\x9a\x67\x7f\xda\x43\x9a\x68\x9a\x73\x23\x4a\xcb\x0b\x08\xdb\xf2\x23\x7c\x4c\x52\x38\x29\x05\x7d\xf5\x42\x2e\xdb\xea\x13\x12\x01\xbb\x84\x0c\xd0\xdd\x6c\x1d\x57\x01\x7d\xd5\xf6\xe2\x74\x38\x30\xa6\x6f\x27\x90\xf8\x90\x84\x4a\x4d\x27\x36\x58\xff\xa6\x6d\x2d\x28\xb0\xce\x56\xca\xd2\x7c\x3b\x4e\xfe\x63\x8c\x4c\xd5\x9c\x9b\xf4\xa5\x2a\xa0\x54\x49\x4f\x97\x5b\x33\x42\x36\x16\x18\xa6\x24\x97\x22\x24\xf4\x68\xcb\xa5\x46\x86\x8a\xa5\x02\xb8\xc2\xf4\xcb\x55\xf9\xa6\x32\x03\x2a\x01\x06\x80\x97\xa3\x1d\x40\x50\xad\xe1\x13\x18\xb2\x69\xc8\x31\x85\x3f\x4e\xe2\x28\xd3\x58\x56\xb9\xe4\x94\x2b\x8d\xcc\x28\x5b\x70\x0e\xb8\x6b\xdb\xaa\xc7\xce\x02\xdf\x95\x52\x3b\xc8\x08\x3c\xec\xb2\x34\xae\x2e\x79\xbd\x41\xbe\x94\x95\xf6\x4b\xaa\xea\xaf\xb2\xd0\x34\x10\x8c\x8a\x9e\xd5\xba\xbd\x6b\xf5\x58\x49\x3a\x23\x26\xe3\x26\x47\x4b\x45\xf6\x2f\xfa\xc5\x31\xb4\x6c\x9b\x74\xfa\xc2\x6b\x00\x2a\x13\xce\x8c\xf0\x31\x96\xf3\x81\x4d\xe9\x90\xdf\xff\xe3\x90\xa5\x76\x5c\x6a\xac\xed\x45\x00\xac\x80\xaf\xf6\xb0\x7c\xab\x34\x7b\x5b\xca\xff\x5d\x95\x4f\x5e\xa3\x9c\xe0\x97\xfe\xd7\xf4\x95\x13\x68\x2d\xcb\x50\x9e\x93\xe2\xcc\x61\x9a\xeb\x81\xd9\x72\x00\x90\x21\xf4\xff\xfc\x12\x88\xd3\x44\x26\x86\x06\xfb\xdb\x02\x9e\xef\xb2\x73\x85\xea\xdc\xdf\xc6\x80\x9d\xa8\xf8\xf6\xe3\x32\x74\xe4\x70\x3a\x96\x01\x0e\xaa\x5a\xd4\x37\xd6\xf3\x23\x67\x82\x3b\xa2\xd2\xae\xed\x51\x52\xca\xac\x9f\x0e\xe7\xfd\xae\x80\x09\xf9\x5d\x9b\x43\xf7\x1a\x26\x02\xbf\xea\x07\xd2\x5d\xb8\x2a\x33\xd6\x04\x34\x7b\x49\x63\x21\x75\xe1\x1f\xbc\x05\xfe\x1b\x00\x55\xdc\x94\x28\x0f\x29\xf9\x64\x7b\xbb\x29\x86\x39\x93\x36\x10\x27\x39\xa1\xbb\x6b\xc5\x82\x5a\x69\x97\xc3\x3a\x74\x47\x59\x73\x3a\x6d\x44\xc3\x5e\x51\xb4\x64\xf0\x81\xd6\x5b\x7b\x73\xa4\x34\x56\xae\x5e\xc0\xa8\x49\x55\x6c\x4f\xf6\x14\x5d\xce\x43\x57\x4c\xb5\x97\xe6\xfa\xb1\x65\x3e\x11\x46\xef\x8a\x9b\xbc\x4a\x60\x17\xa6\xc0\x28\x16\x7a\xf9\xa4\xcb\x55\x91\x0f\x4d\x80\x10\x2b\x2d\x55\x3d\x53\x1a\xf5\x16\x63\x64\xe3\x6b\xb6\x30\x86\x82\x0a\x5b\x28\x13\x05\x5e\x4d\x78\xb4\xc3\xb1\x34\x3c\xa4\x26\x94\x4a\xf6\xbf\x9f\x42\x52\x5e\xd6\x8f\x31\x41\x5a\xac\xae\x02\x25\x84\xe9\x0a\xa9\x3e\x5c\x0c\xd0\x77\x5f\x14\x05\x7a\xb0\xb3\xa8\xaa\xfb\xac\xfa\x5b\x43\x49\xe7\x71\xd2\x2b\xca\xce\xab\x5a\xa5\x13\x6d\x6e\xeb\xd0\x41\xce\x7b\x37\xba\xb5\x97\x60\x62\x1d\x5e\xd4\x12\x0c\x9a\x25\xdd\x66\x72\x20\xf4\x1b\x74\x1d\xa2\xda\xc6\xa7\x45\xff\x8a\x3d\x48\xaf\x04\xc8\x2b\x28\x2e\x8f\x54\xe3\xdf\x9b\xed\x44\x2e\xed\xe8\x03\x56\x8c\x78\xea\x55\x62\xe5\xe5\x32\x7c\xf7\x9c\xf0\x4a\x29\x75\x87\x63\x56\xff\x04\x28\x33\x01\xf8\x89\xb6\xa9\x49\xe7\x2b\xba\x45\x50\x78\xd8\xea\x10\x89\x7c\x44\xd6\xee\x16\x9a\x70\xf2\xea\x89\x8e\xb4\x29\x03\x99\x9f\xe7\x1a\xd7\x38\x30\x12\x23\x59\x4f\x9c\x4b\x32\x55\xf1\xff\x83\
x88\xb9\x43\xb4\xc9\x01\xbc\x71\x26\xd0\xc6\x06\x02\x0a\xc7\xe6\x83\x78\x9f\x9e\x4a\x8f\xe5\x9a\xd4\xaa\x1b\x2f\xcf\xeb\x92\xa7\xda\xd7\x8a\x0d\x74\xc5\x45\xa2\x9e\x9a\xfc\xb6\x04\x5a\x36\x0d\x20\x87\x39\x61\x39\x8d\xab\x78\x2c\xe0\xa5\xda\x87\xf0\xc2\x84\x67\x2d\x47\x9c\xe9\x0d\xfa\x45\x80\xfc\x3e\x1a\x49\x97\xc8\xce\x58\xd8\xe9\xa4\x53\x55\xde\x2a\x1b\xea\x62\x5d\x3d\x59\xac\xb3\x7c\xcd\xab\xf0\x21\xfe\x02\x65\x8a\xc5\x27\x1c\xed\x2d\x8a\x45\xac\xad\x92\x5e\x24\xd2\x6e\x62\xbe\x07\x9a\x4b\x71\x99\x36\x49\xcc\x2e\x1d\xc2\x84\xc8\x03\x6a\x03\x41\x3d\x54\xc1\x75\x0d\xdb\x8a\x93\xb3\xda\xb5\x66\xc3\x24\xc4\x3a\xd1\xc5\x21\x51\xd2\x16\x75\x5c\x79\x25\xdd\x9c\xf6\xa2\xab\x1b\x34\x82\xe0\x0d\xfb\x0e\x00\x33\x6e\x44\x20\x27\x16\x95\x60\x83\xb2\x7a\xb4\xe1\x8e\xfc\x57\x8d\x15\x4c\x0e\x2a\xd7\x20\xba\xd3\xd8\x0f\xe5\xec\x3f\x6d\x25\x40\x2d\xcb\x52\xe4\xeb\xdf\xf0\x2c\xfc\x6c\x6a\xde\xcd\x2e\x6c\x0a\x31\xc7\x1d\xba\xcf\xea\x10\x40\xd0\xc9\x61\x9d\xe2\xe9\xba\x5c\xf7\x92\x1c\x9e\x3c\x6b\xf6\x3c\x50\x20\x1a\x94\x27\xfe\xa4\xec\xf3\xc3\x68\xc6\xda\x43\x18\xaa\x0e\x2d\xec\x70\x2f\xef\xa8\x93\x17\x02\xf2\x95\xe8\xb4\x5a\x07\x08\x8c\x44\xdc\x8c\x73\x23\xc1\x9f\x4d\x1a\x34\x14\xab\x52\x96\x2c\xd4\xb3\x96\xe3\x34\x46\x1f\xc0\x0a\x40\xb7\xaf\x7e\x2b\xd7\x83\xca\x32\x70\x17\xa7\xa9\x26\xce\x5b\x72\x5b\xb0\x7c\xdc\x4b\xe0\x21\x89\x7d\xaa\xc8\x23\xb0\x81\x83\xaf\x50\x8a\x2d\xb1\x99\xf3\xba\x8f\x52\x5c\x87\x22\xd1\xee\xba\x90\xb4\xbf\xca\xb2\x02\x6e\xb7\x5c\x93\xe7\x51\x01\x47\x86\x15\xdd\xfe\x57\x15\xac\xa3\x13\x7b\xf8\x28\x41\x73\xf6\x4b\x9e\xcc\xba\xa7\x58\x91\x0e\x8e\xbf\x00\x1e\xaa\x93\x1c\x89\xd9\xae\xe5\x63\x34\xf3\xd0\xf0\x4d\xc7\x89\xdb\x10\xfd\xc3\xe5\x32\x1a\xe8\x59\xe9\xf7\xcb\x5f\xf2\x3c\xfe\xad\x92\xbe\x78\xf5\x24\xca\x24\xfb\xbf\xed\x03\xfd\x38\x9f\x14\xdb\x8e\x06\xb1\xd9\x5b\x81\x6f\xf6\xfe\x9a\x65\x56\x32\x31\x1f\x28\x19\xee\x6a\xc1\x02\x60\xb5\x6d\xa8\x60\x69\x1b\xa3\x93\x60\xcc\x6b\x2d\x90\xc0\x90\x3e\x7b\x31\x51\x7e\xe4\xd2\x86\x41\xef\x65\xd1\x66\x9f\x26\x61\x35\x2b\x2d\x8c\x77\xfc\x14\xad\xb9\x2c\x69\x72\xef\x51\xd6\x48\x9f\x83\x98\xe3\x41\xc0\xb3\x5f\x42\xdf\x1b\xdf\x40\x49\xad\x79\xe1\x5a\x72\x6f\x13\x2f\xd8\xd4\x6d\x4a\xe8\x29\xd8\x97\x1f\x65\xa5\x41\xe5\x68\x3e\x35\x62\x84\xa6\x7d\x30\xb8\xb7\x14\x76\xa1\xa2\x71\x56\x50\x25\xd0\xda\xbb\xb9\xae\xf0\x32\x77\x3e\xf9\xe0\x1f\x79\xdf\x2e\x2b\xb9\x03\x4d\xfe\x6a\x0e\x36\x2a\xbc\x86\xa3\x31\xd7\xec\x5d\xf2\xc7\x62\x16\xdf\x78\xd6\x18\xa8\xbd\x7f\x09\x23\x02\x2e\x56\x01\xd8\x04\x37\x8d\x71\x66\xa5\xb7\x30\x13\x5d\x7a\x99\xf3\x1d\x58\x8f\xce\xc5\x7a\xe7\x91\x6a\x97\xe6\x5d\x87\xc0\xb9\x55\x53\xa7\xed\xf8\x04\x1e\xd4\x52\x7d\x71\xfa\x34\xe7\xfe\xc5\xd9\xe8\x2f\x5a\x19\xc8\x1c\x96\x61\xe2\x9d\x87\x1f\xbe\x39\x19\xbf\xfd\x4f\x52\x60\xba\x6e\xcb\x39\x93\xad\x25\x91\xb6\x58\x58\x3a\xd9\xf0\xa8\x79\x8d\x9e\xef\x68\x6e\x22\x3d\x5b\x74\xe8\x94\x77\x07\x2c\xd0\xcd\x79\xf9\x1e\x0b\x5b\x49\x99\x5f\xd0\xa0\x2d\xe5\x43\x09\x99\x8e\x5f\x10\x03\x24\xe5\xec\x10\xf5\xba\xfb\xa8\x28\x19\xfc\x13\xe9\x38\xbb\xde\x54\x35\xb7\x04\x4b\xc9\xb5\x1f\x01\xaa\xe2\xd4\xd1\x24\x92\x08\x9f\x49\x8e\x95\x58\xdd\x1f\x88\xaa\xbb\x68\x10\xe5\x25\x96\x72\x4a\xf6\xee\xe3\x51\x91\x82\x6f\x00\xaa\x38\x96\x07\xfc\xf4\x6a\x1b\x81\x6d\x12\xb8\x8a\xb2\xbd\x85\xf4\x7b\x63\x50\xc6\x53\x8c\x0a\x85\x42\x04\xbd\xf2\x1c\xd4\xd0\x97\xae\x54\xbe\x3f\xbd\xdc\x59\x9d\x15\xce\x65\xad\xbc\x8d\x13\x32\xf5\x50\xcb\x94\x49\x51\x17\x63\xbe\xd0\x43\x22\x56\x49\x27\x8e\x9c\x77\xb5\x73\x8e\xcc\x91\xe3\x31\x5e\xfd\x14\xb0\xe5\x66\xad\xdc\x0c\x39\xe1\xde\xa1\x43\x44\x62\x85\x7d\xdc\x89\xc1\x5f\x2d\x0e\x82\x7f
\xcc\xb2\x5d\x3f\x0c\x0f\x8e\xc3\x62\xb4\xac\x1a\x36\x4d\x30\x29\x00\x66\xb0\x7e\xf8\xae\xf7\xb7\x0a\x0e\x8b\xb6\x77\x3d\x53\xf7\xe7\x68\x11\x57\x68\x67\x66\xdf\xea\x94\x1a\x48\x9a\xe6\x4a\xb2\x19\x35\x43\xda\xfb\x5e\xc2\x8b\xed\x00\x8d\x68\xbe\xe4\x9f\x18\xb2\x6d\x62\x90\xd3\x6c\xc4\x0f\x2c\xc4\x2f\x54\xdb\xf6\x57\xb4\x80\xfe\x19\xce\x84\xb1\x30\x97\xab\xd1\x63\x4b\xb4\x69\x60\xf6\x9e\x89\x2e\xb0\xed\xbb\x99\x77\x7d\x38\x8a\x97\x53\xeb\x99\x2b\x71\xc6\x43\xe8\x1b\xfb\x43\x5d\x28\x47\xcd\x1a\x64\x3b\x12\x58\x3b\xfe\x7a\x60\x3d\xcb\x75\xf0\x6b\x15\x1d\x11\x16\xae\x16\x52\x9c\x7b\x6c\xd0\x63\x2d\xf0\x70\xb9\x41\xdf\xc7\x23\xc5\x72\xb0\x3e\xad\x3a\x95\x30\x69\xe4\x39\xd6\x07\x56\x75\x25\x52\x47\x87\x6c\x59\x4b\xab\x82\xeb\xb4\x6b\xb3\xf3\x30\xe3\xf5\x59\x56\x34\xcb\xc0\x92\x96\x13\x10\x7e\xf4\x51\x74\x06\x98\x71\xae\xbd\x89\x12\xd3\xf0\x65\xeb\x00\x1a\x21\xff\x75\x88\x4f\x04\xa7\xe2\xcd\x13\x41\x74\x36\xcb\x93\xaa\x1a\x9c\x1c\x10\x82\x9a\x0c\xde\xa0\xcf\xaf\x07\x59\x49\xae\x81\x56\x3b\xf5\x4e\x30\x1a\xaa\x06\xf7\xf9\xc2\xd3\x65\xdd\xd4\x1e\xd4\x04\x71\x74\x8f\x31\x0f\x1b\x5c\x4c\x4e\x0b\x27\xc3\xcf\x9f\xe5\x92\xc0\xa7\xf8\x93\x87\x65\xb1\xe5\x59\x1f\x2b\x1c\x52\x63\x7b\xd1\x94\x45\x39\x68\xc5\xfc\x51\xef\x89\x64\x93\x41\xc6\x99\x30\xe5\x65\xcb\x89\xf0\x16\x35\x9f\x00\xa2\x97\x45\x22\xef\x7e\xe1\x5f\x4b\xa5\x7a\x36\xa1\x44\x89\xe5\x4d\xcd\x2a\x77\xaa\x1a\x1c\xac\x37\x76\xa7\x96\x60\x2c\x04\xef\x15\x4b\xdd\x8c\xee\x5b\xe9\x10\xe3\x11\x5e\xce\x32\xb2\xcc\xd9\xc3\x52\x97\xea\x13\x13\x4f\x78\x04\x86\x86\xdb\xbd\x5c\xcf\x54\x2f\x91\xea\x29\xe6\x10\xfd\x66\x9a\xd2\x08\xa1\xd0\xb7\x6d\xfb\x35\x0c\x05\xd0\x08\xd7\x56\x68\xcc\xd6\x23\x90\xa7\xde\x8a\x14\x07\x9b\xc4\xf5\x7d\x3f\xb0\xff\xff\x78\x2c\x99\x27\x20\x1a\x1a\x46\xfd\xa6\x7b\x30\xa1\x8d\xd1\x4b\x9a\x88\x52\x93\x57\x75\x17\x30\x2c\xec\xf4\xff\x49\x9b\xf9\xd0\x59\x83\x8f\x36\x33\x25\x51\xb3\x69\x0b\xd3\xfb\x96\x2e\x17\x37\xb7\xa9\x30\xc5\xaf\x78\xdf\x12\xe1\x42\xef\x00\x6a\x7f\x31\xfd\x6c\xcd\x6f\x74\x3f\x27\x87\x83\x68\x6b\x3e\x51\x39\xec\x64\x7a\x8f\x1a\xaa\x3f\x82\x46\x6e\x2c\x4c\x43\xe8\xb2\xc3\x1f\x95\x7b\x7d\xc2\x69\x4b\x35\x17\xe4\xb9\x51\x01\x93\x90\xbb\x72\x78\x09\xb0\x9e\x0a\xa5\xe9\x2c\xdf\xa6\xe8\x0e\x93\x9c\x7e\x21\x8e\xf4\x70\xd2\x5e\xb2\xd7\xc3\xc3\x83\xd4\xbd\x82\x47\x52\x96\xe8\x87\x59\x6f\x1a\x83\x5d\xc9\x2b\xd5\xa2\xcb\xa5\x96\xa1\xc3\xcd\x1e\x1a\xc1\x29\xf1\x0b\x79\x97\x8f\x4c\xe7\x99\xa4\x36\x65\xa5\x68\xdb\x4f\x24\xd2\x15\x3c\x7b\xae\x8d\x6c\x41\xce\xe5\x7b\xe4\x5b\x0e\x1b\x15\x3d\x55\xe1\x1a\xa9\xae\x35\x2f\x8e\xa8\x73\x0f\x93\x26\x34\x33\xc0\x9f\xe1\xae\x3f\xaa\x0c\x23\x52\x22\x29\xb3\xf6\x9f\x60\x91\x47\x30\xa5\x41\x85\x4f\x9a\x7a\xce\x21\x00\x86\x0e\xd6\xf2\x90\x7e\x0c\xc2\x75\xdc\xe2\x38\x51\x5f\x6a\x4d\xd2\xde\x54\x33\x73\x27\xf2\x4c\x8b\xe4\x5e\x23\x1a\x56\x84\xa5\x0c\x15\x30\x12\x56\x5f\x7b\x15\x58\xea\x7b\x9a\x72\xb3\x30\x62\x94\x7d\x4b\x72\x83\x1b\x50\xc7\x02\x15\xf0\xaf\x29\x40\x77\x86\x05\xab\x37\x55\x39\x04\xce\x2f\x8d\x7e\x54\x32\x00\x84\x5e\xc0\xcc\xcd\x56\x38\xb8\x30\xf0\x7d\x00\x40\xc3\xaa\x62\xef\xcf\x4e\xd6\x20\x99\x72\xc7\x26\x47\x6c\xd1\x38\xc7\xb9\x8a\xaa\x63\x8a\x02\xe8\x08\xac\xc6\x0a\x8e\x87\xbd\x41\xca\xd7\x23\x1d\xfa\x21\x1f\x6a\x61\x7b\xa2\x70\xdd\x17\xec\xf9\xbb\xee\xe1\x13\xa2\x27\xb5\x86\xa0\x37\x32\x65\x7c\x00\xa2\x52\x6c\xe3\x63\x74\x07\xa9\x7c\x6b\xd9\x96\xd3\x4b\xff\xde\xd5\x84\xba\x7d\x35\xc9\x74\x32\xc0\x5e\xd1\x34\xb1\xab\xd3\x5f\x96\xbd\x92\xcc\xb9\x47\x82\x19\xe8\xc1\x0f\xb4\x48\x74\x7d\x93\xc2\x19\x3a\x14\x5b\xb6\x35\x70\x5b\x75\xa8\x27\xb8\x3a\xdf\x28\xdf\x76\x23\x62\xc5\x17\x54\x65\x5
8\xf0\x59\x4f\x5e\x12\x3f\xa7\x80\xcf\x6a\x0b\x5f\x42\x52\xc1\x33\x93\xc4\x82\xeb\x95\x9f\xb3\x0a\xf4\x8e\xf3\x5a\x8d\x61\x87\x88\xbe\x97\x5e\xb7\x43\xb5\xa1\xb9\x83\x8f\x94\xc7\xe4\xce\xf8\xc3\x5a\x3b\x40\x0f\xfd\xb2\x92\xb0\xa8\x4d\x39\x55\x56\xf7\xbc\x4e\x8b\x79\x9b\x1f\x46\x38\xdf\xae\x79\xaf\x86\x02\x2b\x22\xe4\x43\x30\xbf\x07\x7d\x77\xf8\x37\x58\x01\x2e\xd4\x5a\x7b\xe0\x56\x07\x87\xec\xae\x6a\x80\x77\x15\x07\xd4\x6d\x31\xef\xef\x72\x4c\xa1\xbb\xcb\x16\x40\x98\xaa\xea\xbd\xa5\xa6\x18\xf9\x23\x5d\xfa\x20\x7a\xb3\x33\xf5\x51\x65\x53\x8e\x7f\x9b\xa8\x6c\x67\x28\x91\xb5\x61\xfe\x2c\xb8\x8a\x56\xaf\xd0\x4e\xb3\xd6\x69\x44\xe5\x8a\xb4\x8a\x10\xec\x8c\xcd\x00\x6e\x67\x09\x6d\xd6\x63\xed\x73\xf5\x80\xc0\x1e\xae\x2d\x7d\x47\x93\x1e\x2b\xf9\x15\x47\xad\xb7\xff\xd9\x41\xf2\x5f\xd6\xf9\x3b\xeb\xf3\x6a\x36\xf5\xa2\x81\x46\x51\xd0\x6c\xe4\x18\x51\x82\xb5\x41\x8a\x30\xfd\x53\xf5\xb0\x45\xad\x11\x23\x48\x75\xbc\xb4\xdc\x77\x8b\x8e\x04\xfc\x4f\x8f\x36\x54\x13\xbe\x51\xf7\xee\xa3\x1b\x87\x8d\x72\x86\xdd\x48\x5b\x1f\xb2\xf3\x8a\xf4\x94\x87\xcc\xb6\x46\x63\x16\x8a\x1a\x53\x35\x4b\xb1\xd2\xaa\x52\xef\xf7\xa5\x48\xf9\x1e\x3e\x7f\x36\x23\x06\x27\xdb\x97\xc2\xa7\x92\x2f\xfa\x42\x45\xf7\x69\x5c\x67\x8c\xe8\xd7\x28\x2c\x52\x07\xbd\x92\xfb\x8d\x68\xe1\x0a\x5e\xd7\x6d\x10\xdc\x8f\x37\x0e\x00\xa9\xc1\x94\x02\x78\xc1\xc4\xf4\xc6\xa0\x1e\xf6\x97\xcb\x1e\x7d\xb2\xc3\x73\x0e\x68\xe6\xf5\x6e\x61\x45\xba\xae\x66\x96\x13\x7c\x2a\x22\x13\xdc\xb8\x0b\x50\x5c\x6a\x6b\xfa\x5a\x96\x58\x3e\xee\xe9\x72\x01\x74\x84\x07\x7f\x5c\xa3\xb7\x72\x9a\x95\x0f\x2c\x14\x62\xa1\xd8\xef\x22\x63\xcd\x55\x9b\xa7\x5c\x17\x81\x62\xb4\x4f\x37\x1f\x5e\xc2\xf5\x69\xb9\xb8\xf4\xec\x65\xc4\x01\xb9\x57\x64\xd7\x8e\x71\xa2\x7b\x5c\xe3\xf4\xa2\xcd\x95\x06\x1b\xa3\x85\x65\xab\x5a\x28\x00\x87\xeb\x14\x12\x7c\x67\xe3\x85\x2b\x25\x50\x26\x93\x08\xbf\x6b\x34\xea\xfd\xa4\x2d\xc7\x28\x09\x36\xfe\xb1\xba\x57\x4d\xcd\xaa\x83\x89\x39\x05\xfb\x7d\x31\xe8\x10\x23\xbe\x54\x11\xf2\x29\x92\x6e\x90\x16\x98\x43\xeb\x7d\x6f\xc9\x54\xe2\xd4\x59\x92\x80\x40\xc4\xf0\xd8\xde\x07\x2b\xa3\xac\x12\xc1\x58\xa7\x6b\xd8\xe0\xf0\x3f\xee\x81\x34\x0f\x4d\xbe\x95\x3a\x7e\x21\xe7\xb1\x72\xe0\xac\xd7\xbb\x82\x72\x79\x4f\x31\xb4\xc7\xc4\x3a\x96\xb0\x38\xe1\xcc\x64\x84\x02\x87\xe7\x23\xf1\x35\x2c\x29\xec\xba\x31\xf1\xb2\x1d\xc3\x26\xd5\x4e\xd2\x49\xb9\xca\x96\xff\xb8\x1a\xe5\x73\xee\x02\x04\x3f\xd7\x5e\x70\xa9\x28\xdc\xaf\x4d\x55\x43\x8b\xde\xcd\x29\x7a\x61\x6d\xed\x8a\x39\xb5\xac\x4e\xf3\x59\x80\x9d\xe9\x7e\xc5\xfa\x49\x0c\xf1\x69\x0c\x26\x5e\x25\x9e\x4e\x69\xd5\x40\x45\xdd\xaa\xe7\xf8\x4a\x42\xb1\x1b\x5f\x23\x21\x42\xe9\xce\xc1\xc6\x77\xab\xd0\x6d\x40\x1f\x6d\x40\x60\xf4\x3a\x62\x5d\x8c\x52\xa6\x4e\x28\xf5\x51\xdd\xa6\xc1\xa6\x11\x32\x8b\x70\x8d\x60\x43\x63\xcb\xef\xab\x58\x48\x1d\xc4\x56\xf4\x3b\x9c\x68\xd7\xaa\xa0\x7c\x1e\x0e\x3a\x0f\x42\x50\x05\x2c\xa5\xf6\x48\xd8\x23\x62\x38\x13\xe8\xb6\x47\xf8\x1d\x82\xb2\x63\x04\xff\x23\x9d\x8c\x1a\x0f\x8c\x17\x38\xc4\x61\x6b\xdf\xee\x30\x1e\x80\x8f\x25\xd8\xe8\x4c\x07\xb6\x68\x72\x26\x8c\xb5\x2b\x0d\x9f\xfe\x16\xc4\xf3\x83\xfd\xa9\x59\xf8\xb6\x25\x0b\x97\x9c\x93\x8d\x24\xcb\x17\xbd\x47\x87\x55\xba\xa7\x25\x03\x1c\x5b\x4b\x0b\xbb\xe6\xb8\x71\x34\x9e\x14\xa5\xc1\x13\x8b\x81\x37\xdc\x23\x3d\xa5\x21\xd5\x47\x83\xee\x35\xc4\xbd\xe1\x76\x1f\xca\x93\x5d\x4f\xa5\x6c\xaf\xc1\x0c\x5f\xfc\x9b\x08\xc9\x30\x2d\xf6\xd9\x58\xea\x63\xb2\x09\xcf\x70\x02\x6d\x31\xf4\x14\x66\xb8\x75\x6b\x41\x06\x7d\x5e\x95\x53\xed\xe3\xa6\x53\x2b\xf7\x26\x61\x2f\xbf\xe8\x41\x38\xe7\x2c\xc1\xbf\xfa\x88\xe3\x81\x17\x03\x72\xf3\xb1\xe9\x33\x89\xee\x6c\x23\x16\xe3\xd0\xdf\xdd\x63\x2b\x
6f\x25\x6b\xb5\xe5\x0a\xdd\x85\x4a\xeb\xe9\xf1\x78\x08\x3d\x94\xb3\x45\xfe\xec\x7b\x93\x05\x7a\x37\x16\x13\xb4\x46\x75\xd9\x44\x09\x6d\xc6\x0b\xe1\x80\x09\x8f\x58\xb5\xf6\xbf\x54\x9b\x6d\xb7\x00\x1f\x99\xda\x8a\xf1\x89\x51\x64\xd4\xb9\xf6\x30\x4c\xa6\xc2\x97\x3c\xcd\xe1\xa1\xfd\x9b\x7c\x8c\x7d\xb2\x95\x68\x3f\x89\xc1\xac\x5c\xa7\xb3\xfb\xcf\x14\x63\x14\xda\x3d\xa8\x87\xec\x10\xf1\xf5\xcb\x3d\xf8\x2e\x73\x25\xb6\x9a\x8d\x33\x17\x8a\x7e\x1d\x55\x64\xe5\xad\x36\xe3\xf1\xd2\x79\x24\x73\x62\x1c\x64\x8a\x7b\xc0\xc7\xe0\xc6\x9c\xeb\x23\xdf\x31\x31\x0b\xa1\x57\xd5\x24\xa6\x2d\x25\x15\x1e\x70\xd1\x51\x97\x8c\x78\x22\x2f\xfb\xad\x91\x72\x00\xa5\xc9\x3f\x3e\xd3\xa2\x1f\x75\xc8\x4c\x4a\x14\xee\xa3\x5f\x77\x9b\x3f\x33\x42\x61\xb5\xb3\x0e\xbb\xca\x16\x9d\x63\xc9\x06\x66\x7b\x3e\x21\x0e\x60\x79\xba\x76\xcc\xab\xd8\x28\x7b\x08\x54\xed\x93\xcf\x43\xd3\xc3\x55\xd3\x8c\x55\x07\x90\x23\x1b\x75\xc6\xca\xde\xd7\x3c\x1e\xc7\xa5\x88\x4f\x13\x8a\x7c\x03\x4b\x78\x74\x99\xd2\x42\x7c\xe7\x28\xff\x72\x62\x3c\xba\x68\x93\xed\xba\x4d\x82\x50\x22\x87\xe4\x4d\xd3\x84\xb9\x4f\x08\x6f\x40\x4f\x7d\x33\xe4\x73\x4a\x7f\x2f\x0e\xe6\x45\xe7\xe4\xb7\xfa\x5b\x7f\x72\x79\xbe\x28\x5d\xb8\xb1\x08\x12\x16\x14\x61\x91\x0a\x95\x06\x8a\x5b\x91\xf6\x6a\x7e\x9d\x62\xf3\x93\x53\xee\xb9\x4c\xde\x03\x3f\x9a\x5d\x0f\x4d\x95\x22\x5d\x5e\xdf\x8f\x55\x67\xbb\x5a\x08\xa9\x37\x76\xbf\x07\xc1\xfc\x72\x40\xe8\x6d\xe5\x65\x9d\xd4\xf2\x2d\x1d\x9f\x41\x4a\x56\x40\x09\xa8\x20\x3a\x43\x15\xca\x8a\x7c\xb5\xc2\xab\x9e\x39\xbc\x93\x07\xf5\xce\xca\x99\x62\x4c\x01\xd0\x76\x26\x43\x2c\x71\x66\x01\x88\xc5\xab\xcf\x56\x2a\x3a\xa7\x96\x46\x3a\x7e\x9a\xd5\x95\xdb\x21\x2c\x55\xe0\x1c\xa6\x24\x0f\x62\xb3\xbc\x59\xca\x35\xa3\x48\x5f\xd6\xcb\x38\x24\x01\x0c\xb6\x6b\x42\x54\xaa\x8b\xf9\xa3\x75\xe0\x07\xf2\x07\x3a\x0b\x96\xe1\x8d\x41\x80\xf3\xa2\x29\x7d\x34\x8d\x40\x59\x83\x2d\x04\xa1\xe6\xb0\xba\xfc\xc9\xfb\x10\xc8\x7f\x16\x2d\x16\xe8\xc0\x6b\xd8\x4e\xf2\x80\x4a\x7b\x54\x5a\x8e\xc2\x23\x53\x4f\xb4\xa3\xc2\xba\x65\xcd\x47\x08\x02\x22\x23\x48\x95\xd4\x55\x4d\xcb\xc7\x40\xbe\x6e\xb6\x2c\xb6\x0c\xe8\x45\x3c\x46\x7b\x21\x78\x03\xae\xd0\x7c\xd4\x96\x77\x8e\xf0\xad\x3e\xda\x67\xdd\xad\xc7\x7d\xa6\x12\x31\x15\x83\x1e\x80\x02\x48\x10\x04\x9c\xae\x89\xf3\xd2\x47\x06\x30\x50\x07\x64\xd9\xc3\xab\x4c\x2e\x65\x0a\x19\x99\x43\x52\xf3\x6c\x9a\x67\xd7\x83\x3c\xdf\x1e\x89\x7a\x2e\xf7\xbe\x37\x6f\xeb\xd3\x87\x72\x23\x35\x7d\x62\x08\xa4\x8f\x61\xae\x33\xcc\x7e\xf9\xcd\x9e\x29\x75\xc0\xf8\x06\xeb\x75\xf9\x82\x44\xf5\x32\xdf\x17\x1d\xbf\x3b\xeb\xb8\x8e\x6a\x55\xe6\x17\x40\xdd\xdc\x1b\xbd\x14\x75\x40\x6f\x4f\x01\x81\x18\x4b\x30\x73\xdf\x8c\xfe\x33\x31\x73\x90\x00\x58\xb6\xb5\xcc\xa5\x23\x59\x0e\xd2\x2c\xd8\xed\xbd\x0e\x8c\x88\x86\xd0\xad\xaf\x7e\x30\xd0\x82\xcd\x4f\xfd\xc4\x1b\xcb\x43\xcf\x28\x5a\x02\xdf\x19\x61\x00\x2b\xd3\xe6\xa5\x2e\x51\xf4\xf2\x72\x4c\xaf\x28\x70\x78\xf3\x71\xb9\x54\x08\x05\xb9\x8f\x7a\x73\xc4\x94\x39\xcd\xc1\x61\x2e\x72\x1d\x1d\x6c\x46\x03\x29\x36\x48\x5d\x3c\xfb\x7f\xee\x70\x9d\x23\x5c\xb4\x09\x66\x1a\x3a\xe1\x18\xbd\x5b\x06\x1d\x96\x89\x2e\xfe\x72\xb8\x88\x80\xd9\x7f\xe8\x16\x97\x01\xdb\x82\xa0\x3b\x1b\x59\x56\x0e\x45\x08\xe0\xd8\x13\x07\x79\xf9\xa0\xc2\xa6\xa1\x74\x40\x2c\x78\xbb\xc1\xc9\x80\xab\xe5\x27\xeb\x56\xc3\x27\xfa\x5e\xe9\xbc\x6d\x4c\x29\x7b\x8b\xe5\x45\xa3\xe9\xb0\x67\x07\x18\x2e\x85\xbe\x7a\x0e\x75\xd5\xf2\xe3\x26\x44\xe6\x89\x26\x33\x67\x2c\xfa\x82\x91\xbd\xff\x6b\xde\x8b\x67\x7f\x92\x8d\x06\x34\xad\x51\xf1\x89\xa4\x7a\xb2\x46\xda\x68\x1d\x16\x80\x2d\x24\xb8\x34\x1d\xce\xd3\xc2\x0a\x3e\xc6\x59\x2c\x42\xc5\x5a\x1b\xb9\x7d\xc2\xac\xe7\xc6\x76\xe2\x45\x53\x00\
xfe\x78\x32\xee\x1d\x0a\x79\xab\xd3\x3c\xe0\x30\x21\xe0\xa5\x32\xd5\x84\x26\x34\xfd\x0c\xc7\x48\xe2\x51\x09\xff\x86\xb3\x7f\x0c\xe4\xef\x66\xd5\x0b\x9f\xc6\x3a\x73\xb7\xb1\x02\xdb\xb6\xe6\x6c\xc3\x08\x18\x38\x1d\xad\xa6\x90\x2f\xa8\xe6\xf2\x47\x12\x78\xc2\xb4\x94\x7d\x39\x55\xbb\x2e\xa5\x36\x90\x9c\x81\x8e\x46\x89\x36\xc7\x91\x0b\x06\xb2\x64\x9f\xbe\x2d\xa7\xc5\xab\x48\x61\x45\x8c\x5e\x89\x94\x19\x80\x42\x82\x07\x2c\x4f\xe7\xf0\xda\x2f\x0b\x69\x74\xef\x3e\xe9\x37\xc8\xcf\x51\x8f\x64\xcb\xbf\x96\x8d\xca\x6c\x66\x5b\x38\x2a\x60\x6a\x98\x5d\x65\xb7\x00\x01\xaf\x08\xce\xe5\x57\x93\xf2\xa6\x53\x79\xb5\x28\x0c\x8c\xc3\xfc\x74\xb7\x25\xe3\x1c\xde\x57\x98\x27\x4b\x0e\x26\xa4\x5d\x60\x5b\x7c\xd9\x61\x82\x5e\x35\xab\x23\x89\x53\xd8\xad\x1a\xc1\xc8\xd3\x97\x39\xbe\x67\x52\x9e\x46\x29\xf2\xe1\x40\xb6\x78\xcb\x18\x20\xb6\xd5\x73\xf3\x92\x10\xa5\x4a\x79\xf3\x57\x8f\x03\xb7\x1a\x90\x21\x93\xd3\x89\x8b\x0c\x65\xea\x09\xbe\x89\x1a\x54\xe7\x6d\x2d\xad\x16\xf3\xe8\x97\xbb\xcf\x52\x88\x5d\xf1\x63\x88\x35\xeb\x95\xa5\xca\x56\xcd\xe7\x11\x35\xd1\x7c\xb6\x83\xca\xd0\xd3\xbb\x77\xa6\x06\xab\x5b\x98\x18\x39\x08\x89\x35\xc3\x04\xe5\x46\x14\xdf\xe1\xa3\xf5\xe8\xc3\xc5\x3c\xa8\x71\x17\x74\xaf\xde\x4f\x8f\xaa\x18\x33\x85\x8f\x28\xef\x8d\x44\x8d\xc5\x58\xeb\x6d\xab\xb4\xb1\x62\xfb\xa7\x1c\xc8\x3e\x9c\x73\xf1\x20\x3e\x5b\x0d\xc0\x38\x8b\xbc\x99\x66\x8d\x47\x9e\x06\x52\x3f\x93\xb8\xe8\x3e\xb8\xab\x60\xc1\xcf\xb9\x31\x2c\x7c\x35\x3c\x7f\xce\xc9\x73\xf1\x1f\xb9\x90\x61\x2e\x16\x86\x0d\x3f\x08\x06\x77\x09\x9f\xfe\x44\x3f\x26\x20\x20\x37\xfb\x8e\x86\xb0\xc4\xf2\x8d\x5e\x1a\x69\xcb\x5c\x19\xdc\xf7\xb0\x9a\xfe\xc2\xd6\x52\x94\x7b\x5e\x0f\x64\xe5\x21\x67\x3a\xcc\xf3\x2c\x0d\x9b\xfb\xe1\x7a\xb5\xec\x00\x16\xa5\xd4\x98\xe0\x2f\x3e\x3a\x22\xb6\x59\xa6\xdc\x58\x9c\x72\xc6\xc6\x02\xf3\x12\x53\x2c\xa1\x7c\x3f\xdc\x47\x41\x58\x57\x21\x4b\x15\xd0\x3b\xe0\xfd\x3a\x99\x43\xba\x46\xad\xd7\xa3\x9a\xf3\xf6\xcb\xcb\x1a\x06\x9a\xe0\x05\xbf\x25\x48\x26\xd8\xa9\xa6\x0c\x77\x25\xef\x13\xcb\xa5\xdf\x4d\x64\xc1\x75\x79\xd1\x5a\xca\xbd\x20\x5a\x97\xe3\x37\x2a\x6b\xe1\x11\xa3\x91\xa2\x47\x5c\x39\x75\xe3\x3c\x56\xa7\xa6\x0e\xe2\x83\x6e\x85\x74\x63\xaa\xe8\xc4\x35\x57\xd4\xe2\x37\x03\x9c\x4a\x39\x5c\x60\xb0\xe8\x21\x17\xea\x5e\x4f\x77\x50\xde\xba\x6d\xfb\xea\x55\xa5\x32\x3a\xfb\xa4\x45\xdb\xb6\xd2\x08\xed\x05\x09\xa1\x4d\x41\xd2\xdb\xe9\x6a\xff\x0a\xd1\x6d\xf9\xb0\x4e\x12\xa0\x7b\x23\x33\xba\x04\x02\x6d\xaf\x02\x75\x72\xdf\xa0\x4d\x83\xa5\xb5\xda\xa4\xc9\x1a\x91\x3d\x66\x68\x8f\xf1\xba\x74\xab\xe2\x82\xc8\xc3\x46\xaa\x07\xe8\xd4\x4a\x20\xa3\x97\x8a\x5d\x93\x02\x77\x80\xfe\x27\xd3\x15\x09\xfe\xa7\xf3\x7b\xdc\xc1\xa3\x34\x98\xed\x12\x1e\x0b\xd7\x3a\xd4\x91\xac\x94\x13\xfc\x62\xc7\x90\x5e\x49\x10\xe5\xaf\xf0\x11\x72\xf2\xf5\xba\x38\x72\x83\x70\x91\x90\xb6\x7f\x88\x3a\xa4\x90\xa1\x8d\xef\x81\xff\xeb\xe0\x3b\x5e\xd6\xbf\x27\x31\x0d\xbf\xdf\x66\x40\x95\xc5\xe3\xdb\xae\x93\x8b\xa4\xca\xe1\xbe\x42\x47\x66\x03\x8a\x5e\x69\xc7\xf9\x37\x0b\x20\xc2\xfa\xb4\xad\x88\xc4\xd1\x03\xa1\xe3\x32\xef\x3e\xb2\x50\x7f\x07\xbc\x20\x1f\x5a\x2d\x2e\x82\xc3\x4d\xa0\x7a\xf8\x90\xf0\xd5\x88\x24\x4e\xbf\x03\xbb\xee\x13\xf8\xe0\x4c\xd2\x82\xa7\xf5\x1f\xd8\x3f\x30\x8f\x12\x10\xfc\x6d\xb5\xf8\x70\x48\x70\xb9\x2c\x13\xee\xfd\x6c\x70\x05\xb5\x95\xf6\x82\x23\xf9\x55\x3d\x6b\x03\xb2\x58\x62\xac\x5e\x03\xdc\xca\xd0\x41\x43\xbe\x2f\x93\x85\xa5\xdc\x1e\x72\x1b\xd8\xe7\x91\xe9\x1f\x55\x18\x55\xdc\x96\xb3\xbc\xab\xbd\xde\xa5\xb3\xbd\xed\x3c\x7f\xf9\xdc\x26\xad\x66\xb9\xe5\x95\x22\x40\xc4\x77\x40\xde\x46\x1d\xdf\x9c\x81\xfa\xf4\x36\x68\x0e\x22\x44\xe1\x7d\x84\xdc\xc9\xd8\x47\xd3\xca\xf3\x5b\x72
\x8d\x39\x5c\x11\xb6\xdc\xc5\x81\x1b\xc2\xd6\x5b\xff\x45\xcf\xc5\xa9\xb9\x6d\xfd\xd7\x5d\x6c\xe8\xe8\x0e\x87\xbf\x29\x2c\xc9\x52\x1c\xe9\xa5\xe2\x48\xf6\xf9\x03\xbc\xb1\x3b\x85\xae\xf4\x80\x51\x4f\x67\xe3\x58\x6a\x48\x15\x8b\x44\xce\x7c\x89\x55\xfe\x86\xa1\xa2\xb2\xfb\x14\xff\x0b\x73\x1c\xaf\x5f\xf9\x59\xe1\xf3\x25\xfe\x86\xf4\x73\x73\x55\x06\x6b\x28\xe6\x73\xf6\x18\xb3\x46\x39\xb1\x78\x3a\xd9\xbb\x1e\x2f\x01\xf6\xea\x27\xad\x68\xcb\x4d\x6a\xcb\xac\xd6\xae\x5b\xc3\xd9\xa6\x7c\xc7\x6e\xca\xae\xe3\xc3\xa9\x40\x0c\x51\x57\xbe\xf3\xb2\x80\xa6\xbe\xdd\x9f\xca\xa5\x11\x0a\x12\x65\xbd\x8a\xaa\x9b\x37\x2b\x00\xe0\xf0\x07\xd2\x44\x7d\x04\x60\x2e\x33\x40\x8e\xd2\xc3\xaf\x79\xc9\x72\x86\x70\xf3\x92\xeb\x5f\x57\xc9\x4f\x2a\xc5\x23\xc1\x5a\xca\x4b\x34\x52\xba\x3c\xf7\x68\x44\xce\x00\x6c\xbb\x7f\x3d\x85\xb3\xcc\x0c\x72\x37\x1b\x17\xb1\x53\x29\x90\x82\x32\x70\xe9\x93\xd9\xf7\xb7\x40\xf9\x9b\x72\x35\x43\xcc\x39\x20\xff\x3a\xf9\x7d\x07\x3e\x42\x97\x35\x55\x53\xcf\xa3\xf2\xef\xf9\xcf\x98\x3c\x73\xc1\xf4\xb0\x43\xef\x46\x65\xfa\xd2\x91\x41\xc4\xb5\x95\xde\xa8\x1c\xc5\xdf\xfe\xc9\x53\x94\xe3\xa5\x06\x99\xb9\x64\x00\xd1\xaa\xbc\xd3\x90\xd6\xdc\x52\x38\xea\xdc\xd3\x71\x5e\x30\xce\xee\x9c\x1e\x17\x5b\x60\x47\x72\x1b\x3f\x0a\x79\x8f\x48\x03\xa4\xc9\xc9\xb6\xc0\x5c\xb5\xb9\x86\x72\xdd\xf3\x3c\x18\xce\x05\xb8\x1e\x7e\x32\x43\xc9\x73\xa2\x1d\x8e\x1d\xca\xbd\x3c\x60\xe4\xc0\x3a\xbe\x7c\x62\x84\xc5\xc0\x5c\x72\x37\x6f\x6b\x39\x9f\x4f\xbe\x0e\xb5\x3c\x59\x9c\xb3\x76\xbf\xa0\x31\x65\xbb\x50\xc1\x37\xd8\x5b\xa2\x74\x25\xb7\xb5\x1d\x1b\x1e\x25\xe1\x46\xee\x45\x01\xee\xd3\x0a\x67\x25\x79\x70\x4f\xef\x6e\x06\xe1\x7d\xfc\x6b\x6d\x2a\x26\xfe\x77\x97\xf8\x7d\x67\xf6\x2d\xab\xf9\xb1\x5a\x2e\x67\x68\x7e\x0a\x26\xa3\x57\xec\x4b\xd4\xd6\x26\xc1\x98\x7e\x0e\x15\x17\x16\xf7\x37\x53\xbd\x0c\x8c\x31\xd9\x84\xd3\x56\x65\x10\x12\xb7\xf2\xd4\xd6\x40\xc4\x2e\xb7\x21\x44\x88\x64\x02\xda\x19\x5e\xe7\xa6\xaf\xeb\xa5\xff\xb8\x82\xb3\xc7\x2c\x57\xfd\x23\x9b\x18\x36\x7c\xba\xad\xfa\x59\x3a\x1d\x93\xc0\xbf\x11\x35\xbb\xec\x2b\xe9\x6c\xff\x1b\x45\xba\x74\x06\xea\x14\xb7\x87\x77\x4a\x55\x13\x5e\xf6\xf0\x94\x45\x0d\xbf\x56\x78\x4d\x8f\xdc\x4f\xa9\x03\xd0\x86\xee\x7b\x4f\x9e\x10\xc0\xb2\x5d\xb7\xe5\xd7\x4e\xff\x92\xad\x87\x7a\x57\xfb\x4e\x7b\x5b\x5c\x58\x0b\x46\xd6\x37\x0e\x83\xb7\x2b\xf7\x00\xc2\x2d\x24\x8b\xa7\x6a\x47\x2d\x13\xea\x1b\x49\x64\xfd\x5c\x45\xfe\x12\xdb\x99\x29\xa4\x63\x84\x6d\x2d\xe7\x44\xf2\xa4\xf8\x9c\xa4\x6e\x5c\x0e\xaf\x98\xe4\x03\xbb\x3a\x56\x0d\x83\xa4\x5b\x4c\x6c\xd5\x40\x80\x7b\xa0\x16\x41\xc7\x6a\x66\x4a\xa4\x42\x0f\xd5\x42\x4e\x4d\xb2\x0f\x7d\x68\xfa\x97\x4c\x85\x19\xc3\xd9\x59\x1c\x10\xc0\x0b\x4b\x7a\x6d\x82\x83\x55\x85\x83\xdd\x77\xca\xc6\x43\x9b\x98\x78\x64\x15\xfb\x24\x8b\xaa\x09\x90\x2c\x47\x51\x5e\x37\xeb\x6d\x43\x3b\xb0\x84\xfd\x4d\x63\x20\xf1\xd5\xf8\xf9\x7f\x94\xd4\x7c\xbe\xa6\x47\xef\x51\x86\x45\x6f\x28\x11\x2a\x35\xa1\x75\xbe\x97\xe3\xd6\x39\x0b\x78\xa3\xd3\x63\x13\xfb\xe6\xaa\xf9\x5e\xbc\xa8\x6f\xae\xe2\x9f\x2e\xe2\xed\x99\xa8\x5c\xe2\x5d\xf6\xf5\x8d\x15\xfd\x8e\x39\xa2\x97\xbc\xa1\x30\x84\xab\x7c\x51\x2d\xf4\x2b\xb4\x10\x8d\x9a\x42\xd2\xe9\x6c\xf6\xe3\x13\x1f\xb5\xb2\x27\x20\x34\x6a\x6e\x4c\x68\x42\x56\xe7\x18\x36\xb6\xf8\x61\x64\xb3\x55\xaa\xf1\x66\x26\x82\xf3\xa7\x40\x16\x25\x55\x0d\x57\x1f\xa0\x56\x58\xfb\xea\x52\x18\x92\x5b\x4c\x26\xb2\x7a\xf7\xfe\x71\xc9\x55\x35\xb0\x1e\x65\xa9\xb9\x03\xc8\x6a\x63\x6f\x68\xaa\x3a\x9d\xd8\x64\x96\xcd\xb6\xbb\xe7\xc9\x9e\xe1\xed\x54\x3c\x16\x2b\x6e\x13\xc0\xac\x6c\x59\xd0\xaf\x1f\x92\x4e\xc8\x36\xfa\x1e\x1f\x32\xfa\x1e\xbb\xf1\x0b\x3d\xe9\xbb\x46\xb4\x5a\x85\x68\xab\xd0\x24\xc
e\x99\x2f\x14\x12\x7d\xf6\xf9\x2a\xb5\x93\x90\xac\xf7\x03\xf5\xb2\xc4\xdb\xf1\xaf\x24\x37\xd7\x13\x04\x20\x8f\x87\x4d\xad\x5f\xad\x0f\x4f\x76\xf3\x4d\x19\xc4\xd8\x99\xf0\x5c\x32\x4d\x04\x58\xeb\x37\x4c\x01\x36\x0b\xf0\x6a\xd0\x34\xa0\x51\xe8\xbc\x01\xc4\x29\x37\xed\x8f\x35\xb2\xe3\x75\x12\x73\x4e\xb3\x67\x61\x27\x57\x5a\x72\x28\x4f\xd8\x46\x43\x57\x57\x3e\x6a\x7e\xb4\x4f\xba\xe8\x20\x68\x09\xb6\x90\x27\xd3\xf4\xa1\xb8\x11\xc3\x14\x29\x5d\xbe\x4c\xf1\x11\x7c\x4c\xdd\x5f\xf2\xc1\xed\xb1\xd4\x91\x03\x78\xd3\x69\x92\x97\xfb\x92\xbf\x64\x0a\x60\xea\x27\x8d\x08\x52\x04\x85\x80\x5d\x9e\x7d\x18\x5e\x95\x1d\x94\xc5\x01\xd4\xfd\x29\xc6\xb0\xb3\xcd\x18\x4b\xaa\x20\x09\x9c\x39\x7d\x80\x99\xb2\xfb\xb1\x9a\xf4\x37\x4e\xa5\x35\x9c\x2e\xb9\x88\x82\x03\xc6\x84\xd3\x18\x92\xf5\xa3\xe4\x1e\xf5\xa4\x52\x31\x9a\x04\x38\xa2\x82\xd8\x9d\x24\x84\x87\xca\x13\x1c\x58\x7e\x99\x4f\xa1\x1c\x9e\x18\xd9\x61\xf1\x63\x8e\x1e\x34\x7d\x94\x2e\xf4\xe5\xab\xdc\xba\xb8\xc8\x7f\xd2\xef\xaf\x93\x82\x57\x15\x36\x82\x30\x5b\xa4\x0a\xdf\xdc\xae\x71\x19\x92\x0b\x7d\xe8\x05\x06\x91\xe3\x3e\xa5\xaf\x84\x81\x39\x30\xd6\x0f\x16\x1d\x63\x17\x6a\xdd\xd9\x07\x2f\x74\x27\x6d\x91\x46\x2d\x8a\xf1\x88\xe4\x60\x9a\x15\x76\x87\x12\x94\xcc\xd3\x2e\x4b\x13\x46\x06\xdb\x0d\x00\xd9\x60\x29\xf7\x01\x78\x24\x55\xbe\xa0\xd0\xb2\x91\xc0\x96\xaf\x72\x87\x33\xe2\x45\x49\x1d\xdc\x2c\xb8\xd8\x21\x7c\x4a\xce\xf7\x78\xbc\xd3\x40\xef\x31\x25\xfb\x4b\x6f\x21\x21\x74\xe9\xee\x1a\xf3\xcc\xac\x81\xba\x64\x77\xb6\xe2\xb5\x7a\x9c\x1a\x0a\x89\x0a\x49\xbf\x02\x2c\xb8\x65\x68\x31\xa4\xab\x11\x66\x80\x06\x9b\x94\x0c\xa2\xad\x69\x58\x29\x7c\x05\xe6\xd6\xf7\x18\xca\x6f\xd7\x98\x60\x32\x94\xaa\x9c\xcd\xe5\xa3\x90\x72\x9b\xb3\x82\x33\xd2\x8c\x2e\x30\x5f\xbf\xce\x28\x64\xd8\x02\x1b\x5b\x2a\x89\x74\xa3\x8d\xa9\x41\x9e\x04\xf8\xf3\x4b\x93\x63\x0b\x01\x4e\x73\xce\x4d\x40\x4f\xd1\xfb\xb7\xde\xb3\xb2\x0c\x5d\x54\x31\x38\xe7\x7a\xb3\x82\x09\xd9\xe5\x32\x1a\x33\x84\x06\xd7\xc4\xb9\x46\x76\xb1\xe5\x25\x7d\x29\x1a\xf5\x98\xfc\x06\xd8\x17\xbd\x88\xf5\xfa\xa1\x52\xcf\xc0\x8c\x59\x5c\x2e\xc4\xcd\x4b\x5a\x61\x87\x83\xbe\x1c\xdc\x8f\xc8\x11\xd4\x8c\xa0\x5b\x84\xd2\x00\x28\x99\x39\x21\xb0\x84\x4b\xf3\xe1\xd8\xd6\xbc\xc9\x73\x8f\x86\xc4\x19\x60\x5c\x12\x5a\x2e\xe5\xce\xa8\x4d\xe1\x78\xa8\x03\x7e\x5c\xa2\x39\x61\x6e\x25\x55\xcf\x4f\x7e\xd7\x65\x9d\xbd\x43\x1b\x9a\xa2\xfa\x44\x97\x1a\x5e\xc2\x48\x59\x0d\xff\x52\x62\x26\xe5\x04\x80\x10\x93\x99\x32\x4b\x6c\x59\x27\x7c\x0f\xf1\xf3\xcd\x4e\xc4\x77\x69\x83\x76\xa8\x63\x4f\xb7\x2c\x53\x97\xa3\x36\x4c\x1d\x38\xbf\x7b\x5d\xf9\x51\xba\x39\xb9\x93\xf5\x41\x1b\xe2\xc6\xc6\x7f\xb4\xfd\xde\xa3\xc1\x67\xb0\xf4\x90\x86\xa4\x72\x82\x19\xc8\xb9\x8f\x26\xf8\xa2\xec\xbc\xfb\x6e\xd2\x39\x0c\x61\x51\x6d\xd3\x37\xcd\x91\x04\x51\x3b\x27\xb4\xed\x6a\xe9\xf5\x32\xd6\x8b\x89\x04\xaf\x60\xc2\x9b\x75\x54\xbf\xbc\xfd\xce\x93\x16\x2e\xe7\x46\xf6\xc1\xe9\xa6\x18\xba\x54\x75\x80\x7c\x2e\x99\xba\x87\x49\x30\x81\x6a\xc2\x41\x78\x50\x40\xae\x6c\x1b\xae\xd5\xdd\xd4\x99\x00\xb9\x1e\x54\x3d\xc1\x37\x58\x47\x71\x25\x02\xe7\x63\xf8\x18\xdd\x86\x6d\xa3\x15\x14\xd5\xed\x10\x99\x8c\x71\xb3\x54\x74\x29\xe2\xde\x02\x1c\x7f\x20\x60\xae\x93\xfc\xe4\xf1\xee\x4f\x97\x70\x74\xf5\x2b\xce\xb0\x60\x81\xc5\xcc\x5f\x02\xd0\x8e\x00\x14\xed\x6b\xb8\xbe\x66\xcb\x97\xcc\x15\xfe\x36\x55\x65\xde\x7d\xfc\x5b\x3b\x91\x7b\x78\x48\xb2\xd1\x11\x08\xb6\x4b\x07\x75\x0e\x9c\xdc\x70\x09\xe9\x6d\x8f\x9e\xde\xb1\xbd\xcd\xea\xf7\x5e\xde\x13\xe1\xba\x60\x54\x74\x8a\x3c\x5b\x8d\xd5\x72\x51\x2d\x61\x95\x3e\xe7\x70\x97\xb5\x98\x2b\xfa\x16\xaf\xaa\x31\x5f\x2e\xb0\xbb\x10\x01\x54\x49\xd1\xef\xb4\xe3\x1a\xfc\xcf\xf4\xaa\x
7d\x36\x9a\xcf\x5a\xe4\xfd\x5c\x25\x74\x42\xe1\xa7\xaf\xa1\x3f\x49\xfc\x22\x77\xc6\xb0\x78\xcd\x59\x82\x50\x95\xa7\xb5\x0e\xb9\x6f\x9c\x25\x54\xc8\xe9\x1a\xb8\x24\xd0\x3a\x4d\x5e\x81\x2f\x0d\x88\x38\xb7\x2a\x98\x12\xd3\xeb\xfc\x6b\xe6\x8b\x35\xe4\xdd\x61\x38\x8f\x2f\xcb\x75\x8c\xfd\x75\xb2\x67\x18\xe5\x10\x5d\x0c\xef\x54\xc7\x8b\x52\xa3\x0a\xf9\x86\xc2\x54\x5d\x06\xc7\xe5\x25\x2c\x0c\x96\x51\x3d\xed\x59\xca\xaf\x33\x00\x44\xd1\x5a\x28\x1b\xd3\x8c\xcd\x1c\x76\x7d\xbb\x0c\xaa\xb6\x3e\x7a\x0e\xce\x31\xe1\xdb\xd7\x7b\x9c\x2b\x7f\x0a\xfb\xe5\x98\x35\xb3\x41\x02\xd1\x26\x1d\xde\x3a\x73\x12\x5f\x6c\x3a\x57\x7b\xc0\xcf\x4d\x0a\x62\xcd\x2e\x10\x1a\x52\xb7\x0a\x7f\x93\xd2\x8b\xfa\x11\xcf\xaf\xdc\xc3\xaa\xbc\x6c\x43\xd4\x79\x44\x52\x7c\xff\x51\x50\xff\x71\x48\x42\x78\x34\x29\xdf\x7d\x06\x2a\xb1\x47\x66\x50\xc5\x5d\x90\x25\x60\xc8\x55\x9b\x0e\x06\x03\xc2\x93\xab\xfc\xa1\x15\x48\x30\x27\xfb\xfd\x0d\x1c\xa5\x75\xd2\x6c\x55\xbf\x73\x00\xaf\x18\x80\x77\xe5\x01\x4b\x57\xc2\x4f\x89\xf9\xfd\xdd\x77\xc5\xf8\xfc\x2c\x60\x17\x47\xdb\xe7\x01\x9e\x7a\x39\x0c\x7a\xf8\x54\x08\x48\x37\x64\xf5\x13\x2a\x68\x41\xc8\x39\xed\x5a\x10\x46\x9b\x8f\xa1\x01\xc4\x21\x69\x56\x87\xc1\x1a\x0f\x74\xd0\x46\xa4\xf7\x72\xc8\x31\x04\x42\xba\x37\x8b\x0a\x46\xef\xe0\xd3\x4f\xe1\xcd\xe9\xf5\xcd\x22\x83\x76\xf4\x92\x53\xc8\x9f\xaf\x34\xc0\xe3\x08\x8e\xf6\x08\x8d\x93\xa5\xbf\x54\xb9\x16\x1b\x80\xf0\xf9\xd5\xe0\x03\x1d\x00\x9e\x4c\x98\xf0\xef\x9a\x06\x94\x7f\xa1\x0e\xeb\x1f\x61\x13\x2a\x7d\x64\x80\xfa\x83\xdc\xd1\xc2\x6a\xd1\x5c\xd0\x15\x82\xb3\x0f\x0f\x09\x96\x9f\xf3\x91\x34\xa6\x87\xc5\x72\xa5\xf1\x44\x4d\x63\x7d\x98\x2a\x81\x60\xc3\xfd\x6c\x53\xe2\x01\xb5\xb5\xce\x62\xcc\x0d\xd0\x19\x53\x19\x10\xca\x30\xae\xce\xa9\x3c\xd8\x40\xa1\x54\xc7\x34\x52\x39\xc0\xd7\xf9\x3e\x5d\xd4\xeb\xdf\xd5\x3c\x10\x28\xfa\xac\xdb\x70\x30\xdf\x82\xf4\x06\x70\x5c\xa8\x35\x94\x12\xd9\x67\xf8\x49\xcc\x97\xea\x7d\x56\xa7\xcd\xf9\x78\xe8\x4c\x3f\x17\x65\xe6\xcd\x96\xcb\xd7\xf7\xda\x9f\xb2\x3a\xa9\x4e\x20\x8d\xfa\xd2\x4d\x06\xc3\xc1\x63\x5c\xd0\x58\x6d\x5d\x36\x55\x06\xd0\x19\x98\x74\xe8\xcd\x68\x71\x54\xae\xce\x31\x40\xcb\x58\x87\xfe\x5e\x45\x91\x58\xc3\x04\x67\x32\x79\xa2\x61\x13\x55\xfe\xda\xd4\x3f\x01\x9f\xe6\x71\x3f\xe0\x7f\x58\x00\xe8\x59\x2d\xd4\x1f\x25\xe8\xcf\x8b\xd5\x16\xe4\xa8\xa4\x9f\xfc\xb2\x5f\x58\xc2\xbb\xfc\x02\x40\x09\xd2\xe1\x20\x79\x42\x9d\xf9\x1b\xa2\x05\x69\xac\x25\x3b\x4c\x83\x1d\x4f\xde\x59\x72\x0c\x25\x47\x6a\x12\xf9\x76\xc0\x05\xac\xfe\x5b\xde\x66\xef\xbc\xc6\xe7\x73\xd4\x81\xdf\x57\xf1\x6a\xda\x0c\xe0\x1f\x3c\xd7\xae\xaa\x2d\xbc\x01\x82\x32\xe1\x35\x54\x5a\x6a\xe7\x60\x0f\x1d\x02\x42\x00\x6e\xd1\x0d\xc8\x3e\xdc\xeb\xbd\x9b\x99\x47\xb3\x94\xd2\x0f\xd5\xae\x00\x82\x2e\x5a\x5c\xc0\x7a\x68\xfc\xbe\x95\x03\x0c\x74\x61\xf2\xec\x03\x56\xca\x75\x69\x36\xed\x90\xe5\xbb\xc1\xc9\x08\xd9\x5f\x5c\x49\x48\xfa\x1f\x2f\x50\x0e\x49\xc2\x7f\xae\x94\x29\xa2\x48\x25\x17\xda\xa5\x0a\x21\xe2\xfd\xc4\x44\xb4\xff\xc7\x23\xde\x90\xc1\xf2\xf2\x87\x18\x84\x94\x29\xd3\xb9\x9b\x75\x19\xee\xf5\x5d\xee\xa1\x84\x52\x2e\x44\xdf\xa8\xed\xf0\x1b\x33\x18\x9b\x49\xaa\xf5\x10\x80\x87\xa1\x7c\x93\x8b\x91\x80\x2f\x43\x03\x17\x8b\xac\x5f\x84\xd2\x72\xcc\x57\x65\xc8\xea\x88\x0f\xad\x07\x73\x72\xed\x5e\xb2\xe5\xbf\xc8\x8f\xd5\x37\xe8\x6b\x3f\x73\x33\x18\x29\x8c\xe2\x4e\xf5\x15\xaa\xb4\xe3\x11\xb3\x06\xcc\x3e\x27\x1f\xe7\x8b\x50\x7e\x9e\x8d\xc6\xf8\xe0\x38\x61\xe3\xab\x43\x3e\x9c\xad\xaa\x8c\x4d\x49\x0c\x4a\x3c\x9e\x7d\xbc\x1f\x81\x0c\x5b\xab\x3c\xcd\x5a\xf6\x38\xf6\x13\x97\xf6\x21\xca\xaf\x8b\x97\x0e\x32\xab\x35\x45\xf2\x1a\xd0\xec\x5b\x60\x07\xb4\x0d\x81\x60\x67\xaf\x7c\x29\xa5\xca\
x16\x88\xd7\x85\x1e\x32\xea\x0a\xcc\xee\x42\x2d\xfd\x5b\x98\x82\xf7\xce\x52\x41\x6b\x56\x82\x5c\xae\x49\xf6\xf7\xce\xb8\xec\x5f\x19\xe7\xb2\x4d\xae\x6f\xff\xe8\xfb\x08\x96\x4e\x8c\x80\x9e\xf7\x83\xbe\x98\xe1\x02\x00\x3b\xf5\xa2\x41\xed\x61\xbd\x35\x55\xb5\x0e\x08\x3a\xf0\x11\x9b\x38\xa1\xf0\x47\xbe\xa4\xd2\x7f\x3b\x3b\x05\x30\xb0\xda\x83\xdd\xf6\xec\x3f\xad\xbe\xf4\x27\x15\x1f\x03\x51\xfc\x73\x2e\xcf\xf0\xe8\x22\xd3\x91\xcf\xf9\x29\x67\x86\x73\xec\x9f\x4c\x0c\x19\x56\xfa\x50\x84\x02\x88\xf9\x3e\xed\xeb\xe3\x8e\xc4\xa8\xca\x97\x91\xff\x5e\x1a\x43\xc1\xb9\xd2\xb2\xec\x50\x0f\x16\xca\xe1\x77\xee\xf5\x57\x21\xfc\xc9\x05\xe8\xb1\x66\xdf\x22\x9b\x39\xc2\xb6\x07\x6d\xec\xac\x33\x69\x40\x45\xe0\x2f\x6b\x20\xc7\x84\xad\x16\x5c\x4d\x07\xef\x10\xb9\x3d\xe4\xd9\x74\x5c\x35\x1b\xe6\xa0\x9b\x94\x08\x37\xe9\x73\x1e\xf0\xef\xde\xd8\x75\x10\x26\x16\xe5\xc4\x85\x23\x7c\xc8\x5c\xac\x7c\x77\xd1\x9d\xf8\x96\x3f\xba\x91\xb0\xbb\x0f\xd9\x6e\x63\xb3\x6e\xc6\x45\xee\xd1\x96\xa5\xf0\x52\x3b\xfc\x91\x2d\x0a\xfb\x20\xac\xa3\xf1\x2c\xe9\x31\x56\xa5\xe5\x92\x61\xcc\xbd\x90\x30\xe5\x6f\x95\x6c\x52\x70\xda\xe9\xd7\x92\x64\x33\x38\x06\x09\xfb\x5e\xd3\xe1\x7b\x2f\x60\xf1\x5d\xe3\xfe\x3b\x45\x51\x9c\x6f\x98\x2a\x84\xae\x43\xb4\x2e\xca\xc7\x3f\x54\xff\xbe\x0a\x87\x70\xeb\xa1\x44\x2a\x8f\xf9\x4a\xe9\x80\xd8\x2f\xd1\xcf\xdb\x97\xb1\xcf\xd5\x87\xf8\xb4\x08\x57\x1d\x3f\xc0\x52\x9e\x3e\xd4\x26\xd8\x6b\x85\x8f\xd9\x45\x2f\xef\xbb\xb2\x24\x00\xf5\x66\x89\x26\xaa\x14\xdf\x84\x7b\x70\x43\xa2\xb7\x80\x06\x8a\xb3\xdd\xba\xf4\xd3\xa0\x1f\x4d\xa9\xdd\x32\xdb\xe2\x5d\xff\x05\x0b\xe9\xe2\xcd\x76\x7d\xcb\x7c\x21\x88\x6c\x81\xe6\x2f\xa5\x87\xb1\xfb\x50\xa1\x56\xe6\x53\xaa\xed\x26\x74\xf4\xd8\x76\x58\x42\xcf\x6e\x79\x20\xcf\xbf\x9a\x0e\xcc\x59\xf7\xdb\x77\x02\xe4\xbc\x67\x85\xad\x79\x45\xfc\x4f\xfd\x17\xfc\x5f\xa3\x10\x65\x42\x40\xf9\xf1\xa5\xd1\xb1\x71\x45\xc5\xe9\x85\x7e\x3e\xf4\x40\x15\xda\xd2\x39\x55\xaf\x22\x81\xfe\xec\xe2\x2c\x97\xa1\x0f\x8c\xb9\xb1\x38\x15\x84\x9b\xf2\x6a\xf7\x30\xbb\x69\x68\xc3\x17\x6c\xb6\x69\x7a\x48\x37\xae\x49\x31\xb2\xa2\x54\x06\x64\xe4\x02\xef\xb0\x23\x0d\x51\x6f\xc1\x60\x43\x2e\x3d\xf6\x34\x46\xe5\x26\x3e\x1b\x02\x7a\x0c\x1d\xae\xbb\xa3\xf9\xcd\xbf\x43\x3f\x06\x77\x17\xdd\x80\x87\x89\x66\xe8\x59\xe8\xc1\x71\x63\x73\xfd\x66\x62\xf3\x9e\xcc\x66\xce\x13\x3d\xe5\x5b\xe1\x99\x92\xbc\xb6\xdc\x85\xa7\x11\xf5\x3f\x9c\x39\xbd\x9d\x46\x14\x5c\xfd\x0b\x0d\xd5\x05\x55\x2c\xa6\x06\x99\xcb\x2f\x8d\x2b\x7f\xa1\x06\x86\xcd\xfd\x5b\x76\x32\xb3\x20\x6e\xfe\x4f\x85\x09\xbd\x09\x33\xa1\xde\xd4\x87\x8b\xa5\x66\x44\xe3\x64\xf6\xe6\x7b\xb0\xd1\x52\x0a\xc0\x46\xb2\x8c\xcc\x9b\x96\x96\x5e\x72\x46\xf9\x1a\x37\xbf\x5b\x60\x22\xd1\xa8\xc1\xb2\x77\x6a\x00\xab\xd9\xa9\xf6\x30\x28\x65\xae\xd5\xf8\x1a\xbb\xab\x83\x61\x42\xa8\x9f\x37\xf1\x8e\x22\xcb\xf1\x96\x40\x33\x62\xe9\xd3\x46\x38\x32\xbc\x76\x7b\x23\x04\xa8\x6d\x11\x10\x30\xb4\xb2\x6a\x07\x25\x47\x95\xaa\x8e\x9b\x40\x8e\x8c\x53\xb0\xb4\xd5\xe1\x80\xc2\x94\xa9\x73\xa0\x6d\x49\x9f\xaa\x9a\x39\x1d\xf6\x72\x72\x56\xc3\xd4\x61\xcd\xc6\x62\xef\xab\x13\x2a\xa8\x4e\xd9\x13\x6a\x0c\xbf\x97\x69\x54\x2b\xa2\x53\xdb\x84\xf9\x91\x33\x0f\xdb\x12\x80\x78\x2f\x63\xa5\x8d\xf5\x18\x7d\xf7\x85\x50\x44\x13\x47\x7d\x89\x44\x54\x10\xf5\x3c\xea\xa8\x03\x9c\x79\xe1\x30\xf6\x50\x26\xde\xfa\x9f\x4b\x77\xda\xa6\xac\x41\xe0\x96\xbc\x5b\x9d\x93\x9c\x5b\xa5\xec\x41\xe5\x5c\x2d\x03\x30\x21\x85\x8d\x2c\x75\xe9\x2c\xa0\x04\xa6\x90\x29\xf8\xd7\xf9\x4b\x6d\x87\xd9\xce\xf6\x25\x54\x08\xdb\xdc\x20\x2d\x44\x6f\x2b\xe8\xdb\x5a\xf2\x80\x8d\xa7\xb4\xf2\x87\x02\x6b\x2f\x08\x0e\x36\x4d\x59\xcb\x47\xa2\xc4\xc8\x02\xc2\xab\xbf\xae\x2a
\x5a\x35\x1a\x4a\x89\xf5\xd3\xdc\x85\xcb\xd3\x6e\x4d\x9d\x3b\x20\xad\x99\x3d\xab\x69\x60\x79\x7b\xd9\xbe\x0e\xfe\xa0\x01\x79\xcb\x0d\x87\x37\x56\x02\x5d\x6a\x72\xcf\x56\x86\xeb\xb6\x12\x76\x5f\x79\x17\x4d\xe1\x75\x4d\x24\x60\x88\x27\x76\x9f\xa6\x24\xe3\x62\x6e\x9f\xec\xba\x7b\x64\x04\x16\xfd\x38\x37\xe3\x20\xfb\xc9\xba\xda\x27\xff\xeb\x3b\xe5\xc2\x0a\x80\xbd\xd1\x95\x7a\x5c\x98\xa5\xe9\x7c\x64\x9a\xbf\x4d\xe3\x5a\xde\x20\x16\xf0\x45\x6c\xe1\x57\x75\xc3\xe0\x43\x98\xfd\x67\xba\x93\x79\x4b\x42\x4b\x0a\x57\xbd\x7f\xfa\x4c\xe3\x22\xe7\x86\x3e\xee\x6c\x2f\x0d\xee\xd4\xb1\x48\xdf\xb6\x34\x74\xfa\x3a\x7d\x87\x00\xd8\x48\x5c\xbe\xa4\x5a\xc8\xd4\x9c\xfd\x2b\xd4\xbc\x32\x9f\x1c\xe8\x47\x76\xa8\x1c\xb4\x9c\xd9\x04\xf3\xcf\x34\x03\xe4\xf1\xa9\x11\x5c\x77\x20\xa7\x40\x7d\x3d\x2e\x2c\x70\x3b\xc3\xe7\x20\x60\x54\x03\xfb\xfb\x58\xa4\x5f\x4d\x3f\x3e\x24\xcf\xde\x94\xa9\x98\xfe\x34\xe0\x00\xe0\x2d\x79\x0e\xdb\x69\xf4\xf9\x03\x1a\x02\x81\xaa\x48\x0b\x1a\xb5\x76\x2c\x5f\xba\xe7\xd8\xf9\xf1\xff\xa7\xa7\x25\x2e\x61\xee\x76\x95\x69\x0f\x9e\x6d\x78\xea\xfd\x43\x3f\xaf\xb3\x3a\xd3\xde\x6d\x32\x52\x76\x9f\xb0\x9a\xfc\x92\xe1\xc4\x10\xbe\xd9\xc4\x5d\xf0\x29\xa3\x52\xe5\x3b\xd2\xff\x89\x28\xc4\x7b\xa3\x0b\xef\xee\x3a\xb3\xe4\x30\x25\x52\x73\xb6\xc2\xbb\x7f\xbf\xbc\xbd\x68\x86\x5e\x3f\x6b\xa0\xa6\x36\xcf\x45\x9a\x80\x36\xf7\x54\xa0\xea\x9f\x6c\x83\x9c\x9d\x05\xb6\xca\x7b\xc1\xe9\x2e\x00\x86\xde\x42\x97\xff\x11\x26\xdf\x2a\x0a\x5d\x62\x91\x37\x69\x79\x2d\xd7\x97\x87\xa4\x24\x1c\x77\xfa\xeb\xf3\xea\xbe\xc8\x50\x1d\x4d\x2c\x94\x16\x41\x7c\xd6\xa4\x43\x77\x4f\x40\xb4\x57\x3a\x41\x34\xe6\xa4\x41\x09\xa6\xe2\x72\x28\x13\x75\xbd\xcd\x19\xdc\xca\x98\xa3\x10\x9e\xe1\x06\x07\x00\xe1\x8b\x6c\xf3\xff\x45\xde\xb1\x75\x53\xe5\x7a\x29\x89\x72\x7e\x81\x38\xa0\x77\x50\x1b\x38\xff\xe8\x54\xa6\x00\x67\xb5\xd9\xc4\x0a\x7c\xba\xd5\x8d\xae\x6d\xdd\x64\x39\x5b\xe3\x3b\x91\xc6\x97\x2f\x95\xc3\x8e\xf3\x7e\x67\x0e\xd6\x40\xcd\x48\x59\xca\xe6\xd6\x6d\x93\x09\x8a\x6f\xb2\x35\xb8\x9b\x29\x9c\x26\x9b\xcb\xd7\xba\x01\x89\x81\x7c\x89\xf1\x87\xe7\x77\xd3\xf3\x8c\x56\xa7\xa5\x6c\xd0\x57\x94\x10\x8d\x35\xe8\x9a\x8b\x30\x4b\xc4\x05\x13\x89\x4d\xdc\xc5\xcb\x51\x16\x19\xde\x22\x4f\x6a\x6b\x80\xcd\x49\x9c\xbc\x34\x95\x5a\x65\x4b\x68\x07\x7e\x29\x57\xf2\x27\xcd\xf7\xe5\xf0\xce\x5b\x58\x30\x42\x43\xce\x28\x04\x38\x4e\x44\x49\xe8\x0e\x89\x32\x87\x98\xae\xa4\x8f\xe1\xc2\x65\xe5\xa2\xeb\x5d\x71\x95\xaf\x71\xf9\x70\x02\x02\x94\x94\xb7\xd2\x2b\x5d\xa0\xd4\x73\xdd\xdb\x64\xed\x4a\x9e\xe4\x97\xae\xfb\x6a\xc5\x61\xba\x4f\x50\x19\xac\xfe\x2c\xbd\x72\x95\xa8\xa6\x99\x7d\x68\xa1\x01\x36\x17\x13\x92\xc3\x79\xc3\xb2\x73\xce\xeb\x4e\xca\x29\x6b\x49\x28\x56\x0a\x34\x3a\xc7\x04\xa4\x9f\x9d\x28\x08\x33\xea\xd7\x09\x86\xcf\x91\xca\x49\x8f\x10\x60\x98\xc5\x5b\x07\x35\x11\x90\x55\x50\x0b\xf4\x4a\x9a\xb8\x7c\xdf\xb9\x31\xad\xa8\xe2\x63\x97\x1d\xa3\x68\x1d\x5c\x94\x3d\x68\xfe\x22\x47\x6f\xf1\xf3\x0c\x1f\x06\xbd\x40\x0e\xd7\x22\x19\xed\x6b\xf0\x02\x10\xe3\xc3\xc7\x16\xd2\x94\xf2\x35\xf5\x36\xd9\x97\xb4\x80\x7e\xc7\x56\x6c\xc0\xa2\x2a\x7b\x19\x05\x61\x26\x2d\xd3\x27\xe9\x9e\xcd\xfe\x9f\x2d\xfa\x40\xa9\x32\xf1\x4d\x77\x39\x88\xed\x52\x08\x53\x8f\x85\x1b\x66\xb1\xb0\x00\xf7\xad\x3e\x97\x96\xe6\xbb\x82\x87\xd4\xf3\x9e\xf8\x90\x1b\x89\x20\x4b\x60\x4b\x99\x0b\x29\x63\x09\x65\x1f\x8a\x6b\x8e\xf5\xb8\xf9\x76\xdf\x85\x7a\x59\xd2\x02\x18\x43\xab\x75\xd2\x88\xe3\x66\x68\xc8\xdf\xe0\x81\x7c\xda\x20\x63\xb6\xb3\x4c\xed\xde\x9d\x79\x9a\x86\x5f\xa1\xe5\xcc\x7e\xff\x7f\x27\xf1\x47\x14\xa9\x21\x47\xf3\x5b\x73\x0b\x53\x9b\x51\xad\x61\x78\xe0\xf1\x49\x44\x00\x71\xe1\x46\x50\x09\x64\x61\x1d\x40\x92\x2
x68\x20\xf0\xac\xc1\x87\xe6\xf0\x89\x72\x9b\xcb\x76\xfe\x15\x1d\x04\x14\x7b\xb6\x0c\xce\x43\x6e\x50\x65\x50\x77\x1c\x59\xdc\x23\xb5\x25\xd6\xa4\xfa\x31\xe7\x2c\x1d\x55\x06\x9c\xcc\xb2\x26\x5b\xb4\x92\xff\x1d\x59\x78\xe9\x72\x6e\x26\x30\x2d\xe8\x23\xcd\xf2\x39\x44\x00\x2f\xd9\x7e\x18\x57\xee\xeb\x52\x36\xd0\x6a\x5d\xce\x5c\x6a\xdc\x19\x20\x23\x54\x96\x5b\x75\xb7\x33\xe7\x41\x4a\xec\xb2\xa1\xdc\x47\xf5\x07\x76\x18\xea\x24\x99\xb1\xcf\xf4\x5a\xdd\x9c\x7d\x3d\xb2\xcc\x9e\xc5\x78\xe6\xae\x82\x51\xd0\x35\xb5\xa5\xb3\xb4\xba\x47\x08\x90\xd8\x0c\xe0\x5f\x79\x87\x8f\x26\xc3\xc3\x25\xa6\x1d\x99\x93\xba\x0b\xdb\x80\x94\x83\x68\x25\x40\x8d\x7d\xf1\x28\x50\x1f\x02\x3f\x3e\x74\xd0\xf5\x53\x93\x4e\xb1\x19\x96\xb2\x5d\xf7\xde\x61\x06\x9b\x92\xb7\x53\x88\x48\x4d\x90\xd0\x87\x82\xb0\xda\xeb\x00\xc6\xc8\xd2\x32\xe7\xa1\xec\xda\x62\x8b\x04\xc5\x87\x9a\x05\x7c\x0b\x9f\xc9\x32\x6f\x25\x98\xd0\xe0\xf7\x90\xa0\x7b\xa5\x1b\xb8\x16\x09\xb9\xa8\x90\xb8\x5f\xb6\x4e\xb4\x61\x7e\xe6\x9f\xd1\xdd\x06\x19\xa8\x3e\x7b\x97\x82\xcf\xb2\xb7\xd2\xfc\x23\x93\x30\x6f\x3f\x7f\xfa\x32\xeb\x94\xe5\x8b\x7c\x86\x7d\x4b\xbd\x5c\x87\xa0\x85\xb8\x49\xbf\xcd\xb9\xe4\x6a\x20\x2c\xb5\xa2\xb7\x6d\xa6\xfa\x02\x07\x64\xff\xd3\x1e\xe3\x52\x8a\x92\x9e\x7f\x53\xc4\xe2\x76\x56\x5b\x11\x26\x2e\x56\x2a\x39\x9c\xc7\xc0\x05\xd7\xab\x5f\x1c\x47\xe2\x9b\x67\x3c\x06\x30\xf1\x74\xb5\x2b\x08\xb7\xd8\x65\x0c\x1d\xcb\xdb\x77\x68\xf8\xe5\x15\x7d\x11\xa8\x6b\x70\x38\xc6\xdd\x3f\x36\x02\x10\xc1\x78\x23\x7d\x02\xd9\xe1\xe5\x45\x9f\x3a\x22\x7b\xe2\x54\x90\xa4\x5c\xa4\x55\xd1\x3f\xf2\x1c\xf0\x12\xd8\xba\x9e\xa9\x1f\xf6\x3d\x78\x84\xaa\xf0\xe1\x7e\xe6\xc3\x04\xb8\xc7\xf8\x13\xed\x69\xe8\xcf\xdd\x53\x58\x0d\x11\xc0\xbd\x4c\xbe\x08\x28\x45\xa2\xe1\xb0\x1d\x72\x26\xdb\xf7\xc8\xad\x59\xba\xf6\xe4\xa6\x08\x0a\x01\x9e\x1f\x1c\x77\x2c\x6e\xd5\x6b\xb0\xf1\xc7\x70\x7e\xec\xfe\x39\x9c\x4e\x6e\x2a\xd7\xd9\xaa\x1b\x51\x29\xce\xb0\xa1\x1d\x36\xbb\x76\x0f\x30\xed\xb0\xa5\x19\x7a\xcd\xc7\x65\xc7\x38\x6c\xf2\xb0\x82\xa0\x3d\xd9\xe1\x2c\x67\x87\xe4\x4c\xc1\x12\x1a\xa4\x2d\x18\xe4\xe3\x3c\x84\x2b\xcc\x90\x42\x56\x81\xf3\xe1\x57\x53\xc0\x90\xba\x41\x7d\xc8\x72\xbd\x74\xf6\x9b\xe1\x21\xd6\xbf\x8b\xc3\x7d\xdf\xf2\xe5\x3f\x75\xdf\x54\x86\xdd\xc1\x54\x60\xeb\x62\x83\xb6\x45\xbd\x0d\x34\x87\x75\xb5\x7d\x25\x4d\x40\xcd\x30\xe5\xec\x9c\x75\x1b\x3d\x52\x7b\x22\x4f\xb4\x71\xab\x57\xba\xac\xb4\x2d\x67\xb8\x5c\xd0\xa4\x72\xc0\x08\x53\x11\x9b\x1c\x23\x92\x0a\xf5\x4a\x50\x6c\x3b\x20\xf1\x0e\x1b\xaa\xea\xf3\x0a\xfb\xec\xa0\x92\x5c\x91\x98\xd5\x37\xf5\x5e\x36\xe3\x62\x81\x40\x06\x61\x6d\x3c\x7a\x04\xea\x95\xa0\x1d\xaa\x59\xba\x7f\x05\xeb\x02\x76\x94\x5c\x72\x7a\xd1\x6a\xfa\xe5\xef\xa3\xaa\x96\xfd\x0e\x4b\xed\xb1\x51\x7a\x48\xd1\x04\x23\xed\x8f\x5a\x26\xa4\x7a\x07\x2b\xa4\x9c\x00\x4d\xbf\x89\xfd\x51\x16\x15\x14\x5c\x65\x76\x72\xbb\xa4\xcc\x96\x6c\x95\xa4\x2d\x4c\xf4\xcb\x06\x1e\xde\xf5\xb4\x37\xab\x74\xc8\x6e\x4b\xef\xd0\x03\x76\x9b\xd7\xf2\x2e\x7f\xec\x35\xd3\xe5\x33\x50\xf7\x2e\x07\xf5\x02\x3f\x83\xff\x6a\xc9\x43\xce\x94\xe8\xc8\x91\x3b\x94\x12\xaf\x08\xc3\x51\xff\x64\x8c\xf6\x08\x1a\xbf\x6e\x9b\x48\x3c\xe6\xfb\xd1\x4e\x71\x67\x65\x38\x77\x87\xfc\xbc\xe4\x0b\xd8\x95\x6d\xd1\xef\x4d\x1d\x92\x2f\x2f\xcf\xb2\x9f\xd2\x3f\x29\x87\xb9\x5e\x04\xb9\x8b\x23\x52\x97\x1f\x10\x22\xb7\x0a\x32\x42\x37\xd1\x89\x62\x52\xd6\x00\xc8\x2e\x79\x2f\x12\x7a\x3c\x83\x77\xff\x18\x58\x5c\x7e\xa8\x31\x26\xa7\x96\x96\xa5\x6a\x19\x54\xbc\xa6\xcd\xef\x6a\x67\x6c\x01\x84\x02\x34\x35\xfe\xea\x9c\x0c\x57\x6a\xa6\x4d\xa6\xa2\x89\x42\xfb\x14\xd6\x73\x3e\xd2\x88\x32\xc4\xe8\xab\x4c\x94\xa4\x87\xec\xfe\x7e\xee\x55\x8f\x8f\x93\xb9\xde
\xef\x05\x94\x3a\xfd\xbd\x3e\xa6\x3b\x47\x73\x48\xc2\x9d\x8f\x01\xf6\xe5\x72\x30\xb5\x43\xfe\x72\xa1\xa8\x68\x3c\x28\xab\x58\x35\x12\x88\xce\x11\x56\xb9\x68\xd8\x98\x91\x40\x73\xee\xea\xd5\x8b\x23\x8a\xfa\x6d\x67\x0f\x1e\xd6\xa2\xef\x17\x7e\x64\xcd\x16\x92\x30\x13\x48\xd4\x5b\x21\x14\xee\x6f\x31\x59\x81\x58\x06\xf6\xd5\x6d\x53\xcd\xf7\x93\x7f\xb7\x94\xfb\xeb\xb5\xee\x80\x60\xa2\x3b\xfd\x05\xa4\x11\xf6\xc2\x7e\x76\x17\x74\x94\x2e\xbd\x4c\x42\x09\x07\x2b\x30\x4d\x3e\xd1\xc2\xe9\xf2\x9f\x7d\xeb\x22\xb8\xa6\x39\x23\x0a\xb0\xd9\xce\x45\xd3\x3d\xb5\x14\x41\x3f\x81\x6a\xcb\x46\x72\x5b\x7c\x85\xa4\x9f\xb0\x85\x52\x1d\x76\xc9\xfd\x3c\x82\x81\x02\x2b\x81\x46\xe6\x68\xcd\x34\xd6\xee\x0e\x13\xa8\xde\xaa\x98\x9a\xf1\x72\x93\x00\x41\xc5\xad\x9d\xe3\xb8\x52\x93\x44\x0f\x69\x2a\x6e\x8a\xfc\xa4\x43\xb7\x4c\x41\x7e\x6a\x2c\xfb\xbd\x04\x40\xdc\x7a\xd6\x06\x25\x73\xa8\x01\x1a\xec\x99\xec\x7b\x0e\x1e\x85\x9a\x3d\xed\x53\xf6\x28\xee\xf2\x04\xfb\xe3\x55\x7e\x33\x77\x69\xca\x95\x42\x0d\x9a\x44\x14\xfb\x6e\x05\xc7\xe5\x4a\xce\x27\x6c\x73\x99\xf4\x26\xa2\xd2\x14\x69\xa8\x57\x83\xbd\x2b\xc9\x64\x6a\x14\xad\x97\x09\x6f\xa2\x95\xa6\xda\x40\xe2\x88\xdd\xa8\xc5\x70\xeb\x87\x51\x72\x8c\xbf\xaf\xae\x77\x68\x25\xa8\x17\x9d\x06\x26\xd9\x7a\x73\x0b\x21\x90\xed\x11\x20\x83\xe7\x6a\x53\x3e\xd6\x52\x0a\x87\xbe\x8b\xa6\x15\xb6\xb1\xaf\x2b\xdf\x49\x86\xe7\x0e\x70\xb2\x7b\x9e\xde\x01\x82\x0c\xca\xd9\xd3\xb0\x88\x4d\x44\xf4\x6e\x70\x40\x5f\x5a\xde\x4c\x3c\xd5\x3f\xca\x0a\xf9\x18\x27\xfb\x2d\xdb\x4a\x4c\x59\x6f\xf6\x9e\x4d\x6e\x81\x36\x99\x4e\x07\xf9\x9c\x3a\x76\x96\x46\x2f\xeb\x6f\x24\xd4\xa9\xc3\xa4\x44\xc2\x37\x6c\xde\x82\xde\x01\xe3\x3d\xf7\x9d\xf8\x65\xbc\xcb\xdc\x40\x9b\x7b\xf5\xa0\xfa\x66\xb7\x06\xf2\x0d\x2d\xfd\xb2\x89\x2b\xb3\xf3\x5f\x28\xcb\xb1\xd0\xb5\x17\xb3\x9d\xf8\xf5\xbe\xc6\x21\x18\xa7\xaf\x91\x67\x1c\x49\x8c\xb3\x05\xa0\xe5\x20\x4b\xf8\x73\x92\x8f\xe6\x9e\x0f\x68\x0d\x6d\xed\x2a\x95\xbd\xd2\xc5\x59\x82\x53\x41\x7b\x86\x9d\xfa\xbd\xbf\x3a\x53\xcb\xbd\x1a\x59\x55\xc3\xbd\x2a\x09\xe9\xa5\xdc\x0f\x3e\x8d\xab\x15\x1e\xe2\x1c\x67\x97\xe8\x64\xe7\xcb\x8e\x7c\x3d\x26\x1b\x57\x18\xc7\xf6\xd9\x25\x00\xc9\x3e\x56\xcf\xc9\x16\x06\x97\xe5\x3a\x9d\x84\x44\xb4\x41\xa2\x56\x28\x3e\xe0\xb4\x85\xc1\xb8\x69\x09\x87\xe1\xac\x0b\x46\xd4\x1d\xbc\xc5\x76\xa6\x06\x4c\xdd\x27\xc7\xb6\xd2\x3b\x58\x96\x06\xf8\x28\xaa\xff\xdf\x94\xa1\x68\x74\xb7\xc0\xa1\x9c\x9a\x7f\x49\xa6\x04\xd4\x02\x5c\x2d\xa2\x68\x18\x73\x3c\x46\xb7\x08\xdb\xde\xa3\xa6\xda\xf1\x98\x96\xba\x37\x31\x5c\x70\x03\x5b\xc2\xbc\x2d\x91\x94\x4a\xa8\x39\xd2\x4f\x49\xd0\x8e\x98\x57\x18\x9f\xf9\xa9\xa1\x3c\x89\xce\x93\x5c\xda\xc5\x95\x19\xef\xf7\x68\x42\x55\x67\xbb\xe3\x6a\xea\xd0\xb2\x4d\xbd\x21\x73\x40\x60\xe3\xa3\x1e\x13\x31\x9f\xb2\x4d\x2f\x55\x15\xbb\x3b\x33\x14\x5a\x37\xb2\x56\xd8\x91\xdf\xa7\x20\xb6\x72\x91\xdb\x2c\xfa\x28\xfb\x1e\x1b\xa6\x43\xff\x61\xee\xca\x63\x93\x9f\x23\x60\x2b\x35\xa6\x90\x13\xe5\x5e\xcd\x72\x31\x82\xfa\xe8\xf2\xf4\x41\xc0\xe9\x3b\xb5\x13\x61\x2a\x56\x4e\x1a\xfd\x18\xad\x3c\x5f\xcb\x2d\xfb\xd7\xb2\x4e\xb7\xf8\x59\x95\xa8\xc9\x26\x6a\x9c\xa5\xed\x19\xcd\x9c\xf2\xf6\x48\xcd\x28\x5b\x90\x10\x51\xe4\x5f\xab\xad\xcb\x0c\x23\xa8\x6d\xeb\x7f\x7d\x63\xb1\x87\x3b\x5f\xa5\xbe\x0a\xbb\x3c\x59\xe7\xa9\x0b\xa2\xdf\xde\xd6\x79\xa0\x90\x54\xf9\xc6\x39\x45\xc0\xec\x6a\xd1\x39\xb4\x1c\x97\x97\x1e\xb5\x1c\xb7\xda\x91\x89\x3e\x8c\x5b\x2e\x41\x37\x8f\x47\x0f\xb6\xf1\xbd\x85\xda\x43\x44\x04\xdf\x81\x0d\xa8\xce\x9a\x30\x5d\x17\x1b\xc5\x3d\x1f\x35\xd4\x39\x2a\xb3\xa0\x1d\x11\x39\xb6\x12\x24\xc4\x9f\x32\x44\xd0\xaf\x49\x33\x8c\x73\x35\x05\x65\x6e\x12\x59\x20\x54\xbe\x78\x7e\x81\x2
d\x42\xac\xa2\xd2\x84\x8c\x32\x3f\xc8\x4d\x34\x67\x88\x4c\xe9\xb1\x4e\x82\xda\xe0\x05\xda\xd5\xe6\x0d\xaa\xfc\xba\xbf\x2f\xbb\xeb\x23\x0c\xbb\x31\x0e\xcf\xfd\x55\xd6\xa4\xce\xaa\x0a\x5a\x0c\x5c\x1d\x94\xf3\xb7\xaa\x49\x8a\xc4\xde\xe5\xa1\x3b\xb4\x83\x29\x9b\x8f\x72\x9d\xd8\x40\x69\x35\xc5\xfa\x66\x47\x3e\xbb\xce\x96\x8f\x20\x36\x2f\x28\x28\x63\xd9\x0a\xcb\xc1\x41\x5e\x6d\x30\xde\xfb\x68\x00\x73\x55\x13\x3f\xb0\x6e\x9e\x84\x0f\x3c\x0c\x16\x0a\x34\x5f\x6b\x15\x95\xb4\x52\x3c\x94\x21\x7f\xd4\x72\x91\xe1\x3a\x72\xc3\xc7\xbc\x93\x60\x89\x6c\x5d\x70\x95\xd8\xeb\xc1\xed\xd2\x5e\x3b\x85\x7c\xa9\xce\xac\xf8\x2e\x73\xb4\xd9\x5a\xb9\xd3\x15\x21\x8f\x78\xeb\x4d\x3e\x65\xff\xc2\xcb\x6f\xc9\xa0\xb9\x69\xf2\x5f\x32\x10\x5c\x55\x9d\x0b\x2d\x16\xfa\x18\x01\xb0\x53\x63\xe8\x8d\x9d\x0a\x75\x2c\xe2\x13\x60\x94\xd4\x9b\xd4\xf9\xf8\x56\x17\x6b\x09\xb5\xe8\x22\xbe\xe8\x5c\xa6\x6c\x93\xaa\x67\xc5\xe5\xcf\x26\x3e\xce\xe0\xc6\xb7\x7a\x3b\xe8\x5f\x49\x5e\x92\xb9\x06\xbb\xe1\x42\xc4\xd1\x7a\x97\x5c\x7d\xa8\xe6\xcc\xc9\x0a\xd1\xea\x3c\x6a\xc2\x22\x7c\x26\x23\xdd\xd1\xe3\x98\x2d\xc2\x41\x70\xfc\x32\x80\x78\x18\x55\x93\x4d\x3d\x7a\x76\x49\x16\x10\x00\xad\x07\x2f\x62\x94\xa7\xeb\xb6\xd3\x6d\x97\x42\xad\x4f\x94\x6b\xae\x46\x9b\xb3\x33\x7a\xe6\x4a\x8e\xc3\xa8\x7d\x44\xe3\x69\x3b\xf2\x79\xf7\xa8\x66\x7d\xd1\x7b\x48\x31\xc4\x8b\xfc\x9b\xe6\xb0\x5d\x81\x08\xc7\x2f\xfc\x63\x83\x0c\x94\x79\xb5\xe3\x33\x13\x8e\x33\xbb\x26\x98\x24\xd3\x87\x6c\x7f\xf8\xda\xb3\x0d\x75\x56\xb1\x0d\xd2\x6b\x06\x79\x4b\x20\x9b\xd8\x42\x67\xb8\x54\x94\x6d\x35\x49\x30\xe3\xfe\x27\xee\x0c\x7f\x57\x35\xae\xca\x6a\x1a\x62\x1d\x30\x6a\x64\x52\x01\x5d\xda\x2e\x6e\x4c\xf9\xab\x4c\x0d\xb9\x6d\x73\xfe\xd8\xac\x5a\x79\x42\x7a\x99\xa9\x0d\xd0\x0a\xf9\x77\x7a\x6e\xda\xae\x3e\x45\x93\x78\x0a\x23\x7d\x8e\x41\xb3\xff\xc9\x9f\x4d\xa7\x10\x8c\xa1\x37\x31\x4d\x98\xf7\x68\x2e\xcc\x92\xdb\x1e\xdc\xb6\x5b\x8d\x29\xf3\xaa\x89\xcc\x67\x34\x59\x25\x4d\x86\xd3\x60\x05\xed\xcb\x7c\x79\x5e\xf4\x99\xb6\xcc\x57\xa1\xee\xa9\xb1\x65\x7e\x18\xab\x34\x32\xb2\x74\x87\x42\xf3\xcd\x56\xd9\x10\x43\x51\x73\x90\x55\x4a\xc2\x9e\x42\x1d\x9f\x0f\x35\xc7\xda\xa2\x3c\x3e\x91\xde\xf7\xb8\xd6\x13\x0e\xcd\x5f\x29\x05\x2e\x1f\x97\xdf\xff\x24\xf4\xa1\x4f\x7e\x66\x43\x28\x67\xfb\x52\xc8\x49\xe7\x91\x91\x83\x70\x63\x6e\xb2\xb3\x94\x7f\xd4\x76\xa6\x15\x22\x27\x24\x04\xcd\xcb\x64\xbf\x1f\xe7\xb2\xfa\xaa\xf3\x84\xa4\x90\x48\x54\x4e\xf3\xb9\x94\x7a\x6a\x13\xe9\xa5\xfc\x20\xca\x43\x4b\xe3\xf3\xe4\xad\x7d\x41\x1e\x89\x33\xf9\x70\xca\xd7\x6c\xd8\xce\xe8\x14\x0e\x50\x48\xc4\x50\x1c\x16\x20\xac\x6a\x21\x61\xcd\x8b\xbd\xf9\xb0\x39\x53\x0b\xd1\xbe\xaf\x8f\x07\x6e\x62\xd5\xb0\x6d\x69\x89\x8e\x2a\x49\x1f\x72\x07\xee\xa1\x03\x98\xbd\x91\x18\x22\x98\x61\xd0\x81\x1b\x56\xd8\x70\xf1\x0b\x28\x9b\xfd\xdc\x8e\x66\xcd\xac\xd1\x63\x42\x55\x21\xfb\x8b\x52\x87\x4c\x91\xe9\x82\xa6\x12\x14\xdc\xfe\x4b\x0f\x2b\xf3\x9b\x1a\x4c\xa1\x7b\xbf\x57\xd1\xe9\x3d\x8b\xaf\x75\x77\x8e\xfd\xdd\x2d\xb6\xfb\xe8\x66\x0f\x1d\x19\x35\x48\x9b\xb9\x33\xfd\x3f\x5d\x0d\xb0\x64\xb4\xd3\x7f\x29\x55\xa9\x23\xfa\x98\x95\xdb\xff\x1a\xdd\x84\xd9\xfe\x36\x7e\xd3\x4e\x70\x4f\x27\xe4\xcb\x22\x21\x3f\xc5\x50\x0e\xea\xbd\x7e\x7d\x6c\xba\x0e\x98\xd0\x3c\xb9\x7b\x74\x8c\xf6\x4f\xc4\x3e\xc5\x7a\x21\x9f\xa8\xbd\x97\x06\x75\x91\xe0\x05\xa8\x87\x86\x57\x44\x2f\xe4\x40\x7e\xfd\xfe\x07\x3b\xff\x44\x6c\x61\x96\x85\x5e\x6c\x17\x1e\x05\x79\xa2\x2f\x75\x96\x1b\x38\xe7\x12\xd0\x48\x77\x0e\xda\x2a\xb2\xa1\x14\x4d\xa3\x13\xf2\xcf\xde\x82\xa4\xe8\x29\x8d\xf4\x69\x0e\x6a\x0d\x84\xad\xf5\xfe\x14\xd6\xb9\xf5\x60\xbb\x99\x52\x33\x7d\xa5\xab\x95\xad\x27\xe5\xfe\x49\x1a\xa4\x25\xbc\x
39\x08\xef\x77\x63\x67\x65\x50\xa9\x6e\xbc\xc7\xe8\xc6\xf4\xd7\xaf\x8e\x78\x23\xdf\xc6\x50\x42\xf9\xc9\x73\x8d\xdf\x77\xce\x9b\x29\x36\x97\xa5\x46\x7f\x0f\xa5\x52\xeb\xb6\x29\x7a\x48\x7e\x65\x10\xc1\xf1\x33\x97\x57\x94\xf7\x34\x52\xdd\x1e\x9b\x16\x78\xc5\x05\x58\x6e\x74\x8a\x87\xe3\xee\x92\xee\x23\x71\xcd\x4b\x5d\x4b\xb7\x92\x6b\x57\xc2\x63\xf7\x65\x4a\xe5\xf8\x4f\xbe\x89\x44\xf7\xa2\xb5\x8b\xe2\x75\x8a\xf6\x8e\x75\x37\x35\xd2\xad\x9a\x6f\xa0\xba\x88\x2c\xb6\xda\xc3\x5a\x0c\x9d\x52\xbf\x95\x6a\xa4\x81\x8c\x1b\xfb\x5b\x90\x78\x5a\x7f\x4e\x9b\x38\x5a\x52\x84\x34\x82\xe0\xf3\x94\x95\x38\x95\x05\x6b\xd4\x1f\x52\x2c\xc4\xb6\xe0\xd0\x30\x23\x50\xc3\xdd\xa4\xea\x3e\x8f\x61\x68\x03\x4a\xb9\xb5\x45\xe2\x75\xb9\x30\xfe\x92\x86\x08\xbf\x66\xf3\x86\x61\x70\xed\x18\xb1\x75\x65\x82\x37\xbf\xa7\xac\xb1\x5c\x07\x21\x5c\x62\x3e\x74\x15\x2e\x9d\x4f\xa5\xc5\x49\x71\x93\xe4\x23\xc9\x96\x53\x04\xdd\x80\x1c\xe4\x09\x1e\x6a\x26\x30\x66\x9d\x11\x07\xd5\x93\xdb\x47\x66\x21\x35\xe7\xdf\x9f\x65\x4b\xbb\x23\x4a\x9a\x13\xc1\x24\xea\x9e\x9c\x8a\xd0\x6c\xe0\x46\x53\x05\x4b\xea\xf6\x50\xae\xa4\x76\x52\x9f\xec\xb7\xb1\xb9\x29\x45\x2f\x24\xbb\x64\x94\x59\x27\xb4\x4e\x39\x18\x52\xcd\x04\x34\x97\xff\xc2\xb8\x3d\xbd\x3d\x6f\x7a\x2c\x35\x9d\x28\x40\xce\xa5\xeb\x11\x93\xa1\x0e\xbe\x68\x41\xea\x18\x95\x79\x5b\xf2\x82\xa6\x6f\x9f\xef\x2c\x54\xa2\xe9\x9d\xe6\xd6\x98\x2e\x0d\xe5\xb7\x8b\x84\x26\x0d\xab\x13\xd9\xca\x70\xd0\x5f\x9b\xb2\x43\x8a\xa8\xf4\x74\x85\x07\xa6\xf3\x17\x95\x9c\x0c\x30\x0e\x1b\xe6\xa8\x51\x14\x1a\x27\x14\xa1\xe1\xbc\x0f\xef\x54\xde\xec\x55\x59\x04\x92\xc2\x95\x81\xb0\x2d\xd9\x4c\xec\xb2\x16\x68\x5d\x85\x57\xca\xe1\x2e\x47\x19\x27\xed\x79\xd2\x5a\xca\xa2\xe9\x13\x6e\x1e\x91\xb2\x6e\x2a\x91\x92\x79\xd9\xc2\x01\x53\x49\xba\x29\x01\xe1\x69\x6e\xce\xe9\xab\x8d\x94\x75\x03\x2e\x9e\xd2\x19\xca\x56\x47\xb9\xe2\xd3\xe0\x02\xa2\xcc\xc5\xfb\x53\x29\xe2\xe3\xe6\xa6\xd3\xfd\x8e\xcf\xed\xcf\x50\x13\xc9\xbb\xd7\x05\x9f\xc3\x1e\x1a\x96\x6f\x22\x23\xcd\x96\x21\xa4\x0e\x5b\x04\xf5\xc3\x2e\x7a\x38\x43\x1c\xd6\xb4\x78\x6f\xf7\xd7\xf3\x6b\xfa\xe4\x0f\xf3\xd5\x3f\x99\x2c\xf7\x4f\x6a\x35\x19\xd1\x2e\x53\xcd\xa3\xed\x72\xfc\xb0\x33\x77\xb9\xf5\x1d\xee\xad\xbd\x91\xff\xa3\x1f\x1b\x71\xe3\x1e\x4c\x65\x82\xe1\xa9\x02\xa7\x10\x5e\xb7\x6f\x98\x07\x6b\xe2\x7f\x33\x0b\xd5\x43\xef\x20\x21\x5d\xe8\x9d\xe1\xb0\xc8\x17\xfd\x25\x72\x52\xb4\x93\x2e\xbf\xe0\xf2\xa9\x5a\xc5\x3c\x44\x70\x4e\x99\xa6\x45\x49\x7b\xb1\x73\x27\xd2\xd2\x8a\xc0\x9b\xe8\x10\xd1\x9e\x9b\xa7\xa7\xda\x4a\xd8\x7a\x0e\x95\x75\x64\x24\x21\x6c\x30\xd1\x3e\x3a\xc2\x3a\xb3\xbb\xdc\x38\xd8\x40\xa2\xb3\xd2\xd4\xdb\x9a\x73\x51\xde\xfd\x8d\xba\xac\x71\xf2\x6e\xfd\x78\x2b\xbb\xf5\x70\xdf\x15\x8e\x59\xd5\x47\x73\xfb\xdf\x19\xe9\x0d\x78\x53\x96\x4d\x6d\x67\xf0\xdf\x68\x3c\x62\xfa\x12\xb3\xce\xa3\xe1\xa4\xcc\x11\x53\x9c\x62\x8a\x3b\xa8\x5e\x9e\x39\x72\x6f\xfe\xa0\xa2\xc0\x7f\xcd\x48\xdd\x12\x7d\x29\x93\xd3\x77\x60\x02\xfd\xc4\x80\x8f\x1f\x93\xe8\xa3\x87\xcd\x6f\x39\x14\xc3\xa9\x5c\xf0\x9c\x29\x0f\x68\x4e\xfc\x5b\x7c\x65\x40\xf0\x08\x67\xb7\x4e\x86\x32\xee\x92\x6c\x35\x95\x9e\x7f\x58\x14\xb8\xbf\xdd\x86\x79\x07\x99\x72\x3f\xf7\x2b\x0a\x74\x87\xd5\x69\x50\x68\x38\xb6\xfe\x4b\xf6\x27\x1a\x36\x6e\x7e\x95\x01\x5a\x3d\x4b\x1f\xfd\x2b\xf4\x4d\xfd\xd4\xa6\xb3\x59\x0f\x7a\x56\x0c\xf8\x2f\xf3\xc3\x4a\xda\x9c\xc6\xed\x53\xd6\x1f\x3c\x2a\xef\xe9\x9d\xc2\x21\xa5\x37\x70\x89\x4d\xe3\x10\xfd\xaa\x20\xd6\xa9\x5f\xa5\x06\x52\x59\xa0\x7a\xb9\x42\xa2\x67\x15\x00\x83\xfa\x57\xa2\x3e\x4d\x4a\x0d\x5f\xc7\xff\x50\xa1\x9c\x39\xb1\xa6\x3f\xdd\x40\x41\x87\x11\x42\x44\xe9\x0d\x1a\x32\xb7\xb2\x91\x46\xde\x39\x5f\x75\x96\
xe7\xde\x6e\xfb\xc1\x9d\x35\x97\x9a\x4b\xd3\x58\x19\xd6\xfb\xd3\xa8\x04\xa9\x51\x7e\x43\x65\xe3\x0a\x66\xa0\x3b\x62\x76\x0a\x7d\x98\x35\x84\x01\x56\x4c\xdf\x39\xf6\xd1\x98\xa3\xfc\x5f\x3d\x40\xc0\xd5\x62\x77\xec\xbd\x63\x64\x77\x06\xd1\xa9\x7b\xa6\xda\x75\x97\xda\x50\xf2\xab\x9e\xc1\xeb\xa6\x4e\x0d\x35\xcb\xb0\x4d\x01\x88\xee\xe9\xdd\x89\x14\xa8\x94\x89\xf8\xc8\xe5\x2b\xd7\x64\x48\xa7\x14\xf1\xe1\x12\xf8\xfe\xf6\xca\xdf\xd3\xe3\xe7\xe1\x1c\x8a\xb6\x9e\x7a\xd0\x38\xb0\x13\xc6\xca\x87\x83\xd5\x0d\x91\x4d\xee\x31\x54\x2a\xa9\xd9\x16\xbe\xc5\x1b\x6c\xb8\xf2\x76\xbf\xd9\x76\x25\xfa\x50\xb9\xf3\xdb\x2d\x65\x98\xde\xe2\x36\x9a\x60\x55\x83\xde\x69\x88\xac\x1e\xd9\xab\x9f\xbe\x14\x5b\x4c\x9e\xd4\x88\x71\x16\x8d\xdb\x79\xeb\x77\xe7\x4c\xfc\x3d\x77\xf6\xd6\xca\x7e\x5a\xd3\x15\x01\xbf\x84\x30\x0c\x1d\x0a\xbd\x04\x9d\x44\xbf\x7a\x75\xf1\x6a\xa1\xdf\xea\x47\x05\x26\x57\x3d\x18\x01\xf3\xe8\x04\x76\x63\xa5\x6a\xd1\x6c\xec\x26\x75\xd8\xf2\x96\x63\xd4\x15\x93\xe8\x77\x50\xea\x38\x75\xe5\xe6\x3d\x1c\x67\x48\xa1\x54\x2b\x4f\xe2\x00\xec\xfd\x78\x0a\x96\x8b\xa2\x66\xe0\xad\x9f\x9f\xf2\x83\xb1\x95\x75\x12\xa6\x52\xb0\xa2\xda\x6f\x67\xf9\x12\x92\xed\x5a\x22\x5e\xd1\x4d\xbc\xb3\x8b\x16\xde\x41\xd9\xc9\x7e\x1d\xf4\x71\xc0\x32\x3c\x94\x65\x3c\xd0\xf0\xab\x2f\x09\x33\x9b\xae\x8a\xcf\xbe\x4c\xd3\xa3\x37\xe8\xbf\x8d\x33\xc0\x98\x24\x56\x97\x33\xa9\x38\xb0\x6f\x8c\x7c\x52\x3d\x86\x0f\x8c\x15\x63\xa8\xfa\xa2\x6f\x98\x86\x1a\x02\x89\xbe\xff\x95\x7c\xbf\x57\x70\x09\x63\x91\xf5\xf1\xa3\xfb\x6b\xc8\x0c\x6a\xd7\x1b\x51\x6e\x04\xad\xdf\xd4\x4f\x7c\x65\x31\x5f\x56\xc0\x5c\x6c\xfa\x29\xd0\xa3\xaf\x92\x36\x72\x68\x37\x77\x16\x0f\xfa\xea\x29\x1b\x52\x1d\xc9\xbe\x75\x1f\xf1\xd1\x5f\xf1\x67\xb0\x08\x56\x07\x71\x4b\x66\x4d\x52\xd1\x62\x0d\xdb\xd2\xe5\x2d\xb2\x3d\x66\xb0\xc9\xf4\x61\x43\xd1\x6d\x35\x5b\x30\xc5\xa3\x87\xa8\xbf\x21\xc3\x98\xbf\x29\x3f\x20\x8a\x4b\x86\x7e\x2c\x6d\xa0\x93\x79\x8c\x5e\xa1\x1b\x79\xe7\x01\xeb\x03\xad\x21\xe5\x20\xbe\xb7\x45\xfe\x1b\x3e\x7f\xcb\x66\xe2\xf8\xdb\x95\x62\xb4\x8c\x55\x9c\xbf\xf4\xef\xe9\x23\x16\x72\x19\xb6\xd9\xdf\xc4\xd2\x5b\xba\x42\x53\xd7\x49\x2f\x9d\xc0\x8e\x30\xe2\x3d\x7f\x1c\xbe\x0f\x52\xb8\x5b\x69\x78\xe4\x97\x0f\x4e\x67\x2e\xd8\x19\x54\xdd\xb9\x3a\x41\xce\x11\xb7\x30\xdb\x28\xa3\x9c\x3d\xd6\xe3\xa0\xaf\xd8\xcf\x1e\x98\x29\x37\x67\xed\x6b\xe9\xb5\x32\x45\xcf\x71\x4a\x5e\x97\xe7\xdc\x91\x0f\x1a\xff\xf4\xb8\x57\xcf\x57\x94\x9f\xac\x7e\xbe\xfe\x2a\x85\xd8\xcc\x14\x1b\x25\xfa\x8f\x1d\x98\x5f\x37\x6a\xe0\x76\xdd\x44\x12\x3a\xcd\x1b\xe2\x2a\x96\x93\x03\xb6\x23\x8a\xba\x44\x62\xba\x3e\x23\x90\xd4\x21\x3a\x7c\x7b\x90\xec\x98\x4c\x3b\xeb\x01\x35\x8f\xb3\x94\x37\x91\xac\x49\xfe\x07\xff\xc1\x64\xff\x1d\x98\xf2\x38\x93\xe4\xed\x9b\x1a\x22\x92\xdf\x21\x84\x43\x3e\x73\xdc\x59\x66\xf2\x8d\x55\x2c\x58\x6c\xfe\xd7\x4a\x75\x96\xf7\x8a\x24\x94\x09\xed\xc4\x36\xc3\xdb\xec\x69\xec\xca\x4c\xa1\x0a\x72\x42\x5c\x22\xdf\xc1\xd4\xbf\xd9\x7a\xec\x5f\xa1\xb5\x85\xb1\xbf\x37\xa0\x53\xef\x24\xa6\xa9\xaf\x3e\xca\x53\x2a\x03\x47\x6e\x26\x50\x0c\xed\xf5\x83\x1d\xbc\xbc\x15\x70\x4d\x3a\xf5\x64\xeb\x95\xaa\x39\xb8\xed\x08\x4b\x5f\x8c\x9e\x80\x20\x3e\x72\x02\x2a\x45\x41\xa9\x38\x44\x9b\xc3\x4a\xad\x5e\x59\x79\xa0\xd7\xca\xa7\xda\x0d\x8a\x9f\x41\x26\x0a\x18\xa7\x27\x1b\x63\x65\x97\x8f\x3b\x7f\x4e\x00\x4f\x66\xd9\x79\xd3\xf0\xa3\x65\xf9\x27\x7f\xb4\xef\xfc\x19\x1f\xac\xc4\x7e\xb0\x4c\xc6\x83\xcd\xee\x1f\x37\x30\xdd\x92\x3a\x88\x2f\x27\x1e\x71\x23\x02\xe7\xf6\x3e\x7a\x9a\x9b\x8d\x64\x38\x46\xb3\x74\xbf\xc9\x8d\x28\xea\x7b\x1e\xfe\x58\x2c\x57\xfe\x99\x8a\x9d\xc3\x57\xd7\x74\x09\xf4\x39\xf5\xa3\xc3\x07\xa7\xef\x4c\xb5\x3b\x25\x0f
\xa4\xa6\x67\xa8\xbc\x3f\xba\x4c\xd6\x7e\xe9\x1c\x7d\x2f\xd0\xec\x2a\xbd\x88\x09\x98\x7b\xf0\xef\x70\x22\x6b\x67\x50\xf6\x62\x22\x76\xf1\xa5\x9e\xfa\x41\xc7\x41\x12\x4a\x9b\x2f\xd0\xab\x57\x89\x2d\x1f\xcc\x49\x88\x77\x98\x75\xc8\x42\x9c\x33\x8a\xba\xa5\xe1\x1f\xf1\xa0\x81\x9d\xf8\x78\x59\x7d\xcb\x5c\x2e\xac\x9a\x36\xfb\xa0\x69\xbb\x9b\x98\xe2\x8d\x3c\x20\xf3\x36\x57\xfe\x51\x6e\x77\xdf\xea\xf1\xe5\x4b\x92\x8b\x52\x99\x7d\x7f\x79\x2b\x8b\xf7\x07\xa5\x97\x18\x40\x1d\x23\x5f\x0a\x90\xf3\xac\xfc\x2f\xc3\x1e\x04\x1b\x8d\xa1\x1f\x88\xc6\xe9\x32\x44\xf2\xe8\xf3\x5c\xdc\x1c\x0d\x8b\xa3\x73\xb7\x31\x52\x15\x34\xc1\x33\x49\x2d\xd1\x7e\x88\x46\xe7\x9f\x7b\x9f\xe9\xf8\x9e\x5e\xd8\x01\xed\x4f\x71\xf1\x36\xa2\xc5\x43\xee\x10\xba\xc3\xe6\xea\x12\x3a\xa8\x61\x8a\xa0\xcf\x32\x2a\x3f\x76\xcd\x16\x5d\xae\xf4\x20\x10\xf3\x74\x3e\x0e\x49\xc6\xa3\x77\x38\xbe\x78\x47\x5c\x36\x1c\x30\xce\x12\xcb\xed\x62\xeb\x99\x81\x57\x56\xc2\x46\x97\xcb\xdd\x16\x11\xf7\xf2\x96\xed\xc5\x5b\x86\xe7\xd4\xdb\xb1\x89\x36\xe1\x31\x87\x95\xd5\xee\x38\x11\x2a\x6c\x8c\x4f\xdd\xd6\xae\x7f\x7c\x3a\x91\xf5\xc0\xd7\xa0\xa7\x07\x9a\xa9\xee\x97\xdc\xa7\x53\xfd\x86\x3b\xd9\xb0\xfe\x8e\xe9\x3e\x1c\x6c\x34\xa7\xde\x68\x08\x5b\xf7\xaa\x3b\x0a\x9a\xd3\x3d\xc8\x83\xa3\x5d\x32\xee\xfd\x61\x66\xd4\x1d\xc2\x34\x7d\x4e\x10\xfd\x6b\x2c\xce\x15\x63\xe3\x30\xd3\xdf\x03\x8c\x78\x17\x94\x0c\xad\xe9\xbc\x83\x91\xb6\x51\x77\x47\x8f\xd6\x7f\x18\xab\xd9\x31\xf7\x38\xfb\x35\xf4\x03\xfc\xb4\x72\xf5\x85\x03\xd4\xaf\xdd\x21\x65\x99\xfc\x4a\x6b\x38\xf8\xf4\xd8\x0a\xdf\xa8\x86\x1c\x91\x27\xac\x16\x53\xbf\x96\xca\xd5\x4d\x0d\x34\x7b\x93\x8c\xf3\x7a\xf4\xf3\x6c\x99\x8c\xc6\x6f\xe5\x0a\x46\xdf\x10\x08\xf0\xe0\x95\xbb\x1f\x06\xc4\x01\xe8\xd1\xe1\xb5\xda\x9d\xef\x24\xfb\x0c\x9a\x24\xeb\x56\xa1\xab\x5b\xff\xf1\x79\xdf\x6d\x69\xff\x4e\x66\x7d\xf5\xba\x3d\x1b\xdf\x91\xff\x74\xbb\x78\xda\xed\xb2\xc2\x5e\x82\x00\x78\x13\xd1\xcf\x49\x5b\xd5\x42\xb9\x36\x77\x8f\xab\x54\xef\xb0\x5d\x3b\xfa\xa1\x7a\x4e\xd1\x42\xe2\x08\x52\x60\x02\x52\x10\xf5\x49\xa7\x58\x15\x18\x2b\x8b\x7f\x58\xeb\x59\x5a\x4a\xcd\x55\xfe\xc8\xb6\x6e\x90\x00\x85\x2f\x87\x91\x32\x5d\x95\xc5\xfd\x9b\x94\xf4\x8a\x98\x8e\xeb\xef\x6a\x2c\xfa\xe5\x45\x37\x8b\x2e\xb0\x9b\x38\x5d\xef\x26\x5e\x17\x36\x9c\xa5\x94\x7e\x3a\x56\xba\xb3\x6f\x02\xa2\xe5\xdd\x29\x8e\xc4\x72\x2a\x1d\x70\x4f\x7b\x8a\x51\xa1\xb0\x6e\x75\x7f\xcf\xb5\x75\x7f\x1d\xd4\x60\xcb\x34\xf3\xb9\x4a\x57\x55\x86\x9c\x17\x68\x49\xd5\xba\x40\x45\x2e\x7c\x18\x4b\x9d\x21\x10\x81\xd5\x06\x10\xe6\xec\x86\xee\xfa\xd9\x3c\x43\xa2\x29\xdc\x57\xa7\xe5\x71\x39\xab\x07\x73\xaa\x3f\x1a\x8e\x74\xb3\xc3\x74\x23\x3a\xce\xe4\x11\x7c\x8f\x5b\x3f\xaa\xcb\x7e\x3d\x04\x01\xd5\x84\x9f\x2b\x05\xa1\xf2\x0c\xf7\x38\x60\x39\xa2\x04\xdb\x60\x3a\x4e\xc0\x56\xfc\x08\xf0\x65\xfd\x4c\xff\x7b\x68\x15\xb2\xa9\xcb\x66\xb0\x68\x92\x60\xb2\xbd\xf2\xc7\xbc\x4c\xf3\x0f\xf9\xfe\x1f\x59\x4a\x18\x73\xa3\xb4\xb6\xb3\x9e\x8a\x9e\x33\x4c\xad\x9c\x7f\x86\x68\xdb\x3e\xac\xc2\xd1\xdc\x1e\xbe\x52\xca\x5b\x6e\x08\xda\x30\xb2\x12\x38\x8a\xc4\xee\x73\xfa\x62\x22\x3a\x73\xa0\x75\x9a\x67\x73\x12\xa7\x30\xb5\x70\xeb\xa4\xb3\x04\x53\xfb\x51\xf9\x48\x8d\x3d\xd5\x9a\x30\x49\x6c\x36\x07\x9f\x96\x01\xdc\x79\xc4\x26\x8c\x1b\x84\xe4\xb8\x1f\x0b\xb1\xc8\x71\x51\x0c\x51\x84\x55\x7e\x61\x5c\x4e\xb0\xb9\x4a\x22\xfd\x37\xe5\xb7\xb2\x8e\x34\x37\x2c\x2e\x9a\x2c\xac\xab\xec\xd5\x59\x74\x97\xf3\xf2\xd4\x4d\xef\x43\xc7\x8a\x31\xd8\x28\x4b\x86\xd5\x8c\xbb\x3a\xfd\x0d\xeb\x57\xed\xe2\xc1\x49\x64\xbf\x9e\x07\x58\x0d\xdc\x46\x46\x88\x48\x7c\x01\x32\x6b\xe3\x30\x7d\xe0\x94\xa6\x16\xb2\x2f\x1a\xd0\x10\x27\x8b\x35\x9a\xb4\xf2\x8d\x40\x7c\x9
1\xbd\xe2\x80\x8e\x68\x9d\xea\x54\xd5\xab\xcf\x29\xd1\xe7\xb1\x21\xab\x2f\xc8\x8a\x77\x4f\xd4\x4d\x6a\x78\x99\xa4\xf7\x13\xad\x4b\xf7\xb1\x6b\x3f\xba\xd3\x81\x56\x20\x34\xa6\xa5\x28\xe0\x63\xca\xce\xbf\x26\x0c\x62\x33\x70\x4e\xbe\x7f\x7d\xa4\x49\xba\x55\x64\xb0\xba\x25\x5d\x7e\xa6\x72\x21\x6a\xcc\xd4\x08\x36\x77\x95\xb3\xa4\x1e\xf8\xca\xc2\x56\x32\xdf\xb4\x94\x2d\x27\xfb\xcb\x0b\xb8\x0f\xd1\x2d\x9e\x5a\x67\x81\x81\xab\xd8\xda\xd9\x1a\x8e\xd1\x21\x63\x45\x96\xd2\xf1\x6d\x20\x8d\x6e\xa7\xd3\x67\x53\xc3\x7a\x48\x49\x4a\x8e\x4f\x62\x8e\xd2\x0d\xde\xd1\x50\x0b\xde\x68\xce\xb5\xf3\xc6\xec\xa6\x4f\x29\x38\x6f\x72\xe8\x2a\xd5\x80\xe7\x08\x68\x5c\xc3\x9b\xf4\x53\xb6\x36\xec\x22\xbe\xcf\xae\x93\x97\xf4\x95\xb0\xfa\xce\xf6\x2c\x66\x54\x98\x9d\xac\xdf\xb5\x0b\xf5\xbe\x1e\x08\x7f\x74\x3e\xb9\x8f\x93\x1e\xd1\x9a\xf4\x12\x82\x44\xf6\xe3\x8e\x72\xdc\x02\x86\xd9\x44\xee\x84\xed\x97\x02\x9e\x94\x05\x7d\x5c\x9d\x68\xc9\x65\x39\xc9\xbe\x10\xd9\x61\xf3\xae\x8c\xaf\x91\xeb\xe4\x2b\x97\x42\xdb\xce\xd6\x57\xff\xa5\x7c\x9f\x51\x1e\xd1\x75\xc7\xd3\xce\xcb\x41\xf7\xa6\x3b\x0e\x63\xd8\x4e\xc0\x03\x27\x8c\xde\x99\x44\x5c\x59\xe3\x2f\x63\x51\xb0\x75\x8c\x66\xf6\xd8\x89\x7b\xf4\x4a\x6d\x99\x4f\xdd\x7c\x66\xb4\x11\x97\x5d\xb4\xb6\x69\x22\x7d\x2e\xaf\x7e\x54\x9b\x7a\xca\x2f\x77\xa4\x2f\x23\x46\xe4\x3e\x5d\xfb\x87\xb2\xbb\xe4\xc9\xa9\x83\xe7\xe6\xda\xea\xf8\xd0\x8b\x92\x2d\x6a\x35\x71\xdf\xf2\x16\xc3\x0e\x4f\x1e\xd5\x30\x04\x62\x29\x79\xec\xad\xb4\xb6\x50\xc0\x3a\xf0\xa9\x5c\x03\x7a\x9d\xe0\x47\x46\x17\x5b\xfe\xd8\x59\x3e\x63\xc9\x0d\xca\x23\x0c\x47\x0f\x9a\x1f\x93\xf7\x18\x0a\x80\x23\xaf\xf6\x2c\x86\xf1\x9d\x3b\xe9\x39\xd2\xda\x0e\x6b\x65\x8e\xb1\xb3\xd1\x58\x5f\xa3\x91\xcb\x24\xa0\x5d\xab\xa2\x8b\x75\x1f\x6b\x11\x42\x21\x57\x20\x71\x4c\xc3\x47\xe2\xf6\x55\xa3\xc1\xd3\x05\xf6\xac\xba\x8a\x11\x20\xe1\xb8\x89\x23\x0d\xd4\x3e\x96\xe1\xfe\xb7\x46\x10\x01\x59\x99\x6e\x84\xbe\x8d\x71\xed\x67\xd8\x48\x95\x12\x2b\x5c\xc7\x44\x61\xdb\xcb\xb6\xd4\x59\x52\x73\xb5\x2e\xde\x4f\x37\xb6\x1d\x31\x07\x26\xe6\x41\x03\xab\xaa\xea\x3d\x6b\x19\x1b\xad\x6b\x76\x46\x8f\x7e\x9d\x79\xc6\x52\xa5\x1b\x9d\x5b\x06\x45\x5d\x59\x1a\x08\xa9\x53\xf3\x97\xc6\xde\xb3\xbc\x2b\x60\x68\xa2\x9f\x79\xcf\x91\xe4\xb5\x33\xaa\xc6\x1a\x5b\x71\x71\xeb\x7b\x84\x64\xd5\xfa\xe4\xd9\x05\x14\x7a\xd2\xfe\xe8\x33\xa5\xe8\x6c\x4a\xeb\x35\x05\xc3\xfa\x0d\x3d\x3f\x55\x72\xfc\x2c\x2b\x83\x58\x9a\x47\x4c\x54\x90\x74\xd9\x28\x1b\x74\x17\xfd\x6a\x74\xf1\x76\x1b\x7d\x44\xb0\xa1\x58\x3f\x65\x6c\x75\xee\x19\x2e\x3b\x68\x5a\x2f\x8e\x1c\xdf\x1f\x59\x1a\xb8\xfd\xc1\xff\x38\xaa\x09\x07\x1c\x9a\xfb\x59\xb6\x51\xfd\x5d\x23\x74\x9c\x35\x0a\xda\x16\x32\x5a\x75\xbb\xbd\x2a\xc3\x14\x4e\xbd\xe2\xf1\xc2\x52\x2a\x3e\x5a\x5b\xbf\xbb\x16\xfb\x5c\x3e\xab\x75\xa4\x97\x75\xbe\xa6\x54\x2b\xe9\x4a\x5f\x57\x3a\xfb\x2d\x08\x4a\xb0\xbc\xd8\x8d\xfa\xef\xd8\x39\x38\x7b\xd9\x8f\xdc\x3a\xef\x3a\xcb\xb0\x6c\xb7\x1e\xce\x7e\xa7\xc1\x19\xf7\x0c\xf0\xd8\x1b\x83\x5c\x4a\x1a\x0c\xfb\x74\xdd\xcd\x0e\x67\x7a\x53\xa5\x70\x8c\xfc\xac\xb7\x3d\x90\xcb\xf6\xef\xf0\x6b\x6f\x41\x05\x64\x5e\x73\xb4\xc7\xa7\xbb\x0c\xbd\x18\x15\x13\xfe\xe3\xc1\xf4\xa2\xd2\xaf\x52\x51\xfb\x4a\x1d\x25\x15\xf5\x52\x4e\x94\x3b\xf2\x52\x55\xbf\x5f\x8b\x20\x2d\xc0\xbe\x1a\xe7\x79\xed\xc3\x25\x8d\x5c\x62\x8f\x86\x7c\x8c\x61\xa5\x26\x65\xb1\xc6\x12\x13\x1f\xee\xae\x43\xb1\x2f\x9a\x07\xca\x93\x39\x44\xc4\x15\x56\x42\x83\x80\xe7\x2e\x58\xab\x74\x3a\xdd\x7d\x30\x94\x3a\xb3\xf6\xc8\x23\x38\x6b\xeb\x6b\x10\xd5\x96\xde\x0f\x7f\x74\x80\xc2\x68\xe2\xba\x19\x8e\xa6\x47\x92\x6a\x38\x82\x3f\xd7\x33\xca\x59\x78\xa8\x9d\xaa\xdc\xca\x10\xf4\xb0\xb0\x04\x5e\x25\xc6\x
5c\xcf\x46\x8d\x63\x36\xc7\x2f\x1f\x0b\x70\xde\xec\x58\xa6\x76\x2c\x16\xae\x3a\x88\xc6\x33\x93\xbe\xdc\x0c\x82\xaf\x9a\x04\x3f\x3a\xa0\x4d\x61\x65\xb6\x4e\x22\xc3\xda\x71\x38\x48\xad\x9b\x1a\xd2\xe3\xee\xee\x7b\x4e\x81\x13\x82\xcf\xea\x46\xb0\xc8\x0c\xbe\x5b\x72\xc9\xc0\xae\x7c\xe8\x4f\x5d\xf6\xb8\xd6\x14\xbb\x39\xad\xa7\x2f\x47\xae\x94\x1d\x07\x2e\x45\x12\xf5\x30\x7c\xdc\x6c\xfe\xc0\x47\xbd\xd7\x30\x8b\x29\x0e\x8c\x9d\x96\xe4\x21\x63\xf6\xee\x02\x2a\xc8\x40\x0f\x12\x35\x8a\xe5\xa6\x47\x9e\x82\x67\x7f\x74\xa9\x1f\x89\x5d\x33\x0c\xef\x53\x84\x2f\xe4\x6d\x80\x43\xac\x89\xad\x33\x43\x4f\xf3\x0f\xcd\xd1\x15\x1c\x66\x7f\xb6\xae\xd3\xac\x6f\x4f\x35\x3d\xf7\xdf\x43\x5e\x99\xf9\x5f\xd9\x0f\x8d\x62\xa0\xe6\xe0\x0e\x2d\xd9\x61\x3c\x26\xb5\xb2\x2d\x3d\xd6\x38\x22\x92\x5a\x49\x3e\xbb\x39\xb4\x5a\x3d\x6a\xf4\xdd\xad\x80\x3e\xcb\xd7\x12\x5f\x2b\xbc\x33\xca\x6e\x20\x0c\xd9\xfa\x8d\x73\x19\x5a\xdb\xd5\x88\x60\x49\xaf\x71\xb8\xf4\x96\x0b\x74\xec\x3e\x55\x17\x6d\xca\x96\x04\xce\x34\xdb\x60\x30\x0c\xd3\x93\x16\xe4\x37\xb2\xeb\xbf\x40\x64\xc9\x6e\x70\xa9\xdc\xd8\xc9\x99\xcb\x2c\x91\x1d\x1c\x3a\x9f\x53\x6c\xa4\x69\x74\x87\xad\x81\xd9\xb3\x4a\xd2\x92\xed\xdb\x3d\x96\xaa\x99\x61\x10\x1c\x2c\x4c\x3a\x8b\xc6\x73\x56\x50\x20\xf3\xb0\x63\x72\x28\x07\xdb\x57\x64\xda\x2e\x03\x19\x8e\xe1\x46\x28\xc9\x36\x5d\xe0\xf5\xd4\xa5\xd4\xd5\x76\x84\x9b\xc6\xf0\x89\x8f\xa4\xb5\x5d\x07\x7d\x6c\x33\x67\x82\x73\xd5\x9a\x6d\x06\x43\x6d\x52\xe6\xd0\x4d\xec\xdb\xf2\x5e\xcb\xc9\x66\xf1\xb8\xe2\x0b\xa9\xee\xe6\x31\x3e\xcf\x52\x09\xb5\x07\x1b\x08\xbd\x0f\xca\x4f\x9d\x44\xb9\xa5\xc3\x28\xf9\xa4\x33\x1c\x69\xc4\xa8\xdd\x3e\x7a\x5c\x99\x9e\xfd\xf4\x32\x39\xb5\xca\xf0\xfe\xf1\xe6\x01\x16\xed\xca\xe4\xc2\xa8\x0d\x12\x6d\x79\x3f\xb3\x5c\xeb\xdf\x4e\x5e\xcb\xfa\xcd\x85\x5a\xe9\xbd\xa1\x97\xb4\xc4\x40\xeb\xdc\x84\x1e\xec\x32\xf7\x1f\x27\xbb\xcd\xfd\xf4\x65\x49\x47\xe7\xfa\x3e\x60\x74\xd0\xd5\xd5\x00\xef\xd3\xaa\xee\xf6\x6f\x9c\x32\x53\x3a\xa2\x0d\xae\xa0\x07\x88\xee\xd6\x6c\x5e\x2d\xa5\xbf\xdc\xee\x4d\xa9\xf9\xa0\x87\x8b\xef\x0c\xa6\x2e\x9e\xc3\x63\xbd\x7c\xf6\x83\x6f\xd4\x27\x7b\x26\x88\x82\xb8\x76\xfc\x04\x3e\xf0\x73\x76\xf8\x21\x7e\xb2\x4e\x9c\x9f\xbe\x6c\x11\xed\xe8\x8b\xd6\xb6\x33\x39\xca\xec\xc3\xfe\xae\xa4\xe5\x37\xf7\xac\x97\xe8\x46\xef\xe7\x57\x32\x9c\xa8\x66\xe3\xd7\xb4\x34\x2e\x5c\x7d\x3c\x0e\x5d\x2a\x05\x82\xb3\x3e\x14\xda\xdc\x7a\xf6\x1d\x04\xeb\x6a\xb2\x8f\x8e\xb3\xdb\x5a\x38\x9a\x42\x4d\x39\xc2\x3f\xa9\xdf\xcd\x02\x86\xbc\x35\x34\x5b\x0c\x12\xdf\xaf\x94\x33\x1c\xcb\x70\x1b\x1e\x24\xc4\x80\xd1\xc9\xae\xad\x8f\xb1\x7c\x9b\x0b\xf7\xd3\x8f\xcf\xba\x38\x42\xb8\x0c\xcd\xd3\xe5\x0f\xcc\xb6\xb5\x5a\xd4\xe0\x24\x44\xff\xad\x1c\xa1\x17\xa0\x37\xfb\x70\xa8\x08\x8c\x4c\x6a\xeb\x7a\xae\xf9\xfb\x60\x10\x84\xcc\x5a\x63\xcd\x98\x8f\x71\x2b\xfb\x91\x72\x79\x4f\x7e\xd8\xa0\x18\x36\xfd\x73\x65\xf6\x16\xa3\x12\x96\x76\xee\x42\xc3\xc9\x8e\x33\x86\x4f\x83\xa0\x0a\x96\x43\xd6\x09\xf8\x3c\x65\x66\x4b\x73\x6d\x4c\x9b\x82\xfb\x79\x78\xae\x40\x47\x3a\x9a\xd5\x69\x91\x5e\x98\xcd\x68\x34\xaf\xf8\x60\x80\x41\xce\x44\xcf\xea\x48\x0c\x48\xf1\xe0\xbf\x3a\x31\xb9\xff\xa3\x61\x2b\xba\xfe\x30\x65\x39\x1d\x0d\x63\xde\xb8\xfb\xc1\x65\x35\xba\xb5\x3e\xf4\xf6\x4c\x02\x6d\xe4\xa0\x63\xd2\x90\x6c\x62\x31\x04\x97\x63\x08\x06\xf2\x7d\xe3\x5c\x11\x81\xe9\x6e\xda\xc3\x02\x0f\xaa\xc3\x67\x68\xc5\xab\xf8\xe9\x9e\x7f\x0d\xec\xdc\xc9\xa4\xab\xc2\xf3\xec\x70\x51\x90\x7d\x0f\xc9\x81\x80\xc3\x1b\xfa\xbf\xe7\x89\x56\x8e\x37\x3a\xe5\xe5\xad\x76\x35\xee\x91\xf8\x7c\x9f\x42\xfe\x7a\x07\x61\xd1\x3d\xe8\x89\xe4\x5e\x8d\xb4\x4f\xbf\x43\xcb\xef\x86\xf4\xd6\xdf\x0d\xfb\xdf\xd5\xf7\xe5\xb0\x0a\
xcb\x3f\x3a\x6a\xb5\x9d\xb9\x63\x91\x18\x4e\x09\xcb\xa7\xc9\x5e\x22\xf7\xcb\x46\xbc\x0c\xc3\x3d\xef\xd7\xfe\xcc\xea\x1d\xff\xd6\x8e\x33\x01\x68\x11\x8a\xed\x1a\xe7\x8e\x74\x74\xaa\x5b\x53\xe7\xb0\x2d\xa4\xef\xd6\x20\x85\x07\x36\x27\xbc\xe2\xfb\x52\xce\xc6\x96\xbf\x17\x06\xab\x66\x35\xe2\x06\x82\x6a\xbc\xc9\xb8\xa2\xcc\x77\xa7\xee\xea\x67\x19\xf2\xad\xd2\x35\x4a\xaa\x8e\x01\x30\x2d\xbe\x94\x43\xfc\xf4\x5a\xda\x6d\x44\x86\xbb\xde\x90\xd5\x94\x7d\x6d\xbc\x77\x7e\xd1\xb2\xf1\x0a\xc6\xb1\xd4\xde\xe7\x1c\xea\x00\x24\xc8\xcd\x31\xcc\xd5\x76\x13\xe6\x7e\x7d\xcd\xfb\x83\x87\x56\x97\x5d\x52\x60\x60\xd1\x7a\x7b\x98\x3c\x8d\x4e\x34\x5a\x6a\x35\xc0\x60\xfd\xea\x51\xaf\xe7\x51\xe9\xc7\x92\x14\xf9\x29\x5d\x72\xbc\x5f\xf9\xfd\xd6\x29\x38\xc5\x65\x81\xed\xa2\x29\x8d\x30\x06\x0d\x19\x8a\x6f\x6f\xbf\xa3\x82\x79\x04\x47\xb6\xaf\x70\xbf\xd1\x1d\xe6\x7b\x64\xd9\x81\x5e\x3c\x57\x4b\x29\x51\x8c\xd5\x69\x7e\x2f\xb5\xe5\xec\x10\x3f\x5c\x11\x2f\x36\x17\xca\x3b\xf3\x59\xd7\x4a\x19\xd9\x02\x97\xea\x4c\xce\xdd\x2c\xaa\x77\x9c\xfc\x2f\x87\xa5\x46\x73\xe3\x78\x8b\x56\x73\xbf\x8a\xae\x5c\x7f\x10\x84\xfd\xf5\xba\x7d\x9b\x57\xfc\xea\x2b\x1b\x79\x9d\x42\x45\x6e\x49\x2b\x79\xc9\x60\xd7\x74\xb5\x25\xbd\xd5\xed\x5a\x39\x78\xce\x29\xc2\x00\x68\x43\x1d\x1e\x73\x6d\xce\x47\x64\x29\xe4\x35\x5a\xd5\xca\xda\x89\xe6\xf3\x95\xdd\x2c\x6e\xf3\xfa\xc5\x6c\x3e\xe7\x10\xbc\x73\xe5\xf9\x4f\xda\xfd\xe6\xee\xf6\xfe\xe5\x43\xc8\x83\x78\xe1\x72\x8e\x46\x6f\x7c\x60\x0c\xac\x85\xe5\xe8\x66\x83\x2c\xe2\x96\xbb\x0e\x3d\x82\x29\xe3\x1e\x63\x8b\xa8\x2f\x76\x80\x7d\x00\x70\x98\xa6\x5b\xf4\xc2\xe3\x31\x7d\xa5\xfe\x58\x68\xaf\x3f\x4c\xd3\xe8\xd4\x7c\x58\x5d\xee\x75\x74\x23\x73\x9a\x53\x44\xb9\xcd\x62\x1c\xb4\x20\x2c\x04\x69\x19\xe3\x11\x27\xd2\xae\x23\x65\x85\xb2\x55\xee\xdf\x07\x6c\x48\x7f\xaa\x11\x89\xa6\xda\x69\xa1\xb0\x50\x0e\x44\xa9\xdf\x6a\xf3\x3b\x18\xd4\x6e\x6f\x97\xeb\x5d\x1f\x27\x1d\xfb\xd3\xe1\x22\xad\xae\x78\xda\x25\xae\xd8\x3c\xf9\x01\x69\xf2\xb9\x97\x82\x81\xd6\x3a\x1d\x15\x3a\xff\xc8\xe3\xdf\xe8\xdd\xe5\x9d\x34\x0e\xaf\x70\xc4\x6e\x54\xab\x7d\xb3\xf1\xe2\x0a\x8b\x3e\xcb\xfc\x57\x5a\x74\x87\x2b\x40\x52\x83\xa9\x81\xa0\x92\x8e\x87\x27\xac\xd3\xd5\xfc\x3e\x1d\x85\x5d\x0a\xcf\x10\xc8\x9f\x6e\xaa\x93\x08\xaa\x07\xd9\x51\x91\x49\x74\xb7\x2f\x17\x00\xde\x0b\x02\x77\xaf\x41\xfb\x84\x29\xb3\x38\xdc\x8a\x5a\x84\x89\xe3\xc7\x03\x75\xc1\xc1\xc6\x05\xdb\xff\x14\x12\x0d\x62\x77\xea\xc4\x04\x4f\xd1\xf4\x6c\xa4\xa2\xbc\xd1\x12\x60\xd0\x64\xda\xe4\xa9\xbc\xd7\xd7\x48\xc1\xfe\x54\x36\x2d\x8a\x8a\x80\x4d\xc0\x5c\xf9\x09\x71\xe4\x61\xb7\x47\xea\x34\x3d\x22\xd5\xf7\x68\x62\x93\xc9\xf6\x3d\xb3\xc3\x6f\xff\x67\xbb\x48\xbf\x0a\x8a\x3c\x6d\x4a\x1b\xb8\x4b\x54\x77\x03\x24\x45\x0f\x5e\xa6\xcb\x06\x75\x3c\x44\xd3\x83\xbb\x00\xef\x4a\xe1\xb7\xd5\xf8\xb7\xb4\x89\x40\xbd\xd3\x91\x90\x69\x79\x46\x2f\x19\xcb\x98\x9b\xfb\x8d\x6f\xd4\xc8\x4b\x60\x14\x89\x40\x5e\x46\x4a\xbf\x51\xe8\x22\xd2\x98\x35\x33\x27\x9a\xf4\xe6\x39\xf2\x17\x69\x30\x60\xba\x34\xb8\x79\xe6\x59\x6f\x65\xb9\x11\xc6\x08\xab\x81\xa1\xef\x84\xb8\xa0\x10\x3d\xb2\x3e\x02\xe3\xbd\xf8\x8f\x2d\x9f\xf2\xee\xc8\xe3\x2e\x95\xc4\x27\x7a\xe7\x4b\x3f\x3b\x08\x52\x1e\x01\x4b\x1a\x44\x05\x57\xcf\x26\x4d\x83\xc5\xd9\xe2\x7c\xd7\x91\xf3\x25\x47\x43\x02\xe3\x5c\x4b\x40\x03\xb4\xe9\xc1\x05\xc8\x66\x94\x6a\xf9\x77\x61\x24\x0b\xfa\xb7\x7c\x0a\x20\xc7\x94\x0f\x6c\x4a\xb0\xfb\xd3\x80\xfb\xb3\x3f\x55\xbb\x9f\x3c\x20\xd9\xf8\xc6\x56\x3d\xef\x71\xa0\x86\x3a\xe3\xdb\x18\x28\x0b\x86\x61\x34\x90\x25\xac\x87\x3b\x89\xe7\x99\x52\x6b\x07\x45\x7c\x34\xd3\x02\x1a\x4d\x3e\xea\x91\xf3\x1a\xbd\xfb\xb1\xfa\x5a\xca\x68\x0f\xdd\xfc\x2b\x56\x92\xd4
\x72\xb3\xc8\xd3\x9f\x98\xe6\x69\x57\x62\xc7\xa8\xc6\xf6\x4b\x8c\x97\xd7\xb8\x29\x15\xf4\xa5\x3e\xd8\xcb\x65\xd5\x8b\xe9\x3f\x11\x3b\x69\x8f\x13\x0a\xd4\x1a\x19\x28\xf0\x3b\xe2\x2b\xa0\x29\x11\x9e\x97\xe5\xaa\xd2\xe9\xf9\x4a\xe4\x33\x68\xd8\x56\x9f\xe2\xb3\xac\x87\x6c\xb0\xaf\x91\x1c\x75\xa2\x25\x23\x5b\x4b\x27\x88\xb8\xbb\x0e\xd2\x5a\xe7\x4d\xe1\xb9\xa0\xcd\x55\x2e\x83\x38\xec\xd0\x56\x79\x7e\xd1\xc8\x01\x0f\xdf\xc4\x10\x97\x91\x73\xd9\x70\x96\x53\x6b\x11\x47\x93\x2e\xe6\x6c\x8e\x33\x1c\x37\x48\xdf\xb0\xee\x4b\x37\x37\xd7\x3f\x13\xde\x73\x3d\x5c\x23\x29\xe5\xc8\x1f\x77\xd8\xdf\x4b\xcd\xf1\x40\xbf\x84\xb7\xee\xa5\xcb\x71\xc0\xf6\x9a\x8f\x8f\x7d\xd6\x11\x4e\x49\xcb\x84\x4c\x43\xe7\xe3\xa1\xa8\xca\x87\xaa\xaa\x5b\xf5\xbe\xdc\x39\x2c\x05\xfe\xe6\x56\x7c\x7a\x9b\xff\x5d\x79\xe3\x65\x45\xe3\x9f\x80\x8c\x23\x71\xf5\xe6\x36\x3c\x2e\xbd\xa8\x70\x8e\x74\xfe\x63\x13\xf6\x28\x53\xf8\xad\x76\xe5\xdd\x77\x37\x33\xf1\x80\x45\xfa\x16\x1d\xfa\xfb\x5d\xe2\x85\x43\xab\xd7\x11\xb6\x59\x7b\x59\x1d\xd0\xba\xe6\x90\xae\xcd\x5c\x62\x14\x08\xa2\x41\x99\x73\x0e\x77\x10\xf6\x77\xa2\xe1\x71\xb9\x95\xfb\x01\x6c\x14\xa2\x1f\x54\xb6\x64\xf7\x2f\xe3\x84\x77\x0e\xa3\x04\x78\xf2\xeb\xee\x59\x91\x9d\x80\x00\x4c\x65\x03\x53\xfd\x9c\x22\x34\x44\xd9\xab\x7a\x6c\xb3\xe6\x5e\x15\xe0\x7e\xd0\x46\x8c\x23\x89\xca\x77\x70\x28\xcb\x1a\x21\x98\xb7\x6c\x5f\xde\x6e\xf6\xf7\xeb\xea\x74\x90\x1e\x0d\xb1\xf0\xe8\x3c\x22\xd4\x11\x7c\xe3\xca\x3e\xa0\xc3\x85\x5a\xc1\xe8\x01\x71\xb0\x8e\xad\xb2\x20\x30\xe2\x4d\xa4\x01\xee\x19\xd5\xa0\x1a\xdd\x65\xa8\xe7\x41\xbf\xab\xcf\x96\x43\x8e\xbf\x2f\x24\x0a\x95\xc5\xd4\x35\xe6\xbe\x5c\x19\x6f\xbb\x1a\x42\x3e\x80\x2b\x38\x6f\xe3\xab\x9a\x52\xd8\x84\x5f\xb2\x1e\x74\xf9\x6b\xb0\xc7\xc8\x6d\x8e\xde\xfd\x48\xd9\xb1\x47\xa6\x11\xed\xe6\x5d\x95\x8f\x6b\x87\xcf\x0e\xfa\xd5\x37\x87\xa1\x51\xd8\x1f\xe7\x12\xa6\x0e\x5b\x45\x58\xca\xa3\x9a\xf1\x4f\x23\x13\x19\xca\x28\xc7\x6c\x56\xde\x18\x7e\xb6\x80\x06\x70\x42\x20\x0e\xa2\x66\x83\x46\xa3\x7b\x67\x7b\xb5\xef\xd5\xfd\x60\x07\x3b\xc2\x48\x7a\x65\xa3\xef\x7d\xe5\x77\xec\x2d\x5b\xc0\x88\x9e\xd2\xd1\xdc\x28\xeb\x9e\x5d\xd9\x26\x90\x82\xde\x6e\xa4\xb0\x71\xea\x36\x73\xe6\xcb\x8e\xa4\xec\xfd\xfd\x90\xa4\xcc\xd3\xd6\xa7\x65\x3a\xdb\x80\x0e\xbe\x4c\x03\xe9\x05\x6a\x3a\x7d\xd7\x45\xff\xbc\xec\x07\xbd\x90\x60\xb8\xb9\x18\xdf\x90\x01\xe8\x07\x88\x83\xe7\xa0\xd1\xd3\x41\x01\xdb\xfe\x12\x51\xde\xdc\xf7\xd9\xcb\xf5\x86\x22\xc4\xd8\xc1\xa3\xe7\x4c\x8d\x24\x8f\xb0\xa1\xd8\xc3\x85\x0b\xdd\xfd\xe5\xb8\xc6\x5c\x18\xc5\x61\x47\x2b\x39\x87\x81\xc5\xa3\x9e\x12\x18\xb3\x7f\x52\xc1\x83\x01\x42\xee\xda\x8e\x1f\xc6\x6a\x03\x18\xa6\x9f\x7a\x58\x6f\x72\xdf\x76\x31\xe2\x5b\x9c\xb8\x6d\xfd\xe7\x9b\x51\xfa\x33\x52\xdc\xb3\x99\xec\x6b\x50\xdb\xa7\xf5\x11\x4d\x7c\x86\x8f\x6a\x1c\x1a\xa1\xcd\x57\x61\xb9\x84\x42\x30\xb7\x7e\x26\xfa\x9a\xad\x60\x2e\xab\xbf\x0f\x9a\xe4\x4b\x4d\xff\xe0\x35\xab\x97\x4f\x5f\x35\xb6\xf6\xcb\xd7\x6f\xd7\x89\x1c\xa1\x6b\x1e\xda\xff\xc1\xbd\xfe\xca\x75\x47\x07\xe9\xcb\x6a\x74\xa8\x16\x9c\x20\xb5\xeb\xc6\x91\x16\x80\xf8\xdc\x4c\x06\x95\x3d\xee\x54\x7a\x69\xb0\xd3\x3f\xa3\x20\x01\x14\xfa\x3c\x67\x4c\x48\xdf\x2b\x39\x6a\xcd\x33\x26\x65\x97\xfb\x6b\x9e\x06\xd9\x04\xac\x13\xa6\xb4\x78\xa5\xa9\x02\x57\x18\xd6\x3b\x11\xfe\xe0\x8e\x3d\x7a\x85\xfa\xa8\xe5\x2a\xff\xe7\x01\x39\x48\xeb\x23\x1f\x4a\xda\xae\xb9\xa9\xe3\x14\xc1\x08\x38\x44\x8c\x2d\x3f\xaf\x04\x25\x91\xd4\x22\xb8\x61\xd7\x78\x25\x76\xe0\xaf\xa9\xdf\x6c\x74\x3a\xe2\x35\x93\x9f\x8b\x10\x0b\xbd\x1e\x0d\x50\xb3\x86\x91\xbc\xda\xdb\x63\xa3\xb7\x1f\x47\x37\x0e\xea\x39\xac\x65\x95\xb5\x6d\x47\xd9\x4d\x23\x7f\x92\x4c\xfa\xb2\x07\x5c\x8
c\x6e\x01\x63\x90\xb9\xc0\x9a\x7e\x94\x99\xb3\x26\x77\xbf\xe1\x66\x7c\xd7\xd2\x59\x96\x5a\x4c\x35\xd5\x7e\xc3\xe0\xc1\x79\xc0\x8f\xd8\xee\x1a\x2b\x7c\x71\xf0\xe6\xf3\x53\x2f\x44\x2b\x09\x2c\xec\x36\xc9\x38\x8f\x3d\xf3\x36\x45\x52\xcb\x69\xf3\x0c\x73\xdf\x49\x7d\xe4\x66\x4e\x43\x61\x08\xf4\x5c\x00\x4e\xa6\x0b\x67\x81\xbb\x27\x6f\xa1\x88\xd6\x2b\x97\x75\xfb\xa3\x5b\xc1\x16\xa3\x94\x75\xf0\xd9\x20\x29\x92\xe3\x17\xda\xa4\xb2\x6e\x05\x01\x7b\x33\x26\x11\x19\xb3\x08\x7c\x7d\x1b\xb5\x18\x4a\x29\xd0\xeb\x91\xed\xe6\x4a\x6f\xe9\x1b\x79\x5d\x07\x46\xa1\xf7\x5c\x49\xf8\xa3\xd0\x89\x52\x45\x2b\x4d\x67\xfe\x4c\xf6\x7e\x75\xa7\x9d\xd7\x94\x3a\x52\xf0\xdc\x7d\x5d\x30\x87\xb3\x64\x38\xa2\xc1\xdf\xf8\x67\x54\x22\x62\xfc\x71\x30\xaf\x72\x7a\xdd\x15\x12\x12\x98\x03\x55\xc7\xeb\xc5\xa6\x5f\xc6\xe8\x08\xcc\xe3\xf3\xa9\xac\x99\xf2\x21\x71\x02\xe9\x1f\x83\x65\x6a\xd9\x7e\xcc\xfa\x4b\xc3\xcc\xaf\x99\x4a\x74\x0c\x40\x35\xe2\x5f\xb0\x08\xce\x35\xce\x26\xed\x42\x4d\xd2\xc6\x52\xee\x76\x39\xc4\xda\xee\x35\x3d\x93\x30\x81\x2e\x0d\x09\x93\x3d\x0e\xfe\x11\xd4\x98\xce\x3f\x19\xd5\x83\xf9\x6c\x9e\x3c\xbc\x23\x36\xfb\x2f\x94\xca\xfc\xc5\xdf\x05\xe9\xb6\x68\x42\xee\x1c\xc7\xb5\xf8\x9f\x4c\x0b\x39\x68\xf0\xca\x29\x8f\xa3\xd7\xec\xd4\xc3\x7d\xc5\x10\xc0\x5d\x7f\x49\x91\x65\x3a\xa5\x4f\xcc\xf4\xe0\xc2\x77\x86\x03\x66\xb5\x64\x9e\xa7\xc0\xa2\xe7\x52\x4d\xb6\x9e\x4a\x01\x7b\x8e\x8d\xb6\x2e\x65\x5b\xbc\xf2\xa6\x5c\xf1\xa6\xb2\x97\xf5\x5d\x96\x80\x3f\xd9\x1e\xb2\xe5\x2b\xe1\xcd\x24\x33\xb0\x9f\x6c\xa4\x00\xf1\x8b\x2f\x8f\xca\xf7\x96\xca\x42\xc7\xab\x1c\x4e\xb2\xd8\x38\x47\x83\x1e\x7e\xb1\xfd\xf2\xdd\xe4\xd7\x6c\x00\xd0\xe6\x75\x41\x02\xf4\xec\x88\xa4\x5c\x39\xe5\xe3\x5c\x60\x6a\x50\x12\x8b\x1d\x0e\x61\x63\x7b\xc5\xeb\xca\x16\xe0\xeb\xb8\xbd\xa2\x66\x08\x0f\xf3\xf7\x03\x87\xbd\x1f\xc2\x77\x2e\x7a\xff\x93\x1b\xff\x9a\x73\x38\x2e\xbb\xb8\x20\x0b\xc2\xd5\xa4\x12\xe9\xa7\x69\x89\x9d\x20\x48\x3f\xce\xe2\x5d\x5c\x3b\xc2\x48\x36\xe2\x79\x83\xf2\x23\x37\xd9\xf1\xe5\x6e\xff\x39\xdf\xe5\xe8\x40\xb4\x38\x5d\x28\x81\xff\x9e\xe3\x81\x79\x9d\x64\x46\x5b\xbe\xec\x21\x19\x57\x98\xed\x02\x22\xe9\x3c\x5c\x67\xb5\xcf\x2f\x7b\xa0\x72\x66\x7e\x84\x4b\x8c\x6c\xca\x43\xfa\x29\xe1\x53\x59\xed\x3f\xba\xdd\x43\x79\x02\xb6\x84\x91\x02\x46\xf4\xda\x83\x05\x07\xa6\xf3\x3d\xd2\xe8\x9a\x0a\xe6\xb3\xc1\x5f\x7f\xa3\x01\xc0\x0e\x3e\xf6\x47\x06\x05\xce\x91\x73\x0e\xef\x2e\x6b\xe5\x10\x46\x20\x83\xee\x01\xdd\x42\xa1\xbf\xa7\x2c\x65\xb7\xc8\xb2\x18\x2a\xb1\x49\x5e\x1d\xbc\x6f\x8a\x7d\xed\x50\xe9\x40\x43\xcf\x6b\x78\xb0\x71\xd0\x40\x32\x9d\x8c\x3f\xc8\xab\x20\x80\x90\x2a\xee\x07\x8c\x00\x9c\x03\x0e\x49\xe1\x18\x90\xda\x8b\x26\x46\xa5\xb4\x5f\xf2\xaf\xfb\x56\xce\x71\x3b\x72\xc0\x40\xa9\x73\x46\xc1\x7d\x63\x68\x60\xc4\xdb\x6c\xbd\xe5\x9d\x77\x5a\xf3\x2b\x5e\x38\x93\xe1\xa7\x66\x0a\x60\x03\xc7\xe6\x94\xcd\x29\x92\xbc\x3d\x34\xdb\x3d\x87\xbf\xac\x7a\xbe\x77\x18\xdf\x2c\x74\xb8\xbf\x8f\xbf\x7a\xcc\xb8\xa9\x56\xff\xe2\xb1\xf3\xc8\x27\xaf\xf8\xeb\xc2\x10\xfa\xbf\xbf\x51\xdd\x7f\xc0\x0b\xee\xd9\xdf\xd1\x30\xd5\xd5\x75\x4f\x1b\xd3\x8d\x7d\x50\x3e\x46\x04\xf6\xcc\x14\xe0\xab\x9a\xba\x9b\x24\x74\x47\x2f\x22\xdd\x35\x04\xb5\xd4\xa9\x43\x4a\x3b\xbe\x1c\x47\x50\xb1\x96\x59\x4f\xfc\x36\x1e\xb6\x95\x04\x5f\xc9\xbc\x0c\x41\xae\xfe\x9d\x8e\x44\xfe\x72\x2d\x99\xd2\x72\xfd\x91\x06\xcf\xce\x44\xce\x93\x60\x40\xfd\x09\x5e\xa8\x93\x3b\x77\xd8\xba\x0b\x18\x38\x97\x93\xb5\x13\x36\x5f\xfc\xcb\xae\xfe\x1e\x86\xf4\x06\xe6\x9d\x4b\x64\xbe\xf5\x94\x52\x1a\xdb\x80\x38\x6e\xaf\x24\x71\x5a\x9e\xf8\x2f\xf3\x7a\x7c\xea\xb0\xdf\xcc\xc8\x40\x89\x40\x45\x58\x19\x94\x39\x2c\x36\x5b\x65\x8f\x8a\xe2\x54\x7f\x
11\x5a\xd6\x37\x54\x29\x75\xa5\x26\x25\x98\x50\x3e\x87\xb1\x02\x43\x11\x78\x39\xc6\x2d\xab\x29\x8f\x4d\xaa\xa9\xf2\x83\xc1\x86\xc9\x86\x27\x53\x54\xf8\x9d\xd5\x64\x9d\xc6\xf4\x65\x09\xcc\x81\x71\xca\xfe\x03\x1a\xf1\xb5\xbb\xd1\x01\x8d\x2f\x27\xb0\xe8\xef\x62\xc5\xd7\x67\xf1\x48\xcf\x1a\xd0\x47\xb1\x31\x4a\x67\xdc\xe1\x55\x63\x84\x5e\x63\xe5\xb2\xd7\x44\x66\x43\x10\x61\x9c\x90\xa6\x7a\x18\xd8\x03\x1c\xf5\xf5\xa4\x32\x4f\x67\xfd\x7d\x22\x89\x68\x0a\xa8\x42\x37\xe9\x13\x4f\x81\xe7\xaa\xff\x75\xf1\xa4\x3a\x2d\x57\xa5\x0e\x8b\xf0\x8d\xee\x5f\x9a\x3c\x73\xd8\xb5\xb3\xf3\x90\x1b\x0b\x63\x12\x1f\x5e\x20\x80\x2e\x7c\xc6\xc0\x45\x25\x02\x96\x65\xa8\xa2\x61\x76\x82\x34\x18\xf2\x61\x35\xb2\x79\x3b\x44\x97\x7d\x03\x18\x2f\x77\xec\xff\x3e\x15\x8f\x38\xec\x03\x0e\x03\x95\x73\xcb\x6a\xfb\xe7\xa7\x41\x0d\xe2\x0a\x15\x1a\x28\xdf\x58\x95\x86\xc2\x37\x65\x1a\x2f\x60\x63\xb9\x8f\x9d\x19\xbd\xa0\xc6\x43\x30\xe5\x87\xb5\x7d\xf5\xde\x09\x13\xde\x40\x07\x42\x77\xbc\xff\xf7\xd0\x47\xf4\x5c\x89\xd7\xc7\xe7\xfe\xbd\x60\x09\xf8\x4a\xdc\xbe\x01\x07\xff\x6b\xf0\xb4\xe6\xa0\xc2\x2f\xd7\x4b\x24\x75\x1f\xdb\x29\xbf\x6d\x8e\x8b\xee\xf1\x7b\xf4\xbd\xbd\x99\x3c\x02\x13\x04\xb3\xe0\xcb\xa9\xae\x5b\xed\x43\xf1\xa5\xb8\xd5\xae\xb4\x7d\xb0\xd7\x5a\xcb\x07\x30\xf0\x7b\x15\x14\x02\x12\xb2\x54\x57\x8e\x99\x76\x48\x76\xd9\x4d\xad\x99\xbd\x44\x84\x7a\xf5\x66\xe3\x8f\x8f\x33\x95\xff\x73\xdc\xf7\x12\x21\x02\x89\xa8\xcc\x7d\xd7\x71\x6d\xb9\x12\x1a\xf5\x4c\x5b\x3b\x23\xb4\x11\xf8\x84\xe2\xf5\x82\x12\xfc\x9d\x94\x97\xf8\xd8\x1f\x18\xb5\x8a\x5f\x45\xad\x2c\x84\x54\x1f\xa3\x1e\x56\xd6\xc0\x26\x45\x62\x75\x67\xc4\x43\x69\x56\x4e\xfd\x53\x66\x75\xd9\xb2\xca\xa7\xa0\x39\x23\x00\xca\x5f\x17\x00\x04\xe8\x32\x38\x34\xf5\xa8\x39\x6b\xfa\xd2\x86\xde\xb8\x1a\x43\x65\xdc\xed\xf4\xde\x6a\xf8\x0d\xbe\x8d\x0f\x8c\x7a\x20\xda\x28\xc8\x66\x08\xd4\x80\xbf\x87\x9e\x97\x60\x88\xb6\x3f\x62\x3c\x26\x46\x66\x67\xd0\x72\xd4\xb4\x2f\x3f\x38\xa8\xc5\x37\x11\xfd\x59\x48\x34\x75\xf5\x25\x36\x10\x0e\x2f\xe3\x9a\x85\xdb\x94\x11\xae\xf8\x5d\x56\xc6\xf6\x03\x1e\xe7\x26\x6b\x00\x0b\x78\x7b\x11\xf7\xdc\x6e\x37\xd1\x55\x10\xa2\xa0\x2f\x0d\x90\x02\x3b\x3f\x0d\x71\x05\x6c\x85\x05\x3a\xf4\xec\x1a\x35\x7d\xb7\x26\x3b\x0d\x03\x5f\x30\x5f\x01\x4e\xc8\x10\xcf\x18\x2b\xc0\x6c\x28\x80\xa4\xca\x5e\x1b\x88\x97\x78\xbb\xfd\x89\x06\x55\x46\x8b\xcd\x3c\x1b\x2c\x88\x1e\x34\xc9\x6c\x0a\xe4\x59\x8d\x7a\x91\xea\xf1\xc2\x40\x2b\xff\x23\xed\xf3\x94\xa7\xf0\x8b\xbf\x7c\x52\x32\x51\x3a\x03\xf1\xac\x92\x01\x8d\xc7\xe3\xec\xec\x66\xa3\x6e\xbf\x63\x17\x89\x63\x66\x45\x82\xca\xcb\x26\xa8\xfa\x11\x97\x6f\x3b\x9b\x1d\x3c\x3d\xe1\x1a\xe5\x0d\xb1\xa6\x40\xe3\x67\x0f\x80\x62\x04\x36\xcc\x47\xfe\x8f\xe7\x7f\x02\x1d\xd9\xc5\x05\xbd\x6e\xac\x65\x94\x80\x20\xba\xb2\xc4\x31\x34\x18\x7a\xcb\xdd\x52\x29\x19\x79\x53\xae\xbc\x57\x3c\xac\x0d\xbc\x65\x21\x38\xc7\x21\x54\x3a\xfc\xe7\xa7\xf5\x91\x9a\x2f\x25\x7a\x9a\xb2\xee\x9f\xec\xa4\x8c\x0d\xcd\xc8\x9d\x5e\x1a\x1d\x61\x50\xb6\x17\x51\xa3\xa5\x07\xa9\x6c\x69\x18\x9c\xdc\xaf\x17\x19\x90\x3c\x60\x27\x1d\x98\xc3\xcd\x42\x0b\xc3\x2f\xc7\xae\x41\xe5\x84\x79\xd8\xb0\xbe\xaa\x1d\x0c\x94\x60\xe9\x93\x1a\xa3\x13\x20\x8d\xba\xcc\x88\x89\x66\x46\xbd\x13\x76\x70\xfd\x48\x2c\x3b\xb3\x48\x59\x0e\x2a\xff\x73\x27\xee\x2f\xbb\x45\x86\x04\xd8\x9b\xac\x74\x78\xfb\xcf\x1e\xad\xef\x4f\xd5\x33\x53\xf2\xa0\x3b\xc7\xe3\x1d\x83\x47\x99\x6e\xdf\x75\x18\x9a\xcc\x38\xd2\x05\x00\x60\xc5\x5e\x85\x6f\x06\x4e\x09\x8e\x7a\x8f\x36\x76\x9e\xa6\x60\x99\xc2\xdc\xf9\x2e\xa1\x43\xd9\xe2\x87\xe9\x24\xc6\x6b\xb4\x3c\xdf\x35\xf4\x48\x56\xa8\x1c\x5c\x59\x8d\x65\x3c\xc1\xb0\x58\x80\x10\x5a\xbf\x2e\x30\xdf\xd8\
x45\xeb\xde\xb4\xa8\x73\xfb\x4e\x23\x36\x0e\x65\x23\x96\xac\x6b\x78\x09\x91\x79\x09\x9f\x41\x14\x3d\xdf\x40\xa8\x22\x42\x3a\xf1\x14\xa9\x2f\xdc\xa2\x2d\x83\xd8\xbe\xd5\xab\x8a\x54\xc7\xa7\x07\xe7\x5a\xbf\x38\x90\x17\x4e\x2b\xe7\xea\x89\xe8\x5c\xde\x5c\x98\x17\x8f\x39\x43\x9e\x65\x6d\x35\xce\x72\x51\xea\x2b\x48\xb3\x96\xcf\x64\xaa\x0c\x20\x9b\xed\x2c\x27\xac\x96\x8f\x73\xc1\x3a\x5e\x3d\xa5\xff\x77\x6a\x4f\x8e\x51\x9a\x46\x93\x29\xce\xa5\x7c\xd7\xf7\xd8\x99\xa1\x70\x3e\xae\x3d\x0b\xeb\xe8\xc9\x12\x6c\x26\x9d\x7d\x91\xd5\x6e\xa1\xc0\xf8\xf2\xa9\xae\x9e\xe5\x7a\xc6\x3c\x33\x58\xe4\x9c\xdc\xf5\x45\x84\xc7\x92\x65\x28\xd4\x27\xe1\x42\x0a\x28\x83\x36\x63\xd1\x40\x6b\x2b\x89\x9e\xce\x48\x3b\xcb\xcf\x29\x58\x28\xaf\x6c\x4f\x1d\x9b\xa9\xcf\x9c\xcc\x6d\x6a\x8d\x6b\xa6\xb5\x26\x63\x93\x14\xeb\xb8\x28\x2f\x07\x3b\xe6\x1a\x74\x36\xaa\xe6\x08\x03\xfd\xce\xc7\x78\xc9\x98\x21\x62\xda\x23\xba\x0a\x45\xce\xcb\xc0\x10\x74\x83\xbe\xb3\xdf\xd7\xd1\xa3\xb2\x73\xb2\xb5\xa3\x50\x24\x5d\x5c\xe5\x5d\xe3\x6f\x87\xdd\xb0\x7f\x25\x30\xc1\x2e\xa1\xd5\xbf\xdb\x6f\x81\x2d\xf9\xfb\xe9\x13\xe5\x84\x1c\xa7\xdf\xd5\x18\xf4\xc1\x44\x05\xbf\x0c\x5c\xba\x35\x40\x6a\xa7\x5a\xfb\xdc\x8e\xdb\xc2\x23\x22\xe6\x8d\x22\x01\xa7\x75\x7b\xff\x5d\x65\x03\x65\x9f\xbe\x84\x46\xe0\xac\xae\xd8\xa0\xdb\x07\xc7\x1d\x98\x51\x6a\x7d\x3f\xdd\xb6\x53\x04\xb5\xdd\x2b\xcb\xb3\x6c\xf9\x92\x1c\x32\xf7\xd3\x74\x37\xbb\x17\xe9\x8a\xa1\x07\xb7\x16\x49\x68\x3f\x42\xe4\x04\x66\xac\x24\x0f\xf4\xe1\xd6\x9f\xf3\x3b\xc9\xea\xa0\x0c\xf5\x39\x44\x98\xf4\xa8\xcf\xc7\x67\xb5\x79\x37\xb8\xb7\x16\x94\xd8\x66\xdf\xc6\x46\x31\x46\x33\xfe\xaf\x8a\x48\x92\x07\xd9\xac\x3a\x3a\xa8\xc5\x37\x87\x94\xee\xb8\xbb\x37\xf3\xa9\x8c\xca\x8c\x34\xf7\xf5\xfe\x36\x2e\x16\x5e\x6c\x65\x93\xcc\x7f\x1c\x84\x6e\x0b\x4f\x64\x25\xb6\x97\x8d\x2f\x2e\x5f\x3d\x2d\xbb\x9c\x20\xde\xa4\xcf\x31\xcb\xe0\xc8\x60\xcf\xdc\x2b\x5f\x6e\xb6\x90\x7b\x47\x27\x4c\x6f\x60\xc2\x4a\x7b\x5c\x4e\x2c\x48\xde\x17\x30\x15\x8f\x7d\x6a\x35\xdd\x9f\xfd\x05\x61\x3d\xcb\x8f\x5e\x69\x79\xfb\x58\x1b\x14\xfb\xf8\x6b\x59\xca\xa6\x6e\xb1\xf4\x29\xee\x64\xee\x8a\x7d\xac\x7c\x91\xfd\x91\x23\x57\x2e\x4c\xe7\x60\xb4\x6a\x63\x0f\x04\xcb\x09\x46\x7b\xa4\x77\x40\x4c\xfe\x17\x1f\x74\x80\xe2\x13\x7a\xa6\x72\x84\x99\x13\xe4\x66\x34\x77\x87\x6d\xa6\xde\x4a\x7f\x15\xbe\xca\xc7\xe7\x7c\xb2\xe3\x74\x38\x1c\xbe\x51\x0e\x1a\xc5\x7e\x95\x59\xed\xa3\x9b\xd1\x81\xec\xd8\x59\xca\x5e\x09\xcc\xa7\x3d\x06\xfe\xdb\x63\xa4\x54\x26\x4e\x88\x96\x01\x9c\xfa\x55\x90\x48\x77\x99\x34\xd9\x45\x2b\x4d\x91\x9b\x89\x26\x80\xb3\x23\xf6\x7f\xa1\x30\xfa\x47\xd2\xd8\x76\x02\x99\x92\x2d\x78\x2e\x27\xe9\xeb\x66\x74\x69\x74\xbe\xfe\x5c\x97\xe2\x2d\xb3\x5f\x59\x6e\x02\xe7\x2d\x9b\xf4\x1f\x22\x4e\xe3\x5d\xe7\x42\xf9\xf7\x56\xa3\x4a\xaf\xd1\x8a\x0f\xb8\x68\x58\x45\xb2\x9e\xc8\xcc\x92\xcd\xd1\x77\x75\x83\xa3\xd4\xd6\x1a\x9e\x5e\x4b\xc2\x92\x96\xfe\x1d\x0f\xb1\x3f\x4a\x28\x4c\x9c\x92\x37\xa5\x5f\xfb\x2b\x91\xf9\x6c\x68\x2a\x9a\x7c\x17\x66\x25\x25\xca\x40\x23\xfa\x78\x45\x7b\x27\xe8\x81\x70\xde\xb0\xa4\xa9\x9c\x4e\x52\xe5\xe0\x87\xad\x12\x95\x2f\xed\x33\x16\x83\xfa\xd6\x53\x0d\xad\x5b\x81\x50\x6a\xa8\x15\x1b\x1e\xfc\x81\x5a\xe9\x32\xdf\x92\x5d\x7c\x39\xd0\x57\x95\x6c\x23\x57\x55\xbd\xb7\x0b\xfb\xd2\xb3\x61\x65\x26\x78\xe5\x5c\x7e\xcb\xd4\x06\x1a\xa0\xd5\x2a\x41\x86\x58\xf7\xdd\x3e\x4a\x67\x67\xa5\xca\x69\xbd\x04\x51\xad\xf6\xc2\xb1\xaa\x6b\x01\x94\x2a\xe3\x95\xa4\x07\xab\xec\xe0\x5a\xc1\x8e\x89\x36\x00\x4b\xaa\x88\x55\x6e\xbf\x6d\x9c\xaa\x06\x67\xdd\x64\xa9\x48\xa4\x2b\x42\xf9\x33\x11\xf3\x9b\x1c\x96\xaa\x07\xcf\x0a\x56\xa3\xf4\x27\x59\x5e\xa9\xab\x2d\xf6\xd6\x71\x0d\x85\x4b
\xec\xce\x57\xe5\x45\x71\x08\x67\xee\x68\x1b\x02\xa2\x82\xe3\x18\xd8\x42\xfa\x7c\x0f\xe4\x00\x29\x01\xac\x8c\xd3\x1e\xfe\xed\xec\xfb\x8e\xe1\xe5\xb9\xd2\x87\x5a\x56\x4c\x19\xb4\xc5\x49\xe0\xd4\x22\xe2\xfa\xcd\x70\x93\x05\x35\x8a\x73\x5c\xa5\x80\x80\xeb\x7b\xb6\xa4\xb0\xea\x5f\xf4\x8f\x80\x3e\xd2\x0f\xbc\xcc\x31\x66\xe9\xab\x67\xe2\xf0\xef\x4f\x80\xd5\xf2\xcc\x7a\xf4\xb5\xc1\x5b\x43\x6a\x19\xa0\x94\x87\xf4\xaa\xe2\x38\x2e\xe8\x32\x71\x51\x38\xa1\xe1\x64\x0e\x07\x2c\x17\xfd\x02\x6b\x1f\x79\x10\x79\x5e\x4a\x51\x67\x2e\x70\x2e\x6f\x0c\x37\x4f\xae\x65\xf9\xc9\x0e\x6f\xee\xaa\xb3\xd3\x1b\x00\xea\xad\x66\x02\x12\x4e\x42\xc3\xda\x20\xd2\x7b\xb7\xa9\x81\x30\xa6\xc9\x71\xa5\x02\x9a\x34\xea\xe5\x73\x2b\x09\x4c\xc8\x66\x41\x8d\x99\xbc\xc8\x96\x45\x82\x2d\x9c\xc5\x60\x11\xee\x9b\x6b\xbe\xce\x4c\x75\x9e\xf5\x2b\xbb\x55\x2d\x9b\x8d\xcf\xf3\x2e\x5f\xa5\x1c\x36\x4e\xb3\x10\xa2\xf2\x69\x75\xbf\xbd\x9b\xf8\x42\x79\xea\x74\xa7\x6c\xe1\xa7\x38\x98\xaa\xe7\x3b\x6d\x26\xb6\xb2\x8d\x93\xfc\xa4\x7c\x61\x10\x69\x3b\xaa\x70\xa9\xe8\xd3\xd8\x08\xf9\xc1\xce\x71\xb1\xef\xfc\xd8\xf2\x3a\x51\xf6\xa0\xe3\xcd\xc0\xe0\x14\x65\x7e\x8e\xc8\x6e\xa8\x3c\xe0\x23\x63\x8b\xf8\xd0\xaa\xd8\x1d\xde\x2e\x53\x89\xf9\x57\x71\x4d\xd1\xe7\xa9\x0d\x0d\xca\xee\x54\xa0\x72\x15\x45\x8e\x2e\xe6\x61\xb9\x91\xc5\xd1\x5d\x14\xf6\x59\x12\xab\x39\x1c\xd3\x10\x1d\xdc\xe4\x5f\x3f\x8c\x39\x60\x7b\x4d\x29\xc4\xed\x3f\x68\x57\x38\x78\x98\x3f\x4a\x81\x0b\x6e\xdd\xd2\x9f\x4e\x5e\x10\xd3\x49\x96\xdc\x64\x77\xa7\x8c\xad\xb6\x50\xa7\x1c\x19\x6d\x53\xbe\xe8\x82\xf6\x3f\x87\xe5\x9d\x6a\x1b\xc5\xf6\xb7\x1a\x1a\x98\xa6\x34\x86\xaf\x01\x6f\xf6\xcd\x0e\x51\xbd\xb2\x51\x90\xc8\xe3\x87\x81\xa7\xc5\xce\xeb\x04\x2b\x80\x6d\x78\xa8\xde\x9f\x10\xfe\x23\x34\xab\x6c\xdd\x09\xba\x3e\x73\xd9\x09\xa9\x81\xf2\x5c\xc0\xcf\x24\x20\xf0\x64\x37\x95\x83\xcc\x74\x81\xe7\x03\xc5\x77\x3a\xad\x83\x01\x0d\x45\x46\x51\xf4\xfb\xe5\x23\xcb\x97\x4d\xc0\xac\x5c\xc6\x63\x97\xcb\x7a\xbb\xe2\xf3\xfc\x84\xa3\xd8\x0f\xcf\xd6\x45\x62\xd0\x9f\xf2\x15\x98\x9c\xf0\x03\x0e\x3b\x25\x34\x7d\x48\x09\x7f\xca\x87\xe3\x3d\xe9\x03\x43\x47\xfd\xc4\x3f\xf8\x61\xe4\x79\x3f\xca\x4c\x1d\xdb\x15\x2c\x50\x5b\x14\xf2\x0e\x62\x34\xaa\x75\x70\x22\xf6\x1f\x7c\xca\xf6\xb2\x71\x72\x9e\x47\xa8\x6d\x8e\x1a\xe9\x32\x2a\x2e\x8c\x23\x90\x36\x34\xa2\x3c\x22\xb0\x37\xb9\xd2\x69\x98\xb6\xeb\x7e\xf8\xa6\x5b\x4c\x23\xe7\x0e\x97\xf0\xa3\x9b\x91\x7a\x7d\xaa\x67\xea\xd8\xe1\x8c\x01\xb6\xc1\xef\x14\xa3\x32\xb3\xb4\x72\x79\x2a\x8f\x68\x24\xc6\x20\xd3\x1d\xb7\xe7\x26\xa3\xc4\x47\x39\xdb\x04\x05\x30\xac\x91\x40\xd9\x59\xfe\x87\x5b\xff\x6b\xf0\xd4\x5c\xb3\xc0\xb4\xad\x7c\xe1\x9e\x10\x61\xee\x11\x20\x5a\x37\xa9\x13\x59\xce\xe5\xa7\x50\xb1\xb2\xe7\x79\x8c\x21\xb2\x51\xb0\x23\x65\x33\xaa\x5a\x1f\x88\xf5\x90\xf0\xb4\x2c\x10\xf6\x9a\x44\x49\xf3\x50\x16\xdf\x43\xa5\xb5\x74\x39\x18\xb7\x93\xcc\x66\x1e\xac\xb3\x5b\x69\x0c\x01\xec\x0d\x3a\x91\x31\x0c\xc7\xea\x47\xe8\x68\xca\xde\xf1\x19\xa3\xad\x97\xb3\xd4\xd2\x85\xbd\xd6\x23\xcc\x6f\x19\xfb\xb8\xea\xeb\x18\x4a\xd5\x09\x1f\x18\x94\x7f\x74\xc5\xb7\x91\x7e\xb8\xe2\xdf\x98\xd0\xb8\xf7\xcd\xa9\xb0\x12\xc4\x35\x1a\x64\x8b\x09\x08\x16\x51\xba\x91\x30\x67\x0c\xf5\xe3\x60\x36\xf9\x20\x7e\xc5\x00\x93\x65\x41\x70\x69\xf7\x3d\xdb\x36\x06\xd7\x4b\x09\x8d\x1c\x00\x7d\xd1\xa0\x37\x55\xe9\x0e\xde\x30\x31\xa7\x72\x1e\x2c\x17\x34\xa6\xde\x98\xc6\x43\x1f\x78\x5b\xd9\x60\xde\xf9\x7d\xf2\xa9\x39\xf8\x49\xf7\xa8\x1d\x94\x15\xa5\xbb\x22\x5d\xf2\x3e\x55\x13\x93\xfb\x94\x62\x57\x72\x1f\x8f\xbf\x01\xdd\x09\x49\x71\xc6\x2b\x12\xeb\x3e\x06\x0f\x10\x53\xf3\x9c\xaa\x39\xd4\xbd\x5c\x55\x9c\x46\xef\x94\x1c\xca\x18\x6a\x9a\xd8\xa
x12\xec\x8a\x07\x44\x28\x28\xcc\xbd\xf6\x80\x31\x9e\xee\x96\xee\x77\x12\x44\xcc\xbe\x90\x95\x8d\x04\x4b\x2c\x82\x93\xe3\xd9\x0b\x0f\xe9\x3a\x51\x92\xee\x3a\x9b\xb9\xea\xb6\xec\x00\xfb\xd6\x86\xf5\xb7\x4b\x45\x85\x41\x55\x4a\xf7\x0e\x15\xcf\x58\x46\xc7\x4f\xb2\x1e\x82\x54\x20\xa9\xb5\xfe\x03\x2c\x25\x22\xae\x70\xe3\x04\x7e\x0f\x56\x71\x23\x8f\x92\xb7\xb6\xa3\x26\x19\x8c\x36\xea\x8d\x8c\xfa\x39\x79\xb2\x49\x4b\x39\x0f\x30\xaa\xca\x31\x0e\x4a\x6a\x94\xb7\xb2\x9f\x4a\xbe\x66\xc0\xb9\xd2\x18\x0f\x3f\xcf\x1c\x9a\x6b\xa9\xce\x4f\xf7\xff\x9d\xd9\xee\xe9\x9d\x61\x55\x47\x83\x2e\x90\x90\x6a\xab\x0b\x48\x09\x45\xe9\xcb\x56\x04\x4d\xf2\x0b\xd6\x71\x09\x6d\xb5\x7c\x97\x69\xad\x21\x0f\x46\x07\x52\x3e\xaf\x88\xb0\x98\xab\x3f\x1a\xfc\x3f\x62\x20\xa9\x69\xbc\x63\x74\x9c\xb4\x67\x9a\x55\x02\x00\x33\x1b\x13\xde\x9f\x68\x80\x30\xc5\xb5\x6c\xc1\x29\x03\x32\x69\x77\x1f\xad\x20\xf7\xd2\x54\x84\xa2\xf5\x40\xcf\x49\xa2\xb2\x21\xaf\xa6\x58\x8a\x6d\x22\xcd\xed\xd3\x18\x7c\xbf\xdd\x4d\x7e\xfd\x32\xbc\x2e\x57\xf8\x0a\x61\xb4\xd3\x33\x85\xfd\x35\xbe\xdf\x82\x2c\xbe\x6c\xb0\x94\xa7\x9e\x65\x12\x71\x4d\x3d\xf4\x20\xb4\x99\xf7\xfa\x67\x23\x45\x0b\x2b\xba\xdc\x04\x9e\x91\x5d\xd3\x3e\x9c\x55\x89\x6e\xe0\xa8\xb7\x87\xb7\xf4\x1e\x96\x70\x96\xcd\xff\xbc\xb9\x67\x7c\x73\x23\xd2\xbb\x43\x0d\x03\x5e\xe8\xa0\xc3\x4e\x71\x86\x06\xdf\x0d\x41\xfc\xcf\x5b\xba\x0a\x62\x45\xec\x5f\x81\x47\x40\x08\xcb\x3c\x91\x20\xbe\x7b\xc5\x8f\x69\xa4\x0d\x6d\xf2\x1a\xcc\xd2\xf8\x85\x4a\x1a\xa1\x93\xe0\x5a\x06\xb8\xc8\xa5\x0e\x22\x5f\xda\x41\xee\x11\x60\xb2\xbd\x5d\x12\x5b\x68\xfc\x4c\x53\x4c\xed\xf7\x76\x98\xab\x41\x33\x20\x87\x0b\xaa\xaf\x32\xcb\xcb\xa1\x5e\xd8\x7e\x71\xb4\x76\x50\xf2\x46\x06\xc0\xdf\xfb\x01\xa1\xe3\x66\xd3\xce\x41\xb9\xc7\x4e\xa8\xc4\x8e\xd5\xc8\x54\xc1\x87\xa1\x3b\x97\x96\x20\x00\x03\x46\xc8\x03\x5b\xe7\xde\xb3\x57\xdd\xec\x11\x0f\x9b\x3c\x09\xa2\xed\xb9\xa1\x25\xbc\x85\x09\x17\x99\xa0\xc1\xa5\xe3\xb4\x0a\x72\x68\x51\xb4\x76\xf3\x3c\xe2\xde\x46\x39\xe7\xb5\x05\xfa\xde\x0f\x82\x06\x21\x12\xe0\x37\xc0\x96\x3f\x5b\x11\x50\xef\x4d\x78\x19\xe3\xe4\xab\x4f\x98\xa8\x81\xb6\x14\xa6\x77\xbb\x6c\x9a\xab\xca\x19\x63\x87\x17\x0a\x2d\x48\x67\x16\x59\xc1\x65\xaf\x43\x6a\xc4\x87\x64\x3c\xff\xb6\x5c\xaa\xa3\x21\x74\xd1\x3e\xd4\x56\xd3\xf2\xbf\x5c\xf2\xf8\xd1\x73\x83\x19\x13\x35\xca\xec\x77\x05\x1c\x6e\xb8\x11\x8b\x37\xc2\xf1\xb7\x8c\x15\x4c\x08\xef\x98\x6d\xa9\xd8\xc1\x71\x6e\xce\x6d\xaf\x00\xc4\x8a\xf5\x77\x3c\xdd\xa6\x13\x92\xf3\x03\x4f\x53\xba\x6a\xfc\x7b\x28\xd4\x64\x5b\x7f\xef\xa3\x22\x13\xe3\xdb\xe0\x4c\x1f\x13\x0d\x80\xf2\xf6\x6a\x47\xd8\x7c\xa1\x2b\x29\xb7\xc6\x30\xdc\xc8\xa0\xb5\xb6\xc6\x0d\x8d\x34\x90\x25\xe0\x4d\xd2\xdb\xd6\x7d\x07\x5a\x14\xd4\x51\xee\x3a\x76\xc7\x7d\xda\x30\x48\x84\x08\xf1\xa0\xa8\x5a\xe9\x9b\x52\xf7\x95\xbf\x80\xcd\xe2\x4f\x9d\x05\xac\xd9\xa2\xf3\x7d\x3e\x28\xc3\xf6\xe2\x96\x72\x56\xeb\x85\xfb\xc0\x39\x0e\xd1\x7c\xb8\x08\xdb\xc8\x0c\x60\x95\xfb\x2e\xb0\x91\xf1\xc7\x57\x15\x36\x62\xf0\x5c\x77\x6d\xaf\xe6\xab\x15\x3c\x99\x04\x77\xb4\x53\x69\x18\x1d\x61\x3b\x0e\x1c\xe5\x36\xe6\xc0\xd3\x41\x6e\x63\x0f\xcd\x13\x20\x27\x43\xd5\x7e\x75\xb1\xa4\xf8\x22\x9a\x12\x31\xf9\x85\x7f\x9f\xce\x26\x54\xd6\x6e\xbb\x3f\xfb\x71\x42\x8c\x7b\x75\x10\x69\xf7\x05\x9e\x8c\x77\x81\xa1\x51\xe3\xe5\x55\x0d\xfd\xe4\x66\xb6\x56\x46\x33\x50\x95\x70\xad\xf3\x27\x59\x86\x7f\x8a\xe2\xcc\x5c\x7d\x96\x1e\x7c\x2e\xe6\x16\x6f\x74\xe4\x81\x4d\x7e\x3f\x81\x7c\xac\x97\x10\x9a\xea\x5c\xa8\x02\x7f\x05\x98\x9d\xc2\x78\x97\x11\xe0\x82\x3b\xcc\x3f\x71\x04\x97\xb9\x65\x8d\x86\x0c\xcc\x50\xb8\xf3\x86\xe0\xa3\xae\x1a\xab\xba\x26\xa4\xd6\x76\x81\x1e\xc3\xdc\x8b\x40\xe6
\x58\x48\x91\x73\x92\x10\x4f\x98\xed\xdc\xba\x71\x74\x03\xd4\x51\xa3\xd2\x04\x84\x01\x27\x95\xde\x4b\x1c\xdd\x3a\x7e\x52\x2f\xad\x97\xb6\x6b\x76\xe2\x8b\x37\xec\x69\xf6\x2b\x96\x6f\x60\xa6\x1c\xfe\x6c\xf0\xb6\x8e\x93\x36\x83\xb0\x9d\x15\x38\xf2\x18\x0a\xb5\xa1\x31\x3d\xa3\xf3\xc4\x0a\x41\x2f\x61\x33\x37\xa0\x33\x8b\x46\xcb\xaf\xa6\x5a\xe0\x91\xe3\x6f\x4b\x8f\x0a\xd0\xd4\xd7\x06\x29\x6c\x4d\x37\xdb\x68\x4d\x22\xb0\x58\x10\x72\x00\x58\x70\xc4\x88\xab\x8f\xec\xe3\x9b\xda\xe1\xb2\xf7\xcd\x8c\x2b\xe2\x12\xb9\x69\xc0\x32\xed\xb7\xbc\x14\xe2\x17\x99\xd5\x1d\xf9\x43\x40\xb8\x9a\x4d\x72\xb8\x7c\x82\x84\xe2\x25\x45\x5d\x8b\x7b\xce\xe0\x2d\x6a\xa8\x11\xb7\x8d\x72\xfb\xb3\x9c\x7a\x51\x5d\xb8\xd0\x96\xa5\x14\x8e\x3f\xa5\x50\xa3\xed\x96\x79\xf3\xa9\xbe\x2b\xc1\xfa\xd6\xcb\x35\xa5\xeb\x54\xbc\x2c\x0d\x2d\x2e\xf7\x47\x00\x2c\xf9\x9c\x95\x66\xba\x7d\x49\xd1\xa0\xb8\x96\x4c\x0a\x4b\x94\x10\xd8\xe6\x1f\xc4\xf5\xcf\xf2\xf3\xb2\x8e\xc5\x31\x91\x57\xef\x05\x81\xe5\x22\x30\x12\xb9\x2c\xa1\x8e\xf5\x41\xb2\x28\x26\x58\x4b\x51\x1d\x6d\x59\x68\x08\x68\xb7\x9f\x03\x24\x6f\xec\x97\x85\xbd\x9e\xaf\x45\x85\xd8\x25\x7f\x76\x27\x86\xee\x6c\x3c\x7d\x26\xc7\xe1\xc8\x9a\x4d\x61\xc6\x4f\x50\x26\xe6\xf3\xfe\xbb\x2e\xd9\x7d\x93\x74\x0e\xe8\x9c\x85\xe8\x24\xa6\x4c\xb8\xcd\x2d\x5e\xef\x27\xbb\xee\x52\xb0\x73\x47\x43\x04\x28\x8e\xeb\xa7\x84\x5f\x84\x4c\xef\x16\x28\x57\x14\xc1\x3c\xa1\x37\xa7\xcf\x00\x02\x83\x8b\xa8\xae\x3d\x67\x29\xd1\x4e\xf4\xe7\x4a\x0f\xc4\x89\x9b\x11\xbe\xa4\x0f\xa0\x33\xc9\xb1\xf4\x70\x6f\x27\xa1\x3b\x37\x2e\x55\xdb\xac\xdc\x67\x61\x0e\xf7\x94\xae\xac\x70\xa0\xae\x00\x3f\x9f\x31\xdc\xa0\x6a\xb5\xfb\xf6\x45\xb7\xc5\xb9\xea\xb4\xce\xa7\x27\x9a\x42\x7c\x22\x71\x14\x5c\xb2\xd8\x0c\xff\x41\xa5\xba\x5e\x0f\x31\x3c\x94\x4f\x45\x05\x72\xe2\xf4\x61\xa4\x85\xe2\xc5\xb0\x63\x4e\x33\xa3\xb7\xdb\x5f\x8f\x50\xa4\x13\x2e\x14\x61\xd5\x8d\x2b\x61\x48\x58\x3a\x4c\x73\x6e\x64\x22\x68\x08\x3a\x09\xa1\x4b\xc7\x9d\xdd\x96\x05\x91\x9f\xce\x58\xb9\x0c\x23\xbf\x19\xc4\x7d\xdf\xda\x5d\xf3\x61\xd0\x71\x56\x3c\x8f\x73\x21\xff\x15\x88\xd8\x00\x8f\xcc\xe3\x6b\xa0\x22\xcd\x9c\x31\x6d\x1b\xe5\xbb\xb8\xb9\x63\xd4\x83\x28\x51\xcb\xe8\x13\x0f\xd1\x9a\xed\xec\xe0\x00\x5d\x53\xf7\x66\x1c\x3b\x85\xfa\x59\x2d\xd4\x0a\xbb\x45\xc0\xd6\x13\xfa\x31\xb7\x7d\x0a\x05\x67\x3c\xf9\xc2\x8c\x56\x7d\x0d\xc4\xa9\xe5\x34\x71\x16\x95\xca\x66\x53\xf1\x06\xfa\x32\x7e\xc7\x82\x99\xda\xfd\x7f\xc6\xbb\x26\x68\xf6\x48\x37\xcf\x6e\xa4\x6b\x0f\x51\xbc\xdf\xb8\x85\xe7\xd4\x71\x8e\x0c\x28\x28\xa1\xa7\x2d\xbd\xa8\xf5\x3d\x30\x5c\x11\xc2\xf1\xd2\x24\x72\xba\x5a\x87\x92\x4e\xa7\x33\x09\x3d\x93\xf3\x58\x58\x4e\xeb\x76\xc4\xb2\x98\x2c\x9f\x59\xfe\x84\xd8\x8c\x5c\x1b\x3a\x76\xda\xc3\x0a\xfd\x24\xde\x19\xf1\xa0\x74\x88\xb4\xf1\xb9\x0f\x30\x24\x0a\x33\xc2\xd1\x4d\x50\x45\xe1\x8a\x16\xee\x63\x02\x46\x01\x1e\x5f\x02\x15\x5a\x45\xa0\x3c\x8e\x1b\x6b\x1a\x25\x81\xed\xfa\x78\x7c\xb9\x4f\xd2\x4b\x9e\x2c\xef\x60\xb1\x04\x90\xed\xa6\xa4\xb0\x8e\x9c\xc7\x8f\x10\x59\x53\xee\x7d\x78\x76\x44\x27\xd1\xa5\x28\xa2\xf3\xed\xe9\x73\x85\xfa\x1e\x70\x49\x49\xc0\xd8\xa4\x39\x24\xd4\x4c\x5a\x1a\xc7\x43\xe3\x8a\xfc\xe6\x17\xae\x3f\x65\xee\x96\xa2\x25\x86\xd7\xc2\x04\xc6\xfa\xf9\x57\x05\x56\xfa\x26\x23\xa5\xb2\xa1\x4d\x2e\x2c\x36\x82\x5c\xaa\x3e\x58\x58\x76\x5f\xab\x79\x65\xdc\xd4\x8e\x57\x78\x25\xf7\xd1\x75\x0d\x3c\xdf\x52\x72\x8f\x45\xd8\x5f\x15\x61\x7f\x35\x59\xfb\x2d\x57\xa0\x1f\x69\x39\xa9\x98\xee\x2d\xbf\xfc\xf1\x7e\xc7\x70\x2b\xfa\x0d\x7b\x42\x60\x2d\x5d\x8b\xf3\x37\x2b\xbf\xfe\x4d\x4b\xe0\x30\x21\xc7\x98\xf6\xe4\x7d\xd2\x6f\xa8\xdf\xfe\x12\x6e\x00\xb5\x45\xff\x09\x6c\xba\x76\x94\x55\x02\x79\x00\xa
6\x3a\x56\x11\x65\xa3\x15\x3e\x4c\xb7\x5b\xe5\xa6\x42\xf8\xb8\x78\xe5\x6b\x09\xbd\x3b\x61\x2a\xa5\xa7\xc7\xe5\x66\x37\x5a\x79\x50\x45\xef\x79\xfb\x65\x05\x38\x52\x9f\xe3\x86\xcf\x01\x9b\x1c\x62\x39\xdf\xc3\x50\x85\x02\x98\x88\xa8\x46\x0b\x96\x93\x50\xc8\x50\x5c\x96\xb0\xde\x5f\x88\xec\xe5\x7f\xd5\xc8\xd2\x7d\x32\x3d\x02\xca\x38\x7d\x79\x6e\xe6\xff\x9c\x8f\x75\x10\xf1\x16\xe0\x01\xe6\xd8\xdf\x55\x9e\x8c\x3b\x66\x1e\xb7\xd4\x1f\xea\x39\x9a\x6c\xa1\xf1\x04\xb9\xe8\x96\xec\x73\x6b\x84\x2a\xa0\x24\x7f\xcd\xed\x4b\xd3\xcf\x8d\xa8\xb4\x38\xa3\x91\xd5\x75\xfb\xde\x51\xfc\xb5\x16\xc6\x48\x7e\xc1\x74\x63\xd7\xec\x84\xca\x26\xbb\x4a\x10\xe8\x75\xf9\xb1\x9c\x39\xba\x88\x3e\x80\x97\x85\xea\x3b\xad\x9a\x72\x8c\xb4\xac\x5b\x0a\xa5\x03\xbc\x23\xf4\x68\x02\x74\x39\x8d\xb5\x73\xb7\x66\x52\x95\x64\xa0\x71\xc6\x70\xea\xda\x0f\x67\x17\xeb\x18\x83\x13\x59\x36\x59\x5a\x96\xb7\x70\x98\x8b\x1c\x29\x86\x92\x7a\x2a\xe7\xcd\x9c\xe6\x0c\x63\x70\x84\x06\x54\xb7\x06\x2e\x30\x0f\x2a\x11\x0d\x7e\x39\x85\xd4\x1b\x60\xa3\x5e\x4d\xc4\xa0\xcb\x36\xea\x6b\x32\x6b\xf6\x83\xbb\x31\xca\x82\xd3\xae\x8c\x80\x9e\x79\xdc\x26\x19\x32\xc1\xd5\x43\x03\xbd\xaf\x77\xee\x64\x7c\xba\x0d\x27\xab\xec\x08\x3b\xd9\xbd\x0d\xd5\x12\x01\x4e\x9c\xa3\x44\xfe\xec\x5d\x6e\x7a\x26\x90\xe4\x97\xf8\x2b\x1b\x71\xb6\x96\x0d\x36\xb5\x0c\x0f\x45\x9b\x58\xdc\x1e\xd2\x1a\x57\xd8\x1e\x45\xca\x0c\x52\x46\x90\x7a\x81\x18\xa9\xf8\xaa\xdd\xfd\x1c\xc7\x06\xb0\x33\xb1\xaf\x3f\x0f\x61\x31\x59\xd7\xed\xcf\xd9\xc7\x6f\xf8\x73\x8a\x42\x63\x61\x97\xb1\x2a\x8b\xd8\x06\x6d\x5f\xae\xe8\x37\x69\x88\xf5\x5b\xa0\x38\xc7\x9d\xed\xb0\x88\x87\x17\x27\xf7\xa8\x73\x71\x91\xc1\xf7\x28\x57\xcc\x8d\xe5\xc3\x7e\xab\x34\x07\x70\x48\xec\x21\xf0\xef\x29\x42\xe7\x58\xf0\x54\xa0\x3a\x4a\xbf\x88\x10\xda\x87\x52\xa3\xcd\xc6\x1c\x05\x65\xdb\x79\x0d\xc8\x66\x0a\x50\x45\x3f\x62\xb9\x93\x7b\xb1\xe4\x1b\x54\x8f\xde\x06\x45\x4c\xf4\x10\x66\x4a\x6d\xf9\xee\xec\x12\xca\x81\xd7\xf4\xc5\x1c\x3f\xcf\x74\x75\x5e\x20\xeb\xb9\xac\xdd\xfb\x64\x2e\x11\xba\x09\x90\xa6\x8e\x5a\xfa\xa6\x5a\x79\xf4\x90\x40\xff\x0e\x85\xd9\x94\x7f\xe6\x49\xe8\xd3\x13\x22\x62\x0c\xca\xa7\x82\x31\x0b\x0b\xe4\x16\xfc\x1c\x7e\x49\x9e\x5b\xe0\x09\x37\xfa\xd4\x3d\xc1\x20\xbe\x84\xee\x04\x39\x4d\x30\x1a\x9f\xf1\xf2\x5b\xad\xdf\xea\xb3\x4c\x0c\x80\xbf\xdc\x69\xfd\x09\x50\x88\x12\xb5\x67\x11\xfc\x12\xa8\x9e\x8b\xd1\x5a\xf1\x8d\xd0\x3a\xfc\xd1\xeb\xb2\x6c\x79\x4e\x4a\x47\x9e\xd5\xef\xd3\xe6\xa5\x4f\xfa\x4f\x54\x58\xa6\x50\x97\x0e\xcc\xdc\xc6\x90\x64\xfb\xda\x21\x8c\x26\xda\xf5\xb5\x34\x09\xb8\x49\x79\x22\x30\x1b\x32\x3f\x55\x57\xce\x61\x9c\x0f\x47\x68\x6a\x01\xfb\xe9\x75\x14\x47\x60\x52\x1e\x91\x9f\xeb\xb1\xfe\xc0\xf4\x47\xa8\xe6\x47\x5e\xab\xe9\xa8\x18\xcd\x3d\xb2\x64\x0f\x43\x1b\xfb\x99\xb6\x80\x6b\x12\xc4\x29\xd5\x49\xfc\x82\x0c\xa6\x3d\x72\xe4\x67\xf2\x34\x2d\xa4\x15\xf5\xa7\xc4\xa2\x17\x80\x9d\x59\x2b\xbe\x37\xae\x53\xd0\x48\xfa\x93\x6a\xd6\x17\xc8\x4e\x1e\xd9\xda\xe5\xff\x63\xb4\xc5\x08\x01\x7b\x52\x08\x53\xeb\x83\xdb\xf4\xbe\x11\xa2\x69\xfe\x62\xf3\x64\x09\xa8\xdb\xe4\xd3\xf1\x8f\xc2\x76\xf9\x02\xb6\x1c\x4d\x77\xa6\x6e\xd2\x70\x64\xe1\xcd\xe5\x4d\xb0\x6a\x42\x70\xe9\xff\x54\xf1\x42\x30\xc9\x5b\x0a\x0f\xaf\x75\xc4\x5f\x9e\xe9\xae\xf1\xc8\x33\x42\xfd\x4d\x7a\x8f\x70\x73\xe0\x17\xa1\xf8\x9a\x15\x02\x5a\x4d\x80\x26\x00\xa2\x5b\x75\x30\x0d\xf3\x95\x0c\x21\xb9\x41\x4b\xf1\x20\x1c\x89\x7f\xe0\x30\x50\xbe\xb1\xef\x65\xc1\x92\x3b\x69\xbb\xa1\x00\x52\xe2\x0a\x57\x32\x61\xd8\x83\xe2\x6b\xe4\xfe\x12\xb5\xb4\xee\xda\x31\x6a\x13\xaa\x29\x8f\xcc\x93\x6e\xa6\xd0\x65\x16\x54\x0f\x66\x49\x42\x1b\x5a\x96\x03\x1e\xdc\xc3\xf1\x9c\x0b\x67\x6c\x44\x76\x0a\x
cf\x09\xce\x2a\x51\x9c\xd6\xd3\x07\xfa\xa4\x83\x7c\x95\x0c\xc4\x2c\xee\x29\x4d\x80\x28\xa0\x7d\xe5\xe3\x8f\x1b\x64\xce\xa0\xdc\xaf\x50\x1f\x8a\x1a\x56\x61\xe4\x3f\xed\x36\xfe\xf3\x46\xbc\xac\x4a\x8f\x05\xac\x8b\x38\x24\x54\x32\x7a\x10\x96\x04\x9c\x78\x7f\xdc\xa2\x91\xa1\x04\x89\x25\xd6\xe9\xd0\x51\x3e\x73\x8a\x04\x02\x90\x4e\x64\x8b\x0a\x87\xb4\xdd\x82\x92\x09\x1d\x77\x80\x0f\xc7\x4d\xc3\x42\x67\x87\xa3\xa0\xc7\xef\x59\xfa\x81\x1f\x1b\x95\x38\x22\x40\xe3\xc3\xc1\x29\x5c\x3c\xc9\xb2\x78\xe6\x91\x51\xda\xeb\x46\x27\x83\x22\x71\xda\x46\x02\x6f\x69\xa7\xca\x8f\x71\xda\x6d\x84\x36\xeb\x8d\xab\x6a\x39\x0e\xe0\x5d\x1b\x21\x95\x75\x36\x43\xc5\x4a\xee\x84\x11\x55\xf8\xa6\xee\x79\xbb\x41\xa0\x2b\x37\x04\x70\xae\x3c\xe0\xdb\xc0\x0e\xad\xf4\x0e\xc5\x4a\xe1\x43\xf9\x99\x76\x77\x2f\x81\x4f\xb5\x3e\xbf\xce\xf3\xc2\xca\x55\x2f\x79\x41\x3a\xc7\xa7\x3d\x75\x07\xfb\xec\xad\xd3\x3e\xbf\x43\x74\x46\x79\x6d\x8b\xfe\x12\x87\x8c\x4a\x1c\x13\xd5\xe4\x5d\x21\x14\xb8\xf7\x1a\xa6\x1b\x56\xda\xd6\x0a\x52\xc4\x1e\x36\x02\xb1\xa6\xae\x29\x13\x21\xa4\x76\x1f\x9f\x1a\x7e\xbb\xe4\xcb\x71\xbc\x59\x6d\x40\x15\xfa\x98\x49\x76\xa0\x4a\x19\xf7\x2a\x6d\x75\x96\x2a\x7a\xb5\xa6\xfb\xf1\x9e\xa5\xbd\x00\xd6\xc6\x2b\xd6\x5f\x41\x06\x9f\xe5\x02\x4b\x43\xdf\x11\x20\x95\x23\x72\xd1\x95\x9b\xb9\x77\x1d\x25\x08\x79\x92\x47\xdd\x9e\x30\x5f\x58\x7c\x61\xa9\xdd\x4e\x55\xd4\x2c\x12\x2a\x74\x13\x08\xbf\x85\xa2\x71\x00\xe4\x10\x6b\x8a\xd9\x6e\xe1\x9d\x89\xb4\x19\xde\x60\x94\xd4\x8c\x15\x46\x88\x4d\x54\x20\xeb\xd3\x95\x57\x10\x17\xfa\x3a\x0b\xb0\x9a\xda\x43\x8d\x3d\x07\x6a\xf4\xac\x80\xca\x4d\xb2\x7c\x00\xb0\xe6\xa4\xa0\xd9\xc2\xda\x76\xeb\x12\xb1\xa0\xb0\x10\x96\x90\xaf\xa3\x57\xaf\xc0\x9f\x1e\x55\xb2\xce\x1b\x15\xed\x8f\x56\x3c\x82\x1e\xcf\x6b\x78\xe3\x63\x6a\x7f\x02\x63\x13\xf0\xd4\x6f\xb4\xb3\x37\xc7\xf5\xf6\xa8\xb5\x9b\x6d\x0b\xa7\xdb\x98\x1b\x5b\xf8\x5b\x34\x5f\xb4\x58\xb3\x99\x87\xb0\xae\x31\xe5\x77\xe7\x5a\xbf\x6c\x6c\xb4\x50\x99\xd2\x09\xb4\x45\x97\x65\x69\x25\x21\x6a\x83\x10\x15\x4b\xdd\xc3\x3a\x00\xae\xeb\xd7\xa7\xfd\x87\x59\xce\x6c\x64\xdc\xab\x58\xe8\xfc\x1f\x18\xec\x71\xe8\xf9\x14\x0c\x36\x7f\x35\x51\x9f\x51\x7e\xdd\x86\x22\x76\x7b\xff\x39\x3d\xa4\xf8\x84\x7d\xa5\xcd\x61\x5b\xaa\x3e\xab\x75\x90\xc1\x0e\x69\xa7\xb3\xaa\xe4\x43\x94\xd4\x93\xdf\xd6\xa6\x3b\xcb\x93\x48\x27\x8b\x93\x30\xab\xf0\xc8\xe6\x2f\x3e\x39\x88\x8f\x3b\x09\xf9\x73\xcd\x52\x62\x4b\x5d\xbd\x54\x97\xd8\x85\x4a\x39\xda\xb4\xe3\xd6\x9e\x73\x5b\x5b\x18\x22\x28\x37\x3b\xda\x6c\xf4\xd8\x08\x62\x3d\x8f\xbf\x95\x8f\x8f\x85\x04\x28\x3a\x6a\x36\xe0\xae\xa7\xaa\x02\x14\x6b\xd2\x2c\xad\x1d\xc6\x7a\x5b\x02\x1d\x9b\x14\xa4\xbe\x08\x5f\xfb\xb5\x08\xe9\x86\xbb\x77\x98\x60\x1c\x16\xac\xca\x36\xd4\xc3\x11\xe8\x4f\xc0\x02\xeb\x3f\x06\x59\xcf\x73\xba\x32\x35\x83\x5a\x14\x3b\x88\x44\xdd\xc8\xbf\xe7\xfa\x59\xe8\xb2\x26\x9f\xf6\x48\x23\xd5\x81\xb1\x40\xc6\xf2\xa5\x90\x09\xa5\xa9\x6e\xc7\xa8\x8d\x0b\x67\xd9\x32\x11\xb4\xa8\x0e\x4a\xce\x14\x20\x6f\xf0\x91\x5a\x36\x3b\x0a\x5b\x9e\x7f\x7f\x7b\x72\x03\xdb\x93\x2a\xdb\xf0\xd4\x0d\xd1\xd2\x45\x3f\x79\x73\x4a\xd3\xd2\xe9\x2f\x32\x6b\x05\xa3\x7d\x49\x92\x11\x0d\xae\x3c\x6c\x73\x8b\xf6\xd5\x65\x76\x38\xe7\x6a\xce\x4c\x2d\x37\xce\xec\x1d\xbb\xca\x9d\xc9\xbe\x48\xb2\xa7\xe5\x6a\x6b\xbd\xba\xf1\xe1\x80\x6f\xc4\xce\x56\x29\x45\x53\xa8\x64\xe4\x61\xd9\x8f\xff\x62\x5b\x6b\xab\xa7\xe5\x6e\x38\x79\x5a\x44\x9d\x20\xc0\x5c\x5d\x9c\x36\xb0\xae\xa5\x3b\x6c\xac\xa3\x52\x1c\x56\xb1\x0f\xc2\x24\xdc\x77\x57\x70\x57\x47\xea\x8e\xb0\xe4\x61\x7e\x60\x8f\x31\xbc\x0f\xf9\x55\x28\x78\xb2\x5f\x69\xc6\xbe\x29\xb2\xf5\xcd\xd4\x46\x14\x06\x95\x6e\x98\x9e\x13\x6f\x81\x38\xb5\
x7e\x25\xf9\xec\xa7\xb1\xac\x97\xe0\xde\x3c\x94\x22\x2c\x6b\xe5\x50\x37\x90\x8e\x89\x1f\x45\xb1\x87\xd4\x64\xf7\x90\x40\xd2\xdc\xad\x74\xf1\x6a\x72\x17\xea\xa4\xba\xae\x09\xeb\xda\x15\x9e\x6b\x37\x71\xc5\xb8\x86\x4c\xe9\x26\x9d\x49\x44\x99\x93\xd8\x26\x20\xdf\x80\xb8\xca\xd9\x77\x9c\x84\x43\x4e\x82\x26\x3b\x94\x35\x35\x4d\x88\x99\xb6\xa2\x24\xa7\xc8\xb7\xec\xf7\x16\x5c\x1f\x85\x26\x56\xb6\x13\xf2\x38\x07\x75\x9e\x9c\xe1\xc1\x0e\xf7\x46\x67\xde\xcc\x0e\xd7\x3d\xd5\xd4\xd3\x71\xe9\x47\xb8\x80\xa1\x96\x8b\xd5\xba\x74\x1f\xfa\xe5\x69\xe4\x93\xe0\x31\x1c\x7b\x21\x05\xdd\xea\xb4\x36\xa4\xdf\x4e\x4c\x85\xaf\x00\xef\x45\x44\x29\x7f\x01\x50\x37\x7b\x9c\x08\x80\xb5\x59\x08\x95\x52\xb1\x94\x4f\xc4\xe7\xea\x6b\xed\x84\x7c\xae\x7c\x93\xfe\x52\x38\x8b\x2e\x84\xe8\x2e\xa8\x27\x72\x67\x40\x70\xec\x59\xdd\x24\xa8\x02\x42\x41\x91\x52\x02\x63\x1e\x31\xa3\x6c\xe6\xa9\x70\x28\x9c\x58\xfe\xd1\x09\x14\x3b\xf2\x10\xac\x04\x52\x53\x12\xe5\xd0\x40\x97\x3a\x5d\x2a\xc9\x81\xf5\x6e\xf1\x52\xcd\x96\xa8\xa9\xfb\x47\x82\xbc\x8e\x12\x57\x05\x96\xde\x0f\x18\xa1\x36\x41\x87\xe4\x11\xda\x98\x99\x91\xd4\x25\xd2\x10\xff\xbe\x1b\x5f\xf1\xc3\xc1\xb6\x87\xaa\xf3\xee\xcd\xb0\x94\x00\xcb\x3e\xfb\x81\x7f\xa7\xfb\x02\xd7\x1f\x28\x94\x59\x11\x67\x4b\x94\x7c\x67\x07\xad\x04\xb7\xf9\x6d\xc3\x7e\x16\xa8\xda\x42\xcc\x5a\xa2\x0f\x04\x3c\x80\xfc\xf7\x73\x9c\xd7\x97\x39\x74\xdb\x3a\x01\x4f\x16\xc2\xf6\xf8\xcc\x95\x34\x98\x1a\xf5\x84\x09\xa6\xdd\x55\xa3\x15\xaa\x63\x84\xe2\x76\x4e\xfa\x7e\x10\x8e\x1b\xaa\xac\x7f\x98\x92\xfd\x31\xff\xb6\x3c\x4d\x4e\xf8\x7f\x79\xe2\xb5\xb2\xc0\xab\x0c\x17\x47\x8a\x2b\xbf\x7b\xfa\xf8\x5e\xd9\xd9\x0f\x57\xde\x5f\x61\x45\xcc\x32\xd1\x95\x29\x18\x7e\xe5\xe4\x5f\x71\xa5\xf9\x74\x86\xd1\x45\x45\xbd\xcf\x95\x84\x39\xff\xec\xab\x63\x6c\xb7\xb1\x5a\x63\x7d\x90\x51\x4c\x21\xe4\x7a\x4e\x50\x27\x25\x7f\xdb\xad\x8b\xab\xe8\x1d\x30\xbf\x9a\x89\x7d\xc6\x65\x91\x15\xf1\xc2\x81\x18\xbd\x92\xa0\x9c\x40\x91\x55\x20\xfc\x67\x88\xf5\x0b\x07\xfe\xca\x9e\xe3\x03\xa5\x2b\x87\xec\xda\xd8\x41\x7f\x45\x8b\xb2\xfc\x90\x2d\x9a\xf4\x3d\x08\x1b\x2f\xdb\x78\x2b\x85\x16\xab\x2f\x4a\x85\x58\x2c\xf5\xe9\xf9\xb7\xe4\xfc\xd2\x22\x96\x85\xa1\x34\x9b\x93\x90\x25\xae\x05\x66\x13\x13\xfe\x4e\xb4\x3b\x0d\x94\xf0\x3e\x84\xa8\xf4\xc7\x9e\x72\x29\xbc\xb2\xa8\x68\x33\x58\x75\xde\x88\x74\xfc\xf4\xe2\x43\xfe\x91\xe3\xe2\xc7\xee\x4f\xc1\x89\xed\x22\x7a\x31\x00\x4e\x85\x6b\x10\xf0\xdf\xa3\x4c\x73\x09\xa3\x20\x94\x18\x7d\xbc\xa2\xde\xa7\x1c\x99\x8d\x2e\x97\x23\xad\x81\x4e\xbe\x61\xb1\xc0\x55\xf9\x11\xd9\x26\x96\x44\x1e\xdb\xdf\x43\x74\xb1\x3d\xc0\x61\xa1\xf9\xbe\xeb\xcf\xef\xe4\xc8\xe9\x77\xd2\xd4\xfd\x2d\xfa\xa4\x6d\x6d\x36\x2f\xe1\x0a\xb7\x4a\x3e\xc2\x2d\x79\x2d\x44\x2b\x0b\x2c\x0f\xd0\x31\xa7\xed\xe7\x3a\x94\x69\x8f\xa9\x99\xb7\x2a\x02\xd9\x75\x71\xa3\x71\xee\x01\x42\xa6\x77\xab\x4d\xd1\xaf\xef\x5a\xc1\xb9\x6c\x60\x3f\xd5\x46\xf6\xc9\xe1\x5f\xc5\x12\x3b\xf8\xa3\xfd\x2a\xc9\x9f\xcb\x9c\x76\x82\x96\xa1\x2d\x35\xc4\x17\x7d\x99\x2e\xb3\x8d\xca\xc4\x56\x05\x5f\x7b\x1b\x93\xf9\xfd\x55\x0a\xf6\x21\x54\xb2\xc8\x52\x1e\x6f\xe9\xcb\x98\x59\x75\x31\xf7\x53\x12\x33\xea\x5c\xa0\xbe\xee\xfd\x8b\x85\xa3\xc4\x66\x9b\x31\xc5\x9f\x49\x90\xea\x19\x9d\x16\xf0\xc7\xe1\x90\x7a\x54\x81\x2c\x09\xbe\x0a\x0a\x3c\xa6\x8e\x81\x3d\x2b\xc1\x48\x45\x65\x66\xa0\x0c\x0b\x96\x4f\x46\x96\x5b\x0f\xb2\xfa\x78\x00\x4c\x96\x27\x25\x18\x54\x76\x2d\x61\x07\x81\xe6\x12\x7a\xb0\x61\xb3\x49\x14\x78\xbc\xb9\xab\x91\xfd\x71\x45\xdc\x8a\x03\x8b\x49\x80\x06\x38\x70\xd8\xfe\xfa\x4b\xdb\x21\xc0\xb8\xf6\x5b\x6d\x01\x44\x4e\xd5\xf4\x58\xb0\x5d\x14\x9d\xf9\x55\x72\x44\xf6\xd0\xfd\xdb\x6f\x04\x3b\x14\x5f\x17\x39
\xd0\x49\xde\x95\x7f\x31\x70\xc8\x52\x0d\x0a\x60\xb2\xb0\xc7\xdb\x36\x2a\xa2\xd2\x87\x6d\x9f\xbc\x9b\x80\x4b\x06\x1c\xf5\x21\x60\xf2\x40\x79\x14\xc5\x52\x9e\xad\x28\x65\xe9\xbd\x46\xdd\xf5\x08\x89\x05\x92\xbf\x15\xe4\xa8\x02\x1d\x29\xfc\xd4\x0f\x49\x98\x1d\x72\x65\x39\xbc\x28\xef\xc1\x2b\x4b\xc9\xd5\x8b\xd3\x9e\xb3\x1c\xa9\x83\xc0\x9a\x7f\x03\x60\x3f\x0d\x87\xd2\xfd\x72\xc9\x3f\xec\x76\x88\xb2\x20\xcc\x58\xb7\x25\x6c\xb9\xba\x0f\x9f\x9a\xb5\x3b\xc0\x61\x67\xf0\xfc\x82\x2d\x63\x59\xbd\xb6\x06\xfb\x56\xf6\x3e\xbb\x3d\x6c\x2b\xb7\x42\xb5\x52\xcf\x55\x1f\x31\x87\x19\xbf\xe5\xa5\x27\x37\xeb\x20\x39\xd7\x09\x9e\x64\x81\x8c\x9e\x9e\x14\x71\xa5\x29\x32\x60\xcf\xa1\x2a\xe9\x6b\x73\xc2\x52\x37\xb4\x4f\xad\x2d\xe1\x85\xcf\xc6\xbe\xa7\x8f\xbd\xdd\x49\x90\xf0\x55\xe1\xc9\x28\x01\xf2\x55\x17\x9f\x20\x2e\xd9\x86\xe4\x1e\xa9\xc3\xae\x56\xae\x08\xe8\xe4\xe5\x68\xa9\xbf\xaa\xa5\xc3\xca\x76\x1d\x81\xad\x47\x2b\x3b\x19\x94\x0d\x51\x5f\x43\x30\x36\xbe\x1c\xf8\xe7\xa4\x63\xaa\x25\xf2\x91\x89\xca\x73\x3a\x09\xb1\x9c\xe8\xd6\xec\xde\x5f\xb5\x94\x66\xed\x95\x6a\x3c\x6a\x38\xd5\x2e\x5a\x42\xe5\x5a\xb0\x06\x90\x9a\xf2\x44\xda\x9d\x5e\x55\x4a\xd2\xa6\x2f\x67\x18\x54\x83\x7b\xa7\xc8\x72\x00\xe8\xf1\x6d\xc0\xc4\xbe\x6c\x7a\x77\xcd\x82\x61\x2f\x1d\x9c\x1c\x69\x5e\x6c\xf7\xdc\x87\x6b\x17\xee\x25\xa6\xf9\x85\xfb\xde\xf0\x13\x51\xf3\x39\x2a\x10\x5b\x0a\x5e\x40\x5f\xcb\x91\x78\x3d\x79\x46\xd6\x85\xfb\x7a\x0b\x02\xa2\xa7\x37\xda\x15\xec\xab\x9d\x67\x52\xb4\x45\xa5\xc5\x12\x92\x71\x09\xf7\x6a\xaf\xa5\x85\xaa\xad\x53\x37\xfd\x84\xd6\xdb\x0a\xdb\x7b\x07\x1e\xe0\x52\xf1\x7e\x3e\x28\xc1\x06\x40\x5b\x28\xdf\xb2\x9c\xdf\x6a\xb6\x40\x66\x0b\x7c\xdd\xd6\xe1\x67\x5f\xce\xea\x5b\x5d\x3e\x22\x0d\x39\xf4\xba\xb8\xe6\x32\x61\xd9\xa3\x0c\xc2\x97\xf4\xf4\xdb\x7a\x06\x59\xd5\x25\x4a\xbd\x35\x4a\xfd\x77\x3a\x43\xfd\xd7\xa2\x95\x5e\xd4\xf2\x5e\xdd\x27\xda\x0e\xbb\x66\x2d\x46\x73\xfe\x2d\x76\xf2\xdc\x2b\x74\x3f\x14\xa0\x08\xed\x7f\x5d\x5d\xe8\x71\x82\x59\xa9\x5a\x83\x43\xb4\xbf\x4f\xed\x95\x88\x43\xd8\xc7\x7f\x3c\x80\x54\x6f\x58\x61\xb3\x59\xbb\x28\x86\x1e\x2c\x7d\xd5\xb5\x75\x99\x1e\x41\xbd\x5d\xbb\x96\x08\xef\xfe\x29\x65\x9c\x7d\xa4\xec\x17\x9e\xbd\xb4\x51\x16\xb7\x93\x65\xea\xee\xc6\xfd\x1e\x34\xf5\xc1\x07\xd1\xf7\x22\xf2\xfb\xa4\x8c\xad\xc3\x87\xaa\xbf\xb4\xf5\xda\xc8\xb2\x74\xf5\x07\x1b\xa4\xc2\x4b\xbb\x7c\xbe\x14\x8f\x07\xac\x36\xf1\x1c\x87\xff\x4c\x12\x5d\xed\x16\xa3\x3f\xe3\x75\xa2\x04\x8c\x85\xa5\xaa\xf5\x0e\x63\xa2\xef\x1a\xe0\xa5\xf1\xdb\x0d\x49\x46\xdb\x54\x7b\xd9\xb1\xa0\xdf\x8d\x2b\x07\x7c\x6f\xc9\xe1\xda\xa5\xeb\x73\x84\x82\xcc\xed\x30\x7a\x09\xd6\xd6\x0e\xfa\x7e\x40\xe5\x73\x0e\x4f\xdd\x3d\x77\x51\xc3\xdb\x73\x68\xdf\xc3\x75\x20\x47\x72\xb7\xdf\x3e\xe7\xf0\x5c\xb1\xcb\x72\xba\x85\x04\xfd\xba\x3d\x28\x9f\x1b\x81\x7e\xd6\x83\x0b\xa4\x4a\x65\xd0\x6d\x5d\xf1\xf6\x07\x29\x56\x75\xe5\xc8\x23\x42\xed\xb9\x1d\xe6\x53\x6a\xde\x50\x89\x8a\xbd\x49\xfa\xb8\x69\x5a\x63\x47\x85\xa3\x3f\x9f\xb7\x98\x6e\x3a\xab\xdd\xb7\x4b\xde\x32\x5e\xa4\x33\xec\xda\xb7\xd2\xb6\xbd\x8d\x67\x76\xff\x2c\xed\xb7\x87\x55\x9e\x2b\x16\xab\x53\xb0\x1b\x9a\x3a\xba\xf0\xbc\xc4\x9e\xed\xea\xd3\x44\x8f\xcd\xed\x8c\xc2\x07\xcc\xb2\x34\x4d\xdd\xce\x5b\xdc\x04\xf6\x48\x42\x8a\x9f\x5d\xb2\x4c\x8e\xea\xb8\x5a\x3e\xb3\x7e\xc0\x65\x59\xcb\x71\x54\x27\x92\x4d\xea\x8c\x7c\xbd\xc8\x2e\x30\x26\x80\x8c\xd1\xa5\x37\x53\xf0\xd9\xac\xdd\x11\x2c\xcf\x8a\x38\xb0\xf0\xb8\x01\x11\x12\x1c\xb0\x81\xec\x4b\xcf\x6d\xd5\x5e\xb7\x21\x3b\xc6\x83\x7e\xe0\x15\xcf\xed\x0d\x41\xb2\x81\xe9\x54\x08\xff\xb1\xb5\x0a\x67\x6d\xd9\x67\x91\x8a\x31\xf0\x26\x01\xe7\x7d\x8c\xd9\x19\xae\xa3\x00\xe5\xfb\x6d\x83\x4
1\x31\xde\x3b\xa6\x33\xb9\xde\xa0\x95\x4a\xb8\x89\x4f\xbb\x20\xf3\x2e\xce\xae\xc3\xd7\xd0\x6f\x77\x1d\x5f\x9d\x99\x3a\x1e\x0f\x02\xf2\x4e\x61\xde\x47\x95\xdf\xf5\xf9\xf9\xe6\x0a\xe9\xe1\xee\xfd\x94\xc0\xa3\x45\xd3\xed\xda\xe8\x18\xea\x56\x45\x77\x80\xde\xdb\x72\x7b\x92\xc3\xf4\xea\xa8\xe8\xf9\xbb\x5a\x1d\xc2\xbc\xb1\x23\x0e\x67\xf5\xb7\xb6\xdc\x08\x53\xf1\x28\x77\xf2\xf1\x06\xce\xaf\x71\x96\x08\x53\x1b\x46\xe6\x79\x7c\x57\x43\x72\xcc\x7a\x15\x2e\x63\xcd\xe0\x51\xd1\x0c\xc2\x01\x94\xf8\xbc\x27\x27\x66\xde\x3f\x1b\xc7\xed\x11\x38\xd6\xf9\xf6\x31\x3f\x62\x91\x17\x48\x05\x9b\xd7\x6a\xe5\x04\xfe\x1a\x3e\xc8\x5b\xd0\xfe\x25\x5a\x9a\x6f\x12\x10\x0e\xf7\x29\xe9\x0c\xeb\xb3\xe3\xc9\xc2\xf4\x99\xf7\xa0\x4d\xf6\x38\xe5\x82\xa7\x91\xdb\x53\x3e\xdc\x0e\xfd\x10\x4e\xc8\x86\xe2\x3f\x55\x0c\x2c\xf4\xc7\xda\x15\x78\x22\xd7\x03\x46\x3f\x52\x27\x5f\xa1\x7d\xde\x88\x28\x98\xa4\x07\x08\x7f\x9e\x83\x13\xbe\x9e\x14\x92\xa3\x23\x7a\xa8\xd6\x02\x97\x9f\xb1\x5a\x25\x54\xfe\xbf\x91\x36\xd8\xc0\xe1\x0b\x13\x8e\x90\xb0\x75\xdd\xe6\x81\xc0\xf7\x95\xb0\x40\xc7\x7b\x8b\x91\x25\xc4\x7d\xdd\x8d\xc5\x3b\x21\xf3\xcb\x0d\x83\x82\xda\xb1\x13\x25\x42\x14\x19\xde\x5f\xb7\x4b\xfb\x46\xa0\xfb\x37\xa1\xd4\xa5\x8f\xf5\x62\xfd\xd2\xbc\x37\xd7\x11\x63\x93\x35\xa9\x0d\x7b\x43\xfc\x09\xad\x97\x38\xb2\x64\x60\x95\x0d\xf2\xa3\x90\x2a\x3e\x02\xe8\x4f\x68\x7e\xac\xee\xad\xe5\x00\x82\x67\x67\x68\xfb\x37\x1f\x7a\x39\xaf\xd1\x8a\xb0\x35\x58\x84\x74\x9f\xf6\x2c\x9c\xff\xac\x8d\x83\x4a\x78\x6a\xd6\x00\x3e\x43\x00\x03\x9b\xdb\x5f\xa0\x38\xea\xb5\xe9\x92\x1c\xb4\x63\x36\x1d\x90\x1e\x94\x0d\x78\xbb\x67\x6d\x9b\x20\x12\x4c\xe7\xc8\x7d\x48\x8b\x3c\xc8\xbb\xe3\x4c\xb1\x8e\x2c\x67\x74\x8b\x82\xb4\xe6\x19\x5e\xe8\x27\x2f\x15\xea\x3f\xb9\xa1\xf9\x93\x59\x81\xf5\x46\x20\xf5\x12\xe2\x3e\xda\x98\x50\xb4\x1b\x59\x4a\x2b\x1d\x66\xf7\x85\x7f\xfc\x3f\x99\xc1\xfb\xeb\x03\x91\xfc\x83\x5b\xe4\xaf\xa2\x0d\x0c\xe3\x54\x79\x65\xeb\x33\x63\x9a\xac\x33\xcf\x55\x4a\x24\x2c\xe4\x2a\xd1\xe0\x8c\xee\x1a\xb6\x12\x39\x0a\x64\xd1\x90\x3f\xbf\xfd\xd4\x70\xac\xfe\x6a\x4e\x62\x70\x60\xc9\xc9\x7a\x11\xa2\xf9\x3f\x01\xe0\x3f\xf7\x80\xef\x9f\x81\xfc\xc7\x34\x33\x5e\xfe\x52\x71\xe6\xb4\x0c\xb9\xb3\xfa\x37\x90\x39\x2a\x59\x24\x49\x7f\x05\x22\xcf\x5e\x8b\x41\x10\x2a\xce\x40\x6b\xc6\x5f\x95\x90\x15\x33\xb2\x2f\x44\xc5\x4b\xfb\x75\xc6\xd5\x04\xaa\xbc\x6f\xdd\x8f\xa0\x07\x3c\x6e\xe3\xf8\x88\xd7\xdb\xed\xaf\x36\x2b\x57\xa0\x2a\xd3\xa8\x8d\xc5\x05\xbd\x08\xb3\x08\x4a\x3f\x9f\x7f\xe5\x41\x3e\x01\x20\xec\x3b\x63\x01\x77\x2c\x7a\x0a\x17\x67\x21\xb7\xc7\x21\xd7\xf8\xb0\x93\x07\xc1\x20\xb4\x9d\xdd\xe3\xda\x23\x9b\x99\x9f\x24\xff\x94\x15\x0c\x86\x49\x91\x8f\xc0\xf6\x30\xd8\x53\x72\x56\xfb\xad\x52\x5c\x88\x0c\x7c\x04\x27\x12\x94\xff\xb8\xcf\xab\xcc\x51\x96\xeb\x9f\x6f\xe4\xeb\xf0\x70\x8a\xc2\xee\x66\xe4\xfe\x9d\x31\xe5\x2a\x6d\x00\x4f\x6d\xce\xed\x63\x0a\x43\x30\xf2\xfc\x2a\x97\x9f\x0e\xc8\x5c\x8f\xa0\xb4\x24\x36\xc1\x79\xfc\x96\xc6\x83\xc5\x73\xd6\x05\xb0\xf6\x2b\x58\x07\xc8\x0d\xc9\x11\x28\x8b\x97\x47\x6d\xfb\x4c\x64\x29\x34\x32\x52\xb7\x71\x45\x98\xff\x91\x58\x22\x70\x0e\xa8\xf0\xfc\xd9\x3f\x45\x69\xb8\xa8\x04\xa2\x0f\xec\x12\xd4\x42\xe9\x62\x14\x5f\x00\xa1\x12\x14\xb8\x08\xed\xd5\x20\x6a\xc9\x3f\x14\xad\x06\xc8\x98\x65\x01\x90\x74\x29\x10\x40\x4e\x01\x5f\xde\xc3\x87\x7e\x73\xd3\xa7\x05\x0b\x98\x2a\x8e\xcc\xca\xa8\x67\xeb\xfb\x63\x00\x13\x15\xe0\x0a\x0a\x40\x47\xe4\xff\xf1\x16\xd6\x1f\x5e\xbe\xd2\x1a\x9f\x1d\x5b\xfb\x08\x06\x80\xa0\xe6\xc5\xad\x08\x09\x6b\x2f\xd5\x87\x17\x52\x8c\x09\x1a\x83\x3c\x1e\xd9\xa7\xe4\xa0\x49\x14\xe8\x14\x87\xcc\x71\xb1\x7e\x9e\xea\xaa\x01\xc8\x03\x79\x00\xf5\x37\x47\x
a0\x97\xb0\x33\x98\xa5\x28\xe1\x6c\x00\xbe\xf3\x56\x60\xa0\x3b\xfd\xdf\x4d\x1d\x68\x1a\xe1\xfe\x0f\x40\xbb\xf5\x27\x62\x0d\x86\x2c\x80\x9d\xf3\xa4\x2c\xc8\x99\xed\x18\x5e\xe2\x0f\x3c\xfd\x2e\xe5\x89\x8c\x7b\x37\x6e\xd5\x78\x15\x54\xb3\x61\x84\xfe\x18\x5f\xb4\x94\x54\x86\xef\xe0\xa6\x23\xc9\x78\x04\x9d\xe0\x41\x36\x81\xce\xdc\x79\x05\xc4\xf4\x8f\x03\xb5\xad\x3b\x82\x03\xe6\x11\x40\x06\x39\x72\x58\x43\xc2\xdd\xf9\xb0\xc9\x7e\x93\x68\xc2\x9f\x0f\x5d\x16\x7b\xb8\x7d\xb3\x1c\x15\xb0\x8f\x51\x2b\xc0\x54\x73\x8b\x69\x18\x99\x01\x57\x58\xd8\x9f\xda\xee\xce\x20\xa7\x12\xb4\xe5\xc2\xc3\x2d\x51\x0d\xe4\x0e\x03\x74\xd0\xfc\x43\xbd\xaa\xa5\x77\x6e\xe3\x6f\xfa\x24\xe9\x40\x6f\x1a\xcf\x66\xd7\x13\x0a\xf6\xc1\x25\x37\x7c\x79\x96\x63\xd6\x0d\x9b\x1f\x86\xa1\xaf\xee\x6e\xf9\xdc\xf2\xfb\xe6\x51\xef\x9f\xf0\x75\x3c\x6f\x77\xcc\x0f\x7f\xfe\x9e\x1d\x87\x07\xf7\xb7\x01\xf5\xcb\xcf\xe9\x9c\xc3\x3e\xa7\x5b\xde\xc4\x25\xac\x79\x1b\xff\xcc\xfe\xa5\x3f\x83\x36\x84\x13\xb9\xf1\xfe\x83\xea\xd5\xbe\xa9\xf0\x37\x48\x0a\x39\xef\xe0\xc4\x9a\x68\x18\xf0\xbf\x73\xf5\x79\x99\x4a\xcc\x44\xc0\x59\x70\xca\x03\x20\x0e\xfe\x69\x25\x75\x22\x14\x1c\xbe\x68\xd0\x3f\xc4\x5a\x08\x71\x6b\x38\xd8\x3c\xc8\x5e\x00\x2e\x5a\xec\x85\x9d\x9b\xfe\x1c\x03\xbf\x34\x49\x95\x08\x8a\xd6\x9b\x6c\x84\xbd\xfe\xd7\x9c\x13\xba\x29\xa0\xac\x59\xc8\x9d\x32\x6b\x1c\xd0\x11\xf5\x73\x00\xbf\x2a\x24\xaa\xb1\x5c\xe9\x5d\x0f\x5b\xd5\xe4\x5f\x85\x79\xcc\x08\xde\xf2\xe5\xbf\xd9\x0a\xd5\xf3\xd5\xba\xda\x8d\x95\x40\x79\xfe\x4b\x2d\x9a\x0c\x22\x1b\xa1\x84\x25\x44\x21\xbc\xaa\x58\xef\xb2\x96\x9d\x92\x8d\x77\x51\xf7\x45\x1a\x37\x15\x2d\x1b\xbb\xd0\x98\xce\x84\xb7\xe7\xeb\x9c\x81\x28\xf4\x28\x51\x7f\x7a\xe3\x54\xe5\x65\x9d\x8c\xa3\x5f\x5f\x5b\x26\x43\x22\x51\x44\xe3\xc5\x1f\xff\x4b\x8e\xda\xf7\x3a\xfb\x4b\x59\xe8\x6b\x7c\x53\x80\xfa\xd8\xb9\xea\xf6\xaa\x26\x82\x2f\xb9\x9e\xbd\x46\xe8\x4e\x7e\x3c\x7a\x91\x44\x71\x51\xe7\x1f\xeb\xf0\x7b\xd2\xbe\x26\x35\xbe\x4d\x7a\xc2\x5e\xd6\x03\xbf\xdc\x60\xd0\x1b\xdb\xc4\x93\x85\x86\x89\x16\x4d\xc0\xa3\xf0\xcc\xf7\xf7\x1c\x59\x57\xf9\x3d\x55\x15\xb6\xa0\xcf\x44\x49\xa2\xb5\xfb\x5a\x34\x90\x38\xec\x82\x7e\x8b\x8f\xbe\xbf\xd9\x66\xf8\xf9\x21\x41\x6b\xbf\x58\xbf\xf9\x75\x79\x89\xea\xd7\x72\x82\xb7\x64\x76\x7f\xb3\x26\x69\xbf\x99\x7a\x1f\xb6\xe5\x74\xc4\x82\xfd\xd4\x2b\x5e\x7e\x02\x13\x83\xcf\xdc\xaf\x3d\x6a\x36\x29\xa2\xa8\x35\xa5\x9d\x5c\xc8\x9a\x70\xd5\xdf\x24\x98\xc1\xaf\x5b\x34\x29\x18\x42\x60\xc7\x65\xae\xd9\x29\x8c\xff\x16\xef\xbd\x50\xdc\x0d\xd6\x2b\xb4\x26\x79\x95\xc4\xe4\xf0\x08\x37\x00\x77\x6e\x5b\x95\x2f\x66\xfc\xc5\xe8\xb0\xbc\xb9\xb4\x41\x4e\xd8\x90\x69\x1e\xb5\x35\x27\xbd\xff\x25\xae\xc3\xbb\xa8\x7a\x3e\xba\xdd\x60\xaa\x1c\x07\xf4\x70\x92\x64\xe7\x9a\x4d\xec\x7e\x90\x00\xd2\x7c\x39\x52\x8c\xff\xc9\x44\xd1\x42\x31\xb2\xb6\xd6\x5c\x17\x71\xc7\x65\xaf\xba\x9a\x16\x43\x2f\x40\x27\x42\xa7\x95\x78\x0f\x67\x7e\xe3\x62\x77\x00\xdf\xe6\xac\x12\x1d\xfa\xf4\x12\x7d\x65\x75\x60\x41\x25\xd5\xac\xcd\xd5\xb6\x7d\x0b\x09\xc8\xca\x4a\x01\xf5\x23\xc8\x22\x80\xf0\xed\xf1\x4d\x2d\x14\x3a\xf9\xc6\x16\xe0\x3b\x8f\x6c\x76\x74\x6e\x37\xfb\x05\xbe\xa6\xb1\xc0\x0e\x5e\x47\x88\x16\x7b\xdc\xc7\x45\x50\xa3\x1e\x36\x15\x51\xd1\xa5\xd8\xed\x27\xf2\x72\x68\x9b\x44\xb8\xfd\x33\xba\x6f\x2d\x63\xd4\x08\x27\x6a\xb5\xdb\x1d\xbf\x8a\xe9\x70\xad\x40\xaa\x43\xda\x95\x2f\x61\x87\x3b\x65\xcc\x41\x0a\xe1\xbd\x02\xe0\x93\x6f\x6c\x28\x1e\x3d\x98\xa9\xed\x32\x9c\x0e\xc2\x91\xef\x2e\x28\x20\xe4\x45\xc0\x6d\x5b\xcc\x0c\x54\xbf\x29\x09\x27\xce\xc7\x12\x2f\x34\x36\x3b\x6b\xbe\x1b\x08\x00\x80\x85\x70\x9f\xf7\xb3\xdf\x82\x5e\xc3\xb8\x95\x61\xf4\xee\x07\xf6\x9e\x0e\xb7\xe0\x59\
xda\x85\x57\x4a\xbd\xab\x49\xee\xc4\xe8\xfc\x39\x32\x40\xc6\x17\xfd\x0a\x07\xcd\xfe\xb0\x6e\x72\x04\x36\x79\x00\x89\xde\xfe\x6e\xcb\x7e\xf8\xb5\xa1\xc6\xed\xaa\x75\x40\xe4\x1c\xe5\x49\x88\xa7\x35\x9a\xd0\xb3\x8c\x31\x69\xc8\xcb\x76\x91\x39\x62\xf9\xe7\x53\x1d\xfa\x90\x59\xf4\xb6\x14\x7d\x1b\x25\x11\x0e\xba\x88\x2f\x7c\xa8\xd9\x61\xae\x13\xfc\x92\x0c\x19\x17\x4e\x9e\x07\x77\xac\x3f\x2a\x59\xa4\x42\x75\x06\xaa\xa1\x3d\xc8\x12\xd1\xc4\x85\x61\x93\xd4\xe8\xc7\x43\xf2\xdd\x8d\xcf\x0a\x7b\xfb\x81\xc3\x24\xa7\x3f\x2f\x9a\x2c\x5a\xa0\xad\x33\x6e\x96\x91\x2f\x83\xcd\xe0\xdb\x60\x67\x80\xdd\x3f\x55\x7d\xef\x22\xb3\x33\x8b\xfc\xb2\x3a\x29\x24\x4c\xd7\xfb\x4d\x5a\xa8\x3d\x08\x5f\xdd\xa8\x22\x68\x5f\xd5\xf6\x7a\xe8\x04\x0f\x25\x7a\x79\x58\x2f\xaf\xf5\xa3\x93\x13\x80\x7a\x37\x7b\xba\x65\xc0\xbb\x91\xd4\x3b\xca\x85\xef\x54\x14\x57\x51\xa2\xdf\x94\x0e\xbb\x4c\xb8\x4e\x37\x75\xe4\xc8\x58\xd4\xd8\x65\xd0\x27\x15\x67\xf2\xf5\x48\x5c\x3a\x05\xda\xf2\xcc\x4b\xb7\x4b\xda\xa5\x2f\xba\xd2\x3d\x98\x51\xec\xd4\xfa\x29\x81\xb2\xe0\x4c\xbc\x1e\x02\x1c\x7e\xa6\x40\x4d\xbb\xbe\x5b\x0f\x85\xf9\x2f\xde\x88\x1b\x94\xf8\x49\x83\x8d\xe2\x1c\x93\x3c\x92\xef\xd7\xa7\x3f\x4f\xe9\x6c\x8b\xb5\x2b\x3f\x3c\x6d\xe4\x14\x5a\xd0\xf0\x76\xec\xd3\xd4\xdf\x96\x18\x3d\x46\x03\xa8\xc3\x7a\xfc\x74\x2e\x76\xff\xc3\x79\xe9\xf3\xb2\xd1\x0d\x5b\xac\xdb\xfa\x42\xd1\xf3\x54\x81\xdc\x66\x91\x61\xca\xf5\x0c\xfb\x7a\x0f\x48\x7d\xd8\x80\xd9\xac\xea\xe2\xbe\xae\x36\x4a\x85\x08\xc2\xe2\x9e\x99\x58\x90\x35\xc5\x7a\xba\x23\x23\x0b\xaf\x1e\xdb\xb7\x85\xb2\xf8\xd9\x9d\xe2\xab\x8a\xc9\xe2\xa1\x3d\x2e\x9f\xa4\x6c\x1d\x8d\x76\xe2\x2f\x89\x38\x91\x90\x07\x61\x46\xf2\x12\x9f\x31\xf9\x78\xc6\x93\xef\x34\x17\x91\x5c\x7a\xad\x84\x3f\x65\x93\xf7\x9c\xc2\xa8\xd4\xa2\xe7\x33\x76\x56\x81\x03\x90\x5b\x8e\x5d\x18\x8b\x27\x38\xa1\x64\xb4\xd8\x44\x28\x54\xc2\xc1\xef\x2c\x97\x4e\xd5\x66\x3c\x2e\xa1\x89\xb5\x36\xc8\x5c\x7a\xb8\x89\x5c\x72\xaf\xc8\x80\xe4\x02\xaa\x1e\xd9\x31\x64\xcf\x40\x14\xa2\xf2\x63\x5a\x2f\x3a\xf8\xa0\xfe\x0c\x99\xc8\xa7\x94\xd1\xa5\x55\xf7\x84\x14\x16\xfb\xda\x27\x3d\xc2\x9e\xb7\xc1\xda\x33\x34\x5c\xa5\x58\xde\x31\x1c\x58\x94\x42\x94\x63\x2d\xf6\x29\x3c\xf9\xf3\x3e\x23\x54\xa8\xd4\xd9\x3f\xd3\x28\xf1\x72\x8c\x8f\x64\x53\x5c\x36\x45\x56\xd0\x4f\x01\x33\xb0\x06\x99\x18\xff\x7c\xec\x29\x0c\xed\xc2\xdd\x8a\xb9\xd6\xb6\x39\x2c\x07\x35\x12\x5d\x46\xf2\x56\x80\xf1\x1e\x53\x55\x99\xce\x6f\x09\xed\x6e\x6c\xd9\x45\x8b\xc5\x17\x18\xb6\x5c\x1c\x44\x41\xdc\x3f\x64\x72\x9c\x01\x1b\x21\xc8\x36\x9f\x7d\x48\xf2\xe4\x93\x7a\x8b\x58\xba\x29\xb2\x81\x8d\xf8\xe1\x4c\x18\xef\x22\x82\x81\x82\x75\x46\x97\x92\xca\xe0\x4a\xf0\x4f\xb5\x67\x41\x9f\xb5\x4d\x92\x35\xe7\xd5\xb2\x71\xef\x20\x91\x1b\x11\x7b\x9c\x2f\x5e\xcc\xfe\xf8\x3f\x34\x19\x65\xd0\x58\x75\x72\x59\x43\x18\xcb\x9e\xfb\xf8\xad\x15\x8f\xd4\xd4\x25\x38\xb2\x34\xd3\x8f\xba\xee\x69\x83\x81\xee\x79\x8b\x82\xd4\x32\x7f\x55\x85\xb2\x9b\x29\xef\x8c\x86\x6b\xe7\x0d\x91\x67\x39\x0e\xba\xb2\xba\x57\x41\xed\x85\x9d\x5a\xb3\x7c\x1f\x52\x68\xd8\xd3\xf3\xe2\x00\xeb\x10\x98\x45\xf7\xab\xf6\x4a\x51\xee\x2f\x98\x67\x91\x3e\xf4\xb4\xfb\xd5\xcf\x96\xab\x60\xbe\xaf\x1d\x84\x36\xb3\xc7\x11\x9c\x98\x47\xe5\xd7\xb8\xfa\xf5\xb9\xba\x26\x16\x6f\x67\xb6\x82\xa8\x60\x63\x04\x59\x13\xd0\xb0\xcd\x9d\xb8\x0f\xe7\x53\x87\x56\xac\x6d\x27\x8e\x5c\x77\x2a\x61\xee\x4e\x97\xf6\x7d\x04\x1d\x67\x51\x5a\x82\xaf\xe7\x3a\x6d\x17\x5d\x55\xb7\x91\x5c\xea\xea\xeb\x24\xe5\x8c\x5c\x1d\xec\xb6\xf4\x69\x42\x8d\xc5\xa9\x3a\x73\x65\xe5\x04\x13\xba\x2b\x95\xd8\xe3\x44\x00\xba\x8f\x76\xb0\x0e\xb8\xc2\x3a\xa2\x2b\x35\x37\xee\x28\x43\xcb\x63\x96\x9f\x83\xb3\xb5\xae\x78\x39
\xdd\x59\x3c\x55\xd8\xa4\x2b\x1a\xbd\xbb\xd2\xb7\x44\x8d\x79\xa3\x75\x19\xd3\x38\xa6\xad\xa9\x3f\x63\x7f\x2a\x9d\xf9\x43\x32\xc6\xf8\x56\x41\x1c\x12\xf0\x62\xf4\xec\xb0\xfa\x4a\x9b\x1a\xff\x91\x80\x87\xf1\x5c\x66\x3c\x8d\xaf\xba\x3f\x6c\x65\xea\x8e\x01\xce\xe2\x49\x31\x58\xb6\xc1\x0e\x1a\xea\x21\x79\x89\xe2\x8e\xcb\xc2\xfb\xf2\x67\x30\x79\x76\xad\x63\x82\x35\xf4\xd6\x04\x40\x85\x95\xb7\xce\x96\xb6\xc6\x36\x2a\x6b\x38\x04\x21\x28\x58\x42\xe8\xd8\xea\x17\x85\x82\xfb\xf8\x2b\xfa\x92\xab\xc7\x93\x94\x62\x33\x83\xa2\xaf\x6c\xe3\x78\x67\x07\x55\x12\x10\x11\xc0\x08\xf8\xe7\xa0\x07\x29\x6b\xcd\xca\x3a\xe5\x50\x98\x11\x8c\xe2\x9f\x4a\x36\x3e\x38\x39\xce\x89\x48\xa2\x0f\xb9\x1e\x2d\xa3\xfc\x96\x11\x7e\x97\xa1\x59\x59\x19\x46\x2e\xc2\xc7\xfd\xf0\x10\x64\x04\xd9\xdf\x2d\xc4\xda\xb1\xce\x52\xd1\x77\xde\x8a\x9c\xa6\x23\xec\xb0\x4a\xcd\xf7\x89\x5b\x5f\x7f\x2b\x3c\xd1\xa7\xb8\x5e\xe1\xf5\xf8\x08\x3e\x91\xee\x08\xd7\x97\xe7\x39\xda\xaf\xb2\x52\x7e\x1f\x61\xec\x2c\x5d\x5c\x9d\x76\x74\x90\xb8\xef\x4f\x92\xff\x4f\x9f\x4e\xcc\xa0\x73\x1d\xaa\xfe\x7c\xde\xc9\x4d\xf3\x2e\xc9\xbf\x26\x81\xe0\xc1\x20\xe6\x53\xe4\xff\x04\xb2\x1f\x2b\x25\x51\xac\x6d\xcf\xf6\x8c\xce\x08\x3c\x9f\x4d\x69\x4c\xbb\x5d\x50\xef\xa9\x7d\x60\x8b\x95\xe0\x89\xaf\x94\x4f\x17\x7c\x1f\x02\x4e\x3d\x20\x81\x00\x48\xd3\xd0\x5b\xdf\xfc\x2f\x6c\x51\x3f\x10\xbb\x07\xc8\x7a\x9d\x11\x62\x7d\x0a\x7a\xac\x08\x25\xc0\x6d\xcf\x7f\x9d\x2f\xb8\xba\xbd\xe8\x43\x91\x0e\xfb\x2a\xe7\x21\xca\xc9\x3b\xfe\x30\x30\x85\xae\x6f\xd2\x10\x9e\x9e\xe9\xf6\x37\x68\xcb\x66\xd3\xe4\x20\xa5\x6c\x6b\x65\x1a\x6d\xdd\x2d\x1e\xbf\xee\x84\x56\x81\x1f\x74\x4e\x81\xf2\x45\x0b\xeb\xbd\xc1\xc8\x81\x26\x7e\x0c\x8b\xd2\xc1\xbf\x09\x43\x10\xf2\xaa\xab\xb2\x6d\x30\xa7\xca\xf2\xb4\xd1\x34\xb8\x47\x0e\x65\x4a\x15\xcc\x88\x3d\x5e\xe2\x00\xdb\xa7\x4c\xf5\x84\x97\x6e\xfc\x3e\x86\xad\x20\x0f\x75\x63\x0c\x69\x4e\xf1\xd9\x6f\x4a\x4f\x58\x47\x23\x1e\xa7\x4a\xfc\x19\x45\x5c\xb2\xc8\xb2\x4d\xc5\x28\x48\x4b\xbc\x7e\x2e\x9d\xce\x2f\x24\xf1\xd5\xd2\x83\xd3\x3a\x8a\x87\x41\x33\x86\xf2\xba\xcb\xd9\xeb\x7d\x20\x8b\x54\xfd\xfa\x29\x6d\x95\xee\x53\xa9\x56\x36\xb2\x6a\x86\xd6\x42\x58\x84\x5f\x3d\xc1\xca\x81\xe2\xf2\x53\xda\x83\xe3\x03\x66\xef\xb0\x55\x11\x5d\xdf\xa1\x8a\xb9\xed\xf9\x9f\x6f\xb0\x98\x4e\xb6\x0d\x96\x38\xef\x64\x98\xfa\xf6\x36\xa8\x5e\xdf\x0d\xb5\x6c\xde\x8f\x64\xcf\x05\xf8\x91\x68\xfa\x30\x42\xbd\x7b\x8e\x56\xa0\x19\xe9\xb7\x1e\x9c\x20\x2c\x41\x18\x6b\x61\x8d\x2b\xc2\x56\xba\xf7\x76\x7f\x4a\xa3\x7f\x48\x7f\xdc\x9c\x6f\xf4\x95\xda\x47\xbe\x50\x12\xea\xb1\xfd\xca\xeb\x30\x58\x96\x10\xca\xc7\x83\x38\x53\xa7\xc6\x5a\x2f\xea\x74\xb7\x44\xce\x58\x5f\xb7\xab\xb3\x27\x0b\x4b\x21\x54\x0c\x07\xe6\x22\xd6\x40\x67\xc9\x3f\xdf\xe2\x66\xfb\xe2\x01\xbd\xb3\x57\xf7\x5a\xcc\x35\x71\x77\xf8\xfc\xb4\x05\xd7\xba\xf4\xa7\x7e\x48\xe2\xd3\x04\xf8\x00\xa1\x48\xe4\x26\x9b\xe3\x8b\x62\x24\x0e\x19\xbc\x53\xb3\x98\x48\x6b\x98\x3b\xa4\xb5\xea\xe4\x73\x08\xb1\x6d\xb0\x8a\xde\xa9\xda\x5c\x48\x7a\x3f\x4e\x60\x49\x5d\xa8\xf9\x7b\x99\x5e\xf1\x34\xc9\x0f\x0c\x58\x99\x26\x60\x0c\xd6\x88\x99\x80\x85\x3a\xb4\xc1\x8f\xd0\x5c\x55\x04\x16\x7c\x24\xb2\x90\x0e\x67\x74\x3b\x1f\x91\x0d\xce\xf4\xf7\x07\xa9\x4a\x3a\x1a\xf3\x6d\x51\x55\x40\x2e\x24\x31\x89\xa4\x9c\x13\x62\x35\xce\x00\x3a\x5f\x09\x42\x90\x1e\x8e\x00\xec\x78\xd4\x89\x0c\xe4\x3a\xfc\x22\x2f\x49\x79\xbf\x34\x9c\x96\x36\x5b\xc0\x73\x1a\x49\xdf\x36\x40\x3e\x51\xb1\xa7\x01\x2e\x2f\x08\x4e\x89\xea\x59\x4e\x2d\xe0\xf6\x55\x5f\x12\x67\xad\xd9\xbe\xec\xba\xd0\xe0\xde\x72\x31\x31\x74\xc9\x70\xdd\xc1\x4d\x61\x51\xba\x81\xf1\x74\xcf\xa8\xe8\xa8\x22\x4f\xa7\xa6\x2c\x8d\x98\x56\xbc\x07\xa1\x2
9\xce\xad\x5d\x01\xbf\x6c\xd3\x29\xd9\x95\x06\x0f\x79\x58\xd0\x36\x53\x55\x6e\x43\x38\xdf\x61\xb7\xc7\x02\xb6\xb7\x86\xb0\x22\x85\x36\xd9\x1f\xaf\xed\x34\x8a\xa5\x6d\x13\x4b\x9e\xa2\x3d\xf2\x07\x0f\xb2\x81\x98\x77\x43\xe3\xe5\x7b\x7e\xf3\x28\x83\x81\x91\x76\xb5\x2d\x63\x24\x0c\x2d\x0f\xe5\x2f\x3c\xfe\x92\xa2\x35\xf6\xf2\xb1\x6a\x50\x05\x7b\x38\x71\xca\xd1\x9c\xbe\x2f\x6f\x3d\xdc\xc4\xc4\x9e\xda\x50\x9f\xe7\x7e\x12\xa9\xd4\x70\x6a\x4f\xf3\x25\x21\x65\x71\xe8\x28\x35\xd4\x47\xf5\xef\x12\x7d\x4a\xef\x0a\x2b\xe7\x26\x7d\xfc\x8c\x1c\xb4\xc0\x1d\x47\x5d\x27\x45\xac\x39\x69\x63\x19\x43\x58\x23\x58\xd8\x84\xed\x44\x83\x65\xc3\x2e\x58\x56\xbc\x46\x69\x6b\x94\xf1\x38\xb7\xaa\xd5\x33\x0b\xdb\x38\x79\x41\xdb\xdb\x65\x3a\x30\x56\x26\x5c\xea\x07\x2e\x4e\xe2\x69\x0a\x6f\x74\xc4\x00\x85\xbf\x14\x77\x1b\xd7\xcb\xd2\xe5\xb5\xb0\xf4\xf1\xdb\x05\x65\x4c\x4c\xbd\xe8\x19\xd0\xde\xbc\xaa\xc3\x21\xcf\x24\xcb\x78\x4c\x4a\xcc\x01\x8f\xed\x4b\x91\x1c\xaa\x2c\x19\xcf\xdc\x0e\x48\x30\x5b\x1e\x18\xcc\x9b\xfd\x9a\xf9\x83\x7a\x8f\x65\xf7\xc0\x10\xf2\x93\x58\xe6\xd7\xb6\xda\x11\x60\xb2\xa7\x6f\x0d\x32\x2c\x16\xcc\xda\xea\x50\x9e\xb6\xcd\xa9\x37\xdb\x4c\x5d\x65\x12\x7f\xb6\xd9\x99\x94\x0f\x39\x13\x90\xd6\x05\x3c\x7c\xf0\xc1\xa2\x11\xa7\x81\x19\xd0\x22\x0d\xfe\x95\xf7\x3e\x6a\x58\xd9\xd1\x79\x8f\x70\x2d\xf8\xab\x64\xad\x11\x78\x56\xbe\x15\x12\x8a\x30\x9c\x8e\xd7\xac\xbf\x66\xa6\x05\x62\x2a\x19\x5b\xa1\x5c\xe2\xe2\x66\x62\xdd\x26\x2c\x13\x0f\x22\x8d\x2d\xe1\x62\x6d\xd3\xb8\xb3\xea\x64\xa5\x19\x4b\x3f\xa2\x5e\xe5\xe9\xe4\xa7\x4e\x10\x64\xb1\xab\xe6\x1f\x32\x35\x80\xc0\x93\xc7\xf5\x9b\xc4\xc2\x92\x1a\x3a\x59\x58\x1e\xf0\xe0\x2a\x74\x6f\xf7\x4a\x75\x6e\x90\x95\x42\x5f\x3d\xb3\x53\x75\x82\x4f\x3d\x0a\x7e\x91\x86\x90\x84\xa5\x18\x20\x2a\xcf\xce\x3d\x9a\xe9\xff\x82\x18\x68\x16\x92\xa8\x2a\xb3\xea\x9c\xef\x5f\xab\xe5\x3c\x25\xd2\x09\xb4\x27\x81\x33\x2c\xf6\x42\x03\x2f\x3e\x39\x58\x7e\x6a\x8d\xd8\xea\xa9\xf1\x6e\x86\x91\xdf\x4b\xcd\x82\x42\x7e\xd0\xb4\x9c\xe4\xa1\x2b\x87\x27\x44\xd7\x39\x39\xcc\x81\x46\x09\xce\xcd\x9a\xca\xe1\x00\x99\xbf\xcc\x15\xc7\xbf\xbc\xfd\x9d\xae\xa1\xf6\x97\x56\xa8\xf8\x3a\x41\x8b\xbe\xb7\x7f\xf0\x11\x60\x81\xe2\x3a\xfb\xb2\x4f\xa0\x6c\xa0\x51\xce\xe2\x9b\x0d\x90\xd7\x8b\x45\xd6\x2b\x7b\x19\xcd\x39\x5b\xdc\xda\x38\xc3\x15\x5d\xb0\xb7\xf0\x9d\x17\xdd\x1a\xae\x1f\x08\xdf\xbb\xfb\xc1\xcf\xbc\x03\x23\x68\x3b\xbd\xfd\xa0\x31\x83\x3c\x0e\xb6\x4a\x78\x76\xd6\xd6\x8f\x1b\x24\xd8\x7c\x56\x6f\x80\xfe\x15\xf9\x54\x45\x2a\x48\x1f\xef\xe6\x49\xec\x9a\x9e\xbc\xeb\x2c\xb2\x0f\x79\x79\xc4\x8f\xf5\xeb\xcf\x20\xf7\xf4\xe6\xfe\x60\x0d\x46\x93\xcc\x8f\x32\xf3\x4f\xac\x6c\xc3\xdb\xc1\x09\xb8\x72\x7a\xe8\x7b\xbe\x1c\x7e\x88\x16\xe1\xc6\xce\x74\xb8\xfc\x0c\xae\xdd\xfc\x20\x1f\x4c\x9d\xe1\x67\x08\x32\xd8\xc1\xcd\x96\x55\x78\xff\x78\x4d\xdc\x5f\x94\x85\xfb\x87\xca\x6b\xb0\xe7\x89\x62\xdf\x27\xf3\x8f\x72\x40\x45\x48\xfb\x5b\xab\x17\x65\xeb\xad\xbc\xc9\xf9\x5e\xa3\xbc\x20\x0e\xeb\x5c\xe5\x90\x53\x4d\x43\x3f\xc9\x87\xb8\x8f\xbc\x65\x91\x57\xe7\x4a\xf7\x9a\x14\x50\x14\xf4\x93\xc4\x89\x02\x0b\xd5\x57\x70\xf8\x0d\x2d\xb4\x79\x67\x0f\xe5\xfe\xfd\xde\xae\x97\xdd\xc7\xbe\x28\x0d\x66\x18\xb7\x07\x2f\xde\x9b\x3a\x0f\xb6\xe9\xb9\x7f\x82\x75\x12\x79\x58\x60\x2b\x79\x50\x1d\xd8\xde\xa3\x43\x9e\xff\x82\x17\xb6\xb7\x41\x2d\x63\xc5\x08\x5b\xdf\x94\x38\x6f\x71\xc7\x54\x71\x79\xe7\x8f\x7f\x8c\xfb\xae\x71\xc7\x76\x04\x9f\x8c\xda\x56\x6f\xcb\x6a\x8f\x14\xc6\xe0\x6f\x8d\x0f\xef\x7b\x58\x72\xaa\x58\xf7\x10\x4f\x8c\x42\xb3\x6f\x8c\x37\x2d\x91\xd3\x6f\x5c\x34\xbd\x19\x05\x62\x79\x66\xa4\xea\x0d\xe1\x49\x98\x53\xaf\xe8\x7c\x66\x37\x34\xcb\x29\x27\x8b\xa5\xc0\x
58\x47\xb7\x72\xa2\x17\x74\xda\x55\xa0\x34\x2d\xd1\xcd\xa2\x94\x7d\x6a\xe9\x17\x66\x17\xb9\x72\xb8\x58\x8b\x53\x38\x6b\xa7\x74\xbb\x49\x0b\xa3\x29\x03\x5f\x28\xa1\x73\x0e\xf0\x93\x8b\x12\x80\x55\x66\x3f\xc4\x3b\xf6\x4a\x73\xe5\x9b\x72\x36\xdd\x62\xa2\xe7\x42\x5f\xdc\xb0\xf5\x02\x34\x78\xf7\x12\x20\xdf\xee\x32\x26\xef\x78\x73\xe7\x82\x42\xb2\xd7\xb0\x46\x4f\x55\x1d\xb6\xa9\xd5\x10\x67\xd3\xc0\x9a\xc0\xff\xf6\x17\x96\x3c\x58\x13\x3d\xae\x2d\xc8\x5f\xd7\xca\xf1\xe1\xb8\xb8\x86\x71\x5c\xa0\x53\x23\xf8\x14\x09\x57\x42\x91\xd6\x5f\x3a\x49\x0f\x06\x2f\xad\xa3\x3e\xf7\x81\x6c\x5f\x9d\xe8\x71\x36\x90\xce\x84\x34\x0c\x5a\xbb\x8d\x08\x83\xbe\x0e\x4a\x61\xc9\xcb\x32\x16\xca\xd1\xe2\x64\x81\xe5\xd8\x48\x29\x3f\x20\x86\xbe\xb2\x23\x82\x63\x81\xf6\xf5\x50\xf9\xc9\x94\xe0\x85\xd3\xe3\x14\x07\x40\x64\x98\xb4\x36\x7a\x37\x29\x7c\x45\xd4\x08\x66\xe7\x27\x8e\xc5\xe7\x30\xcf\x40\x42\x78\x44\x24\x64\xaf\xff\xee\xa2\x92\x61\xe6\x1c\xa6\x1b\x80\xa4\x85\x19\x08\x5a\xad\x34\x5c\xac\x01\x3a\x13\xfb\xaa\x03\x55\x42\x59\x6d\x2c\xc2\x13\x1e\xf0\x6e\x03\x7a\x9f\x24\x38\x2e\xa5\x1c\xf8\x25\x3c\xf0\x89\x2a\xff\x12\xac\x14\x3b\xee\xf9\x40\xf5\x75\xe3\x4a\xdb\x31\xd6\x75\xf8\x63\x20\x49\x0c\x4b\xa1\xbc\x98\x43\xb1\x99\x44\x65\xdd\xd9\x24\x49\x14\x38\x4c\x24\xb6\x53\xb1\x4f\x3c\x74\x7c\x16\xf3\x6c\x48\x9f\xa0\xd8\x61\x7e\xc5\x80\xb4\xb6\x7f\x87\x2c\x45\x0f\xd7\x25\x2e\x32\xc0\x39\x5c\xb8\xdc\x03\x39\x93\xf2\xfd\x79\x76\x60\xf2\x91\xfe\x8d\x7e\x2b\xf2\x0e\x12\x10\xe6\x70\x64\xa4\x07\x50\x1e\xe1\x4b\xad\xac\xa6\x31\xad\xbb\x90\xef\xf1\x96\xf9\x49\x2f\x12\x4b\xf7\x6b\x71\x9b\x25\xef\x08\x6c\xfe\x06\x3b\xa2\x28\x11\xed\xd6\x4b\xb7\xf4\xe9\xc8\xf9\x70\xf1\x95\x9d\xad\x4e\x94\xb8\x54\x1d\x31\xd0\x25\x8f\xde\x49\x5a\xc8\xb3\xdc\x4f\x7e\x28\x90\xa0\x4b\xa1\x67\x01\x62\x77\xc8\x13\x31\xcb\x9e\x93\x54\xe9\x1e\xa4\xbe\xf1\xab\xed\xc6\x2d\x15\x5f\xbd\x9f\xf6\xbc\x93\x33\xe1\x72\x17\xad\xec\x39\x8e\x91\x18\x62\x71\xd9\x3a\x86\x5e\xe8\x90\xff\x07\xa5\x92\xf5\xf1\xb1\x6e\x6d\x51\x82\x7a\x0e\x26\xa4\xfb\x71\x62\x6d\x96\x1d\x31\x7c\x3d\x44\xc7\xb2\x87\xaa\x11\x35\xec\x14\xde\x0d\x8b\xc0\xef\xca\x89\xb3\x2b\x72\xd3\x8e\xdc\xb2\xb2\xaf\x26\x0f\xce\xda\xfc\x8a\xb1\x6a\x2d\xbe\x1f\x60\x40\xea\x8a\x2d\x96\xb3\x47\x08\x65\x40\x60\xa6\xce\xa7\x48\x7d\xd3\x79\x39\xa4\x97\x51\xfb\x22\x63\xb0\x92\xea\x72\x9a\xc9\xc0\x50\xab\x93\xf7\x9d\xc7\xcf\x24\x22\x9e\x18\x3e\xa0\xb3\xc9\x5a\x03\x33\x41\x89\xca\x02\x76\xa3\x7d\xad\x32\x0d\xeb\xd6\x09\x13\xdd\xbf\x64\xfe\x11\x47\xc1\xdc\x19\x6b\x22\x8b\xb9\x49\x88\x76\xc3\x8b\x60\x65\xe0\x9a\x8c\xb2\x92\x26\x65\x4d\x87\x97\xc3\x17\xcd\xb9\xbf\x04\xb8\xc0\x42\x71\x4e\x5c\xb3\x81\x7c\xc9\xd5\x95\x2d\x44\x87\xcb\xb9\x6b\x64\xe7\xe2\xea\x6d\x12\x6b\x01\xb1\xcd\x7e\x89\x35\x3c\x32\xd6\x24\x62\xdb\x32\x8a\x02\x0e\xbb\x11\x7d\x89\x8b\x80\x8f\xfd\x72\x88\xdc\x76\x0c\x36\xdc\x2c\x8c\x49\xf9\x43\x8a\xbb\x82\xea\xe2\x0d\x4b\x34\xf7\x44\x74\xeb\x1e\xea\x05\x3b\x16\x62\x15\x90\xae\xc4\x86\xdd\xd7\x72\x3b\x91\x12\x37\x8c\x46\x8e\xa0\xc3\x6d\xaf\xa0\x06\xec\xcb\x18\xba\x94\x58\xc5\xaf\x3b\x03\x6f\x01\x86\x9c\xd8\x79\xe0\xca\x91\x0d\xb7\x78\xc7\x17\xc7\x66\xbd\xcc\xb5\x64\x70\xe6\xfc\x01\x43\x2e\xae\x43\xa6\x4c\xcc\xfc\x3e\x55\xdf\xa0\xbd\xde\x5c\x0e\x51\x8f\xc8\x1e\x46\x3d\x00\x53\x02\x34\x04\x51\xf5\x3e\xf1\x86\x7e\xd2\xd7\x38\xb1\xe4\x60\x53\xaf\x29\xb3\xae\xbf\xbf\x12\xe9\x6e\x47\xa6\xff\x0a\x92\x94\xb1\x12\xe3\x7c\x5b\x25\xe7\xdc\x1a\x56\x77\x81\xdd\x01\xc1\x94\xeb\xf2\x4b\x80\xbc\xdd\x9d\x28\x11\x80\xd8\xb6\x47\xe0\xdb\xf2\xf9\xa2\x89\x0e\xe5\x08\x3a\x9b\xdd\xb5\xaf\x80\x4a\x81\xce\x16\x73\x1d\x87\x2d\xf0\x4c\x44\x6c\x8b\xb7\
xae\xf1\xd6\x1c\x01\xff\x14\x4a\x99\x32\xda\xe2\xf3\xad\x5b\x86\x3a\x8d\xc7\xc0\x7c\x6f\xfa\x70\x51\x0d\xad\x77\x0f\xa6\xdb\x60\x19\x55\x97\x46\x86\xdb\x2a\x0e\x9d\x5e\x0a\xad\xf3\x7d\x08\xc8\x1b\x28\x74\xd9\x49\x99\x59\x5c\xbf\x40\x89\xc1\x8b\xa5\xe3\x63\x0e\xb3\xd0\x99\x81\x38\x74\xc0\x2c\x88\x56\x96\x81\x06\x24\xc5\x66\x47\xbd\x63\x69\x2b\xcf\x2e\x4c\x8c\xc0\xb3\xd3\x8f\x8b\x65\x6c\xfe\x9d\xf9\x8b\x64\xb4\x67\xfb\x02\xf6\x85\xa0\xe6\xab\xef\xc8\x29\xb2\xbd\x3d\x57\x72\x5b\x4e\x03\xd0\x8f\x0c\x1d\x30\x82\x90\xcc\xbb\x8e\xe1\xa4\xb1\x17\xc4\x3b\x6e\xf8\x94\xb7\x65\xd3\xb2\x1b\x43\x0f\xec\x37\x24\x20\xcf\xc2\xa5\xfc\x31\x98\xa5\xa6\x6f\xe1\xe4\xd5\x1e\x00\x83\x1d\x80\x81\xc0\xeb\xac\xbc\x7a\x61\xce\xe7\x34\xbc\x4a\xbf\x9b\xe4\x33\xd5\xba\x1d\x8c\xa2\xca\xc7\xbb\x58\xed\xb6\xc0\xe6\x67\xf2\x8e\x4a\x5c\x34\xc3\xf2\x59\x3c\x5d\x29\x5c\x12\xf6\xaa\xa9\x84\x08\xbd\x77\x18\xc7\xa4\xe9\x13\xc4\xbc\xb9\x24\x6d\x7d\x33\x9d\x37\x61\xce\x5a\xc1\xe5\xa5\x25\xd6\x0e\xa9\x1c\xfd\x01\xa9\x17\x5d\xf7\xc7\x6e\x30\xa7\x26\x05\x9e\x71\xdb\xdf\xab\x23\x7d\x36\xc8\x07\xe6\x87\x8c\x67\xc8\x8d\xb2\x9b\x8a\xa4\x2a\xeb\xa1\xfc\x60\x08\x80\x91\xa9\x07\x79\xdf\x37\xf7\xe1\x87\x68\x49\xf7\x67\xfb\x39\xf2\x73\x62\x3d\xb3\xca\x33\xa1\xb2\xb9\xa8\x72\xb1\x69\x69\x73\x73\xd9\xda\x00\x32\xe3\xb2\x7c\xa8\x15\xba\x0d\x17\x67\x35\xdb\x90\x6d\x2f\x19\xfd\xf0\xad\x19\x0f\x11\xed\xea\xdc\xdb\xc3\x93\x6b\x95\x63\x7d\xf1\x75\x53\xfb\x64\x93\x44\x66\x1f\x5f\x3f\xc0\x59\x22\x2d\xcd\xae\xb5\x82\x91\xe2\x39\x95\xda\xc7\x0f\xaf\xba\x3e\x7a\x4d\x7c\x40\x19\xdf\x38\x97\x2f\x33\xe4\xd5\xca\xd6\xa5\x37\x61\x2a\x03\xba\xce\x17\x8f\xaf\xd2\xe8\xc2\xd3\x7e\x73\x75\xcd\x5d\x21\xd9\x58\x5b\x5d\xf8\x03\xb7\x75\xe6\x2e\x87\x73\xf3\x84\x26\x18\xd6\xea\x33\x5f\x63\xc8\x64\x6d\xf9\xf7\xb4\x67\xe6\xb6\xa8\x51\xb3\x92\x03\x5d\x94\x5f\xaf\xb1\x75\x7b\xe2\x58\x6f\xc9\xef\xf3\x05\xc8\x8e\xd4\x67\x50\xff\xc6\x6f\xfd\x51\x67\xee\x71\xb8\xb4\xb0\xca\x5e\x90\xbb\x17\xab\x4b\x20\xbc\xae\xa1\x2a\xb2\xe5\xa6\x54\xc6\x21\xca\xec\x73\x98\x02\xa5\x60\xf7\xe9\x57\xce\x52\xaf\xc4\x58\xae\x07\xcf\xc2\xdb\xdd\x72\x03\x56\xaf\x3a\xe4\xd0\x10\x29\xf9\x5c\x6f\xf3\x85\x09\xbe\x62\xc3\x2f\x39\x86\x58\xde\xe4\xdb\xd6\xa0\x57\x2a\x21\xda\xa0\x48\xc8\xa3\xa1\x37\xec\x41\x37\xd6\x4b\x83\x98\x9f\x80\x19\x7a\x96\x8f\x4e\xa9\xf5\x34\x58\x0a\xf2\xb1\xf0\x88\xbf\x93\xcf\x56\x90\xdc\x07\x15\x0d\x3b\xc4\xbd\x87\x7c\x1f\x84\xdc\x19\xda\xd3\xf2\xaa\x91\x07\xc9\x46\xad\xd2\xd1\xa5\xc7\x61\x8a\x81\x98\x53\x93\x34\xa3\xcb\x9f\xb9\x91\x64\x30\xeb\x07\x61\x5b\xea\x64\x0e\x79\xf5\xa8\x06\xb9\x9e\xd2\x57\xb0\x80\xbc\xa6\x93\xd8\xeb\x15\x69\xeb\x10\x39\xe4\x7a\xc6\xc4\x9d\x3c\xc2\x07\x49\x84\xf4\xfe\x91\x93\x0d\x6b\x96\xb4\xf7\x91\x14\x87\x5f\x51\xee\xe1\x59\xc1\x9d\xaa\x85\x9b\x62\x10\xf0\xca\xde\x8e\x90\x40\x61\x23\x5c\x79\x2a\x65\x70\xcd\x41\xa7\x10\xad\x50\x61\x5f\xb1\xad\x20\x0c\x86\xfd\x0c\x98\x84\x98\xed\x2f\xdf\x6c\xc2\xaf\x03\x9b\x74\xe3\x13\x7c\x32\x18\xdf\x33\x56\x82\x1c\xfa\x83\x43\xe7\x9a\xa1\x76\x13\xe9\x37\x9a\x42\x4f\x03\xdc\xa4\x49\xbd\xd3\x49\xb6\x77\xde\xef\x38\x05\xbc\x01\x64\x43\x17\xd3\x5b\x9d\x20\xeb\xd2\x4f\xda\x2f\x61\xa8\xd5\xd1\x00\x1b\xa3\xec\x57\x45\x7f\x20\xb2\xa3\x36\x48\xe8\x6b\xf9\xe8\xf1\xb6\xe9\xe9\xa9\x2d\xcf\x11\x10\x94\x95\xc1\x58\xbf\xb7\xec\xe2\x10\x6e\x36\x8f\xa7\x61\xb6\xc7\xaf\x34\x15\x1e\x50\x26\x29\x6c\xac\x65\x3a\xa0\xb0\x11\x0e\x41\xae\x26\x2b\xce\x23\x4d\x23\xd7\xe9\x14\x00\x16\xc4\xc3\x39\x5e\x9d\xaa\x4c\xe1\xca\xc1\x6c\x85\xe0\xb6\x1c\x8a\x6c\x02\x86\x94\xf4\x41\xf2\x61\x9d\x85\xae\xce\x3b\xf8\x89\xb0\x29\x97\x42\xfb\xc9\xc4\x9d\x04\x13\xb1\x9d
\x70\xfb\xf8\x4b\x61\xca\x34\x2e\x6b\x85\xcc\x5b\xe3\x15\x7b\x5f\xdc\x4e\x3d\x7e\x41\xde\x85\x63\xbf\xbf\x64\x89\xf4\x7c\x06\x47\xf1\xeb\x67\x1f\xdb\x96\xea\x37\xc1\x54\xbc\x18\x43\x74\xe7\x41\xfe\x21\x8d\x80\x64\x5c\x2f\x83\x1f\x94\xb1\xf4\x3a\xb4\xf0\x17\x86\xcf\x93\x93\xed\x35\x6b\x4f\xf8\x05\x65\x7a\x25\x31\xe6\xf2\x80\x8d\x02\xaf\xd8\x8a\x74\xf8\x0e\x4a\x22\x54\x7b\x38\xf5\x80\x8c\xe7\x12\x64\xc4\xa8\x4b\x63\xbd\xb1\x3a\x0d\x61\x21\x62\x0c\x47\x4a\x60\x79\xf0\xcb\xb7\x11\xa7\xd0\x27\x41\x8b\x7a\xfd\x1e\x01\x76\xaf\x2b\x00\x8d\xb8\x32\xd5\xb7\xc4\xe7\xae\xa3\x78\x8d\x2b\x7b\x14\x8f\xed\xa2\xc3\x28\x9c\x67\xa6\x5b\x35\xa5\x91\x3d\x6c\xf9\x47\x1b\xa3\x8f\x68\x75\x0e\x94\x90\xf1\x88\xc7\xf6\xc3\xbf\x1e\xb7\xcf\x51\x65\x2b\x6e\x27\x69\x7c\x16\x4e\x95\x26\xfd\x6d\x81\xf4\x08\x56\x63\xb0\x18\xcf\x03\xc9\x00\x7b\x71\xf7\x3e\xca\xf2\x3e\xea\x8a\x3d\x54\x57\xc4\xdc\x86\x9d\x71\xf1\x4c\x74\xa4\x52\x77\xb0\x17\xf5\x65\x9b\x2f\xbb\x87\x83\xd1\xa6\xba\x82\xcd\xca\x05\x9f\x06\x6f\x31\xae\x39\xe2\x25\x8c\x34\xb1\x18\x79\xa3\xf8\x9d\xbb\x48\xf2\xe2\x7c\xae\x9f\x44\x53\xa5\x60\xb3\xdb\xd9\x5f\x5d\x21\xcb\xc9\x9e\xbe\x44\xab\xa7\x21\xd1\x67\xbb\x20\x73\x0e\xfa\xe5\x93\x7e\xa8\xa5\xf3\xc9\x15\xc8\xcb\xdb\x16\x93\x66\x52\x99\x17\x71\x1b\xd1\xc5\x84\xbd\x12\x66\x9a\x41\x1f\x84\x0d\x53\x1f\x64\x9e\x92\xa8\xd7\x51\x7e\xb0\x52\xa1\xc4\xb9\xfc\xf4\x3e\x7c\x2d\x97\xe8\x24\xcb\x79\xd3\xdc\x96\x33\x9c\x96\xce\x5e\x5c\x47\xb7\x02\xd5\x4d\x84\x6c\x91\xb3\x64\x8b\x5a\x4f\xcd\x97\x52\xfc\x96\x5d\x00\x51\x09\x8f\x19\x6b\x60\x62\xaf\xd1\x65\x29\x5f\x35\x22\x6b\x00\xac\xe2\x51\x66\xd1\x7b\xe1\xec\x72\x01\x55\xa7\x7b\x90\x55\x29\xb7\xa3\xb1\x97\x62\x2a\x22\x1d\x8d\x62\x5e\xe2\xe9\xe4\x8f\x8d\x66\x64\x62\xda\x05\x09\x1c\x15\x44\xfc\x79\x68\x1e\x7c\x8d\x64\x71\xbc\x84\x8f\xde\x4b\x46\xdd\x84\xf3\x5a\x30\x29\xfb\x6a\xb9\xb4\x77\xe9\x7f\x1e\x6c\x10\x82\xf3\xc8\xfa\x39\xcf\x1d\x97\x9f\xe5\xc7\xf9\x7d\x97\x5d\xe9\x94\x3d\x4a\xcb\xec\x97\x71\x3e\xc9\xf8\x9f\xcf\xd3\xfe\xf7\x00\xce\xf5\xc1\x4d\x24\x9a\xd1\x23\x9b\x76\x0e\x34\xaf\x16\xfb\xf2\x26\xe8\x87\x33\x13\xdf\xe2\x8a\x5e\x0c\xed\xe6\xf0\x2f\x2e\x7f\x61\x84\xee\x4c\x73\x71\x2d\xcb\xd6\x90\xc2\x24\x3a\x66\xf5\x48\xf2\x96\xf8\x94\x47\x6a\x82\x8d\x19\x96\x4a\x5b\x27\xee\x22\xfd\xc0\x66\xfc\x84\x32\x61\x02\x91\x55\x91\x8b\x8e\x01\x25\x53\xb4\xd5\xd2\x07\x9c\x9a\x78\x02\xb4\x6a\x06\x9c\xc4\x1e\x78\x96\xe8\xe6\xa2\xc5\xcf\x59\x0e\xb1\xf3\xfc\x12\xdb\xd1\x0e\x12\x01\x93\xba\x88\x02\xfc\xfc\x28\xee\x32\x13\x56\xbb\x57\x4e\x24\x6a\x99\x31\x79\xa5\x12\xf9\xb5\x2d\xb0\x28\x89\x64\xcb\xe9\xa8\xef\x9e\xb4\x67\x02\xe6\x8e\x0c\xcc\x97\x78\x95\x3d\x2f\x65\x68\x33\xcd\x63\x7e\x89\xc7\x38\x76\x32\xfc\x7e\x38\xd3\x12\xfe\xa2\x72\x7a\x02\x8c\x17\x2e\x79\x7e\x98\xf3\x74\x69\xaa\x95\x43\x03\xdd\x05\xa9\x78\x35\xf3\x52\xc0\x2b\x4f\xb7\x19\xd4\x46\x52\x25\x97\x8d\x66\xd4\x0b\x94\x80\xcf\xad\xfb\x4d\xb4\x82\xb0\xce\xe9\xa2\xcb\x82\xbf\x96\xfe\x8d\xa4\x20\xcc\x18\x08\xf0\x4b\xf0\x23\xed\x67\xdd\xe9\x6f\x85\xb5\x6a\xbf\xd5\xa8\xad\x27\x86\x10\x5a\x45\xfe\x4f\x7b\x22\xf1\x72\xfb\xc3\xd8\xc0\x90\x55\xcf\x19\x8e\x20\xca\x08\xed\xee\xff\xd3\x3c\x12\xc4\x4d\x00\xba\xab\x51\x14\x34\x53\xca\x16\x44\xcf\xc5\x85\xed\x16\x35\x34\x5d\x46\xe4\x78\x22\xc6\x76\xa3\x31\xb2\x7c\xb6\xba\x58\x4d\x4f\x3a\x4e\xea\xad\xe3\x12\x26\x5a\x58\xf3\xd5\x31\xdd\xa7\xcc\x3a\x49\x57\x47\x8f\x2f\xb5\x2c\xda\x1d\xae\xc9\xd9\x5c\x78\xb3\x59\x50\x6d\xa5\xae\x58\x98\x9c\x4e\x50\xde\xfd\x66\x9a\xce\x5f\x8e\x56\xd3\xe9\xc6\x0b\x34\x5c\x2a\xd2\xa5\x9d\x4a\x65\x6f\xce\xdc\x3c\xbd\x93\x86\x30\x45\x96\xaa\xbb\xc5\x96\x9c\x91\x4e\xf3\xa0\x27\xd
x00\x9f\xb2\x3c\x76\x32\x0e\x92\x4d\x91\x89\x49\x21\x53\x62\x62\xce\xf4\xfd\xf6\xf2\xf8\xf6\x96\xcf\x78\x51\x69\x6e\x54\xb0\xfb\x78\x89\xfe\x32\x5e\x37\xc9\xd1\x53\x0a\xf8\x6b\x51\x94\x87\x5b\x7e\xae\xe3\x7e\x65\xe2\x34\xaf\xb8\xb9\xe3\xee\x7f\x5f\x24\x77\xfa\x03\xfc\x01\x69\x23\xba\x93\xab\x22\xf1\x85\x8e\xe9\xb1\x45\x55\x8e\x3b\xc8\xd6\xea\x39\x8e\x08\x27\xdd\xf9\xe9\xb0\xcf\x93\xb0\x91\x4e\xee\x3c\x69\x35\xe0\xe2\x18\x8d\x02\xf7\x14\x6a\xb9\x95\xe2\x9d\x76\xad\x67\x06\x88\x8e\xb5\x7a\x27\x83\x44\xce\x43\x56\xe2\x74\x88\xce\x15\xec\x99\x8b\xcd\x43\x70\x0b\xc7\xea\x4f\xd8\xc1\x05\x86\xf4\xa2\x43\x0d\xe6\xe2\xc7\x47\xaf\x8a\x4d\xb8\x8d\xf9\xdd\xab\x2e\x18\x11\x25\xc3\x79\x8c\xce\x1a\x89\x1d\x03\xae\x13\x83\x1c\x11\xc3\x38\x7a\x6f\x87\x27\x31\xe7\x35\x86\xe0\xc6\x39\x5d\x7d\x25\x89\xb4\xec\xb3\x07\x11\xd1\x46\x52\xa4\xaf\xc8\x78\xb3\xb1\xd8\x33\xf2\xa6\xc1\x63\xf0\x50\x4b\xc9\x39\x8e\x13\x93\x18\xc6\x7c\x34\x8f\xda\x98\x3f\x72\xe4\xad\xd0\xb2\xd7\x79\xf6\x20\xd0\x39\xe7\x33\x3f\xc5\x32\x1f\x33\xbe\xd5\xb4\xf2\x53\xb3\xd2\x1e\x6e\x49\xd8\x9c\x99\x42\xa3\x70\x04\x72\x70\xa0\x31\xdf\x2d\x84\x11\x63\xd9\x27\xf7\x6b\x6f\x86\x1f\x58\xe2\x33\x49\x9e\xc1\x08\xaf\x6a\xff\xc7\x48\xd4\xf1\x7b\xa2\xda\x19\xc8\x9f\xe5\xeb\x06\x7f\xfa\xb6\x1e\x37\x1d\xd9\xee\x49\xe4\x50\x3d\x40\x13\xe0\x3a\xfd\xf3\x95\xc2\x62\xbd\x65\xd6\x44\xd7\xd0\x03\xd2\x5f\x82\x62\xee\xf9\x62\x8f\xed\xb1\xb7\x30\x13\x32\x42\x09\xdf\x44\x16\xa8\xc6\xc2\x1c\x18\x2a\x1a\x26\x94\xc4\x98\x37\x6c\xed\xa4\xbf\x12\x73\xdd\x77\xdc\x27\x6d\x30\x15\xa1\x3c\xff\xc5\x30\xea\xa3\x6a\xbc\xfe\x81\x97\x23\xfa\xd0\xd6\xc4\x12\xf3\x15\x8d\xad\x4b\x89\x4e\x66\x3f\x32\xc3\xc3\x31\x30\xf8\x62\x4c\x64\x00\x45\x63\x45\x26\x40\x01\x27\xaa\x57\x11\x44\x51\xb5\x0f\x3c\x28\x24\x96\xa4\xa3\xea\x81\x8f\xbd\x80\x99\xad\xdf\x0f\x1c\xe8\x8e\x30\xc2\xb0\x23\xf6\xf0\xe1\x20\x1c\x6d\x77\xce\xec\xdd\x3c\x66\x8f\x16\x93\x89\x5c\x97\x93\xe6\xf9\x62\x06\x26\x4c\xe4\xc7\xf4\x51\x88\xd1\x8d\x07\xde\xc8\x21\xb5\x1f\x0a\x8f\xa8\x45\x8b\x3e\x42\x0c\xf8\x21\xe8\xdf\xa3\x90\xbc\xff\xb0\x27\xb0\x91\x39\x1e\x68\x2e\xeb\x67\x84\x6b\x14\x59\xf3\x2c\x2b\x8d\xda\xc8\xfc\x24\x2e\x74\x46\x45\x43\xd3\x71\x78\xbc\x38\xb0\x23\x28\xe9\x51\x1e\x5c\x19\x99\xab\xd2\xe4\x7c\x40\x14\x90\xbb\x49\x91\xdb\xf6\xc8\x0c\x12\x7b\x64\xaf\x43\x11\x6b\x53\x7c\x50\xfa\x91\x97\x08\x77\x7f\x84\x8b\xfd\x61\x5f\xfc\x9d\x8b\x79\xc7\x00\xba\x17\x8d\x2c\x2a\x86\x0c\x11\xc3\x31\xd3\xfa\x26\xd7\xf4\xa4\x29\xe8\xab\x5e\xae\x47\x8a\x18\xfa\x47\xfa\xa1\xb2\x22\x84\xe0\xdf\x8d\x59\x5a\x76\x57\x12\xec\xa3\x74\xeb\x81\x3d\x92\x26\xd6\x15\xf0\x6e\x01\xf4\xcf\x4e\x4d\xd5\x26\x71\x48\x29\x7e\xcb\xef\xcc\x51\xb3\x2f\xe7\x8f\x32\xbc\x82\xfe\xbf\xfc\x8f\x90\x79\x0e\xc2\x90\x41\xca\x03\x47\x36\x18\x82\x91\xe3\x07\xc4\x5c\x47\x2c\x24\x8a\x43\x69\x04\xd3\xc4\x63\xca\xff\xea\xde\x53\xcf\x36\xff\xf3\xbe\xce\xd9\x90\xf2\xf9\x9f\x7d\x9f\x8f\x60\x76\xfe\xcb\x3d\x57\xb0\x36\x1f\x86\x2f\x77\xee\x03\x90\x2b\xc8\xe9\x37\x36\x2a\xdc\x18\x5d\x70\x4d\x31\x42\xac\x43\x43\xba\x60\x13\xdd\x05\xe5\x94\x43\x6a\x19\x5f\x41\xbf\x58\xf9\xa2\x1f\x57\x26\xb2\xc8\xbc\x4e\xca\x37\x0d\xf5\xeb\x86\xa1\xc6\xbc\x44\xc0\xb2\xa3\x07\xe7\x08\x0a\x0a\x32\x1f\xea\x71\xeb\xc6\x72\xfc\x4d\x27\x65\xfd\x52\x28\x32\xae\xf9\x52\xdf\x17\xa7\xcc\xb0\xbe\x0c\x7a\xbf\x3c\x2d\xd0\xa2\x59\x74\xb0\xeb\x28\x59\xc0\xbc\x3e\xe4\xbe\xb3\xf5\xfd\x00\x56\x0d\x3a\x85\x22\x25\x62\x33\x7a\xcf\xb2\x63\x8b\x2e\x53\x8c\x96\x97\x60\xe1\xaa\xe0\xe6\xbf\x25\xd5\x68\x05\x0c\xca\xb3\xfa\x01\xec\xf4\x86\x9a\x96\xa6\x20\x0e\xa0\xe9\xa1\xaa\xc4\x7a\x62\x2e\x58\x99\xfd\x46\x7f\x09\x5b\xba\x83\x8f
\xee\xc7\xa1\xe0\xb6\x2f\xc7\x94\xd6\xa6\x2c\x2b\x66\x69\x4b\x18\xcd\xc3\x2f\x6b\x96\x6c\x9d\xc5\xe8\xce\xe5\x01\x83\x20\xaa\xc2\x73\x7f\x25\xc2\xf0\xd1\x75\x65\x5a\x52\xee\xcb\xee\xd3\x74\xe8\xe1\xb3\x36\xd8\x24\x3d\xf8\x0c\xfd\x1a\xc2\x02\x65\xf2\xe0\xad\x6d\xe6\x1b\x1c\xde\x87\xcb\x3f\x62\xbd\xc1\xa5\xc5\x79\x59\xe0\x28\x55\x0e\xb9\xa3\x4d\x89\x2a\xbc\x98\x0d\x3a\x58\xfd\xc6\x24\xd4\xe1\x52\x3a\xfa\x70\xad\x1c\x3b\x37\x43\x2d\x6f\x58\xfa\x6d\x19\x3c\x6a\x64\xb8\x40\x5a\xe2\xac\x61\xa6\xe8\x85\x9d\x29\x2e\xd1\xb0\x5f\x41\x13\x1f\x3c\x8c\xdb\x9f\x0b\x98\x3c\x06\xb4\x91\x6a\x58\xd5\xa3\x2b\xbe\x04\x4b\xe5\x3e\xc0\xb2\x94\xa4\xf6\xb0\xcb\x71\xea\xdc\xd4\x20\x40\x22\x77\xfc\x0c\x96\xe3\x9e\x62\x5a\xd6\x8d\x7a\x79\x66\x8f\x6a\xb9\x0e\x55\x43\x15\x78\xff\xb9\x7f\xfa\xf7\xe0\xae\x56\x93\x78\xa8\x82\x3a\xe2\xeb\x34\xd3\x78\x4a\x3b\x71\xb0\x8f\x04\xe4\x52\xf8\x7b\xf5\xc4\x36\x3f\x9e\x0a\xa0\x99\x2e\xbc\xd9\x88\xae\xe1\x79\x60\x40\x4b\x86\x7e\x25\xd9\x38\xec\x9e\xc9\x6c\x43\xa5\x90\xc9\x80\x84\x05\x29\x90\x0f\x3f\xee\x7e\x1e\x90\xcf\xa7\xa3\x2e\xbf\x94\xba\x02\xa1\x75\x10\x17\x68\x00\x4a\xfd\x4b\x64\xd6\x43\x40\xd2\xe3\xae\x15\x07\x68\x58\x8d\x91\xc3\x1a\x30\x56\xd9\x4b\xd2\xaa\x50\xed\x89\x99\x6e\x7f\x09\xb2\x8a\x6a\xfa\x8c\x42\x73\x8a\x70\x06\x02\xcb\xb5\xa0\x67\x50\xc7\x51\x1e\x7d\xd9\x22\x20\x0b\x90\x55\xa7\x94\x68\xeb\x7b\xc4\x29\x0e\xd0\x8a\xf1\xe9\x12\x48\xd5\x2b\x70\xaa\x57\xaf\xc6\xc4\x50\xc6\x7c\xb4\xed\x0a\xcc\x34\xdc\x31\x40\xc8\xcb\xf6\xf7\x49\x03\x5e\xad\xfc\x45\xba\x6a\x83\xd8\x3a\x68\x95\xe3\x3e\x5f\x4e\x56\xd5\xa7\x7c\x40\xf6\x7b\x51\x1c\xe5\x40\x4d\x5b\x5a\xf5\x9f\x76\x91\xcf\xb6\x2c\x2a\x6c\x3f\x44\x45\xfd\x72\xae\xc8\xab\x03\x0b\x7d\xdc\x17\x7c\x24\x69\x78\xd2\xb0\x2c\x5d\xb1\x6c\xaf\xb8\x56\xdf\xca\xcd\x28\x85\xad\x0d\xad\x11\x61\x20\x60\x2b\x57\x5b\x2e\x6d\x69\x71\x8e\x60\xb0\x49\x05\x83\x18\xb2\x34\x9d\x86\xf6\x34\x66\x05\xa9\xc3\xe8\x3e\x09\xac\x73\xb7\x73\xc8\x16\x63\x48\x52\xa1\x1f\x72\x84\xfc\x0d\xf6\xc8\x5c\x6c\x94\x0f\xf9\xd1\x20\xac\xbc\x36\x8f\x56\x59\x03\xdf\x1a\xd0\x59\x34\x41\x6e\x5a\xeb\xaa\xe1\x16\xd8\x7d\x00\x5d\xe1\x2a\xd3\xe9\xf5\xfb\x7b\x6b\xf6\x0d\xa8\x0c\x56\xeb\xb3\x9d\xbc\xb0\xab\x59\x05\x49\xda\x0e\xea\x05\x73\x21\xe8\x2b\xd0\x40\x9e\xa4\xe6\x2d\x1e\xbc\x41\xd3\x07\xc5\x06\x97\x75\xe4\xc3\x97\x9a\x0f\xc2\x4d\x1e\xc8\x37\x02\xbf\xdd\xc0\x50\x1c\x31\xc9\xdf\x31\xec\x6f\xd6\xb8\xa2\x25\x63\x76\xff\x97\x39\x38\xed\x46\xf5\xb4\xfa\x67\x11\x83\x15\x30\x53\x82\x04\xdf\x40\x63\xbb\x61\x57\x7e\x60\x0e\x25\x42\x3a\x89\x69\x33\x4b\x7e\xac\x5e\x02\x12\xa0\xb1\xde\xb0\x53\xe2\x58\x79\x9d\xc9\x63\xd5\x42\x70\x02\xa9\xdf\xd2\x93\x64\xdc\x5f\x4c\x6c\x80\xb8\x2f\xdf\x6d\x67\xb2\xd2\xb7\xd2\x37\x4d\x8d\x3e\x72\x76\xc1\x68\x55\x38\x6a\xdf\x84\x69\xcd\x92\x2b\xad\xf7\x88\xfb\x2e\xd8\x86\xee\x74\xe4\xe6\xf3\x11\xf0\x54\x8a\xe4\x12\x64\xb9\xc7\xbc\xb4\x68\x8d\x14\x0a\xbc\x50\x17\x67\x9d\x05\x7d\x9f\x17\x33\x88\x7a\xc4\x0d\x11\xd6\x07\xa9\x25\xe6\x9a\x82\xd2\x7c\x06\x20\x42\xe8\x55\xd2\x96\x5a\x79\x55\x1b\x1a\xa5\xa3\x91\xa2\x7a\xeb\x55\xaa\xc5\xd0\xd7\xe3\x86\x0d\x59\x81\xa3\x9d\x66\xf8\x01\x20\x79\x9a\x60\x4e\xf1\x3c\x03\xbd\x7a\xfc\xa7\xc4\x42\x84\x7c\xc1\xe4\x04\xba\x41\x4f\xed\x66\xef\x1a\x59\xb3\x2a\xd4\xb8\xf7\x71\xc5\xad\x9e\xcd\x56\x47\xce\x0a\x68\x79\xf2\x91\xb0\x0f\xc8\x70\x30\x5a\x0e\xfe\xda\x6b\x96\xaf\x80\x4c\xd7\x21\x5e\x7f\x21\x5e\x1b\x2b\x15\x78\xd7\xfb\xb8\x81\x6f\x15\x2c\xd6\xcd\x9b\x87\xe8\xcc\xe3\x54\x02\x25\xbb\xfe\xa8\x39\x6a\xe6\x98\x25\xaf\x64\x36\x24\x13\xe2\x52\xae\x6c\xcd\x8f\x51\x18\xf2\xc7\x15\x5a\x09\x18\xf7\x8f\xbc\x63\x5d\x45\xd2\xf6\xd8\x5c\xc
5\x9f\x01\x67\x60\xe7\xc1\x26\xaa\x33\x5b\xad\x6d\x7b\x36\xf4\x2a\x22\xe3\x3e\x48\xa9\xee\xea\x15\x43\x77\x59\xea\xf0\xb1\xe4\x92\xf7\xde\x9b\xc3\xdc\x9b\xd5\x3e\x69\xbc\x02\x40\x3b\xb7\xb1\x39\x2f\x3e\x85\x3d\x2d\x22\x65\xd6\x11\xa4\x56\x31\x52\x1f\x51\xfb\xf5\xb5\x7b\xe5\xce\x0d\x96\xd7\xce\xef\x02\x86\x94\x39\x63\x76\x79\x22\x99\xcb\xc1\xd4\x2c\x33\x1b\x62\x75\x16\xc8\xdd\x8d\x95\x06\xc3\xaf\xdc\xc8\x34\x36\x4e\x06\xcd\xbf\xd7\xd6\x10\xa9\x19\x4b\x17\xfb\xc9\x84\xb0\xe6\xb4\x47\xcc\x15\x08\xb9\x51\x17\xcc\xc0\x91\x09\x1e\x0c\x09\x39\x35\x73\x60\x4f\xb1\xd5\xb4\x3e\x6f\x73\x6c\xe6\x22\xc7\x52\x5f\x7e\x7f\x75\x55\xad\xfa\x78\xd1\xcb\x6d\x66\xfc\xba\x0b\x80\xf9\x8e\x92\x3a\x2a\x6a\x6f\x82\xcf\x2a\xe8\x28\xa9\x5c\x41\x16\x9d\x40\xad\xe0\x12\x95\x0d\x31\x24\x6f\xf1\x27\x68\x6e\xba\x61\xaf\x65\x6d\x60\x0d\xe0\x58\xa3\x89\x0b\x7b\x26\x36\x79\x12\x3f\x55\x83\x88\x88\xa2\x3c\xb5\x1d\xf0\x59\xb9\x26\xe8\x6b\xd6\x25\xff\xc7\xc2\xae\x70\x32\x87\xae\xee\x1a\x38\xe8\xa1\x01\x74\x31\x39\xda\x6c\x57\x9a\x4b\x25\x76\x67\xdd\xc1\xc6\x37\xb5\x82\x3f\x55\x50\xed\x8a\xa7\xdb\xda\xfb\x60\x9c\xf1\xe1\xf6\x9e\xca\x20\xeb\x43\x7e\x92\xc5\x07\x45\x2d\x1d\x3e\xca\x6b\x91\xed\x77\xbf\xac\xd6\x99\xe4\xd9\xa0\x6f\x1b\x97\x38\xfd\x66\x7a\xfb\x1c\xd5\x5a\x82\x25\x7a\x21\xc1\x3d\x50\xa7\x57\xf0\x66\xed\x43\x3b\x44\xcd\xd8\x74\x8f\xcd\x9a\x82\xf6\x7a\x46\xee\x3f\x38\xae\x51\xeb\xf9\xf8\xc9\x96\x49\xbd\x4d\x07\x70\xb0\xfa\x61\x0f\xda\xea\x7e\x02\x32\x34\xcb\x76\x4c\xa7\x7e\xd8\x82\xff\x9b\xe8\x60\xb6\xe9\x7c\x74\x49\xb3\x72\x78\x30\x61\xe3\xf9\xe3\x5b\x91\x90\x59\xd3\x7e\x00\x03\x76\xe1\xce\x11\xfa\xaf\x00\x04\x7e\xad\xe7\xc6\xd2\x05\x2a\xb7\xe1\x4e\x43\xf7\xb0\xf7\xd8\xd6\xef\x30\x8b\x30\xc8\x69\x4d\xfc\x84\x82\xec\x06\x2d\x1a\x22\x7b\x01\x77\x13\x49\x76\x6e\x41\x49\xb0\x8b\xfb\xf0\x54\x7a\x68\x48\x1e\xa1\xc0\xda\x72\x0c\xfc\xf7\x09\x98\x67\xd1\x18\x81\x69\x25\xa9\xf0\x3a\x5b\x98\x7f\xe7\x8c\x74\x2d\x70\xbc\x25\x77\xd3\x5d\x72\xdf\x76\xd7\xbe\x69\x3a\xd6\x25\x17\xf9\x61\xba\xcb\x41\x3e\x5d\xcb\xf3\x37\xeb\x19\xa1\xff\x20\xb7\xf6\xf4\x2d\x75\xd7\xfc\xd4\xb7\xa0\xbb\xca\x3c\xc4\xfe\x03\xaf\x78\x59\x6f\x91\xd8\x46\x58\x5c\xac\x7f\x42\xb5\x94\x5b\x45\xf2\x25\xc1\x9f\xfb\x25\x61\x5c\x20\x5c\x0f\x51\x55\xd1\xc5\x10\x52\x1a\xd0\x56\x61\x29\x11\xfc\xfc\xa6\x21\x49\x57\xb3\x62\xf8\x4d\x94\xd6\xe3\x36\x65\x89\xaa\x9a\x45\xf9\xc0\xe8\x9a\xe6\x24\x74\x22\x57\xed\x52\xc8\x5b\x07\xa3\xf0\xc8\xab\x07\xd8\x75\xde\x0b\xa0\xb7\xb8\x6b\xda\xf2\x1d\xf5\x06\xdc\x3a\xf8\x7d\xb7\xaa\x7c\x54\xe4\x59\xd7\x12\x5b\xba\x5a\x41\xbd\xe4\xef\x35\xe6\x11\xa8\x89\x51\x2b\xf6\x30\xbb\x8a\x7e\xbb\x70\x94\x35\x02\xce\x3b\x84\xe4\x12\x1e\x8b\x6f\xc2\xbe\xa9\x59\xda\x55\x77\xb8\x76\xf6\xc1\xf3\x15\x66\x24\x78\x13\xe2\x89\x67\x89\xb7\x17\xb2\x40\x80\x2c\xe8\xa4\xbd\xd8\x88\x8f\x1d\x24\x4e\x05\xdf\x9c\x19\xaf\x6f\xed\x2a\x26\xa9\x77\xb3\x54\xa2\x88\x61\xd5\x42\xa0\xc9\x53\x38\xda\x6d\xf5\x1d\xdd\xa6\x77\x95\x66\x6c\xde\xed\xb0\xba\xe0\x23\x00\xde\xaa\xcd\x75\x4d\xbb\x15\x5c\xd6\x2e\xc0\x9a\x05\x21\xeb\xd6\xbb\x8a\x00\xb3\xae\x54\xd7\x5c\x18\x34\xe0\x0b\x86\x97\xd5\x89\xf4\xe7\x74\x56\x0f\x2b\x9f\xaf\xf3\x00\x23\x0e\x12\x75\x11\x88\xd8\x95\xf1\xfb\xe2\xa5\x2e\x5e\x19\x7d\xcb\xce\xa4\xaa\xc2\x21\xc2\x59\xf9\x08\x29\xcc\x2e\x1f\xfa\x95\x99\x09\x18\x77\x53\x56\x7f\xe7\x1c\x57\xad\x20\xf2\x2b\xe4\xaf\x35\xc3\xba\x75\x7e\x57\x00\x79\x9d\xf8\xeb\x7c\x92\xf3\x82\x1a\xb9\xab\x6f\x2e\x38\xdf\x6c\x35\x14\x42\xc7\xbd\xcb\x53\x11\x14\x37\x43\xad\x4e\x7b\x1f\xab\x70\xb2\x79\x0e\xb0\xac\x2d\xf8\x4b\x8b\x48\x05\xae\x03\x4c\x6b\x6c\x35\xb1\xd3\x09\xa3\xe1\x68\xad\x96\xe5\xbb\x
83\xf1\x6a\xe7\x32\x7a\x81\xfb\x4e\x97\xdf\x59\x38\x30\x59\x26\x23\xb0\x43\xd7\x80\x97\x3a\xe9\xcd\x94\x02\x1b\x97\xda\xa3\x47\x63\x66\xf7\x69\xd2\x51\xd0\xb1\xcf\x0f\x0e\x01\xde\x5c\x1d\x1d\x08\x30\x40\x69\x4d\x2c\x0c\x8a\xd9\x72\x0a\xeb\x17\xd1\xab\x7a\xb7\xd3\x28\x6f\x47\x97\x24\x1b\xd8\x25\x41\x10\xf0\xbc\x53\xe1\xaa\x0b\x7c\x1c\x90\xaa\xdc\x45\xfa\x7b\x2a\x63\x2f\xbd\xad\x49\x00\xf0\x25\x6c\x0e\x9a\xa6\x17\xfe\xdc\x48\xcf\x1c\xd4\x55\x86\x7e\x00\xb8\x2a\xca\x22\xba\x12\x6a\x18\x98\x0d\xaf\x22\x49\xd8\xc9\x83\xad\x9c\xcf\x69\xd5\x82\xaf\x74\x61\x18\x2e\xde\x14\x7c\xeb\x7a\x1a\x3b\x75\xe5\x81\x29\xaf\xae\xb3\xad\x92\xd7\x21\xc3\xd7\xcd\x58\xfd\x22\x53\x75\x0f\xb0\x24\x6d\x9e\xca\x81\x04\x6c\x79\x0c\xad\xb1\xc4\x67\x05\xb1\x99\x1c\xce\x4f\x22\x7f\x25\x0c\xd5\x25\x87\x67\xca\xdb\x8c\x18\xcc\x38\x52\xab\xe1\x05\x01\xdd\x07\xc5\x48\x78\x9a\xc7\x1e\xd6\x50\x6a\x4f\x96\x9b\x73\x59\x73\x83\x9e\xee\x5d\x41\x1e\xab\x40\xa9\x7b\x62\xfc\xb6\x3b\x3f\x36\x0f\x30\x19\x54\x4c\xc1\xf0\xdc\xe0\x3f\x0e\x64\x26\x72\x53\xd4\x4b\x48\xeb\x1b\x3a\x4c\xdf\xbe\xc2\xfa\xaf\x84\xac\xbc\x75\xa0\xe4\x17\xf7\x64\xeb\x19\x81\x76\xb1\x06\x61\xbc\x10\x6e\x0b\x66\x6b\x59\xc8\x77\x1d\xe7\x7a\xc8\xd0\x10\xb7\x80\xad\x81\x0d\x05\xad\xf5\xb0\x16\x34\x53\xa7\xbd\x68\xad\x37\xae\x3c\x54\x0e\x49\xc0\x9a\xe9\xf3\xc7\x73\xc8\x43\x5e\xb9\xb5\x85\x6f\x6e\x5a\xe8\x4f\xb0\x66\xe8\x54\xba\xa2\xdf\x56\x5c\x2e\xe8\xa4\xf8\xd7\xc6\xca\x88\x8f\xf1\x43\xb6\xd6\xbf\x7a\x16\xd0\x40\x42\xa8\xa6\xdf\x00\x7c\xfb\x31\x9d\xb5\xd5\xf9\x97\x8f\x24\xdb\x69\x69\x3e\xd1\xc7\x95\x89\x81\xfe\x0d\x59\x33\x7f\x5c\x65\x9b\x00\x04\x65\xf7\xca\x4a\x89\x84\xdb\x19\x4a\xdd\xbe\xcb\x78\x5d\x40\x7d\x8d\xa9\x9e\xaf\x79\xae\xfd\x0d\x87\x9d\x4b\x43\xe2\xc2\x45\x2a\xaf\x25\xec\x51\x73\x41\x9f\x1a\x18\x7c\x65\x85\x5e\x9b\x4d\xe5\xa9\xc3\xb4\xb7\xca\x0d\xc4\xd5\xf9\x79\x15\xbe\x36\x76\x84\x5c\x16\xb1\x5d\x2d\xf6\x22\x8d\x02\x6f\xd8\xdb\xbb\x7b\x0c\x45\x1a\x91\x8b\xc0\x45\x01\xb1\x08\xdf\x5e\x7a\x60\x60\xa7\xf5\xf8\x12\xd2\xa7\x31\x1d\xea\x76\x43\x59\x7f\x22\xcc\x11\x6d\xa0\xfe\x03\xd6\xea\x59\xfa\x3c\x42\x17\xc5\xfa\x6a\x6c\x56\x92\x90\x69\xf3\x48\xfa\xfd\x4d\x3f\x06\xa0\xac\x50\xe5\x90\xbb\xdf\x8a\x0d\x52\x67\xdf\xa8\x4a\x84\xbd\x5d\xf8\x7d\x17\x45\xb6\xf3\x14\x90\xc4\x14\x2a\x0c\xc7\x7f\x70\x58\xdb\x1f\xca\xdf\xc2\xeb\xfe\x5d\x08\x12\xf6\xa7\xeb\x8f\x1d\x0a\x9b\x3c\x83\xf5\xf4\x67\xdd\x92\x4a\x4a\xd6\x5f\x23\x99\xfd\x41\x6a\xcd\xdb\xb6\x7f\xd9\x1a\x81\x7c\x2d\xfe\xbc\x7d\xfe\x76\xab\x9c\x18\xc1\xf5\xad\xa7\x95\x4f\xe6\x1f\x48\xc2\xbe\x19\x5c\x5d\x9b\xbe\x67\xba\x8b\xdf\x8d\x02\xf2\xce\x87\x4b\xdc\xbe\x31\x88\xe1\xd3\xd1\x17\xc8\x76\x80\x53\x18\x98\xa6\x77\xf5\xc9\x6b\xe3\x84\x44\xd7\xa4\x28\x9b\x77\x9a\x86\xc4\xe5\x18\x95\xf0\xfe\x1e\xdd\x33\xf0\xcf\xaa\x19\xa6\xbd\xbc\x5e\xb5\x7a\xe5\xfa\x82\xb7\xe5\x95\x18\x3f\xfa\xda\x9b\xc2\x0f\xa8\xad\x83\x9f\xc0\xab\xfa\xc0\x8e\x80\x9d\x04\xed\xbf\xe8\xf4\x64\xd5\x47\xb2\x2b\x0d\xcf\xee\xc1\xf4\xe4\x40\x23\xcc\x68\xe7\xda\x2e\x49\xd0\xb4\x16\x8e\x83\x5e\xb5\x36\x3d\xb1\x2f\x24\x32\x72\xb1\xd9\x1d\x34\x5a\xc7\x3e\x61\xa7\xb6\x3e\x07\x5b\xb5\x42\xd5\x8f\x68\xd0\x3b\x9c\xc7\x93\xbf\x78\x27\x5e\xe5\x70\x94\x67\x11\xa5\x35\x08\xaa\x08\x54\xe4\x9c\x75\x1c\x62\x2d\x60\x95\x88\x66\x81\x5d\x5f\x80\xbc\x79\x01\x01\xde\x34\xce\x58\x56\x61\x19\x2f\x77\xb2\x3d\x65\x41\xbc\x56\x47\x63\xed\x6b\x2a\x4a\x1d\x2a\x59\x1f\x8f\xad\x2e\x2b\xb8\xfc\x08\xb0\xaf\x17\x77\x26\x60\x2f\x7a\xc8\xe2\xe2\x4e\xef\xb7\x6f\x66\x6a\xb9\xb8\x4e\x7a\xf5\x69\xf9\x6b\xe8\x56\xe6\x93\x90\xf7\x7a\x86\xf5\xe0\xf7\xe9\x05\xc0\xb5\x6e\xe1\x54\xe2\x67\x4a\xee\x3a\x09\xf6\
xda\x48\xda\x85\xdd\x69\x6e\x56\xf7\xf6\x85\x10\xa0\x30\xd3\xdf\x1f\x81\xa8\xd6\x17\xd6\xbf\x24\x40\x2d\xa3\x15\x1c\x17\xdb\x68\xb1\xdf\xcd\xdc\xbf\x04\x8b\xf5\xed\xe4\x7f\xd1\xb9\x79\x65\xf5\x33\x61\x7c\x11\x23\x7b\x91\x67\x9b\x43\xae\x17\x4c\x59\x89\x39\x60\x61\x92\x70\x73\x1b\x34\x7a\x49\xab\xfc\x95\xbb\x8d\x09\x3c\x56\x4f\xb7\x94\xbd\x17\x44\x09\x7d\xea\x6a\xfb\x82\xd2\xbd\xf8\x69\xa6\xe5\x55\xc4\x2b\x35\x97\xfd\x4b\x34\x64\x4c\xb3\xfc\xd0\x66\x77\x12\x84\x79\xa5\x59\x9e\x4a\xa0\xc3\xba\x24\x99\xe8\x57\x2a\xe2\x9e\x83\x16\x8b\x7f\x5f\x00\xbd\xca\xd1\xf9\xeb\xba\x25\x44\xc2\xee\x0a\xd1\xfd\x65\xa7\xce\x26\xf0\x31\xb8\x35\xc3\x8d\xbe\x93\xf6\xfa\xeb\x6d\xe3\xdf\x12\x70\x05\x07\xc4\x5e\x6e\x6c\x2c\xe3\xb3\x57\xc9\x83\x1d\x72\x1d\xd3\xa2\xe5\x82\xeb\xf0\x0b\x6f\xc6\xb7\xac\xe9\x7d\x1c\xb5\x04\xf2\x35\xed\xea\x1e\xff\xaa\x1b\x6a\x3d\xf4\xeb\xb7\x68\xd6\x2e\x70\xeb\x3e\x5e\x92\xb2\xf2\xd1\xdb\x86\x71\x85\xdd\x80\xb0\xf9\xef\x42\xb8\x3d\x0b\x9e\x20\xf4\x9b\x67\x64\x9a\x65\xce\x9b\x39\xa0\x05\xa3\x2f\xf6\x85\x61\xc5\x1e\x88\x58\x7f\xf0\x7e\xad\xa9\x9b\x23\xe4\x06\x12\x73\x81\xb5\xb5\x06\x6b\x0f\x5c\x3b\x4f\x2c\x6d\x3a\x2f\xbf\xb9\x93\xf3\x8b\x7f\xed\x63\xca\xe6\x30\xdc\x1a\x83\x3e\x15\xb0\x43\xa9\x0d\xa0\x76\x85\x31\xfd\xda\x37\xe6\x5a\xb8\x40\xdf\xab\x3a\xb9\xfe\xba\xfc\x2c\x8a\xa4\xbf\x8e\xcb\xbf\x11\xd6\x11\xdb\x57\x7f\x27\x2f\x81\x03\x70\xf6\xc4\x79\x5c\xab\xd7\xce\xbc\x18\x60\xc8\x7a\xeb\x58\x87\x7c\xad\xbf\x60\x68\x37\x00\xea\x19\xc3\x96\x66\x3a\x89\x53\xd9\x71\x48\x67\x8b\x7a\xd5\xe5\xc9\xaf\xa0\xb8\x1e\xfd\x1e\x34\x81\x6b\xdd\xf3\xc7\x06\x80\x0c\xdc\x9f\xe9\xfe\x6d\xbc\xa5\x92\xae\xd5\xb9\x11\x6d\xbf\x88\x9e\x76\x02\xed\xc5\xbd\x55\x40\x06\xf5\xdb\xea\xc1\x6c\x2b\x0b\x0b\x55\x7b\xae\x75\x2e\x4d\x27\x1e\x48\xe9\x60\xd2\x06\x6b\xd7\xfa\x27\x76\xf0\xf2\xcb\x39\x9e\x16\x7d\x91\x76\x30\x4e\x05\xde\xd3\xcc\x42\xef\x1e\xc5\x6b\x7d\x04\x5e\xc9\xcc\xe4\x79\xca\x6d\x85\xc1\x73\xe7\x3a\xfd\x6a\xf7\x13\x2c\x9c\xb8\xa6\x2a\xe6\x28\x34\xe2\x49\x94\x9a\xad\x78\x00\x1d\x2d\x5c\xc5\x9b\xa3\x19\x52\x1c\x28\x9a\x18\x42\x02\xc3\x5b\x7c\x8a\x6a\xcb\x77\x0f\x06\x9a\x82\x67\x5b\x00\x6d\xc0\xa0\xe2\xb2\x8f\x02\x57\xc1\x6e\x6d\x93\xbe\xc4\xc5\x81\xb6\x12\xb8\xd2\x51\x24\xe4\x76\xfd\x94\xd7\xb1\xcf\xc4\x3d\xc6\x78\xad\xd1\x9e\x64\xc1\x03\x92\xbe\x9c\x8b\xab\xb7\xe4\x52\x02\xdc\x35\xbd\xdc\x7d\x74\xa9\x38\x92\xfb\x6d\x53\x9f\x3c\xe8\x7f\x01\xf5\xcb\x2f\x0f\x01\x9b\x8e\xb2\xcc\x1f\xe6\x40\x93\x9d\x2d\x33\xd6\x88\xf7\x74\x5b\xd3\x31\xd6\x68\x43\x97\xb4\x71\xcd\x4f\x50\x3e\xb8\x19\xeb\x8c\xce\xac\x55\xcf\x96\x8c\x09\x2b\x29\x98\x58\xe1\xbf\xa7\xdd\x74\x85\x99\x9f\xd7\x82\x01\x21\xc1\x64\x2f\xe5\xab\x9c\x8c\x13\x83\xb5\x33\x69\x18\xe3\x95\x4f\xcd\xb9\x3a\xe4\xe9\xf1\xe4\x30\x24\x34\xd3\x80\xbb\x2b\x11\xd0\x0b\x5f\x8a\x92\xfa\xad\x20\xdd\x0e\x00\x78\x4b\xa2\x90\x0b\x6c\x4e\x12\x24\x6a\x1b\x39\xd5\xe3\xc6\x1a\xf6\xd7\xb5\x2f\xb8\x77\xf1\x78\xb9\x45\xeb\xec\x02\x3b\x71\xc6\x6f\xeb\x71\x33\x08\xbd\x18\x22\x1d\x2c\x28\xc4\xdd\x0b\x0f\xba\xd7\x68\x5b\xa3\xa1\xfd\x00\xb8\x88\xae\xac\x17\xf3\x1a\x1b\xff\x1f\xa7\xcb\xa6\x2d\x96\xad\x91\x83\xcb\x1a\x88\x5f\xc5\x11\x9a\xf5\x48\xf3\x48\x28\x2d\x72\xdb\xb3\xd8\xc2\x9e\x05\xed\x7b\xf6\xbc\x3a\xef\xce\x3a\xf8\xb7\x70\xe6\x19\xc1\x36\x34\x79\x20\x15\x38\x32\xb6\x6b\x4f\x66\xae\xcb\x2a\x73\x98\x81\x13\x86\x87\xcd\xab\xea\x93\x60\xfd\x53\x8d\xb2\x13\xef\x08\xa8\xbf\x6e\xc3\x37\x42\x77\x87\xf7\xd5\x79\xc1\x2a\xe3\xed\xce\x3a\xb2\x48\x37\xcb\x59\x1f\xd7\xce\x61\x12\x47\x04\xc7\x21\x85\x57\xf4\xac\xdd\x7e\x11\x2b\x0c\xd7\x85\xe6\x25\x34\xc5\xaa\x3f\x10\x95\x19\xf7\x8a\x48\x72\x15\x52\x2e
\x24\xb9\x0b\x50\x53\xdf\x9a\x9e\x51\xd1\x9e\x1e\x1a\xed\x8d\x9e\xb3\xc8\x6d\x76\x16\x88\xfd\xf0\x58\x27\x6a\xe4\x9f\x71\xe7\xa6\x6b\x1c\xdd\x8f\x4e\xca\xb0\x0e\x12\x03\x0d\x1d\x9f\x68\x33\x73\x18\xaa\x65\x00\x19\x7e\xf0\x5a\x4f\xd5\x69\x67\x9d\x5e\x1f\x08\x01\x4c\x8a\x3c\x47\x8b\x59\xd4\x49\x12\x88\x69\xd9\x91\xf0\x57\xfb\x02\xcf\x89\x38\x5f\x04\xa8\x70\x03\x5e\xbf\x55\x0d\x94\x9d\x1e\xe6\x74\x49\xdd\xfc\x74\x46\x42\x68\x16\x11\x35\x4c\x23\x1f\xfc\x3a\x03\xb5\x86\xcb\x7c\x0a\x22\xbc\x5d\x7e\xe3\xf3\x7a\xc5\xd6\xed\x2b\xd4\x0b\x5e\x1c\x63\x35\xb0\xf2\xae\x81\x4d\x14\xb4\xa9\x96\xfc\x04\x50\x0b\xf5\x78\xa3\xf4\x20\x44\x13\xa6\x4f\xda\x1d\x8d\x81\xf6\x33\xfd\x83\x56\xb8\xf7\x04\x60\x73\x3b\x9e\x84\x27\xc0\xaf\x55\xc1\xe2\xbe\x9d\x10\x11\xd2\x9e\x12\x02\x98\xce\x30\x13\xc4\xc5\x68\x0f\x9c\x7c\x73\x8a\xc1\x7a\x01\xf3\xe3\xb6\x5a\x25\xf0\xd6\x34\xc0\xc2\x84\x55\xda\xbb\xb0\x5c\x5c\x0d\x09\x6c\xda\x4f\x47\x1d\x1c\x7f\x6a\x4e\x0a\x51\x41\xcb\x36\xe0\xd0\xc9\x23\xf8\x79\xa5\x0e\x8c\xb0\x33\x13\xf8\x78\xab\x61\x71\xbc\x81\xcd\x18\x1a\x94\x18\xb0\x9c\x43\xe6\xca\x70\xef\xe3\xc5\xfc\x0c\x9b\x02\x2b\x00\xcb\xda\x4b\x3f\xea\x25\xc0\x1e\x13\x67\x5f\xbf\xbe\x9c\x80\x9d\xd7\xfe\x0b\xad\x19\xc5\x8a\x5a\x71\x61\x23\x0e\x2c\x61\x0f\x0f\xd5\xec\xfc\x60\xba\x06\xd0\xc2\xb1\x04\x21\xc3\x6d\x55\x72\x86\xe3\x0d\x17\x5c\xf8\x60\x9b\xca\x4b\x87\xa4\x2a\xc9\x19\x26\x66\xf2\x72\xfa\xca\xd6\x4c\x3b\xf6\x20\x0e\xe3\xe1\xa1\xd1\x45\x23\xe5\x70\x71\xd6\x7e\x2e\xde\xc2\xb5\x07\x5a\x3c\x68\x34\xea\x83\x1d\x3c\x06\xf8\xf4\x7a\x3c\x4a\x5f\x84\x33\x3a\x40\x07\xe0\xce\x03\x4d\x79\xb5\xa0\x0b\x07\x0e\x73\x6a\x5f\x62\xd5\xaa\xc7\xd5\xe1\xe9\x88\xb3\xe9\xa8\x1c\x1a\x44\x5e\x2b\x17\x64\x85\xdd\x3f\x07\x49\x14\xed\x24\xc4\x76\x88\x3f\x3c\x5d\xc2\x40\x9f\x04\xc2\xe2\x03\x14\x73\x90\x0d\x33\xc8\x1a\x93\xda\xed\x88\xed\x0a\x5e\x2a\x3e\x2f\x23\x32\x24\x74\x48\x56\xb6\x7e\x8b\x9c\x08\x18\xf9\x7e\xb7\x1f\x92\x52\x24\x82\x44\xfc\xdf\x42\xfc\x66\x5f\x99\xf3\x06\xbb\x9e\x20\x86\xc6\xc6\xaa\x5d\x03\x6f\xab\x59\x95\x80\x54\x3a\x1e\x40\x11\x9c\xdb\xab\xaa\xb0\x96\x4f\x62\xf1\x23\x5e\x5c\xd0\x88\x63\xc3\xf6\x56\x05\x5a\x14\x15\x46\xb5\x36\x45\xfb\x89\x47\x92\xb7\xa5\x0a\xba\x69\xf6\x94\x82\x59\x9c\x85\x5f\x25\xa0\x58\x17\xea\x86\x09\x9f\xde\x91\xe2\x8b\x70\x46\x16\x2e\xe2\x34\xdb\xf6\xd3\xa2\x4e\x15\x86\x34\xf6\x50\x33\x0f\xed\xe8\x46\x90\x86\xd6\x0f\x91\x53\x30\x6f\xc0\xd1\xe1\xfc\xb5\x41\x56\xeb\xea\x7b\x01\x53\x30\xc0\xa2\xc1\xe3\x05\x5d\x15\xc7\x18\xf3\xdc\xac\xbb\xeb\xb5\x83\x1b\x99\x19\x99\x66\x3f\xf3\xe1\x62\x31\xad\xa4\x57\x00\xa1\x63\xd1\xce\x76\x7a\x31\x1f\x4f\x67\x4e\x71\x7b\x92\xd1\x3f\x36\x4a\x56\x1e\x1e\x79\xcb\x75\x19\x72\x8c\xa9\x52\x6d\xcc\x84\x9f\xf6\x52\x5a\x13\xb0\xc6\x82\xb0\x83\x6c\xcc\x37\x77\x4b\x7b\xe3\x28\x27\xb1\x4c\x37\xe7\x60\xf9\x61\xd7\x77\x97\x05\x3e\x46\x8b\xf6\xfc\x40\x1e\x0b\x90\xe1\xb6\x3a\xcd\x6e\xab\x2f\x7d\x54\xc9\xfc\x1f\x08\x15\x50\x3a\xee\x51\x39\xe6\x71\x38\x2a\x8c\x86\xbd\x04\x1c\x35\x23\x34\x59\x3c\x56\x7d\xea\xed\xd1\x19\xef\x06\x28\x70\xc8\xdf\x02\x23\xcf\x18\x66\xe5\x01\xcc\x65\xae\xff\xbb\x78\xbc\xac\xa4\x40\x45\x0e\x64\x6b\xd5\x61\xf6\x45\x59\x10\x5e\xea\xec\xcd\xe2\x91\x44\x42\x35\x00\xc8\x87\xa6\x01\xe8\x85\xe9\xed\xb9\x63\x45\x7b\x37\x16\x6c\x6d\xdf\x63\x6d\x88\x9d\x63\x4d\xd7\xa1\x58\x1c\x00\x0b\x35\x17\x14\x64\x2d\x6b\x66\xe4\x15\x98\x6d\xcd\x7a\x57\x36\x38\x88\x50\xa0\x35\x07\x56\x75\x56\xaa\x8e\x19\x11\xa1\x02\x4a\x32\x9f\x98\xb9\x2a\x4c\xfd\xb0\x76\x97\xdc\xf3\xc7\x1c\x70\xd0\x63\xce\x57\xc4\x42\x03\x92\xfc\x1b\x7a\xe1\xc7\x1c\xe3\xe4\x18\x2f\x5e\x1b\x0c\xb9\x34\x2e\xfb\xd3\xda\x66\xe1\xd
b\x82\xbe\x1e\x88\x81\x0e\x4d\x3b\x9e\x69\x8c\xc0\x7b\xe7\x23\xfb\xb4\x5e\xff\x7c\xda\x08\xef\x65\xfd\xe1\x21\x14\x6f\x18\x0b\x44\x6d\xaf\xe8\x4b\x06\xbc\xa7\xda\x2d\x6a\x8e\x3b\xff\xf5\x28\x70\xbb\xcd\x4c\x89\x40\x21\x40\xca\xe3\x45\xde\xb7\x63\x52\x04\xf0\x36\x1b\x0a\x3b\x39\x16\x59\x6f\x6c\x75\xa1\x62\xaf\x3d\x9b\xfc\x6d\x40\x80\xb0\x9b\xd3\x3b\xea\x9b\x29\x6a\xca\xe9\x66\xd3\x4f\x27\x69\xbf\x4d\xa6\xc7\xac\x92\x49\x6e\x9e\x5a\x6c\x83\x9b\x3e\x20\xcc\xc2\xf6\x6c\x12\x8a\xc7\xa4\x21\x00\x17\x96\xe5\x77\xc6\xbd\x66\x27\xad\x33\x44\xb6\x61\x7b\x18\xbe\xec\x2e\x71\xcb\x65\x8b\xce\xa5\xa7\xd9\xc4\x2d\x99\xa0\xb4\x42\x23\xa4\xe7\x61\x86\x68\x92\xd9\x85\x7b\xcd\x03\x85\xd5\x0e\xa4\xda\x71\xf7\x99\x78\x06\x80\x9c\x15\x8b\x73\x30\xb2\x44\xa6\xb5\xe2\xb8\x69\xab\x32\xe9\x7a\x26\xca\xf9\x94\xf9\x88\x0b\x96\xbe\xed\xe1\x26\x68\xf9\x9f\xe4\xd2\x88\x6b\xe6\x96\x72\xd4\x4e\x99\x0f\x08\xe3\x6f\xd5\x67\x38\xe0\x1b\xd7\x97\x32\xef\x1a\x1b\x39\x32\xe4\x6c\x90\x00\xc0\x47\x24\x47\x2b\x06\xc1\xf0\x59\x96\xea\x35\x84\xe7\x73\x6a\x15\x11\xdf\xd9\xbb\x85\x08\x82\x46\x8b\xd2\x38\x3c\x42\xfd\x68\x56\x44\x5e\x1d\xb9\xac\x0f\xf9\x3b\x8f\x1c\x07\xdd\xb6\x40\xa7\xde\xa1\x38\xf9\xc3\x23\x35\xa3\x1a\xca\x14\xfa\xa6\x63\x3d\x4a\x6e\x22\x40\x4c\x2b\xf4\x76\x71\xc4\xb0\x4e\x71\x25\x52\x40\x18\xcc\x9c\xe3\x92\x21\x8c\x4b\x9f\xc2\xfe\x6a\xc3\xbb\x66\x7b\x90\xde\x81\x7c\x87\x78\xd2\xa4\x31\x3f\xd0\x2e\xcb\xc2\xae\x31\xe2\x8e\xf3\xca\x1a\xd1\x8b\x76\xb3\x54\x2b\xf4\xb6\x3f\xce\xb3\x8b\x13\x10\x67\xb2\xa6\x27\xe5\x60\x1c\xfd\xb4\x8b\x61\x0b\xf6\x34\xb8\x63\xe4\x67\xf7\xf7\x43\x8b\x28\x26\x1d\x42\x6a\xd5\x88\x87\xaa\xac\x89\x51\x06\x07\xf4\x86\xb9\x39\x3c\x2d\xfa\x48\x27\x09\x51\x99\x61\x87\xca\xeb\xd5\x46\x22\x8f\xb4\x6b\xa3\x70\x59\xf9\x46\x3c\xfb\x7d\x24\x62\xde\xda\xd4\x55\x46\xc7\x1f\x67\xe1\xab\x9d\x2c\x4e\xa3\x88\x0c\xfd\xc8\x3d\x55\x69\xbc\x34\x70\x40\xcc\x0c\x09\x04\x03\x2d\x5d\xc0\xe2\x50\xc5\x92\x3a\xe5\xeb\x98\xa5\xf7\x01\xc4\x47\x36\x75\x76\x04\xc2\xe4\xc2\xf6\xba\x75\xc2\x0e\x45\x9f\x01\x32\x40\xf6\xb1\xa3\xa5\x71\x32\xf0\x11\x04\x00\xb7\x3a\x53\x7c\x75\xf3\xa0\xaf\x01\xc8\x52\x2e\x76\x04\xff\x59\x42\xbc\x60\x6e\x39\x06\xb6\x81\xa2\x8f\x25\x4a\xcc\x27\xc3\xa8\xf7\xe9\x0d\x5f\xa0\xa2\xf9\x8c\xef\x35\x40\xd1\xf6\x35\xe7\x17\x75\x77\xd0\xa3\x58\xb5\xc1\xf4\x47\x8a\xc0\x1e\xe3\xf4\x76\xa6\x5b\xa5\x6a\xf5\xee\x7d\x56\x35\x43\xec\xf9\xf1\x95\x4a\xb4\x72\xed\xcd\xd7\x68\x46\x53\x61\xdc\x4b\x27\xd9\x5b\x9c\x9a\xce\x61\x7a\x9f\x13\x47\xb7\xed\x19\x22\xe4\xf7\x25\x8a\x1e\x48\xd1\xd5\x07\xcd\xb2\x4a\xec\x39\x82\xa5\xc0\x2d\x13\xa5\xe4\xae\x4d\x40\xa3\x0f\x97\x46\xd7\xba\xd6\x7a\x8f\xa5\x08\x8d\xbe\x4b\x8a\x52\x3d\xa2\x48\x78\x6f\x46\xee\x9c\x02\x37\x01\x56\xdd\xf1\xb8\x69\x99\xe6\x6a\x41\x77\x94\xd8\xec\xcc\xc4\xa5\xc8\xb2\xbf\x09\xd1\x77\xd1\x5e\xf3\xd8\xe9\x07\x03\xcf\x0a\x73\xe9\x03\x80\x0c\x38\xd6\x6d\xcf\x73\xb3\x99\x94\x01\xa3\xdc\x58\xe4\xba\x8f\xe9\x43\x34\x7f\x57\xf0\x3a\xf7\x08\xd2\xd9\xf9\x41\xd1\xb6\x66\xe6\x1f\x8d\x14\x07\xe5\xe2\x94\x7d\x50\x9a\x17\xb2\x88\xb3\xdc\xa6\x80\x4e\x0b\x8a\x8c\x18\x0f\x9c\x9f\xb8\xc4\xd9\x3d\xd0\xb4\x20\xa5\xf8\x75\x33\xa9\xdd\x7c\xde\x19\x30\x7b\x06\x00\x20\xf0\xd4\xe3\x1d\x0a\x49\xe0\x74\x40\xac\x47\xf5\x56\xad\xab\x77\xe5\xb8\xb4\x0f\x7b\x69\xee\xf8\x58\x94\xb3\x37\xc5\x09\x44\xce\x48\x16\x64\x17\x1c\x1d\xc4\x35\xcb\xb8\x41\xd1\xf9\x5f\x5a\x62\x7e\x3e\x34\x1c\x6f\xcf\xd2\x1a\x44\xee\xc1\x9e\xac\x0f\x66\xb5\x2d\xd1\xe9\x66\x0e\x7a\x8c\xf3\xd0\xd7\x80\x93\xf7\x77\x87\x1b\xbc\x5c\x1f\x2c\x72\x70\x2e\x94\xae\x1a\x18\xbc\x2f\xe0\xf9\xa6\x8f\x62\x69\x67\xd9\x07\xfe\x97\x40\x6b\xce\xec\x
5a\xd8\x85\xf3\xac\x5b\x38\x20\xb2\x2e\xdf\x9a\x16\x31\x68\xc8\xb8\xbe\xaf\x88\x82\x82\x77\x4a\x54\xbb\xf5\xfd\x64\x57\x7b\x4f\xaf\x39\x20\xe8\x5e\x41\xfa\xfa\xa9\x7d\x43\xf7\xe4\x9f\x80\x9d\x3d\x77\xb8\x96\x31\x04\xc1\x03\xb5\xda\x71\xf7\x78\x80\x0f\xc2\x79\xb2\xa7\x82\x17\xd0\x1e\xc0\x61\xaf\x5a\x6d\x62\xbc\x0c\xa2\xa9\xf9\x6e\x0a\x34\x0e\x5c\x35\x4f\xb0\x75\xd2\x61\x71\x03\x9d\x35\x40\xe1\xfa\x04\xb7\xda\x7b\x07\xce\xab\xbe\x64\xc9\xe1\x6b\xdf\x6b\x4d\xd4\xcf\xa5\x1e\x1d\x8d\xf9\xab\x71\xab\x8f\xdb\xe4\xa1\x5a\xa1\x68\x7c\xc4\xcc\x71\xa1\xef\xc0\x4c\xe4\x2b\x11\xe5\x0a\x89\x63\x4c\xaf\xf3\x5a\x38\xb6\x6c\xe6\xb7\x3f\xfe\x80\x5c\x3f\x84\xfc\xa5\x39\xd0\xcc\x32\x56\x85\xff\x6d\x17\x02\x24\x49\xb5\x16\x2c\xf8\x1a\xbc\xeb\x65\xd3\x86\x01\xf6\xae\x10\x8f\xcb\xec\x9e\x1f\x06\x00\xa1\xeb\x60\x97\xeb\xed\xbc\x5f\x87\x5d\x0f\xb8\xc2\x5e\xca\xd7\x00\xc7\x8a\xdb\x49\x38\x67\xd7\x21\x26\xb3\xb9\x62\x44\x6c\xd7\xec\xf3\x13\x57\x26\xbc\xb5\x7b\xf8\x88\xb7\x66\xc5\x09\x3b\x35\x5e\x75\xfd\xa8\xa5\x36\xc8\xcd\x3f\xe2\xa7\x71\xff\x00\x60\x82\x56\x7d\x7b\x30\xc4\xae\xbe\x77\x34\x94\xf8\xde\x35\x7c\xb5\x96\xeb\x62\xcb\x1d\x89\xf5\x41\xb5\x16\x84\x15\x4a\xa8\x6b\x98\x18\x37\x3a\xc3\xb6\x57\x25\xc0\xbe\xf6\xe2\xfb\x44\xbe\x38\xd9\x29\x54\xd8\x36\xaf\xce\x37\x7d\x09\xb6\x9b\x4e\x40\x02\x76\x18\xeb\xe7\xa5\xc9\x1f\x42\x54\x9b\xc3\xf8\xa3\x26\xda\xf0\xe4\xc0\x69\xb2\xc7\xa1\x5a\x4d\xb0\x9d\x61\x15\xed\x03\x09\x49\x08\x61\xd2\xe9\x91\x98\xbd\x7d\xec\x00\xaf\xfa\x19\x48\x6f\x6b\xdc\xf8\xdb\xe2\x28\xec\x8d\xd8\x4f\xb7\xa3\xb7\x43\x30\xf6\x21\x2b\x48\xd4\xe7\x26\x10\xf5\x41\xc1\x02\xbc\x92\xcd\xd1\xe2\xc9\xb0\x3e\xaa\x0e\x0b\x61\x06\x32\x1f\x76\x17\x49\xda\xad\x7c\x2f\x31\x8d\x75\x88\xaa\xad\x9e\xc9\xbf\x55\xd5\xed\x5b\x9d\xdf\x5a\x72\x7e\x2f\x2d\x4e\x7a\xf3\x3c\xa7\x3e\xac\xc8\xa8\x44\x41\x00\x07\xe4\x97\x08\xa4\x1d\x14\x61\x6f\x24\x3c\x81\xce\xf3\x92\x5d\x73\x7f\x8e\x67\x6b\xf7\xb1\xc9\x02\x16\x36\x52\x85\x37\xda\x2e\xb9\x17\x77\x1a\x45\x3e\x0d\xf3\x4d\x09\x0a\x62\x36\xcd\xa3\x09\x95\xa1\xb0\x38\xdf\x5b\x11\x4e\x36\x43\xd5\xc4\x06\x1c\x91\x8c\x1e\xd4\xc1\x66\x73\xeb\xd6\x31\x4a\x1e\x46\x00\x4c\x36\x1f\xe9\xb2\xe5\x06\x32\x2f\x9b\x16\x5b\x01\x98\xac\x34\x5d\x93\x2b\xc0\xd9\xae\xf7\x2d\xfb\x29\x24\x71\x11\xd9\x74\xf3\x2f\x19\xd7\x9a\x67\x0d\xc9\x99\x49\x85\xd5\xad\x84\xc3\x6a\x2b\xa3\x5a\xf3\xb4\xdc\xc8\x42\x54\x97\x9c\xf9\xe6\x96\xde\xf3\x9a\x19\x7b\xbc\xd9\x25\x0c\x7a\xf6\x7e\x23\xae\xcd\xe4\x4e\x00\xd7\xfe\x01\x06\x2d\x92\x80\x7d\x0e\xb4\xf6\x84\xd6\xb6\x6a\xc0\xfa\x05\x97\x0a\x02\xde\xb2\x90\xbb\x58\xc6\xf7\x44\x22\x6c\x5c\x11\x94\xd0\xbf\x8f\x9f\xdc\x4b\xc5\x23\xb5\xeb\x5f\x17\x3d\x57\x53\x8a\x57\x0f\xfa\x99\x95\xba\x81\xdb\x83\xcf\xfb\xc3\x41\x5d\xeb\x85\xf7\x13\x65\x34\xe0\x7e\x69\xb4\x74\xba\xcd\x5f\x2b\x7d\xfe\xed\x6a\xfb\x55\xb4\xc6\x16\x8b\x3b\x74\xb4\xb2\x4c\x7b\x6e\x9c\xe8\x07\x1b\xe3\xf8\xf1\x03\x72\x60\x78\x35\x9c\xcb\xbd\x7f\x70\xb9\x75\x5b\x1d\x21\xa6\x67\x39\xef\xc8\xd3\x21\x23\x7b\x2f\x75\xa8\xa1\xe8\xed\x54\xee\x35\xf4\x4f\x51\x42\x02\x26\x4d\xe9\x7b\xb9\xbf\x98\x9f\xc1\x0d\xd1\x03\x6c\x1d\x7a\x9b\x6d\x89\xfb\x88\x42\x84\x35\x6e\xf7\x20\xc4\xd6\x88\x48\x01\xf4\xa6\xb9\xbe\x3b\x25\x82\xa1\xd0\xed\x95\x80\x35\x2b\x4c\x4c\xdd\xdc\xb2\x64\x56\x90\x61\x47\x87\x88\x5b\x7b\x3c\x54\x0c\xe2\xb6\x49\x43\x7f\x67\xed\x27\x6f\x51\x49\x78\x4b\x89\x1f\x64\x78\xeb\xb4\xbd\xf4\x7c\x6b\x7a\x73\xf9\xed\x73\xc8\x8f\x5a\xfa\xd7\x7b\x47\x68\x4b\x72\xab\x81\x03\x1e\xd1\xa4\x00\x81\x6b\xe6\x21\x6d\x15\x87\x7b\xfb\xd4\x91\xa8\x69\xa6\x1d\x7d\x09\x77\xfb\x59\x17\x95\x4f\x86\x47\xe8\x72\x43\xfb\xd0\x00\xec\xf6\x04\x6d\xe8\
x48\xf5\x51\xf8\x78\xe4\x51\x64\x8d\x80\x94\x94\x58\xb9\x7d\xe2\xec\x2d\x81\x02\x27\xd7\xd4\xb7\x37\xad\xb9\xa1\xb9\x15\x32\xe3\x46\x0b\x85\xdc\xec\xae\xcd\x49\x27\x14\x81\x8d\x66\x09\xcd\x6e\x86\x82\x2f\x37\x6f\x76\xd1\xc7\x0e\x12\x38\x5e\x13\xd5\x6d\x56\x42\x55\x39\x69\xd7\xf6\xaa\x07\xfd\x3d\x31\xf2\x42\x2c\xf4\x31\x47\xe3\xc3\xea\xcf\xa8\x22\xac\x45\xc4\x27\x1a\x06\x5b\x1a\xf5\x4d\xb4\x75\x44\x03\x3b\x47\x1c\xfa\x9f\x6b\x14\xbe\x82\x21\xce\x87\x1e\xf6\x5e\x14\xb2\xa1\x02\x37\xf1\x0b\x4f\xce\xfb\xfe\x01\xc9\xdb\xb1\x15\x04\x19\x66\xc8\xb2\xe6\x7b\x46\x5b\x0b\x19\x40\x3c\x42\x7b\x16\x4e\xe5\xe1\x99\x69\xaf\x69\xbe\x1a\x7d\x7c\x8e\x4d\x3b\x5a\xbc\xd9\x0f\x85\xdc\x79\xb2\x61\x18\x9c\xf8\xbe\x2e\x31\xc6\x1b\x73\x57\xb4\x59\xe5\x77\x55\x07\x35\xa4\x88\xee\xa8\xf0\x99\xf9\xb4\x47\xd5\xaa\x0f\xb8\x7f\xfe\x00\x22\xe0\x96\x3f\x7a\x41\x69\xfe\x3f\x33\xfc\xee\xd6\xdd\xbb\xbd\x15\x51\xc3\x2c\x7f\x0f\x59\x99\x3b\x35\x3c\x20\x95\xdc\xe9\xe5\x76\xda\x12\x4f\x4e\x3a\x38\x4c\x60\x72\x60\x57\x86\x5c\x4b\x98\xdd\x0f\x7e\x55\xb0\xab\x23\xe9\xfc\x3c\x16\xde\x79\xbb\xac\x1a\xe6\xab\x4f\xa4\x79\xee\x0f\xf6\xf3\x91\x54\x37\xf0\x5d\xaf\x0f\x9d\xfa\x83\xdf\xd8\xda\x61\x0c\x63\x67\xb8\x76\x2b\xf0\xcc\x3a\xe6\xfb\xdb\xd4\x7b\xc7\xf5\xdb\xbe\xab\x3b\xb7\xf2\xbd\xe7\x32\x37\x40\x79\x5e\xbd\x1a\x5b\xdf\x50\x7e\x71\x23\xb5\xff\xb1\xe6\x7a\x49\xa9\xd0\x0c\x4f\x69\xb7\xe9\x1e\x5d\x15\xb3\x95\x38\xbe\x5e\x3b\x18\xd6\xc5\xcd\xa5\x3b\x9e\x97\x66\xcf\x68\x38\xe6\xe3\xb6\x5b\x40\xef\xda\xf2\xf9\xd6\x0b\xda\xe6\x30\x4e\xff\xea\x7d\x52\x72\xd7\xea\xa3\x75\xde\x6a\x74\xac\xb9\xea\x09\x20\x89\xad\x8a\x52\xab\xd4\x4a\x8f\x96\xb1\xbd\x2a\x4d\x05\x53\xef\x08\xac\x75\xaa\xa4\x7d\xd7\x8e\x3d\x21\x6b\x9c\xbf\x78\x8b\xcd\x22\x70\x63\x65\x48\xd9\x5a\x3a\x4d\x34\x78\xbe\xe6\x57\x24\xcb\x99\x49\x4d\xc8\xb0\x13\xb5\x48\x3f\x8b\x6d\xc1\x3e\xc9\xad\xbb\xe6\xdf\x36\xfb\x8a\x59\xd6\x78\x40\x6c\xb9\x9f\x95\x0f\x75\xbf\xea\xee\x0f\xfc\x2a\x14\x9d\x35\x1d\x1c\x01\xcf\x8f\xbd\xa3\xd2\xf9\x40\xac\x72\x9e\xae\xd6\x5b\x23\x20\xc2\x7d\xce\xe9\x63\xdc\xc8\x66\x70\x7f\x48\x08\x3b\x42\x98\x62\x0d\x45\xef\x35\x11\xe6\xb9\x92\x74\xb4\xa6\x87\xbb\xcd\x97\xb7\xf5\x47\xf7\x81\x35\xf2\xf2\x9e\x95\x5b\xb2\xbc\x9d\x93\xfa\xe5\x28\xf4\x32\xdf\x30\xf4\x00\x8c\x8b\x61\x7e\x3a\x52\x81\xb8\x74\x61\x91\xaf\x53\xdf\x1e\x5a\x6e\x1c\x02\x68\x5f\xc7\x33\x68\xe9\x7b\x90\xb5\xcc\x24\x99\x75\xb9\xe6\xd7\x3d\x73\xb6\x56\x40\x3a\x02\x6b\x6c\xf5\x42\x0d\x4e\xf3\xf5\x93\x3d\x93\x74\x89\x81\xd3\xe5\x1a\xe0\xf6\x1a\x63\x59\x87\xac\xab\xb0\x7b\x47\x76\x61\x0d\xb9\x20\x8b\x3b\xcb\xc0\x47\x72\xa3\xbd\x8b\x5a\x6e\x06\xc7\x7e\x96\xea\xd1\xd7\xda\x06\x11\xeb\x7c\x02\x85\x58\x77\x0b\x83\xa3\x34\xd8\x81\x72\xca\x7a\xac\x63\xa6\xce\xbe\x42\x5f\x99\x3b\xd8\xef\x7c\xbc\xa5\x2a\xd2\xc8\xc7\x61\xde\x61\xc4\xea\x78\x03\x56\xd6\x72\x28\xec\x49\x8c\xf5\xb5\xa2\xbd\xfd\x97\x55\x18\x29\xbb\x03\x93\x2d\x09\x8c\xe4\x4c\x14\x46\x35\x33\xed\x9a\xea\xd8\x1f\xfe\x91\x04\x9e\x5d\x79\x35\xa0\xb3\x5f\xbc\x0a\x3d\x2f\x94\x8f\x45\xea\x28\xca\xf9\xf7\x9b\x88\x7e\x2f\xcc\xc7\x27\xaf\xbd\x5d\x19\x7b\xce\x4e\xc7\xb6\x2e\x40\x5b\x7c\xf6\xab\x38\xef\xe7\x22\xbf\x62\xf9\xbf\xc8\x0a\x8c\xe5\xef\x11\xa9\x62\x85\x0f\x67\x8b\xe8\xee\x45\x5e\x09\x10\xde\x35\x82\xee\x0e\xa9\x67\xd9\x4f\xd9\xcb\x7d\xa0\xeb\xf0\x42\x2a\x25\xd7\x97\x93\x6b\x29\x33\xc4\xb8\x2a\xe5\x2c\x1c\xfb\xde\x44\x56\x96\x32\x5e\xc8\x69\xaa\xb4\xf7\xfb\x8c\x00\x0b\xe3\xa5\xb1\x2e\xc3\x25\xa1\x4f\xd8\xae\xaf\x4b\x7b\xe7\x28\xff\x82\xf1\x1c\x6d\x7c\x48\xdc\xac\x53\xde\x61\xa1\x39\xeb\x5d\x23\x16\x26\x7b\xe0\x98\xd1\x63\x5f\xd2\x41\x11\xab\x0b\x50\x5d\xa4\xb7
\xef\xfd\xa5\x24\x1e\x6b\xfa\x5f\xfe\xa6\xc2\x88\x39\x0a\x6c\x35\x8b\x41\x99\x0b\xc2\x65\xed\xa5\xd5\xef\xcb\x42\x7e\x3b\xc6\x64\x38\x67\x6e\x3c\xdb\x25\x8f\x89\xae\x1e\x58\x7b\x92\x31\xbb\xc3\xd7\xe3\x18\x16\x75\x2e\xac\x65\x73\x31\x1a\x0a\x5c\xd9\x5f\xfd\x16\xc0\xd9\xc5\x19\xd2\xf0\x71\x1d\x6a\x2d\x2f\xfd\x8a\x98\x21\xbe\xd0\x3d\x12\x5b\xa7\x37\xbd\xc9\x4b\x7a\x6f\x6c\x5c\x9a\xc5\xe0\xa5\x25\x5d\x9a\xb6\x78\xda\x25\xb5\x20\x75\x0c\x36\xbd\xc3\xd0\x9c\xbb\x6b\xb7\x24\x75\x30\xc9\x82\xe7\x1e\xa0\x15\xbd\x04\x24\x7d\x77\x61\x4f\x99\x59\xcd\x55\x04\x8d\xea\x09\x43\x93\x32\xc8\xf2\x30\xd5\x0c\x5b\x3c\xa6\x6d\x0f\xd3\xbe\xfa\xf4\x52\x7a\x81\x53\x57\x7f\x89\xc3\x58\xfb\x00\xae\xaf\x4c\xbc\x5c\xd2\x22\xce\xfa\xec\x9f\xc8\x78\xbc\x31\x50\xb0\x24\xb1\xea\x67\x40\xa9\xfb\x49\x6b\xce\x1e\x79\xa9\xdd\xcc\xa9\x68\x47\x4f\xeb\x0f\x6a\x89\x9f\x1c\x51\x60\x0b\x94\x1f\xf4\xea\xdb\x64\x11\x69\x1f\xc7\xc4\x1b\x09\x98\xca\x4c\xe2\x92\x00\xf1\x3a\xc9\xd1\x29\xcf\x4b\xca\x33\x6a\x29\x5e\xb1\xa1\x67\x85\x17\x6a\x3e\x80\xc5\x5f\x9c\x3e\xd2\xee\xe2\x53\x9a\x9d\xf8\x00\x26\x4f\xc0\x58\xbe\x39\xfa\xbe\x7c\xe3\x5b\xc2\x4b\xeb\xcd\x3b\x5f\x1a\x1c\xef\x5d\xc8\x73\xf8\xc1\x56\x37\x14\x65\x39\x5f\x8b\xc0\xee\xae\x3a\x13\x20\xf1\x10\xc8\x98\x2f\x0a\x77\xcf\x8a\x30\x17\x28\x5e\x2c\x70\xa4\xfb\x53\x6e\x64\x8e\xc1\x7b\x33\x14\x75\x3e\x57\x0e\x4c\xcc\x80\x45\xf8\x94\xa4\x33\x9b\x72\x68\x67\x6e\xb0\xc5\x19\x48\xa0\xf0\x21\xcf\x55\x6d\xfa\xb9\x16\xad\x88\x71\x60\x0e\xef\xb3\x6d\xa7\xd6\xd4\x6c\x2d\x2f\x2e\xa8\x64\x6d\x37\x76\x5a\x9d\x42\x71\xe6\x9a\xd8\xdd\x77\x19\x1a\x1e\xa3\xb5\xf7\x2f\x49\xc9\x3b\x68\xfe\xe2\xef\xeb\x93\xe3\x19\x4d\x4c\xdd\xf1\xf2\x52\x79\xd4\x8c\xd1\x61\x9d\x5f\xc2\xcc\x07\x8a\x1d\xba\x03\x49\xa3\xad\x28\x34\xdd\x52\x14\x3c\x47\xf6\x8b\xbc\xf9\x90\xa0\x71\xbe\x8c\xb0\xf7\x09\x2e\x35\xce\xfc\x74\xb9\x59\xb3\x93\x2e\x4d\x7b\x41\x30\x0a\xca\xb7\x6a\xce\x91\x40\x0d\x8b\xdc\xf3\xe0\xe8\xcd\xd9\x23\x84\xf9\xbc\x5b\x47\xec\xef\xab\x41\xe6\xf9\x3d\x9a\x51\xab\x04\x94\x9f\x76\x03\xdf\xbb\xf6\x19\x0c\x40\x51\x8f\x30\xfc\xf3\x39\x5b\xa7\x92\x18\x96\x39\xbd\xde\xf1\xca\x9b\x1d\xae\x40\x33\x3d\xcf\x4d\x05\xeb\x10\x88\x1b\x7f\x59\x67\xdb\xbb\x6a\x33\x2a\x12\xfd\x7c\xee\x8d\x42\x1f\x4d\x12\x8f\xd0\xa6\xa1\xcf\xde\x6c\x8d\x36\xea\x18\x24\x17\x2c\xd5\xba\xb1\xbf\x67\x8e\x21\xac\x99\xe9\x0c\x33\x34\xfc\x7d\x0a\xb4\xd8\x65\x4d\x2d\xd2\xf2\xfb\x69\x7f\xdb\xe7\xdf\xcf\xde\xd1\xf5\x7e\x60\xdf\x9e\x77\xcf\x37\xe1\xdb\x5f\xde\xe7\x2b\xd1\xb9\xfb\xbc\xfe\x20\xff\xe1\x56\x38\xb7\x9e\xfb\xd5\xff\xbd\x35\x98\x6c\x4d\x17\x86\x4c\xd3\x7e\xda\x42\xe4\xd7\x5b\x7b\xd4\x19\xf5\x15\x55\x3c\x81\xef\xf5\x7a\xa4\x7f\x6e\x58\x9d\x27\xcc\xba\x75\x45\x9c\xfa\xbe\xd6\x7f\xd1\x6b\x07\xdd\x3e\xd9\x06\xb4\x83\xb5\x48\x82\xde\x19\xf7\x7e\x00\xb3\x47\x4e\x58\x5d\x29\x60\x83\x7d\x63\xc5\x98\xb7\x35\x39\x63\x05\x81\x93\x74\xfa\x96\xd8\xf8\x04\x9c\xba\x0c\xb2\xe7\x3b\xe0\x12\xb4\xfa\xd4\xf4\xad\x41\xa0\x67\xcf\x9e\x58\x7a\x52\xeb\x27\x5d\x6d\xeb\xe9\x78\x52\x15\xcd\x25\xda\xcf\xcf\xfc\x2b\x26\xfd\xaf\xa2\xeb\x9f\x79\x7d\xf3\x7e\x3e\xf3\xa8\x17\x11\x01\x0f\x87\xba\xa0\x4f\x08\x01\x69\xdd\x44\x72\x3f\x38\x10\x4f\xa2\xe8\xd1\x3d\x1d\x2e\x9a\xb1\xe7\x04\xe1\x4d\x3f\x5e\x5c\x73\x62\xf1\xad\x3e\x9f\xf5\xeb\xe8\x0d\xe5\x9f\xeb\xb7\xf8\x64\x47\x2b\xa5\x6f\xdd\xc8\x9f\xeb\xe7\xed\x18\x4f\xb7\x07\xc7\xfa\xfe\xd4\x48\x37\xfa\xa9\x52\x27\xf9\x71\xbd\x75\x36\x3f\x7f\xc4\x4a\xf9\xa9\x6a\x9c\xff\xc0\x6d\xa8\xc5\xa6\x35\x3e\xd8\x3f\xd6\x26\xf3\xad\xd9\xf7\x8d\xd5\xf9\x8f\x23\x55\x65\x71\x19\x75\x18\x6c\xea\xfd\xb3\x9f\xbc\xfe\xf9\x03\xf7\x83\x54\x06\xb1\xef\xf3\xca\xed\x5
8\x9d\xad\xd7\xf6\x87\x5a\xf6\x3f\x79\xc0\x67\x9d\x30\x7d\x0c\x20\x69\x7c\xfe\x27\xbd\x85\xe7\x8f\x34\xe1\x9f\xb4\x2f\x59\xa0\x7c\x06\x27\xfe\xa4\x51\xa0\x7d\xeb\xf1\xda\x4b\xe4\x0f\xd5\x8f\xc0\xbf\x98\xba\x02\x29\x0f\x2e\x45\x35\xf9\xe3\x99\xf3\x3f\xae\x33\x02\x67\xab\x53\xea\xaf\xc5\x79\xee\xe0\xa2\x07\xc1\x7d\x87\x04\xf0\x2e\xde\xbd\x28\x40\xe0\x9b\xbf\x55\xe6\xc8\xa8\x78\xe7\xfb\x7f\xd4\x6b\x51\xfc\x18\xa5\x03\xa5\xdf\xaa\xf2\xb1\x09\x88\x59\x33\xf2\xd7\x63\x53\x30\x98\x54\x3e\xf0\xfb\x57\x97\x6e\x9a\x3f\x65\x3f\x1b\x7d\xdf\xf5\x8c\xc3\x1d\xed\x64\x7f\x4e\x5b\xe6\x85\xc7\xc7\x51\x28\xc3\xcd\xe3\xd6\x13\x25\xd9\x3f\xb6\x9c\x15\x27\x4a\x6c\x7f\x59\xdb\x61\x5a\x11\x41\x0b\x7b\x14\x7a\x7b\x86\xee\xed\x01\xab\x32\xc6\x82\x63\x2c\x05\xd2\x4c\xac\x7f\x9c\xef\x66\xed\xd7\xf2\xd1\xfe\x76\xc0\xff\x18\xe0\xf8\x0f\xc0\xbf\xce\xc6\xea\x4e\xb6\xc1\x59\xe2\x31\xff\x38\x0b\x45\xe7\xa6\xbb\xc2\xfc\x17\xce\x1a\x35\x91\xcf\xc9\x1b\x84\xd2\x96\xf7\xd4\x8a\xb6\xef\x9b\x16\x07\xac\xff\xd1\x78\xfd\x21\xa2\x05\xbe\x7f\xeb\xb3\xa9\xc0\x23\xf1\xa1\xdb\x9b\xef\x6f\x7d\x75\xaf\xe4\xcd\x4e\x92\x37\x20\xee\x53\x0a\x68\x56\x84\xce\x95\x80\xf4\xf0\xea\xdd\x17\x67\x40\xe3\xe4\xad\xfb\x0f\x0c\x62\x58\x76\x3e\x6d\x75\x80\xf8\x3e\xe4\x0c\xd8\x15\xa1\x49\xa1\xd4\x7b\x49\x8c\xc0\x0f\x0d\xf3\x3f\xfc\xe7\x62\x13\xbb\xd6\x08\xff\xd7\xde\xce\xd9\x11\x52\x94\xfc\x6b\x3f\xdb\x33\xd2\x82\x8e\x34\xc7\xb5\xfc\x75\x84\x7d\x9d\xfc\x3d\xd0\x8e\x7b\xc2\xfb\xbf\xa4\x1d\x10\x17\xad\x2f\x7e\x08\x33\xd7\x49\xe4\xd2\xc7\x0d\x4f\x83\xed\x20\xf5\xba\x2e\xcb\x86\x20\x12\x6e\x62\xf1\x71\xc2\x36\x52\xec\xe5\x6d\x6f\xb9\xed\x28\x97\xa3\xfd\x54\x6e\xd1\x80\xcf\x21\x40\x2b\xd6\x23\xe4\x10\x16\x0c\x34\x49\x16\x1c\x52\x03\x42\x74\x22\x57\x2a\xf2\xed\xa0\x2c\x20\x9d\x02\x64\xfa\xb5\x96\x02\x8a\x8a\x6e\x81\xd9\x15\x2d\xd4\x5d\xfa\xb9\xc3\x78\x7b\x98\x88\xc2\x76\x49\x8e\xd9\xdb\x9d\xf1\x16\xce\xde\xde\xa0\x59\x3c\x1c\xce\xbe\x40\xee\x72\x80\x64\x81\x3f\x3d\xbd\x88\xe9\x1e\x77\xe7\xdb\x90\x14\xc1\x25\x85\x02\xab\xcd\x56\xcd\xa7\xfb\x0c\x2a\x04\xd0\x2f\xe6\x55\x99\xae\x95\x44\x7d\x6b\x81\xa6\x90\xcb\x84\x2e\x81\x20\x26\x66\xaa\x43\x0f\x91\x82\x30\x3c\x41\x5c\xeb\x5b\x27\x31\x06\x8f\x27\x29\xc3\x79\xce\x05\xd1\xe9\x68\x02\xf3\xd3\x37\x9d\x0b\x45\x0a\x3c\xfe\xd3\x26\xb3\xe0\xfe\x27\x89\xfe\x27\x07\x51\xa7\xaa\x37\xc9\x8c\x50\x02\xb0\xfa\x07\xac\xa5\x2f\x4a\x08\xc4\x4b\x0a\x1b\x14\x83\x30\x19\xd6\x08\x01\x98\x5b\xe0\x70\xaa\x4d\x08\x40\xf9\xaf\x53\x2d\xf1\x06\xbb\x8a\xc0\x4c\xb6\xd6\x54\xa3\x29\x3d\x49\xd9\x0d\xd2\x01\x45\x12\x03\xc2\x55\x4c\xcb\x22\xa1\x86\x22\x70\xcf\x54\x10\x84\x2f\x23\x24\x04\xd0\x8b\x61\xe3\x12\xb6\xcb\x43\x14\x72\xeb\xe0\xc2\x70\x3a\x8a\x56\x6e\xd5\x3c\xb4\x06\xd4\x84\x00\xda\xe0\x60\xa4\xdb\x94\xdf\x83\xc7\x24\x4c\xd6\xda\x26\xf5\xdf\x7a\x84\x0a\x4a\xa4\xcc\x00\x1f\xe2\x09\xae\xbb\x6d\xc2\x10\xb5\x3f\xac\x56\x76\x42\xbd\xb5\x76\xad\x2d\x4d\x90\xff\x12\x03\x7f\x32\x69\x31\xec\x6a\xca\xf3\xf6\xd1\x03\xb6\xe2\x72\xcc\x59\xbf\xcc\x13\xa2\x35\xbe\x3e\xb4\x08\xdc\xd6\xcf\x31\x1d\x06\xee\x51\x9d\x30\x57\x17\xe8\x24\x72\x6f\x85\x4d\xc7\x99\x42\xde\x01\x81\x9a\x27\x9b\xe2\x6e\xe7\x58\x72\x50\x64\x9c\x64\x08\x74\xab\xd2\x2b\x06\x55\xa6\xf4\xeb\xcd\x54\x9b\x46\x04\xd5\x94\x24\x1f\x48\x89\x01\xee\x2a\x1d\x9c\xb3\xff\x66\x31\x19\x5d\x61\x80\xd3\x81\xda\x41\x93\x34\x36\xd2\xaa\x4d\xae\x47\xa5\x28\xc0\x3a\xae\xec\xa0\x7a\x54\xdf\x19\xbb\xc6\xad\x6d\xb3\xd9\x60\x98\x62\x9c\x25\x9a\x7e\x13\xc8\xb1\xa7\xcc\x19\x72\x61\xd0\xa3\x5c\x55\xac\x5c\xf4\x6e\x9d\xb8\xec\xa3\x4f\x27\xc9\x0b\x50\xe9\x02\x39\x2a\x83\x7b\x08\x70\x81\xc7\xb7\x1c\x9e\xe3\xa5\xb3\x19\x
2f\x6f\x7a\x8f\xe8\x82\xb2\x3c\x7b\x0b\x7c\xe4\xe0\xf0\x88\x18\x92\x99\x81\xe8\xd6\xb3\xfa\xa5\x34\x81\x3d\x2f\xe8\x9d\xaf\x2c\xec\x71\x36\x66\x5b\x57\xc4\x9a\x8d\xa4\xda\xef\x17\xd2\x5f\x76\x99\xcc\x82\x87\x85\x70\xa2\x10\x13\x88\x0c\xf8\xd1\x75\xc3\xbf\x24\x41\xa0\x95\x2a\xbb\x45\x23\xf9\xa4\x63\x0c\x29\x9b\xc1\x10\x49\x33\xec\x30\x86\xa2\x6d\x95\x47\x50\xfb\x19\x32\x27\x73\x98\xab\x76\x9d\xc9\x2e\x30\xc3\x1a\x52\x6f\x5a\xa9\x89\x20\xa4\x5f\x47\x37\x71\x2d\xbb\x1b\x4f\xcf\x26\x18\x11\xb4\xc3\x65\x15\x08\x3c\xba\xec\xac\xcc\x2b\x14\x16\x5c\x99\x89\x07\x52\x01\x81\xa2\x31\x86\x66\xc1\x43\x47\x56\xe7\x61\x61\x48\x03\xcc\xbc\xee\x95\xdb\x29\xc4\xd2\x8d\x4d\xe5\xc0\x61\x9f\xef\x91\x21\xdd\x2a\xe6\x56\x4c\xdf\x97\xba\xc5\xe3\xac\x1b\x05\x72\x7c\x34\x5f\xa0\x55\x30\x4a\x04\x72\x04\x9c\x4f\xe8\x77\xb3\x17\x7f\x72\xcc\xe8\x45\x7b\x1b\x5d\xc1\xc8\xcf\x01\x4d\x6a\xd6\x5a\x66\x32\x88\x60\x2c\x4a\xd5\x1d\x43\xeb\x7a\x54\x23\x7b\x2c\xe0\xd8\x71\x54\x75\x9c\xa0\xbc\x49\x39\x87\xc9\xe6\x79\x5d\x36\xe2\x91\xe6\xaf\x8c\xab\x1a\x73\x3c\x0e\x7a\xae\xc7\x08\xc1\x71\x1b\xec\x19\x8d\xdc\x8c\x08\x3e\x9e\x65\x45\x20\xc2\x98\x5d\x5e\x07\xa8\x3f\x7e\x34\x47\x04\xf1\x15\xaa\x21\x54\xc5\x5f\x24\x7f\x0e\xf1\xdd\xe2\xd4\xeb\x31\x20\x25\xcf\xf8\xc5\x75\xe4\xa8\x55\x81\xf4\x9a\x7b\xa4\x4b\xc5\x2c\x7b\x40\xe3\xde\xcd\xb9\x63\xff\x60\x4c\x7f\xa8\x74\xad\xbd\xe2\x27\x9a\xde\x92\xbc\x48\xed\x21\x48\x72\x7a\x87\xb0\x02\x4d\x46\x8d\x43\xcc\x4c\x57\xdc\x15\x16\x69\x24\xcd\x90\x3f\xd7\xbe\x1e\x1e\xde\x35\x2a\x51\x0d\xd3\xb9\x19\xac\x3e\x46\xa5\xac\x8d\x2d\x92\x1e\xde\x84\xd9\x9d\x94\x66\x39\x6a\x87\x15\xda\x98\x34\x4a\x34\x26\xd6\xab\x23\xe3\x37\x1e\x97\xeb\x93\xd2\xd5\xfe\x60\x18\xd1\xe3\xda\x83\xba\xfa\x68\xa0\x38\x58\xd2\x2a\xb8\xd6\xe7\x5b\x3d\x35\xb3\x07\xc2\x81\xf4\xcb\x90\x3e\x59\xea\x0f\xab\x41\x76\x6f\x94\x3e\xec\xc2\x2c\xf5\x74\x3f\xf2\xe3\xd2\xcd\x40\x94\x22\x0f\xfb\xd1\xf4\x49\x30\x66\xe2\x8e\x76\x1f\x3c\xf1\x7a\xec\x81\x70\x5f\x1e\xc4\xfe\xd1\x98\x79\xec\xd6\xaf\xb7\x7f\x34\x2b\x35\x20\x10\x87\x10\x12\x0f\x15\xae\x34\x6f\x2b\x9b\x3d\x6a\x03\x88\xed\xd7\xea\x25\x22\x86\xd8\xa6\x96\x65\xf7\x2a\xa2\x8b\x50\x92\xc2\x03\x06\xf5\x86\x50\x5c\x84\xbc\xc4\x5c\x93\x46\x8b\xbd\x94\x42\xc2\x0a\x7a\x13\x22\xbf\x3c\x20\xf4\x94\xfe\x84\xf2\xc7\x61\xef\x1f\x61\xa6\x90\xa0\x78\xda\xa5\x96\x40\xc2\xee\x5b\x90\x2e\x01\x0a\xd6\x22\xd0\x21\x25\xc1\xa5\x1e\xb5\x25\xf3\xd3\xbb\xa2\xa5\xeb\xbe\xde\xe3\x86\xad\xc4\xa3\x91\x4a\xc5\x5d\x6a\xce\x1e\x2f\xac\xb1\x85\x31\xe2\x03\x21\x54\x71\x4e\x3e\xb0\xfc\x25\xb5\x0a\xbf\x00\xd0\xaa\x50\x9a\xed\xe3\x6e\xf0\x50\xb7\xa2\xd9\xcf\xb0\x12\xf5\x22\xa4\xac\xf2\xf0\x6c\x98\x30\x05\x0c\x7c\xcc\x1f\xe3\x98\x2e\x76\x21\x54\x9a\xd9\xbe\x47\xe4\x54\xb9\xc2\x03\x67\xe6\xc2\x70\x77\x2d\x93\x87\x90\xc0\x81\x8d\xd6\xca\xc5\x1f\xd2\x5b\x5b\xf4\x76\x8e\x4d\x5e\x2d\xd1\xe0\x31\xb7\x34\x0a\x17\xb8\xe8\x92\x36\x90\x9a\xbc\x46\xc3\x94\x3e\x66\xbd\x5b\x8f\x72\xa8\x83\x02\x58\x8a\xce\xa2\xec\xf1\xa3\xc7\x24\x3b\x68\x02\x76\x3b\x3e\xb3\xbb\x6b\x0c\xab\x50\xa3\x03\x4f\xd9\xfe\x11\x7a\xfc\xf0\x70\x9a\xac\xdf\x24\x45\x51\x80\x0c\x56\xd8\x27\x0a\xca\xd1\x46\x48\x2e\x5b\x34\x8f\x0f\x97\x27\x9a\xc9\xbd\x1e\xca\x92\x1f\x83\xb7\x35\x1e\xa5\x8b\x44\x82\x07\x1c\x9c\x5e\xed\xb3\x20\x37\x03\xc4\x31\x78\x8b\x20\xab\xe6\xbf\x21\x32\x7b\x97\xc1\xee\xcf\xc3\x83\xa8\xdf\x5c\xcc\xba\x8b\xbc\x06\x94\x9a\xfa\x92\x66\x46\x3f\x79\x3e\xf6\xc3\x73\xd7\xb8\x93\xfc\xa9\x93\x01\xf5\x99\xea\x75\xe7\x23\x11\x8c\x63\xd3\x5e\xe7\x61\xb5\xaa\xea\x31\x33\x73\xa4\xf9\x41\x40\x21\x1d\xef\xf6\xcb\x9a\x5a\xec\xfd\xc3\xf9\x59\xbc\x5e\x56\xe3\xa9\x06\x43\x8a\
x20\xef\x75\x62\x67\xe1\x91\xe2\xbd\xcf\xff\x90\xd0\xcf\x95\xa5\x91\xe1\xc7\x45\xdb\x37\x64\x26\xfd\x0c\xed\xa7\xe9\xba\xe5\x23\xfe\x79\x8b\xfa\xa3\x98\xa4\xe7\x9a\xff\x21\x5c\x50\x2a\x16\xff\x10\xe9\xa8\xd7\x2a\xff\xeb\x43\xa6\x19\xe6\x4c\x03\x32\x3b\xfb\xbd\x8b\x14\x81\xf3\xf9\x57\xf9\xa3\x66\xf4\x21\x71\x02\xd7\x8e\xfc\x50\x6e\x36\xb0\x5a\xfe\x4d\x11\x3c\x0a\x7d\x8c\xd1\x45\xf7\x5c\xa1\x22\xc4\x2d\x22\x16\x3c\x1f\xde\x07\x95\xf2\x45\x8d\x9e\x1c\xd2\x31\xbd\x79\x96\xa5\x01\xe5\x49\x3e\xcd\x71\x93\xbd\x99\x35\xca\xf2\x43\xd9\xd7\xe3\x55\x18\x15\x90\x37\x30\xf3\xb7\x24\xc5\xdc\xbc\x79\x63\x9f\xfb\xdf\xfa\xda\xb4\x33\x22\xff\x05\x83\x71\x4b\xa0\xe6\x5a\xa1\x89\x6c\x4c\x7b\x70\x23\xf3\x7a\x5c\xda\xed\xea\xf9\xea\xc9\x4d\x3b\xe4\x90\x06\x59\x55\x0b\xe5\x75\x6e\x55\x0f\x42\xbb\x14\x39\x28\xb3\x9d\xfc\x0a\x21\x84\x58\x0a\xa9\x64\x6e\xf4\x7b\xc4\xd2\xb8\xde\xa0\x6b\x85\xac\x49\x83\xa9\xd4\xdc\xd0\x3a\xe8\x8d\x4b\xf0\x62\x41\x1c\x02\xaf\xdd\x62\xf7\xa1\x68\xe1\x2e\x4b\xff\x06\xca\x1b\xa1\x63\x81\xc1\x2e\x56\x78\xb9\x11\x84\x33\x52\x1c\x35\x30\x60\x76\xf1\xc9\x22\x47\x10\xd2\x78\xb7\x68\xa9\x60\xc0\x68\x95\x0f\x2e\xd3\x8d\xa1\xf9\x90\xef\x68\xf2\xcf\xcc\x90\xe2\x71\x5a\xbd\x25\x31\x10\x55\x5a\xd6\x2a\xe4\x60\x25\xd7\x1c\xbd\x21\x99\x43\xb4\x87\x12\x1f\x5c\xe1\xd1\x64\x2f\x73\xf6\x08\x29\x48\xbf\x1c\x9a\xc6\x51\xc4\x37\xd9\xf5\x3d\x10\x35\xea\x36\xc9\x89\xdc\x03\xc9\x62\x19\x92\x5f\x43\xa3\xb2\x63\xac\xb7\x53\x0f\xcf\x2e\xfe\xcb\xc3\x90\xf3\xb7\x64\x3b\xc2\x93\x36\x5c\x6b\x27\x34\xe0\x70\x2d\xcb\xb3\x6e\x0f\x37\x47\x06\xcd\x0f\x40\xe1\x94\x83\x1c\xfb\x26\x0b\x33\xd8\x3d\x44\x1f\x84\xab\x59\x07\xe2\x29\x0d\x8f\x5d\x03\xe8\x03\x20\xf2\xf7\x02\xd5\xbb\xe2\x36\x65\x58\x2b\xd4\x38\x62\xca\x63\x35\x2b\x42\x59\x86\x5d\xb8\xfa\x41\x34\x61\xda\xe9\x2f\x4b\x88\xe3\xe5\x8d\x7f\x4c\x85\x43\x87\xb9\x85\x1a\xc7\x2b\xd1\xed\x08\x31\x0e\x9f\x44\x56\xa9\x3d\x8c\x35\xc0\xc8\x43\x85\x6a\xdf\x2e\x2b\xcf\x63\xc0\xa1\xac\xa8\x85\xef\xcf\xb2\xe4\x3a\xb8\x53\xff\xc4\x20\xcc\xb2\x89\x73\xd4\xdf\x26\xd0\xb1\xe6\x71\xe2\x62\x4b\xe1\x44\x1b\xfb\x71\xf1\x89\xda\xd3\xdf\xa9\x6f\x30\xa4\x38\x56\x5d\x4f\x37\xfd\x26\x43\xa7\x86\x42\x27\xe5\xf2\x1c\xa0\xe3\xc3\xfe\xa2\x58\x07\xde\x20\x06\x7e\xa0\x44\xc9\x21\x2e\x76\x1c\x45\x5e\x33\x88\x77\x5c\xdb\x79\xdc\xe6\xbd\x90\x47\xce\xfb\x5a\x29\x54\x38\x3c\x7e\x8e\x92\x2f\x05\xa9\xcc\x71\x5c\xc7\x82\xb7\x6c\x5f\xda\x46\x59\xfe\x0a\x59\x0f\xba\x1e\x61\xc3\xc3\x55\xea\x7a\x2f\xe9\x9e\x82\xdc\x4e\xeb\x08\x09\x52\xde\x70\x17\xfe\xb0\xdf\xd9\x71\xf0\xa2\x7d\xb0\x18\x27\xec\xa5\x59\xee\x29\xc8\x80\x1c\x77\x92\x12\x8a\x4d\x80\xa1\xec\x67\x89\xab\x85\x80\x37\xf4\x58\xbe\x28\x10\xc2\xad\xc2\x47\xc9\xb5\x46\xae\xf0\x68\xbe\x5d\xd8\x52\x97\x29\xc4\x9f\x0d\x76\x97\x5f\x59\xcb\x87\x1c\x1a\xac\x18\xd9\x1b\xbc\x3f\xd4\xe5\xb0\x27\x45\x6e\x9b\xdd\xb5\x5b\x95\x5f\xa9\xe9\x83\x28\x4a\x12\xea\x20\xa5\xd5\x24\x90\x05\x69\x42\x2e\xf0\xa0\x9e\xb7\x15\x2f\x65\x3e\x53\x7b\x20\xa8\x2d\xa0\xcb\x2a\xc9\x90\x50\xbb\xc0\x45\x8d\xd4\x01\x24\xde\xc7\xb9\xe5\xbd\xe5\x17\x0c\x79\xbb\x4e\x39\x2f\x07\x8e\x48\x48\xe5\x24\xbb\xd3\x44\x32\x22\xb1\x6c\x91\x8a\x09\x3c\xc7\x49\xfb\x5c\x43\x9b\x0a\x82\x22\x7e\x0f\x63\x0b\x0b\xdf\x3a\x0c\xb5\x7a\x8f\xa0\xcd\xb7\xf7\x42\x9f\x9c\x21\xcf\x3a\x41\x10\x16\xa5\xc6\xe1\x29\x58\x4a\xc7\x80\x8a\x48\xaf\xd7\x97\xa2\x20\x3c\x54\x77\xa5\x87\xfb\x16\xc5\xee\x2b\x44\x41\x62\x09\xf5\xff\x87\x9c\xa5\x51\x63\xd5\xe2\xc7\x51\x74\xd4\x9f\xc9\xe9\x7f\xbc\x2a\x29\x44\x70\x12\xa3\x05\x87\x74\xe7\x4a\x0f\xa8\x21\x15\x07\x3b\xb8\x4c\xe5\x18\xf3\x9f\x1f\x62\xa1\x83\x2b\xe7\xc7\xab\x14\x05\xda\xfd\x7f\x05\x44
\x06\x9a\x21\x34\xb2\x78\x74\x58\x8d\x82\x4f\xe6\xdd\x9f\x50\xb7\xc7\xda\xb9\x9b\x68\xa0\x7f\x76\xf8\xb6\x9a\xc8\xc3\x1d\x86\x6f\x04\x1a\xef\x6f\xdf\xcb\xb7\x13\x77\x5c\xc9\xe0\x1d\xa4\xf4\xf7\xca\x47\xcf\xc5\x42\xd6\xd0\x8b\x70\xb5\x10\x1a\xd9\xc7\xdc\xcc\x48\x6c\xa1\x59\x67\xe1\xf4\xb4\x4a\x29\x81\xec\x67\x43\xdf\x63\x18\xc0\x9f\x26\xb3\x16\x54\xe6\x04\xde\x5f\xfb\x20\xbd\x91\x35\x97\x07\x89\x8b\x90\x13\x71\xe6\xe7\xc0\xc2\xc6\xf1\x7a\x6e\xd2\xe5\x76\x7a\x64\x11\x6a\x58\xbe\xbf\x22\x67\xd2\xf5\x5b\x8e\x30\xbc\x57\x66\x16\x3c\x69\x3c\xf0\xdd\x47\x77\x3f\x32\x78\xd1\x77\xed\xe6\x50\x05\xb9\x06\x09\x42\x40\xff\xa9\x87\x5f\x92\x05\x1e\xe8\x0e\x19\x73\xf7\x17\xa3\xba\x11\xcf\x1d\x1f\x17\xb6\x5c\x7a\x64\xde\x30\xb4\x1b\x42\x24\x6a\x00\x20\x2b\xd8\xdf\x05\x08\x91\xb0\xd1\x48\x4b\x3f\x32\xb4\x1c\x32\x24\xcf\x43\x06\x27\x8f\x53\xe8\x09\xb7\xe3\xf4\x52\x67\x1f\x4f\x4a\x2f\x40\xcb\x37\x44\x3e\x7c\x60\x78\xa6\x7d\xd9\x5d\xff\xa6\xb4\x08\xb0\x65\xa2\x1d\xf6\xe8\xef\xd1\x38\x9b\xf6\x41\x3d\x5a\xeb\xcd\xec\xd9\xc7\x24\x6f\xc9\x11\x1e\x62\xdd\xb7\x10\x36\x84\xea\x48\xa0\x4c\x21\x3d\x72\x21\x7d\x42\x22\x26\x6b\x6e\x72\x1d\xd4\xff\xb2\xe9\x10\x82\x08\xb5\x86\xf8\x49\x9d\xa5\xb0\x52\x23\x4f\x1e\x66\x48\x64\xd4\x27\xa8\x06\x9c\x0b\xa5\xa1\x10\x91\xc0\xa0\x91\x52\x2c\x69\xfb\xab\x4e\xb3\xbb\xce\x38\xc3\x10\x2b\x09\xd1\x0f\x0d\x52\xc2\x90\x58\x92\xe2\x8d\xda\x96\x5a\x57\xc7\x0b\xcb\x16\x35\x84\x6b\xff\xdc\x42\x27\x4c\x84\x95\xc8\xc7\xea\xe9\x5f\x4d\xc3\xa1\x7e\x0c\xf5\x41\xaa\x04\xfe\x89\x4f\xe9\x93\x87\x9a\x49\x6e\xee\x92\x2c\x79\x94\x33\x8c\xa3\xf5\xe4\xa5\x7a\x12\x07\x70\x5f\x3d\x28\xf8\x95\x5b\xfa\x65\xf5\x0e\x79\xdb\x81\x8b\x44\x48\xc7\x05\xe1\x7e\xb1\x31\x49\xd6\xf8\x08\x96\x36\xb4\x04\x16\xc8\x4d\x49\xda\xc3\x8e\x61\x13\x57\xf2\x8b\x4b\x10\xd2\x27\xa1\x91\xc2\x01\xd5\xa6\x92\xf2\xd1\xdc\xf0\xe2\x52\xf7\xfb\x4c\x96\xc5\x3d\xdc\xba\x44\x4b\xdb\xba\x1d\x1e\x8f\x14\x24\xad\x18\x03\x54\x49\xbb\x45\x7c\x54\x6d\x56\x88\x8e\x54\xf8\x12\xd8\x30\x81\xbd\xaa\xfb\xdc\x13\x4b\xde\xde\x36\x44\x19\x66\x09\xc3\xcc\x91\xc0\xda\xd7\x1f\xf9\x18\xfa\x5a\xd8\x19\xc6\xa0\x62\xd4\x46\x6e\xea\xfe\x85\xf0\x6d\x6f\x15\x5c\x7b\x40\x07\x08\xa2\xb9\xd5\x5b\x97\x26\x16\x68\xfa\x91\x2e\xdc\x72\x28\x03\xa2\x9f\xaf\xb3\x05\xeb\x49\xc8\x25\x2c\x4e\x97\x23\x52\x6c\x50\xd8\x68\x04\xbd\x9e\xf2\x2d\x87\xc6\x5a\x7a\x97\xfc\xe4\x91\xd3\xe4\xb6\xb2\xfb\xa4\xb5\x0d\x25\x7b\xf6\x33\x05\x8b\x21\xe1\xc2\xf3\x9a\xc1\x87\xd1\xca\xe9\xbd\x08\xdf\xd0\x63\xfc\x48\xd5\x14\xcd\x26\xeb\xa2\xb1\xba\xbe\xb8\xfa\x58\x53\x24\x29\x17\x55\x7c\x89\x40\x94\xeb\x20\xf4\x5a\x72\xfb\xa5\xe8\x75\x81\xf6\xfd\x12\x35\x4c\xe1\xb3\x3f\x21\xb0\x99\x73\x30\xce\xa7\xab\xe3\x3a\x2e\xd1\x33\xb5\x12\x42\x41\x42\xcc\x08\x25\xad\x50\xd9\x72\x33\x63\xd9\x42\xf3\xa5\xf6\xd4\x75\xb1\xbb\x3d\xc7\xd6\xca\x2e\xf5\x97\xe2\xf8\x4e\x74\x2d\x59\x97\xa0\x8f\x99\xfa\x49\xfe\x74\x2b\x0a\x46\xef\x52\x2f\xf4\x7c\xbb\x99\xad\xa9\xff\x2f\x0a\x69\xa0\x92\xca\x24\xfd\x09\xdc\x87\x23\xb4\x58\x71\x27\xae\x8e\xc6\x22\x15\x9f\x29\xc1\xef\x75\xed\xad\xd4\x6b\x27\xe0\xac\x9e\x9a\xb9\x8f\xda\x58\x82\x90\x12\xf2\x09\xe3\xe8\x31\x64\xcc\xf1\xd7\x7e\xe2\xb7\x3f\xce\x35\x2d\x7c\x4f\xa7\x34\x5f\x4e\x1c\x31\x0b\xa3\x22\xbc\xf4\x53\xe2\xa7\x38\x33\x27\x19\x53\xe6\xca\xd0\xba\xdd\x86\x28\xbb\x63\x2a\x6b\x39\x64\xc1\xd3\xca\xdd\x2e\xbd\x19\x98\x9c\x89\x86\x99\x5e\x00\xd4\x98\x6b\xe8\x34\xa1\x2b\x1f\x9a\x34\x40\xec\xb1\xae\x45\x3f\xd7\x1b\xcb\x7d\x6e\xb9\x4f\x56\x17\xa5\x90\x85\xf1\xdc\x61\xd7\x8c\x79\xf9\xb8\xdb\x18\x45\x2a\xb1\x1c\x2a\x8e\xf5\x56\xa2\x39\x83\x6f\x6f\x66\xb2\x77\xec\x90\xca\x0f\x24\x66\x86\x10\xa
1\x39\x0a\x13\xa3\x61\x29\xff\xaf\x8f\x8f\x2b\x46\xf1\x42\x66\x86\xc3\xbb\xb1\xbd\x5b\x11\xc7\x1a\x8d\xb3\xe6\xb5\x30\x6b\xd8\xf4\x28\xc0\xd2\x36\x6f\xfd\x03\xb7\xad\xe9\xd1\xe6\xb7\xad\xae\x92\x86\xd9\x25\x8b\xb3\x0f\xa5\x5a\x93\x00\xef\xc9\xde\x66\x28\x4c\xbd\x07\x4c\x78\x95\xba\x52\xda\x85\xd7\xe8\xd3\x76\xc9\x07\x68\x66\x08\x27\xa5\x88\xf6\x72\x6e\xa3\x8e\xd1\x9a\xd0\xd7\x1c\x76\xda\xb2\x8c\xa1\x64\x2d\xba\x6c\xa9\x8c\x7a\x48\x92\x7b\x36\x42\xc7\xc6\x9a\x72\x4d\xd9\x67\x46\xbb\xf6\x3c\x69\xa7\xa5\x9c\xa1\x98\xc3\xa7\xca\xb6\x07\x2f\x1c\x67\xd9\x0d\xe1\x82\x5d\xfe\xe3\xd1\x76\x2e\x84\xe9\xc2\x0e\xef\xbf\x46\x71\x87\x9c\xcd\x93\x81\xf7\x30\x43\xd5\xc3\xcc\x6f\x1a\xef\x26\x6b\x63\x4f\xa4\x4f\x8f\xa6\xa8\x71\xb1\x7a\x74\xec\xb7\x97\x91\x8a\x43\x43\x81\x5f\x9d\xd5\x9a\x4f\x0e\xf8\x75\x1e\x95\x4e\x0b\xce\xd3\x98\x3b\xcf\x4d\x59\x67\x9e\x87\xea\x5d\x79\x33\x3b\x9e\x5c\x27\x99\x4f\x28\xdb\xbc\x3b\x77\x6b\x75\x68\x8f\x4a\x40\xe4\xe2\xf0\x49\x77\x75\x17\x7f\xea\x3a\xbe\x33\x1d\x86\x33\x29\x4a\x23\xe7\x1a\x14\x6f\x56\xe6\x01\x76\x90\x0b\x4e\x13\x25\x68\xea\xaa\xf7\x5a\x42\x38\xfe\xea\x98\x2d\x3f\x1b\x44\x6f\x86\xe8\xbe\x76\x56\xd3\x3d\xa5\x8a\x53\xf2\xa3\x57\x2b\x11\xe1\xbf\x52\x41\x81\xd4\x75\x2c\x9b\xf3\xa4\x4d\x78\x7a\xa2\xcb\x27\xb1\x98\x9e\xa1\x94\xee\x85\xd0\x5b\xd9\x7d\xd0\x9c\xe3\xb1\x1d\x86\x3d\x29\x57\xef\x26\xb7\x6e\x2f\x14\x2f\x07\xc6\x41\xe3\x4c\x51\x05\x95\x30\x9b\x40\x8f\xd5\x1f\xaf\xf4\x1b\x8b\x94\x55\x3d\x64\xb3\xc7\x91\x6d\x71\x33\x07\x8e\x4e\x41\x2f\x85\x62\xdf\x6e\xf1\x71\x80\x35\xd0\xe0\x88\x66\xe7\x9a\x7f\xab\x22\xcf\xed\x66\x9c\x8a\xa8\x6c\x32\xb8\x2e\xcf\xf3\xa6\xa1\x4e\x14\x34\xd7\xa4\xc0\x53\x0f\x49\xfd\x40\x80\xe7\x1f\xad\xa7\x23\x99\xb2\xe6\x3f\x11\xe6\x2e\x93\x95\x3d\x2c\xe9\x61\x76\x68\xf7\x59\xdb\x58\xbb\x5c\xc7\x2a\x23\xd0\x5c\x30\xe3\xc7\x38\x62\x38\x04\xd5\xcc\x52\x41\x07\x4a\x25\xd0\xae\x16\x49\xd1\xba\xc5\xa9\x6a\x89\xae\xe2\x3a\x69\x3b\x5d\x57\x35\xa5\xfe\x09\x06\x77\x4f\x19\x9d\x7c\xe5\xd0\x06\xfa\xa9\x12\xdd\x81\x04\xd0\xc4\xe7\x15\xfa\x3f\xa5\x09\x00\xf5\x4d\xca\x09\xb6\x37\xa6\xa4\x1a\x04\xbc\x15\x1f\xae\x99\x1a\x38\x73\x96\x46\x90\x86\x67\x3b\x26\x8e\x73\x69\x44\x36\x72\xe1\x34\x0c\x6d\x26\x95\x83\x5a\xa1\xcb\xd2\x3c\xfa\xa9\x8e\x34\x71\x95\x62\x9f\x52\x7e\xad\x63\x8e\xbd\x4d\x26\x44\x36\x0f\xb4\x35\x18\xab\x5f\x7a\xd7\x76\x26\x75\xff\x2e\x7a\x01\x15\xc1\xc5\x17\xa6\xcc\x7c\xb8\xcb\xb8\x88\xda\x6c\xe6\x14\x7b\x45\x20\xe8\x11\x01\xb3\x5d\xb1\x56\xf1\x56\xa4\xbf\x73\x1d\xd2\xbb\x02\x58\x15\xc2\x6c\x6e\xdb\x17\x64\xef\xa9\xca\x73\x94\x2c\xd5\x1a\xb4\xa4\x28\x85\x64\xfd\x8d\x68\xd5\x9b\x9d\xa9\x96\xe6\xc7\x04\xd7\xf6\x1a\x31\xc4\x28\xd1\x5f\x6c\xd6\x4f\xc9\x61\x68\xc5\x35\xbb\xb8\xf7\xae\x52\x3f\x31\x26\xc6\x4d\x3d\x33\x70\x73\xf9\xd4\x5a\x3e\xe9\x9e\x8b\xa0\x19\x2a\x7a\xe1\xe3\xcf\xa9\x8b\x0d\xee\x3a\xd2\xf0\x1b\x74\x79\xda\x75\x28\xa3\x23\xde\xc2\x8c\x2d\xe5\x55\xeb\xe7\x07\x97\x07\xad\x94\x2d\xe4\x2e\x87\x62\xd2\xc0\xd1\xab\x0e\x90\x2c\x9f\x42\x3c\x07\xa3\x1f\xac\x77\x73\x0a\xe5\xa9\x9c\xfa\x50\x4d\x8a\x31\x71\xb7\xaf\xa1\x5e\x12\xee\xd1\x13\x4d\x54\x6b\x2b\xec\x31\x86\x0c\x9b\x11\xd1\xfa\x45\x4d\x3a\x58\xaa\x8e\xf0\xd1\x7c\xeb\xd7\x2e\xa6\x3a\x80\x04\x31\x71\xd6\xdc\x69\x3b\x15\x30\xec\xa2\x44\x97\xd4\x5a\x51\x68\xf2\x45\x0b\x77\x69\x7d\x07\xcd\xb0\xd7\x32\x8e\xdf\xe3\x99\xb8\xeb\x67\x8c\x87\x76\xf0\x67\x31\xec\x1e\x01\x6c\xd6\xf8\x1e\xb8\x44\xc7\x31\x99\x2e\x25\xbe\x5b\xdf\x0f\x82\xc3\xa0\x40\xe4\x9a\x32\x52\x80\x76\x21\x22\x37\xd0\x0f\x1f\xc3\x5c\x15\xcf\x96\x7e\xd3\xc9\x26\x8e\x44\x89\x24\xe0\x73\x61\xbc\x44\x0e\x36\x2f\x28\x37\x49\xb6\x54\x88\x20\xb3\x
b6\x7c\x35\x11\xa3\x33\xdd\x6b\x4c\x88\x99\x8c\x1c\x91\xcf\xb2\x17\x39\xbc\x0b\x01\xa9\x16\xf4\x90\xae\xce\x5d\x3d\xd0\x38\xe2\x43\x05\x2d\x53\x65\xeb\x26\x64\x69\x4d\x45\xb3\xed\xbe\xe8\xe9\xf7\x8a\x22\x5c\x06\xe9\x78\x1f\x27\xd3\x99\x3c\xfc\xc1\x1a\x89\x0f\xb7\x8f\xb2\x94\xe8\x0e\xdc\xa5\xaf\x26\x86\xd4\x54\xc7\xe5\x70\x95\x76\xd0\x01\x07\x6c\x09\x6d\x23\xdc\xdb\x2c\xab\x0f\xc9\x20\xd8\x53\xd5\x05\x46\x54\x9f\x76\xef\x8d\x34\xce\x44\xea\xee\xac\xd1\x13\x6f\x86\x69\x4d\x45\x65\xb8\x54\x92\x9d\xcf\x3b\xec\x81\x46\x40\x17\x13\x30\xa3\x14\x95\xd9\xfc\x05\x0c\x3d\x24\x9a\xc8\xae\xe0\x2a\x9b\x7b\x2e\x75\x83\xad\xa0\x96\x66\xda\x3c\xd8\x9d\x96\x3d\x6c\xdc\xe8\x7a\x82\x68\x42\x6b\x96\x5c\xd1\x59\x10\x9f\xfb\x96\xbd\x6a\x2a\xd7\xa9\x74\x6a\x6c\x86\x22\x14\x13\x86\xd0\x10\xeb\xe2\x63\x6a\xda\xe8\xd4\xa4\x9d\x10\x6f\x45\x3d\x24\x07\x29\xe8\x31\x51\x14\x3d\x60\xe6\x89\x09\xe3\x50\xa3\x54\x27\x2b\x11\xdf\x9d\xec\x3b\x37\xd3\x90\xe6\x12\x9f\xb5\x55\xfa\x4a\x7d\xe4\x37\xe3\xd3\x5f\xd5\x99\x45\x2b\x73\x7a\xcf\xb7\xb8\x53\x92\xac\x5a\x44\xd4\xa5\xe5\xdc\x13\xb5\x96\xa0\x5f\xe0\xfb\x5b\xb6\x2b\xd0\x62\xc4\x33\xcb\xf4\xbc\x0e\xae\x94\x43\x4c\xcb\x5e\x58\xe8\x34\x70\x24\x0d\xc9\x07\x45\xd2\x4b\x54\x5a\x3a\x19\x65\x01\xc7\x66\x89\x1b\x7b\x6b\x2b\xf1\x27\xbb\x3f\x14\x57\xaa\x57\xe6\x47\xc9\x31\x75\xa1\x66\x85\x48\x3a\xfe\x0e\x68\xcb\xfe\xa1\xac\x94\x57\xd5\x8f\xae\xa6\xa4\x13\x76\xf3\x96\x5c\x2a\xe3\x1a\x3f\x78\xfc\x12\x1f\xb7\x59\x1d\xaf\x34\xdf\x1f\x31\xb3\x33\x33\x7a\xd2\xec\xe2\x98\x5f\x2e\xb5\xf4\x52\x46\x2f\x24\x40\xa9\x72\xa4\x06\x63\x9a\x3c\xbb\xcc\xde\xc4\xa6\x13\x4e\x5b\xc7\xf7\xdf\x88\x8d\xf4\xa0\xb8\xd2\x7f\x6e\x1d\xc0\x82\x1b\x35\xfd\x60\x0f\xef\x15\x94\x07\xff\x69\xf0\x70\x6b\xc9\x8c\x0d\x22\x23\x98\xe1\xcf\x8d\x06\x49\x12\x64\xff\x23\x65\xcf\x0a\x48\x2f\x3d\x3f\xc2\xa5\xac\x1f\x60\x55\xf5\xa0\x2c\x19\x34\x5e\x35\xde\x96\xe0\x0a\x3f\x58\xd3\xa6\xfe\xda\x00\x06\xf1\x70\x23\x96\xf9\x94\xd9\xac\x24\x08\x5d\x13\x76\x3a\x54\x68\x72\x10\xa9\x97\x4f\xd5\x76\xb2\xd6\x17\xc2\x3f\x78\xc8\xfd\x24\x59\xa7\xbe\x41\xcd\x20\x5d\xac\xea\xa5\xef\x2f\x39\xaf\x60\x2e\x37\xf5\x1f\x02\x51\x6a\x99\x41\x1c\xca\xf3\x14\xfe\x53\x80\xdd\x1d\x70\x3f\xbf\x69\x42\x40\x59\x5a\x51\x47\x9d\x7f\x6b\x58\x8a\x71\x31\x3b\xc7\xc3\x60\xcd\xdc\x49\xba\xd2\x48\x76\x2a\xa3\x58\xd2\xc9\xc5\x17\xdd\xc0\x10\xdd\x6a\x4f\xa6\xdf\xfc\xf8\xfb\xaf\xe0\x14\x0a\xdf\xac\xa5\xfe\xdf\xff\xef\xff\xf9\xff\x03\x00\x00\xff\xff\x71\x1d\x08\x34\x38\x36\x05\x00") + +func dataEnglishJsonBytes() ([]byte, error) { + return bindataRead( + _dataEnglishJson, + "data/English.json", + ) +} + +func dataEnglishJson() (*asset, error) { + bytes, err := dataEnglishJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/English.json", size: 341560, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataFemalenamesJson = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x6c\xbd\x4b\xb2\xab\xbc\xd3\xe5\xdd\x7f\x47\xf1\xc6\xbf\xfd\x8d\xe0\x1b\x43\xcd\xa0\xa2\x1a\x32\xc8\x20\x5b\x80\x0f\x17\xef\x83\x2b\x6a\xee\xa5\x14\xde\xe4\x6f\x71\x2a\xe2\x69\x1c\x1e\x6f\x63\x90\x52\x79\x5d\xb9\xf2\x7f\xff\xd7\x7f\xff\xf7\x7f\xfe\x47\x5a\xd6\xff\xfc\xff\xff\xfd\x3f\xcb\xbf\xcb\xd5\x10\xe6\xfd\x3f\xff\xdf\xf1\xef\x57\x58\xe7\xd4\xa4\xf0\x7b\x9d\xd3\xd8\x9e\x17\xb7\x30\x97\xff\xce\xcb\x98\xd3\x27\xdc\xe2\xda\xff\xfe\x8f\x47\x1c\xc7\x74\x8f\xf3\xef\x75\xb9\xb1\xdf\x69\xd9\x96\x30\xe2\x93\x2e\xcc\x71\xfd\xbd\x6e\xa7\x79\x5a\xfb\xdd\x7f\x75\x39\xbf\x37\x86\xb1\x39\x3f\x78\x96\x2f\x9d\x37\x29\x3f\xbd\x9e\x9f\xf4\x31\xfb\x27\xe5\x97\x5a\x7f\xce\x76\x1a\xc7\xf3\xa2\x09\xf3\x94\x7f\x2f\xe6\xcd\x1f\x7e\xe9\xcb\x27\xfe\x80\xa9\x29\x77\xcc\xf1\x7c\xa2\xb0\xf9\x0d\x97\xb2\x08\xe7\xf7\x9e\x69\xb8\xc5\x39\x9f\x0f\xd2\xc6\xdb\x84\x8f\x1f\x71\x59\x52\xe3\x5f\xed\xd3\x9c\xe3\xf9\xc7\xcd\x3e\xae\xbd\xaf\x51\x18\xbb\x98\xcf\xab\xa1\x2c\xf0\xe2\x0b\x71\x2b\xaf\xee\x7b\x11\x86\xdd\xbf\xe5\xaf\x37\xc7\x5b\x6c\xfc\xe7\xde\x69\xee\xd2\xe8\x3f\xf0\x0c\x6b\x9f\xa3\x2f\xd4\x2b\x0c\xfc\xc1\x30\xaf\x7d\xc0\x8b\xcc\xf8\xb9\x80\x1f\x5f\xd6\xf8\xea\xc3\x98\xa2\xac\xea\x7e\xde\xb6\xe9\xe7\x22\x61\x69\x8c\x94\x84\xf3\xe2\x11\x46\xdf\xf9\xa6\x3c\x51\x9c\xf1\xa7\xf7\xb9\x6c\x78\x5c\xf0\x72\xe7\xf7\xa6\xbd\x39\xff\xac\x4d\xc1\xbf\x13\x72\xf2\x4f\x1e\x5b\xf6\xdf\xea\x63\xbd\xff\xef\xe5\x1a\xe7\xb8\x40\x30\xca\x63\xfe\x5e\x74\x79\x82\xb4\xc6\x77\xc4\xfb\x3c\x62\xc0\xbb\xc5\x79\xcf\x2e\x27\xb9\x9d\x63\xcb\xf5\x95\xb7\x79\x4c\xfe\xcd\xb0\xf4\xd8\xfa\xc7\xd6\x26\x97\xbe\x79\x5a\xb8\x3e\x78\x9d\x67\x11\xc3\xf3\x4b\xe5\x83\x29\xe3\x4d\xdb\xfd\xba\xe4\xb2\xd5\xe7\xa7\xf6\x54\x78\xf1\x5b\x79\x3d\x11\xd9\x31\xf9\xcf\xaf\x61\x70\xd9\x4a\x45\xe8\x64\xe3\xce\x03\x51\x56\xeb\x7c\xf8\x60\x87\x85\x7b\x8d\xb5\xb3\xa3\x18\xe5\xa9\x66\xff\x30\x4f\x1b\x7e\x79\x81\x72\x29\xfb\x8e\xdf\x6d\xfe\x6c\xe5\x30\xf8\xff\xf9\xa1\x34\xde\xca\xf9\x4e\xb2\xfd\x7e\x1c\xb6\x9b\x2b\x95\xc9\x37\x9b\x0b\xf5\xea\xf7\x9c\xfd\xa3\x71\x9a\x07\xff\x2c\x6c\x7e\x3c\x4c\xe4\xf8\x7c\xfe\x9b\x39\x95\x3b\xf8\x46\xc7\x22\x15\xbb\xef\xec\x2d\xf9\x79\x8b\x5d\xe7\x5b\x36\xef\xcb\x1a\xb2\x0b\x60\x68\xf7\xf3\x31\xe6\xb4\xfa\x0f\x87\x1f\x17\x3f\x79\xd9\x7b\xd9\x86\x38\xba\xb0\xac\x65\x2b\xce\xfb\xc7\xd6\x1f\x77\x4d\xf7\x7b\x18\x77\x9c\xd7\xc1\xb5\x40\x11\x3e\xd7\x8f\x45\xe1\x9f\x7f\xd6\x95\xdb\xf9\x9a\x47\x7c\xf2\x4e\xcd\x2a\xe7\x85\xc2\x5c\x54\xa2\xeb\xbb\x38\xbb\x75\x59\xf6\xfc\xf6\xef\x3c\x8a\xcc\xbf\x7a\xec\x6a\x91\xd2\x3c\x40\x55\x96\x35\x76\x8d\x5c\xee\x94\x7c\x2f\xe2\x0a\x89\x33\x45\xed\x8b\x9f\x83\x2a\x9e\xc7\x34\x8b\xa2\xe2\x55\xd1\xf9\x79\x5a\x57\xff\xf3\x69\x84\xb2\x8e\x0b\x75\x87\xc9\x02\xee\x1c\x07\x7f\xd4\xc7\x56\x8e\xec\x0a\xd9\xc0\xc5\xdc\x4f\x90\xd5\x3e\x7c\xfc\xb9\x83\x59\x0e\xd7\x38\xd4\xbc\x37\x7f\xc8\xf0\x2a\xe7\xe9\x94\xb4\xb8\x40\xbb\x35\x19\x27\x26\x6f\x4d\x82\xc5\x7a\x84\x21\x89\x1a\xc2\xa3\xe7\xa2\xcf\xa6\xf3\x97\xdf\x21\x47\x2c\x4a\x6b\x8a\x1d\x37\x1a\x62\x07\x1d\x96\xe9\x1d\x2c\xdb\x87\xf7\x3d\x8c\xe6\x79\xd9\x05\x7f\xee\xf2\xa2\x34\x2d\x65\xd9\xa1\x57\x8a\x32\x92\x75\x7f\x94\xf7\x38\x9f\x75\xf6\xd3\xd3\xc5\x39\xe4\x16\x7b\x60\x56\xd9\x77\xbe\xa1\xc6\xab\x6f\x0c\x65\x35\x53\x2a\x8a\x7a\x82\xaf\x00\xfd\x3a\xc7\x0e\x9a\xa1\xfc\x76\x03\x9d\x59\x5d\x23\x5f\xa6\xc9\xce\xde\x82\x17\xa4\xd6\x0e\x5b\xd1\x7a\xe7\x6d\xf7\xf7\x84\x65\xb2\x25\x83\xcc\x3d\x36\xb5\x93\x2e\xf2\x2d\xf4\x4d\xd1\x14\x0d\x2c\x3e\x0c\xfe\x18\xcf\xaf\xa7\x16\x96\xff\x0d\x8d\x54\x94\x50\xd9\x00\x97\xc2\x09\xef\x7c\x2b\xaa\x66\x8
5\x6e\x30\x8f\x80\xba\xad\xbc\xe5\xea\x5f\xdd\xa7\x4c\xdd\x6b\x96\x91\x2f\x63\x3b\xe2\xdf\x2d\xda\x1e\x17\x66\xa0\x7c\xb7\xde\xe5\x8b\xf0\x6e\x02\x4f\xfe\x06\x51\x5d\xfc\x0e\xea\x6b\x52\xf2\x8a\x3a\x7a\x26\x9c\x71\x57\x14\x2b\x4e\x88\x19\x59\x3a\xbb\x45\xa1\xc0\x17\x2a\xbe\x81\x9b\xb4\x22\x6a\xeb\xbc\xb5\x11\x87\xcb\x8d\xe9\x34\xee\x70\x82\xb3\xec\x91\x6f\xfa\x4f\xc2\x2b\x51\xac\x9e\x87\xa5\xf6\x37\x59\xf0\x8e\x63\x28\x26\x01\x87\xbf\x1b\x5d\xc6\xca\x39\x09\xa2\xba\x70\x86\x6e\x72\x97\x36\x8a\x70\x9a\x27\x89\x4d\x7b\xc5\xf2\xdd\xf3\x17\xe4\x36\x83\x9d\x28\x1c\xa9\x22\x28\xb8\x0c\x66\x28\x5d\x3e\x8b\xa7\x80\xd5\x2d\xee\x99\xaf\xde\x54\xfc\xce\x00\x9b\x55\x96\x06\x66\xaa\xda\x4b\x6a\xb1\xe2\x0b\xf9\x7d\x42\xd9\x4b\x9e\x08\x28\x1c\x93\x53\x8f\x0c\x22\xef\x32\x24\x1a\xc6\x41\x5f\xab\xd8\x8e\xd6\xf7\x36\xd3\x91\xce\x3b\x7e\xfb\x9d\xa6\x8c\xc7\xde\xe6\x75\xf4\x0d\xad\xe1\x8c\xeb\x8d\x95\x5b\x5f\x1d\x60\x68\x98\xe2\x5a\x62\x61\x44\x4c\xf1\x8c\xeb\xca\xab\xbf\xf8\x7e\x82\x13\x32\x94\x20\x8b\xde\xd5\xe2\x8a\x74\xd8\xe7\x15\x41\x4a\x44\x9c\x53\xfd\x41\xb8\x78\x8c\x12\x4a\x94\xb7\x78\xe4\x54\x5c\x58\x97\x53\xb3\xf2\x93\xb8\xbe\x5c\xd6\x11\xaf\x65\x31\x1c\x3e\x49\x8c\xb1\x16\x8d\xc0\x8a\xbd\xe9\x5d\xf8\xc6\x11\xf1\x9c\x47\x9e\xe5\x79\xcd\x80\x21\xf0\x9b\x86\x24\xeb\x7b\xfe\xd8\x94\x3b\xd7\xcb\x22\x4b\xe6\xa3\x8d\xe2\x0e\xf9\xfe\xe5\x38\x8d\x81\xef\x75\x7e\x72\x8f\x6a\xd6\x26\x84\x4c\x43\x2a\xbb\x3e\xf8\x11\xc4\xa1\x2e\xa1\xd6\xd3\xd5\xe8\x44\xa3\x6d\x82\xe4\x41\xce\xf3\xe2\x9b\x17\x0d\x92\xfc\xfe\xd8\xa6\x21\xb8\x33\xdc\xd3\xf1\x6f\x43\xf2\x1d\x9b\xc3\x80\x37\xa9\x2e\x56\x82\xcd\x7a\xd2\xdb\x9e\xb9\x2f\xc5\x35\x9a\x71\x08\xc7\x76\xc1\xea\xd8\x65\xc0\x11\x1e\xe1\x91\x74\x5b\x68\x43\xde\x5e\x50\x37\xa2\x51\x8e\x78\x1e\x5e\xcf\x22\x51\x52\x03\xb1\xb9\x87\xdd\xb5\x5b\x1b\xa8\xf7\x20\xd8\x4b\xb8\xf1\xc9\xd3\x72\x39\x03\xdd\x56\x5e\xd4\xcd\x4e\x2f\x47\xa9\xaf\x4e\xde\xb9\xfa\x03\xad\x5d\x13\x8b\x90\xa5\x8b\x2e\x3e\xdf\xaa\xc4\x9e\xae\x27\x6e\xc5\xd2\x15\x87\xc6\x1f\x09\xfe\x6f\x31\x25\x17\xef\xca\x9f\x15\x41\x65\xdc\xc4\x29\x18\x71\x60\xca\x61\xff\xb8\x0e\x92\xb5\x6c\x25\xd6\xb1\x10\x1d\x29\x82\x2c\x96\xbc\x6e\x53\x2a\xe1\x9c\xbf\xec\x98\xfe\x6c\x78\x34\x7f\x9f\x07\xcd\x7a\x39\x79\x99\x02\xbf\x43\xff\x8e\x17\xb9\x85\xf7\x5c\x9e\xd3\xdf\xe7\x1e\x2e\x2a\x77\x87\xcf\x35\xbd\x3c\xb4\x09\x62\x36\xf6\x37\x3d\x86\xf2\x76\x93\xaf\x6b\xde\xce\x45\x59\x36\x2c\x71\x51\xae\x88\x1c\x2c\xec\x09\x72\x50\x5c\x8e\x9f\xb3\x1e\x28\x78\xc1\x19\xc1\x5c\x86\xa2\x2f\x76\x75\xc4\xa1\x2e\x7f\xd5\xfb\x96\xad\x53\x12\x0f\xc7\xd6\x40\xb2\x0d\x61\x14\x6f\x48\xfe\xf8\xe5\x9a\xc3\xe2\xf8\x73\x2b\x7e\xfa\x44\xcb\x52\x64\xbd\xed\xa0\x2e\xe8\xc0\x34\xdc\xfd\x1c\xd6\x69\xa7\x1a\xf7\x37\xb5\x65\xf0\x3f\xec\xca\xf6\xd2\x1b\xc0\xcb\x26\x28\x2d\x73\x84\xa9\x71\x5c\x8f\x1c\xc6\x6e\x84\xbc\xcc\xb4\xd0\x16\x8f\xbb\x91\x88\xb0\x56\xf5\xd4\xd0\x40\xc1\x0a\xac\xd4\xb2\xe5\xe9\x65\xcf\xe0\xa6\xfd\x0d\xe2\x2e\x77\xa6\x9e\xf1\xf1\x0d\xd9\x85\xb6\x9c\x74\xfc\xb1\x85\xb2\x74\x9f\x72\xfc\x2b\x96\xe8\x36\x4f\xd3\x53\x1c\x4a\x78\xbe\x4b\x68\xe9\x6c\x16\xa1\x68\xb9\x99\x66\x4d\x12\x0e\x96\x8b\x5c\xe4\x4f\x94\x13\xdf\xc3\x56\x59\x72\x24\x50\x77\xdd\xfc\xb3\xf2\x13\x51\xf3\x64\xe6\x3d\x53\xcf\x35\xdc\xba\x48\x03\xff\xd4\xec\xc1\xb1\x4a\x23\xf4\x29\x03\xa5\xb2\x7b\x6e\x21\xaa\xe7\xa2\x16\x53\x12\x1e\xe2\xcf\xd3\xe0\xc3\xe8\x14\xff\x26\x4a\x02\x21\x4a\x8a\xaf\x8d\x4b\xb1\x34\x91\xa7\x68\xa4\x87\x37\x26\x04\x4e\xf0\x45\xbe\x59\x2c\x24\x03\x23\x14\x63\x9c\x25\xcd\x50\x04\x50\xb6\x07\xd2\xdb\x5c\x0c\x6e\xe4\xc9\x8f\xa9\xeb\xfd\x9e\x5d\x0f\x9f\x6e\x7a\x21\x1b\x1b\x2d\x
63\x04\xc9\xd2\x8c\x70\x57\xc2\xa2\xf2\x7f\x46\x18\x16\x1e\xc0\x39\xd8\x11\x39\x2d\x52\x1c\xcd\x22\x21\x45\x90\x77\x04\x3f\x0f\xe8\x10\xf3\xc0\x5c\x9f\x60\x6b\x2e\x89\xf9\x60\x5a\x4b\xf4\x23\x32\x78\xb4\xa4\x89\x39\x97\x58\x2e\x90\x19\xe7\x13\x17\x67\x21\x52\xb3\x24\xc8\x00\xe5\x4e\xbc\xee\xa2\xd6\x1f\xfe\x4c\x48\xd0\x64\xbc\x52\x4b\x7d\x36\x30\xc1\x24\x41\xcb\x40\x03\x5b\x7f\x94\x61\xa0\xa4\x8d\xee\xe9\xa2\x74\x19\x07\xcc\x0c\x50\xdb\xf4\x97\x06\x00\x2b\x68\xd9\x6e\x78\x25\x14\x2c\xe4\xc4\x1f\x53\xcf\xf3\x61\x55\x02\x38\xa5\x03\x33\x2e\x25\x1c\x4a\xf8\xbb\x1f\x88\x1f\x0b\x1a\xf1\x36\x49\xb0\x7d\x3b\x3f\xc1\x59\x33\xc9\x4e\xb8\xf3\x2d\x21\x7d\x32\x71\x07\x7e\xd2\x98\xee\x4c\x41\xeb\x09\xb2\x63\x89\xac\x65\x79\x10\xbf\x4f\x48\x83\x9f\x51\xee\x71\xd1\xda\x23\xd7\x89\xe1\xf4\x40\x83\xaa\x4e\x5b\x79\x31\xb8\x80\x2f\xcd\x97\xcc\x4c\x83\x1d\x3e\x2b\xfc\xc0\x96\x67\xbe\x18\x10\x86\x52\x16\xcb\xba\xbe\xb0\x20\x9b\xdf\x9c\x99\x95\x6d\x68\xc2\xca\xa2\x31\x05\x55\x8e\xa7\xa9\x3e\x17\x9b\x1f\x3f\x9f\x0d\x33\xcf\x2d\x72\x58\x56\x4f\x79\xba\x3b\x60\x02\x44\xe1\x9a\x66\xa4\x9c\xe2\xfd\x8e\x2d\xe3\xb2\x97\xbb\xc0\xb3\xe5\xee\x95\x70\x7b\xa2\x85\x64\x80\x45\x7d\x61\x59\x4a\x38\x6f\xf4\xf8\xcb\x2f\x61\x0f\xc6\xf4\x7c\x5e\x54\x7d\xd0\x95\x5d\x1a\x1e\xa5\xa4\xe6\x4d\xe4\x06\x1a\xca\x0c\x88\x17\xde\x2c\x63\x04\xf7\xc5\xea\x02\xac\x95\x95\xb8\xb9\xf5\xe4\x43\x4d\xd2\x41\x13\x32\x2e\xbf\x17\x1d\x06\xd5\x84\xd0\xbb\xfc\x1c\xcc\x57\x39\xff\xfe\x73\x89\xce\x1a\x97\xa5\x1e\x33\x26\x89\x02\x12\x62\x4d\x39\xe1\xd9\xdf\x21\x8d\x5d\x71\xba\xce\xab\x37\x4c\x45\xf1\xa9\xd3\x25\x8e\x76\xed\x3c\xe5\x96\x8b\x84\x14\x51\x51\xd6\x2c\x7e\x1e\x25\x17\x48\xf1\x88\xcd\xc4\xf1\x7a\x5c\x13\x68\x92\x5e\x6a\xa8\x13\x2d\x8f\x52\x8c\xca\x44\x85\xf3\x84\x5f\xb4\x50\x62\x8e\x3a\xa4\x1c\x0c\x57\xef\x8c\x88\x5a\xd1\xa5\xe5\x50\x64\x57\x8a\xbe\xc5\x5a\xe1\x38\xea\xb5\x11\xea\xca\xbf\x65\x79\x33\x28\xf8\x57\x2c\x52\xf7\xb9\x68\x05\x97\xab\xa3\x7a\x1a\xe1\xc6\x52\xdd\xf6\xe2\xf2\x34\x25\x82\x77\x35\x10\xef\x8c\x67\xcc\xf1\x9b\xd3\xe4\x2a\x52\xbc\x0d\x78\x58\xa2\x3c\x19\xd2\x06\x2a\xe9\x22\xc3\x3c\x09\x08\x3e\x6b\x1d\x9b\x79\x37\x6c\x5e\xfd\x4c\xb4\xbe\x87\x0d\x16\x77\x7a\xf0\x57\xe3\x59\x66\x6e\x16\x96\x06\xa7\x86\xde\xf5\x2b\xa4\x0e\x01\xdd\x78\xa9\xbb\xf1\x2e\x78\xed\x36\xbc\x7a\x56\x48\x5d\x32\x4c\x2b\x78\x55\x23\xae\x2e\x25\x5d\x55\x01\x7e\x8f\x34\x4c\x1d\xeb\x7f\x13\xcf\xee\x33\x52\xef\x64\x66\x5a\x2c\xc3\x06\x8f\xc6\x62\x79\xdc\xf5\x09\x1d\xbd\xcd\x0b\x5d\xb3\xf4\xf9\x40\xcb\xa5\x99\x09\x63\x93\x0c\x51\xd2\x52\xe6\xd9\x19\x9e\xec\x52\x3b\xc4\x52\x76\xa2\xe3\x8a\xf3\x02\xbf\xbd\xd6\xc4\xa4\x18\x0e\x79\x38\xe2\x8a\xb1\xf5\xf3\x64\xe9\x0e\xa6\x84\x13\x94\x92\xc9\xf8\x47\x6e\xe4\x98\x03\xab\xd9\x30\x7e\x8c\x22\x9e\xc5\x30\x46\x16\x19\x3b\x79\xcd\x7b\x59\xc7\x96\xee\x16\x8f\xcd\x06\x93\xfd\x6f\x7e\x55\x43\x58\x45\x01\x1c\x3f\x73\xdd\xaf\x0c\x23\xb5\x6e\x83\x27\x05\xca\xb7\x83\xd6\xb8\xa1\x12\xab\x79\x76\x81\xbc\xe8\x51\x0f\x9f\x36\x89\x7b\x98\x55\xcb\x1b\x6d\x96\xe5\x7a\x04\x28\x52\xfc\x47\x26\x38\x96\xa9\xd8\x8a\x79\xc2\x9f\xd3\xef\xb7\x6f\xe3\xb3\xb7\xbb\x11\x8d\x98\xe1\x9a\xbc\xe3\x86\xad\x1f\x3c\x0f\x4f\x6e\x96\xba\xc3\xe1\x7b\xd2\x57\xa1\x8c\x4a\x79\x21\xb3\x0c\x99\x83\x80\x52\x7e\x92\xbc\xf1\xb8\x41\x0e\xbb\xe0\x3f\xbe\x70\x9d\x5a\x2a\xf1\x65\xbf\xb9\xa7\x13\x6e\x5e\xf5\x4e\x6f\xe4\x80\xe9\x34\x6a\xe6\xa6\x41\x06\x06\x62\xff\x90\x8c\x40\xd7\x06\xaa\xe3\xe9\x4e\xa9\x19\x62\x60\x9d\x70\x6b\x99\x29\x5e\x99\x63\xce\x2c\xe4\x96\xdd\x1b\x79\x9b\x5b\x62\xfa\x60\x29\x1a\x48\x75\xed\x8e\xc0\x1a\xb9\xba\x0a\x74\xa1\x11\xa5\x6e\x32\xdc\x12\x94\x5c\
x2d\x4e\x88\x9a\xc6\x66\x47\xa4\xb0\xbe\x69\x19\x3f\x38\x73\x3f\xb1\x94\x0b\x95\x6d\xfa\xc8\xad\x21\xd4\xf0\x91\x46\xe0\x19\xce\x13\x83\x1e\x2c\x85\xc9\xb9\x2b\x9c\x6d\xed\x91\x5d\x8f\x58\xbf\x72\x02\xe2\xfc\xa6\x1a\x70\x39\x58\xc5\xaf\xce\x11\x51\x43\x5f\x7e\x0a\xb5\x2f\x73\x0d\xcf\xcf\xde\x21\x33\x7c\xbf\xcd\x92\x14\xa8\xca\x0f\xd9\x9e\x12\x10\x48\x52\xd0\x2a\x56\x10\xfb\x37\x9e\x34\x88\xda\x83\x43\x99\x53\xcb\x02\xd1\x4c\x85\xf2\x9b\xb5\x91\x0a\x52\x13\x5f\x0d\x6a\xa3\x6b\x92\x98\x0c\xd0\xad\xd9\x73\xe8\xed\xa4\xc5\xdc\x27\x6c\x84\xc7\x6a\x31\x8f\xe2\x29\x5d\x4a\x17\xe5\x43\xec\xc3\x9b\xba\x65\xa7\x9b\x20\x41\x5e\x89\xd8\x02\xd6\x7e\x89\x0c\xaf\xa8\x87\x82\x46\xd3\xf7\x24\x71\x2d\x12\xa0\x2d\xab\xa5\xa3\x84\xbc\xdf\x7c\xf7\x0a\x55\xc3\x6a\xf7\xab\x18\xed\x3c\xbd\x10\x21\xe7\xd0\x95\x3d\x3d\xdf\x7f\x60\x46\xfc\x16\x05\xca\x50\x64\x0b\xa9\xdb\x0a\x61\x42\x58\x0a\xab\x17\x97\xa1\x96\xe7\x35\x64\xf0\x8f\xdf\x78\x1e\x13\xa0\xf3\xf2\x43\xa5\x36\x04\xa9\x2b\x8c\x92\x66\x2a\xfe\x6e\x7c\x22\x90\xda\xb8\x51\xbd\xe9\x50\x97\x6f\xe6\x7d\x8b\xf3\x03\x5f\xf5\xd7\x94\xf3\x00\x31\xbf\x1e\xa4\x60\x32\x53\x2a\x1f\x22\x1a\x66\x9a\x1d\x45\x61\x18\x03\x94\x04\x18\xa1\xb5\xd4\xaf\xe5\xe5\x2f\x49\xbf\xc3\xf1\xa0\xfa\x88\x03\x8d\x51\x43\xa0\xd1\x07\xce\xc1\x78\x71\x22\x27\xaa\xee\x78\x2d\xec\xe6\xf2\x84\x04\x00\xf1\x53\x6e\xf9\x22\x3e\xb7\xed\x31\x54\x69\x31\xfe\x40\xb0\x65\x49\x11\xe7\xf0\xe4\x9f\xc6\xf6\x47\x2d\x71\xf1\x7a\xd4\xae\xc7\x99\x29\xd4\x5d\x2c\x1b\x3d\xfb\xbf\xe2\xe6\x43\x13\x55\x34\x19\x56\x72\xd4\x58\xca\x9c\xb4\x41\x70\x1f\x41\x80\x18\x89\x4b\x50\x33\x1b\xe7\x1a\x6c\x1f\xb9\xd1\x34\x4b\x32\x75\xc7\x4f\x68\x1a\x96\x8a\xb0\x15\x57\xa1\x4a\xd9\xa8\x6b\x07\xa9\xe3\x53\x6e\xb0\x41\xcf\xb0\x48\x05\x7a\x17\x20\xcf\xfb\xea\x21\x1a\x3c\x50\x8b\x2b\x30\x14\x41\x60\x73\x7b\x89\x4e\x71\x7e\xcd\x4a\x61\x77\xda\x2e\x72\x95\xa1\x88\x5e\x52\x68\x38\xc0\x43\x90\x5d\x7a\xa1\xeb\x47\x9c\xaa\xd3\xd0\x99\x05\x96\xf5\xad\x08\x00\xc5\x80\x62\x69\x16\xf1\xc3\x69\xa1\x8b\xc8\x49\x14\x72\x04\x67\xfe\xd7\x02\xaa\x4a\xed\x4c\xd1\x58\x92\x14\x4f\x90\xae\x8e\xcc\x98\x16\xcb\x07\xd0\x4f\x4c\x33\xee\xd2\xe1\xa0\xdc\x24\x11\x15\xac\xf2\xec\xf5\x1e\xcb\xad\x42\x6e\x4d\x05\x61\x1b\xad\xe6\xee\xe9\x1c\xea\x79\xa9\xf5\x95\x25\x7c\x23\x34\x13\x97\x20\x68\x7a\xb9\x6a\x0d\x37\x61\x13\xf4\x94\x1a\xef\x85\xf9\xbb\x5a\x79\x44\xe9\x9f\x4b\x59\x51\xb5\xd8\xb2\x56\xe2\x14\xcb\xf0\x28\xb0\xef\x8e\x0f\xbb\x0d\x19\x94\xe3\x1c\xc0\xbe\xd2\x1b\x0a\xa6\xff\xa1\xe1\xdf\xf6\x08\x28\xc6\x85\x72\xa3\x85\x70\x0d\x98\x8e\x03\x39\x23\x92\x02\xa5\x5a\x2b\x5d\xd7\x23\x8d\x4d\x96\x4a\x43\x16\xb4\x44\x18\x5e\xc1\x23\x0c\x3b\x61\x62\x92\x90\x39\xa4\x28\xfe\x24\xb5\x85\xbe\xae\x25\x7a\x89\x40\xc2\x6f\x74\x94\xd3\x3f\x47\x3b\xa8\xd0\xd0\xfd\x84\x92\x64\x05\xb3\x9c\xea\x96\x82\xc1\xd5\x06\x88\xec\xd8\x51\xdf\xec\xe6\x89\xbd\x2f\xcb\x91\x2e\x25\xe9\xa8\xb0\x4a\xc4\xcd\x0a\xe9\xa5\x24\x34\x61\xb9\x94\xdf\x0e\x8f\xda\x77\xd4\xde\x71\x0d\x14\x39\x82\x39\x2e\xb0\x39\xa9\x31\x19\xb8\x8c\x99\x40\x45\x65\x1b\x22\x7c\x5c\xa9\xc2\x22\x1e\xb2\x16\xbd\x60\x0b\xa9\x62\x4c\xc9\x0a\xa2\x34\x7b\x2c\x5f\xe1\x6d\xe7\x0e\x44\xc0\x50\x3a\x6e\xf7\x0c\x77\xa3\xdc\xee\xcf\x96\x14\x7b\x87\xa4\x2b\x45\xc6\xfc\x1b\x22\xa0\x6d\x6d\x10\xea\x7e\x91\xf1\x48\x9e\x69\x4a\x33\x4f\x5c\x4a\xab\x91\x27\x16\x46\x7f\x22\x15\xe3\x0e\x13\xff\x83\x20\xc4\x6a\xfa\x97\xc4\xee\xa9\x72\x00\x45\xab\x0e\x39\x14\x8e\x29\x2a\x3a\x5c\x37\x6e\x79\x97\x70\xf0\x82\x38\x94\x6f\x56\x05\x5e\x61\xf4\x52\x20\x0b\x23\xc5\x23\x92\x34\xd7\x9a\xe0\xfe\xb7\x1a\xb2\x59\x54\x48\x97\x48\x94\x00\x02\xac\x12\xbc\xc6\x46\x83\x08
\xba\x03\xf9\x1f\xb8\x3c\x12\xd0\x47\xf6\xce\xb7\x34\xce\x03\xbb\x2c\x2c\x08\xf3\x23\x20\x87\x78\x05\x6c\x7e\x6a\xd6\x40\x38\x74\x68\x99\x2d\x46\x8e\xa8\x8b\xea\xc6\x7c\x9b\x53\xa0\xca\x98\x8d\x1f\x05\x37\x64\xe8\xcd\x5e\x8d\xa0\x8b\xe6\x34\x22\x7f\x9c\x8b\x29\xe0\x6e\x58\x83\xc2\xf9\x13\xbd\x64\xe0\xc3\xb8\x96\xa3\x29\x8e\x86\x25\x5a\xfc\x58\x5b\x34\x9c\x5a\xfa\x65\xbe\xae\xdc\xae\x28\xf5\xa8\x56\x6c\x41\x4e\x48\x63\x54\x1f\xe4\x5c\x0f\x62\x57\xac\x06\x05\xdf\x71\xa0\xcf\x69\xa5\x4a\xd4\x30\xbe\xb9\x66\xe4\x06\x97\xc1\x8f\x8f\x45\x68\x0c\x4a\x5a\x38\xfa\x5c\x15\x29\x2c\x17\xd3\x86\x47\xae\x91\x14\xfc\xfa\x81\x79\x9d\x40\x7c\xd8\x3c\x35\x9e\xac\xae\xe7\x18\xf1\x75\xbb\xaf\x0e\x3b\xaa\x21\x17\x2a\xd6\x8b\xc8\x93\x39\x40\x58\xc1\x8e\xe5\xaf\x4c\x79\x7e\x99\x69\x0d\x94\xa0\x10\x99\x2c\xde\xa5\x45\x4a\x5d\x15\x9a\xe4\x0d\xee\x41\xf3\x47\x10\xf1\xb5\xf7\x48\xd4\x28\x6b\x31\x15\xaf\x80\x0c\x38\x53\xd5\x52\xdc\xba\x69\x95\x3d\x2d\xb2\x15\xdf\xe0\x89\x41\x18\x9a\xa2\x5a\x05\xad\xb4\x71\x88\xeb\x4c\x55\x01\x7f\xe1\x11\x2e\x4d\x25\xa6\x00\x80\x4f\x4e\x92\x25\x92\x63\x74\x2f\xa6\xc7\x6f\xb4\x4a\xb6\xf5\x0b\xae\x67\x74\x87\x52\x1d\x1c\xd2\xa2\x75\xad\xdf\xe1\x62\xe1\x21\x9e\xe6\x1a\x48\xf0\xb2\x6a\xe2\x98\x41\x1a\x76\x8c\xf6\xd7\x3a\x09\x10\x3d\x6c\x6d\xd2\xd4\x25\x57\x26\x01\xf6\xd4\x28\x70\x50\x15\x43\x5f\xf4\x4d\x1c\x89\xca\xed\xe3\x44\xc0\x6b\xed\xb2\x62\x0b\x84\x65\x57\x89\x6a\x57\x47\x32\x4b\x13\xc8\x4d\xb7\x70\xa4\x5d\x6d\xf9\xc8\x56\x89\x75\xac\x5e\x51\x1d\x2e\x5a\xcc\xac\x32\x22\x79\x43\x56\x0c\xc1\x27\x0e\xb3\xa6\xbc\xac\xb4\x82\xde\x9c\xcd\x0a\xfd\xf3\x20\x82\x37\x41\xb7\x1c\xc5\x65\x0d\xea\xe9\xe4\x49\x10\x93\x27\x56\xac\x8b\x32\x45\x75\xa9\x57\x50\x73\xa8\xf2\x0f\x03\x86\x65\x0e\x9d\xa4\x98\x8a\x5f\xda\x06\x77\xfc\x92\x65\x6b\x7d\x8f\xd2\x5b\x23\x63\x05\xe3\x57\x43\x7f\xae\x52\xa4\x49\x16\x1c\x82\x28\xe8\x35\x0a\x18\x38\xb1\xab\x22\x6f\x4d\x39\x31\x04\x97\x30\x3b\x62\x25\xc7\x1e\x9b\xca\xea\x50\x10\xb3\x7c\xd4\x79\x7c\x03\xb5\xcb\x62\xe9\xf9\xcd\x57\x91\x4b\xe6\xa3\xd1\x78\x64\xb6\x0d\x4d\x54\x2d\xdc\xd7\xd5\xa2\xe2\xf3\x6a\x1a\xf8\x5b\x2d\x9f\x6a\xa4\x71\x00\x76\x92\x69\xe8\x39\xdc\x83\x24\x39\x76\x58\x1f\xba\xcc\xe5\x3b\xbc\xf9\x9b\x87\xcd\x9a\x6a\xfc\x0d\xc5\xd6\xd1\xcd\xe6\xc6\x2c\x45\xbb\x13\x20\x32\x73\xeb\x8f\xf4\x3b\xf4\x88\xb8\xbb\xef\x48\xe8\x9d\x24\xff\xec\x80\x51\x43\xa0\x81\xce\x8a\x71\x90\x12\x4b\xbe\xf9\x87\x35\x15\xe6\x1a\x5b\x92\x6d\xcd\x25\x65\x63\xb9\xf9\x73\x5d\x27\x37\x0f\x87\x8b\x85\x23\xed\x26\x41\x4e\xc4\x01\x16\xe4\x8a\x0f\x1e\x21\x48\x3a\x4a\xce\x55\xf5\x0c\xa4\x13\x14\x10\x13\x39\x38\xe2\xe9\x1d\xf0\xce\xe0\x0f\x0c\xcf\x59\xb2\xf7\x51\x6c\xf4\x26\x2d\x34\xcf\x9d\x4e\xc2\x81\x2f\x38\xdf\xf4\x65\x59\x7a\x28\x87\x99\xf0\x3f\x3e\xf2\xd1\xd1\xe7\xa6\x25\x28\x88\x8f\xbf\x1f\x27\x38\xea\xb3\xe8\xfd\x07\xd7\xeb\x48\x44\xf2\xac\xb1\xac\x5f\x8e\x06\x83\x24\x48\x7d\x2d\xca\x75\x3c\x11\x2b\x7d\x30\x96\x07\x33\xa2\xdd\x9a\xe6\xf7\xe7\x97\xe2\x6b\x9a\x89\x38\xa3\xee\x78\xc7\x71\x3b\x77\xea\x00\xee\xfb\xaf\x35\x02\xfe\x3d\xde\x27\x49\x94\x52\x74\x0d\x01\xd9\xa3\xe2\x16\x8b\xfe\xd0\x76\x21\x6d\x6f\x5b\x80\x54\x64\xff\xab\xc9\xa0\xaf\xbe\xe1\xf4\x20\x33\x4f\xdd\xe0\xc7\x36\x76\xbe\xa3\xfb\x82\x2c\xfe\x8e\x0a\x5d\x90\xda\x4d\xc5\xb9\x68\x59\x13\xe7\x41\xa0\x19\xf7\x94\xa7\x01\xbe\xc1\xa7\xfc\x3b\xb1\x6a\x75\xd4\x0a\xc4\x3f\x81\xdd\x78\x03\x93\xbf\x31\x9f\xc5\xc7\x79\x5d\x4a\x40\x47\x97\x25\xd3\x94\x4c\xf5\x5a\x24\x7a\xbe\xb1\x98\xa1\xa8\xfd\xbf\x52\x46\x6a\x43\x35\x7e\xd4\x1b\x52\x75\xbe\x31\xb5\x39\x86\xa7\x00\xfb\x98\xc1\xae\xe9\x54\x44\x84\x9a\xca\x45\xe1\xa8\x15\xc8\x61\x2b\x5
d\x9c\xd6\x7c\x4f\x7f\x6b\x95\xac\xca\xc4\x8c\xed\x37\x2d\x0e\x9d\xc1\x4a\x96\xa9\x4a\xd9\x9f\x5a\x5e\xe6\x59\x65\x31\xaa\x9d\xde\x8c\x19\x06\xc9\xae\x0e\x40\xf6\x7f\x1b\xa4\x4f\x99\x40\x8a\x7a\x25\x44\x73\x60\x11\xde\x92\xa0\x2b\xb3\xa0\x56\xbb\xbf\x68\x56\xff\x62\x43\xcb\x56\x02\x47\x7a\x4f\xd1\x0b\xb1\x97\xb4\x64\xf6\x96\xe6\xc7\xd4\x8f\x92\x33\x84\x73\xc2\x44\x8f\x99\xbc\xc8\x96\xf7\x27\xb2\xc8\xd2\xcd\xdb\x5f\x8c\xcb\x1e\x5a\x58\xd9\x57\x9c\xa5\xb8\xda\xb1\x61\xb9\x42\x92\x05\xbb\xd3\x13\xa0\x3f\x4c\x1f\xe9\x81\x35\xf7\xd0\x7b\x0f\x25\x1d\x51\x34\x41\x7b\xcd\x7e\x8a\xab\x95\x24\xff\xb3\x0a\x9e\x2c\x07\xf9\xf0\xdb\x38\x37\xaa\xa7\xe0\x8f\x18\xd0\x10\x6d\x7d\x5b\xb1\x0b\x33\x8e\x6e\x11\x7b\xaa\x89\x2f\x4d\x03\xce\x65\x27\x29\x49\x07\xa5\xfc\x42\x3e\x60\x5d\xd8\x63\x0a\xaf\x68\xbe\x10\x63\x4c\xcc\xa9\x1c\x72\x78\xee\xaa\xf4\x1d\x04\xeb\xcb\x56\x27\x07\x66\xa8\xc8\x39\x96\x68\x22\xa0\x31\x5e\x52\x91\x33\x0b\x40\x70\x8e\xca\xaf\x11\x16\xbc\xc6\xcb\x42\xc4\x91\xfe\xd2\x4d\xb0\xf2\x45\xb7\xa8\xd7\xdd\x48\x10\x2a\xc9\xad\x56\xe3\x44\x96\x16\x0f\x0e\x11\xc2\x5c\xc5\xb9\xbf\x91\xa3\xa4\xd8\xb1\xb8\xfc\x03\x30\x72\x17\x0e\x39\x86\x38\x4e\x6f\x04\x8e\x4d\x2e\x61\xed\xb8\xa6\x4b\xd7\xc3\x8d\x5e\x1c\xb0\xdc\xf5\xea\x1f\x09\x1b\xa9\x9e\x99\x2e\x6a\x82\xd7\xa6\x25\x77\xbc\x02\x62\x55\x5e\x5a\x8b\x0f\xde\xe7\x62\xea\xef\xfc\x77\x24\xf7\xc9\x9b\xd0\xd4\x37\x0f\x67\xcd\xd5\xfb\xae\x5a\xd5\x2a\x09\x42\x52\x4e\x6a\x09\xcb\x47\xd2\xad\x80\x6d\xa5\x91\x22\xac\x96\x0a\x44\x52\xad\x71\x12\x0e\xeb\x85\xf5\xa6\xb6\xbd\x5e\x0a\x89\x0a\x98\x12\x0f\xba\xb6\x5a\xfb\xda\xfc\x40\x28\xc7\x48\x2c\x41\x10\x9c\x58\x76\xa3\xdd\x4b\xc2\xf9\x5e\xa4\x73\x12\xa7\x33\x88\x97\x62\x41\x51\xa6\xcc\x91\x79\xc5\x9c\xaf\x4c\x35\xed\xdb\xb4\x9d\xaa\x22\x27\xa9\x6f\x66\x62\xf2\x61\x49\x25\xb7\xf4\x88\x02\xe9\x17\xc8\x6f\x5d\x20\x46\xbf\xbd\x04\x94\x41\xc1\x4f\x89\x7e\x37\xdc\x8a\x46\x3d\x25\xb3\x85\xc8\xc7\xa6\x0c\x77\xb7\x93\xfa\x7a\x74\xf0\x96\x94\x1b\xec\xf9\x11\x02\xbb\x16\xb3\x7e\x10\x3c\x46\x0c\x84\x44\xb6\x5b\x6e\x90\xba\x5c\xd3\x9b\x0e\x52\x16\xe8\xd4\x13\x38\x92\xe3\x68\xc1\xb1\x92\xfa\x84\x35\x1e\xb9\x2e\xf4\xd3\x53\xa1\x58\x04\xf0\x2f\xe2\xf4\x1b\x0e\x88\x66\x38\x06\x36\xac\xfc\x62\xb2\xb0\xb6\x8b\x58\x9f\x6f\x26\x16\xd1\xde\x28\xb6\x88\x4a\xb2\x52\x13\x11\x8a\x28\x70\x0b\x83\x49\x45\x26\xf4\xda\x24\x61\xea\xb8\x8a\x9c\x43\x70\x9a\x83\x64\x83\x6f\xac\x8d\x94\x8a\x12\x60\x79\x37\x12\x8b\x26\xcc\x27\x2d\xfb\x2b\x1a\xa9\x50\x97\xcd\x04\xb4\x3b\x6b\x47\xbb\x65\x3a\xfc\xaa\x16\x63\xfc\xd8\xfd\x20\xfb\xd7\x3c\x23\x51\x55\xd4\xd5\x87\x5a\xf7\x1d\xba\xed\x17\xd3\x91\x34\xbf\xd3\x06\xb9\x91\xaf\x70\x45\x61\x78\x35\xac\x7c\x79\x94\x6e\xc5\x0d\xdb\x2a\x94\x11\x06\x35\x07\xd6\x3c\x56\x94\x12\x0e\x95\x36\x29\x70\xcb\x2d\x26\x0a\xa0\xd0\x9a\x15\xed\xb3\x86\x0a\x91\x65\x8c\x86\x8f\x5b\x4d\x92\x7d\x0c\x67\x04\xb0\x8e\x91\x13\xb9\x10\xec\xd2\x5e\x5c\x51\xd8\x7e\x27\x40\x2d\x56\x8a\xbb\x91\xf1\xb8\x5e\x7e\xf5\xdb\xe4\xd1\xd2\x73\x57\xfa\x03\x13\xf5\xf3\x45\x6a\xaf\xee\x79\xcf\x30\x5e\x28\x59\xe6\x89\xe6\x7e\x6c\x04\x97\x20\x39\x57\xe9\x1e\xb2\xe2\x95\x38\x89\xc7\x8d\x45\x5e\x11\xd8\x5f\xfb\xde\x2b\x63\x15\xab\xaa\x45\x34\x5e\x78\x07\x65\xb7\x98\x60\xd6\x4a\x00\x1c\x77\x04\x18\xe5\xa0\x3a\xf2\xe7\xb9\x63\x55\x52\x8e\x17\x7f\x31\x66\xe1\x5d\x13\xe4\xbf\x45\x1b\xd0\xa9\x9a\x52\x3e\x8a\x09\x00\x02\x33\x03\x16\xd9\x1b\x19\x06\x81\xcb\xee\xac\x42\x1a\xc6\xc6\xcf\x97\xa5\x8a\x7d\x9b\x3a\x03\x3f\x9d\xf2\x1e\xdf\xec\x3e\xb9\x9c\xb0\x9d\x09\x14\xba\x03\xcf\x1d\x11\xf4\x97\x7f\x44\xde\x76\x4e\x6a\x1d\x2f\xc2\xe5\xac\x18\x89\x29\xb4\x30\xb2\xc5\xbc\xc8\x
fd\xc2\x04\xf7\xe6\xba\x40\x51\x34\xd6\xfb\xcd\xfc\xc1\x5d\xba\x12\x6a\xde\x71\x54\x4c\x1c\x5e\xd3\xba\x02\x8b\xc3\x4c\xa6\x27\x56\x2b\x0e\x22\x1e\x78\x25\xd6\x14\x43\x87\x9a\x57\xb7\x5b\x84\x52\xfd\x1b\xc4\x3b\x8c\x9a\x23\xeb\xc6\x89\x4d\xc5\x89\xaf\xa3\x64\x4a\x33\x7f\x3d\xb0\x5f\x4a\x7c\x60\x92\x8b\x19\xc7\x15\x19\x23\xde\x82\x6d\x4b\x92\x1d\x95\x5a\xcf\x23\xaa\x4f\x21\xf8\xc1\x72\x1f\x84\x9b\x59\x6d\xc6\x1f\x65\x03\xf8\x65\x17\x84\xc2\xfc\x52\x4d\xf9\xca\x4b\xc4\xd9\x24\xa5\xe1\xcb\x37\x29\xd9\xfa\x41\x2c\x9b\x8f\x5a\x63\x3d\x5a\xe2\x53\xc2\x0f\xbe\x92\x23\xb2\x19\xe4\x19\xa3\x97\x68\x2b\xc3\xc6\x79\x40\x2c\xb9\x49\xbf\xca\x60\x0d\xa8\x50\xb1\xff\xd5\xae\x16\x0a\x6c\x7f\xd1\xd4\x34\xe5\xcc\x07\x5a\x6b\x95\xc6\x81\xaa\xa2\x58\x92\x9d\x14\xc6\xe8\x7e\x96\xd4\xfb\xe7\x01\x32\xf4\x81\x15\xf8\x9b\x26\x62\x45\x8a\xd7\x0f\xd9\x30\x0f\x03\x05\xe8\x31\x8a\x8e\xbe\xd4\x97\xd9\xc6\xc9\xac\x48\xd5\x00\x6e\x65\xd2\x40\x8a\x05\xcf\x92\xdd\xc3\x8c\x38\x21\x4a\x30\xda\x68\xfd\xf5\x37\xa9\xb8\x70\x17\x32\xb3\x8a\x8e\x34\xd9\xa7\x3c\x49\x02\x82\x15\xe5\x95\x18\xa9\x97\xd1\x09\x81\x03\xe7\xa5\xc4\x2a\xbb\xa0\xef\x8a\xf3\x31\xa2\x13\x66\x9a\x5f\x44\x69\xcd\xef\x08\xc0\xe1\x47\xa4\xec\xc8\x35\x63\x75\x66\xcd\x97\x5c\xa8\x80\x0e\x5e\x15\xe8\x2f\x42\x17\x2a\xf5\x09\x5c\x4f\xcd\xea\x82\xc5\x05\x42\x62\xd2\x2c\x0f\x60\x45\x70\x28\x3c\x44\x35\xbd\x2f\x62\xb1\x3a\x4f\x6f\xed\x3a\x40\x7c\xb0\xe6\x02\x83\xea\x93\xf0\xc2\x3d\x04\x8f\xd5\x5c\x18\x2b\x8a\xfe\x5d\x01\x5f\xae\xa8\x3c\x74\xed\x08\x9e\xb8\x0f\xb2\x97\xb5\x95\xc4\xd7\xb5\xa1\xd3\xcf\xee\x32\xff\x46\xca\xcc\xe6\xa6\x1c\xaf\x1c\x5b\xc9\x8f\x08\xfc\xee\x1d\x8c\x00\x07\x5f\x1e\xe2\xb7\x56\xf3\x9a\xc0\x0c\x08\x0d\xc5\x53\xf2\xf1\xf1\xe6\x6a\x61\x97\x94\xde\x48\x8d\xca\x74\xe7\x10\x81\x51\xb6\x83\x73\x2b\xc1\x8c\x6b\xa5\xe9\x4d\x0b\xd2\x91\xfc\xee\x5b\x4b\x74\xd3\xb9\xb5\x9e\x0a\xfe\x20\x99\xf9\xbe\x64\xc1\x7c\x83\x6b\x9e\x4a\xba\xa0\x12\x71\x1a\x66\x43\xa1\x27\xaa\xf0\x31\x5b\x83\x93\xf2\x08\x9f\x41\x9a\x3c\xcf\x6f\xf5\x93\xff\x53\x6a\x67\x95\x55\x0e\xa7\x9f\x32\x10\x4b\x28\x21\x49\xac\x56\x4b\x7f\x54\xff\xb4\x56\x86\x03\x51\x48\x59\xe2\x29\xb5\x2e\x52\x31\x45\xe7\xbf\x8d\xd6\x10\xa5\xfc\x31\xb6\xe9\xd2\xdd\xbc\x48\x90\x0d\xcd\x8a\x74\x78\xf5\x51\x20\x61\x40\x16\x95\x33\xbd\x8a\x3b\x82\xa5\x33\x33\x88\x78\x8f\xd1\x3e\x1d\xb0\x46\x94\x84\xe1\x53\x04\xf9\x25\x4c\x7f\xab\xb1\xbc\x31\x99\xc7\xa4\x2a\x0b\xf8\x59\x83\x96\xe2\xc2\x02\xf8\xa6\x64\x00\x07\xaa\xc3\x1f\x8e\x29\x96\x86\x05\x9c\x6f\x6d\x3d\x53\x1b\xa0\x65\x6a\xfb\x30\x0a\x0d\x2c\x27\x4c\x42\xba\xb2\x0b\x32\x14\x6f\x57\x31\x2d\x70\x54\x8f\x52\x0f\xb0\xdb\xda\x04\x20\x04\x61\x82\xeb\x97\xae\xf2\xe6\xd2\x1c\x2f\x48\xd2\x90\x87\x48\x10\x83\xb0\xf1\x65\x85\x56\x58\xf7\x16\x9b\x1f\xcd\xb7\xf0\xda\xcb\x9b\x52\xdf\xec\xd8\x8b\x86\x41\x5f\xc3\xce\xef\x4f\xd2\xc2\x12\xa4\x4f\x48\xda\x70\x5a\xbf\x91\x1a\x3e\x95\xd2\xf4\xe1\xa9\xc1\x61\x16\x44\xa0\xb1\x14\x45\x28\x39\xeb\x5b\x64\xe3\xa1\xd0\x2e\xe1\x79\x12\x21\x16\x49\xea\xb9\x35\xa3\xbe\xf0\x8a\xcc\x74\x15\x3f\x48\xe2\x92\xb4\xb2\x60\x09\x2f\x67\x49\x6c\x20\x3f\x88\x79\x15\x10\xb7\x3b\xf1\x5b\x56\x0e\xa1\xde\x7a\x23\x3a\x55\xa0\xda\x0e\x10\xff\x62\x3d\x67\xa5\xc9\xb0\x56\x20\x24\x5b\xb6\x57\x22\xba\x1a\xa9\xde\x1c\xf0\xa6\x51\xf6\xc8\x4e\xf4\x55\xe2\xce\x2b\x6b\x55\xde\x71\x66\x60\x32\xbf\x4d\x27\xe7\x65\xa7\x70\xb8\x1f\x09\xba\xab\x1a\x97\x33\x86\x9a\x4f\xbf\x51\x07\x3a\xac\x6c\x67\x38\x57\x99\x41\x11\x86\x59\x0d\x9a\x55\x84\x74\xdb\x19\xea\x8d\x23\xdd\x95\x4b\x73\xf4\xc6\xb8\x57\x5a\xbc\x60\x98\x9f\x2a\x6f\x48\x54\x3d\x26\xe2\x5b\xee\x24\x5f\x31\x46\x0f\x5f\x3d\xd1\
x55\x86\x76\xf5\x55\xff\x6c\xa4\xb7\x83\xab\x31\xa1\x6c\x55\x13\x27\x81\x44\x28\x82\x1e\x78\x08\x03\xc7\x37\xc0\x16\xe2\x27\x56\xed\x03\x6b\x8f\x54\xe2\xef\x8a\x8f\xe6\x99\x25\xf5\x78\xb5\xc8\x6c\xac\x91\xaa\xe9\x28\x98\xda\x0b\x9a\x9f\xeb\x57\x01\x97\x90\x1e\x85\xfe\x77\x6c\x48\xba\x0b\xc7\x53\xd4\xc6\xf3\x98\x6b\xe7\xb3\x5f\xb7\x91\xdd\x88\xab\xe4\x1a\xc9\xcf\x77\xfe\x73\x92\xb4\x87\x64\x70\x93\x32\x5b\x89\x56\xac\x99\x4b\x3f\x96\xda\x16\x3d\xad\xc2\x24\x67\xdd\xea\x09\xba\xbf\x61\x43\x7f\x2f\x59\x8a\x12\x65\xf3\xea\x4a\x6b\xfb\x08\x1b\x8f\x51\x14\x2e\x1d\xcb\x98\xa3\xad\x1a\x99\xb4\x30\x84\x56\x3c\x95\xc4\x73\x24\x79\x5f\xab\x27\xf8\x21\x9a\x6e\x48\x29\x9b\xf4\xba\x7d\x29\xa7\x1f\x7a\x62\xda\x24\xdd\x07\x52\x9e\x81\xf0\xfb\x83\x82\x4d\x0a\xe2\x92\xe3\x20\xd7\xef\x81\x47\xf1\xf3\xb2\x81\x2c\xbc\xc8\x64\x66\x71\x2d\xb2\xcd\x73\x6a\x55\xdd\xd2\x60\x97\x23\x98\x23\xfa\x5f\x90\x9d\x78\xb2\x8e\x5f\x0d\x0a\x5c\xbf\xa4\xc0\xbc\x39\x2b\xb9\xc3\x47\x42\xd7\xea\x5a\x9c\xd7\x01\x1d\xc6\xf3\x51\x8e\x3f\x9f\x8d\xe9\xba\x21\xcd\x1a\x61\x8c\x9a\x9b\x93\x7e\x3a\x82\x31\xbe\xb0\x02\x17\x0a\xb2\xc1\x1e\x20\xca\xf0\x8f\x45\x91\x02\x8d\x1a\xa8\x86\xce\x09\xbb\x80\xaa\x26\x40\x46\x36\x4c\x4c\xdf\x8c\x77\x1c\x76\x7a\x1e\x0e\xf8\x92\x39\x01\xc4\x28\x2d\xbd\xf4\x13\xbc\x82\x34\x7e\x5e\xd4\xb5\xb0\xcf\x10\x12\xf8\xa5\x90\x87\x96\x96\xb2\x85\xad\x13\xa9\xb5\xc4\xd7\x3f\x26\x54\x20\x73\xde\x6f\x54\xdc\x42\x96\x19\xaa\x73\x85\x3e\xb4\xa8\x48\x5d\x28\xd6\x5a\x4d\x6c\xc4\xf6\x43\x0d\x66\x6e\x44\x6d\x74\xe4\xe1\x17\x4c\x6e\x00\x8e\xdf\xb0\xc1\x6c\x36\x25\xc2\xb6\xa5\x14\x37\xc2\xac\xd0\x10\x29\xff\x91\xda\x8f\x14\x50\xde\xe0\xbc\xa9\xea\x81\xf9\xc2\x90\xaf\xc3\x1b\xa0\xf0\x8d\x44\x4b\x55\x3e\xfa\xdc\xd0\xb9\x57\x55\x3a\xbc\x76\x16\x5e\x02\xe3\x9a\x9f\x9d\x20\xdd\x55\x18\x6e\xed\xd4\x4f\x52\x67\x12\x07\xe3\x22\x5d\x2d\x0f\x21\x01\x8a\x69\x7c\x4a\x37\xbd\x22\x0a\x12\xfa\xb8\xb3\xf4\x16\x6c\x34\xe6\x59\xf8\xff\xc8\xab\x19\x85\xf9\x77\x12\xaa\x73\x4b\xe2\x46\x5e\x8a\x84\xed\xd2\x87\x04\x65\x64\x96\x53\xc8\x43\x77\x49\xf0\x8d\xf6\x12\x91\xd8\xa9\x48\xbb\x60\xed\x4e\xf4\xb9\x07\xe4\x57\xe6\x8d\x38\x09\x63\x8e\x83\x23\xf2\xc1\x01\xad\x72\xbc\x5f\xd8\x40\x83\x74\xdf\xee\x33\x3b\x55\xb5\xf8\xaa\x64\xa1\xc6\x71\x87\x45\x1a\x22\xad\x80\xe1\xd4\x67\xae\x99\x52\xd0\x1f\x45\x35\x7f\xf9\x07\x83\x90\xe9\xa6\x5d\x3c\xc2\x18\x83\xae\xce\x5a\x45\x80\x1a\xd9\x9c\x9a\x93\xc9\x1d\x6b\x84\x93\xb6\x00\xa9\x41\xdd\x95\xca\x4d\xa8\x59\xbf\xc8\x7f\x3f\x97\x3b\x93\xca\x8d\xb2\x7e\x57\x3e\x04\x7a\xd2\xb5\xb7\xb1\x11\x5f\xd1\xb5\xe3\x47\x56\x76\x9f\x7c\x33\xd7\x38\xfe\xdb\x00\xcb\xc3\xc3\x08\xd0\xca\x2a\xf8\xb7\x14\x32\x0c\xe4\xc3\xa4\xfa\x2a\xbc\xd0\x65\x41\x50\xcc\x7b\x46\x88\x93\x55\x40\x3c\xd6\x22\x36\xb7\x13\xc7\xf5\xb7\xeb\x17\x4e\x7f\x47\x4c\x4e\xe5\x8c\x84\x88\x44\x72\x4f\x5a\xf5\xe1\x83\x44\x5e\x25\x94\x64\x32\x11\x11\xa7\xb1\xa9\xc4\x0b\x09\x74\xc4\x51\x60\x04\x3f\x5e\xe0\x4c\xc3\x85\xa1\x9b\xb3\x0f\xf2\x26\x9d\x94\x54\x44\x4f\xa9\xe4\xc5\x56\x75\xf7\x8d\x54\x98\x77\xe4\x23\xd7\x4b\xdf\x04\x49\x0b\xca\xde\xb1\x93\xa9\xc3\x4e\xf4\xf8\xd9\x44\x21\xc6\x01\xda\xf5\xfc\x88\xe9\x3f\x92\x34\xcc\x92\xb6\xfc\x6b\x2b\x62\x25\xb0\xa2\x26\xc2\x0e\x8f\x22\xef\x22\x0b\x38\x21\x5f\x89\xf8\x46\xe0\xd7\x03\x6b\xac\x8c\x76\x84\xe1\xe1\x99\x3e\x9e\x94\xb9\x1b\x65\x01\x90\x2d\x47\xcb\x82\x87\xea\xd3\x0d\x85\xf7\x72\xb6\x1b\xd6\x48\x4b\x40\xe3\x5a\x62\xde\xa4\xf5\xca\x55\x70\x6d\xc5\x4d\xff\x16\x8e\xfc\x79\x6a\xcb\xd8\xf9\xf7\x69\x1e\x88\xb8\x4b\x04\xe2\xf4\x6c\x46\xae\xd1\x36\xca\x4d\x08\xd8\xad\x49\x2c\xd2\x2c\x72\xf8\x4e\x4b\x4a\x8d\x86\xe9\xb9\x1f\xf7\xb0
\x84\x98\x8a\xa0\xaa\x97\x39\x94\xa4\xb8\xdc\x50\xa5\x5d\x82\x9f\xd9\xbc\xbd\xd9\xfb\x5f\x37\xc1\x2f\x7c\x7d\x8a\x89\x41\xfa\xa8\x73\xa8\xc4\x31\x77\x86\xdd\x01\x82\x96\x35\x4a\x1a\x97\xa7\x89\x55\x9e\xca\xa0\xb9\xd2\x4c\x43\x96\x9e\x20\xce\x1d\xe3\x44\xf7\xf0\x20\x18\xc7\x56\xb5\x8c\xd4\x2a\x31\x23\xbd\x51\xe0\xaf\x03\x01\x69\x8a\x24\x3b\x60\x8f\x91\xbb\xb6\xcb\x9c\x14\xd1\xe7\xbf\xb8\x3b\x57\x3b\x5f\x4a\x10\x66\xc5\xb6\xf2\x57\xee\x58\x25\x75\xba\x2d\x8b\xce\x3a\x77\xf1\x64\x11\x9b\x88\xba\x7c\x09\x53\x72\x62\xe9\xa8\xb6\xc3\xf2\x04\x42\x65\x3d\x26\xba\xd8\xc5\xed\x27\x97\xa0\x98\xab\x9b\xcc\xd0\x61\x0f\xe5\x01\xce\xf4\x0d\x73\x8e\x1b\x22\x95\x89\x71\x3f\xd8\x68\xa1\x0e\xa3\x72\x86\x48\xba\xdd\xcc\xc0\x18\xf5\xa4\x44\x9c\x14\x3c\x7f\xbc\x94\xb7\x0c\x6a\xc7\x10\xe9\x2d\x4d\xf0\xc6\x3c\x48\x26\xb2\x8a\x7e\x85\xc4\x74\x21\x03\x0b\xdb\x26\x71\xca\xa5\x43\xa3\xc4\x46\x0a\x19\xc2\x87\x47\xb7\xaf\x9f\xd8\x79\x13\xb8\x7f\x1d\xde\x73\x2e\xf9\xb7\x2d\x0c\x7b\xc0\xf6\x64\x2b\x0b\x8d\xe8\x61\xe3\x46\x06\x36\x9b\x7d\x70\x62\x76\xe4\xca\x6a\xfa\x37\xea\xea\x9c\x57\x96\xa2\x5a\x59\x72\x41\x43\xef\x32\x0d\x03\x49\x00\xcd\x42\x40\xdc\x28\xd1\x42\x57\x55\xd5\x3f\x29\x67\x40\x8e\x91\x39\x8f\x2f\x0b\x7d\xde\x2f\x67\x9c\x87\x3d\x8a\xf8\x3c\xa4\x96\x61\x1a\x9b\x7f\x28\x4c\x36\xc9\x43\x0a\x03\xcc\x9c\x1f\x6a\x89\xeb\x7d\xf7\x8a\x59\x6d\x44\xf1\x6d\x92\x8c\xd6\x6d\xde\xa4\xd1\xb0\x36\xad\x9f\x6a\xbe\x96\x38\xce\x8b\x59\x6a\x40\x2b\xbb\xc7\x56\x3f\x2a\x2b\x67\x04\x96\xe7\x47\x91\x67\x89\xc2\x0d\x4b\xa4\x4b\x9d\xec\x80\xbd\x46\xfd\xaa\x0e\x3d\xa0\x06\xac\x2d\x76\x8c\x8b\x2f\xe3\x4a\x8c\x1f\xc1\x17\x54\xb8\x5e\x1a\xc1\xa0\x17\x15\x41\x95\x06\xb7\x9c\x49\x95\xb7\xe0\x1e\x8d\x87\x34\x93\x00\x7c\x50\xf2\x6b\x69\x50\x2f\x51\xce\xf0\xe2\x44\x8d\x86\xe5\xde\x2a\x1b\xac\xe1\x3e\xa6\xcc\x88\xbd\x88\x1f\x4f\x22\xb4\x67\x2f\x64\x9b\x31\x2c\x4c\x6e\x64\xa1\x27\x30\x93\x03\x97\xa6\x67\xfd\x35\x58\x43\x86\xbf\xc9\x62\x7b\xd0\xba\x4e\x26\xc9\xcb\x4c\xd8\xc0\xd8\x4d\x8d\x1b\x2b\xc9\x3f\x5d\xcc\x2b\xb1\x44\x59\x4f\x97\x51\xd5\x09\xad\xb7\xce\x0b\x7b\x08\x0e\x1a\x9d\x10\x11\x43\xfa\x5a\xe9\xa9\x6f\x74\x60\x53\xb8\x68\x81\x5e\xda\xb9\x7b\x99\x35\x38\xb2\xc4\x4f\xc7\x62\x60\x13\x46\x66\xd9\x45\xd0\x67\x06\x77\xda\x58\xbe\x49\xc2\x6a\xba\x33\xc8\xfa\xb3\xe1\xfc\xa7\x6e\x64\xec\xd3\x43\xc7\xf5\x69\x46\xb8\xf3\x4b\xa4\x0e\x1b\x4d\xb6\xcf\x56\xd1\x0d\x6d\xe8\xb3\xb0\xcc\xc9\x68\xa3\x4a\xa6\x8b\x4f\xc7\x22\x96\xa8\x7b\x89\x59\xba\x05\x21\x7a\xb5\x23\xc3\xe8\xe3\xff\xe5\x5d\x73\x91\x7f\x46\xad\x66\x03\x76\xf0\x67\x4b\xd2\xba\x27\x07\x09\xb5\xec\x81\x25\x9a\x21\x5e\xc8\xbd\xe6\x2e\xc1\x69\x15\xeb\x9b\x22\x63\xf2\x5d\x0b\x68\xa8\x75\x3d\x64\x0a\xa4\xfe\x40\x64\xcd\xc1\xca\x69\xa4\x0e\x48\x88\x0c\x9b\x20\xf3\xba\xe8\x6e\x84\x9b\x02\x81\xf0\x87\x75\x16\x18\xd6\x8b\xdc\x1f\x75\xf5\x98\x75\x7e\x32\xc6\x50\x17\xf1\xd2\x21\x07\xde\xd0\xea\x2e\x4a\x33\x3d\xf5\xf6\x33\x09\x6a\x73\x93\x72\xcb\x67\x90\xd4\xfc\x88\xf0\xa0\xaf\x69\x18\x34\xfb\xd7\xd1\x52\x13\xb9\x26\xeb\x18\xb1\xf3\xf2\x38\x8c\xa7\x71\x2a\xf2\x08\xe3\x35\x97\xb8\x91\xdc\xc8\x75\xca\x09\xdc\xbe\x7b\x26\x5d\x48\xa3\xf5\xe4\x2f\x47\x89\x4b\xe1\x78\x21\x96\x7e\x13\x74\xbe\x0a\x1b\x84\xa9\x43\x11\x57\x57\x8d\xe2\x08\x8c\xd2\x88\xa3\xa9\x85\x41\x11\x81\x79\x93\xa7\xcb\xd7\xac\x3b\xc7\xfa\x3d\x38\x0b\xcd\xdd\xa1\xde\x73\x01\x8a\x49\xae\x83\x32\xcf\x45\xa1\x9d\x8d\x64\xde\x8c\xa4\x4a\x2d\x7b\x88\xce\xa4\xd6\x30\x17\xf0\xd8\x84\xf3\x2f\xb2\x1f\xaf\xb1\xe9\x8b\x8d\x94\xf3\x7a\xba\xff\xcc\xf0\xbc\xc9\xc8\x50\xd9\xbf\x59\x13\xff\xa1\xf9\x09\xec\x06\x5f\x64\x8c\xda\x12\xc4\x3a\x1e\xa6\x34\x8a\x7e\x1
0\x16\xbd\x51\xa0\xcc\x32\x3c\xe4\x42\x40\x00\xd7\xbd\xa3\x71\xd2\x06\x95\x59\x8c\x51\xb8\x14\xf3\x74\x0f\x89\x74\x88\x02\x68\x8c\xc8\xb1\x57\x41\xec\x7d\x5b\x19\x18\x9b\xd0\x4b\x8f\x39\x1e\xd3\x5a\x7b\x26\xf4\xf6\xa4\x7f\xc8\x91\x9e\xb8\x6e\x99\x64\xc9\x48\xf1\xec\xe4\xf4\x78\x07\x24\x5f\x66\x06\x5f\xab\x20\x66\xb6\xa7\xff\x93\x31\xc5\xe4\x28\xb5\x62\xb3\xfc\xff\x5f\xaa\x03\x40\x74\xce\x25\x1e\xe0\x48\xe6\x5a\x2b\x90\xba\x15\x90\x4d\x4c\xd7\xe7\x89\xac\x39\x45\x5e\xe7\x0b\x2e\x02\xa4\x30\x65\x7d\x77\x8e\x43\x05\xd3\x15\x07\x01\x35\x52\x3b\x3e\x68\xfc\x3d\xfc\x48\x34\x22\x41\x13\x18\x61\xd9\x46\xd2\xaf\x5e\xe8\x32\x8a\x3c\xbb\xc8\xbe\x25\x6f\xbd\x4a\xde\x7a\x9d\x04\x2d\x2d\x1f\x69\xc2\xc8\x6a\x47\x82\xa5\x5e\xd8\x36\x72\xcc\x6d\x81\x7c\x0b\x70\x8f\x58\xbd\xc8\x54\x84\x5d\xc1\xe2\x09\x99\xa4\x81\xde\x66\xbf\x60\x27\x69\x27\x5e\x45\x65\x37\x4f\xe8\x41\x2f\x76\x87\x41\x48\x50\x4c\x44\x14\xb5\xa9\x93\xe6\x5a\xe7\x4c\x6a\xcd\x07\xc2\x41\x90\x01\x8d\xd2\x3d\x93\x05\x6b\x33\x0c\x42\x66\x71\x63\x9f\x76\x2d\x99\x4a\x95\x72\xbe\xc8\x25\x1c\x8f\x4d\x7e\x66\xd4\x1e\x1e\xcb\xe6\x91\x2d\x85\x72\xf4\x8c\x17\x8a\xd0\xcf\x46\xef\x5e\x08\x92\xec\x65\x30\x46\x20\x08\xca\xa0\x38\x68\xc4\xe7\x37\x79\x12\xe2\x8a\x6f\xbd\x04\x26\x4f\x5d\xdb\x83\x68\x90\x42\xbb\xb5\x52\x4d\xc6\xa6\xfd\x92\xa7\x9e\x4a\x82\x7a\xa1\xfc\xe1\x65\xac\x0b\x18\xa9\x65\x7e\x45\xe5\xe4\xf3\x73\x1f\x65\x74\xc4\x9f\xa4\x9d\x22\xba\x82\x60\xdc\x8e\x4a\x14\xaa\xb4\x3f\xcf\xdd\x5b\x2b\x4b\xf0\x13\x2f\x73\x94\xae\x9d\x9d\xe0\xc9\x60\x66\x33\xb2\x00\x64\xed\x95\xb0\x85\x3a\x2c\x3f\x74\xc0\x06\x34\x32\x3b\x79\x62\xc8\x77\x80\x11\x31\xc0\x5c\x28\xca\x5e\x13\x8b\x78\xaf\x2b\x19\x6d\xcd\xba\x27\x41\x9f\xaa\xef\xc6\xdf\x3d\xb3\xb2\x50\x8d\xb3\xb8\xad\x28\xd6\x28\x49\xcf\xaf\x4f\xc5\x36\xb6\x27\x53\x37\x8b\xa4\x6e\x8a\xbf\x85\x79\x2e\x2d\x59\x47\x1a\x61\xc5\x3d\xf2\x03\x6a\x8d\x22\x39\x9d\x63\x27\x69\xfd\x08\xa7\x75\xd1\x99\xff\x0b\xfa\x02\x8b\x3a\x7a\x0b\x83\x25\x54\xdc\x2c\xc8\xc3\x39\x48\xcf\x99\x35\x91\x08\x99\x51\x59\x30\xa7\x83\x26\x85\x4c\x11\xa7\x1b\xd1\xec\x4f\x0a\xe9\x01\xb7\x63\x81\x4f\xc8\x76\x39\x33\xfc\x21\x39\xc8\x24\x54\xcf\x83\x92\xa7\xc1\xa7\x6b\x09\xdc\xb4\x7c\xea\x40\x26\x14\xc3\x90\x30\x75\x29\x08\x0b\xab\x6b\x83\x81\x4e\xfb\xd6\x66\x76\xcb\x76\x28\x7c\x1c\xb3\xe3\x7c\x63\xc0\xb7\xf1\x21\xd9\xf6\x2f\xb4\x2c\x8a\x16\x60\x31\x66\xce\xc2\x16\xab\x23\x4b\x00\x7e\x99\xcb\xeb\x17\xc9\xf1\x1e\xb6\x28\xf1\xbe\x65\x6b\x91\xe4\x1e\xd8\xcc\x54\x5c\x03\x7c\x32\x09\xcd\xd0\xc1\x31\xe1\x4a\x42\x67\x6f\x90\x69\xa9\x12\x12\x9f\xdb\x64\xd8\x9b\xcb\xc0\x02\x99\xe8\x5f\xe2\x70\x09\x8c\x92\x80\xf4\xea\x38\xd8\x20\xda\x1b\x1a\xa0\x78\x25\x96\x2b\xd4\x90\x24\x42\xfb\xb2\xbb\xaa\x5a\x7e\x99\x80\x86\x68\xd3\xb8\x96\x94\x6b\x86\x25\xce\x3e\x60\xe4\x8d\xbc\x79\xd9\x09\xa9\x2d\x84\xb7\xe2\x9e\x25\x17\x68\xad\xa6\x38\x40\xcc\x99\x4c\x2d\xa1\x5c\x23\xf5\x76\xd2\xce\x29\xf8\x64\xbb\xe4\x2c\x9f\x44\x9e\x95\xd3\x81\xbb\x9b\xd7\xe9\x47\x45\x52\x9b\x8c\x3c\xfa\x1d\x6a\xcc\xd5\x78\x31\x62\x9e\x0d\x31\x83\x06\xe8\xfd\x2c\x94\x34\xd2\x96\xfa\x25\xbf\x81\x35\x9c\xdf\x29\x5f\xbb\xca\xe1\x32\x13\x4f\x54\xdc\xd9\xc4\xae\x96\x11\xce\xd0\x5b\x1a\x5a\xc5\x0d\x5c\xbd\xfd\xe9\x0b\xfd\xc1\x4e\x25\x19\x91\x88\x9a\xa1\xb5\x63\x42\xec\xc7\xc5\xe0\xe5\xc2\x61\x2d\xe3\x51\x2d\x5a\x03\x3a\x52\xda\xcb\x2a\x60\xc3\x77\x04\x64\xb7\xbf\x23\x2b\x3d\xaf\xb4\xbb\x37\xdb\x09\x66\x3d\x66\x1d\x29\x76\xd4\x1d\xaf\xff\x83\x96\x30\x08\x2c\x3f\xac\xf1\x02\x24\xca\x4c\x90\x14\x2f\x16\x01\xa5\x50\xd5\x88\x5b\x55\xcb\xd8\x5a\x3b\x22\x84\x6f\xdb\x3d\xbb\xb1\xec\x25\xf8\x41\xe9\xc8\x4e\xbc\x13\xa8\xec\x
12\x2f\xee\xbe\x47\xf5\x0a\xb2\xcd\xf0\x5d\x48\x54\x31\x9c\x85\x63\x58\xb3\xc0\xb9\x7e\x6b\xa5\x11\xba\x07\x75\xce\x49\x7c\xb5\x72\xfb\x56\xe0\x4c\xca\x03\x1a\x08\x94\xff\x81\x15\xce\xc2\x01\xd4\x68\xb7\x4c\x09\x17\xc7\x1f\xcc\x06\xcd\x32\xe0\x02\x6c\xf3\xab\x90\x74\x6c\x3c\xab\x4b\xf1\x01\xd8\xe1\x37\xc9\xd8\x21\x4b\x4d\x05\xea\x7d\xea\xc3\x2f\xbc\x0d\xd9\x54\xe5\x15\x2e\x97\x6c\xdf\x1b\x2e\x4c\x7f\x34\xe8\xaf\x2c\x65\x2b\x24\xb2\x8c\x3e\xfb\xfc\x77\x89\x0a\x2f\xdc\x41\x44\xb9\x4a\xd3\x67\x05\xd6\x5f\xc6\x47\xc2\x66\x48\x07\xcf\x53\x4c\x6d\xbd\xf2\x73\x04\xb5\x06\x2b\x6f\x60\x11\x70\x18\x9c\xdc\x34\xe2\x8d\xba\x5b\xb6\x89\x3b\xfa\x26\x75\xd1\x3f\x2d\x6a\x42\x67\x0b\x6b\xd9\x4e\x32\x17\xb0\x92\x45\xc3\x8d\x05\xda\xa8\x06\x0a\x32\x87\x59\x80\x55\x39\xfd\x85\x1d\x92\xae\x86\xb9\x15\xc8\xe7\x8f\xa6\xc9\x7e\x44\x32\xc2\x26\x23\x53\xc4\x7a\x6b\x57\xc1\xcc\x87\xfb\x4e\xe3\x50\xab\xe2\xe7\x10\x8a\x25\x4f\xcc\x0e\xa4\x45\xd8\x55\x55\x14\x1f\x02\x20\x7e\xb0\x19\x62\xf1\x25\xeb\xf7\xda\xf2\xe3\xa9\x99\xe8\x01\xb6\x59\x7e\x6d\x4b\x62\x3a\x58\x5b\x59\x26\x99\xdd\x70\x99\x40\xdd\x5e\x22\xde\x4b\x63\x67\x93\xd8\x91\x77\xd3\xee\xfb\x4c\x22\xdc\x63\x58\xaf\x9f\xee\x85\xd0\x5d\x13\x75\x66\x3f\x33\x1f\x70\xdf\x98\x9c\x60\xf1\xae\xfc\x5c\xe7\x4a\xe1\x47\xa9\x1a\x2e\x28\x44\xfc\xda\x68\xcd\x3b\x38\xd9\xa9\xdb\xb4\x25\x59\xc6\x26\xed\x37\xc5\xd4\x65\x0d\x0b\x8f\x51\xe6\xf8\x3f\x2d\x71\x0d\x80\xc8\x9b\xb8\x0b\x39\xc6\x44\x9e\xd9\x9a\x0f\x92\xd1\x08\x9b\xf6\xf6\xcb\x0b\x65\x25\x2a\x7a\x2a\x11\xc0\x93\x59\xfc\x27\xe7\x1c\x3d\x82\x75\xae\xb8\x7f\xa8\x58\xfa\xce\x3c\xc4\xeb\x20\x07\x02\x33\xe2\x2b\x89\x57\xd0\xee\x32\x53\x90\x0c\xa1\x11\x73\x16\x0c\x3b\xca\x51\x8a\x95\xd5\x97\xd0\x52\x99\x12\xa8\x03\x50\x1b\xe1\x4c\xb5\x61\x45\x17\xe3\x8c\x8d\x34\x32\x4a\xf1\x82\xd8\xf3\xba\xce\xe8\xde\x30\x22\x0f\xa0\xa0\x16\x2b\xc4\x77\x0c\x17\xc8\xa6\x7b\x94\x7f\x99\x2d\x24\xac\xe4\x3b\x58\xd8\xf7\xbf\x22\xef\xfe\x89\x7e\x21\x20\x6c\x4d\x23\x2c\x2f\x6b\xdb\x5f\xe6\x48\xb8\xa7\x8c\xf0\x8d\x92\x8d\x67\x6a\x9d\xae\x56\xa7\xd5\x22\x65\x65\x32\x37\x0c\x9c\x23\x61\xdc\x14\xe2\xff\xd4\x0d\x61\xe1\x8d\x5a\xf5\x68\x9b\x42\x4c\x28\xe8\x20\x73\x62\xdd\x4d\xbb\xcd\xf0\xdf\xc2\x67\x6b\x64\x04\xc5\xa8\xc9\x9e\x0e\x59\xf3\xb7\xf8\x25\x6f\xad\x6d\x59\x78\x7e\x99\x97\x4c\xbd\x1e\x6f\xc2\x84\x1c\x08\xec\xe3\xcc\x88\x59\x17\xeb\xc5\xae\x7e\xdb\xee\xde\x2f\x50\xb3\x4a\x73\x64\xae\xa9\x0e\x91\x5a\x92\xfa\x6b\x59\x8e\xb1\x8c\x76\x3a\x7c\x2e\x98\x6a\x76\x61\x04\x65\x3a\xf4\x34\x92\x36\x0d\xd6\x09\xc7\x7e\x15\x0d\x42\xea\xc7\x5d\x9a\x27\x4e\x2f\xda\xc3\xc5\xbc\x59\xbe\x45\x1c\x01\x22\xe3\x7b\xb4\x79\x77\xa2\x2b\x0e\xd5\x80\xe2\x96\x96\xad\xe3\x5f\xe2\x29\x51\xd2\x9d\x59\xd7\xb1\xb6\x07\x3f\xbd\xf1\xcd\x83\xec\x59\xac\xb0\x2c\xdb\xc8\x6c\x81\xa4\x57\xcd\x72\x40\x07\xd8\x85\x3f\x7f\x31\x1d\xf0\x54\x0e\xbc\xcd\x79\xf9\x43\x15\xfa\xe3\x0d\x27\x3f\x21\x17\xcb\xe2\x65\xbf\x37\xb5\x48\x64\xcf\xb9\xc9\x9e\x60\xe3\xd2\xc5\xa3\x14\x58\xdd\x2b\x91\xe1\x59\x3d\x0c\xeb\xeb\x15\x8e\x80\xe4\xec\x41\x45\x4a\x88\xd7\xdd\xa4\x21\xe1\x21\xd9\x7a\x43\x69\x6b\xd0\x94\x04\x0b\x4d\xcd\x74\x0f\x50\x0d\xe0\x06\x8e\x4d\x3f\xf9\x0e\xbd\x29\xfb\x25\x54\xda\xfc\x34\xd3\xcb\xb3\x31\xf0\x4c\xef\x0c\xfc\xf0\x3b\x4d\xc7\x13\x6c\x2f\xc8\xf9\xe1\x50\x70\xfb\xa4\x26\x87\x8a\xe0\x26\x85\x08\xe1\x1c\x30\x0a\x73\x4e\xb9\xe6\x3d\x2c\xe7\x70\x49\xaa\x7b\x53\x63\x94\xb0\x66\x8e\x52\xba\xb3\x69\x9e\xe8\x9e\xba\xb5\x9b\xd4\xf6\xa5\xa5\x5d\x06\x3c\xc4\xae\x0b\x5a\x5a\xe5\x55\xc7\xba\xf6\xdc\x5c\x26\xa1\x4a\xba\xa2\x0d\x32\xc8\xa4\xf8\x57\xec\x65\xfe\x70\x14\x6e\xd0\xb7\xce\x57\x38\x3f\x43\x9b\x72\
x44\xf0\x11\x4e\x4f\x1a\xa7\x46\x80\xa1\xdd\x0f\x87\xca\x94\x23\x3f\x09\xc8\x40\xe6\x66\x58\xd8\xec\xee\x40\x1a\x10\x05\x1d\x72\x44\x8b\xc2\x1c\x63\xe5\xb4\xe2\x25\xa9\x49\x28\xec\x87\x59\x10\xb4\x13\x06\xaa\xb5\xe5\x7b\x04\x45\x4b\x62\xec\xef\x64\xf0\x10\x4f\x51\x89\xa9\xad\x10\x57\x99\xcb\x2c\x85\xad\x0b\x42\x1f\xd0\xea\xf3\x34\x54\x9e\xb7\xf3\x82\xf8\x24\xeb\xbd\xf4\x7b\x8f\x97\x6d\x1a\xa4\x57\xbc\x4e\x7b\xd7\xd6\x96\xe7\x24\x92\x33\x73\x4a\x20\x18\x10\xd3\x47\x19\x01\xa3\x96\x74\x12\x27\xe2\xcb\x90\x9e\xca\x30\x82\x91\xe2\x92\x5d\x1c\x47\x4e\x18\x1a\x39\x7a\x43\x5a\xde\x7b\xce\xb9\x8a\x3a\x73\xb1\x55\xc3\xd0\xea\x51\xf9\x92\x24\x9d\x92\x91\x25\xed\x78\xf0\x6e\xb8\xda\x30\xb6\x16\xf4\x91\x68\x2a\x3c\x08\x84\x6c\xc7\xa4\x9f\xdd\xcb\xc1\x3f\x7e\x9c\x7e\x02\xf1\x52\x9b\x2f\x9e\x49\x83\x6b\x0b\xf6\xd9\x2d\x02\xff\x3c\x46\x54\xf9\xa5\x0d\xe5\xe1\x14\xa1\xd0\xa1\x8b\xb0\x92\x57\x35\x02\x0a\x86\x9d\x9d\x72\x22\xc4\x6e\x6a\x15\x45\xb4\xa2\x75\x51\x34\x6b\x09\x5d\xc0\xa5\xc2\x42\xd7\x10\x59\x6e\x2d\x82\xf4\x26\xb6\x80\x13\x71\x77\x51\x81\x96\x59\x90\x79\xe6\x1b\x87\x43\x05\x9d\x56\x6f\x23\x03\xaf\x60\x25\xfa\x8a\x4f\x4c\x74\x7c\x4c\x8b\x54\x0b\x1f\xa4\x2d\x7a\x68\x8b\x34\x91\x4c\x87\x15\x73\xd5\xf5\xae\x3e\x96\xc7\x2c\xe5\x2e\x7e\x0e\x94\x1a\xf5\xdb\x4c\x4d\x61\xdb\x21\x6c\x26\x7b\xf2\xa9\x50\x88\xce\x0c\x48\x2d\x53\x75\xfe\x9b\xa6\x26\xe8\xb8\x8b\x4a\xe3\x44\xf7\xc3\xe3\xd3\x5a\xf8\x8d\x48\xd9\x5a\x5d\xa1\x77\x03\x26\x8d\x24\x2e\x71\x65\x37\xb2\x66\xb0\x66\xf1\x38\xe2\x85\x03\x67\x93\x9e\xa0\x45\x8c\xcb\x2c\x5c\xcc\xca\xf4\x2c\x63\xd5\x6b\xbc\x83\x5d\x5e\x05\xf1\xf8\x67\x53\xd2\x0a\xe6\x6f\xb5\x1d\xe7\x99\x83\x36\x94\x25\x61\x93\x86\xd6\x09\x3a\x1c\xec\x81\x74\x5a\x57\x5c\x73\x19\xa6\xb4\x62\x6a\x7f\x1c\x24\xfa\xae\xad\xfc\x88\x1d\x15\x5f\x5c\x2c\xc0\xd8\x71\x4b\x49\x0a\xc5\xa1\xbf\xb3\xcc\xa4\xb2\x89\xbf\xd3\xe9\x7e\xd7\x89\xc0\x73\x2b\xd8\x2d\x2c\x6b\x2d\xf7\x03\x63\x34\xcb\x6c\xee\x20\x6c\x14\x04\x85\x8b\x4f\x8f\xdc\xf6\x7e\x29\x1a\xaf\xd3\x0d\x43\x03\x16\xd6\x25\x59\x16\xae\xae\x28\x2f\xa2\xd4\xbf\xb2\xa4\x41\x95\x65\xc5\x10\x4c\xf0\x88\xec\xed\x5c\x6b\xcd\xd3\x3b\xb5\xe2\x21\x54\xe4\x81\xb0\xec\xf7\xf4\x91\x08\x6f\xba\x76\xa5\xba\xb6\xb2\x8d\x52\x02\xd2\x5d\x5c\x19\xa6\xa8\x9e\x51\x3b\xf5\x59\xe2\x7a\x46\xba\x7a\xdf\xf9\xa2\x10\x2a\xe4\xe7\xbb\x3a\xd0\x14\xd1\xb2\x72\x7c\x44\x1d\xc9\xc3\xc0\x59\xc7\xb8\x1f\x4a\x85\x52\x45\xb5\xa1\x29\xe5\x9b\x8c\x51\x29\xaa\x15\x76\xf2\x97\x26\x18\x02\x22\x9c\xc5\x32\xed\x94\xd3\xb9\x0e\xd2\x32\xd4\x19\x96\x7e\xe5\xc8\xa2\x70\x1d\x3a\x11\xa5\x12\xba\x2b\xdb\xa2\xf6\xc8\x07\x48\x85\xfb\x3a\x01\x1e\xe3\x48\x26\xe9\xb4\x2e\xc8\xd5\x59\x74\x2c\x21\x8e\x52\xcb\xef\x4a\x6a\x58\xcc\xa5\x00\x3e\x6f\x17\xf6\x0f\xc5\x0f\xc8\x7c\x8c\xf5\x52\x4d\x95\x0e\xe4\xac\x5b\x90\x83\x30\x80\x3e\xa5\x04\x67\x69\x58\x5e\x44\x09\xac\x17\x28\xde\x47\x54\xaf\x49\x7f\xe4\x21\x21\xe9\x23\x08\x8b\x68\x22\xc1\x5e\x44\x8e\x47\x26\xbd\x15\x1f\x64\x25\x4c\xb7\x0e\xce\x47\xc7\xad\x10\xe0\xcd\x34\x49\xb5\x8c\xeb\x17\xff\x20\xc6\x48\x0d\x63\x15\x56\x17\xc0\xa4\xe0\x48\xb6\x7b\x7b\x0c\x58\xc4\x8d\xf9\xd8\x90\x39\x7f\x83\x7d\xe0\x49\xee\x87\xae\x76\xab\xaf\x09\x70\x8f\x16\xcc\xba\x15\x85\xf9\x50\x14\xe3\x22\xda\x5e\x89\x75\xe6\x78\xdf\x3a\xba\x54\x17\x84\xf5\x85\xed\x51\x5c\x39\xeb\x69\xa4\x47\x04\x50\xa3\xb5\xa6\x41\xf0\x24\x1e\xa9\x49\x0f\x08\xe2\xa5\x44\x33\x29\xb4\x5e\x80\xd0\x99\x23\x2f\x9e\x6c\x09\xae\x08\x00\x12\xa6\xda\x75\xa4\xbe\x23\xc3\x77\x46\x7b\x45\x94\x6e\x80\xb4\x4c\x68\x50\x3e\x89\xe3\xfd\x7f\x80\xaf\xf6\xe0\x4c\x44\x76\x22\x8a\xba\x39\x86\x07\x21\xe9\x67\xe3\xdc\x57\xe0\x09\x1b
\x69\xf6\xbd\xb4\x2d\x37\x75\x0f\xe7\x4b\xb8\xa7\xed\xbf\x2e\x0f\xcc\xd2\x12\x32\x7a\x40\x72\x91\xb3\x3d\xfa\xc3\x12\x45\x07\x46\x6d\x57\x7c\xb4\x52\xcc\xd2\xf8\xc9\x00\x87\x2a\x6f\xa7\x40\x4d\x0b\xe1\x61\xe3\xd4\x63\xb0\xf7\x08\xef\x61\x98\x38\x5e\x42\x67\xd6\x0c\x4a\x56\x10\xc9\x56\xf4\x6d\xd0\x67\x6e\x40\xac\x67\xd0\x5e\x89\x41\x79\xd8\x8f\xa9\xae\xe4\x1f\x69\x13\x33\xbc\x59\xb0\xc0\x93\xf0\x4a\x66\x75\x6f\xaf\x83\xc0\xb5\xfd\xa5\x02\xda\x5a\x11\xe5\x09\x2d\xbf\xec\xb6\xf5\x76\xe6\x28\x2d\x9c\xab\xd4\xee\x9f\x01\x3c\x0c\x8c\x08\x84\x0a\x74\x67\xeb\x8c\x70\x16\xf5\x5e\x9e\xea\xa5\xf4\x7f\x8c\x58\x65\xca\x3b\x49\x28\x1a\x97\x97\x0c\xf8\x66\xd7\x77\x1b\x85\x88\xae\x36\x8b\x36\x82\xd7\x9f\x50\xe6\xbc\x6d\x28\x10\xfc\xd2\x43\xba\xa2\x75\x4c\xd8\xcd\x48\x29\x11\x74\xdc\xd2\x0d\x1d\x83\x41\x6e\x13\x31\xe8\xc1\x82\x74\x5c\x15\x6f\xe5\x25\xf4\x4f\x33\xca\xfd\x6f\xe1\x90\x5f\xb5\x0e\x36\x0d\x03\x7b\x79\xe1\x3c\xce\x8c\x38\x30\xe5\x30\x0c\x83\x1e\x11\xa2\xdf\x42\xeb\x77\xfb\x92\xdb\x23\x20\x6e\xd0\x92\x5a\x42\x4e\x38\x8e\x40\x25\x8d\x92\x15\x35\x86\x7e\x69\x34\x9f\x95\xd1\xdb\xfa\xfe\x84\xe0\xfb\x8d\xb4\x87\xf6\x6e\x66\x16\x46\x6a\x20\x8b\x2b\x75\x30\xc9\x20\x15\x81\xb4\x4f\x34\x7b\x47\xaf\xeb\x45\x67\x42\xd8\x88\x0b\xfe\x81\x49\xbf\x17\x3d\xe1\x97\x03\x5c\x6f\xc0\x0f\xea\x98\x50\x17\x3e\xa9\x7e\xc7\x20\x90\x4e\x01\x99\x34\xca\xc2\xd3\x28\x92\xbf\x09\x25\x56\xa1\xb2\xb5\xff\x51\x8b\x2b\x1e\xdd\x1c\x01\x8c\x5e\xbb\x6e\x96\xe2\x7d\xb8\xf8\xa4\x6f\x19\xa5\x72\x0c\x8f\x77\xd9\x9a\x29\x44\x2b\xdb\x12\x75\xee\xf7\xa2\x44\x98\x07\xcd\x05\x50\xce\x0c\x61\x37\x94\x90\x8b\x42\x06\x1d\x1d\xfa\x74\x67\xe6\xa6\xfe\x6c\x68\x19\xa8\xd0\xd5\xf3\x4b\x46\x59\x84\xfa\x8f\xf6\x6a\x52\x0a\x47\x49\x6b\x8c\xa4\xcd\x02\xfe\x46\x8a\xca\x43\xd8\x27\x45\xea\xe8\x74\x2c\x63\x8b\xea\x71\xd1\x89\x7b\x72\x49\x8b\x09\x93\xad\xf9\x15\x42\x34\xc5\xce\xfa\x1c\x55\x53\x8b\x5a\x97\x42\x65\x16\x0c\xf8\x33\x89\xcf\x11\xd7\x0d\x79\xfe\xaa\xb0\x93\xb4\x87\x10\x3a\xca\xf4\xe1\x41\x8d\xc6\x72\x04\x5b\xd2\xa4\xdd\xaa\x0e\x2d\x71\x25\x5e\x82\x28\xcc\xe2\x4e\x8e\xeb\xed\x05\x73\xde\xa5\x6d\x89\x2f\x2a\xbf\x7a\x2c\x71\x2a\xef\xc6\xe3\xe0\xc7\xf2\xc0\xf3\xc3\x06\x88\x3e\x34\x3b\x32\xb0\xf2\xe1\xd3\xc1\xdb\x0d\x3c\x4c\x36\xd4\xd6\xd3\x03\x3b\x92\x69\x35\x35\x84\x24\x43\xed\x11\xc7\x01\xea\xae\x24\x03\x6f\x66\x47\xff\xd2\xb4\xfe\x04\x90\x38\x69\x9b\xfa\x2a\xa3\x70\x6a\xd2\x98\x40\x3d\x3c\x4f\xd1\xd7\x37\x41\x2b\x8b\x8b\x24\x2d\x18\x7d\x92\xb8\x7f\x13\xb7\x88\xdc\x4f\xb5\xb1\xf4\xcf\xc6\x4f\xfd\x2e\x51\xa2\xbc\xe2\x92\x87\xbb\x94\x58\x05\x2b\x33\x2d\xaa\x41\x5e\xe5\xe4\xb3\x43\x6b\x12\x7d\x33\x86\x96\xc7\x62\x9b\xd8\xc2\xda\x5f\x41\x1a\x4d\x6c\xe3\x87\x27\xce\x12\x5e\xb8\x6e\xe4\x88\x21\x91\xde\x81\x53\x62\x08\x77\x66\x6d\x73\xd0\xf9\x30\x3b\x61\x15\xd5\x05\xc0\xf5\x40\x98\xc0\x53\x9a\xf2\x6c\xd0\xb4\x5f\xc8\xf7\x2a\x8b\xd9\x05\xc4\xcb\xad\x4f\x32\x96\xe3\x34\x42\xea\xe5\xf4\x9c\x24\x5f\x89\x8c\xe8\xf6\xc0\x91\x8a\x3d\xa0\x61\x51\x20\xbe\x36\x29\x47\xfa\x42\xa3\x40\xf5\x78\x0a\x6b\x5e\x03\x26\x48\x46\xfb\xcd\xd3\x9e\x49\x83\x35\x5c\x10\x1a\x9c\xe0\x68\x1e\x03\x52\x20\x3b\x58\x4d\x24\x23\x64\x58\x6f\x06\xa3\x97\xd1\x1c\x45\x97\x61\xe9\x8f\xd2\xcf\xef\x95\xa8\x99\x7d\xba\xa4\xd6\xa4\x80\xb4\x87\x9e\x6c\x36\x06\x97\x66\x5a\x7a\x4d\x92\x2c\x50\x60\xe8\xaa\x2d\x82\xc6\x2f\x48\x8b\xa6\x33\x9e\xde\x97\xb9\x62\xdb\xc8\xd1\x75\x34\xc7\xd5\xdf\xc2\xb9\x64\x63\xda\xce\x86\x82\x0d\xf6\x0f\x1c\x60\x73\xec\x58\x94\xb3\x06\x44\x8c\x27\x35\x23\x44\x0d\xf4\x0a\x03\x02\xae\x41\x28\xe6\x86\x09\xe5\x9f\xef\xc8\x71\xfa\x65\xc4\x39\x64\xe9\x82\x63\xfa\x25\xe9\xc6\xe5\x2
8\x6d\x62\x6c\x27\xb2\xf9\x25\x52\xb2\x3c\x02\xe1\xf3\x52\xe7\xe4\x1b\xd7\x10\x69\x40\x49\x52\xf2\x24\x38\xd2\xf2\x83\x8e\xaa\x88\x97\xfe\x09\xcd\x7a\x3c\xb6\x0b\x17\x21\x0d\x1d\x5d\xdd\x47\x00\x23\x52\x6f\xa9\x3a\x87\xde\xa5\x16\x15\xde\xee\x9f\x7e\x49\xe9\x67\xb9\x93\xab\xf9\x98\x4b\xee\x5d\x81\xc5\xf0\xdd\xc8\x4f\xf6\xc1\xbf\x15\x3d\x76\xd0\x8a\x72\x0e\x1c\x9b\xd4\x24\x78\x6a\x93\xc0\x59\x2a\x4f\xd7\xc2\x63\x2f\xa3\x42\xbf\x54\x94\x24\x9f\xe8\x95\xe7\x0e\xec\x6a\xb7\x98\x9f\xa8\xf2\x73\x7f\xcc\x28\xe0\xf4\xa2\xac\x69\x1a\x1f\xa7\x73\xf6\xa3\xf9\x90\xa3\x19\x10\xee\xe0\x71\xb6\x3c\xc3\x75\xa8\xb0\x00\x3f\xa1\x86\x56\xc3\x78\xcc\x28\x97\x7d\xfa\x6c\xf4\x41\x47\x65\x39\x52\x0a\xf6\x5e\xc7\x6c\xd4\x4c\xa9\x5e\xff\xd9\x14\x4e\x5a\x6c\x2b\x9c\xd5\x8f\xc6\x49\xd6\x86\x86\xc4\x15\x19\xba\x74\xc0\xef\x1c\x24\xc6\x7f\xf5\xa8\x28\x59\x6b\x8a\x20\x88\xad\xb8\xdc\xb0\xe9\x6f\x04\x89\xad\x75\xa7\xf8\x61\xa6\xf3\x37\x88\xb3\x5b\x71\xce\x51\x5c\xd5\xf7\xa5\x67\xd9\x52\x26\xd2\xae\xd4\xcb\xd5\x95\x09\xf4\x23\x63\x54\x6e\x37\x16\x9a\x7a\xba\xb2\xa1\xc5\x08\xaf\x67\xba\xb4\x9c\x5f\x5a\x9b\x64\x6e\xde\xf3\x42\x57\x23\xdd\xb5\x86\xa6\xe5\x99\x4e\x0c\xe9\xcc\x81\x5d\x94\xc2\x33\xa0\x62\x90\x06\x29\x7a\x3f\x8f\x39\xad\x7e\xf2\x13\x69\xb9\xac\x7b\xcd\xfd\x5b\xf5\xe8\x0d\x48\xc1\x11\x02\x2c\x5a\x15\x57\x81\x03\x7d\xe2\x70\x29\x38\x00\x5d\xdd\x12\x32\x3d\x29\x16\xf7\x7b\x8d\x93\x1e\x67\x45\x61\x82\x9e\xaf\x02\x4f\x78\xea\x39\x0d\x3f\xd9\x69\x95\xd9\x45\xfd\x45\x01\xc4\x45\x60\x7f\x4a\x0a\x56\xee\xbc\x48\x00\x4a\x7e\x89\x85\x04\x9a\x38\x05\xc6\xe1\xe3\x7e\xc2\x91\x51\x89\x8a\x2c\xf6\xbf\x4d\x98\xfb\x76\x73\xcf\xee\x77\x2a\x2c\xe2\xd7\x4e\x5a\xcb\xa2\x60\x8e\x2f\xc0\x31\x38\x4d\xc5\x11\xc0\x91\x34\xb7\x40\xf0\x87\x0a\x91\x3e\x58\xc1\xe0\xb9\x43\xc2\xd7\x38\xbc\x48\xbb\x33\xc8\xa0\x14\x72\x8d\x2d\xc5\x36\x3f\x51\x85\x9b\x40\xf0\x54\xb4\x93\x30\x6a\xcc\x32\x44\x9f\xb3\x1a\x8f\x29\x28\x50\x34\x69\x12\xad\x03\xc7\x20\x02\x2a\x46\x54\xde\xcc\x2c\xca\x24\x63\x6c\xe2\x65\x48\xbd\xef\xde\x04\x1f\x1e\xd8\xa7\x31\xc8\x0c\xca\x21\x65\x52\x78\xcd\x3a\x40\xca\x82\xe2\x59\xe7\xe9\x69\xf3\xc4\x20\xb3\xb9\xb6\x59\x27\x27\x66\x99\x89\x55\x04\xd4\xa6\x16\x4a\x52\x53\xca\x40\x97\x5a\xb9\x39\x1f\xca\x7a\xb6\x37\xa4\x92\x17\xb6\xab\x1d\x9e\xfd\x25\xfa\x0d\x54\x25\x1c\x56\xce\x44\x55\x57\x01\x50\xe7\x55\x0e\x7b\xeb\x10\x86\xf2\x52\x28\xe4\x76\x88\x29\xcc\x7b\x68\xaf\x3e\x80\x1f\xef\x9b\x30\x4a\x07\x01\xd4\x1e\xec\xc8\xd2\xab\xdd\x12\xed\x50\xdc\x51\xe4\xd6\x9b\x49\x43\x81\x78\xe9\xe4\x56\xeb\x56\x29\x37\x50\xe4\xac\x63\xe9\xe0\x06\x08\xb2\x5f\x1a\xc8\xcd\xef\xc5\x40\xac\xf1\x21\x2c\xf2\xa4\x80\x0c\x59\xa9\x26\x82\x22\x9c\x3e\xc0\xc5\xed\x1b\x2b\x48\x06\xbb\x98\x5a\xfc\xe6\x5b\x60\xac\xd2\x70\xb8\xee\x52\xb1\xc2\xc8\xb2\x75\xd2\x2a\x97\xfc\x1d\xeb\x9a\x2b\xe2\x90\x25\xe9\x7c\xce\x9a\xd9\xe4\xb1\xf5\x6c\xc3\xa2\x53\xde\x17\xa5\x09\xb1\x5c\xd7\x8e\x4e\x62\xe6\xc7\x22\x01\xa6\x45\x4d\xb8\xab\x10\x36\xd6\x21\xc6\xe9\x86\xa0\x7d\xec\xd8\x7f\x66\x54\x8d\x08\xb4\x9f\x9c\x14\x7e\x54\x2c\x60\xde\xff\xa6\xe1\x32\xca\xde\xff\x9d\x60\x49\x2d\xe1\x1f\xe4\x74\x5e\xce\x1e\x4f\x1e\xca\xfb\x39\xa0\x93\xa9\x92\xf2\xe3\x14\xae\xbd\x06\xe0\x72\x60\x67\xed\x76\x4e\xec\xe7\x04\x71\x88\x71\xd7\xdd\x2f\xe1\x38\x82\xb5\x03\x6f\x84\xab\x27\x8d\x46\xbf\xbb\xb5\x27\xb9\xe6\x1c\x3e\xa8\x57\xdc\xb3\x64\x6f\xee\x52\x8a\x89\xd7\x21\xf1\x3f\x3a\x1f\x40\x46\x62\x93\x25\xec\x81\x0f\x5a\x58\x72\x70\x87\x49\xf3\x70\x39\xb7\xa4\xcf\xca\x12\xb4\x97\xa7\x77\x71\x68\xb4\x3f\xa6\x39\xc2\x57\x89\xf7\x8d\x1e\x80\xd7\xb0\xef\xa0\x32\xbb\xf1\xa4\x2b\xad\xd6\x2c\x24\xb5\x96\x65\x01\x
fd\x4b\x71\x41\xfd\xa3\x41\x1a\x81\x18\x1d\xbc\x27\xe9\x0a\x72\x80\x9f\xb9\xfd\xfe\x49\x14\x08\xdb\x0e\x81\xfa\x36\x65\x9e\x77\x2c\x37\x6c\x12\x1b\x0d\x34\x07\xc7\x5a\xa4\x8e\x9b\x5f\x13\xf1\xbf\x6b\xd4\x2b\x30\xc9\xae\x51\xb1\xa4\xe4\xb7\x5e\xc9\x6c\x5c\xeb\xc3\x88\xfb\xc3\xba\xcd\xe4\x48\xb6\xe4\x08\xd2\x04\x2f\x2f\x31\xbd\xc2\xe2\xa9\x4a\x26\x67\xc7\xed\x26\xbd\x08\xe9\x3a\x13\xff\xe2\xd7\x5b\x69\x5b\xfe\x47\x2b\xbd\x6c\xd2\x9c\xc2\x7a\xb0\xb9\x51\x3a\x2b\x71\x6c\xdd\xe7\x3b\x2c\xf3\x45\x17\x60\xd0\x62\x44\x22\xe7\xe8\x82\xf2\x2b\x49\x60\x3f\x2f\x8d\xbb\xcf\xbe\x32\x99\x81\x4e\x39\x66\xf6\x3d\x2a\x29\xd8\x28\x94\xd7\xe5\x88\x13\xdb\xa1\x23\x08\x50\xa2\x7d\xc8\x88\xc3\x1e\x88\xdf\x4a\x55\x01\x93\x5e\xcd\xf6\xdb\xd3\x6e\x68\xb4\xad\x4c\xd9\x70\xef\x87\xa0\xd9\xba\x3b\x1b\x51\x24\xde\x6b\x55\xea\xca\x7e\x34\x52\xb1\x0c\xd2\x33\x39\x23\xdc\x6b\xb6\x46\x4e\x3e\x61\x70\x49\x66\x6c\x31\x93\xad\xb3\x05\xad\xb2\xc4\x32\xfe\x92\x58\x3d\xf9\x76\xa8\x53\x11\x44\xfa\x60\x15\x45\xe7\x57\x93\x30\xf0\x9d\x28\x3a\xff\x03\xd8\xae\x5b\xf9\x29\x41\xcd\xb1\x67\xc5\xd2\xac\xae\x1f\x58\x26\xfe\x10\x7f\xf1\x61\x9a\xcb\xd0\xc0\xe8\x03\x09\xd0\x7d\x3f\xd2\xb1\x79\x90\x8f\xbb\x7e\xb0\xc9\x0a\xf4\x02\x70\xae\x67\xc9\x00\xac\x02\x08\x32\xa2\x6b\x39\xf6\x3f\x4c\xde\x6b\x52\x91\x48\x9e\x0a\x6a\x60\xc6\x9e\x44\x97\xc2\x49\x28\x45\xbe\x9a\xcb\x40\x60\xd1\xc7\xcb\x28\xf0\x78\x89\x18\x88\x21\x9d\x85\x21\xd6\xa2\x04\x2a\x1d\xc4\x05\x88\x1d\x18\x8f\xcc\x2c\x8a\x5a\x0a\x22\xef\xb8\x1c\x9b\x4b\x2d\x17\x00\x54\xc2\xc1\xb4\xfb\xe5\x79\xe1\x43\x49\xcd\xbc\x31\xd3\xdf\x44\x45\xf7\x48\x6b\x5b\xde\x6e\xd0\x53\x8a\x1c\x23\x2f\x42\x8e\xcd\xa5\x76\x06\x9f\xa4\xaa\x63\x7c\x4a\xb7\xa3\x0f\x6d\x7a\x68\xb5\x4c\x7b\xb5\x16\xd0\xba\xe7\x7c\x49\x3e\xc8\xe8\xa0\x65\x5b\x92\x5c\xaf\xe2\x99\xb0\x99\xec\x11\xef\x77\x49\x54\xec\xc4\xfa\x04\xe5\x2d\xfc\x82\x7d\xdc\x05\x49\x02\x29\x8f\x6f\xc9\x3f\x60\x4e\x09\x01\x09\xf1\x48\x99\x41\x35\x2d\xac\x64\x0b\x1d\x6f\xcb\xe6\x4f\x38\x2a\x23\xc1\xda\xc7\x50\x80\x05\x97\xc2\x44\xb9\x23\x37\x60\x9e\x0c\x3c\x10\x63\xf7\x14\x57\xe6\x9a\x7e\xc8\x52\xac\xb8\x94\xcc\x6b\x31\xd6\x53\x96\x46\x74\x2c\xad\xc8\x30\x1b\xd6\x97\x1c\xe9\x98\x20\x6b\xd9\x6d\x10\xd9\xd0\x8d\xdb\xe2\xfa\x45\xa8\x1e\x12\x0b\xcb\xef\xd4\xac\x02\x58\x5c\x77\x8d\x1f\x24\xe0\x5e\xa7\xa6\x21\x23\x18\x38\x9a\x0e\x92\x1b\xfe\x2d\xc7\x1b\x2c\x93\x14\xf7\xe2\x25\x7b\x39\xe8\x80\x4e\x0b\xe0\xe4\x12\xdc\xd2\xf1\xb5\x46\x92\x05\x2e\x52\x19\xb4\x0e\x46\xbf\x10\x7a\x16\xae\xc1\xac\xdc\xd6\xb5\x2a\xe1\xd9\x82\x95\xc2\x58\x82\x4d\x18\xcb\x63\x22\x01\x69\x18\x1a\xb2\xca\x33\x4c\x32\x2e\x3e\x5c\x68\xa6\x61\x5d\x3c\x43\x33\x70\x32\xe6\x10\x95\xc1\x6f\xee\x90\x92\xe8\x75\xbc\xa1\xd6\xc4\x8a\x32\xd1\xc9\x07\x64\xf8\x93\xa2\xc6\x34\x8a\x9e\x19\x05\x7b\x42\x2f\x36\xd7\xf9\x7c\x59\xf4\x8e\x0e\xb7\xb3\x7e\x7c\x37\x2c\xd6\x31\x4a\x48\xfd\x41\xc5\xc9\x20\x88\xed\xa4\x62\x84\x1e\x13\xe7\xae\x17\x6f\xc5\x3f\x09\x47\x5b\xe8\xef\x75\xef\x60\x96\x8e\x53\x11\xba\x3e\x2d\x9a\xd4\x3c\xf8\x51\x41\xa6\x57\x24\x65\x49\xa8\x4c\xde\x95\xd0\x4b\xa6\x39\x69\x1d\xd9\x7c\x51\x22\xae\xa1\x45\x84\x6b\xa4\x66\xa1\xa2\x5c\xd3\x78\x14\x8d\xb3\x53\xe1\x48\x42\x94\xcf\xde\xec\x25\xb6\xc4\x48\xf4\x9a\x20\x71\x8d\xb1\x5f\x00\xb5\x6c\x6a\xa6\xdf\x77\xdb\x38\xb1\x6e\xe3\x07\x24\x1c\x2b\xa7\x86\xfc\xb9\xe1\x8d\x54\x40\x90\x35\xfd\xe0\x4c\x15\x6f\xc4\x30\x87\xf8\x1f\x0a\x12\x38\x52\x91\x4f\x5e\x67\x85\x82\x17\x37\xea\x09\x84\x8e\x50\xd0\x10\x84\x1b\xa7\x7b\x62\x03\xe5\x4e\x07\x65\x1b\x39\x42\x9d\xe2\xb9\x86\x67\x60\x42\x13\x4d\xd3\x8b\x10\x5e\x2d\xe9\xaf\x62\x0d\x58\x1e\x8d\xcc\x3e\x9a\xff\xc1\x16\
x96\x9b\xf0\x77\xef\x46\xee\xed\x39\x8e\x88\xd9\x19\xdc\x15\x99\x83\xa4\xbc\xb7\xc2\x2e\x50\xab\x9a\x50\x0c\x3a\xf5\x77\x53\xc0\xf0\x8c\x2a\xb7\x5d\x5d\x7c\x94\x46\x58\x00\x68\x7f\x86\xf0\xe4\xac\xb5\xe9\xad\x60\xf6\x6d\x26\x59\xcf\x3c\x0b\x0e\x48\xc0\x70\xc7\x40\xab\x20\xda\x82\x94\xfd\xd2\x5c\x9e\x75\x7a\x52\x33\x4b\x7a\xd3\x62\x29\x34\xea\x18\xf0\x94\x0c\x03\x6f\xfc\x3b\x51\xe1\xcc\x13\x6b\xa2\xf0\x47\x12\xe0\xc4\x8f\x18\xb4\x14\xf1\x08\x2c\x8c\x24\xf0\x94\xa4\x4a\xb7\x0d\x8d\xe2\x7f\x78\x0f\x36\x7f\x03\x3e\x8b\x80\x48\xa3\x82\x7a\x12\xfe\x8d\x25\x52\x48\x6c\x5b\x8c\x36\x70\x3c\x32\x67\xa8\x81\x6b\x6a\x05\x10\x71\x27\x8a\x1f\xd6\x92\xa8\x4b\x2e\x95\x7c\xa3\xb9\xea\x64\xab\x79\x44\xed\x34\xca\x54\x2d\xe2\xa8\x28\x74\x2f\x93\xe7\xff\xa6\x1d\x13\xb5\x44\x12\x47\xa4\x5e\xa8\x7c\x80\xe7\x34\x1a\x4d\xa1\x75\x63\x8f\xc9\x2c\x6e\x61\xe5\xc3\x16\xfa\x3d\x9c\xa9\xc0\xa1\xbc\xa1\x92\xd4\x96\x7f\xff\xaf\xff\xfa\x3f\xff\xf5\x7f\x03\x00\x00\xff\xff\x1c\xf7\x13\x7e\x95\xd2\x00\x00") + +func dataFemalenamesJsonBytes() ([]byte, error) { + return bindataRead( + _dataFemalenamesJson, + "data/FemaleNames.json", + ) +} + +func dataFemalenamesJson() (*asset, error) { + bytes, err := dataFemalenamesJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/FemaleNames.json", size: 53909, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataKeypadJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x9c\x94\xcd\x6a\x85\x30\x14\x84\xf7\x3e\x45\xc8\x52\xeb\xff\x5f\xec\x0b\xf4\x21\x4a\x17\xdd\x75\x21\xa5\x14\xba\x2a\xbe\xfb\xcd\xd5\x8b\x99\x91\xe3\x3d\xea\x46\x26\x42\xe6\x3b\x33\x1c\xf2\x1f\x19\x63\xdf\x7e\x3f\x7f\xbe\xec\xab\xb9\x1f\xfc\xb1\xf0\xf2\x7d\x96\xc6\x7c\xff\x8d\xe3\xcb\x43\xdb\xd2\xae\xb2\x0a\xb2\x0e\x32\x5b\x25\xde\xdb\xea\x59\x7e\x2c\x7f\xbc\xa7\x0c\x23\x70\x13\x10\xad\x38\x43\xb1\x0b\x26\x58\x05\x30\x4c\x23\xfb\x77\x4a\xc6\x0d\x95\x48\x35\x92\x2a\xc5\xfe\x59\x59\x32\x97\x58\xcd\x91\x0a\xfb\x70\xdd\x69\x6d\x96\xfb\xb9\x5a\xcc\x05\xb5\xc9\xf6\x83\x56\x26\x41\x09\xd4\x21\xa8\xd5\xdc\x13\xb5\x4b\xa6\x12\xaa\x3f\xd0\x1f\x59\xe5\x5a\x97\xcd\x7e\x81\x0e\x73\xf5\xe2\xd8\xe8\x1f\x6b\x65\x12\x95\x48\x03\x92\x60\x4e\xd9\x3d\xd5\xca\x64\x28\x91\x62\x24\xe5\xe7\xd6\x5a\xe4\x62\x56\xc7\xac\x04\x59\x83\x12\xe5\xc8\x04\x9b\x8c\x04\x4b\x11\x16\x5f\xb6\x25\x04\xa5\x24\x5a\x86\xb4\x42\x79\x6b\xaf\xcc\x40\xb4\xfc\xe4\xd6\x53\x08\x79\x2d\x1d\xbe\x07\x0b\xcc\x7f\xa7\x68\x8a\x6e\x01\x00\x00\xff\xff\x2d\x9a\xa0\x40\x67\x06\x00\x00") + +func dataKeypadJsonBytes() ([]byte, error) { + return bindataRead( + _dataKeypadJson, + "data/Keypad.json", + ) +} + +func dataKeypadJson() (*asset, error) { + bytes, err := dataKeypadJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/Keypad.json", size: 1639, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataL33tJson = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xaa\xe6\x52\x50\x50\x4a\x2f\x4a\x2c\xc8\x50\xb2\x52\x00\x71\x80\xdc\x44\x20\x33\x1a\xcc\x04\x72\x4c\x94\x74\x60\x4c\x07\x25\x30\x2b\x16\x22\xa0\x94\x84\xac\xcc\x02\x55\x2e\x19\x59\x4e\x03\x61\x44\x35\x82\x19\x8d\x60\xda\xa0\x6a\x4e\x45\xd6\x6c\x8c\x2a\x97\x8e\x2c\x67\x86\x30\xc2\x12\x55\x59\x26\xb2\x32\x43\x84\x32\x45\x04\xb3\x06\x55\x47\x0e\x0e\x1d\x35\x08\xa6\x39\xaa\x8e\x7c\x64\x1d\x06\xa8\x72\xc5\xc8\x72\x2a\x08\x23\x4c\x51\x95\x95\x20\x2b\xd3\xc6\x69\x53\x05\xb2\x32\x55\x54\xb9\x2a\x64\x39\x23\xa8\x1c\x90\xac\xe5\xaa\xe5\x02\x04\x00\x00\xff\xff\xd5\xd6\x71\x46\xdd\x01\x00\x00") + +func dataL33tJsonBytes() ([]byte, error) { + return bindataRead( + _dataL33tJson, + "data/L33t.json", + ) +} + +func dataL33tJson() (*asset, error) { + bytes, err := dataL33tJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/L33t.json", size: 477, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataMackeypadJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x9c\xd4\xcb\x4a\xc6\x30\x10\x05\xe0\x7d\x9f\x22\x64\xd9\xda\xfb\x35\x42\xd7\x3e\x84\xb8\x70\xe7\xa2\x88\x08\xae\xa4\xef\x6e\x6c\xa5\x39\xa7\x4c\x99\xd4\xcd\xcf\xf4\x87\xcc\x97\x39\x24\xf9\x4e\x8c\xb1\x4f\x9f\xaf\x1f\x6f\xf6\xd1\xfc\x7e\xf8\xcf\xca\x97\xcf\x5b\x69\xcc\xfb\xd7\xb2\x3c\xfc\xd5\xb6\xb6\x47\xd9\x84\xb2\x0d\x65\x71\x94\xb8\xee\x5c\x6f\xe5\xcb\xfe\x8f\xef\x29\x63\x04\x77\x81\xe8\xc5\x3d\x54\x97\x30\x61\x0d\x60\x38\x8d\xdc\x7f\x50\x66\x3c\xa9\x24\xb5\x28\x35\x5a\xfb\x4c\xcd\x8d\x59\xa2\xba\x98\x04\xc7\xb0\x7c\xd2\xc2\xac\xaf\xc7\xea\x71\x2c\x48\x4d\x6e\xef\xb4\x2c\x09\x25\x68\x40\xa8\xd7\xba\xe7\x5a\x94\x8c\x92\x34\x46\xc4\x47\xad\x66\x2d\xca\xee\x3a\xbf\x09\xc7\x1a\xe5\xad\x42\xff\x52\xcb\x92\x54\x92\x1c\x4a\xb0\x4f\xb9\x7b\xaa\x64\xc9\x26\x41\x29\x42\x65\xd4\x03\xa0\x46\x9c\xe3\xdc\xa4\x65\xa8\x0d\xca\x61\xb8\xed\xb6\x8c\xe5\x88\x39\x25\xb8\x28\x80\x12\x25\xab\x40\xab\x52\x1e\xda\xff\x84\x4b\x5a\x89\xda\x7c\x6f\x06\xf9\xac\x38\xbc\x15\x64\xcd\x37\x2f\x18\x61\xf2\x0d\x98\xf0\xe5\xd9\x31\xff\xbb\x26\x6b\xf2\x13\x00\x00\xff\xff\xa3\x67\xe0\x02\xd0\x06\x00\x00") + +func dataMackeypadJsonBytes() ([]byte, error) { + return bindataRead( + _dataMackeypadJson, + "data/MacKeypad.json", + ) +} + +func dataMackeypadJson() (*asset, error) { + bytes, err := dataMackeypadJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/MacKeypad.json", size: 1744, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataMalenamesJson = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x5c\x5a\x4b\xb6\xeb\x38\x6c\x9c\xf7\x2a\xfa\xbc\x71\x56\x90\x35\x64\x07\x39\x19\x50\x22\x2d\xd1\xa6\x48\x3f\x52\xb2\x5b\x37\x27\x7b\x0f\x28\xb1\x0a\x78\x3d\xd3\xf5\x95\xf8\x01\x0a\x85\x02\xc8\xff\xfd\xf5\x5f\xb1\xed\xbf\xfe\xf3\xbf\xff\xfa\xfb\xef\x5f\x4f\xb7\x85\xf6\xeb\x3f\xae\xc7\xb2\xe6\xfb\xa9\x96\x29\xd4\xfd\x7e\xde\xe2\xbc\xba\x90\xee\x3f\xbe\x31\xa5\xe8\xb6\xfb\x0f\xef\x3e\xd1\x8f\x0f\xfa\x4b\x75\xfc\xd1\x1f\x93\x0e\xda\xc2\x7b\xbd\x9f\xf7\xb5\x6c\xae\xe1\xa5\x2a\x8b\x28\xef\x35\x54\x8c\x96\x23\xa6\x79\xbb\x63\x3c\x6d\xae\xbe\xc6\xff\x4b\x76\x69\xcc\xb0\x84\x52\x97\x70\x3f\xbf\x42\xce\x61\x1f\x33\xb4\x3d\x7c\xc2\xd8\x44\xf0\x5f\x2e\x69\xaa\xd1\x71\x6f\x3a\x8e\xcb\xb2\xa4\x7c\x62\xa0\x4f\x1c\xef\x3c\x5d\x2b\x19\x0b\xd8\xf7\x35\x7c\xc7\xbc\xae\x8e\x97\xf7\xb8\x95\x7d\x3d\x75\x8f\xf7\x53\x72\x15\x6f\x3c\xc3\xe3\x51\xc3\xf8\xe3\x51\x5d\x1e\xfb\x68\x73\xd9\x87\x69\x83\x98\x8d\xeb\x16\x4b\x64\xac\xca\x57\xcc\x58\xdd\xb9\x95\x8c\x6d\xd7\xb0\x94\xaa\x93\xae\x87\xc3\x5c\x9c\xd6\x8b\x39\xe2\xb0\xf1\xd7\xa5\x1d\xe6\x7d\xbb\x5d\x66\x1b\x6b\x78\x07\xfe\x2e\xbe\x2a\x30\x87\x2f\xc7\x92\xe0\x20\x59\x0e\xc6\x9c\xc5\x9f\x63\x69\x75\x5f\x8f\xf1\x65\x3d\xd5\xa4\x0b\x86\x7b\x96\x61\x89\xe7\xe1\x68\x4c\x4c\xeb\x92\xc2\xea\x29\x6e\xd8\x57\xbe\x73\xb4\x1d\xc6\xdf\x75\x33\x32\x2c\x7d\xf5\x0a\x91\x5e\x76\xdb\xf1\x07\x22\x03\x8c\x95\x00\xb5\xe4\xbe\x35\xe4\x79\xfc\x23\x0b\x3c\x0b\x77\x56\xcb\x18\x7e\x0a\x59\xe0\x8f\x79\xa7\x7a\xe0\xfd\x49\xdc\xe5\x81\x00\xe7\x01\xf8\x55\xbd\x2b\xae\xf5\x30\xf2\x99\xf1\x99\xac\xe5\x34\x40\x1c\x4b\x29\x07\x1c\x22\x7e\x0a\xdb\x78\xc3\x89\xe1\x61\x3f\x99\x6d\xfc\x1a\x8e\x25\x60\xb8\x6e\xf6\x82\x35\x1f\xad\x85\x34\xf6\x3c\x95\x69\x1a\xef\x7f\xe2\xbc\x97\x0a\x38\xe5\xd0\x86\x79\xdf\x6b\xb7\xcb\x7b\x58\xb4\x78\x8f\x05\x34\x40\x75\xae\x2e\x2e\xf0\x0b\xfc\xd0\x56\xf7\x1d\x8f\xb3\x60\x59\x2d\xd8\x02\x5e\xe9\x23\x63\xe0\x2b\x88\x95\x3c\x10\x49\x81\x78\x79\xc6\x8d\xfb\xcd\x7b\xc9\xb1\x30\xd8\xf1\xf2\xa4\x38\xda\x19\x8b\x89\x26\xdb\xe2\x0b\x4b\xd8\x5d\x4e\x08\xa8\x14\x04\x3f\x88\x6e\x8b\x24\xef\x52\x40\xe4\x66\xa2\xa4\x16\x9f\xf1\xe9\x7c\xd4\x1d\xa3\xe7\x52\x37\xc7\x48\xaf\x24\x00\x79\x98\x43\x1e\xb6\x5c\x92\xc4\x94\x06\x75\x20\x07\x54\x21\xc0\xa6\xff\x20\xfb\x79\x02\xbf\x4c\x63\xec\x90\x38\xb6\x4b\x0a\x9e\xd7\x89\xd5\x76\x86\x98\x31\x9a\xe0\xcf\x73\xab\xe2\xb3\x83\x31\x59\x35\x82\xfa\x28\x41\x63\x5a\x98\x82\x21\x98\xc8\x80\x98\x94\x68\x0e\xde\x33\x5c\xe4\x5b\x6e\x05\x31\xd1\xb1\xe8\x08\x34\x05\xbc\xcc\xab\x06\x17\x53\xc1\x93\x29\xf0\xd3\xb1\x85\xb9\xf0\xa5\xf9\xa0\x17\xe7\x35\xb8\x31\xaa\xf0\x69\xf1\xa5\x02\x87\x29\x3e\x1e\x85\x23\xc7\x85\x4e\x2b\x4d\x22\x00\xa6\x3c\x09\x28\x60\x65\x43\x94\x58\xd3\x86\x7f\x48\x2f\xf0\xbc\xf0\xa1\x89\xcf\xb1\xea\x54\x4e\xcf\x81\x36\x03\x2a\xb0\x68\x0d\xc8\x3b\x62\x03\x46\x9e\x04\x70\xd9\xe0\x31\x1d\x43\x3e\xc4\x02\xb8\x96\x1d\x2b\xfd\x86\x46\x5f\x7a\x86\x51\xe7\x72\x2c\x95\xb9\xcc\x1b\x1a\x7c\x07\x5f\x0b\x57\xa3\x7e\xf6\x18\xe1\xc7\xf5\x44\x0b\x50\x17\xe6\x1a\x01\x89\xc1\xf4\x21\x5f\x8e\xd1\x3f\xe2\xc2\xf2\x47\x86\x2f\x70\xc1\xe9\x83\x62\x1d\xc3\x28\xb3\x08\x2d\x64\x05\x8d\xc0\xa0\x90\x88\x15\x4a\x30\x45\x63\x6e\x99\x2a\x63\xa8\xba\x0d\x53\xef\x82\xf9\xf1\xc2\x12\x4d\x46\x50\xe2\xeb\xc8\x19\x9f\x85\x25\x6a\xba\xae\xc7\x14\x48\xd6\x01\x59\xd4\xe5\x05\x88\xb9\x89\x80\x32\xa2\xba\x87\xd3\x58\x58\x00\xa6\x2d\xa6\x5d\xa9\x17\x4a\x83\x43\xcf\x61\x8e\x09\xfe\xe0\xbe\xaf\xa4\x3c\x86\x4a\x9b\xee\x10\xc1\xbe\x38\xd1\x18\x4a\x37\x34\xb3\x50\x2b\xe9\x80\x61\xef\xbc\xea\x91\x17\xc9\x72\x66\x6e\x17\xee\x3d\xe0\x11\x09\x7
1\x83\xc4\x13\x3b\x0d\x58\xe3\x2d\xa4\xa2\x66\xdb\x4f\xc4\xea\x1e\x3d\x66\xb3\xa7\x97\x63\xe6\xbe\xf7\xc0\x50\x56\x79\xb3\x9f\xb2\xf0\x60\x60\x3f\x5e\x4e\x26\x86\x64\x3f\xc0\xd3\x5c\x90\xb5\x9e\x47\x02\x1f\xbc\x0e\xf8\x53\x58\xc4\x69\x2e\x39\x39\xf5\x7a\x2c\x2b\xe6\x1e\xd1\xea\x4d\x12\x15\xce\x5a\x56\x0c\xd1\x91\x8c\xf5\x3f\x42\x8a\xff\x90\x05\x36\x2c\x48\xf2\xac\x62\x41\xad\x4a\xa0\x80\x82\x9f\x2e\x6e\x4c\xac\x0d\xc1\x72\x93\x31\xa3\xd7\x04\x85\xa8\xdb\xf1\x7a\xfc\xd0\xb8\x3d\xc9\x61\xe2\x16\x35\xa7\x4c\x27\x5d\xde\x6d\x81\xf7\x63\x73\x6e\x56\x9a\xa3\x09\xba\x68\x21\x99\x8a\xa5\xe1\xd5\x4f\xac\x0b\xfc\xaa\x9a\xa0\x09\xab\x38\x8f\x58\x7c\x45\x28\xe2\x16\xe4\x6d\x44\x22\x25\xf0\x8b\x31\xd7\x85\x94\x7a\xab\x32\xbc\x82\xc0\x9a\x21\xdc\x5f\xd2\x34\x2f\x9a\x2f\xfe\x3e\x98\x92\x4c\xb2\x68\xbb\x7c\x64\x32\x8f\x46\xbc\xfc\x7c\x54\x5a\x30\x3c\x7b\xa4\x14\xd8\x0b\x06\xca\x4a\x10\xc7\x4e\xcd\xff\x0d\xd9\x5b\x6e\x0d\x5b\x74\x2b\xdc\xea\x8c\x41\x91\x47\xbc\x41\x62\x61\x16\xdf\x6b\xf8\xc0\x3c\xe9\x80\x5e\x90\x62\x83\x3c\x23\x3a\xb2\x1a\x91\x4b\x4b\x41\x6f\xac\x87\xf2\xd0\xc5\x27\x85\xc4\x77\x20\x08\x4e\x15\x16\xa4\x9e\xf4\x28\xb9\x8d\x77\x4b\x4d\x0a\xd6\x8a\x44\xd4\x45\x1c\xfd\x7e\x2b\xb4\xf1\xca\xdb\x4d\x98\x26\x09\x7b\xe7\x1f\x8c\xb3\x81\xa8\x04\x26\x13\xd4\xf6\x94\x9c\x6e\xac\x3a\x7a\xb5\x58\x0d\xf0\x52\xe9\x2c\x74\xe4\x56\x4b\xcf\xf0\x44\xac\xce\x30\x1a\xaa\xb1\xd0\x98\x6a\x05\xe3\x46\x4f\x6d\x2e\xcd\x25\x61\x9c\xc3\x17\x4a\x6c\xaf\x94\xfe\x32\xf2\x46\xde\x19\x33\xbd\x6b\xdf\x2c\x53\xb3\x3e\xd6\x79\x8d\x86\xea\xc7\xbe\xbf\x63\x92\x5e\x9c\x80\x9c\xb3\x99\x6f\x71\xa6\x2e\x28\xa6\xb8\xba\x4b\x09\x8c\xde\x69\xe2\x8d\x84\x1f\x14\x2e\x37\x06\x0a\x42\x00\x60\x10\xf9\x8f\x22\xec\xf2\xc2\xf8\x3d\x98\xc4\x24\xfb\xc7\xe6\x96\x43\x22\x57\xf2\x2b\x86\x71\x46\x72\x4f\x0a\xa7\x5c\x8c\xf0\x2c\x02\x92\xb1\x59\x65\x89\xe0\xb7\x03\x89\xe1\x8a\xef\x5d\x05\xd1\x15\x5c\x58\xd2\x97\xf1\xd1\x15\xc3\x02\x58\x9a\x1c\xef\xb5\x6e\x6d\x67\xfa\x98\xfc\x5b\x8b\x14\xa6\x9f\x90\x40\x8e\xad\x32\x1d\x3e\xfb\x00\x11\x8c\x20\xfa\xab\x72\x1f\x5f\x9b\x94\x53\xd0\x04\xd6\x22\x1d\x1e\xbb\x54\x1e\x8b\x91\x25\xd2\xf5\x0f\x04\xea\xb7\x14\x61\x81\x91\x57\x5e\xff\x2a\xf5\x93\xac\xd8\xb8\xbf\xc1\xb6\xa2\xf0\x3f\x88\x88\x28\x99\x48\x7b\x00\x3b\x1c\x1f\xd3\x1f\xba\x25\x82\xeb\xc4\x89\x5a\xe0\x49\xf6\x34\x9c\x16\xdb\xc6\x6d\x17\x59\x77\x42\x1c\x6b\x95\x1d\x54\xf9\xfb\xf0\xd5\xdc\x40\x83\xa8\x62\xef\x65\x37\x38\x63\x19\xeb\x88\x4b\x76\x33\x98\x58\xf4\x68\x18\x29\x67\xd7\x76\x87\x78\x2c\xa9\x06\xd7\x88\xbd\x1a\x03\xd1\xe6\x5c\x02\x47\x58\x28\x2a\x49\xeb\x7e\x9c\xa4\x63\x70\x85\xbc\x82\x19\xd4\x67\x16\xbb\xa2\xbb\x22\xd8\x78\x2e\x99\x72\x25\x17\x10\xed\x22\xbf\x21\x60\xd7\xa8\x52\x44\xcc\x4f\xda\x4d\x8e\x9c\x74\x2b\x1a\x3a\x00\xba\xfc\x1d\xea\x4c\x8d\xfb\xcf\x6e\xf5\x91\x11\xf7\xa1\x26\xdd\x2a\x45\xfb\x06\x04\x5c\xb9\x08\x70\x97\x5f\xc1\x4e\x51\xdd\xd3\xa8\xd7\x44\x88\x6b\xcd\xd7\x57\x9b\x94\x34\x58\x69\x14\xd5\x09\xbb\x96\xa2\xdd\xce\x5a\xf5\x4b\x0e\xd2\x02\x94\x75\x35\x7e\x72\x13\x3c\x32\xb8\x62\x38\x4e\x52\x15\xdd\x23\x96\x7a\xc2\x9c\x33\x25\xf9\xf5\x3e\x20\xe2\x8e\x89\x7c\x15\xb6\x8d\x82\x65\xeb\x35\x3e\x60\x61\x2b\xd6\xa7\x69\x7d\x08\x59\xb0\x07\xd4\xe7\x25\x52\x55\x35\x95\x7d\x27\x56\x3c\xfc\x29\x33\x76\xf1\x5c\xb8\x53\xf4\x5e\x42\xd3\xcc\x89\x08\x5a\x8f\xcd\x84\x96\xac\xd1\x2c\xe6\x6a\x4f\x39\xaa\xc1\x03\x36\xfd\xc4\x5e\x25\x53\x19\xca\x2a\x77\x72\xb5\xf2\x86\x14\x7f\xcc\xea\x82\xf7\xc8\x02\xc3\x41\xb3\x08\x4b\xb2\x83\xe4\x83\x58\xa7\x12\x7a\x41\x53\x91\x89\xa0\x5a\x58\xba\xbe\x63\x47\x0d\x1d\x81\xbd\x9e\x0c\x17\x11\xbd\x24\x3a\x29\x7b\xa6\xa8\x
8d\x43\xad\x74\xa5\x88\x65\xab\x43\x60\x98\xc8\x6e\x4b\x2d\x1f\x2c\x2d\x68\x0e\x9b\x53\x27\x56\x6e\xd1\x9f\xaa\x75\xd7\x43\x4b\xb5\x4d\xdb\x8e\x41\x8b\x97\xde\xc9\x00\x13\x5e\x6d\x0c\x88\x86\xab\xc9\x03\x8e\x60\xed\x2e\xdf\x45\xf8\x45\x8c\x81\x92\xba\xd0\xf2\xa2\x0d\xb8\x43\x91\x18\xd8\xe0\x4c\x29\x2e\x92\x2e\xb3\xb0\x74\x8b\xa9\x32\x9f\x12\xd9\xc4\xd3\x47\x5b\x5e\x9a\xb4\xf6\xb3\x19\xd9\x6a\xd4\xf2\x44\x3d\x28\x95\x8d\x78\x90\x81\x03\x43\xfa\xc0\x4e\x64\xb7\x44\xa0\x88\x76\xbf\x0f\x82\xe0\x02\x0a\xde\x6f\x06\xe7\x49\xc2\x6b\x67\xbc\xd2\x18\x77\x0f\x94\xd5\x86\xa4\xb1\x08\xaa\x2b\x6d\x46\xf7\xb1\x27\xc1\x89\x21\xac\x65\x4f\x2b\xa9\x30\x89\xe5\x52\x2d\x69\x92\x63\x72\x51\x67\x3a\x46\x2d\x77\xd8\x17\xaf\xc5\x0c\xac\x37\x29\xb0\x2b\x4a\x8e\x90\x7a\x1a\x54\xb4\xd9\x22\x5e\x11\xbe\x95\x48\xa6\xeb\x74\x80\x61\x1e\xd1\x07\x76\x4c\x9c\xf7\x01\x1f\x5c\x1d\x13\xbe\x3f\xe3\x25\x07\xf0\x57\xf7\xd6\x8e\xbd\x44\x82\xea\xae\x8d\x80\xed\x92\x83\x2a\xbd\x77\xaa\x49\x8d\x00\x92\x68\xf7\xa6\x8d\xfb\x43\xf5\x46\x0e\xd4\x2d\xca\xae\x02\x5d\xe4\xe8\xab\xd7\x93\x14\xa4\xb4\x1f\x7b\xa7\x6e\x32\x61\xb1\x2c\x0c\x48\x36\x13\xa5\x6e\x62\x21\xb0\x60\xdb\x52\x8f\x61\x04\xed\xbc\x48\x7d\x42\x8a\x35\x5a\x71\xb3\x05\x96\x90\x05\x34\xe6\x22\xa9\xd3\x25\x6d\xab\x68\xf7\x4c\xe4\x4a\xc4\x48\x8b\xca\xe8\xfe\xa3\xa1\xc5\xde\x60\x56\xc5\xcf\xad\xc9\x30\xd8\x44\x2e\xa6\xdb\xa2\xd0\xd2\x24\x6d\xe2\xfd\xee\xce\x00\xcb\xab\x28\x73\x9c\xac\x94\x5c\x98\x81\x26\xd7\x40\xf9\x3d\xfd\xa8\xcc\xe9\x27\x09\xe0\x61\x06\x5d\x47\x27\x32\xb1\x66\x82\x26\xd1\x60\x9a\x04\x8f\xbb\x73\x38\x26\x3e\xd2\xd9\x88\x40\x11\x69\xd1\x56\xc1\x52\x70\x31\xb5\x7a\x55\xb8\x8b\x63\xb8\x08\x5f\xd3\x5a\x52\x72\x01\x54\x4e\x79\xc3\x47\x3e\x39\x92\x96\x30\xe8\xc6\xe4\x33\xa3\x87\x58\x4c\x3d\xd8\xfb\xb1\x6c\x88\x1e\x19\x62\xab\x35\xd4\xd4\xfe\xd0\x2e\x69\x73\x59\x85\x86\x68\xc1\x49\xcf\x2e\x36\x96\x73\x57\xf5\x85\x83\x8f\x22\x0c\x03\x4b\xee\xe1\xe1\xb4\x53\x8a\x4d\x92\xe0\x24\x26\x35\x92\xa7\xe0\x8e\xc1\x87\x26\x23\x2d\x8e\x9d\xb3\xd8\x22\x2b\xe0\xbd\x1e\x46\xa8\xa7\xed\x5f\x55\x16\x79\x29\xd2\xab\x68\xeb\x8e\xb1\x94\xf9\xb6\xde\x05\x4c\xec\xfa\xf4\x33\xb6\xd3\xb2\x40\xcc\x54\xa0\x35\x71\xfa\xa0\x5c\xf5\x32\x8d\xef\xae\x90\xa9\x5f\x64\x0a\x15\xb8\x1f\xe6\x9a\xb9\x5b\x88\x6b\xcf\x1f\x95\xef\xaa\xcd\xc3\xc6\xb0\x7e\xbb\xf6\xfb\xd0\xee\x7b\x91\xea\x73\x73\xa6\x61\xec\xb2\x36\xff\x23\x68\x33\x99\x83\x16\xcf\xce\xc6\x75\xfe\x12\xf4\xfc\x05\xcc\x73\x49\x14\xb2\xa0\x8b\x50\x99\x87\xba\x7e\x31\x95\xd1\x64\x92\xa1\xe4\x2e\xb6\x03\x3a\x62\x09\xce\xde\x46\x25\x80\x9f\x6e\x96\x5c\x09\xe9\x21\x59\x0d\x58\x2c\xed\x60\xf7\xe4\xab\x11\xdc\x33\x22\xa7\xee\x7d\x0d\xed\x2b\x9e\xbd\xc0\xe3\x8e\x59\x59\xee\xbd\x16\xd2\x83\x33\x1e\xe4\x88\xa5\xc5\xde\x9a\x95\x59\xb5\x66\xf2\x74\x99\xa5\x3e\x62\x99\x51\x8c\xe6\x5d\x0f\xc8\x95\x9e\x89\x6d\xda\xd3\x9e\x70\x53\x56\xee\xe4\x3f\x86\x8f\x9d\x67\x72\x04\x86\x2b\x14\xd9\x7c\x56\xa6\x9b\xb3\x46\xed\x6b\x66\x73\xc0\xc5\x9e\xa0\xf6\x01\x64\xac\x5d\x95\xf0\x46\x59\xe2\x8e\x5b\x75\x68\x53\x73\x5e\x89\x64\xff\x25\x0f\xdf\x2d\x0f\xd6\x93\xd0\xa9\xdb\x69\x0e\x94\xb3\xe3\xa9\xc2\x6e\x52\x8d\x29\xe2\x7e\x34\x8b\x89\x25\x09\xb5\x1f\x57\xff\xe4\x71\xd3\x67\xd0\xba\xc3\x3d\x8b\x09\xd2\xc8\x81\xaa\x69\x39\x6b\xab\xd7\x4d\xfe\x60\xb7\x98\x39\x30\x1d\xb3\xe2\xbd\x85\x73\x2b\xe8\xe3\x5c\xc7\xbf\xec\x5d\xf6\xd3\x3e\x78\xf4\x8e\x18\x86\x61\xe7\x13\xd5\x6c\x35\x1f\x54\x64\x7d\xf0\x40\x26\xaf\x3e\x66\xa6\xf5\x9d\x87\x4f\xe1\x87\x7d\x9e\xa4\xfa\x72\x52\x8d\x72\x22\x04\x2f\x2b\x8e\x8f\x1c\x0b\x34\xa9\x7b\x18\xba\xbf\x79\x1a\x97\xf4\xdc\xee\x25\x98\x0e\x1b\xad\x0c\x62\x9b\
x8a\xc9\xe2\xda\xcd\x93\x84\xac\xdd\x73\x38\x81\x50\x15\x46\x79\x17\x2e\x73\x74\x74\xa9\x43\x5a\x78\xe0\xd3\xc2\xda\xca\x1e\xe8\x1b\x61\x55\x43\x34\x25\x4f\xb7\x14\x45\x8e\x6a\xd4\xaf\xe9\x4b\xf5\xc8\x20\x86\x24\x25\x1d\x0a\x17\xcd\xe0\x13\x95\xf4\xf3\x20\x10\xd9\xa9\x15\x22\x6d\x3c\x00\x5a\xc9\x79\x39\xf2\x5c\x42\xca\x1e\x3d\xdc\xdf\xbf\x14\xa6\x89\x24\x27\xf6\x93\xe8\x23\xf5\xd6\xf0\x38\xd8\xdb\xf5\x64\xf6\xd2\x3e\x66\x73\x58\x48\x3f\x54\xe2\xe1\xc4\x7d\xa2\x09\xd1\x19\xd9\x16\x8e\x9f\xa2\xe7\x96\xec\xca\xde\x0a\x18\xd9\xd1\x89\x13\x78\x04\xd6\x8f\xba\x50\x80\x3e\x64\x65\x3f\xb0\x76\x0b\xe4\xb5\xcd\xd2\x9f\x64\xe0\x05\x0b\x62\x99\x34\x6e\x79\x80\x79\xaf\x86\x29\x9a\x17\xe9\xdc\xb4\xb1\xae\x58\xe9\xe4\xaf\xad\x28\x3d\xa9\xa1\x5e\xe9\x68\xd3\xa2\xe5\x1b\x11\x95\x93\x4d\x76\xa2\x7a\xd1\x9f\x52\x45\xed\x2e\x0e\xd2\xe6\xc5\xe1\x09\x8f\xc3\x33\x23\x2c\x7c\xe5\x3a\x34\xd2\xd3\x81\x30\x1b\x2e\x60\x07\x31\x2b\xb3\xe7\xe8\xe1\xfd\x1d\x0f\xef\x52\x1f\x91\x05\x4b\xf1\x0c\xa5\x90\xed\x8d\x99\x43\x14\x1c\xfc\x53\xd8\x2f\xd1\x0e\xd8\x4c\x11\xf3\xa2\x23\x6f\x52\x05\x4f\xbf\xf4\xea\xcb\x2a\x72\x82\x53\x4e\x87\xf6\x19\xc7\x22\xea\x87\xc1\xef\xd8\xbb\xef\x67\xf2\x20\xa6\xfb\x6c\x91\x2e\x29\x49\xe1\x1b\x27\x25\x8e\xc4\xe0\x52\xfe\xc9\xe3\x1b\x71\x92\x22\x95\xac\xb1\x17\x55\xd6\xda\x05\xbe\x3a\xda\x5c\xaf\x9e\x58\x5e\x94\x48\x4b\x9b\x73\x37\xaf\xdb\xdb\xa0\x06\x7b\x62\x58\x4d\xb0\xf3\xd4\xb0\x4c\x68\x71\xe8\x25\x1c\x26\xcc\x7a\xbc\xb5\x59\x56\x92\x96\xf4\x2f\x03\xa6\x5e\xbe\x2a\xa4\x23\x23\x99\xa7\x65\x5f\x0d\xcb\x71\x38\x42\x2c\x7b\x5c\x09\x79\xe1\xd6\x84\xb0\x09\xea\xba\x53\x38\x14\x4b\xee\x37\x0b\xd8\x7a\x5c\xb9\xed\xaf\xbd\x54\x62\x45\xae\x76\xab\xb7\x22\x14\x06\x9f\xbc\x48\x3a\xb9\xcc\xab\x46\x06\x65\xd6\x55\xbf\x9a\xfa\x3f\x12\xf8\x8f\xab\x03\x3e\xeb\x25\x0d\x73\xec\xbc\x97\x29\x6a\xde\x85\x93\x57\x27\x6a\x9c\x61\x73\xb4\xf0\x7e\xb3\xf8\xb7\xe7\xff\x22\xf6\x89\xd3\xbe\x69\x9e\xc3\x9f\xba\xae\x57\x90\x88\x40\xa7\xa1\x9f\x94\x54\x60\x2a\x68\x5d\xa6\x76\x16\xb5\x24\x72\x90\x13\xac\x81\x6d\xf8\xba\xb3\x90\xea\xd7\xc7\x78\x79\xe2\xe0\x09\xb9\xa1\x15\x51\x73\xe6\x8e\x04\x92\xce\xae\x9d\x7a\x3d\x45\xba\xaa\x52\x0c\x16\x85\x2b\x49\xd7\x8f\xde\x86\xd0\xfe\x0f\xc5\x98\xff\xe3\x44\xb1\x6b\x11\x84\xc1\x75\x03\x02\x08\x96\xea\x31\x1b\x31\xff\x4f\x44\x86\x6c\x85\x4d\xcd\x14\xe2\xa2\x77\x9b\xfe\xe8\x07\x4f\xdc\x6e\x75\x4d\x05\x80\x14\xe7\xbb\x36\x50\x14\xbe\x24\xc6\x7e\x8d\x8a\x94\x20\x91\x0b\xe7\xfe\x66\x0e\x0c\xe2\xd1\x29\x6a\x29\x08\x7e\x17\x9b\xf0\xd4\xc4\x1b\xc9\x79\x1f\x1d\x8c\xb5\x88\x37\xd9\xb5\x2a\x27\x43\xf0\xd4\x4a\x2a\xfc\x84\x17\x85\x94\x10\x4e\xb3\x30\x54\xa2\x0e\x89\x57\x4c\x46\xd1\x88\x9d\x57\xe6\xcf\x1c\x4c\x63\x3f\x78\xc5\xb4\x0f\xf6\x4e\x93\x53\x49\x2f\x3f\xb3\x7b\x3a\xee\x0d\x38\xaa\xfd\xea\x4d\xc5\x4a\x9a\xbb\x32\xc6\xfd\x48\x02\xed\x47\x31\xba\xa3\xfb\xba\x1e\x9f\x4d\x45\x7a\xe5\x41\x52\xa7\xdb\xa6\xca\x4b\x83\x7b\x64\x6c\xac\x9a\x12\x1f\xa6\xdb\x28\x86\x92\xa8\xd5\xd6\xa0\x6b\x1b\x4f\x0e\x49\xad\x5a\x7f\xbe\x48\x06\x6c\x64\x69\x75\x28\xc9\x0b\xab\xed\xd7\x89\xc0\x65\x22\x20\x78\x3f\x6e\xa5\xd0\x7d\xf2\x1a\xc2\xb8\x1b\x88\x70\x93\x8d\x31\x88\x4b\x33\xf4\x27\xbf\xb6\x6f\x61\x1d\x71\x9f\x68\xf3\x1c\xa3\xf0\xc4\xfc\x36\x39\xdb\x54\x6c\x75\x4d\x21\x8b\xfb\xe6\x9d\xcc\xc6\x14\x01\xf4\xc5\x56\x79\x95\x68\xb9\x54\xf9\x30\x58\xcf\xee\xc8\xd5\x0d\xe2\xb7\xd2\x2c\x3f\xbc\x87\xb8\x15\xaf\xc7\xb0\xd7\xad\xa5\x60\x6e\x2d\x65\x06\xc5\xd3\xa1\x0a\x73\x67\xd2\x0a\xd3\xa9\x7a\x7c\xc8\xde\x6c\xb7\x24\xd8\xc3\xe3\xeb\xe6\x2c\x30\xb9\xc8\x34\x06\xa2\x5f\xed\xf8\x25\xdc\xef\xe8\x0b\xd4\x0b\xa6\xbc\x3f\x99\xf5\x9a\xd6
\xfc\xaf\x83\x34\x49\x41\xb1\xe8\xad\x99\x34\xe3\x52\xd4\xab\x98\xd3\xd1\xf6\x0e\x2b\x39\x53\xef\x20\xf4\x3d\x8d\x67\x36\x7e\x57\xb1\xb7\x63\x7c\xa8\xb0\xa3\x2e\xed\x07\xc0\x43\x8d\x57\x7d\x62\x8f\x34\x18\xf1\x7d\x8b\x96\xc4\x33\x24\xd1\xe2\x94\x2a\xd7\x3e\x1e\xe6\x0e\xaa\xe6\x5d\x29\x75\x8a\xa9\x58\x02\xbb\xe6\x29\x98\x9b\x05\x2f\xbd\xbd\xf8\x0c\xff\xbe\xa0\x05\x9a\x37\xb7\x98\xe7\x3f\x6f\x2c\x24\x6d\x0b\x98\x8c\x2a\x30\x4e\x7a\x01\x71\x19\x79\x5e\x84\x00\x55\xf4\x53\x64\x0a\x8d\xb1\xe8\x1a\xae\x63\x3c\x49\x66\xe0\x1f\x53\xfb\xb7\xc8\x4b\x62\xf1\xc1\x45\x3a\x2c\xf2\xbe\x51\x0d\x3f\x5c\xd7\x0e\x7f\x78\xf9\x80\x97\x90\x2f\x24\x8d\x61\x4e\xd3\x8b\xea\x77\x26\x60\xc3\x88\x00\x24\x29\xf6\xb6\x03\xaf\xf2\xfd\x79\x53\xa0\x1f\x22\xb6\xf2\xeb\xaf\xff\xf9\xbf\xbf\xfe\x3f\x00\x00\xff\xff\x45\xf8\xc0\x95\x0f\x2e\x00\x00") + +func dataMalenamesJsonBytes() ([]byte, error) { + return bindataRead( + _dataMalenamesJson, + "data/MaleNames.json", + ) +} + +func dataMalenamesJson() (*asset, error) { + bytes, err := dataMalenamesJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/MaleNames.json", size: 11791, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataPasswordsJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x4c\xfd\xeb\x72\xf3\x3a\xd0\x2c\x8c\xdd\x0b\x7f\x24\x55\x7b\xbf\x3b\x65\xc9\xe7\xe4\x16\x72\x07\xf9\x91\x02\x49\x88\x84\x44\x12\x7c\x78\xd0\xe9\xbb\xf9\x6f\xa6\xbb\x21\x2f\xfb\x59\x65\x2d\xd9\x92\x48\x60\x30\xc7\x9e\x9e\xff\xab\xfa\xff\xa6\x75\xab\xfe\xdf\xff\xbf\x6a\x0e\xeb\x7a\xcb\x4b\x5b\xfd\x4f\x75\x38\xbe\x7f\x7c\x7e\xbd\x1e\x7c\xff\xe8\xa1\xfd\xf8\x77\x8b\xcb\xf6\x28\xbf\xb2\x9f\xed\x12\xba\x3c\xd9\x83\x79\x5f\x57\xff\x45\x1d\xd6\x58\x87\x61\xb0\x87\xa7\x9c\x37\x3d\x1c\xe2\x36\xc6\xe4\x7f\x37\xe6\xe9\x12\xfd\x0f\xbf\x7e\xfd\xdb\x1e\x84\xba\xb1\x77\xf3\x5f\xed\xeb\x16\xa6\xce\x1e\xad\x7d\x68\xf3\xcd\x9f\x0a\xeb\x16\x17\xff\x3c\x7c\xd9\x83\xe3\xdb\xdb\x9b\xfd\x38\xdb\xa5\x06\x7f\xbf\x75\x9f\xe3\x32\xe2\x61\x1f\x96\x21\x3e\xfe\xae\xdb\x2f\x61\x6f\x2e\x63\xf4\xdf\xed\x13\xdf\xc8\x9f\x79\xe4\xdd\x1e\x6d\x8b\x7d\xde\x94\xfd\x4d\x17\xfb\x58\xfc\xb6\xde\xf5\x79\x5b\xea\xf8\xcc\x9a\x9b\xe6\xf5\x42\xdc\xdf\xc6\x4f\xdb\xa2\x2d\xdc\xff\x60\xdd\xec\xc7\x25\x0d\x03\xfe\xac\xcf\x0d\xef\xaf\xf1\xcb\x49\xfe\xd9\x43\xbe\x46\x5c\xe9\xb4\xf6\x69\xf2\x87\xf6\x9a\x3e\x0f\x51\xab\xe0\xef\x12\xe7\x19\x2f\x0f\xf6\x69\x78\xc3\xb2\xf6\xfe\xdb\xaf\xcf\x8f\xf7\xe3\x01\xcb\xd1\x75\x78\x4b\x5b\xa8\xe5\x16\x16\xff\xc3\x35\x0d\x57\xbc\xb4\xb5\xa5\x0e\xfe\xcc\x23\xd8\x12\x47\xbd\x09\x97\xf6\x0b\x5f\x7e\x79\x71\x18\xb2\xfd\xcc\xb8\x65\xbf\x9f\xb4\x45\xac\xd0\x69\x89\xb1\xcd\xa3\x5f\x79\x1e\xe7\x9d\xcb\xb0\xc6\xbb\xdf\xcb\x66\xcb\xd7\xe2\x89\x2e\x69\xa5\xfa\x30\x8e\xfc\x93\x5d\x0f\x9a\xbc\x5c\xe3\xb6\x45\xad\x15\x6f\xc7\xd6\x13\xdb\xae\xdd\xb3\xbf\x1c\xf8\xff\x47\xff\xf6\x37\xcc\xc3\x89\x2f\xef\xed\x9a\xfd\xc5\xf3\x92\x26\x2d\x82\x3d\x37\xac\x31\xf8\xcd\xa5\x60\x92\xe3\xd2\xf9\xf0\x3b\xb8\xe1\xca\xbb\x36\x43\x5a\x62\xb3\xc4\x0d\xcb\xda\x9e\xba\xde\x9f\x99\xc3\x72\xc1\x26\xe4\x5b\x9d\xf1\x20\x8c\x61\xc9\x58\xc2\x6d\x49\x77\xbf\xc6\x30\x34\x90\xdc\xe4\xfb\x43\x91\xe8\xf6\x64\x0b\x0b\x69\x5e\x66\xec\xcf\xda\x98\x10\xe3\xfa\xe6\x3e\xc7\x09\xaf\x0c\xf8\xa2\x90\x44\xec\xc1\x9c\x97\xd5\xae\xd5\xdf\x3e\x69\xfb\xc7\x60\xbb\x92\x20\x32\xf6\x16\x17\xec\xda\x14\xd6\x86\x6f\x1f\xc3\xb4\xfb\x15\x1f\xde\xfd\x9b\xc7\x02\x2f\xeb\xf3\x32\xf9\xcf\xd5\xae\x77\xda\xfa\x00\x09\x9b\xb6\x84\xed\x34\xd9\x8c\x03\x3f\x71\x9d\x72\x
9e\x71\xde\x72\xe6\xf2\xdf\xfa\xb0\x45\x4a\x42\x6a\x22\x65\x74\x1d\x33\xaf\xa6\xb3\xdf\xdd\xc2\x03\x52\x72\xc9\x5b\x78\x2d\x8d\xbf\x57\x0c\xdd\x10\xb9\xdc\x7e\xf5\x13\x96\x9b\xc2\x3e\x04\xfc\x7c\xde\x9b\x6b\xed\xcf\xdb\x56\x2d\x61\x49\x2e\xf0\x53\xea\xfa\x8d\xa7\xae\xb5\xad\x8f\x12\x9c\xf0\x0f\x0f\x4e\xa7\x18\x79\x75\xf6\x8f\x62\xd6\xf4\x38\x60\xc3\xc0\x5d\xbb\xe3\xcb\x2f\x3c\xda\x46\x40\x04\xe7\x21\x3c\x70\x03\x53\xd3\x1c\xbe\xdf\x5c\x60\x6e\xe9\x19\xa0\x94\x7c\x23\x6a\xbf\x81\xf3\x3e\xa5\x8c\xbb\xf4\x13\x3d\x61\xe3\x5d\x14\x74\xc5\x26\xd7\x2d\x44\x36\x4e\x53\x5a\x71\x0f\xf9\x76\xce\x35\x8e\xee\x64\xdf\x5c\x6c\x1d\xf3\x75\x4e\x94\xeb\x21\x5c\xb8\xb0\x4b\xa8\xed\x5a\x7d\x55\xa4\x30\x6c\x75\x9b\xd8\x62\x7d\x4e\x51\xa7\xe0\x61\xbb\xd3\x4b\x2c\x6b\x9c\xa7\x3a\xaf\x1b\xa4\x09\x32\x81\xfd\x5f\x78\xd6\x7d\x51\x4d\x47\xbe\xb4\xcc\x8a\xed\x98\xec\xb4\x9e\x70\x7f\xfa\x18\xbb\x85\x2d\x6d\xb8\xe0\xb0\x50\x16\x96\xe0\x17\x27\xf9\x9a\xf0\x9b\x96\xd2\xec\xf7\x84\x15\x33\x41\x9f\x20\xe8\xbe\x02\xa1\x6d\xa9\x78\xa2\xee\x6e\xb0\x13\x83\x8b\x1a\x53\x5b\xb6\xab\x1e\x76\x9c\xd0\xe4\xba\xc8\xf7\xe1\x0d\x5f\xd8\xc8\x69\x0a\x38\x3d\x65\x1b\x0e\x87\x97\xda\x35\x45\x60\xff\xfc\xa2\x62\xbb\xe6\x3b\x74\xc2\xfd\x70\x78\xff\xd1\xb1\xe3\x1d\x0f\x75\xc6\x21\x73\x91\xed\xf1\x16\x94\x1c\xd7\x2a\xa6\xad\xe2\x14\xdc\x18\xfc\x0b\xcf\xdb\x7a\xc7\x3e\xe8\xaf\xbe\xf9\xe5\xbb\x1b\x56\x2a\x42\x5b\x98\x89\x1a\x27\x0f\x2d\x64\xb2\xde\x37\xfb\xf3\xe0\xc2\x70\x4d\x17\x5e\x7c\xba\xd9\x27\xf9\xc9\xf5\xa5\x1d\xd6\x97\xf6\xe0\x89\x8f\x0b\xee\xbe\x4b\x0b\x7e\x35\x86\x36\xad\x78\xa6\xc7\xa1\xe6\x91\x0a\xcb\xb6\x44\x1c\xd3\x30\x6f\x81\x36\xca\x56\x12\x32\x6a\x57\x33\x72\x17\xfd\xc3\xb9\x4f\x39\x77\xe5\xba\xfc\xae\x17\xd7\xf7\x1b\xd6\x43\x87\x6e\x48\x76\x0a\x67\x48\xfd\xc9\xf6\x89\xc6\xc3\x0e\x08\xcf\xe6\xb8\x9f\x4e\xf8\x90\x6d\x5f\x36\xaa\x98\x3c\xf7\x50\x0f\xbe\xb4\x76\x5f\x7e\x59\x5b\x7e\xf0\x94\xae\xc9\x4f\x1c\xd7\x43\x12\xd6\x25\xbb\x67\xdc\x6a\x68\x24\xb4\x53\xbc\x3d\xf2\xc2\x9b\xd0\xfa\xd5\x7b\x5d\x07\xec\xe1\xf1\xf8\xfe\x5e\xd1\x0a\xe1\x8f\xc7\x6c\xb6\x90\x37\x6a\x47\x69\x8b\xbe\xa0\xed\x92\x78\x79\xa6\x70\xe7\xb1\xa8\x70\xae\xf1\x5c\xd4\xd7\xe0\xa6\x13\xcb\x73\xb7\x33\xeb\x3b\xf9\xf3\xf5\xfd\xf9\xfe\xe6\x26\xaa\x8e\x81\xc6\xc0\x5e\xb5\x51\xe1\x64\x7c\xda\xe7\xe1\xd3\x85\xeb\x88\x2f\xff\x8b\x3e\x95\x5d\xf4\x8f\x30\x45\x81\x75\xee\xf6\x40\xeb\xb9\x71\xe5\x75\x16\xba\x68\xeb\x9f\xca\xd5\xbf\xb4\x06\xf4\xc6\x4b\x66\x1a\x3b\xd5\x6d\xe0\x72\x2f\x09\x8f\x7e\xf4\x85\x0d\x32\x87\x64\x6f\x71\x92\x37\x33\x91\xa9\xf1\xfb\xcd\xcd\x06\x25\xb2\x2d\x79\xaf\xb9\x0d\x7b\x31\xbd\xeb\xb6\x9b\x5a\xc0\x2e\x8c\x4d\xf0\xbb\x31\x53\xbb\x50\xe9\x98\xe6\xf6\xf3\xe5\x12\x37\xcf\xd4\x9a\x7e\x94\xaa\xe2\x1a\xed\x09\x0b\xd4\xe1\x53\xec\x63\x71\x22\xdb\x3c\xd8\x16\x4f\x54\x2f\x69\xaa\x61\xbe\xba\x5d\x02\xfe\xfb\xf3\x6d\x06\xbe\x82\x05\xe6\xdf\x87\x21\xde\xa9\xbb\x16\xb3\x22\xd0\xcc\x19\xda\xed\x78\x80\xcd\x94\xe9\xb4\x27\x43\x93\x07\xbf\xdf\x7b\xb8\x26\x3a\x01\xfc\xa8\x8a\xfe\x09\x37\xb0\x36\x35\xf0\xf6\xf6\x8d\x15\x18\x6b\xfc\xd9\x35\xe7\x16\x12\xaa\x25\x34\x73\xc3\x93\x11\xe6\x4c\x07\xe1\x94\xa0\xd2\xb7\xa2\x4b\x6c\x83\xaf\xb8\xa6\x6b\x7e\x04\x9e\x00\xd7\x4a\xd0\x6b\xfb\xda\xeb\xca\xea\x48\xb3\x3f\xbf\xac\xe7\x32\x27\xff\x1b\x13\xec\x19\xf6\x6a\x7d\xb4\xd2\x6e\xb1\xa5\x6f\x32\xeb\x8c\xf8\x27\x50\x29\xd3\x06\x9f\x4d\xbc\xe9\x5c\x9d\x86\x07\x45\xd7\xcd\x89\xff\x3c\xbe\xfb\xb7\xab\x17\x7c\xfd\x7d\x12\x6c\x57\x96\x93\x34\xc4\x2e\xc2\x65\xc8\xfb\x0f\xae\x8e\x8e\x51\x05\xbb\xf6\xc4\x47\x2d\x65\x13\xea\xb4\xb4\x49\xfe\x90\x36\xf8\x13\x5f\x10\x83\xd9\x36\xab\xa2\xa2\x93\
xb2\xeb\xfd\x62\x27\xa8\x22\x39\x6e\x53\x1a\xc3\x20\x49\xc7\xfb\x74\xd4\xd5\x1f\xf6\x55\x41\x2f\x5d\x03\x9c\xa3\x2e\xb7\x4f\xf3\x16\x03\x54\xc5\x29\xf6\x34\xb1\xb3\xa9\xdf\x0d\xee\xd7\xc2\xbd\x0e\x7b\xb7\xc3\xc3\xb4\xbd\x19\xe9\xcb\x64\x7f\x7f\x33\x92\xb6\xaf\xfb\x88\x15\xba\xe0\x92\x4d\xcd\x35\x19\x17\x75\x09\x5f\x37\x8a\x86\xfc\xc9\x66\x1f\xcd\xd5\xe4\x89\x5c\x4e\x29\x0e\x2d\x74\x89\x5c\xa8\xc6\x34\x81\x7f\xc2\x00\x67\x84\x1e\x39\xfc\x51\x3b\xc4\x5b\x7c\xb9\xd7\x55\xf1\xb7\xb1\xe6\x70\x17\x60\x13\x4c\x59\xc0\x24\xbb\xa6\x35\x23\x57\xc9\x62\x50\x94\x23\x04\xf5\x9d\x8e\xab\x2d\xe9\x15\x66\xca\xcc\xbf\x6b\x80\xd7\x55\xaf\x15\x7d\x1e\x88\xc4\xba\x2f\xf4\x05\xcd\x72\xaf\xd0\xa5\xbf\xf8\xc2\x22\x98\xbe\xc4\x1e\xa4\xa5\xf6\xbf\xf7\x0b\x19\x22\x56\x4d\x5a\xc5\x7f\xd9\x6c\x94\x01\xd3\xf4\x2d\x7c\x61\x9c\xca\x8a\x8e\x96\xbf\xe4\x3e\x9b\x8e\x90\xf4\x9a\x2a\xa0\x81\x5e\x92\xbc\xd4\x36\x36\xe5\x84\xdc\xa8\xde\xd7\x5b\x2c\xda\x5d\xfb\x92\xa7\x81\x26\xc1\x3d\x0e\x69\x32\xbb\x97\x7c\x19\x1e\xfe\x16\x8d\x3b\x7d\x78\xc9\x12\x1a\x29\xd0\x48\x4f\x44\x76\xd6\x9c\x38\xaa\x82\xd8\x9a\x52\xef\xfc\x61\xbb\x44\x3b\x80\x15\xdc\xc7\x3e\x75\x8c\x66\xa2\x6b\x69\x6a\x13\x6e\x37\xf4\x04\xd7\xb3\x2d\x51\x94\x39\xb3\xa6\xc3\xa1\x64\x52\x67\xce\xab\x4b\xc8\x3b\xbe\xfc\x5a\xcc\xb6\xd1\x26\x99\x63\x6b\x82\x5e\x02\x0b\x8b\xb8\xfc\xd6\x66\x73\xf8\xb9\x29\x27\x73\x86\xb3\xb6\xdd\x9f\x99\xd3\x38\x63\x41\xb0\xd7\xfe\xd3\xad\x0f\x03\xb8\x29\x2d\x57\xba\x51\xd7\x60\x7f\x05\x2d\x21\x8d\xec\xfe\x1b\x85\x7e\xde\xc7\xf9\x82\x3f\x37\x47\xf5\xa6\x20\xd0\x75\x09\x8f\xfc\x5a\xe2\x83\xd1\x14\x1d\x44\xd7\x0e\xff\x26\x75\x6a\x3b\x1a\x10\x4a\x36\x71\xd8\xa0\x47\x4d\x7a\x96\x85\x5e\xfc\xba\xe2\x94\xbe\xb4\xfb\x3a\xe9\xfc\xd9\x51\x8f\x6b\xa2\x38\x99\x52\x6f\xb9\xc0\x34\x26\xae\x4f\xcd\xe3\xa2\x8b\x3a\xb8\x5a\x70\x89\x2e\x9a\x0d\x91\xef\x1b\x9c\x4c\x3b\xb4\xd2\xb4\x16\x49\x5d\x63\x07\x11\x5a\x87\x34\x5f\x26\x9c\x21\xad\x6b\x6b\x17\xeb\x2a\xff\xf0\xef\x78\x7b\xc7\xf2\x34\xf6\x37\x08\x5e\xf8\xd4\x07\x54\xf1\x52\x22\x23\xf3\x37\x37\x79\x9e\xfb\x8a\x3b\x52\x00\x61\x1b\x64\x52\x38\xd1\x1d\x37\xc1\xb4\xa8\x17\x22\x64\xef\x9f\x10\x33\x22\x5c\x3b\x40\x69\x64\xca\xe5\xc9\x2d\x1c\x1d\x7f\xfb\x55\x73\xc1\x15\x36\x99\xfb\x66\x1b\xdb\xc6\x53\x45\xeb\xcf\xe5\xec\xc3\x2d\x24\x17\xa3\xd3\x60\xdb\xac\x10\x63\xc5\xaf\x2c\xaa\x1b\xb9\x78\x52\x74\x1e\xc3\x33\xd2\x68\x2d\x7e\x92\xbc\x30\xa8\x3a\xc3\xb9\xb6\x8b\xe0\xef\xdd\xb2\xc1\x01\xb6\x63\xda\x52\xc4\x26\x53\x58\xfd\xc6\x0f\x98\x60\x84\x5d\xc7\xe2\x09\xc5\x9e\xa6\x88\xf8\xff\x08\x76\x0e\xdc\x75\x44\x05\x6e\xdd\xe1\x75\x48\xbc\x57\x93\x3f\xc8\x9d\xf9\x1e\xbd\x5c\x8b\x8f\x0f\x69\x52\xf3\x82\x69\x16\x5a\x58\x38\x7f\x43\xfa\x83\xfe\xdc\x50\xe2\x05\xfb\x13\x5d\x97\x7b\xd8\x13\x16\xd6\x84\xcf\xf4\x81\x3f\xe7\x9f\x57\xff\x39\x14\x7e\xac\xcd\x21\xe0\x33\x83\xef\x2e\x8d\x42\x70\x53\x84\x94\xc8\x9b\x7f\x43\x81\x98\xb7\xc8\x60\x45\x09\x93\x83\x16\x0e\x37\x64\xf1\x92\xf9\x1b\x90\x04\x13\xc3\x31\xa4\x01\x07\xcd\xb4\x03\xf5\xa4\x59\x80\x1d\xd6\xc2\xe4\xeb\x12\x78\x33\x32\xa6\x61\x71\xf7\x14\x31\x64\x58\x1e\xe7\x30\xe9\xfe\x67\x2c\x98\xdd\xd5\x86\x1b\x6d\x63\xc6\xd1\xeb\x28\x36\x16\xc0\x6c\x32\xd5\x7e\x4a\x32\xed\x5b\x93\xf7\x8e\x8b\x76\x7c\xb3\x7f\x65\xd3\xfd\xfa\x2d\x0e\x18\x43\x55\x82\x8b\x0a\x3e\xc6\x13\x97\x59\x7b\xb8\x10\x25\x0d\xc8\xed\xec\xf8\xdf\x76\xc9\x37\x53\xc7\xb3\xf6\x69\x40\x86\xc5\x7c\xf8\x23\x9d\x78\x13\x0a\x6c\xaa\x9d\xc4\x97\x3b\x9a\x8a\x6f\x64\x36\x75\x8d\xfe\xe6\x8b\x44\xd5\xfd\xb9\xe8\xca\xec\x66\x9b\xb7\xa6\x96\x9e\xd7\x73\xbf\x24\x2d\x29\xf5\x77\x5f\x8e\xce\x10\x5a\x4a\x5c\x30\xbf\x26\x48\x75\x60\xfd\x0f
\x1f\xdf\xf6\x0f\xca\x6a\xa1\x61\x5d\xa3\x8b\xf3\xc9\xdf\xe9\x6c\x1e\x1c\x4f\xb7\x29\x81\x05\xda\xd3\x4d\xd2\xbe\x52\xb7\xbb\x1f\x40\x4f\x23\x31\x7c\x6a\x77\x73\xf4\x78\x09\x0b\x3d\xaf\x50\x3f\x3c\x6e\xa8\xe0\x79\xb7\xb6\x02\x5c\xec\x25\x31\xe9\xe3\x51\x9b\x42\x49\x1e\xc7\x39\x14\xbf\xa1\x5e\xf6\x6d\xc7\x9f\x8f\xb6\x34\x2e\xde\x66\x00\x5c\x01\x73\x6b\x99\xa2\xf2\x80\x00\xd6\xdd\x0f\xbb\x5f\x1e\xf2\x21\x30\x9d\x26\x1a\x3d\xfe\xf4\x9e\xc6\xfd\x6f\x39\xb1\xfb\x5a\x9e\xd9\xf6\x79\xc4\xa3\xa6\x77\x25\x8c\x87\x2e\x06\x14\x79\xbb\xe0\xc4\xf8\x3f\x9f\x4a\x06\x6e\x36\x03\xcd\x40\xfa\x61\x7a\x8f\x69\x9e\x1a\xb7\x10\xa7\x33\x74\x36\xdc\xfb\x03\x77\x94\x0e\x14\x63\x58\xdc\x4a\x6c\xf6\xa5\xd8\x19\x1e\xe2\xd0\x8e\xb8\x25\x73\x26\x5a\x5b\x3c\x04\x24\x76\x6c\xa1\x3c\xa6\x46\x39\xa2\x85\xf1\x27\xf2\x41\xad\x14\x94\x52\x89\xd4\x55\x08\xe2\xcc\x13\xe0\x85\x9b\xd1\xf5\xac\xc9\xa4\xed\x52\x6e\xcd\xb4\xf2\x82\x57\xaf\xff\xf6\x84\x63\x70\xf6\xb4\x52\xc3\x53\x56\xd7\xb8\x42\x73\xcc\x97\xe1\x8c\xa5\xf4\x88\x7d\x5f\xe8\x78\xdb\x35\xfa\x79\x91\x17\xb1\x78\x02\xc6\x57\xc3\xdc\x05\x7e\xe2\x68\xa6\x7f\xa1\xa2\xab\xf1\x85\xb7\x32\x3f\x8a\x69\x8b\x29\xf0\xba\x4d\xdf\x0d\x0a\xa0\xcd\xeb\x68\xcc\xea\xc2\x82\x9a\xdf\x54\x82\xc8\x4e\x32\x68\xfe\x3d\xa3\x78\x17\x0c\x65\xd8\x3a\x7e\xd6\x33\x8f\x35\xb3\x77\xb7\x34\x8e\xf4\x0d\xda\x1d\x2e\x1c\x83\x87\x03\x05\x2f\x16\x9d\x3e\x04\xb8\xaf\xa6\x16\xe4\x32\xe7\x2c\x9b\x35\x96\x98\x0c\x2a\xe8\x44\xbb\x67\xce\x9c\x1c\x7e\xb3\x29\x97\xc3\x8f\x3b\x82\xc5\x4c\x9a\x7b\xb1\xf3\x77\x1e\x17\x22\xb3\xd2\x33\x69\xe3\x86\x73\xc1\xa2\xf8\x72\x9b\x4f\x0f\x47\x2a\xbc\x92\x5c\xa6\x54\x68\x85\x87\x54\x4b\x9f\x9b\x58\x3c\x33\xd4\x8f\x6f\x62\x85\xf8\x53\x2f\x84\x9f\xcf\x83\x67\xf6\x69\xe3\x01\x77\x11\xa4\x2d\xb2\x18\x00\xd6\x2c\x0c\xca\x18\xc1\xf7\x82\x3a\x08\x54\x14\xc7\x0f\xff\x86\x2c\x8c\x35\x82\xaf\xb6\xa4\x62\x9b\xc1\xf5\x03\x03\x2b\x64\x91\x98\x1a\xc9\x8c\xb5\x43\xd3\x30\x7d\x3d\xe7\xb9\x61\xbe\xc4\x5c\x46\x7a\x49\xb5\xb9\xf9\x34\x3c\x3b\xb4\xa0\x45\x43\x0c\x88\x2c\xcc\x34\xc7\x9d\x77\xc7\xa8\xdd\xaf\x01\xee\x18\x3e\xd2\x83\xf6\x4d\x07\xcd\x1e\x0f\x34\x4a\xeb\xa3\xe9\x21\x1d\x66\x09\xe8\x73\xcc\x66\xe1\x53\x80\xaf\x92\xa7\x09\x91\xa2\x59\x11\xe8\x29\xf3\x32\xa9\xb0\xcc\x15\xd7\xa6\xdb\x41\xd5\xd6\x32\xff\x34\x32\xcd\x67\x52\x95\x19\xf5\x66\x65\x69\xda\x78\x4d\x03\xd5\xa6\x3c\x2d\x58\xe3\x0a\x9e\x43\x9b\x98\x4e\x59\xfb\xd0\x75\xd0\x29\x4b\xe6\x83\x7f\x0c\xa8\x2e\x66\xfa\xd7\x00\xcd\x7a\xc9\xa6\x11\x18\x66\xcc\x90\xf1\xe2\x48\xda\x8d\x68\xbf\x7a\x13\x29\x25\x12\x1f\xf4\x76\x4c\x85\xd7\xd2\x23\x13\x23\x0d\x77\x5d\xab\x57\x7a\xce\x6f\xe0\x1f\xbe\x2a\xf8\xe5\xe6\xfe\x31\x57\x62\x9a\x82\x02\x1b\xae\xb6\xd1\x0c\x23\x3a\xe5\x07\x4c\x36\xb1\x1a\x52\x1b\x38\x94\x8c\xa4\x3c\x37\x7a\x0a\x0d\x3d\x7b\x33\xe3\x35\xbc\x68\xd3\x16\x8c\x8e\xb7\x65\xa7\x09\x71\xcf\x7c\x56\xd0\x14\x97\x2b\xfd\x7b\x53\x10\xf2\xc5\x15\x8c\x30\x5f\x66\x22\x04\x63\x63\xe1\xee\x83\x4b\x75\xa5\x26\x31\xaf\x54\x69\x7c\xcf\xd7\x71\x25\x12\xdf\x7f\x09\x33\x73\x03\x07\xc6\x42\x08\xa3\x97\xc0\xd7\x97\x6c\xf0\x69\x09\x4c\x33\xf9\xd9\x57\x72\xc9\x34\x36\xb6\xd4\x4e\x82\x3f\xf3\xfd\xf3\xcb\x32\x8a\x7b\x33\x16\x5d\xd3\x3d\x31\x25\xc2\x54\xab\x29\x3d\x08\x5e\x1f\xfa\xa0\x3c\xaf\xc5\xbb\x90\x60\x73\xfd\x5b\x26\xa9\xcd\x5e\x5d\xa9\xe8\x56\x4f\x6c\x21\xdb\xed\x1e\xff\x35\xc8\x1e\x61\x5b\x8e\x9f\xfe\x5d\x54\x0d\x34\x13\x8b\x15\xab\xbd\x44\x7e\xd4\x1c\x1f\xd8\xc1\xb0\x6d\x38\x2e\x9b\xb9\xc4\xab\x8e\x12\x23\x9c\xb0\x2f\x34\x5b\xe6\xb5\x64\x85\x40\xcc\x3e\x28\x9d\xe7\x0f\xcf\x61\xa6\x44\x98\xb0\xe2\xb3\xe1\xfa\x70\x23\x71\x98\xe1\x05\x79\xb2\x46\x99\xca\x9
2\xf0\x77\x85\x0d\x53\x6b\xeb\xd5\x47\xe6\xea\x16\xf7\x37\x83\xb6\xb4\x63\xe9\xc7\x83\x04\x95\x56\x46\x8f\xf9\x98\xca\x33\xdb\x8a\xa8\xfa\xbc\x73\x35\x3c\x4e\x0a\x71\xf0\x0f\x9b\x8a\x7a\x38\x7c\xfe\x7e\x7f\xba\x7b\x0e\x57\xe1\x00\x91\x31\xe5\x20\x4b\x74\xbb\x70\x01\xcc\xae\x04\x9a\xfc\xdc\x96\xec\xbd\x9d\x11\xaf\x3c\xe1\x23\x1b\x95\x68\xe6\xc7\x12\x46\x64\x81\x2c\x0e\x88\xc8\xc5\x5d\x42\xf1\xe2\x4d\x0f\x63\x31\x93\xfc\x88\xc6\xd3\x75\xfe\x4c\x9a\x4e\x25\x1e\x9b\xe3\xbc\x22\x73\xc5\xd2\x17\x75\x3b\x6e\xb1\x77\xfb\x44\xff\xc8\x0c\x56\xc4\x92\xf9\xea\x60\xa7\xf2\x6e\x6a\x1f\xe5\x0d\x73\xbb\x26\x9a\xbf\x61\xb0\xb0\x0a\x96\xa9\xfc\x15\x42\x29\xed\x33\x6f\xc1\x14\x45\x2a\x1e\xc1\xd4\xd1\x45\x6d\x42\x5c\x59\x6f\x30\x85\x23\x67\xce\x1c\x7b\xb9\x5d\x75\xbe\x0d\x14\xa9\xdb\x5e\xea\x6e\xfb\xb4\xb2\x92\x32\x04\x3b\x39\xc8\x70\x97\xd2\xd8\xd3\x4b\x54\xdc\xcb\x19\x5f\x15\x8a\x71\x70\x7f\x65\xcb\xfc\xbd\x99\xbb\xb1\xd3\x60\x7a\x30\x50\x17\x31\xe6\x33\xd9\xcc\x4c\x88\x87\x95\x46\x59\x15\x32\xd4\x85\xf0\x55\x31\xa9\xdf\x31\xe1\xe9\xe9\xf2\x41\xe9\xa4\xc0\xea\xca\xe6\xd1\x78\xd1\x70\x66\x2f\x91\x68\xc9\x75\x38\xc5\x0d\x22\x6c\x2f\x59\xe9\x4d\x31\x3d\xb4\x9a\x12\x56\x92\x15\x5f\xbe\xbe\xe6\x8c\xed\x33\x85\xba\xcd\xf8\x24\x88\xf0\x5a\x54\x0c\x2b\x28\xe6\x41\x21\x67\xbf\x9a\x2a\x80\x4f\x11\xcc\xfc\x79\xa2\xd9\xae\x80\xb1\xe5\x35\x45\x6c\x93\x39\x8a\x6d\x86\x8b\xd5\x4a\xfd\xb6\x69\xa8\xa9\x95\x66\x0f\x21\x71\xa8\xce\x52\xae\x26\xca\xca\x17\x9e\x86\x62\x29\x4f\x25\x69\xda\x46\x93\xd8\xc4\xa3\x64\xd2\x3a\x49\x9b\xee\x4c\x1d\xa2\x98\x53\xc9\x2b\x43\x8a\x2e\x6e\x7c\xab\xfb\x29\x0d\x2a\x10\x95\xf2\xdc\x9c\x9e\x4f\xc6\x17\x35\xb5\x54\x0c\xc3\x46\xb7\x72\xda\x37\xbf\x83\x6e\x37\x07\x05\x7e\xa0\xb9\x3f\xd8\x2a\x53\xd5\x5d\x4f\xcb\x69\x02\x63\x92\x83\xcc\xdd\x38\xd5\xd7\xe6\xee\x39\xb6\x5b\x92\x17\xa7\x42\xad\x6f\x9c\x69\x27\x5b\x21\xbf\x6b\xcf\x00\x2a\x0b\x18\xcc\xb9\x67\x45\xc7\x23\x29\x8a\xc1\xe1\x76\x8c\xef\x8b\x2b\xf2\xde\x42\x5e\xfa\xcc\x97\xa9\xa8\xdc\xd8\x4e\x11\x1a\xcb\x9d\x43\x39\x88\x5d\x7a\x8e\xd8\x02\x3b\x2c\x08\x3a\x58\x6b\x46\xfa\xc3\xed\xa2\x12\xc5\x4b\xb2\x28\x16\xa5\x25\xb3\x99\x9e\xc6\xab\xa0\xa7\x37\xa6\xec\xcc\x97\xbb\x21\x24\x42\x81\x80\x25\x48\x66\x81\x43\x25\xa5\x0d\x8f\x79\x9c\x03\x33\x72\xb6\xea\x38\x46\x76\xfd\x1d\x3c\x1c\x64\x85\x0f\x7e\x41\xa8\x08\xcd\xa8\x7e\x99\x60\x2d\x88\x5c\xd7\x51\x29\x13\xb3\x6b\x4f\x85\x9e\xbb\xdd\x96\x1c\x10\xf9\x8c\x6e\xb5\x75\x57\x76\xbd\xd2\x57\x90\xb6\x0a\x41\xf0\xbe\xb1\xca\x6a\x9a\x97\x7e\xc1\xab\xfa\xfb\x06\x99\x69\x3d\x5f\xd8\xf0\xec\x50\x9e\x4d\xea\xa9\x74\x91\x63\xa6\x2a\xa0\x8f\xe3\x55\x02\x46\x4c\x2b\x8d\xb7\x97\x2e\x2a\x2a\x65\x56\x39\x3d\xa9\xa0\x98\xd1\xd6\x89\x65\x1d\x86\x0c\x2a\x37\x3e\x4b\xf5\x28\x2a\x0b\x66\x97\x6a\xa1\x15\x04\xd3\xd3\x14\x2c\xab\xbd\x4a\xf3\xc7\x57\x31\xd7\xdf\xb5\xc4\x1b\x4b\x6c\x52\xa6\xec\xef\xb2\x42\xc1\x3d\x5a\xa8\x52\x2f\xea\xe0\xdd\x16\xfa\x91\x73\x1e\x14\x78\x6d\x49\x81\xb9\x49\x0e\xf5\x85\xbf\xa1\x2a\xc4\xe5\xc4\xac\x97\x47\x49\xc2\x41\x26\x56\x9e\x3e\x5a\x58\x1d\x61\x8b\xb9\xec\x4f\xe8\x08\xe5\x92\x87\x6b\x18\xe9\x5a\xf0\x1d\x37\x94\x47\x90\x7b\xc4\x3a\x86\x85\xce\xf9\x2d\x6e\x05\xcb\xc0\x45\x91\x80\x52\x50\x4e\xd9\xd4\x0d\x77\xcf\x5c\x09\x7f\xb3\x31\x2f\x73\x1f\x71\xa3\x1e\x06\xd0\x69\xf9\xf0\x6f\xad\xef\xb6\xa3\xc6\x31\xdb\x63\xa4\x3b\x3d\x14\xc6\x8e\xf9\xb9\xff\xff\xdb\xd9\xa6\x3a\x76\x25\xc2\x77\xde\xcd\x76\x42\xc8\xd7\xd4\xb1\x5a\xb5\x6d\xb8\xb4\xe3\x9b\x7f\x57\x25\x54\xf7\x37\xb3\xc3\xf2\x03\xb1\xb5\x13\x09\x49\xbb\x06\x6a\xe0\x2e\x5b\x88\x99\xe8\x5f\x4d\xac\xeb\x87\xba\xf3\x0a\x85\xef\x08\xbe\x20\x4f\x0a\xd5\x78\x
3e\x29\xd0\x8c\x50\xe3\x7d\x56\x60\x74\x4d\x4b\x07\xbd\xff\x2c\xeb\x7f\x33\x0d\x74\x61\x12\xd8\xf5\x1f\x7d\x3c\xf3\x7c\xd7\xfd\x95\xf0\xba\x30\xc3\x63\x0e\x4c\x94\x41\x34\xcd\xbc\x2f\xc8\x68\xfe\xfe\xbe\xf2\xba\x48\x93\x33\xa9\x6f\x7b\x12\x37\x25\x9b\xa3\xfd\xab\x0a\xfc\xc3\xdf\xa7\x44\x23\xe6\xaf\x7a\xc9\xe3\xe5\x6e\x6c\xf0\x1f\xec\xaf\x3c\x13\x9c\xb0\x07\xd8\x6b\x3f\x40\x72\x83\x7f\x95\x37\xc5\xd5\xdc\xf7\x80\x4d\xf3\xe0\xda\x5f\xf9\xfd\x57\x7a\xac\x5d\xf9\xc3\x47\xe8\xed\xd8\x16\x07\xdd\x13\x16\x84\x6a\x98\xe3\x30\x97\xb4\x50\xc9\x2d\xed\x1d\x37\xdc\x62\xe7\xc4\xd4\xb5\xf9\xc1\xb6\xf0\x8d\xca\xfd\xb8\xf1\x9b\x99\xb9\x52\xbf\x51\xf6\xb6\xed\x5e\xd1\x0f\x51\x33\xfb\x22\xb8\xc9\x14\x58\x28\x37\x9b\xea\xaf\x90\x57\xe4\x15\xc9\x44\x1c\xcb\xaa\x24\xcb\x9c\x3a\x3a\x6e\x10\xac\xf2\xae\x4c\x25\x79\xd6\x84\xbe\x80\x45\x7b\x8f\xbf\x7b\x63\x10\x36\xf2\x75\x0a\xc0\x7b\xee\x82\x83\x75\xa8\x45\xa8\x9b\xed\x06\x59\x11\xf2\xa3\x2a\x93\x65\x3e\x3c\x4b\xb1\xf6\xba\xa2\x65\xcc\x1f\x2b\x29\x0c\x48\x65\x52\x59\x7a\xa1\x43\xe5\x36\x5e\x77\xe6\x96\x94\x05\x7c\xf3\xf7\x99\x21\xb1\x38\x30\x00\x60\x12\xbd\x7e\xe3\x9f\x8f\x7a\x71\x85\x54\xb2\xce\xa5\x69\x75\x66\x24\xaf\x09\xb9\x36\x05\x1f\x48\x01\xfd\x22\xaf\xe8\x25\x46\x95\x19\x1b\x8f\x38\x58\x09\xeb\xcc\xf9\xa7\xfc\x17\xa9\xcd\xa9\x51\x21\x68\x72\x91\x85\x8e\xb4\x3b\xba\x06\x54\x4e\x6f\x7d\x5a\x15\x2d\x2a\x6d\xef\x1a\x8e\xd9\x5a\x06\x5a\x12\x75\x77\xb4\x87\xcc\x4c\xd2\x62\xf1\x0a\x21\x1d\x76\x8e\x19\xfd\x08\x7d\x43\x53\x33\xd2\x11\xeb\x7d\x31\x07\xfa\x3c\x29\x9e\x90\xe6\x36\x95\x72\x62\x5a\x03\x2e\x69\xc5\xf8\x0d\xe7\x64\x18\xcb\x81\x72\x60\x4b\xd4\xb2\x54\x88\x5f\xc6\x52\x1e\x82\x2e\x4d\x0c\x88\x96\x56\xa9\x92\x25\x8f\x23\xb4\x92\xd2\x95\x76\x84\xdb\xc0\x68\x66\xf4\xe0\xf9\x2a\x77\x9a\x7e\xfe\x38\xe2\x8c\x7a\x0e\x44\x5b\xc4\x3a\x69\x58\xcc\x6e\xe3\x12\x16\x93\x1f\xd6\x15\xb9\x26\x66\x08\xa6\x8b\x72\xaf\xb4\x20\x9e\xdb\xb6\xeb\x1f\x2a\x66\x65\x58\x3b\x2e\xa6\x21\x2d\x14\x9c\x03\x0f\xfb\x39\x7a\xfc\x0e\xd7\xdc\x7c\xf4\x52\x45\x01\xb0\xa7\xd4\x47\xfc\xa2\xd7\x5b\xf1\x62\xec\x7e\x19\x3b\xb8\xc5\x66\xe6\x2b\x6d\x1e\xb1\x54\x2f\xbf\xfa\x88\x1b\xf8\x0f\x6a\x0c\xb1\xdc\x90\xec\xa6\xb1\x38\x84\x22\xd1\xd7\x2e\xe5\x66\xc8\xd4\xe5\x51\xc4\x9d\x81\x9c\xe9\x29\xb3\xf8\x2d\xad\xa2\x39\x1c\x37\x84\x49\x87\x4f\xff\xae\x18\xa2\x08\x54\x63\x07\x82\x22\x26\xf7\xb9\xcb\x6d\x4b\x4d\x69\x7f\xa0\xda\xe8\x5a\x3c\x55\x64\x18\x54\xd7\xf0\x3f\x3e\x15\x88\xcd\x64\xf7\x4f\x4b\x8a\x5c\x7d\x55\x92\xfd\x70\x5c\x93\x85\x0a\xf1\x59\xde\x67\x8e\x54\x72\xb9\x94\xe4\x51\xfe\xf4\x3b\xb7\xbb\x9c\x54\x34\xc8\x9e\xe6\xc6\xb9\x76\x93\x48\xf7\x63\xc9\x67\x9d\x42\x57\x95\x48\x25\x23\x11\x2b\x13\xe1\xf8\x88\x34\x12\xb0\xa1\xc4\x58\xb3\xa4\xb1\x9c\xfb\x4c\xab\x7d\x32\x8d\x14\x8a\x07\x21\x2c\x9b\x17\x0e\xe5\x70\x8e\x28\xeb\x9e\xca\x96\xdd\x4a\x70\x87\xc2\x28\x73\xe2\x2c\xae\x75\x4b\x8c\x52\x2e\xbf\xff\x29\x65\x41\x7b\xcc\x54\x6e\xc9\x74\x99\x5f\xa4\x69\xc1\x97\x6f\x6c\x42\x0e\xa1\x6a\xe9\x2d\xd9\x3d\x8d\x35\x1d\x83\xd0\xee\x03\x7d\x12\x4f\x86\x61\xfd\xf7\x81\x49\x6d\xcf\x2e\xf7\x50\x4a\xf8\xf4\x0c\xe1\x8b\xb8\x7f\xcf\x36\xb0\xe6\x66\x0e\x1b\xc3\xcc\x56\xa5\x0e\xcf\x84\xc1\x83\xbf\x28\x2d\xfc\x6e\x66\xfc\x13\x39\x80\x81\xf0\x22\xa0\x3d\x4a\xc5\x44\x69\x72\x13\xdf\x4e\x01\x21\x4a\x6a\xfe\xde\xf8\xe2\x51\xa3\x9b\x6a\x0e\x3d\x65\xa5\xcf\x88\x6d\xbb\xec\xdf\xd8\x41\xaf\xb4\x22\x01\xe0\x29\x56\x7a\x4c\x44\x9b\x9c\x06\x1e\x4e\x25\x7d\x6d\x2b\xf7\xb0\x28\x8d\x01\x95\xb5\xc5\xa6\xc7\xbd\x0d\xc5\x8c\x0f\x61\xa0\x1f\xeb\xd9\x8c\xde\xed\xa1\x1d\x03\xe2\x92\x96\x74\x4f\xcc\x29\x0d\x70\xc1\xcc\x83\x64\x50\x59\x67\x79\x5c\
x17\x99\xce\xdf\x2f\xa4\x0c\xe7\x9d\x8b\x30\xa4\x7f\x3b\x02\xe1\xba\x54\x03\x2d\x40\xa3\xaf\x69\x2a\xfa\x46\xdc\xc9\x05\x5f\x78\xf5\xb7\xa2\x3d\xe6\x90\x6b\x93\x5f\x85\xac\x1e\xc4\x2d\x14\x5a\x13\x56\xe4\xec\xa6\xc0\x2a\x61\x67\x4e\xc2\x5e\x80\x0e\x54\xf7\xb8\x96\x4b\x28\xb9\x7d\xcf\x40\x31\x31\x3f\x75\x97\x7c\xa9\x5e\x48\xce\x23\xc4\xd7\xcc\x2f\x81\x1d\xe1\xb1\xd1\xa5\xb4\x50\xdc\x14\x24\xd7\xeb\xf2\xb8\x85\xe1\xa2\xfc\xac\x4e\xad\xb9\x22\xe6\x4b\xe3\xd3\xaf\xc1\x36\x60\x88\x2c\x3c\x04\xa5\x50\x4d\x5b\x24\x66\x4c\x96\xe7\x2b\xe5\xd1\x4a\x2d\xc5\x62\xd1\x3b\xba\x96\xa6\x10\xb6\x86\xe1\x99\xc9\x64\x1a\x75\xd1\x3d\xdc\x1d\x4f\xdd\x6c\xca\x17\xa0\x1c\xb9\x6c\x4c\x7a\x36\xfc\xbd\xfd\xa0\xbf\x77\x85\x77\x3a\x3f\xa4\x7c\x4d\x43\x9c\xfd\x4e\x1e\xa6\x60\x6f\x9e\x63\x43\x31\xfe\x84\x58\xe8\x85\x0d\xdd\xe8\xe4\x9f\xf9\xd6\xa6\x61\xf7\xb5\x14\xe0\xcd\xeb\x83\xb7\xb1\xbc\xd0\xa5\xe6\x3d\x21\x00\xc8\xc2\xb8\xd9\x8d\xe0\x83\x2c\x22\x1b\xf1\xc4\xf8\xd0\x01\xaf\x93\x85\x5d\x78\xea\x11\x7a\x68\x42\xbb\x99\x95\x90\xa4\x78\xff\x20\xf8\x6c\x54\x50\x3b\xc7\x52\x6d\x9c\x53\x83\x1f\x7d\x62\xc8\xb6\x71\x79\xec\xc6\xa9\x03\x94\x52\xc4\x4e\x87\x86\x4e\x80\xa7\xd2\x2e\xc8\x81\xe0\x0b\x97\x89\xd8\xcd\x64\x00\x2e\x9d\xd9\xd0\x25\x0c\x8a\xeb\x88\x7b\x63\xcd\x87\xfe\x7e\x28\xa0\x0f\xfb\x71\x4f\x58\xca\x3b\x9f\xdc\xc2\x53\x49\xbe\x52\xf0\x31\x0f\xbc\x4f\x35\x23\x10\xf3\xb3\x58\x08\xa3\xa4\xf1\x7c\x9e\x03\xe0\x24\x2e\xbe\xbb\xdf\xc0\x63\xe7\x1b\xe4\x91\xe0\x0c\xdf\x2b\x5c\x74\xb6\x63\x89\xc2\x96\x79\x00\x8b\x47\x50\xd0\x63\x2c\xcc\xc2\x7d\x47\xa9\x57\x65\x0d\x0b\x16\x97\x70\xa2\x3b\x43\x13\xb3\x16\x41\x5b\x79\xaf\xd2\x2f\xe7\xa4\xaa\x84\x97\x11\x33\x5e\xb1\x70\x09\x4f\xd2\x1f\x71\x53\x68\xfd\xcf\x3c\x59\x3a\xd6\x76\xe4\xe0\xbf\xf2\x2f\x6e\xf8\xf2\x4b\xcf\xfe\x6d\x0f\x9e\xbd\x3e\x8a\x7f\xe0\x55\x19\xfa\xfc\xe1\x79\x5f\xfd\x2f\xff\xf1\x53\x1b\x7e\x52\xd3\xf3\x9e\x5b\xfe\xf9\xa4\x03\x91\x63\xd2\x42\xc5\xf9\xa5\x63\x23\xec\x8c\x69\x0c\x56\xf5\x2d\x34\xda\x25\x56\xd8\xf6\x7e\xef\x1c\xe2\x82\xd3\xcf\xe5\x4d\xfc\x8c\x91\xfb\x34\x16\xef\x76\x62\xf2\x9c\x05\x68\x3f\x22\x87\xfa\xd8\x00\xe0\x43\x89\x7d\x96\xe5\xba\xcb\xd4\xbb\xfb\xd9\x64\x56\x57\x1f\x82\x70\xa6\xa0\xcf\xd0\xbb\xc6\xd1\xe4\x10\xa2\x16\xf5\x31\x5d\x6a\x98\x9f\xec\x92\xeb\xce\x72\xb3\x5c\x1c\xf3\x87\x74\x68\x61\x11\xb3\xb0\x82\x9b\x02\xfc\x0b\x57\xc5\x7e\x24\xde\x00\x71\x15\x42\x9e\x8f\xf2\x78\x3a\xfe\xda\x71\xb2\x70\x5f\xbb\xa8\x32\x0f\xfe\x6e\x2e\xf7\x9b\xe8\x47\xff\xe3\xd3\x66\x56\xaf\xf8\xeb\x89\xab\x33\x69\xe5\x4b\x91\xc7\xcd\x41\x56\x2e\x90\x21\xbd\xad\x02\x51\xb5\x81\x1b\xcc\xd7\x3d\xf9\xba\x27\x45\xea\xc1\xa5\x7a\xf2\x77\x77\x7e\xd4\x93\x57\xf0\xf6\xff\xb2\x6f\x3a\x5b\x7c\xde\xb6\x2c\x51\x1a\xf9\xc3\x53\x79\x7c\x50\xa4\x14\x61\x3a\x3f\x60\xe6\x55\xcc\xfc\xf4\x41\x7b\x19\xcb\xce\x8e\xfe\x16\x8b\xe4\x55\xcb\xdb\xf0\xc7\x22\xd1\xd2\x2b\xf9\x7f\xff\x52\x91\x3b\x6a\xc3\x1d\xfe\xcc\xae\x08\xab\x14\xf1\x99\x39\xe5\x0f\x6d\x85\x67\xfb\x68\xf6\xd7\x66\x60\xec\x4e\x0b\xa6\xda\x7c\x45\x57\x6f\xcc\xd8\xc8\x27\xdf\xff\xc9\x5b\xb9\x4b\x4a\x9e\x7d\x7c\xfd\xe4\x0f\xfe\x51\x28\x27\x07\x27\x69\x64\x32\xf4\x49\x85\xfd\xe4\x25\x3e\xa5\x1b\x98\xd0\xc6\x2a\x65\x8a\x3f\x7e\xbd\xe9\x26\xf9\xfe\x97\xfd\x25\x9d\xf0\x2b\xdf\x3f\xbf\x11\xff\x22\x15\xc1\x33\x98\xf1\x83\xdb\xf5\x4f\x97\xe7\xf2\x99\xb0\x00\x5a\x07\xfc\xd5\xa4\xdf\xea\x97\xc7\xe3\x0b\x5e\xf9\xd4\xda\x3c\xf5\xab\xb5\x2f\xfb\xab\xff\x77\x40\x03\x71\x65\xe7\x82\xfe\xf8\xfa\x7a\xb5\x07\x20\x25\x9b\xa1\x84\x91\x62\x85\x2e\x54\xa4\x77\x33\x8d\x6b\xbf\xf4\xcf\x3f\xbe\xbd\x01\x87\x08\x6c\x29\x40\x19\xe6\x13\x4b\xd5\x3a\xc2\x11\xfe\x9f
\xbd\x9c\x39\xd4\xe3\xc7\xd7\xcf\x81\x69\x0b\x33\xaa\xc8\x0a\x98\x39\x26\x02\x0d\xe9\x06\xd5\x50\xcd\x39\x24\x74\x83\x70\x1d\xa1\x22\x16\xc4\xa5\xaa\xdc\x9b\xaa\x43\x45\x65\x5f\x4e\x0a\xd2\x6c\xbd\xfe\x83\x39\x44\x68\xb1\xb6\xf6\xcf\x1f\xec\x35\x31\x63\x8e\x6c\x36\xad\x8f\x9e\x14\x77\x3d\x92\xc2\xe4\x89\xbb\x2a\x00\x3a\xd2\x99\x6a\x60\x70\xb1\x11\x5c\x35\x8e\x01\xb2\x75\xf0\xbb\x45\xc2\xd9\x9b\x30\x70\x8b\xdd\x92\x33\x60\x6d\xdd\x90\xf9\xf6\x87\x1f\xff\xe6\x85\xfe\x56\x52\xf7\x2c\xd5\x98\x09\x52\x75\xf6\xa1\x8e\x0b\x7b\x91\xb4\x46\x1b\x6e\x34\x3e\xf6\x57\x6c\x82\xb0\xd7\xbf\xe1\xc7\xd7\x47\xc5\xa2\x68\x85\x2c\x19\x5d\xfd\x86\xfe\x42\x6c\x23\x8b\x21\x8c\x5b\xed\x35\xd8\x95\x6f\xff\xc6\x03\x64\xe7\xdd\x6f\x71\xcf\xf7\x6c\xe1\x06\x9b\x01\xdc\x11\x87\xeb\x1d\x66\x41\xc6\xe6\x02\x6f\xbe\x06\xb9\x38\xd1\xdc\x64\x58\x15\x3b\x64\x7d\xa4\xd7\xe7\xf0\x77\x24\xba\x95\xb6\xed\x53\x4b\x3d\x86\x16\x8a\x1d\x97\xf0\xf3\x06\x99\x6f\x9f\xf0\xb6\x0f\x1f\xdf\xc7\x4f\x5f\x8f\x53\x59\xc6\x7a\xef\x4a\xab\xcd\x49\x69\xfb\x31\x0f\x54\xb5\xca\x6e\xdd\x5e\xc5\x6d\xaf\x79\x6d\xbc\xf4\xc4\x92\xeb\xc1\xfe\xe4\xdd\xff\x88\x60\x7a\x56\x79\x62\x8d\xb3\x6e\xab\x31\x6f\xfd\x92\x99\x17\x35\xb9\x50\x45\x70\xf4\x70\x8e\x81\xee\xc2\x92\xfc\x98\x5b\x06\x29\x76\xbd\x1f\x92\x50\x9e\xdf\x9b\xa2\x0d\x40\x0d\x16\xdc\xad\x29\xbd\x2d\x01\x42\xe2\x18\x4f\xac\x84\x42\xea\x39\xdf\xb8\x5a\xdb\xad\x84\xd0\x5e\xfe\x8d\x0d\x74\xb9\x5d\xcc\x4e\x9c\x22\x32\x91\x15\x4a\xc2\x02\x2a\x2a\x83\xce\xe8\x3d\x2b\x81\xdb\x67\xe6\xac\x57\x7b\xa5\xc3\x93\x2a\xe2\x9f\xa0\x34\xc6\x5a\x48\x5b\x25\x75\xd7\x50\xf3\x3d\xaf\xf9\x41\x48\x91\x4a\x80\xef\x5f\xfe\x5d\x11\xff\xee\x97\xdf\xa1\x3d\x0a\xfb\x9e\x08\x96\x54\xc5\x76\xda\x5b\xba\xa1\x0a\xa7\xd6\xc1\xd6\x8f\xad\x13\x76\xf3\x88\x22\x4e\x9e\xbf\x66\xee\xa4\x76\xa8\xbd\x5f\x79\xb8\x78\xe2\x33\x41\x56\x1c\x34\x93\xe9\xe1\x0c\x6a\x68\xf9\xfd\xf6\x05\x35\xb1\x7f\xd8\x4e\xe3\x32\x26\x39\xe4\x4d\xe3\x95\x2c\x5c\x3b\x73\x43\xde\x7f\x85\x3b\x97\xba\x86\x48\xe6\x61\x1f\x6b\x24\xf5\x2e\x16\xf9\x8c\xdc\x2d\x8f\x18\x06\xa2\xe0\x07\x33\x37\x70\x6b\xed\x68\xb8\x7c\x16\xfc\xef\x25\x28\x87\x31\xee\xaa\x1d\x6e\x7d\x44\x32\xe0\xea\xf9\xe3\x02\xa0\x45\x49\xa3\x0d\x45\x06\xb3\xd0\xbf\x4c\x29\x78\x32\xf8\x05\xfc\x67\xa5\x41\xd0\x1d\xe6\x80\xcd\x25\xe6\x41\x48\x26\xba\xfe\x8e\x3f\xe7\x8f\x47\x7c\xdf\x9f\xb8\x4f\xf3\x74\x18\x76\x03\x93\xe3\xaf\x28\x90\x70\xc4\xb4\x5e\x16\x56\x52\xd1\x6e\x72\x43\xf7\xdc\x1f\x98\xfd\xee\x18\x76\x74\xa5\x05\xd3\x45\xc2\x06\x32\xe5\xde\xe1\x0b\xeb\x76\x62\x4e\xc1\x13\x7c\xc8\x59\xf8\xe9\xc5\x5a\x99\xdf\xbd\xed\x4a\x0b\x3a\x48\x12\xf6\xc7\xbd\x02\xa4\xa5\x1d\x17\xb5\xaa\x0e\xc1\xfc\x82\xd9\xb1\x85\x30\xa4\xfb\x46\x05\x6b\xa2\x4d\xd4\x64\x9f\x37\x96\x6e\xdf\x3f\xfc\xbb\x7a\xb5\x81\x54\x7f\x58\x40\xc8\x95\x62\x5e\xaf\xf5\x0e\x44\x5e\xac\xdb\xab\x91\xee\xe3\xd3\xbf\x2b\xba\x53\xaa\x00\xe7\xac\x8c\xc7\x9a\x9b\xe2\xbe\x17\x00\x04\x6b\x35\x5e\x9b\xc2\xb1\x7c\x7f\x35\x46\xb9\x21\xe0\x3e\x9e\xd2\xb0\xf5\x7e\x61\xb9\x7f\x44\x34\xaa\x84\x93\x60\xc7\xc9\x73\x50\xc4\xec\xb3\x44\xca\xfa\xc7\x2a\xf8\xf8\xa2\x32\xcc\x9c\xb7\x80\xb8\xb3\xc7\x17\x64\x6c\xea\x98\x1c\xbf\xc5\xc8\xb2\x07\xf0\x27\x08\x2a\xbd\x80\x0b\x05\x53\x74\xca\x50\x20\x12\xf3\x4e\x30\xbb\x39\x66\xac\x02\x79\x96\x06\x9e\x08\x52\x48\x28\xb4\x79\xd2\x90\xb5\x95\xe9\x31\xf2\xb7\xed\x2b\x2f\xe0\x69\x6f\x81\x62\x72\xb8\x48\x2c\x32\xbe\x28\x37\xb4\x60\x66\xfd\x59\x34\x6f\x8a\x1d\x54\x3e\x95\xd8\xba\xb5\x51\x57\x44\x5b\x34\x66\xe3\x1d\x3f\x8b\x44\xb9\x94\x85\x16\xc2\x0a\xd7\xd2\x51\x73\xce\xe7\xac\x6b\x55\xda\x70\xf4\x90\x14\xb1\x96\xfd\xbf\x1a\x1d\x4f\xf8\xa2\xa
e\xb2\x05\xc8\x04\x9c\x9b\xeb\x8a\x15\xff\xcb\xdf\xbc\xba\x30\xf0\x55\xa9\x6e\x87\x9c\x94\x89\x83\xda\x2c\xc6\x87\x3e\xc9\x65\x52\xe5\x18\x53\x15\xc0\x2c\x98\xa9\x55\x23\x80\xa7\x3a\x14\x38\xd8\x19\x40\x1c\xeb\xf0\x3d\x22\xb0\x54\xb3\xf9\xd3\x20\xf4\x58\x5a\x47\x94\xc0\x27\x76\x3c\xec\x5d\x7b\xfa\xf7\x11\xe8\x19\x95\x73\x74\xfc\xf6\xef\x0a\x21\x2c\xa0\x8b\xc4\x7b\x75\x9e\x09\x1a\x1e\x52\x44\xc4\x1c\xc5\x93\x94\xa5\x57\xc9\x99\x1d\xf9\xc3\x37\x8e\x79\x4a\xbc\x2f\xd3\xfb\x14\x9f\x75\x08\x14\x09\xbb\x74\x82\x48\xb7\x95\x4d\x5f\x8e\x2a\x45\xad\x1e\x05\x7d\xec\xd2\x1b\xf2\x5f\x15\x01\xd3\x32\x38\x40\x86\x4c\xb8\x13\x93\xd1\x11\xc9\x8c\x2e\x5f\x5c\x25\x0c\xf8\x82\x5c\x84\xf5\x89\x58\x33\x5b\xec\x4d\x94\xd9\x0a\x5f\x80\xdd\x78\x78\xc6\x44\x67\xd3\x05\x09\xf9\xff\xd7\xd9\x6b\xde\x0f\x4b\x12\xad\x29\xd9\xb2\xf5\x5e\x76\x2a\x49\xab\x0a\x30\x0a\xf3\x4d\x2b\x04\x3e\x43\xa4\x85\x08\x25\x2e\xf3\xa6\x27\x15\xfb\x81\x05\xae\x98\x15\x4c\x84\x70\x2d\xa5\x5e\x88\xdc\x1f\xed\xeb\x17\xf7\x38\xd4\x03\xd1\x9b\x0b\x50\xf5\xaf\xe6\x22\x77\x47\x59\xf7\xeb\x4d\x1c\xb9\x12\xb1\xa3\x51\x31\xbb\x4e\x2b\xbd\x86\x25\xf4\xbe\x84\xa6\x40\x94\x9f\x18\xf2\x3e\xb0\x8b\x97\xc5\x0f\xb8\x96\x4c\x53\xbe\x09\xda\x65\x8a\xb4\x66\x9b\x12\x7b\x78\xfd\x0d\xce\xb1\x45\xf4\x66\x47\x34\x13\x2b\x96\x99\x7a\xf4\x22\x14\xf6\x48\xcb\x07\x24\xbc\x2a\xac\x91\xb9\x8c\x39\x5d\x82\x1d\xc8\xea\xd5\x0e\x4b\xdd\xe0\x0d\x78\xb8\x12\xc2\xd5\xa9\x70\x72\xe9\x1e\x46\xd7\x22\x0e\x83\x59\xec\x9a\xb0\x71\x57\xff\x8b\xa0\x52\xbb\xdc\x66\x34\xbb\xf8\xb5\x96\x86\x16\x4f\x08\x94\xf4\xf2\xdc\x0b\xe2\x6e\xc1\x09\x72\x9a\xee\x91\xab\x51\x75\x11\x48\x74\x2e\x50\x17\xc7\xe2\x09\x37\xb9\x04\xe6\x28\x7a\x22\x9a\xfd\x7a\x59\x3f\xaa\x00\x63\x1c\x99\x2f\xb6\xf0\x68\x64\x8f\x66\x8c\xa7\x86\x40\xaf\xe7\xf3\xaf\xb3\xa6\x57\x00\xec\xe9\x1e\xc8\xa7\x6a\x87\x0e\x0a\xca\x54\x32\xa6\xb7\x6b\xc1\x66\x9b\x0b\xd0\x15\x5d\x5e\xd4\xe3\xe2\xea\x44\xb0\x2e\x73\xf9\x4b\x0a\xcd\x93\x8b\xaa\x64\xd8\x5a\x9c\x94\xbc\xf3\xd4\x24\xde\x6f\x0c\x4f\x02\x47\xed\xa0\x13\xeb\x8d\xe2\x16\x32\xba\x2d\x74\x36\xa3\xa4\x7f\x2f\xfc\x19\x77\x8e\x67\x82\x28\x3a\xc0\xd0\xa1\x02\xdd\xc6\x60\x07\xa5\xbe\xdd\x20\xfa\x7d\xe4\x7e\xf2\xff\x2a\xf8\xa9\x3f\x9f\xc7\xea\xd5\x99\xcd\x2a\x5d\xf7\x82\x18\x48\x0b\x9b\xc7\x52\xfc\xe3\x87\xe9\xb6\x5d\x49\x5f\x13\x01\x75\x63\xc6\x50\xfa\x1a\xec\x46\xe9\xa9\xd9\xbd\xb4\x2c\x0f\xb3\x3f\x1d\xa6\x0c\xbd\xc7\x10\x7b\x40\x14\x51\xbb\xba\xc4\xc9\x7d\x41\x3c\x7a\xd4\x99\x87\x12\x9e\x35\xcd\x88\x29\x33\x3a\xa9\xe1\x89\x75\xb0\x07\x03\x74\x0f\x44\x05\x6e\xb2\xbb\x89\x75\x45\xec\x25\xd3\xe7\xa5\x5b\xdd\x71\xfa\x29\x76\x4c\xba\x74\x93\x72\xad\x16\xb5\x11\x16\xd4\x94\xfe\xba\xd1\x4e\x35\x11\x63\xee\x87\x69\xb5\xee\xd4\xc5\x1b\xd1\x91\x5b\x41\x0c\x12\x1e\xcc\xae\x59\x3f\xaa\x15\xbd\x20\x87\x49\xf0\x38\xd0\x54\xe4\xfb\x89\xda\x27\xaf\x89\x35\x99\xbc\x74\x61\x1d\x25\x05\x44\x14\xb1\x81\x1a\x09\xcd\x3d\xae\xea\x70\x58\xdc\x26\x63\x47\x4b\xc1\xd5\x53\xd4\x61\xc9\xcc\x79\x9b\xc9\x0a\x34\xf4\xe6\xc3\x40\x4f\xd8\x06\xb1\xc7\x41\x9e\x17\x65\x9f\x70\xbc\x73\x69\x58\x3f\x87\x6b\x69\x8c\x84\xa0\xe6\x56\x2a\x58\xdd\xe6\x4a\x9f\x16\x37\xe2\x34\xa8\xa7\xc5\x34\x5e\x8b\xa5\x99\xa2\x9f\x2c\xc2\xf7\x4d\xd6\xa0\x3f\x2e\x05\x76\x70\x8b\xf5\x8b\xb4\xe0\xba\x0f\x04\x22\x1e\x7e\xfd\xbb\xa2\xb1\x53\x1d\xd6\xac\x27\x0d\xe8\x29\x2c\x82\x00\x2c\x7b\x57\x0b\x62\x1d\x65\x38\x01\x45\x20\x45\x80\xb7\xc9\xe1\x08\xdb\x92\xf7\x6c\x16\x37\xcf\x29\xca\x20\x65\x0b\x0e\x59\xbc\x5c\xa4\x52\x3c\x25\xbb\xd2\x76\xba\x72\x3c\x13\x18\x98\x77\x57\x05\x74\xb7\x1c\xce\xc2\xa0\xbd\xb7\xcd\x2e\xd5\x32\xd3\x66\xb0\xbc\xf8\xaa\x4a\x2e\x
ae\xfd\x80\xc4\xb0\xe1\x2c\xf7\xdc\xda\x38\x5c\x89\xf8\xc3\x27\x40\x49\x78\xb0\x34\x31\x1b\xe2\x29\x7b\xbc\x93\x83\xf5\xd8\x28\xe1\x65\x79\x61\x86\xbd\xe3\x53\x47\xa4\xf1\x9e\x6d\xe1\x2b\x52\xe9\x5a\x74\x38\x28\x0b\xd5\xad\x63\x7d\x2a\x04\xec\x0c\x02\x47\x17\x66\x66\xc0\x00\x5a\x87\xfb\x91\x85\x72\x18\x77\xfb\x2b\x4a\x95\x90\x73\x2c\xf4\xf9\xee\x33\xf7\x20\x67\xf5\x92\xf4\x41\x4c\xce\x9b\xc3\x2f\xfb\xcd\x4c\x95\x93\x57\x28\xb3\x00\x47\x92\xa8\x74\xf9\x44\x27\x47\xb8\x52\x31\xed\xee\xf3\x32\x63\xb8\x32\x98\x76\x0c\xe1\x40\xe1\xf1\x18\x46\xc9\x13\x74\x90\x42\x53\x0f\x8f\xd1\x77\x81\x47\x49\x1d\x4a\x2e\x31\x40\x7c\xf6\x81\xfd\x97\x08\xdf\x36\xa0\x20\xa0\x41\xe9\x94\x45\x7c\xf1\xa2\x7b\x79\xdd\xb6\x9f\xb2\x14\x5e\xda\x52\x60\x63\xdb\x61\xf2\xca\x9c\x7f\xbc\x2e\x44\x30\x6c\x05\xb4\x6a\xfe\x83\xad\x02\x2e\xa1\x77\x48\x34\xd5\xe5\xc8\xb3\xec\x62\x21\xc8\x86\xbf\x2f\x1b\xc1\x1c\xd0\x69\x07\x8e\x10\xbd\x66\xaf\x77\x59\x77\x6e\x3e\x99\x19\x9e\x61\xa6\x55\x35\x93\x31\x64\xf2\x30\x30\x28\x19\x93\x5a\x83\xa0\x1e\xa0\x7b\x5c\x19\xf2\x54\x09\x1d\x0f\xc0\x15\x42\xae\xa5\xf4\xc4\x83\xed\x82\xd5\xe6\x50\xe0\xbd\xe8\xcf\x64\x5a\xc3\xac\x20\xdb\x96\x4c\xe0\x3c\xfb\x53\xd6\x02\xf8\x3c\xb4\x65\xaf\x65\x21\x99\xb2\x5f\xc6\x1d\x26\x29\x9b\xee\x1e\x0a\x78\x55\x96\x61\xb9\x34\xac\x3d\x02\x16\x51\x01\x6d\x3e\x35\x40\x42\x3c\x04\x03\x48\xd3\x29\xb2\xe3\x15\xe2\x2e\xf2\x09\x27\x5d\x60\x45\xfb\x84\xba\x82\x43\xa5\x10\x5a\x0d\x81\x87\x73\x96\x4e\x9a\x97\x0c\xac\x83\xc5\xa3\xbe\xc9\xfe\xc7\xe6\x9b\xb3\x74\x6e\x47\x92\xde\x94\x6d\x20\x53\x14\x0f\x7c\xd9\x03\x74\x1e\x11\x65\x69\xb7\x0c\x25\x6b\xb6\x1b\x7e\x2b\xc3\x42\x0f\x2c\xf7\x8e\x92\x57\x68\x59\xcc\x09\xe0\xfb\x09\x56\x79\x60\xae\x1f\x19\xc2\xbc\xa0\x47\xff\xb6\xa4\x52\xf5\x7f\x21\x24\x9c\xba\xa5\x40\x6a\xe6\x85\x0b\xee\xbd\x04\x0c\x6d\x28\xdd\x8e\xf2\xb3\x1f\x9f\x5f\xfe\x0d\xe9\x9c\x4e\xfb\x5f\xb5\x19\x4b\xfe\xa0\xe5\xe8\xf7\xa5\x40\xb1\xd1\xf2\x05\x99\xb9\x1f\x9f\x97\xee\xc7\x73\x7f\x4f\xa1\xa6\xcc\xd7\xdf\x98\x48\x66\xef\x26\x91\x0d\xbe\xe2\x66\x6e\x19\x1a\x00\xd9\x59\xbd\xfa\x12\x8a\xb8\xe2\x73\xfb\x87\x5d\x3c\x5f\x7a\x89\x57\x21\x6d\x7d\x83\x80\x8f\xfa\x78\xf5\x8d\xad\xee\xd3\x95\x1e\xc3\x85\x2d\xb1\x53\x27\x4c\xee\x69\x27\xac\xcb\xeb\x83\x35\xa3\xbd\x9c\x86\x52\xa0\x2f\x28\x24\xf4\xae\x03\xf6\x90\xc6\xd1\x96\x1d\x0b\x02\x64\x33\x43\xef\x41\xb8\xf9\xfb\xc8\x02\xed\x39\x2e\x82\x1c\x78\x4f\x57\x89\xfe\xfb\x58\x58\x1d\xd6\x15\x79\xa5\xa6\x64\xdf\x46\x8b\x2b\xa1\xf2\x4a\x5b\x07\x0f\x91\x5a\x2b\x04\x18\x87\xde\xa6\x33\xeb\x91\xa9\x84\x9d\x4b\xc7\xf8\xbc\x62\x24\x53\xda\xab\x4d\x66\xfd\xe3\x04\x9c\x29\x66\xeb\x51\x0a\xcd\xb2\x16\xd9\x7c\xa1\x9d\x39\xd6\x2f\x24\x6b\x23\x81\xed\xef\x5f\xc7\x0f\x24\x94\x04\xea\xc1\x96\x25\x35\xec\xd8\x05\x44\xc7\x7a\xe2\x0e\xef\x04\xc0\x0b\xfa\x56\xba\x69\x70\xfe\x14\xbb\xc6\x9b\xa2\x08\x28\xcc\xd3\xae\xca\x36\x9a\x66\x2a\x28\x7a\x0b\x83\x82\xaf\x97\xd9\x33\x05\x8c\xbb\x23\x1e\x29\x4d\x93\x6e\xfc\x66\xfb\x40\xa4\x8e\xfb\x7c\xfe\x8c\x99\xd4\xc7\x5f\x9e\xb5\x42\x1a\x88\x18\x2b\xc7\x51\x9d\x86\xac\x16\x2d\xef\xb0\xc0\x0e\xd9\xdf\xed\x38\x78\xf0\x0a\xfe\xa3\x1e\xa0\x5a\x2c\xde\xe1\x29\x14\xfd\x0b\x62\x0f\x7b\x2d\xa5\x27\xbb\x69\x52\xb2\x2a\x2b\x59\xd5\x2c\xca\xf9\xc4\xbb\xda\x65\x92\x97\x3e\x4a\xc7\x9a\xf2\x1e\x43\x40\xf0\x62\x6a\x5f\x88\x0a\xd7\xc7\x5c\x1c\xf3\xca\xbc\x29\xa4\x52\x45\x11\x6f\xe9\x96\xac\x62\x3f\x85\x44\xc1\x71\x6e\x15\xaa\xbf\x6a\xa4\x35\x0d\x4d\xd4\x67\x2c\x46\xc0\x85\xa1\xa7\xf3\xe8\x19\x13\x75\x2b\x98\x63\xca\x48\xea\xb2\x2f\x6a\x36\x50\xd2\x25\xa3\x39\x95\x65\xa7\x9b\xaa\x75\x38\x5c\x7e\x87\x17\x2f\x96\x2b\xc3\xb7\xa9\x7c\
x7f\x43\x3e\x8d\xa5\x71\x46\x93\x66\x1b\x6d\x69\x89\x51\x31\xa1\xe9\x91\x6e\x06\x59\x8d\x9e\xaa\x69\xbd\xe6\x2c\x03\x1e\xdc\xb5\x64\x5b\xeb\xf4\xa4\xb5\xf3\x83\x95\x8a\x33\x40\xdc\x89\x79\x41\x2c\x76\x7b\x3f\x0c\x53\x48\x4d\x66\xc2\xa5\x15\x5d\xc5\xbe\x8e\x0d\x85\x44\x08\x90\x8b\x43\xcb\xe8\x8a\xd8\x71\xbf\x28\x80\xf3\x15\x01\x42\x2d\x35\x0f\x6a\x7e\x73\x72\x96\x91\x91\xfb\xb6\xb7\xc8\x0a\xbe\xbf\xbf\x1a\xa7\xd7\x59\x4d\x78\xe6\x51\x13\x23\xe5\x8a\xb5\x34\xa7\x9a\xad\xe9\x05\x7e\x32\x6d\x31\x6a\xd3\xc0\xd5\x82\xb8\x41\x81\x74\xbc\x37\x05\x34\xee\x72\x8d\xf4\x4a\xf2\x92\x4b\x82\xda\xb0\xcd\x56\xb6\xc3\x8e\xcd\x1d\x77\xb9\xf5\x0a\xfb\x87\x53\xa1\xd0\x0a\x0d\xff\x1a\x75\x0b\xe6\xa5\xc6\xd0\x14\xaa\x17\x77\x73\x85\x2b\xbe\x7a\x57\xda\x7f\x78\x7d\x08\xad\x09\x4f\x5c\xbd\x57\xd2\xe5\x08\x85\x7f\xe6\x77\xa5\xbf\xc2\x3b\x4c\x15\x9d\x1a\xf7\x72\x70\x4e\x08\xee\x9f\xe7\x17\xce\xdf\x4c\x57\x9b\x80\x8c\x99\x8a\x13\x6c\x21\x65\x81\xc9\x5e\x4d\x3f\x21\x42\xb9\xfd\xd5\x38\x92\x52\xee\x6b\xf2\x30\xf3\x85\x09\xe0\x26\x75\x4b\xde\x67\x7a\x01\xf1\x8a\x3f\x2b\xa9\xa3\xee\x95\xec\x6c\xf3\xde\xf5\x72\xbc\x3d\xd4\x82\xbd\x9c\x76\xb5\x2e\x0f\x2c\x81\x0c\x16\xe7\xa4\x19\x66\x7b\xb7\x43\xc5\x93\xe8\x9d\x35\x2c\x93\xe4\x41\x11\xf1\x95\xfc\x20\xb7\x54\x17\xae\x8f\x91\x78\x07\x33\x66\x9f\x90\xb9\x99\x39\xf0\x93\x19\xd2\x27\xfb\x40\xd9\xc3\x4c\xae\x1a\x78\xd1\x9e\xf4\xe3\xf5\x9b\x1a\x61\x53\x98\xe7\x71\xfc\x03\xf0\xe5\x5b\x93\x0b\x8f\x93\xe7\x82\x3b\x1c\x6c\x4f\x89\x88\xb3\x6b\x6f\x21\xce\x26\x8b\x8e\xf7\xb4\x47\x3f\x87\xd3\x7e\xb9\xf8\x9d\x7f\x7f\x1c\x18\x80\x36\x25\x62\xf3\x46\x14\x38\x14\x28\x37\x75\x92\xfe\x8d\xaa\x28\xf2\x50\xda\xfe\xb3\xa1\x19\x3b\x5d\x87\x9a\x7b\x9d\x88\x74\x6f\xbc\x95\x44\x59\xb7\x41\x71\x42\xe9\xe3\x01\x3a\xe1\xc0\xd3\xbc\xc8\x99\x43\xd6\x07\x3c\x44\x08\x87\x5e\x9d\xce\xab\x85\x95\x38\xdc\xab\x30\x86\xa5\x2c\x6d\x87\xb6\xe8\x1c\xd1\x45\x8c\x21\x7a\x30\x56\x09\xc5\xec\x1f\xf0\x42\xfa\x3a\x7d\x0f\xfd\x50\x25\x5f\x1f\x19\x4b\xec\x35\x66\x26\xb1\x7b\x6a\x39\xbb\x46\x75\x86\x21\x27\x46\xb2\xaf\x42\xc9\xd3\x79\x73\x3c\x37\x4b\xa4\x56\x70\xbe\x6a\xfb\x58\xdf\x89\x30\x17\xbe\x12\x3b\x66\x74\x05\x72\x47\x6b\xdf\x27\x19\x12\x6f\x3d\x13\xfe\xdd\xd3\x2e\xca\xf4\x1f\x7e\x7f\xd4\x92\xce\x2c\xa9\xc0\x13\xa8\x94\xbf\x6a\xe5\x82\xb4\xad\x68\x02\xf5\x07\x8e\x00\x56\x01\x73\xd8\x45\x9e\x40\xa2\x30\x44\xcc\x0e\xd0\x62\x4e\x68\x63\x11\x1a\xf0\x33\x04\x45\xe1\xc1\x9e\x32\x3b\xbf\x44\xf2\x1c\xde\x90\xd0\x0c\x43\x4d\xab\x88\x32\x1d\x8e\x4b\x74\x4e\x9f\x85\xa4\x19\xbb\x7b\x7f\x44\x0b\xfa\x77\xe5\x2a\xaf\x94\xb7\x98\xd7\x3d\xfc\xfe\xfe\x60\x23\x2f\xd4\x39\xb7\x9c\x4f\xfe\x5f\x45\xa7\xb2\x62\x9e\x1e\x2a\x3a\x31\x09\xdf\x98\xd2\x5c\xa8\x66\xfe\xd7\xff\x72\x45\x3d\x3d\xce\x74\x9e\x2d\x40\x4f\x6b\x2f\x55\x6b\xce\x2f\x33\x74\x57\xdb\x30\x3a\xb0\x2c\x7c\xb1\x43\xd6\x9b\x47\xd4\xa6\x1c\xd8\xc2\xe3\xbd\xbc\x5b\xa0\xab\x33\x09\x0c\x89\xb7\x1d\x42\x6d\x4b\xe3\xeb\x1f\x27\x66\x90\x18\x33\xd7\xc4\x4d\x8d\xe1\xd1\xa3\x45\x79\x89\xad\x4a\x33\x5d\xbe\xe6\x81\xba\x23\x8e\x34\x06\x9f\x6f\xfe\xed\x4f\x95\x43\xb7\x14\x8a\x23\x81\x4a\x59\x1d\x74\xac\x05\xf1\xce\x93\x99\x78\xdc\x66\x1b\xd7\xcb\x99\xfd\x0c\x7b\xdb\x0e\x6a\xdb\x2e\xfe\xf5\xe8\x5d\x18\xe8\xb2\xef\x05\x2b\x3e\x7b\x89\x1f\xde\xb0\x49\xc9\x91\xa7\x44\xad\x26\x75\x7a\x32\xf9\xb2\xa6\x7b\xb1\xf2\x05\x26\x66\x96\x4f\x1d\x84\xce\xbd\xb2\xaa\x15\x3c\xda\x5a\xab\x6c\x66\xae\x2f\xcd\x8b\xa7\xa1\xde\x0f\x2c\xdb\x99\x0b\x44\x6d\x77\x1a\xe4\x58\x9a\x1d\x07\x3b\x88\x19\x99\x61\xc9\x81\x25\xa4\x15\xc0\xcd\xc6\x42\xf7\x01\x49\xc9\x7a\x79\x6b\xdf\x17\x4a\xf0\x52\xe8\x98\x6e\xb1\x2d\x4e\x64\xb8
[... escaped binary payload from a generated/vendored file elided; the original `\x..` byte string is not human-readable and its line wrapping splits escape sequences ...]
6c\x05\xb9\x98\x70\x06\xaa\x57\xc4\x00\x21\x7b\x3a\x4a\x01\x60\xec\xf0\x5c\x88\x99\xee\x0b\x64\x02\x23\x45\x35\x65\x73\x38\x75\x1b\xb9\x18\x6e\x1e\xe2\xc1\x46\x1d\xd7\xa1\x51\xab\x30\x4b\x29\x5f\x26\x38\xad\xeb\x26\x8f\x64\x44\x6c\x35\x89\x2c\x27\xac\x24\x44\x08\x4f\x70\x15\x93\xc7\xa7\x62\xcb\xa9\x7e\x33\x3c\xe6\x95\x8f\xe6\x28\x81\x3a\x15\x62\xd6\xd3\x10\xca\x9c\x77\x8c\x73\x26\x0c\xe6\x64\xee\xba\x43\x78\x2a\x40\xde\x35\x36\xf8\xf7\x39\xad\x1f\x55\x81\x83\x54\x2f\xa2\x20\x84\x21\x9e\x91\x62\x66\x3d\x9c\x4e\x9a\x14\x99\xbb\x91\x63\x29\x9d\x2d\x6d\x64\xa4\x83\x6e\x66\x61\xdf\xf2\xce\xde\x1c\x9f\x95\x4b\x9c\x84\xd7\xe4\x84\x1b\x6b\x31\xd8\x9c\x4e\x9f\xe2\xa5\x1c\x2e\xca\xb6\x0d\xfb\x69\xfb\xc0\x91\xdf\x08\xcf\x53\xd6\x70\x5b\x9c\x21\x08\xba\xe2\x6a\x87\xbc\x75\xdf\x04\x4d\x14\x15\x01\xf3\x0f\x98\xab\xed\xd1\xb0\xb6\x14\xfe\x93\x53\xcb\x53\x19\xd0\xeb\x89\x47\xae\x52\xb3\xbc\x12\x32\x0e\xd9\xeb\xd9\xde\xda\x16\x1e\x6b\xb1\xc0\xda\xa5\xaf\x9a\x72\xf8\x1b\xeb\x0b\xa9\x28\x16\xe8\x30\x0b\x38\xbc\xed\x9b\xd5\x76\x99\xab\x7a\x89\x1a\xa6\x50\x3b\x15\x29\xfa\xc9\x7c\x5c\xfe\x0c\xb5\xd2\xef\xa7\xf1\xdf\xad\x7a\xcd\x26\xa8\xc8\x9e\xce\x86\xf3\xe8\xa3\xdc\xc8\xfc\x1f\xb7\xeb\xed\xe6\x4b\x10\x6f\x8f\x1d\xef\xe1\x85\x2e\x7f\x57\x0b\xfc\x18\xfb\x5d\xd2\xca\x91\x9a\x97\x21\x84\x8d\xd3\xaa\x86\x32\x75\xee\x26\x34\xba\xbd\x61\xb3\x1e\xa0\x3e\x12\x49\xb6\x0f\x4e\x56\x26\xfe\x4a\x3b\x47\xc2\x23\xdf\xdf\x3f\xaf\x3f\x1c\x10\xd8\xf9\xeb\x1e\xb7\xeb\x7d\x46\x5e\x6e\xea\x6e\xd9\x55\x85\xb3\x95\xb2\x7c\x83\x46\xa7\x8a\x1c\xe3\x2e\x11\x0e\xa0\xbb\x05\x38\x05\x05\x6b\x6e\x4a\x81\xff\x2a\x64\xdc\x5c\x37\xb3\x59\x24\x2d\x13\x53\xb6\x5e\xe2\x81\x43\xba\xe6\x41\x94\x66\x62\x5e\x9f\x2f\xf7\x88\x9a\xd0\x9c\x86\x2c\xd0\x90\xf9\x50\x44\x1b\xed\x83\xff\x07\xe5\xa0\x29\x2f\xa9\xd9\x40\x3c\x02\xa3\x4f\x7e\xcf\x21\x8a\x8d\xe7\xb2\xd7\x2a\x38\xea\x4e\x51\x6c\xc1\xe3\x95\xa4\x3f\x8f\xf5\xeb\xe6\xb7\x89\x19\x8f\x28\xb8\xbe\xa2\x88\xb3\x68\xda\x1d\xfc\xb5\x91\xfb\x62\x29\x3c\xf1\x07\x72\xbd\xa8\x64\x2a\x3c\x40\x21\xcc\xae\x43\x2b\x6e\xc2\xb0\x6e\x3d\x0e\x5c\xd8\x95\x8b\x06\x8d\xa0\x02\x76\xd1\x7a\x7e\x1d\xbe\x7e\x91\xc9\xf8\xfc\x9e\x66\xf0\xa3\x7d\x7e\xfd\xeb\xef\xa8\x84\x8e\xd3\x35\x92\x27\x6d\x1b\xf6\xeb\x82\xaa\xf4\xe9\xdf\xe5\xf6\x39\x56\xa5\xc9\x57\x44\x2e\x7e\xcc\x99\xa1\xb3\xd3\x36\x11\x4a\x26\xc4\x61\x67\xae\x03\x5b\x81\x6b\x5d\x61\x97\xbb\x32\x18\x0a\x25\x54\xe5\x7b\x0a\x05\xf7\x92\x6f\x65\x24\xba\x1a\x75\x56\x8b\x81\x36\x3d\x20\x22\x1e\xa3\x53\x99\x63\x18\x73\x4f\x2d\xe0\x28\x03\x5c\x62\xab\x86\xe0\xc2\x81\x4d\x44\xe4\x92\x4a\xf7\xd9\xfb\xed\x1f\x92\xb1\xde\xc3\x3e\x90\x24\x29\x4f\xa2\xb2\xf6\x7c\x20\x67\x82\xf8\xe0\xbe\x24\xf7\x61\xea\x34\x66\xd7\x87\xa2\x8b\x60\x76\x5d\x81\xfe\x58\x1c\x02\x2d\x62\xd9\x3b\x7b\x8d\xc8\xa2\xc8\xf8\xca\xdf\x8f\xc3\xaf\x9c\x6f\xa2\x25\x3a\xb3\xd9\xbd\xc5\x6b\x61\x49\x6a\xe0\x14\xee\xf1\xa1\xa9\xac\xd3\xff\xcd\xd4\x95\x6c\xa9\xae\x04\xc7\xbd\xff\xc2\x2c\xbd\x6a\x66\x58\xf9\x4b\xbc\xd0\x50\x12\x42\x13\x68\x44\x9c\xe3\x7f\x77\x46\x44\x16\xd7\xef\xbe\x3e\x57\x07\xba\x6f\x83\xa8\xca\xca\x8c\x8c\x8c\xb0\x7d\xeb\x17\x11\x16\x88\x23\x12\x69\xff\xf3\xf5\xb7\x85\xcc\x6e\x22\x38\x87\x8d\xd2\xfd\x47\xf5\x78\x06\xcb\x06\x79\xe9\x85\x61\x50\x51\x11\x90\xeb\xab\x0a\xb1\xc5\xc7\x9b\x14\x6c\x29\x33\x6e\xdb\x49\x3a\x38\x49\x5c\x62\xb4\xfb\xf8\x7d\xdc\xef\x98\x5e\x57\x2b\x4e\xd0\x3c\x8b\xfa\x00\xb2\x22\x35\x86\xb0\xd3\xc9\xc5\x64\x73\x56\x88\xc6\x1a\x2c\xdd\xe1\xd4\x3a\x04\x30\x99\x4d\xc8\x00\x8b\x1d\x62\x97\xa5\xfb\x4f\x3e\x5f\xbb\xfb\x46\x6a\xb9\x74\x74\x53\xcd\x25\xe3\x18\xdc\x46\x65\xeb\x25\x3f\xb8\x59\xbc\
x96\x45\xc7\xe7\x70\xaa\x6a\xbc\xcd\xbf\xcb\x31\xdf\x9e\x1c\x87\xa4\x06\x0b\x85\x7a\xbe\x53\x5b\x64\x6f\x45\x86\x97\xcc\x0d\x27\x28\x40\x72\xfd\x6c\x6d\xbf\xde\x76\xde\xd0\x65\x72\x63\x9f\xeb\x89\x86\x5b\x2e\xbd\x48\xfa\xb0\xb7\x73\xb9\xd2\xba\xc4\x95\xa8\xfa\x62\xf2\x64\x01\xd0\x64\xc8\x75\x22\x91\xed\xee\xba\x61\x40\xdf\x33\x57\x87\xe8\x5c\x6f\x27\xf1\x59\x99\xca\x15\x06\xe6\x27\x1b\xb5\x90\x5a\xe2\xcf\x3f\x31\x57\xa4\x0d\x1f\x19\x59\xb4\x5f\xfa\x8d\x63\x0b\xa5\xea\xed\x54\xcf\x23\x14\xe9\x69\xb4\x85\x86\x35\x3a\xd2\xbd\xe7\x2a\x28\x46\x2c\x80\x45\x1c\xa1\xf4\x16\x98\x5a\xbd\x4c\xc3\x12\x0d\x31\x22\xa9\x60\x64\x68\x5e\xe2\xa0\x27\x76\xf6\x55\xaa\x9f\x68\xf1\xe2\xfa\xa3\x18\xb1\x12\x8d\xa2\x71\xaf\x3a\xc0\x8b\x95\x5a\x41\x57\x3b\x27\x37\xc4\x58\x94\x17\x3b\x21\xfd\x3b\xb9\x60\xee\xe4\x91\xb8\xa3\xaa\x4c\x54\x9c\x28\x96\xfc\xa9\x0a\x05\x5d\x34\xa7\xb4\x96\x89\x73\x4d\x64\x06\xcd\xf8\x60\xeb\xdf\x9b\x99\x65\xf3\x9b\xf3\x8e\x2a\x2a\x1a\x81\xd4\xb0\xa1\xc5\x8c\xde\x69\xd3\x10\xa6\x90\x8f\x2b\xfc\x31\x24\xd5\xd4\x53\x04\xde\xe7\x41\x74\x2a\x42\xf2\xf4\x23\xb4\x37\xb3\xe4\x4f\x95\xdd\xec\xb2\x01\xc0\x2b\xb6\x88\x24\x3a\xb6\x6f\xe7\xb6\xc8\x28\xa4\x86\x7e\x99\x74\xcd\xaf\xae\x68\xa9\x14\x02\x0e\x12\x3f\xef\xc1\x03\xd3\x7d\xb4\xf0\x48\x6a\x7e\xf4\x8c\x4a\xb2\x66\x3c\x90\x9e\x95\x58\xa9\xc1\x5e\x9b\xa5\x4a\x73\xc3\xae\x58\xe2\x81\x39\xeb\x91\xc1\x30\x3f\x80\x1b\x2f\xcb\xaf\xf7\xa1\x7e\xe1\xe7\xf2\x4b\x7f\x7b\x11\xb7\xd9\x40\x87\x91\x77\xa0\x95\xaa\x59\x4c\x18\xdc\x89\x31\xc3\x40\xd7\xe8\x4f\xe2\x8b\x97\xa3\x6c\xb9\xdb\x6f\x78\xb1\xd1\xd7\x6e\x2e\x8b\xd6\x61\x3c\x91\x7b\xaf\x8f\xa2\xb4\x68\x4d\x44\x51\x33\xbd\xae\x74\xea\x9e\x2c\xaa\x88\x68\x44\x6d\x51\xdb\x33\xa1\xa1\x41\x8d\xfd\x46\x4a\x33\x86\xd5\x6e\x25\x71\x48\xd8\xf6\x08\x76\xad\x7b\x32\x39\xea\xee\x67\x02\x09\x66\xd5\x43\x51\x05\x74\xdd\x2e\xf6\xbf\x15\x5d\x62\xaf\xc3\x52\x72\x2d\x3f\x1c\x03\x70\x3d\xe7\xa5\x67\x85\x20\x83\x88\x5d\xbd\x60\x3a\x9a\xb9\x58\x4f\x40\x7b\xa9\x06\x69\x6e\xda\xc7\xfa\x3c\xb2\x79\x0e\x79\x06\x99\x72\xa1\x52\xa5\x50\x43\x03\x11\x73\xc2\x5c\xfb\x38\xdf\x7f\x90\xeb\x8d\x15\xf0\xfe\xf6\x52\x2a\x75\xf8\x30\x70\xe7\xe3\x37\x94\x47\xa5\x0a\xc0\x37\x79\x7f\x46\x04\xf7\xcf\xa0\xb1\x84\x7f\xa2\x50\xb4\xa2\x14\x09\x51\x29\xf8\x04\xe5\x22\x39\x41\x93\x77\x8e\xc7\x5c\x04\xc8\xb5\xd9\x8e\x57\xf2\xfa\x4e\xa7\xe8\x87\x72\xbc\xdc\xc2\xd3\x62\xf9\x4e\xc9\x89\x1a\x88\x9d\x3a\xf3\x5d\xe5\xa0\xa8\x3d\x92\x4c\xb1\x8f\x38\x08\x4f\x73\x9d\x62\x25\x57\xaf\x7a\x6a\x3f\x62\xdd\x5a\x81\xec\x66\x6a\xe1\x23\x8d\x00\x78\x96\x6b\xe5\xdf\x96\xf3\x91\x1a\x36\xfd\xbd\x3e\x3c\xd9\xf9\x81\x7c\xbf\x86\xbc\xed\x8a\xbe\x01\xa2\xd8\xf2\xb9\x79\xa0\xe6\xf6\x2e\x2a\x45\xb8\x31\x75\x1c\x0e\xd8\x9f\xae\x87\xf3\xcd\xf9\x8a\x64\xd9\x08\xd8\xdc\x3b\xbe\xf9\x77\xd7\x30\x36\xc9\x9b\x68\xe1\x8e\x02\xce\xa0\xbc\xa1\xb8\x24\x0f\x0f\x8e\x15\xc5\x51\x1f\x97\xfc\x4f\x13\xe8\x60\x05\x35\x4f\x9a\xb4\xe7\xd2\x8d\xac\x76\x4b\x7a\x92\x54\x59\x43\xd9\xbb\x6e\x26\x62\x96\xe6\x70\xe0\x7c\xe8\xa4\x6f\xb9\xbd\x30\x90\x5c\x2f\x97\xa1\x6c\xde\x24\xbb\x9f\xd9\x56\xbe\x9e\x8f\x6a\x1e\xdf\xf2\xea\x91\xe1\x5d\x5c\xee\x9a\x0c\xda\x91\x44\x79\xbd\x46\xcf\x57\xce\xf2\xd1\xee\xca\x1f\xa8\x7d\x8a\xc8\x0b\x16\x5b\x7a\x3e\xdb\x59\xfc\xe0\x71\x20\x22\xb2\xc8\xef\xf8\x9e\xf8\xf6\x60\x66\x2e\xea\x14\x6d\x99\xf7\x7e\x91\x46\x29\xda\x25\xec\x8f\x13\x45\xc0\x2c\xe1\x70\xc5\x2a\xb2\xd2\x84\x7c\x5b\x78\x9b\x34\xfa\x9a\xd9\x56\x94\xc9\xa6\xd5\x4d\x02\x6d\x5b\xaf\x84\xe0\x41\xe3\x4e\x16\xcc\x88\x98\xf8\xe2\xd2\xfb\xab\xfd\xe0\x53\x23\x30\x11\x71\x28\xb3\x75\xb9\xd4\x77\x77\x39\x1e\xd8\x77\xee\x81\x7a\xf0
\x68\xfe\x49\x69\x20\x92\x9c\x1c\xef\xb1\x07\xe7\x4e\x0c\x9d\x25\x34\xfd\x3f\x41\x50\x7c\xc8\x6a\xa5\xcc\xc1\x8d\xe5\xc0\x54\xb3\x33\x81\x39\x6a\x9a\x08\xdb\xa1\x6f\x11\x4b\x4b\xbb\x23\x6a\x60\x11\xd3\xc0\x4b\xbd\xac\xdd\xe0\x64\xd0\x59\x5c\xab\x3c\x3c\x5e\x9b\xc8\xf6\xcb\x36\xca\x80\xa3\x7b\xbe\x33\x16\x2d\x9d\xd5\x57\x3e\x86\x12\x7d\xa0\x49\x2e\xee\x57\x7e\x9b\x25\x7a\x9d\x83\xbe\x78\x54\xd2\x6c\xe9\x1c\x0b\x28\xe0\xbc\x62\xfe\x54\x51\x6a\xce\x1b\xe8\xe0\x0c\xb8\x77\xea\x06\x7e\xc8\x4e\xc3\x51\x3e\x56\x8e\x4b\x09\x1b\x81\xfa\x16\xc8\xb3\x7b\x7c\xbf\xe1\x7e\xb8\x23\xee\xd2\x31\x4e\x5d\x5f\xcb\x39\x1f\xbc\x51\x18\x1a\xd3\x2b\x81\x2d\x2b\xb4\x1d\x70\x79\x7e\x15\xac\xa7\x42\xce\x6e\x49\xb0\xcf\xb5\x69\xbd\xed\xc3\xe9\x8a\x59\xf0\x4e\x28\x74\xf6\x58\x3d\xf6\x95\x65\x04\x38\x70\xb8\x61\x35\xda\xab\x3c\x27\x08\x0a\xab\x61\x3c\x5b\x5c\xdd\x4b\x07\xc9\x93\x6f\x3b\xda\x85\xb4\x23\x06\xa3\x37\xbb\x93\xb0\xba\x0f\xec\xac\x7f\x7f\x13\xcd\xee\x99\xb2\x49\x21\x32\x19\x9e\xb2\x8a\x82\x9c\x27\xf3\xcf\x2e\xdf\x44\x1c\xd8\x4b\x5f\x96\x29\xc7\x85\x64\xf9\xfd\xf5\x6f\x8f\xaf\x1d\x87\x12\xb4\x79\x0e\x17\x26\xa8\xb6\xe4\x1b\x17\xa9\xe8\xa4\x9c\xb6\x55\x93\x62\xd2\x3f\xce\xde\x6f\xe4\x60\x72\x05\xef\x89\xba\xcf\x0c\xe4\x53\xf2\xb5\xff\xf9\x24\x88\xe6\x2a\x0a\x08\xff\x6b\xec\x1b\x24\x9a\x56\x0f\xa2\xb5\xd4\x3b\xa1\x6f\xf0\x31\xc4\xe3\x7b\x79\xb3\x0c\x3a\xaa\xaf\x61\xa1\x4a\xad\x97\xd1\xf2\x34\xc5\xeb\xaa\x7d\xb0\x91\xc2\xae\x11\xcb\xc7\xf1\x35\x7d\x4e\x92\x93\x07\x26\x27\xec\x78\x18\x75\xfa\xbe\xa2\xa4\xf2\xab\x3b\x3f\x97\x55\x3b\xc6\xe1\x53\xfa\xfc\x7b\x8c\xf6\x3d\x87\xb6\x3c\xd3\xdb\x49\x9d\xd9\xe9\xf1\xab\x4f\xd4\x87\xe6\x26\x1d\xa2\x09\xf5\xfe\xe0\x7a\x3b\xba\xbf\xa7\xab\x43\xca\x04\x17\xe1\xb4\x9a\xf0\xf4\x85\xaa\xe2\x20\x82\x13\xe8\x41\x8e\x23\xa5\x0a\x53\xc9\x37\xd1\xfe\x84\xa4\x81\x4e\xba\xcb\xdc\xe4\x0b\xfe\xa9\xdb\x71\xbb\xbc\x00\xd3\xdf\xf4\x1f\x6e\xc9\xd4\x2c\x5c\x80\x18\x3b\x3f\xab\x51\x3c\xd8\x2b\x23\xc0\x03\xf1\xb0\x91\xdf\x26\xd7\x3b\x05\xbd\x21\x38\xda\x8e\x4b\x05\x1f\x5a\xb6\x78\x27\xfb\x97\xdf\xf5\x39\x19\x65\x3b\x22\xc1\xee\x94\x57\x61\x89\x91\xe6\x69\x99\x1e\x4a\x21\x21\x58\x0f\xfd\x68\xb5\xc8\xc9\xd4\x3e\xfe\x8d\xe1\x46\xc4\x74\xef\x4e\x79\x5d\x30\xcc\xb9\xcf\xd6\x22\x42\xaa\xcd\x8d\x41\xb8\x44\xf5\x5f\x52\xfe\x31\x51\x2c\x93\xbb\xcb\x0a\x8e\xb5\x36\x6c\xdf\x35\xce\xa0\xea\xa7\xc8\xbd\x72\x5f\xc7\x36\x29\xaa\x1f\xcb\x4a\xcd\x68\xe8\x3f\x15\x4e\x4e\x74\x75\xb7\xf7\xfb\x71\x3f\x70\x7e\x28\x6b\x8b\x5c\xea\xf0\x83\x1d\x49\x83\x23\xc7\xca\xc1\x5e\x53\x9a\x3f\xb0\x32\xa6\x79\x98\x94\x11\x45\xc1\x7d\xad\x09\xbb\xa5\x05\x3e\xdf\xb9\x0c\xcf\x85\x12\x69\x29\x0f\x9a\xfb\x7e\x3f\x81\x86\x88\x47\x32\xf2\x52\xd0\xfe\xf2\x49\x57\x3b\x91\x1f\xc2\x98\x1a\xc1\x1c\x56\x52\xba\xdf\x6a\x02\x29\x2b\x4a\x36\xf4\x98\x3a\x96\x08\x3f\xb6\x37\x27\x8d\xfa\x21\x49\x45\x46\x4a\x7b\xd7\x6c\x4d\x71\x1a\x69\xf4\x1b\x96\x64\x3e\x4b\xdf\xcb\x7a\xd4\x87\xea\xb1\xe9\x2f\x87\x32\x7d\xef\x9c\xda\xc8\xa0\xf6\xaa\x0f\x6f\xbc\xb0\xf0\x5a\x9e\x29\x5b\x9f\x5d\xba\x48\x00\xd7\x3b\xba\xed\xdc\xf9\xda\x5f\xab\x4c\x3a\xfc\x07\x79\x54\xa6\x96\xc9\x6d\xf2\x64\x4b\xc1\x75\x56\x07\x6a\x74\x3c\xd5\xf6\x50\xa7\x93\x2d\x85\x56\xa6\x46\x42\xed\x84\x97\x7a\xc5\xb6\xd8\x27\x76\xe6\x54\xdc\xd6\xb1\x44\xdf\xc4\xb0\xfc\x36\xdf\x62\xe0\x28\x6a\xdf\x2c\x5c\x27\x56\x1e\x3a\x49\xf4\xbf\xa2\x3f\x1d\xea\xb9\x52\xbd\x4a\xcc\x2b\xea\x4c\xfd\x3c\xde\x9c\x5b\xa7\x34\x89\x84\x1b\x2d\x97\x39\x71\x43\x1e\xeb\xea\x74\x20\xac\x84\xd8\xad\x40\xf1\x88\x5e\x1c\x63\x14\x11\x45\xef\xd8\xfb\xc7\x23\x65\xfa\xc5\xfb\xe5\x70\x02\x7f\x05\xf2\xba\x4a\xbd\x2a\x14\xc0\x6a\x2b\x7
0\x6c\x89\x17\x56\xe9\xac\x7c\xfa\x15\x29\xc1\x13\x4b\x1e\x1f\x8f\x9a\x7c\xde\x83\x48\xd2\xfe\x57\x27\x12\x23\x6f\x5d\xc3\x08\x91\x43\xf8\x44\x58\xdf\xfc\xab\x8a\x22\x4d\xfb\xbd\x8b\x2e\xc9\x47\x7b\x2f\x26\xb0\x4a\x38\xca\x9e\x54\xaa\x0c\x21\xe3\xa5\x31\x36\x8c\xc8\xab\x32\x8c\x52\x49\xe0\x07\x8b\x5e\x58\xce\x03\x0e\xab\xeb\x21\x3f\x4f\x78\x29\x97\x3f\xfc\x41\xfc\x58\xb3\xf7\xd3\x19\x29\x3e\x4b\x0f\xfd\x49\xfe\x50\xf1\xb3\xe7\x2e\x03\x2b\xea\x32\x7d\x64\x05\x59\x26\x96\xe6\x78\xf2\x53\xcc\xc9\xfb\x4b\x48\x38\x89\x5e\xb6\x80\x8b\xec\xc3\x5c\xf8\x06\xa7\xc4\xc1\xbe\xa8\xa6\x37\x26\xac\x70\x46\xe2\xb5\x52\x26\x0d\x4b\x43\x28\x37\x77\xc7\xc9\x7c\x70\x45\x6f\x28\xfc\xfb\x60\x6c\x98\x33\xe7\xe0\x93\xe1\x87\x28\x90\x4d\xae\x28\x60\xf1\x20\x59\xc5\x31\x19\xa9\x81\xe0\x11\x60\x88\x2e\x51\xe2\x5b\xbf\xef\x73\x4b\x8e\xa0\x65\x41\xee\xd8\x43\x80\x48\x9e\x48\x22\x2a\x63\xd5\xcc\xe1\x56\xbc\x3c\x02\xe8\xb4\xb3\xa2\xf0\x48\x85\x5f\x2b\x74\x40\x4a\xd5\x1c\x4c\xf2\x8d\xc2\xf9\xf3\x54\xb9\x4d\xe4\xaf\x23\x94\x87\x67\xb2\xd0\xdc\x60\xfe\x58\xf8\xc6\x9e\x4e\x97\x2a\x9d\xb8\xb9\x6b\x97\xd4\xac\x35\x19\x83\xe6\x98\x0a\x3a\x8b\x24\xca\x6a\xe2\xa1\xa4\xb2\x4f\x5d\xee\xa6\x59\x5d\xd2\x62\x06\x7b\x69\x2d\xe3\xde\x96\xd8\x2a\x4a\x88\x46\xc3\x23\x74\x7e\x67\xfe\x48\x61\x3d\x6e\xf9\x62\x1b\x38\xb1\xda\x42\xee\x60\x94\x81\x6f\x5f\xae\x8c\x28\xed\xd8\x7d\x38\x1c\xde\xae\xef\xcb\xbb\x61\x17\x01\xec\x66\xde\x7a\xbb\x8f\x9b\xf3\x87\x16\xfb\x4b\x03\xba\x32\x30\x60\x35\x71\xbe\xcb\xcc\x61\x7f\x91\x68\x98\xc5\xc1\x44\xd9\x84\x2c\x8e\x30\xdb\x23\x75\xc1\x34\xe4\x0d\x6b\x16\x7b\x4c\xb8\x02\x70\x31\x7c\xef\xc7\xca\x23\xd2\xf0\xfe\x9b\xff\x61\x7d\xcc\xb6\x51\x79\xb2\xcf\x9d\xd3\x8a\xc6\xb9\x0e\x52\x1b\x5a\x2b\xc9\x9b\x1d\x20\x25\x27\x60\x28\x7c\xb4\xcd\x07\xcd\x4f\x82\xd1\xa6\xc3\x6b\xac\x43\x98\xa2\x8e\x6b\x9c\xb3\x66\x57\xdd\x1b\xd2\x8a\xb8\x56\xca\xd8\xd6\x76\x77\x9f\x46\x6c\x0b\xdb\xec\xb9\xb7\x99\x1e\xb4\x1d\x60\x86\x0c\xfc\x88\xb3\xa7\xed\xf3\xd3\x09\x45\xa1\xa9\xc0\x44\x52\x74\x73\x28\xaf\xf5\x71\xf7\x43\x93\xc5\xe9\x2c\x0a\x7c\xed\x04\x29\xb9\xf3\x4f\x1b\x3b\xea\xa0\x77\x32\xc4\x8a\x93\x82\x05\x5a\xbd\x26\xe5\xab\x54\xf9\x1e\x15\x01\x14\x08\x34\xa3\x7e\x3a\xdc\xce\x1e\x18\xc0\xfb\xdb\xeb\x29\x6a\x22\x0d\x68\xc8\xaa\x5e\x6b\x3a\xef\x72\xa3\xfb\x37\xfb\x3a\x02\xd7\x04\xef\x98\xa6\x4f\xac\x9a\xed\x84\x11\x79\x1e\xe1\x44\x85\xda\x38\xea\xa1\x47\x66\x59\x2a\x33\x63\x34\x60\x3c\x1a\x0c\x89\xc3\x84\x76\x15\x09\xa5\xa8\xe3\x94\x36\xb4\x22\xd4\xdf\x96\xe7\x97\xb7\xf3\x7a\x23\xe7\xd7\xfe\xd2\x59\x78\x9b\xab\xe4\x8b\x73\xd3\xf5\xc6\x4f\xd7\xd3\x33\x5f\x78\x77\xce\xe7\xfd\x98\x29\x49\x3a\xff\xc5\x21\x33\x96\x76\x3c\x3b\x1f\x91\xb9\x8e\xee\xbc\x50\x5f\xe4\x30\xb2\xf3\x42\x35\x37\x49\x34\x81\xd6\xbe\xd5\xcf\xbd\xc2\x92\x0c\x25\x23\x7d\xe9\xbc\x6b\x08\xa7\x88\xd2\x98\x55\x16\x83\x35\xc3\x86\x98\x13\x3d\xa7\xfb\x5c\x27\x7f\x6f\xcb\x54\xa1\x85\xbd\x6c\xee\xa4\x7e\xb0\xbc\x52\x71\xe7\xd1\x6e\xa5\xfb\xcc\x57\xce\x87\x79\x25\x00\x2f\x5c\x3a\xa2\xfb\xc7\x5a\xec\x00\x89\xa8\xbb\xfd\x48\x5c\x98\xa0\x03\x2a\xc4\xd1\xe7\x15\x7f\x70\x51\xce\xcb\xc6\xa1\x93\xcf\xe7\x18\x8a\x49\x20\x57\x54\x7c\x85\x6d\x82\x08\x6f\xf3\x50\xef\x6f\xe4\x5f\xe7\xcd\x67\x26\xc8\x35\x97\x10\x58\xdd\x91\x88\x53\xa9\xe6\x98\xd7\xa1\xb9\xb2\xa1\x0d\xa7\x1a\xcf\x44\xda\xa2\x7b\x71\xb6\x23\x64\x95\x68\x6e\x96\x8f\xa5\x95\x6b\x79\xea\x5c\xcf\x6d\x2f\x24\xd2\xbb\x05\x82\xe1\xd3\xc2\x5d\x35\xba\x3f\x93\x14\x7f\xf8\xef\xc1\x14\x05\x0a\xe6\x7c\xd4\xde\xbc\x4f\x26\x01\x27\x8b\x42\xb0\xa5\x0e\xd9\x2a\xfd\x74\x23\x79\x37\xff\xaa\xac\xbe\x11\xf0\x1a\xf2\x36\x62\xb1\x00\x49\x3b\xde\xb4\x
7c\x71\x57\x13\xd8\xed\xe1\x65\xd5\xdd\x2c\x9a\x0e\x51\xee\xc2\x49\x73\xbe\x52\xb1\x20\xb4\x9e\xe5\x2d\xea\xcf\x56\xf2\x7d\x6e\xfb\xa7\xb0\x36\x0a\x14\x38\x05\x61\x70\xc5\x8c\xa5\x4e\x3e\x99\xd0\x29\x3b\x53\x5c\x0a\xa6\x6f\x1c\x90\x59\x8a\xfc\x51\x61\x75\xda\x92\x1a\xa3\x97\xcb\xf2\xfa\x3a\xb0\x9e\xfb\x20\xd2\xca\x09\x6a\x62\x55\xd5\xfb\xfb\x9d\x24\x04\x09\xd5\x16\x37\x85\x16\xc0\x7b\xbf\x9e\xf0\x85\x4b\xa9\x1a\xe1\xea\x7d\x58\x8f\xe1\x34\x9c\xc9\x01\x3e\xb3\xc0\x3e\x1c\xce\x6e\x81\x61\xd5\xec\xaa\xab\xd4\x99\x28\xcc\xac\x32\xaa\x20\x7f\x29\xc2\x8c\x5f\x3f\x65\x09\x01\xf5\xb5\x77\xc6\xcc\x47\xc7\xc2\xe7\xf9\xed\xde\xcc\xbc\x82\x5c\x46\x70\xf9\x58\x3b\x26\x82\x56\xbb\xc9\x22\xfb\x78\xc7\x1f\xfc\xde\xc2\xd6\x2c\x11\x0a\x5b\xfa\x0e\x25\x28\x3d\xe2\xd5\x03\x72\x7e\xea\xc9\x85\xc4\x6b\xea\xb1\xea\x64\x25\x36\x5a\x56\x22\x65\xad\xb1\xad\xaf\x47\xa6\x3a\xb0\x26\xf4\xb6\xcc\xc8\xe1\x5a\xa5\x56\xd4\x4f\xe2\xa5\xd5\xb7\xb2\xf0\xfe\xf5\x59\x5f\x70\x93\x45\x3b\xf1\x95\x57\xe2\x7f\x47\xb1\x4b\x91\x86\x07\xf1\x45\x30\xbe\x22\x44\xbf\x7f\xb1\xc8\x9c\x7a\x1f\xf6\x44\xeb\xf2\xa1\xf3\x53\xfa\xdc\xad\x06\x26\x67\x17\x9f\xb6\xf2\x12\xe6\x3f\xbc\x2c\xbb\xf9\x2e\x85\xa5\x2e\x24\x6c\x90\x62\x14\x94\x24\xa1\xbe\xd4\xa0\xe8\xd0\xb7\x73\xe3\x76\x89\xb9\x65\x8c\x85\xf3\x86\x48\x27\x3e\xb9\x64\x21\xe8\xaf\x8a\xba\xf4\x00\xe4\xa2\x4b\x1a\x4b\xa1\xc4\x83\x4c\x9a\x7b\xc2\xf9\xfe\xa4\x6f\xa2\x85\xbb\x8f\xb6\xbc\xc0\xbf\xe3\xb7\x0f\xa3\x12\x39\xe7\x37\xa6\x69\x2a\x74\x3e\xf9\x44\x94\x1a\x0a\xbb\xdc\x38\xc9\x0a\x2c\xce\xa9\xc7\x79\x11\x9c\x69\xb8\xd6\x22\xf3\x00\xc7\x64\xb5\x05\xcf\x00\x46\xb2\xdb\xf9\xe4\xb2\xcd\xf4\x95\x21\x73\xab\x2b\x2f\x07\xbc\x97\xf3\xf9\x34\x7f\x5f\xfc\xa8\xcf\x27\x9e\x25\xb0\x56\x66\xd4\x3e\x1f\x98\x71\x82\xf7\xdf\xbb\x34\xde\xe0\x59\x62\x85\xbc\x97\xa5\xe0\xaf\xb6\xec\xbf\xc2\x38\x8a\x46\x78\x71\xf1\xf5\x61\x42\xba\x74\xf8\x88\x25\x28\xcf\xca\x00\x55\xfa\x0e\x73\x1c\xb8\x69\x47\xd7\x33\x50\xe3\x1f\x17\x24\xc7\x08\x09\xa3\x36\xa8\x9e\x9d\x20\x63\x87\x9c\x2a\xef\xdf\xcb\xfb\xc8\xa7\x37\x57\xcf\x7e\x89\xd1\xed\xa4\x81\xc4\xf3\xa8\x1e\x8c\x04\x97\x9a\xea\xd0\xf0\x62\x28\xef\x36\xa9\x69\x62\x21\xcc\x61\xa2\x89\xdb\x3b\x2b\x5a\x19\xed\x6e\xc5\x8d\x25\x15\x2c\xdf\xe3\x14\xbe\xa5\x8b\xe2\x33\xb0\xa3\xc8\x5f\x42\x11\x2a\xcd\xf1\x0e\xd5\xa2\xb9\x5e\xcf\xe7\xed\x81\x73\xc2\x0f\x3b\x5f\x74\x9a\xdd\x6f\x04\x57\x93\x72\x5b\xfa\x81\xa1\xb9\x89\x12\xbe\x59\xef\x16\x30\x59\x3f\xfb\x5c\x96\xfc\xf5\xdd\xa2\x6a\x55\x30\xcc\xb6\x6f\xcd\xfa\x94\x52\x16\x1a\xba\xce\x0f\x96\x21\x31\xa8\x27\xf0\x02\xa1\xee\x76\x1e\xae\x2d\x4d\xdd\xbb\xe4\x5b\xce\x12\x8e\xb4\x64\x22\xea\x5c\x7c\x05\x9a\x4d\xfa\x95\x60\x49\x7a\xd9\x43\xa7\x03\x11\x12\x86\xce\x4f\x7c\x2b\x32\x6b\xe5\x2a\xa1\x2b\x81\x3d\xb2\xfb\xd8\xb8\xeb\x44\x4d\xed\x32\x75\x0c\x52\xa5\x5f\xa0\x35\x6b\x58\xcc\x9d\xb5\x2c\xd6\xf9\xe6\x44\x74\x16\xbb\xcb\x12\x34\x69\xe7\xad\x71\xfe\xd9\xee\x89\x3a\xbd\x6b\x22\x9f\x48\x82\x5a\x3a\x62\xab\x2e\x12\x58\x9f\xd1\xb0\x6a\x2f\x9d\x80\xbd\x9d\x55\x1a\x1f\xdf\x8b\xf7\xb2\x1f\xa2\xf5\xd8\xe1\x6f\xff\xb4\xa4\x13\x1f\xa7\xc5\x55\x7e\xf3\x9b\xb1\xc2\xa2\xa9\x33\x8f\x11\x57\x63\x2f\x53\x18\x2f\x4e\x05\x71\xcd\x2d\x4f\xad\x98\x5c\xac\xcb\xf3\xdc\x91\x75\x3d\x9f\x02\xf5\xbf\x25\x60\xc0\xc3\x00\x45\x4a\x13\x48\x9f\x42\x67\x61\xbf\x8b\x22\xad\xc4\xc7\xec\x86\xb8\xa6\xc7\x1f\xdd\xdf\xd9\x4f\xf8\x93\x99\xeb\x2f\xd9\x9d\x9e\x49\x2a\xc0\x0c\xa7\x08\x21\xaf\xa9\xed\x02\xe9\x57\x35\xba\x8a\xdc\x20\x75\x68\xfc\x8a\x26\xb0\x12\x9f\x7e\xf9\xac\xb5\x3d\x96\xb9\x1b\x2c\xfc\x47\x24\x83\xd0\x34\x24\x41\x52\x84\xae\xeb\x45\x48\xa1\x79\x25\xff\x1e\x7d\x6c\xa9\
x57\xc6\x16\x29\xc8\x16\x4f\x7d\xfe\xb2\x9e\xcb\xf6\x8a\x25\xd6\x04\x2b\xea\xc8\x76\xc7\x7c\x12\xbe\x76\x1c\x37\x8f\xb5\x18\x04\x60\x5b\xb5\x5d\x9f\xbd\xfe\xf0\xb2\x53\x45\xd8\x56\xf9\x0f\xb1\xde\x0a\xf5\x90\x41\x75\x73\x32\xe6\xb7\xe3\x41\x8b\x6a\xd8\x7d\xbc\x06\x00\x21\xdc\x66\x56\x4b\x7a\x10\x4e\x5c\xdc\x81\xfe\x0a\xd2\x68\xda\x9f\xf6\xde\xd0\xc0\x78\xc7\x8e\x6d\x54\x82\x6d\x03\x72\x45\xf2\x37\x31\xc4\xe4\x60\x85\x6d\x34\xaf\x8f\xb1\x8f\xf1\x24\x7a\x5e\x7a\x5d\xd4\x17\x89\xbd\xd3\xaf\x33\xab\xe7\x8c\xb8\x0c\xb5\x8d\xd9\x5e\x4b\x7e\x5a\x34\xd7\x3a\x0d\x3c\x37\x5c\x6e\xf4\x92\x3e\x17\xaa\xf9\x9c\x9b\x2d\xe4\x54\x88\x92\x71\xea\xf9\x70\x57\xc6\xca\xd9\x74\xc6\xd2\x6b\x33\xac\xac\x0c\xf1\xc2\x46\x3d\x39\xe9\xf0\xf9\x7f\x83\x56\x83\x05\x31\xa1\x63\x45\xf5\xc8\x0b\x44\x3b\xb9\x32\x89\xa3\x5d\xf6\xae\x48\x5e\x26\x41\xe2\x44\xc5\x3a\x26\xb9\x4e\x84\xdc\x0b\xf1\x26\x6f\x49\xe0\x2c\x3e\xc7\x69\x96\xbe\x7b\xaa\x52\xd3\x29\x1d\xfc\x07\xa2\x90\xcb\xf0\x69\xa7\x9a\x19\xe7\xcc\x1d\x03\xab\x6b\x96\x6b\xf9\x1c\x27\x3f\x40\xd7\x26\x61\xbb\xff\x36\x56\x29\xae\x78\xb6\x5f\x8e\xc9\x93\x82\x1a\x7d\xf6\x53\x9a\xac\x93\xcd\xb9\x9c\x59\xb9\x2d\x8e\x98\xda\xb6\x1a\x32\x17\x36\x91\x23\xd0\xf2\x0c\x25\xde\xf7\xfb\x61\xf5\x27\x8f\xfc\x21\x34\xb3\x28\x8b\x13\x10\x70\x86\xd5\x19\x8c\x80\x9d\xfa\xb7\xa3\xab\x60\xcd\x6e\x4a\x25\x94\x4e\xad\x8b\x44\x32\x94\x49\x18\x7a\xec\x81\x1d\xa1\x39\xed\x89\xac\xc1\x98\x85\xc8\x9d\xbf\x59\x91\x58\xaa\xe4\xa1\x01\x61\x84\x71\x76\x7b\xa9\x08\xcc\x21\xc2\xaa\x9c\x78\x91\x54\x4a\x3f\x77\xee\x72\xdb\x85\xcf\x3c\x72\x8e\xac\x77\x2d\x0f\x5b\xc0\x45\xcf\x31\x10\x50\xbb\x26\xc5\x58\x47\xe4\xb0\x92\x25\x16\xc5\xbe\x88\xf3\xd4\x63\xa5\xc5\x57\xb3\x93\x1b\xb1\x5a\x9e\xb6\x9f\x5d\x4c\x06\x4d\x88\x4e\x63\xca\xb3\x76\x1f\x87\xf8\xf3\x20\xcd\x5f\x84\x05\x85\xec\x5e\xd4\x92\xb0\x74\x2d\x32\xcb\xc7\x5c\xf6\xf8\xda\xd1\x94\x50\x59\xf5\x42\x60\x84\xe3\x29\x3f\xfa\x08\x48\xab\xf9\xee\x7f\xfe\xf7\x3f\xfe\x2f\x00\x00\xff\xff\x9c\x84\xb0\xba\xce\x07\x01\x00") + +func dataPasswordsJsonBytes() ([]byte, error) { + return bindataRead( + _dataPasswordsJson, + "data/Passwords.json", + ) +} + +func dataPasswordsJson() (*asset, error) { + bytes, err := dataPasswordsJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/Passwords.json", size: 67534, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataQwertyJson = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xb4\x98\xd7\x52\x23\x3d\x10\x85\xef\x79\x0a\x18\x72\xce\x39\xe7\x9c\x33\x98\x9c\xc1\xe4\x8c\x31\xcf\xfe\x6b\xf8\xb7\x56\xdf\xa9\xb2\xc6\xbe\xd8\xbe\xd9\x6a\x2f\x33\x5f\xb7\x5a\xad\xa3\x63\x67\x8a\x8a\x8b\xa3\xc9\xa7\xa3\x87\xcb\xa8\xa7\x38\xfe\xe0\x3e\x96\xb8\x70\xf7\x37\x74\x1f\x0e\x7f\xa2\xba\x3f\xf1\xdd\x6b\x3a\x9d\x2b\x8e\x5a\x86\xfe\x3e\x13\x3d\x2e\xcb\xf3\xbf\xe1\xde\xff\xff\x13\xa5\x22\x92\x7b\x7b\xfc\x5b\xbb\x19\x1f\xef\x65\xf3\x67\x6c\x1c\x8c\x84\x5c\x4a\x30\xca\x09\x02\xda\xca\x7c\xc2\xb3\x71\x1f\xbf\x6f\x2a\xb8\x8c\xe0\xd6\xd2\xfc\xe0\xf6\x72\x0f\x7b\x5a\x91\x24\x02\x2e\x27\x18\xd5\x04\xc1\x1d\xfb\x1e\xf6\xb2\x26\x49\x04\x5c\x41\x30\x5e\x0a\x82\xbb\x6a\x3c\xec\x75\xdd\xc7\x9f\xdb\x0a\xae\xb4\xda\xbc\x2a\x82\x51\x4d\x10\xd0\x54\xed\x13\xde\x2f\xfa\xf8\x6a\x5a\xc1\xd5\x04\x77\x57\xe5\x07\xd7\x1f\x78\xd8\xc3\x92\x24\x11\x70\x0d\xc1\x9d\x15\xf9\xc1\x48\x1e\x57\xc9\x7e\x0b\xb8\x96\x60\x54\x13\x02\x4b\x12\xf4\x3e\xde\x13\x01\xd7\x11\x7c\x3b\xef\x1f\xbc\x99\xf5\x71\x7a\xce\xc7\x0d\x03\xc1\xe4\x02\xae\x27\x18\x1b\x13\xac\xb2\xbf\x36\xf7\xe4\xb8\x7e\x0b\xb8\x81\xe0\xba\xbe\xdc\x55\x72\x0c\xdd\x58\x15\x54\x71\x23\xc1\x58\xa6\xc0\x2a\x9d\x56\x15\xd2\x7c\x21\x37\x59\xcd\x5b\xb3\x95\x22\xb7\x10\xdc\x5c\x92\x1f\x0c\x09\x8c\xb5\x92\x49\x04\xdc\x6a\x25\xc8\x6d\x56\x82\xdc\x6e\x25\xc8\x1d\x04\xa3\x9a\x20\x18\x82\x12\x8b\x30\x93\x08\xb8\xd3\x4a\xe9\xbb\xac\xe4\xad\xdb\x4a\xe9\x7b\x08\xa6\x40\xf0\x54\x51\x6d\x78\xbe\xa9\x1c\xb1\x20\x08\xb9\xd7\x8c\xdc\x67\x25\xc9\xfd\x56\x97\xc8\x80\x95\x24\x0f\x9a\x49\xf2\x90\x95\xc0\x0d\x03\x2c\x00\xc8\xad\x00\x9e\x57\x7d\xfc\xb5\x13\x96\xe4\x11\x56\xfc\xb6\xe1\x5f\xba\x98\xf4\xf1\xe5\x94\x8f\xef\x16\x0a\x6b\xf2\x28\xc1\x1f\x5b\x1e\x70\x3a\xe6\xe3\xf3\x09\x1f\x23\x79\x22\x78\x8c\x60\x2e\x93\x3a\x4e\xe9\x65\x92\x93\x51\x1f\xbb\xa2\x04\x3c\x4e\x30\x7b\xc9\x4d\xe2\xc5\xc1\x24\x5c\x95\x2b\x4a\xc0\x13\x04\xf3\x41\x02\xa8\xe9\xec\x3d\xf7\xc4\x55\x2f\xe0\x49\x82\xb9\x4c\xc2\xa8\xe9\xdc\xc8\xe3\x11\x49\x22\xe0\x29\x82\x59\x0d\x61\xd4\xf4\xeb\x99\x9c\x13\x12\x27\x11\xf0\x34\xc1\x04\xf0\xb2\xa0\xbe\x53\x86\xa9\x54\x2e\xa1\x80\x67\x08\xe6\x32\x99\x84\x97\x05\x61\x94\x43\x57\xbd\x80\x67\x09\xe6\x32\x09\x63\x95\x54\x27\xaa\x96\x4b\x22\xe0\x39\x82\x59\x0d\x61\x94\x7d\xaa\x13\x55\xcb\x25\x11\xf0\x3c\xc1\xdc\x0c\x56\xcf\x84\xa8\x32\xf1\xe4\x2d\x10\xcc\xf1\x61\xbf\x99\x04\x7d\x4d\x04\x2f\x12\xcc\xbe\x72\x12\x78\x39\xb3\x2d\xec\xb7\x5b\x95\x80\x97\x08\x66\x5f\x09\xa3\x2d\xe6\xd5\xca\x7e\xbb\x24\x02\x5e\x0e\xe9\x31\x44\x5f\x6c\x31\xf5\xe4\x68\x38\xac\xc7\x2b\xac\x98\x8a\x46\xe1\xa1\xc9\xe4\x51\xa7\x04\x38\x9d\x11\xf0\x2a\xc1\xa8\x40\x2a\x63\x42\x0a\x15\xf5\xdb\xdd\x26\x02\x5e\x23\x98\x8a\xc6\x2a\xe9\x5e\xa9\x21\xd4\x16\x57\xbd\x80\xd7\x09\xe6\x4b\x74\xac\xd4\x0d\x4e\x0e\xc7\xd0\x8d\xa7\x80\x37\x08\xe6\x8d\xc0\xfe\xb1\x32\xcc\x7a\xe2\x1c\x6f\x12\xcc\x9b\x99\x93\xc0\xdb\x84\xfd\xe6\x55\xe6\xf6\x47\xc0\x5b\x04\xe3\x3a\x97\x97\xb8\x61\x58\x55\x62\xc5\xdb\x04\x73\x94\xb8\x61\xec\x37\xe5\x94\xc7\xde\xb5\x4b\xc0\x3b\xa1\x03\xc2\xd1\x63\xf5\x18\xb1\xc4\x8a\x77\x59\x31\x75\x80\xc7\x98\xdf\xfa\x69\x31\xc5\x2d\xbb\xf3\x2d\xe4\x54\x8a\xe8\x02\x7e\x5a\x2a\xd8\x16\xee\x11\x4c\x89\x41\x9d\xd2\xa4\x54\xea\x3b\xf7\x1f\xe2\x05\x08\x7a\xdf\xea\xeb\xde\x81\xd5\x2f\x2d\x87\x81\xc9\x08\x82\x13\x7c\xb4\x80\x8f\xac\x3c\xf2\xb1\x95\x47\x3e\xb1\xf2\xc8\xa7\x56\x1e\xf9\xcc\xca\x23\x9f\x5b\x79\xe4\x0b\x2b\x8f\x7c\x69\xe5\x91\xaf\xac\x3c\xf2\xb5\x95\x47\xbe\xb1\xf2\xc8\x69\x2b\x8f\x7c\x6b\xe5\x91\xef\xac\x3c\xf2\xbd\x95\x47\x7e\xb0\xf2\xc8\x8f\x56\x1e\xf9\xc9\xc
a\x23\x3f\x5b\x79\xe4\x17\x2b\x8f\xfc\x6a\xe5\x91\xdf\xac\x3c\xf2\xbb\x95\x47\xfe\xb0\xf2\xc8\x9f\x56\x1e\xf9\xcb\xca\x23\x67\xcc\x3c\xf2\xb7\x95\x45\xce\xda\x59\xe4\x9f\x7f\x6e\x38\xdd\xbf\xd9\xa2\x6c\xd1\x7f\x01\x00\x00\xff\xff\x4c\xae\x50\xc0\xce\x20\x00\x00") + +func dataQwertyJsonBytes() ([]byte, error) { + return bindataRead( + _dataQwertyJson, + "data/Qwerty.json", + ) +} + +func dataQwertyJson() (*asset, error) { + bytes, err := dataQwertyJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/Qwerty.json", size: 8398, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataSurnamesJson = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x54\x7d\x5b\x62\xf3\x3a\xcc\xdc\x5e\xfc\xdc\x15\x74\x0d\xdd\x41\x9f\x28\x89\x96\x18\x51\xa2\x0e\x29\xda\x9f\xd3\xcd\x17\xc0\x0c\xe8\xfc\x4f\x27\x27\x5f\x12\x4b\xbc\xe0\x32\x18\x0c\xfe\xdf\xe3\xff\xa4\x76\x3f\xfe\xf7\xff\x7d\xb4\x23\xdd\xdb\xe3\x7f\x3d\x7e\xca\x76\xb6\x72\xca\x57\xef\x94\x73\x0a\x47\xb3\x6f\x9e\x51\xff\x3b\xd5\xf2\xd6\x7f\x5a\xc2\x2b\xe9\xff\x1f\xf2\x23\xb1\xe2\x67\xf1\x4b\x47\x29\x35\xca\x7f\xef\xf0\xc9\x45\xff\x25\x9c\x4b\xac\xf8\xb7\x9f\x30\xef\xfc\xd3\x5b\xba\xf5\xa7\xb6\x50\x2b\xfe\x50\xa8\x77\xd2\x7f\xb9\xb7\x72\x5c\xf8\xa1\x35\xd4\x39\x85\xf1\x8f\xf1\x57\xbe\xac\x65\x4a\x7c\xbc\x39\x87\xba\xdb\xb7\x96\x9a\xd6\x6e\xff\x9c\xe3\xdb\xfe\x5c\x8e\xfa\xd7\xdf\x21\xef\xf6\x74\x5b\xc8\x59\x1f\x45\x1e\x56\x7f\xf3\x53\xfa\xb9\xea\xb7\x63\x3d\xf5\xf1\xf4\x37\xf7\x64\xdf\x7a\xcb\x9f\xda\x6e\xfd\x0b\xe5\xb2\xef\x6f\xc9\x7e\x75\xad\xd1\x7e\x35\x2c\x58\x90\x29\xe0\x2f\xaf\xe5\xfc\x0d\xd9\x7e\xf2\x8c\x5c\x82\x59\x1e\xd7\xfe\x51\x56\x74\xde\xa2\xfd\xfe\x15\xab\x3f\x7f\xac\xb7\xfe\x85\xbb\xd7\xd3\x7e\xea\xd2\x8f\x48\x57\xb3\xdf\x3c\xae\x89\xbf\x20\x2f\x67\xff\x1c\x5f\xe1\xd4\x7f\x8b\xcb\x3b\xd4\xc5\x7e\xaa\xc8\xcf\xdb\xf7\xda\x1d\xe5\x9b\xfa\xb8\x2d\x9c\xf2\x51\xbf\xb6\x01\x5c\xd2\x5a\x56\x59\x78\xfd\x22\xc6\xc5\x7e\xad\xec\xf8\xf7\x35\xe8\x63\xf2\x83\x8e\x5e\xaf\xed\x63\xaf\x94\x72\xd4\x2f\x6a\x7a\xc5\x1a\xf0\x1b\x97\x3d\x43\x4d\xf3\xa6\x1f\x8e\xd7\x2b\xff\x74\x5d\x8a\x3e\x8e\x2d\xb2\xfd\xe7\x96\x8f\xb5\x23\x72\xc5\xdb\xf7\x7b\xad\xc1\xfe\x5e\x38\x12\xde\xfe\x1d\x6e\xfc\x8b\x9c\xa3\xb2\x37\x3c\xf7\x82\xa7\xbc\xe4\x43\xa2\x3d\xd6\x79\xc6\x5b\xdf\xe9\x5d\xca\x62\xcf\x55\x71\xf8\x6a\x69\xcd\x76\xed\x7b\xa2\x64\x29\xe2\x61\x6f\xf3\x13\xcf\x1d\x8b\x22\x8f\x5c\xf5\x63\xaf\xf2\xc6\x1b\xe6\x62\x5b\x7b\x85\x7b\x3c\xd9\xd6\xd7\xcd\xfe\xe6\x53\x0e\xa9\x7d\xf1\x0e\x6d\x93\x23\x70\xe3\xf1\xfa\x8d\x63\xdd\xd2\x71\x14\xfb\xab\xcf\xd2\xee\xbf\x3b\x8e\xdb\xf0\x09\xe7\x6d\x07\x2b\xfe\xb3\x17\xb1\x97\x4e\xcf\xa7\x1d\xe5\x25\x05\x3b\x41\xe1\x63\x3f\x7d\x7c\xf0\xa2\xcf\x62\x0b\xb6\xc9\xaa\xe4\xdb\xd7\x49\xfe\x4f\x3f\xae\xcb\xce\xbe\xec\x7d\xe4\xec\xe6\x60\x0b\xa2\xcb\xc0\x6d\xb7\xff\x8d\x4d\x3f\x52\x5e\xee\xc4\xe6\x7e\xce\x92\xed\x07\x9e\xa9\x6d\x38\x32\xf2\x57\xda\xb8\x5d\xf8\x88\x34\xf1\x7e\xce\x4b\x39\x43\xb6\x03\x51\xfb\x2f\x6e\x57\xe3\x05\x29\x72\xcf\x7e\xed\x1d\x0f\x9c\xa5\x5e\xb1\x85\x4f\x39\x43\x58\x67\x5d\x53\x5b\xaf\x38\x4d\x58\x20\xde\x56\x59\x9e\x17\x1e\xe9\xee\x33\x8e\xee\x55\x78\x11\xb6\x34\xdb\x6e\xcf\x35\xbc\xf9\xfa\x53\xf9\x2c\xf6\xe1\x6e\x36\x2a\x17\x75\xd7\xfd\x5f\x3e\x38\x59\x35\x62\x21\xff\xd9\x0f\xc9\x41\x2a\x7c\x67\xac\xbf\x5c\xa1\x66\x8f\x5b\x17\x3c\xc3\x16\xde\x76\x36\xf3\x81\x13\x83\x23\xc5\x4b\xc7\x9d\xb7\x1d\x5b\xc2\x99\xa2\xbd\xc8\x15\xe4\x87\x71\x5b\xf1\x66\xa7\x1c\xf6\x62\x5f\xc9\xbe\xd8\x0f\x3f\x63\x5d\xbb\xbf\xa5\x58\x42\x5b\
xda\x37\x8f\xdb\xd2\xcf\x13\xa7\x8e\xdf\xd8\x3a\x2f\x4a\xbb\xe2\x39\xe3\x4c\xc8\x1d\xc1\x65\x97\x55\xba\x36\x2c\xd3\x15\x3e\xf6\xa7\xae\x14\x2b\x4f\x3e\x0e\xee\x21\x07\x75\x8b\x6f\xfd\x19\x39\xfa\x25\xe3\x96\xad\xa7\x1b\x5a\x6c\xae\xdc\x25\x7e\x60\xf9\xda\x9d\x5a\x6c\x1f\xe5\x99\x66\xdb\xad\x76\x7e\x16\x5a\x40\x33\x13\xb3\x3c\xac\x9c\x72\x9c\xb6\x1c\xec\xf3\xe5\xe0\x56\x7c\x5a\xed\x09\x07\xb6\xe2\xda\x3f\xed\xa6\x57\x9a\x85\x50\x8f\x76\x57\xdc\xa5\x59\x7f\xe4\xc4\xe6\xbe\x63\x78\xf1\xe4\x8b\x8d\x8c\x3c\x7f\xc5\xae\xb0\x98\x8d\x97\x9d\x24\x39\x27\x5f\xe3\x60\xbb\x9c\xf1\x57\x9f\xb2\xc6\x7b\xb6\xfb\x92\xc3\x1b\x2f\xf2\x94\xad\xb1\x13\xbd\xf6\x5b\x16\x07\xb6\xa3\xcd\xdb\x91\x96\x9b\xef\x29\xff\x79\x85\xf6\x1f\xac\xfe\x1c\xda\x2d\xcb\x52\xf4\x61\xb6\x18\x71\x75\xe5\xa3\x2f\x1c\xd9\x92\x13\x1e\x50\xae\xf2\xad\x47\xbb\xc2\xca\xc1\xa6\x3d\xbe\x8e\x0e\x4e\x4a\xfd\x1f\xcd\x80\x3c\x9a\xdd\x5c\x39\x6f\xfa\xfb\x93\x5c\xb0\x72\xd9\x25\x9a\xcb\xc7\x0e\x1a\x4d\x4c\xc8\xaf\x50\xff\x98\x5f\x1c\x35\x31\xdc\x76\x7e\x9f\x7f\xfc\x8c\x1c\x85\xdf\x80\xb3\x8b\x0f\x39\xd7\xfe\x89\xf4\x8e\x65\xc2\xf1\x4e\xba\xe5\xcf\x4e\xdf\x9a\x3f\x62\xdb\xf1\xab\x15\x96\xb1\xea\x4b\x14\xdc\x46\xfb\x27\x71\x85\x5c\xba\x1a\x7e\x13\x1e\xb6\x8b\x03\xe1\x53\xe0\x8a\x89\xdd\x2c\xf6\xd9\xf2\x8c\xf1\x2c\x76\x09\xdf\x58\xa2\x23\x2e\xe9\x0c\xb6\xe5\x6f\x7c\xe8\x24\x67\x02\x27\xa7\x3c\x9f\xf8\x21\x59\x77\x9e\xb4\x96\xe4\x7d\x6d\x3b\xfd\x73\xe5\xc6\xc8\x71\xb2\xc7\x16\x8b\x0c\x5f\x2a\x26\x19\xef\x2f\x6b\xb3\x06\xbb\xb0\x1f\xbb\xfb\x1a\x3d\x2c\xfe\x7b\x17\x4f\xf1\x61\x86\x66\xd3\xed\x36\xe7\xf3\x0e\x8b\x3e\x7e\x2b\xb7\xbd\x68\xc8\x3c\x39\x67\x0c\xd9\x1e\x26\x2f\xc3\xbe\xbf\x23\x3e\x4e\xcf\x36\xbc\x73\xa5\x0f\x91\x2b\xfc\x32\x53\xf0\x93\xe4\xfd\xe1\xd1\x0b\x57\x5e\xac\x65\xf6\x8d\xf6\x95\x2d\x53\x4d\xf1\xe4\x99\xb2\x25\x6e\xfd\xbe\x69\x39\xe5\x49\x4f\x3b\xb4\xb9\xcf\x81\xe1\x0f\x3d\x93\x84\x20\x7e\x18\xe5\x72\x4d\x78\x54\x59\x14\x0b\x25\xec\x1b\x37\xd6\x46\x23\x82\x8a\xeb\x7b\xe3\x51\xef\x08\x5b\x6d\xd6\xbd\x6e\x65\xa1\x1b\xb5\xfd\x98\xe2\xbc\xdb\x4b\x73\xa3\x36\x35\x1a\xf6\xd1\x33\x4c\x18\x37\xd6\x3e\x7a\xea\x6d\xb3\xe5\x16\xef\x66\x36\x49\x62\x08\x33\x51\x7e\xb1\xc4\xdb\xde\x29\xac\xfa\x5a\xa7\x87\x09\x7a\x09\xf4\x9d\xc4\x32\x45\x7b\x84\xb9\x7f\xfd\xa7\xbd\x86\xdc\xbc\x9e\xef\x5f\xac\x13\xc3\x9e\xfe\x8b\xe7\xb9\xc2\xaa\xbf\x73\xf4\xb3\xe0\x07\x6c\x47\xec\x76\xf3\x12\x9e\x0b\x23\xc4\x38\xf1\xc5\xb3\x3d\x65\xfe\xc0\xb1\x8a\x51\x6f\xb6\xa8\xef\x92\x9f\x11\x1f\x77\xca\x0d\xc0\x8d\xed\x1e\xea\x48\x58\xd0\xdc\xc6\xd7\x0b\x67\x37\x72\xef\xf0\x87\xe7\x7e\x1c\xdc\x7f\xf1\xe4\x70\x10\x7a\x48\x12\x5c\xac\x7a\xe5\x1b\xcb\x93\xb1\x5c\x5b\x9f\x26\x04\x2f\x2d\xe4\xf0\x1b\x2c\xd2\x89\xf1\xc5\x78\xa0\x0e\x8b\xfd\xf2\xcf\x96\x3b\xb5\xc6\xc6\xb8\xe5\x36\x5f\x74\x07\x0b\x64\x67\x06\x27\xab\xff\x9a\x2c\x8f\x1d\x9a\x30\x23\xe8\xb2\x7f\x95\xe0\xb6\x9e\x7e\x9a\x56\x8b\x51\x9f\x62\x56\xed\x17\x96\x98\xd7\xb0\xe8\xd6\x84\xb5\xa7\x6c\x4f\xf3\x8a\xab\x1e\x82\x35\x97\xd7\xf8\xa3\xb8\x5b\x73\xd9\x22\xce\x43\x3d\xe0\x1b\xcb\xe2\xe1\x5e\x99\x26\xac\x99\x9c\x1a\x9e\xef\x1c\x12\xfc\xf0\xba\xe2\x9f\xe4\xaf\x54\x73\x02\xf2\xc2\xcd\x5e\x40\xfe\x34\x3d\x9c\x58\xc6\x9d\x97\xf9\x96\x70\x5f\xee\xf0\x62\xc7\xe1\xf6\x00\xa8\x70\x51\xf5\x52\xf2\xc3\xdf\x30\x35\xc7\x85\x6f\xa8\xe3\xb7\x47\xd7\xa0\x0b\x7f\xf6\xed\x76\x48\x7f\x9f\x56\x47\x76\x42\x82\x11\x7b\xe9\x0f\xef\x81\x1e\x78\x04\x31\xfa\x18\x6e\x24\xe4\xdd\xe0\xcf\x0b\x22\xa1\x13\x8b\x1c\x16\x1a\x0f\x39\x5c\x0c\xcf\xe4\x6f\xba\x87\xd6\xd0\x0d\x16\xc9\x4f\xb2\x18\xd8\xc9\x76\xa3\xd8\x91\xff\xaf\x27\xf8\x6e\x09\x73\xf5\xa1\xdb\x24\xdb
\x63\xc7\x79\xd6\x18\xde\xa2\x62\x49\x2c\xd4\x22\xe9\x23\x4a\xe0\x55\x25\xfe\xc7\xee\x2f\xe5\x65\x76\x48\x02\xab\x09\xb1\x02\xb6\x58\x0e\xd9\xaf\x6c\x04\x22\x2c\xf1\x23\xbb\x3d\xf8\x52\x3e\x76\xb7\x5a\xe8\x1e\xef\x4a\xa2\x62\xeb\x5e\x32\x03\xb4\xaf\x23\xc1\x8a\xa8\x3b\xb5\x28\x2a\x2c\xf2\x6f\x30\x89\x6e\x03\x3b\x4c\x99\x5e\x37\x9e\xe0\x79\xd3\x4c\xc0\x3c\xd0\x2c\x36\xcc\x6c\xe7\x56\x3a\x3d\xd9\xbc\xc9\x65\xbf\xdd\x1e\xef\x39\xda\xfe\x5d\x35\x98\xd1\x93\x9f\x5f\x57\x84\x24\x6a\xce\x61\x5e\xb2\x5a\x11\x38\xe6\x5f\x89\x82\x63\xc5\x9e\x49\x24\x08\x6f\x34\x75\xbd\xdc\xc1\x83\x37\x44\xe3\x57\xe4\xc1\xb9\xd2\x7d\xe3\x17\x26\x79\x9d\x0f\xd6\xb4\xd4\x23\xd9\x75\x50\x6f\x21\xf9\x83\x65\x1d\xb5\xd8\xb7\xae\x82\xf0\x36\x17\x66\x27\xa1\x31\x2f\x34\x6b\xb1\xd4\xb0\x23\x18\xa6\x6b\x79\x3e\x3d\x96\xbf\x90\x06\x1e\x6e\xd6\xc3\x34\x21\xf8\x68\x6e\xff\x4e\xff\xa7\xad\x3f\x9f\xf6\x47\x1b\x4c\xce\x53\xf3\xc6\x5a\x2c\xd7\x71\x4f\x36\xf9\x61\xb1\xa8\xb1\xb9\x7f\xc0\x1f\xb8\xf1\xa2\x6a\xd1\x1f\x8c\xaf\x47\xce\x50\x70\xee\x66\x3d\xfb\xe6\x08\x34\x3f\xb5\xd3\x14\xee\xcd\x4c\x6d\x93\x1f\x16\x0f\xe0\xc9\x35\xcf\xee\x1c\x6c\x91\xed\x60\x30\xe5\xd9\xb8\x08\xe2\x72\xe2\x29\xfe\xfc\xc1\xa4\x0f\xe1\x40\x37\xcf\x20\x61\x64\xbe\xb0\x55\x62\x2f\x2c\xe7\xd6\x7d\xc7\x45\xd1\xbf\xee\x0f\x7f\x06\x5b\xc1\xd6\x75\x13\x2d\xa8\xf4\xc0\x51\x7e\x0a\xc9\x9c\xee\x16\x2f\x16\xa2\x32\x59\x76\xba\x33\x8d\xb1\xe1\x1a\xe7\x22\xa7\x08\xfb\xbc\x05\x3b\xbb\xe2\xc7\xe3\xa9\x41\xbe\x3d\xb2\x3d\xf9\xca\x6b\x1c\x3e\x21\xe3\xa6\xbc\x11\x3b\xbd\xc2\x2f\xff\xe2\x92\xe6\xf1\x6c\x76\xb3\xf5\xc7\x67\x49\xaf\x2c\xfe\x90\x60\xe7\xb4\xbb\x76\xa5\x13\xd1\x0a\xe3\x71\xa6\x09\x67\x2d\x11\xbb\x2d\x46\xe7\x01\x24\x81\xb9\x78\x51\xbb\x60\x98\x43\x33\x4f\x0d\x6f\x53\xf1\xe3\x19\xf1\xcd\x9e\xea\xf4\xe1\x49\xc0\xf9\x94\x8b\x77\x00\xb2\x58\x8a\xfa\x0f\x3c\x64\x61\x98\x21\xc7\x19\xe6\xf6\xb0\x5b\xbe\x8b\xd1\x64\x3c\x7d\xce\x38\xb8\xab\xde\xc1\x75\x63\x9c\x4f\xc7\x23\x4e\x4a\x0f\xa8\x7d\x85\xf5\x79\x0f\xd3\xab\xc7\x01\xf1\xb6\x5c\x89\x8c\xcd\x97\xd4\x6d\xc4\x05\x66\x07\xda\x81\xff\x1e\x1e\xa2\x68\xd2\x80\xbd\x99\x60\xd5\x96\x8f\xa7\x9b\xc8\x75\xe4\xe5\x6f\x0d\x9d\x6c\x63\x3f\xbc\xd8\xe1\x1f\xac\x62\x3b\x6d\xb1\xc4\x0d\xe1\x6f\x48\xe8\x23\xe7\xc7\x2c\x87\x86\x8e\x70\x25\x85\x5e\x66\x8a\x81\x3f\x86\x70\x3d\x0e\xc4\x66\x8b\xc1\x92\xb6\xc8\x54\x76\xfd\x26\x98\x92\x3d\x33\x68\x57\x1f\x67\x3b\xff\x42\x6c\x20\x7b\x67\x7f\x41\xde\x50\xf6\xee\x9b\x8e\x30\x9f\x94\x73\x03\x13\x76\x74\x8f\x1a\xf6\x78\xa8\x87\x17\x8f\x18\xfd\x6a\x99\xe7\x16\x67\x12\x19\xfb\x89\x69\xe3\x2d\xb2\x07\x8d\x81\x76\x4e\xb6\x39\xda\x99\x7f\x69\x32\xc4\x3b\x8a\x30\x71\xeb\xc7\xb5\x55\xbc\x97\x3d\xb7\xd9\x95\x8e\x23\xbd\x75\x09\xcc\x1c\x59\xda\xe0\xe3\x97\x52\x61\x26\x26\x89\x42\xb0\xd9\x62\x13\x98\xfa\xc7\xc0\x50\x96\xfb\x23\x21\xad\x1d\x78\x4d\x2b\x1e\xe6\x71\xe7\x9d\xf1\xe9\xa7\x98\x81\xb2\xc4\x02\xff\x74\xca\xb5\x96\x05\xbb\xcc\xa2\xc4\x57\xb2\x60\x5c\x82\x10\x39\xa3\x0e\x03\x15\x60\x22\x3d\xe1\xee\xe9\x21\xdb\x68\x6c\x25\x82\xf8\x04\xbc\xda\x82\xe0\x46\x7d\x94\x05\xf6\x92\xdc\xf3\x43\xe5\x3a\x3d\x99\x6a\xeb\x09\x8d\x6b\x21\x76\xd2\xfe\x9a\x1f\x5f\x5e\x5c\x20\x39\xda\x15\x9f\x07\x13\x41\x57\x8e\xe8\x84\x6e\x79\x2b\xf0\x77\x77\xe0\x0d\xdb\x4f\x98\x90\x20\x07\x78\xb6\xf7\x60\x8a\x0b\xb7\x96\x61\xc7\x6b\xf9\xb1\x57\x6b\xfa\xa4\xf6\xba\x07\x8d\xb3\x62\x03\x38\x0a\x51\xac\x23\x76\xa5\xbd\x35\x93\x44\x54\xca\x8c\x53\xef\x4a\xaf\xb0\x84\x27\x5c\x47\xc5\xa9\x90\xc4\xe1\xc4\x66\x3c\xc7\x5e\x9b\xc7\xc7\x3d\xaf\x06\x97\x9c\x84\x10\xde\x31\x21\x12\xab\xfd\x87\x29\xa3\xa1\x25\x38\xc3\xb9\x00\x4b\xea\x4c\xe6\x24\xcc\xa
1\xc7\x15\x7f\x18\xe1\x97\x0a\xed\x99\xfa\x64\xf9\x38\xb3\x12\x1a\x2f\x00\x26\x94\xa4\xd8\x3e\x57\x73\x54\xfe\xfd\xc0\xcb\x22\x3f\x6e\x16\x31\xfe\xbb\xe9\xb7\x2c\xeb\x97\xd8\xda\x9e\x38\xea\xb9\x98\x1f\xc4\x58\x11\xe5\xbd\xc4\x9d\x7a\x92\x2b\x9f\x6f\x27\x4b\xbd\xee\x6a\x7f\x47\x6f\x0b\x36\xd5\xdc\x90\xbe\xa5\x1e\x4f\x5d\x88\x42\x53\x21\xd7\x2c\xc3\x7a\xcb\xde\xce\x88\x2b\xd4\x43\xdb\x33\xca\xd9\x07\x10\xa0\x36\x87\x69\x93\x1d\x03\xd9\x6a\x73\x27\xa1\x13\xfd\x7b\xe1\x83\xd4\x95\x13\x62\xb9\x24\x38\xc0\x03\x02\xd6\x44\xe6\x21\x11\x12\xc3\x9d\x65\xc0\x3a\xe6\x29\xd4\xde\x5c\xc4\x0b\x63\xdc\x79\xc8\xe5\x85\x0a\xee\x92\x27\x61\xf6\x4b\x92\xae\x6b\x1c\xcf\x38\x39\x2d\x8b\xfb\xcc\x66\xf9\x26\x1e\x84\x41\x48\xd6\xdb\x63\x7f\x0e\xf1\xb0\x1c\x52\xbc\x82\xc5\x88\xb8\xf7\xe1\x36\x40\x61\xdc\x18\x5b\xbd\x02\x14\xe5\x29\x61\x9b\xe5\x89\xcc\xe4\xec\xde\xaa\xcf\x7b\x58\x0a\x76\x0e\xf7\xae\x76\x8a\x50\xe7\x33\xb9\x97\x9c\x93\x66\x01\xfa\x20\x7b\xb1\xe7\x91\xcf\x55\x5b\x81\xc5\xb1\x0d\xdd\xf0\x3b\xb2\x86\xc8\x80\x25\x7c\x62\xd0\x14\x7f\x0c\xdc\xde\x81\xc2\x48\x7e\xe3\xd8\xa3\x44\xdf\xf6\xe7\x25\xc0\x94\x6f\xc2\xa0\x1d\x61\x59\x80\xc6\xf4\xd6\x0a\x36\xe9\x24\x54\xad\xbf\x61\x6b\xcd\xc4\x42\x81\x2c\xf8\x69\x79\x39\xf3\x76\x12\xc8\xac\xb6\x2d\x04\xc1\xe4\xca\xe0\x52\xfa\x91\xea\x75\x9c\xad\x07\x6c\x18\x63\xe7\x43\x2c\x7e\x31\x67\xf5\x0c\x3d\xef\xb8\xf2\xe2\x1c\x11\xad\xee\x67\xb8\xd4\x88\xed\x34\xbb\xb2\x08\x16\x4c\x01\xca\x20\x88\x8f\xb4\x5e\xce\x1c\x41\xc0\x8d\x41\xb6\x9e\x09\x2c\xce\x16\x99\x1f\xcb\xad\x59\x22\x6c\xa0\xad\xed\xa7\x12\xc6\xcf\xb4\x3d\x19\x96\x02\x10\xa6\xa3\x5e\xe7\xdd\x71\xd4\x92\x07\xd2\x1a\x3d\xd8\x3d\xe8\x0a\x52\xd9\x7d\x97\xef\x32\x1e\x95\x6f\xb9\x77\xb3\x75\xdb\x61\x68\x24\x5e\xc1\x9a\x78\xc6\xb9\x4a\x90\x67\xb7\x69\x19\x89\x88\x24\xeb\x37\xe3\xf8\x53\x0c\x20\xe2\xc6\x89\x80\xc5\x1c\xeb\x2b\xf0\xe3\x36\x26\x44\xe9\xf4\xc0\xb3\xd2\xf1\x1a\x26\x28\x5f\xfc\x06\x8d\x9a\x1f\x9a\xf8\xd0\xbe\xe9\xa2\xea\x4f\xb8\xf1\x11\xe7\x7b\x5b\x06\x52\xba\x1d\xde\x63\x16\xfb\xc6\xf0\xba\x05\x87\x5d\x65\xc9\xe8\xb5\x81\xd4\xca\xa5\x1a\x00\xaf\x5c\xa1\x6e\xb1\x59\xf4\xe4\x48\xae\x2b\xb1\x02\xcd\x29\x80\x18\x05\x3e\xda\x56\xae\x8b\x51\xcc\x88\xce\x35\xe9\x33\xf4\xa4\x7c\x46\x36\x6c\xab\xac\x39\x6a\xad\xdf\xac\x89\x16\x86\xf1\x03\xa1\x70\xc5\xf6\xcc\x25\x88\x45\xf3\x07\xff\x20\x21\xba\x34\x08\x44\x94\xce\x3b\x2d\x67\xb2\x98\x51\x3a\x73\xb8\xec\xae\x7e\x96\x48\x57\x45\x30\x60\xde\xed\xa1\xe6\x22\xb1\x15\x53\x0a\x31\x70\xe6\xff\xdf\x9e\x16\x17\xbf\x3b\x72\x07\x69\xb0\xe9\xa8\xe6\xc2\x54\x54\x8e\xd1\xc2\x97\x41\xea\xf0\x8c\xee\x37\xf4\x30\xf1\xcb\x67\xa7\x67\xcf\xc5\x5d\xb0\xfc\x16\x2c\x77\xf1\xb4\x6b\xf3\x0a\xc9\xc4\x1b\xa4\x80\x05\x30\x3f\x82\x8c\x6a\xbd\xf4\x8f\xc4\x97\xfe\x7f\x59\x86\x59\x9a\xe8\x6a\x17\x7d\xd2\x88\xd7\xd0\x7b\x0a\x30\x20\xf9\x27\x84\x4c\x43\x28\x7f\xd0\x00\x22\xa6\x6a\x7a\x55\x23\xff\xe4\x61\xeb\xf6\x7c\x22\xa5\xb9\x7f\x25\x4e\xaa\x48\xb4\xcc\x84\xfc\x4a\x60\x85\x32\x1a\x5c\x57\xc8\x0c\x0c\x8e\x59\x83\x40\x9c\x23\x71\x46\xaf\x68\xfe\xc1\xc0\x30\xd6\x7f\x0c\xdb\xa8\x7a\xdb\xb1\xf0\x9e\xdf\xab\xad\xe1\x16\xac\x72\x54\x60\x6c\xc2\xd3\xd6\x29\x47\x96\x4c\xde\x08\xe5\x0d\x68\x27\x54\x93\x90\xac\xde\x81\xf7\xa2\x9d\x04\x7a\xc4\x52\x8d\xa2\x0a\xcd\xda\x1b\xeb\xb3\xc1\x56\xd9\xa6\x99\x71\xd2\xda\x0c\xb2\x86\x81\xc7\xe9\x67\xca\xb5\x86\x5b\xd1\xbf\x1e\x91\xa6\xe7\xc0\xfa\x43\xd8\x08\xdf\x00\x0f\xd3\x2c\x95\x81\xa0\xbd\xa1\x1a\xea\xc0\x3b\x62\xf8\x92\x99\x1d\x89\x78\xdd\x71\xdc\x21\x7b\x62\x34\xa7\xc0\xda\x03\xcb\x6c\xe1\xce\x09\x69\xe5\x3c\xfb\x1d\x5c\xf0\xac\x7b\xe8\x9e\x65\x14\xfb\x91\x19\xb9\xd8\x
54\xb2\xa3\x40\x4f\xff\x59\xf3\x64\xe6\x09\x18\x2f\x48\xaa\xe2\x30\xcc\x2e\xc7\x48\x9f\x48\x02\xc9\x01\xc5\xfa\xfa\x4b\x8c\x8c\x93\x98\x35\x49\x79\x20\x8c\x22\x14\x9b\xe0\x25\x9b\x2c\xae\xad\xc2\xf9\x85\xdc\x01\x62\x55\x0d\x58\xb0\x47\x2f\x7a\xee\x37\x51\xb9\x76\x88\xa7\xc1\xb6\x25\x5a\x69\xee\x88\x9c\x1e\x16\x83\xec\x78\xa1\x46\x5b\x68\xbe\x74\x6d\x59\x58\x7a\x79\x08\xb9\xc4\x9f\xd8\xba\xed\xe2\x67\x41\x7c\x52\x27\xfb\xb8\x15\xc8\xfb\xc3\xa0\xfe\xad\x26\xd6\xbb\x6e\x24\x24\x1d\xc0\x9e\x1c\xc8\x6c\xe9\xcb\x44\x14\x45\x5c\x5c\xd8\x86\xb3\x3c\x79\x80\xe4\x46\xe8\x8e\x76\x37\x2c\x9a\xba\x61\xd3\x24\x44\x7d\x67\x26\x49\xc5\x52\x72\x0d\xac\x3c\x1f\xd5\x7d\x76\xa3\x81\xd8\x40\xd2\x1c\xfb\x8b\xbd\xb1\x46\x7d\xfb\xe6\x7f\xfc\x40\x44\x9a\x10\x5d\xca\x04\x20\xee\x84\x3f\xe0\x75\xc8\x7d\x06\x6a\xa4\x51\x8d\x59\xb6\x66\x88\x8a\x1f\x48\x3b\x44\xe5\x1b\xa5\x48\xea\x69\xa9\xeb\x54\x3a\x33\xc1\x50\xf9\xc5\x54\x70\xe5\x97\xf2\xb5\x14\x28\x9b\xda\x32\x9d\x91\x21\xff\x4c\x18\xee\x8d\xd7\x78\xca\x72\x12\x68\x9b\xdc\xfa\xbe\x91\x6e\x2a\xfe\x42\x17\x18\x50\x7b\x92\xe4\x07\xd9\xdb\x27\x54\x09\x40\x11\xc0\xac\x8a\x46\x01\x28\x2a\xf3\x5d\xe0\x41\x10\xbd\x65\xc9\xcd\x11\xc6\xc3\x8b\xc9\x3a\xac\x96\x9b\x4d\xe2\xfa\xed\x68\x58\x10\xd2\xa3\xd5\x09\x24\xf5\x0b\xf1\x39\x2e\xba\x99\x99\x2d\x66\x54\xd2\xcb\xc7\x9e\x60\x05\x7e\xd6\x6e\x3f\x35\x19\x88\x4f\x52\x7b\x64\xc7\x97\x29\x9a\x2d\xf2\x6b\x04\x66\xfa\xd6\x74\x60\xc1\xc1\x75\x45\x5d\x99\xb9\xe1\x06\xbc\x0b\x81\x58\x31\xdd\xbb\x2d\xa5\x5d\x28\x7b\xeb\x8a\x6c\xcf\xbe\x26\x4c\x18\x2c\xff\x7c\x11\x76\x5b\xcc\xf0\xb6\x4b\x83\x6c\xfe\xf9\x8d\xe6\x6f\xf1\x9a\xf9\xa4\x85\x18\xc0\x33\x92\x29\xb2\xda\xb6\x0e\x63\x4d\x74\x43\xe3\x56\x2b\x61\xbd\x03\xad\x99\x64\xe4\xf6\x5b\x77\xb8\xfb\x01\x1b\x02\xe0\x58\x31\x0d\x5b\xe8\x6e\xa0\xde\x55\x3f\xb6\xfe\xa5\x96\xdf\x19\xf1\x5f\xd0\xbc\xe5\x26\x24\x64\x98\xbe\xd6\x55\x08\x3f\x14\xb9\xa1\x48\xd8\xc4\x5e\xca\x83\xd1\xe2\xcb\x59\x69\xe5\x40\x40\x09\xfc\x99\xe5\x0a\xe2\x10\x7a\x50\x68\x99\xd3\x61\xdb\xb3\x96\xe5\x89\xa4\xf9\x29\xff\x16\x56\xda\x7a\x7b\xd8\x59\xeb\xae\xb6\xc1\xb1\x49\xc0\x67\x90\xf5\x32\x92\x97\x26\xd6\x87\xb8\xb3\x46\x24\x5b\x37\x93\x5d\x7b\xe4\x22\xce\xab\x98\xef\x84\xcf\xa8\x84\xc2\x61\x52\xb5\xb6\x4c\x97\x2d\x76\x02\x09\xa9\xc4\x8f\x2c\x08\xdd\xc8\x52\xb4\x7e\xa0\xe7\xb5\x38\xa2\xa0\x27\x67\x25\x20\x26\x46\xd9\x0c\xdf\x25\xa9\x8b\x59\x80\x1a\xc6\x65\xc6\xc2\x8b\xa9\x9e\xc4\xd5\xc3\x8d\xd4\xfd\xeb\x32\xff\x62\x7a\xea\xa2\xfd\x61\x37\xe4\x44\xee\x86\x80\x33\x5b\x7c\x16\x56\x20\xe7\x12\xba\x60\x0d\xae\x82\x18\xef\x4a\xc4\xff\x3e\x8e\x19\x20\xa2\x3b\x34\x4d\xb3\xc7\x4a\x07\x2b\x26\x53\x47\xa0\xa5\xd9\xbd\xfd\xb0\x18\x18\x43\xf3\x70\x6a\x3a\x4c\xcc\x61\x96\x32\x4d\x13\x41\x5a\xe6\xa2\x72\xc9\x79\x03\x22\x53\xa8\x2b\x1b\xda\x67\x7f\x87\xe8\xd6\xec\xd0\x55\x1d\xe5\xf9\xe4\xce\xff\xa9\x9f\x7a\x16\x33\xee\x34\x70\xb5\x4f\xc9\x50\x68\x25\x17\x30\xe8\x8b\xf9\x0b\x1c\xa2\x54\x2e\xbb\x6f\x78\x6e\xe2\x7d\x13\x0f\x85\x27\x91\x2d\x85\xe9\xd9\xc3\xed\xa0\xdf\xc0\x1b\xd4\xb2\x36\x66\x83\x92\x56\x55\xa0\xb2\x33\x00\x3d\x8b\x8b\x08\x5c\x62\x19\x73\xdc\x68\xc6\x46\xe9\xfa\xb7\x4b\xf2\x63\x90\xb8\xd6\xcd\x80\x68\x85\x27\xcf\x8c\x23\x7b\x60\xc7\x18\x4a\x23\xe6\xbe\xc2\x09\x9e\xe9\x9f\xdd\x83\x98\x59\x6a\x0f\xbc\x38\x08\x91\xe4\xfa\x4d\x76\x26\xc4\x28\x30\xe9\x48\xe7\x4e\xe2\x05\x0a\x57\x17\x2f\x81\xae\x0c\x13\xab\xb5\xd0\x2e\x55\xb5\xde\xfa\x85\x1a\x3e\xfd\x8e\x96\x18\xca\xa8\xd8\xc5\x7a\x20\x98\x1c\xf9\x10\xcb\x72\x8a\x7d\x21\x32\xf1\x2c\xd0\x22\x41\xda\x41\x7d\x63\x1e\x64\xb5\x2e\x38\xc7\x8e\x34\x7a\x96\x5b\x1a\x37\x67\xfa\x20\xbf\x0a\x35\x31\
xa3\x21\x2c\x3d\xe1\x8a\xc7\x7c\x0f\x18\x57\x22\x7e\x10\x06\xe4\xd6\xc2\xa0\x79\x00\x7d\x77\xa6\x3a\x33\x5f\xe9\x0a\x1f\x8f\x59\x62\xae\x56\xfe\x9e\x15\xbb\xf7\x3a\x6b\x41\x00\xa1\x11\xf9\x20\x51\x2d\x7e\x2e\xf4\xb2\x93\x99\x30\x15\x10\xbe\x64\x07\xf5\x60\xdc\x25\x33\xf9\x5b\xa2\xe4\x34\x08\xe7\x92\xbc\x55\x2d\xc0\x8b\xbc\xc8\x2b\x37\xda\x16\xe2\x95\x56\x0b\x34\xee\x70\x25\x78\x9e\x84\x47\x17\xff\x49\xf8\x5c\x41\xa3\xee\xd9\xab\x91\xd1\x12\xe3\xd6\xbd\x5b\x20\x68\x8c\xab\x40\x1e\xca\x49\x33\x7f\x47\x49\x6a\xcc\x13\x81\xd1\x61\x74\x06\xfe\xc5\x59\xd9\x4d\xf6\x69\x31\xe2\x6a\x17\x5c\x70\x46\x9a\xd7\x96\xae\x0b\x78\x02\x2e\x76\x48\xb0\xeb\x01\xe0\xac\xfc\x49\x46\x97\xca\x62\x00\x20\xa3\xcf\x80\x93\x04\xb0\xdb\x8e\xe7\x54\xde\xb0\x2f\x2f\xa0\xe4\xb6\xe5\x6f\xd4\x63\xab\xd2\xc9\xf0\xd4\xea\x50\x22\xe0\xa5\x2b\x5d\x34\x51\x12\x0a\x32\xf1\xd7\x7a\x21\x50\x40\xb1\x5a\x1b\x0b\xc7\xb7\x6d\xe3\xe2\x89\xb9\x16\x23\x8c\xef\x17\x9f\x1a\x29\x30\xc6\xce\x6a\x43\xe5\x16\xc3\x7c\x2a\x4f\x00\x41\xc6\x11\x0d\x8b\x97\x88\x12\xee\xd3\xc2\x16\x78\x58\xcf\x9d\xee\x72\xe4\xc1\xcd\x1b\xbb\x3a\x25\xfc\xcd\x87\x59\x53\x71\xb2\x76\x7a\x1d\xd5\x6e\xe6\x9c\xbc\x66\x14\xcb\x95\x79\x55\x24\x36\xb3\xcf\x55\x3a\x60\x70\x0a\x13\xf9\x45\x38\x76\x06\xc2\x15\x04\x1d\xca\x0a\x6c\x6e\x83\x71\x4c\x66\x62\x9a\xcd\xcb\x62\x72\x10\x81\xc2\x91\x89\xa3\x20\x87\x07\x63\xd5\x8e\xaf\xee\xc4\xa2\xb8\x87\x43\x4c\x0c\xfb\x42\x1e\x39\xc9\xd1\xc5\xe3\x99\x9b\x98\x4a\x7b\x3b\x81\x8e\xf0\x84\xde\xf5\x1b\x41\xdf\x15\xc4\x74\xdb\x36\x1b\x9d\x09\xa1\x02\x73\xa8\xf3\x2c\x58\xba\x74\xc2\x4e\xd9\xef\x01\x4d\xc8\xfd\x74\xca\x1b\x36\x5a\x63\x90\x08\xf2\x84\x78\x64\x98\x7d\x2e\xd7\xd6\xbd\xa2\x70\x27\xa7\x42\xb4\x3f\x6c\x4b\x45\xad\x89\xac\xeb\xd1\x38\x17\x2e\x90\xb2\x13\x1f\x56\x51\xe3\xc1\x09\x5e\xe0\x5c\x58\xab\x09\x53\x05\xcb\x51\x52\xcf\xd7\xa8\xf9\x9a\x8b\x96\xbc\x00\x56\x96\x94\xa1\x49\xa9\x2d\x86\x7c\x7a\x51\x58\x5e\x90\x7c\x1e\x85\x34\x02\x63\x42\x04\x09\x17\xc9\x2f\x6a\xea\x17\xc6\xb6\xef\x41\x37\x71\x90\x62\x76\x78\xc9\x99\x9d\xf6\xd2\xa4\x7d\x14\x16\x98\xaa\x1b\x40\x35\xd9\xfc\xb8\x99\x69\x6d\x3a\x1d\x2e\x32\x9e\xa1\x9d\x82\x64\xc9\xe9\xd2\x77\xb7\x52\x12\x3c\x99\x7d\x0b\x5f\x47\xab\x51\x63\x75\xdf\x41\x78\xfa\x8b\x5a\xc9\x39\x5e\x40\x86\x8b\x83\x3a\x91\x6a\x1b\x5c\x92\xa7\xbb\x4e\x4d\x3c\x90\xe5\xc8\xad\xc7\xb3\x2d\x08\x6e\x35\x98\xb1\xd4\x2e\x5c\xd9\xf3\xb8\x43\x79\x48\x08\x9d\x73\xb1\x54\x5e\xfd\x61\x72\x00\xd2\x8e\x50\x05\x90\xc1\x24\xa6\xa5\xdf\x48\xa7\xae\x25\x46\x7c\x40\xbf\x10\xcc\x34\x59\x27\x5b\x96\x78\x4b\x84\xf8\x7c\x78\xdc\xec\x95\x0d\x46\x0a\x4f\x87\x7a\xe7\xd0\x3d\x7a\x76\x02\xe5\xda\xb5\x78\xc0\x8b\xac\x0c\x40\x8b\x47\xcd\xf7\xbd\x35\x5b\xc5\xde\x88\x65\x03\xd0\x8d\xfd\x50\x4c\x04\x37\x03\x3c\x0c\x3c\x43\xc3\x69\xd9\xbc\xb2\x83\x83\xf1\xec\x66\x45\xe6\xe2\xa5\xc5\x2b\x48\xd2\x88\xda\xfe\xaa\x55\x10\x4b\x10\x66\x24\x18\x9b\x1a\x7e\xa0\x51\xcd\x03\xf0\x0b\x77\x3c\x9e\x83\x75\xd3\x82\xb3\x27\xd4\xa3\xc5\x3b\xb1\x74\x9e\x9c\xa4\x26\xf6\xd8\x73\xa1\xd2\xc9\xea\xcb\x49\x72\x57\xfb\x90\x5b\x7c\x4f\xf6\x92\x39\x10\x98\xe2\xa9\xd6\xc1\xa2\x97\x46\x95\x6f\x58\x14\x6c\x1d\x2c\xbe\xa1\xbc\xe5\x41\xe8\x18\x64\x0c\xb5\x5a\x4e\xb7\x94\x4f\xed\x04\x0c\xe8\x2b\x2f\x42\xce\x72\xa4\x59\x62\xb9\x87\xb3\x64\x7a\x23\xa7\x82\x06\x0e\xd5\x93\x6e\xe4\x54\xbb\x12\x62\x8f\x19\x75\x6c\x1d\xc1\x4d\x3f\x80\x48\xc4\x33\xa1\xf2\x1c\xaa\x39\x02\x56\x59\x70\x2b\x43\x5f\x12\x71\xa7\xb0\x5a\x09\x3d\x28\x2f\x17\x3e\x44\x62\xf3\x6a\x81\xd0\x9a\xae\x61\xc0\x5f\x08\x17\x98\xc2\xbc\x80\x21\xc9\xb9\x25\x1a\xd6\x5a\xb2\xba\xbe\x98\xfd\x41\xe1\x9a\xb5\x4a\x6d\xa9\xc1\x52
\x70\xc0\x17\x86\x67\x9a\x31\x68\x4e\x25\x8e\x3d\x02\xd1\xc8\x8c\xfa\xdd\xf4\x6a\x21\xd3\x7e\x53\xa9\xe5\x8c\x13\x3b\xdd\x89\x12\x15\xdc\x56\xcd\xf8\xfb\x8e\x96\x28\x27\xe3\x76\x16\x4a\x43\xf9\x6c\x2b\x49\x1f\xac\x26\x03\xa7\x73\x5c\xf4\xde\x31\x98\x13\x3b\x4e\x93\x23\xfb\xcf\xf4\x38\x62\x27\x3f\x12\x00\x90\xb7\x8a\x38\xa4\xc9\x53\x7b\x02\x77\xe1\xc3\x3a\xaa\x5f\xea\x3c\x80\xbe\x89\x7d\x74\x6a\x80\x13\x64\x24\x53\x64\x24\xa1\x06\x09\xa6\x75\xea\x9e\x9b\x2a\x55\xd1\xd9\xab\x28\xca\x19\xc1\x97\xc8\x24\xd9\x43\xca\x7e\xe0\x61\xe9\x92\xa0\xb3\x8e\xe1\x11\x5e\xe0\x7f\x7f\x53\x5c\xc9\x53\xbe\xe4\x1a\xb3\xb4\xb1\x85\x85\x31\x78\xe5\xd3\xb6\x7e\x90\x94\xd4\xbd\x28\xf5\x1b\x5e\x2c\xd0\x6f\xb0\x90\x7a\x79\x19\xec\x88\x3d\x6c\x33\x89\xa4\x61\xcb\x38\xcf\xc0\xc4\x2a\x1c\xa4\x3d\xd7\x7d\x06\x64\xac\x7f\xfa\x04\x0e\xc9\x32\xc0\xb8\x3d\x70\xfe\x7e\x0a\x8d\x71\x0b\xe4\x81\x6d\x71\x61\xa6\xb5\x71\xb9\xf5\xbf\x66\x16\x12\x42\x14\x89\x38\x33\x56\x52\x6e\x3e\x3d\xbd\xfc\xb9\xf6\xd7\xbe\xd2\xec\xae\x3d\xa1\x96\x65\x10\x6e\x22\x6a\xc9\x1c\x44\x5d\x2d\x8e\x45\x4b\xf6\x6b\x0e\x0a\x04\x9a\x8c\x78\x82\x2f\x23\x2f\x12\x58\x3f\x1d\xf8\xad\x51\x2f\x4e\x2f\xaa\x9e\x44\x44\x32\xe2\x9f\xe8\x60\x87\x96\xea\xf0\xbe\xf0\xa5\x57\x0c\xa4\x05\xc4\xee\xec\x51\xc9\x6f\x9c\x89\xa4\xdc\x2a\x37\x61\x16\x0d\xb9\xf9\x63\x21\xee\x00\x4b\x36\x0f\x52\x3c\xbe\xad\x3c\x7a\x06\x6d\xb3\xa7\x39\x11\xa1\xe3\x3d\x08\xea\x65\xc3\x3b\xec\x91\xc4\xb0\x83\xc6\x16\x1c\x27\xa3\xa5\x34\xbe\xae\x25\xe5\xa7\xc4\xc6\x01\x95\x93\x16\x09\x70\xa9\x2f\xb2\xc8\xd9\xec\x8c\xfe\xd5\x00\x87\xb9\x77\x44\x51\x24\xde\x3c\x50\x92\x76\x0e\x8f\xbb\xf9\x7e\x27\x00\x1d\x6f\x7c\x68\xe3\xf5\x51\xcf\x4e\xaf\x7a\x79\x31\x2f\x7f\x70\x0d\x32\x41\x61\xcd\x7c\x71\x66\x96\xc1\xad\xde\xfe\xa4\x5a\xce\x26\xfa\x0c\x4a\x4e\x40\x49\x59\x83\x5d\x56\x12\xd6\x8e\x6d\x8c\xf6\xac\xb9\xfc\x85\x02\x9d\x76\xef\x21\xaa\x18\x5f\x52\x10\x36\xef\x45\x90\x9c\x3e\x7b\x0a\x21\xb9\x51\xb2\x9b\x6b\xa8\x9a\x5d\x06\xcb\x35\x4f\x5a\x16\x96\x55\x65\xbd\xf8\x57\x2c\x8e\xc0\xf9\x0d\x6c\x1a\xb1\x3e\x13\xf5\xea\xfa\x17\x3f\x8e\x63\x35\x05\x93\x19\x12\x6b\x7c\x6e\x11\x81\xf6\x68\x30\x7d\xfd\x86\xd6\x62\xf4\xf8\xb7\x2d\xcc\x02\x21\xed\x6d\xf9\x28\x7c\x5a\xc6\xe6\x1c\x86\x5d\x63\xc1\xbe\x09\x59\xe0\x49\x5b\x9d\x31\x8e\xdc\x03\x47\xdf\x2c\xe3\x19\x2e\xb8\x99\x56\xba\xc5\x65\x59\x29\xa5\x88\xf7\x90\xb7\xbc\xa3\x1d\x95\xc2\xb0\x56\x4c\xa9\xd3\xf7\x25\x2e\xd0\x3b\x61\x86\x2c\x4c\x3c\xf4\x12\xa0\xdf\xd8\x82\x26\xa6\x29\x22\xe1\x23\x9f\x4b\x7e\xb7\x59\xd2\x14\x6e\xae\xf1\x1b\x68\xf9\x6b\x30\x2d\x2f\x43\x6c\x2d\x02\x3d\xc3\xc7\xd7\xca\x01\x9c\x27\xb0\xe7\x5e\xf9\x8c\x53\xcc\xfe\x8a\x4a\x1d\x7d\x3e\xc8\xaf\x04\x86\x7e\x9a\x9b\x20\x7b\x09\xc8\xcd\x07\xa1\x52\x7b\x3b\x39\x41\x6c\x2b\xda\x98\x2e\x92\x83\x90\x2a\x2d\xfd\x15\x68\xfd\x40\x16\xfa\x97\xc0\x41\x50\x38\x9f\xd8\xc2\x16\x11\xce\x2b\xd8\xce\xf2\x6d\x49\xb7\xbd\x4d\x46\x85\x50\x31\x13\xbe\x56\x9a\xbd\xb2\xf0\x72\xd0\x98\x55\x21\x09\xf7\x91\x75\xbf\xed\x2c\xd1\x55\xe7\x17\x72\x27\x79\x7b\x22\x21\xd1\x5e\x6f\x8f\x0e\xfb\x2f\x0e\x63\xc9\xfd\xe4\xd5\x95\xf3\x93\x90\x33\x5b\x1d\x42\xcd\x97\x9d\x5e\x65\x9a\x58\xdc\x32\x31\x3e\x13\x4b\xb5\x74\xd4\x35\x79\xac\x25\x21\x25\x58\x78\x97\x89\xcc\xce\xb6\xb1\x0a\xbe\x15\x66\xc5\x8a\xd4\xe0\x80\x0f\x7e\x90\xc4\x12\x2f\xbb\xb1\x5a\x29\x1f\x3c\xa0\x8b\x5b\x16\xd1\x9d\xf1\x53\xb4\xca\xd0\x1c\x75\x4b\x03\x1d\xce\x65\xc2\xf3\x86\x5e\x09\x9d\x9e\x12\x14\x24\xb7\x62\xf0\xa0\xa4\xab\x68\x9e\xd8\xae\x52\xbe\x66\xc4\x36\x4e\x36\xd7\x96\x5d\xb6\x56\xd2\x80\x05\x0f\xc7\x1a\xb7\xc6\xf5\xfa\x0d\x6f\xe7\xc8\xbd\xed\xf8\x6d\x73\x6a\x1a\xda\x5a\x3
6\x7a\x6f\x2c\x8c\x35\xe7\x39\xab\x8d\xc4\x1d\x94\x70\x9a\xd8\x02\xb9\x26\x4e\xf4\x5a\x71\xce\x02\x69\x68\x4a\x38\x7a\x18\x1a\xee\x0c\x48\x83\x88\xe7\x4e\xb8\x27\xd8\x5a\x2e\x8e\x07\x07\x40\x3b\xb6\xf4\x28\x72\xc6\x76\x8f\x64\xe5\x2e\x3c\x80\x12\x47\xf0\xaf\x1d\x7d\xb0\xf8\xc9\x02\xf4\x0a\x9c\x62\x88\x76\x98\x7e\x98\x56\x6e\xa3\xf4\x5e\x16\x52\xab\xe6\x9e\x5f\x5e\xa6\x84\xf3\x89\xdc\xc1\x33\xae\x95\x01\x9f\x57\x22\xf4\xd3\xcd\x2d\x77\xad\x5d\xd2\xd1\xea\x79\x60\x99\xfc\x26\x8e\x6f\x70\xa1\xa4\x28\x83\x9e\x51\x24\xa0\x35\x67\x7c\xfe\x61\xf8\x1d\x1f\x7a\xfb\x9f\x6e\x90\x3b\xa8\x01\x5e\x0e\xb4\x3f\xbd\xc0\x1f\x69\xca\xc5\x94\x2e\x80\x5b\x3d\x75\xd0\x17\xb4\x4f\x6f\xf1\x96\x8a\x81\xb7\xe7\x70\xe0\xee\xdc\x5f\xaa\x8b\x7e\xf5\x09\xbf\xa0\x4c\x0e\xc3\x7d\x39\xb7\x73\x4d\xce\x89\x9a\xdd\xcf\x83\x19\x9b\xbc\x55\x43\xee\xb1\x36\x27\x90\x3d\x5e\x4f\x22\xf9\x62\x86\x26\x7b\xdf\x83\x39\x8e\x5e\x61\xec\x11\x48\x87\xa8\x64\x00\x3a\xb0\x5d\xc5\x21\x61\x5b\xcf\x4a\x33\x2e\x31\x8a\xbc\x07\x48\xde\xcb\xc2\x1b\x93\xfb\x0f\x49\x0d\x6f\x33\x06\x3f\xf2\xb6\x04\x86\xb6\xfe\x39\x51\x2f\xec\x8e\xfc\x2c\xde\x8e\x50\xb7\x12\xf0\x9c\xc5\x88\x73\xb0\x72\x7b\xf2\x8c\xeb\x0f\x8c\xbd\x81\x04\x89\x04\x85\x5c\x1b\x23\x42\xcb\x1b\xd4\x08\xb4\xd7\x36\x47\xa3\x48\xa6\x24\x2b\x2b\x98\x45\xb2\x0a\x83\x38\x63\x5a\x69\x76\x41\xe9\xb8\xe8\xce\x94\xe0\x95\x1c\x48\x79\x5b\xe8\x23\xf7\x54\xbe\x65\xce\x73\x63\xf3\xc6\x82\x53\x56\x95\x04\x7e\x3c\x10\xd1\xe3\x2a\xc9\x8b\x79\xc5\xc8\x96\x56\x21\x0c\xc0\x96\x28\xce\x5f\xcc\x54\xc3\xc7\xfb\xa3\x0c\x98\x63\x0e\x34\x3b\xd3\x49\x72\x05\xb8\x02\x59\x7c\xa6\xf0\x7b\x60\xf4\x21\x26\x09\xdf\xf2\x03\xba\x8d\x45\x2c\x87\x65\x49\x12\x93\x22\xf6\xc8\xc1\x16\x62\xaf\x28\x17\x48\xc8\x1d\x2d\xe7\xd3\x16\x08\x4b\xad\xb2\x64\xce\x96\x31\x86\x09\x05\xb1\xb2\x0d\xce\x04\x36\xfb\xe8\x0b\xf9\x1b\x1e\x91\x4e\x19\xa4\xb3\x1a\xd9\x8c\xf2\x14\xff\x0f\x4a\xed\x41\xff\x55\x78\x9a\xce\xa0\x68\xd0\xc3\x38\x7d\x2f\x04\x74\x92\x70\x8f\xa6\xab\xca\xbc\x76\xa4\x8a\x56\x51\x7a\x80\xdf\xc2\x9c\x65\x50\x4f\x73\xd2\x95\xde\xcf\x4e\xd6\xcf\xd3\xc1\xe5\xc5\xf9\xda\x9b\x86\x7f\xaf\xb2\xc2\x71\x81\x0b\x5f\xb5\x96\x03\x84\xd2\xd9\xa9\x6a\x83\x12\x13\xd3\x99\xbf\xfb\x1b\x2e\x60\xe7\x1b\xb9\xa6\x9b\xa4\xd0\x8c\x7b\x8c\x2a\x64\x9f\xf3\x82\xe7\xed\x8b\xbc\x7a\xff\x67\x57\x20\xac\xc3\xf4\xd1\x21\xdd\x35\xfe\x60\x49\x07\x27\x44\x0d\x26\xd3\x86\xbc\x00\x1a\xdb\xbc\x19\xcb\xda\x70\x0a\x0f\xe7\xc6\x6e\xa5\xa2\x9d\x19\x40\x96\x77\x76\x07\xae\x23\xad\x7a\x16\x10\x19\xc4\xdd\x36\xe7\x44\x6a\x59\x83\x1d\xcf\x4f\x46\x68\x12\x59\x35\x14\x9a\x1b\xb2\x19\x79\x68\xe7\x61\xcf\x19\x88\xf2\xc6\x44\x20\x36\xd9\xa7\x57\xc4\x96\xb1\xd3\x59\xe1\x1b\x5f\x26\x6f\x6b\xc8\x7d\x79\xdb\xfd\x91\x8d\x72\xc0\xd0\xb8\xa3\xc8\x78\xf8\xf7\xe3\x2f\xfe\x85\xf0\x23\x0c\xf2\x07\x38\x5b\x39\x6d\x0f\x4e\x80\xd2\x46\xae\xb0\xe3\xa1\x89\x1c\xaf\x03\x0e\x85\x42\xeb\x76\x68\x25\x23\xe6\x13\x55\x76\xff\x2d\xa3\x25\x64\x66\x09\x3d\x5c\x5a\xf2\xf1\xfb\x82\x4c\xdc\xe2\x5f\x5c\xbd\xb7\xf7\x67\xe4\x42\x36\xa3\x96\xf0\xf0\x1a\xff\x9c\xef\xce\x97\x00\xaf\x36\x39\xae\x26\xef\x09\xce\x0f\xb9\xf6\xd1\xfa\x21\xec\xe7\x1c\x50\x95\x38\xa7\xfc\x00\xef\x7e\x3a\x85\x43\xdc\x35\x69\xc8\xac\x2d\x69\xf4\x60\xb0\x48\xf9\xd3\xd0\x46\x73\x3c\x95\x77\x63\xbe\x55\xc5\x21\xa1\x84\x93\x10\x89\x99\xf5\x1d\xa6\x0f\x10\x36\xab\x2c\x7d\x06\xc3\x45\xb9\x02\xcc\x66\x99\x33\x3f\xfb\x0a\x26\x88\xa6\xc5\x48\xca\x03\x3a\xd4\x15\x8d\xb2\x87\x6f\x6e\x4b\x01\x96\xbd\x47\xac\xa9\x05\x54\xf0\xb2\x99\xd1\x4c\x85\x5d\x61\xcc\xd1\xe4\x3a\xd8\x9b\xcc\xf4\xef\xb2\xff\x16\x9b\x06\x92\x7d\x64\x39\x16\x42\x5f\x0a\xd8\x9a\xc5\x
\xea\x9f\x21\xbf\x0e\x1b\xd3\x4e\x6a\x5f\x6e\xde\x75\x53\xa8\x40\x65\x38\x86\xdd\x82\xab\x38\xd1\x1b\xda\xec\xd1\xbf\x7a\x18\x15\x0c\x74\x8c\x4b\x7b\xb3\xf8\xc5\x0e\xf0\x83\x1a\xea\x59\x92\x1d\xfb\xd9\xf3\x6f\xa5\xf0\x48\xcb\x4e\xfd\xa4\xe8\x12\xe5\x3a\x20\x1f\x2a\xac\x12\xfe\x5f\xf8\x20\x94\x83\x16\x24\x32\x4e\x99\xc8\xac\x62\xef\x27\x07\xfa\x25\x96\xe5\x3c\x3f\xc1\xff\xaf\xfd\xb2\x2b\xa1\xc2\x0f\x43\x8c\x40\x2e\xe5\x90\x32\xea\xd9\x00\x42\x55\x6b\x86\x7d\xb1\x59\x88\xf8\x8a\xd3\x5d\x6d\x30\x9b\xf7\xc8\x13\xcf\x14\xaf\xf9\xe1\x5d\x05\xd6\x34\x40\x96\x25\x7a\xcb\x91\x76\xd2\xbb\x82\x3f\xca\xa4\x5d\x05\x8b\x51\x29\xb2\x9b\x04\x9e\x25\xda\x16\x4e\x2c\xa2\xa2\xbb\x0c\x9c\xd2\x60\x63\x87\x4a\xa6\x64\xe1\xdb\xab\x64\x22\x44\x73\x6c\xac\xd3\xa8\x40\x90\x66\xd5\xd4\x5c\x8d\xce\x59\x34\x6c\x55\x88\xec\x7f\x5b\xd8\x55\x65\x99\x5c\xab\x00\x63\x71\x41\x4d\xe9\x74\xf9\x2a\xcc\x5c\x60\x47\x64\xce\x2d\x5c\xf6\x95\x37\xad\x81\x60\xaa\xd7\x99\xbc\x56\x67\x16\xf0\x96\xc2\xda\xa2\xa5\x55\xec\x2a\x65\x8a\xa1\xb7\x46\x95\x54\xd5\xd7\xc1\x7f\x41\xe8\x0a\xdd\x1b\xf5\x77\x1c\x4a\xed\x28\xa5\xdd\x45\xde\x68\x7d\x81\xe8\x0a\x58\xb3\x1f\x4a\x31\xad\x63\xcc\xcf\x53\xb5\x1f\xe0\x4d\xd9\xc1\xb1\xa4\x2f\x70\xb0\x04\x17\x8d\xa9\xe2\xbe\xfe\x94\x23\x28\x24\x69\xe3\x7f\x3d\xd4\x1d\xd9\x27\x6a\xfd\x5f\x92\xaf\x62\xb7\x43\xfd\x03\x3c\x6b\x10\xc3\x26\xf8\x7a\x08\x1d\xe9\x4f\xfe\x2a\xb7\x8d\x43\x40\x16\x96\xba\x65\xf3\x86\xec\x1d\x94\x81\x1f\x94\xda\xc5\x2e\x27\x2f\x7e\x1a\x57\xd9\xd2\x89\x5e\xff\xeb\x6c\xd2\xb9\xd1\x68\xae\xb2\x29\x64\x69\xb3\xc6\xf1\x0e\xb6\x0f\xad\x8f\x0d\x66\x41\x4a\x1c\xe5\x49\x14\x2c\xdc\x10\xaa\xf5\x51\x55\xb9\xc0\xdb\x25\x4a\xbd\x67\xe7\x78\xed\xf6\xaa\x66\x0c\xbb\xfb\xe8\x2d\x99\x31\xb6\x89\x9e\x63\x12\x52\xf0\xde\x39\x4a\xf7\xea\x05\xb5\xd6\x9e\x6f\xca\xa1\x81\x27\x9e\x08\x2d\xa9\x0b\x7a\x3e\xb4\xb0\x6d\x5e\xad\x57\xe8\xac\x9c\x4a\x72\xe3\x7c\x70\xd5\x50\xe3\x10\xc4\x1c\xd0\xe0\x13\x5a\x1a\x7a\xcf\x15\xa1\xbf\x7c\x95\x01\xa5\x4a\xa0\x73\x6d\x08\x73\x34\xa9\x01\x9d\x88\x2a\x1d\x71\x10\x4e\xa6\xc8\x82\x92\x82\x43\xd8\x49\x53\xd8\x7d\x18\x24\x44\x01\x3e\xa3\xed\xc1\x16\xe8\x04\xf6\x07\xa7\x50\xa3\x22\xcb\x72\x2c\xeb\x94\x36\x38\x09\xb3\x61\x9b\xa3\x9b\xe6\x21\xb5\xc1\x87\x56\xb6\x16\x26\x29\x3a\xd9\xc6\x54\x54\x1f\x26\x57\xb5\x22\x2c\xbb\xba\x1c\x63\x84\xae\x97\xb6\xff\x1a\x8b\x76\x83\xe1\x3c\xb5\x33\x13\x3a\xbc\x87\x16\xc5\x92\xdf\x45\x70\x2f\xbf\xc3\xff\x24\xb7\x47\xae\xc8\xc5\x27\xa3\xe9\xc7\xa7\x1e\x6c\xa4\x0c\x6d\x92\x5f\xd9\x76\x95\x6f\x0f\x87\x09\xbe\xa3\x5d\x59\xff\xb0\x8b\xa1\x35\x6f\xc3\x3d\x41\x12\x1a\xf5\x6a\xb1\xac\xd0\x48\x48\xa6\xee\xe2\x72\x33\x49\xad\x8f\x5d\xbf\x95\xda\xdd\xd0\x23\x71\x47\xab\x37\x9e\x12\x9a\x9e\x64\x68\x97\x33\xf6\x1f\xa1\xde\x5c\xc3\xd3\xa9\xcf\x63\xf6\x2d\x3f\x55\x4e\xc1\x10\x59\xfb\x16\xce\xbd\x31\x58\xf6\xe0\x43\xf4\xe0\xa4\x4a\x97\xcb\xd5\xaa\x3a\xe7\x97\xae\x67\xf3\xe1\x8d\xc9\x7a\x12\x26\x57\x9b\xfa\xb4\x5b\x07\xda\xb9\xd6\xca\x9c\x0e\xe8\xf3\xf6\x3a\xdd\x2a\xff\x1b\x51\xc0\x11\xaf\xe7\x23\x92\xe4\xe0\xba\xa8\x2b\xe7\xf2\x7d\x67\x00\x8f\x3a\x91\x21\x46\x73\xed\x66\x8c\x63\x82\xf5\xbc\x74\x8a\x0f\xe8\x28\xf4\x90\x54\x0a\x3b\xca\xdb\x8d\x70\xc8\x6c\xa9\x70\x89\xa6\xe4\x92\xd2\xc7\x57\x62\x5c\xfb\xfb\x9c\xbc\xf6\x15\x23\xcc\xe0\x5b\xa7\x3f\x03\xc7\x69\x86\xbd\x71\x2f\xc7\x0b\x7d\x1b\x39\xa8\x04\x91\x59\x65\x49\xda\x2a\x32\x56\x89\xe7\x03\xb2\x58\xba\xa0\xf4\x42\x41\xb8\xc7\xc9\x73\xd9\x65\x65\xc1\x79\x0b\x43\x2c\x42\xc2\x4f\xb3\xc0\xa6\x93\x89\x22\x4d\x67\xbf\xbb\x84\xf3\x34\xcb\x3e\xd0\x6c\x19\xf0\xd3\x32\xba\x6f\x96\xe8\x5b\xa6\x2d\xf0\xec\x23\x92\x6b\xe8\xea\x8
8\x4e\x7c\x51\x9f\xca\x48\x58\x85\x4c\x31\x8a\xa4\x74\xad\x00\x80\x5c\x08\x61\xa7\x09\xad\x54\x3a\xb5\x79\x1e\xd1\xf0\xa8\x0e\xbe\x03\x86\xb2\xbe\xd0\xd0\x75\x7f\x50\x5d\x23\x74\xe8\x7d\xd6\xed\x3d\xae\x74\xf7\xa1\x39\xe1\x7c\x52\xb5\x53\xb5\x5b\x48\xbc\x6c\xd9\x25\x49\x5b\xba\xd0\xe5\xda\x60\x9a\x95\x48\xc2\x6a\x27\xd4\x12\x15\x45\xfa\x77\x86\x8f\x4b\x5b\xd8\x46\x2b\x77\x03\x85\x0d\xf6\x9b\xf4\xfc\x62\x81\x1a\x44\x8e\x2b\x9e\x3e\xaa\xef\x4c\x2a\x1c\x68\x47\x41\xdb\x8b\x11\x39\x3d\x43\x1d\xc0\x61\x77\xc9\x14\x94\xe8\x02\xd9\x02\x61\xad\xdd\x5d\xc0\xe6\xc8\x30\x78\x46\x39\xf8\xb4\x97\x1f\xf1\x28\x6c\xda\x01\x19\x38\xff\x3a\xe5\x83\x8d\xe6\x09\xa6\xc1\xbb\x6b\xe5\x8f\x02\x07\x5d\x13\x69\xf0\xf2\x28\x6c\x66\x95\x50\x98\x02\x10\x0e\xe6\xca\x17\x63\x06\xc8\x02\x2d\x84\x37\xd2\x1c\x37\xfd\x9a\xa4\xbf\x10\xed\x8b\x7b\x70\x45\xcc\x62\x9a\x30\x8f\x01\x4c\x01\x67\xf4\x0e\x65\x2d\xb4\xb2\xe3\xb3\x57\xf2\xea\x6d\xc6\xe7\x90\xad\x19\xbd\x00\xda\x8e\x49\xaa\x54\xe8\x9e\x9b\xe9\x25\x20\x2e\xe1\xf9\xd8\xad\xcf\x13\x90\x14\x49\xe8\xb1\xa2\xde\x92\x39\x73\xa3\xf5\xe4\x53\x53\x83\x47\xa7\xcd\xb8\x48\xcc\x75\x73\xe7\x64\x96\x4b\x03\x40\x83\xed\x93\x49\x79\xeb\xe3\x88\x95\xa2\x3a\xbb\xa1\x7e\xd6\x31\x60\xb3\x1d\xe0\xb9\x9f\x4f\x44\x46\x47\xbc\x25\x20\x7e\x22\xf0\x8a\xfb\x98\xf6\x11\x07\x8b\xd5\x87\xd8\x9a\xd6\x89\x13\x5a\x55\x10\x17\xed\xb9\x95\xa3\x2f\xb3\x26\x44\x68\xc2\xd0\xa6\x62\x34\x64\xec\x20\x92\xee\xcc\x2e\xe4\xb2\x6b\xec\xf5\xa7\xfd\x73\x2b\xef\x5f\x94\x05\x94\x0f\xe9\x71\xb4\x2a\x0f\xb8\x54\x88\xf6\x2f\x02\x31\xc1\x24\x35\x8b\x6f\xd7\xde\x46\x69\x31\x7a\x2b\xc7\x00\xfc\x57\x5b\xf4\x07\x5a\x7d\x31\xfe\x4a\x32\x28\xc4\x8b\x18\x7e\xab\x5f\x7c\x2b\x60\xca\xcb\x67\x33\x9f\x0e\x72\xa5\x1e\xe7\x41\xe0\x42\xe2\xee\xd0\x90\x58\x4d\x63\x5e\x84\x26\xf1\x0b\xec\xdb\x34\xd2\x1a\x7d\x4e\x16\x0e\xe6\xbd\x71\x02\xb6\xbe\x28\x63\x98\xc9\xe7\x22\x4e\x61\x8c\xc4\x27\x81\x53\x75\x35\x18\xc4\x95\x7a\xbb\x9f\xb8\xc7\x4c\x60\x63\x61\x65\xb9\x96\x96\x2a\xaa\xe9\x28\xb7\xcf\xc9\x5e\x8d\xf8\xd7\xbe\xb3\x82\x8c\x43\x37\xbe\x0e\x07\xdb\x26\xa0\xf3\xd2\xa2\x73\x57\x66\x6b\x2c\x80\x90\x89\xcd\x97\x83\xa9\x40\x27\x71\xb9\x37\xc7\xf4\x34\x7c\xb0\xe5\xbc\x86\xe0\xb9\x29\xbd\x3b\xc2\x12\xcf\xa1\x86\x5d\x09\x5a\x5d\xda\x24\x66\x87\x8f\x1d\xa0\xaa\x60\xe0\x32\xb7\xab\x33\xca\x0b\x59\xd1\x12\xf4\x92\x15\xcd\xb1\xe6\x8a\xa5\xdb\x79\xa9\xb0\x2e\x7a\x2c\x99\x75\x57\x76\x46\xfe\x48\x06\x74\x71\xfa\x61\x3a\xa8\x8a\x60\x11\x24\xc2\x0c\x17\x54\x52\x28\x2c\xb1\x2e\x64\x03\x05\xa0\x33\xeb\xd9\xf0\xb3\x62\x64\xa1\x16\xa6\xb9\x23\x51\xfc\xba\x89\x73\x6b\xf9\x0f\xc4\xe8\x48\xe9\xa5\xa5\x7c\x70\x60\x34\x94\x23\x77\x77\xf6\x3f\x65\xb3\x32\x39\x1b\x42\xfb\x92\x11\xc5\xbb\xbc\xcf\x24\x2e\xc2\x92\x6e\x76\xe1\xbc\x3f\x4d\xcb\x15\x0f\x42\xd1\xc8\xb8\x73\x82\x70\xa1\xd3\x4f\x9a\x41\xf3\xa8\x53\x73\x41\x15\x23\x71\xea\x90\x0e\x72\x61\x21\x88\x38\x95\x32\xbb\xbc\xd5\xe8\x7a\x7a\x9f\x8f\x76\x71\x78\x54\x76\x11\x99\x2b\x7d\x42\x28\x58\x94\x78\x6c\x9c\x23\x9d\xc7\x36\xa8\xab\x0b\x88\x41\x90\x05\x34\xd6\xac\xd6\x8d\x28\xf3\xaa\x50\x8e\x15\x1a\xc4\x88\x9e\xec\x7c\xd8\x29\x80\xab\x85\xad\xdd\xf6\xec\xa0\x32\x6e\xce\xa4\xfa\x99\xd7\x27\x78\xad\x8d\xf7\xd8\x3b\x3a\x02\x00\x24\xc9\x3b\x5f\x37\x94\x53\xd6\x5e\x21\x78\x2a\x1e\x60\x62\x71\x09\xd7\x72\xa5\xed\x5b\x03\x1a\xb5\x56\x27\xc3\xad\x26\xfc\xeb\x02\x8c\xde\xb3\x35\x08\x43\xe2\x1c\x78\x70\xf5\x62\x57\x30\x6d\x0a\xfb\x14\x1d\x39\x36\x65\x7e\x0c\x54\xcb\x71\xa5\x0e\xb6\x76\x31\x72\xbe\xc5\xfc\x9d\xb8\xa7\x7f\x23\x71\x4a\x5e\xb6\xe1\x0e\xd3\x87\x30\xa0\x4e\x6c\x59\xbc\xbc\xa8\x23\xe2\x46\x09\x83\x85\x5d\x89\x22\x0a\x
4d\x04\x07\x9c\x6d\xc8\x1d\xb4\x9e\x4f\x99\x13\xea\x1e\xe9\x35\xe6\x48\xb6\x51\x1d\xba\x3d\xd8\xc3\x0c\xc9\x87\x57\x16\xed\x5b\xf3\x98\xba\xd3\x4c\xaa\x07\xb4\x07\x1f\x58\xdd\x42\x00\x45\x67\x89\xe2\xd6\x1e\x06\xc9\x8c\xe6\x9f\x1a\x50\x18\xf6\x79\xf7\x12\xc1\xcd\x1a\xdf\x04\xde\xfc\x31\xa8\x3c\xb7\x5f\x1f\x6c\x26\xa9\x20\x2e\xbc\x8f\xcf\x3d\x53\x6e\xde\x1d\xf4\x22\x3d\xc5\x06\x86\xd9\x59\x93\x5c\x1a\x9d\x10\x07\xa5\x51\x29\x0a\xea\x3a\x6d\x63\x30\xf7\xac\x2c\x18\x4a\x8a\xcc\x1e\x07\x1a\xd4\xfa\xf6\x21\x0c\x1a\x6f\x1c\xc9\x52\x3f\x39\x7b\x83\xa0\xad\x7d\x15\xde\xea\x2a\x16\xca\xc6\x99\x32\x16\xff\x29\x0c\x33\x6a\xfc\xd3\x9d\x15\xc7\x98\xce\x0b\xb0\xbd\xae\x60\x5a\x98\xb7\x3c\x79\xb8\x3a\x25\x6e\xd8\x0d\x1b\xd3\x6d\xca\x98\x2a\x2e\x4f\x93\xab\xcd\x04\x26\xc8\x01\xea\x7f\x19\x3a\x4c\xc9\xcf\x9b\x24\x4d\x6c\x16\xc8\x97\xfd\x67\xed\x94\xa8\xd1\x84\x90\x1a\x08\x3a\xdb\x0f\xbd\x4a\x85\x83\x82\x24\xe8\x22\x17\x75\x0e\x90\xcf\x0b\x8b\xbf\xed\xe4\xba\x96\x56\x52\x21\x3b\xfe\xdf\x04\xd6\x07\xe9\x43\x93\x86\xcd\xde\x72\x10\x07\xab\x26\x54\xd7\xb0\x9c\x02\x41\xcd\x49\x89\x63\xbf\xa8\xac\x05\xd8\xe4\xf0\x61\x2d\x51\x49\x58\x48\xe6\xac\x54\x63\x49\xcf\xc4\xea\xe4\x77\xf6\xf2\xad\x31\x58\x70\x8e\x0e\x53\x2c\xf9\x0a\x33\x38\x4e\xea\x11\xda\x60\x00\x10\x1b\xb6\x94\x99\x00\x4a\x8c\xbb\x60\x68\x51\xe0\xc4\xc2\xa6\x21\x0a\x4e\x32\xa5\x79\x2a\xd4\x76\xea\xa0\x7c\x69\x81\x9c\xa5\xa4\xeb\xe9\xce\x55\x29\x12\xce\x62\x70\x56\xfd\xd9\x4f\x87\x93\xe4\x29\x48\x2f\x39\xbd\x1b\xec\x28\xcb\xb7\x7b\x67\x70\x7c\x7c\x98\xd9\x11\xd4\x69\xf2\xa5\x73\x4c\x93\xf3\xec\x54\xbf\xf6\x17\x13\x9e\xe5\xc1\x58\x60\x2d\x93\x36\xc7\x1b\x38\xba\xf9\xd0\x44\x89\x70\x0f\x6f\xfc\xd9\xc6\x6c\x2f\x2b\xb7\xb2\x59\x62\x4c\xce\x34\x63\x6c\xcf\xf9\x1c\x05\x07\x39\x55\x2e\xe0\x55\x02\x18\x97\x4a\x1d\x87\x65\xd3\xd3\x35\xda\xa1\x74\x9b\xbd\x33\xf5\xa7\x54\x96\x8e\x26\x25\xcd\x62\x30\x2e\xcb\x67\x92\xa6\x7a\xc2\xaa\x2d\x4f\xae\xa8\x44\x05\x93\xfb\xed\xfd\x6a\x3e\x2f\x55\x63\x57\x57\xbf\xf9\xe3\x05\x4d\x58\xf7\x03\xd6\x09\x05\x02\x53\x1c\x04\xe4\xc8\x65\xbe\x18\x53\x03\xac\x45\x76\x9a\xa0\xc0\x7a\x26\x27\x36\x1e\xdd\x85\xd1\x74\x72\x14\xcb\x6a\x26\xd8\x67\x5f\x59\xf9\x73\xec\x51\x3a\x7f\xa1\xce\xee\x23\xcf\x54\x98\xc9\x1f\x2a\xa7\x8b\x9d\x2b\x67\x72\xf2\xda\x19\xb9\xd3\x3b\x45\xaa\x76\xdc\xe4\x1f\xce\xf2\xda\x94\xaf\x5a\xd8\x55\xcd\xa1\x05\x6a\x48\x57\x34\xbe\x52\x8b\x38\xce\x6c\x85\x54\x90\xdc\x02\xc0\x87\xf3\xfe\x01\xde\xca\x7e\x5b\x05\x43\x12\x90\x08\x02\x83\xce\x3a\xfe\x78\x11\x34\x74\x54\x3e\xb4\xeb\xc4\x29\xc9\x1e\xf6\x9b\x9e\xfd\x20\x2a\xe8\xdb\x7a\xbd\x5b\x1c\xed\x50\x1f\x4b\xd6\x90\x35\xa9\x30\x1b\x75\x19\x26\x42\xe1\xa1\x4f\x6c\xd8\x38\xd4\x36\x21\xf5\xd0\xfa\x95\x5d\x54\x26\x90\x2a\xdf\x6b\xb7\xf9\x17\xe3\x92\x6d\xaa\x07\xc6\x23\x71\x2a\xce\x3b\xb0\x0c\x75\x73\xac\xa4\xa9\x91\x3f\xc0\x70\x61\x21\xa7\xde\x8e\x10\x7a\xef\x77\x85\x30\xa3\x05\xbf\xac\x62\xa8\x19\x70\x6d\x3f\x65\x3a\x92\xef\xc0\x22\x5e\x08\xf6\xd0\xd5\x37\xc8\x48\x77\x68\x85\x0e\x3b\xa9\x48\xc3\xe0\x5c\xde\x81\xa4\xc4\x75\x97\x2b\xcd\x61\xe1\x24\x55\x9d\x0f\xc9\xb9\x04\xb7\x8e\x6a\xa4\xce\x6b\x3e\xbd\x74\xb7\x51\xec\x78\x0d\xde\x2b\x42\x42\xb7\x7e\x35\x0a\xeb\xe0\x57\xdf\x37\xba\x75\xb4\xeb\x91\x34\x4f\x09\x7f\x87\x92\x5a\x7a\x21\xa5\x55\xe7\xc3\x17\xd5\xb2\xad\x55\x6d\x0d\xeb\x50\x9a\xc0\xfc\xfb\xd9\x71\xfc\xc6\x84\xe8\xe2\xf8\xcd\x8f\x0a\xc6\xc2\x77\xa6\x05\xd0\x07\xf2\x9d\x4e\x4a\xce\x36\x7a\xb7\xb6\x91\x59\xae\x9d\xac\x96\x55\xdc\x16\xf3\x61\x15\x8e\xd3\xa3\xb6\x4a\xf0\x80\x98\x29\x4e\x84\x74\xd7\x6f\xf6\xf9\x2c\xff\xfc\xec\x28\x6c\x69\x73\x89\x0c\x8f\xc4\x69\x45\xd0\x30\xa7\
xc3\x65\x79\x5d\xf5\x5e\xa9\xad\x8e\x8a\xb9\x02\xba\xc3\x21\x9a\xe6\x30\xd8\x9c\x54\x59\x07\x9e\x83\xc7\x26\x7c\x48\x9f\x59\x48\xd8\x79\x7b\x90\xfa\xe2\x74\x00\xed\x3e\x91\x33\x6b\x67\xea\xcd\xca\x52\x43\x37\xdb\x8b\x0a\x8e\xfa\x6e\x1b\x25\x4a\xc5\xdc\xe2\x34\x61\x92\x86\x23\xcd\x98\x3a\x8d\x33\x85\x92\xbe\x86\x3e\xf5\xc9\x2f\x6c\x69\x6d\x42\xff\x7f\x9d\x33\xe2\x47\x2c\x5c\x87\xce\x8a\x7c\x0a\xfd\x84\xb1\x17\xa8\xf7\x8f\x0c\x59\xbb\x0e\x03\xe3\x18\xa6\xc7\x73\xca\x3e\x89\x59\xce\xd2\x86\x43\x35\x53\x36\xd2\x4a\x4d\x93\x9d\x1d\x1b\x32\x1c\x9c\x60\x05\xed\x0a\xb9\x3b\xce\xaa\x0a\xe8\xee\xf9\xb1\x61\xb6\x1d\x7e\xa2\xa1\x11\x61\xeb\xcb\x19\xbc\x6f\xc2\x49\x28\x2a\x8b\x47\xcc\xb4\x82\x7b\xa8\x34\xf3\x38\x06\xa6\x85\x0e\x92\xda\x33\x34\x27\x39\xda\xcd\xd7\x3d\x40\x8d\x62\xcb\x14\xf7\x97\x88\xb7\xa3\x74\xe4\xe0\x38\x5b\x8e\x48\x42\x2f\x59\xbc\x14\x9a\xf7\x8b\xd6\x96\xec\x5f\xb3\xaa\x67\xda\xab\xcd\x81\xc3\x27\x27\xa7\xa3\x4e\x94\xc9\x9a\xca\x1c\x54\x55\xdb\xd0\x91\x21\xa9\xaf\x43\x00\xf5\x24\x5a\xaa\x5b\x43\xc2\xdc\xf8\x93\x17\x37\x64\x58\xac\x5f\x57\xf5\xe1\xd4\xf8\xe8\x82\xa8\x6f\x2f\xcd\xbf\x82\x0f\xf7\x96\x8b\x60\xc5\x10\x33\x4c\x6f\xd8\x25\x67\xb8\x89\x6d\x76\x85\xb6\x4e\x8d\x75\x2d\xd4\x3c\x39\x61\xfb\x9e\x3f\x76\x5e\xb4\x4f\x03\xd1\xb3\x96\x33\xed\xc7\x13\x3a\x85\x08\xa5\x81\x4a\x60\x02\xe3\x66\xaa\x1a\xce\x4f\x74\xf6\xdd\x62\x1a\x5b\xf8\x5a\x4f\xfd\x8d\xaf\x86\x72\xbc\x76\x4c\xe0\x95\x2a\x44\x69\x6b\x08\x13\xcc\xd8\xe2\x72\x04\x56\x2d\x7d\xd8\x18\x68\xd8\xb8\xa3\x7c\xce\xc4\xa1\x2a\x8a\x40\xdc\x5e\xda\x1e\x0c\x04\x39\x72\x7f\xd4\x75\x31\x1c\x31\x74\x97\xdd\xab\xb5\x3f\x71\xce\x7c\xbc\x56\x66\x24\x27\xf6\xc9\x7b\xf0\x51\x69\xdb\x03\xba\x17\x20\x40\x8e\xa3\xb8\x21\xa3\xf8\x09\xca\xfe\xb0\xe3\x37\xe2\x63\x85\x41\x60\x99\x82\xc7\x16\x6b\x92\x13\x58\x68\x7c\xd8\xd8\x5d\x7c\xc4\x89\x31\xc2\xec\xbf\xe6\xf7\xd1\x12\x9b\x38\x6a\xc6\xa8\x4d\x88\x2c\x17\xce\x0c\xc0\x61\x85\xe8\x2f\x96\x1c\x9f\x38\x44\x71\x95\x57\x46\x51\x60\x53\x76\x65\x83\xc9\x4c\x32\xd5\x64\x1c\x33\x78\x47\xf6\x48\x44\x65\x59\x9c\x5e\x40\xb3\xab\xae\x9c\x03\x92\xfc\x64\xab\x2b\xb2\x1f\x6b\xd8\x32\x8b\x13\x66\xf6\x32\x6a\xb2\xe6\x3d\x8e\xfa\x8f\x24\x1b\x9d\x24\x85\x72\x44\xd4\x46\x4a\x7f\x78\x37\xd2\x8a\x8a\x2b\x91\x35\x8d\xb6\x59\xf0\x97\xe0\xce\x85\xa8\x6c\xc8\xc8\x77\x12\xf5\x56\xfc\x94\xd1\xae\x59\xa1\xf0\x64\xbd\x3c\xfa\xdc\x7a\x1d\x4f\x01\x0c\xd7\x73\xb4\x8d\x72\xa9\x61\x54\x9f\xeb\xb7\x41\xa9\x3a\xb1\xeb\xbf\x0e\x0b\xa8\xbd\x48\x3c\x72\xfa\xca\x70\xa6\x92\xe4\x1b\xd1\xc7\x39\xa2\xe5\xcb\xde\x38\xbf\xf3\x7c\xb4\x8d\x15\xc8\xcd\xa1\x41\x0c\xf0\xc1\x0a\x6a\xd6\x81\x32\xab\x0d\xf9\xa3\x4a\x40\x63\x63\x92\x55\x6a\x99\xed\x49\x3e\x04\x24\xb8\xdf\x94\x5c\x5f\xd8\xee\x9c\xcb\x18\x0b\x29\x67\x99\x3d\xd8\x8d\x89\x32\xfc\x26\x68\xe2\x7a\xfc\x69\xb6\xf7\x74\x78\x43\xf2\xf7\x9b\x4a\xb3\x01\x9f\x06\xdd\x97\x54\xf6\x30\x5c\x11\x3d\x21\xac\xab\x6f\xcc\xaf\x56\x1d\x42\x9b\x51\xc2\xc7\x0c\x1a\xf9\xe0\xe6\xa1\x5f\x1a\x8d\xcc\x21\x53\x68\x02\x3d\x15\x94\x5e\x33\x41\x5a\x27\x3e\xd6\x81\x3a\x13\x1d\xa2\x85\x17\x8f\x30\x51\xd8\x71\x76\x62\xa0\x61\xa8\x96\xd5\xb1\x01\x11\x75\x62\xad\xc2\xbd\x87\x34\xea\x54\xd0\x2f\xa5\x8a\x03\x48\x01\xb9\x22\x73\x18\x6d\x55\xa4\x85\x4e\x9f\x2f\x8f\xb5\x33\x06\x9e\x38\xa9\x47\xa9\x1e\x34\x89\xe1\x76\xf4\xc1\xda\xde\x0f\x57\x01\x0d\x8c\xb5\x43\x6e\x06\x94\x4a\x12\x20\x3e\x8b\xd2\xc8\x64\xbe\xbd\x0a\x35\x6a\x94\x50\x14\x59\x2a\xb6\xbb\x7e\x0f\xe9\x88\xf6\x8e\x43\x6a\x4d\x05\x8f\xc8\xeb\x51\xb4\xc7\x35\x9c\x25\x0b\x30\x13\x50\xe3\x81\x73\xf8\x3f\x84\x44\x29\x7b\xf5\x82\x55\xf4\xda\xd5\x85\x5b\x0a\x08\xf1\xc5\x32\x52\x79
\x3e\xd9\x46\xa7\xc7\x23\xb9\x87\x55\x55\x00\xce\x74\xde\x49\xbe\xfa\x10\xe3\xee\x1c\x78\x43\xb3\x6a\x1a\x0d\x85\x26\x34\xd4\xa1\x0d\x20\x7f\x07\x7e\xfb\x9f\xab\x5f\xd4\x3f\x67\x98\xf5\x2d\x67\x3d\xca\xf9\xb0\x00\x4a\xcf\x61\x60\x5b\x87\xdc\x0e\x6f\xdd\xe6\x50\x08\xad\xb4\x40\xad\xba\x8f\x3a\x25\x1b\x57\xed\xbb\x35\x70\x1c\xe5\xea\x2d\xfd\x2a\x01\x83\x9e\x6e\x13\x71\x04\x87\xc4\x87\x9f\x12\xd4\xf2\x87\x1a\xa3\xd6\x0c\xd5\xce\x43\x48\x4c\x03\x3a\xd0\x31\x7d\x5c\x8f\xd5\x0a\x4d\x3c\x46\x52\xaa\x8c\xb6\x79\xad\xf5\x31\xa4\x53\x3d\xa4\xc4\xb6\x86\xba\xff\xa1\xf3\x0d\x0d\x38\xf3\xe4\x77\x18\xe2\xb3\xb9\x73\x16\x05\xfd\xbf\x43\xee\x92\x87\x74\x92\x91\xf5\x88\x22\x13\xc9\x24\x49\x58\x07\xb2\x4f\x00\xbe\x1d\xe6\xd6\xa6\x6a\xce\x47\x7b\x59\xb8\x6d\xca\x97\x01\x67\xf8\xd6\x1f\x44\x6e\xc1\x9b\xfd\x60\xea\xc9\x2e\xfd\xc6\xe5\x12\xcb\x0b\x7c\x5b\xb6\x11\xdc\xde\x16\xc6\x7c\xc4\x32\xa4\xa7\x1a\x9e\x84\x58\xc6\x59\x14\xdb\xb2\xfc\xd3\x74\x6b\x1e\x46\x43\xa9\x30\x71\xbb\xd3\x51\x16\x4e\xd9\x10\x7f\x4c\x60\xf6\x88\x3f\x9c\xe9\x3c\x1f\xfd\xdb\x78\x8e\xea\x91\xe6\xa2\x0e\x0c\x1c\x6c\x23\x3e\xa0\x50\xca\x48\x50\xeb\x68\x9c\xb7\xc4\x9c\x43\x72\xbb\x6b\x10\x25\x20\x01\xe2\xa3\xdb\x0c\xfc\xc6\x5f\x96\x2f\x47\xc7\xd5\x14\xd8\x92\x5e\xc9\x06\x75\x2e\x84\x72\x60\xbe\xd3\x74\x9b\x6b\x10\xe7\x81\x74\xe1\x10\xba\xca\xbd\x99\x3d\x62\x5b\x1c\x93\xb6\x06\xce\xd3\x59\x6d\x1a\x36\x07\xbe\xa8\x88\x03\xe8\x86\x09\x75\x6a\xa5\xa2\x6d\x78\xfc\xa8\xf5\x0e\x54\x0b\xac\x7b\xce\xc0\x2d\x37\x79\x4b\x94\x5f\x65\x21\xbe\xfb\xf0\xec\x59\x65\x9d\x0c\x36\x57\xa9\xf3\xe1\xcc\xc5\xe3\x26\xbe\xa2\x9a\xa9\xaf\xf9\xf3\x49\x18\x28\xa4\xfc\x1d\xae\xa7\x33\x31\x50\x87\x09\xd0\x53\x55\x58\x6c\xe4\xcf\x5a\xcb\x27\x4b\x95\x7a\xdd\xf5\x0b\x7c\xfd\xb2\x5c\x07\x40\x67\x82\xab\x1c\x95\xb8\x60\x1a\x44\x0f\x74\x4b\xe9\x0f\xfc\x6a\x03\xa9\x99\xcd\x77\xaf\x3e\x75\x1b\x1d\xe1\x8b\x8b\xc5\x84\xd5\x6f\x89\xca\x2f\xdd\xe8\xfc\x56\x83\xa8\x42\x63\x2c\xfa\x79\x0f\xb1\x0b\x89\xc8\x6f\x40\xb7\xab\x65\x88\x8d\x68\x71\x12\xb1\xe6\xba\x92\x39\x4e\x08\xb5\x46\xcc\x6d\x79\x98\x01\x75\xdf\x76\xa1\xfc\x31\xd4\x2e\x2f\x12\x01\x2f\xa2\x5b\x9a\xf8\x55\xb3\xef\x27\x47\x28\x1f\xca\x39\x02\xee\x1a\x31\x8a\xfb\xf0\x41\x0c\x7a\x54\x69\xfe\x56\xae\x7f\x76\xc2\x5f\x1e\xfd\xea\xec\xd0\x16\xc7\xb0\x23\xda\x9c\x5c\x56\x5c\xcb\x26\xf6\x4b\x9a\x0e\x5b\x65\x20\xb9\xa8\xd3\xae\xcf\x65\x69\x70\xaf\x20\x3d\x6a\x76\xf3\xff\xb9\x7a\xb3\x05\xd5\x75\xdf\x69\xf4\x5d\xb8\xfe\x5e\xca\x49\x4c\xe2\xce\xe0\xfc\x32\xc0\x86\xa7\x3f\x2a\x55\xc9\xac\xff\xb9\xd8\x1b\x77\xaf\x9e\x80\xc4\x96\x4a\x35\xa8\x25\x99\x9a\xd7\xd6\xf4\xc3\xcd\xf0\xa8\x24\xd9\xe9\x27\x78\x1b\xdd\x25\x9b\xc7\xb3\xae\xc9\x9a\x27\x99\x0f\x3e\x7f\x69\xa9\x6e\x5e\xfa\x70\x46\x00\xeb\xf4\xe1\xa0\x13\xc8\x50\x23\x86\x27\x13\x80\x5d\x12\x1b\x24\xbe\xee\x03\xb5\xd7\x3d\x88\xcd\x62\x03\x08\x89\xdd\x04\xe8\xf7\x61\x4b\xea\x61\x66\x72\x95\x76\xca\x6b\x43\x68\x6e\x97\x67\xd1\xa1\xab\x03\xa3\x43\x69\x5a\x0b\x27\x2b\x56\xa3\x7e\x7c\x88\x82\x1b\x99\x71\xaf\xb3\x9c\xd1\x9c\x8b\x28\x1c\x15\x83\x06\xdf\x57\x21\xcd\xa2\xb4\xcc\xbf\x2a\xb9\x1a\xe5\xeb\xd8\xb2\x57\xd9\x9f\xb0\x09\x7b\xd7\xe5\xa9\xa1\x61\x95\xdc\xef\x55\x5b\xe6\xf5\x41\x4f\x0b\x7b\x1f\xfe\x0b\x05\xe5\x41\x5f\x15\x4f\xca\xe4\xc6\xb9\x97\x9f\xc4\x17\xac\x04\x72\x8f\xec\xfc\x1e\x3d\xec\xce\xd5\x2a\x2d\x6b\x38\xb9\x35\x97\x75\x1d\xad\x6a\xe5\x71\x0b\x09\x84\xcc\x13\x4b\xdf\x83\xab\xec\xa4\xce\x7c\x04\xd7\x60\x64\x91\x0f\xee\xf0\x75\x7b\xc3\x5e\x9b\x64\xac\x0e\xb0\x95\xf6\x8b\xd7\x5f\x24\x9c\xd8\x41\xc5\xb6\x4d\x58\x17\xea\x51\xa3\xeb\xb1\xbf\xa8\x57\x5c\x53\x5a\x6e\x47\x11\x30\x9c\x62\xd2\xb
a\x55\x5a\x8d\xa4\x92\xce\x76\xbc\xcd\x6f\xeb\xf0\x7f\x1f\xd9\xe5\x47\x84\x96\x15\x11\x72\x63\xc3\xe0\xfe\x13\xfb\xe9\xab\xa9\x2b\xe2\x68\x99\x12\xcf\x4d\xdb\x56\x9b\xed\x71\xf6\x8b\xce\x37\x56\xb5\x49\x8e\xc8\x78\xb5\xf8\xb4\x83\x36\xa4\xb2\x4f\xee\x66\x31\xf5\xc8\xc0\x99\x49\x5c\xda\x26\xfa\x82\xd0\x37\x00\x0a\xce\x5f\x22\x5b\xf8\xe0\x77\xe1\xd1\xe1\x6e\x2c\x24\x6d\xea\x2c\x84\xfd\x8a\xb6\x23\x8e\x97\xed\xef\x16\xcb\xf8\x62\xe9\x74\xb5\xfc\xee\xf3\x2d\xc7\xf6\xf3\x65\x1d\xdf\xf0\xcb\x59\xd2\x8c\x09\xfc\x74\xa5\x36\x34\xed\xe3\xd9\x6c\xcf\x4f\x72\x34\x74\xb6\xb4\x00\xb5\x33\x48\x76\x3e\xc1\x54\xf2\xda\x5a\x20\x06\xf0\x2b\xe3\xa3\x22\xca\xae\x15\x62\x56\x47\xab\xd3\xad\xc1\x73\xea\xc0\x7e\x2f\x99\x2a\x27\x70\x8c\xf8\xf3\x37\x5d\x44\x80\x88\xc3\x67\xbe\x0c\x43\x0a\xdc\x6e\x71\xac\xd5\xbe\x8f\xfb\xcf\xc5\x7a\x6f\xbe\xff\xd2\x9b\xc7\x27\xf4\xdc\x7c\xaf\xc1\xc3\xf7\x31\xaf\x9d\x38\x8b\xfa\xed\x09\x51\xe6\x82\xdc\x32\x1d\xb5\xa6\xbc\x47\x9e\xcf\xba\xc5\x62\x0d\xe2\x5a\x33\xb7\x60\xa2\xb9\x37\x68\xa3\x77\x24\xdc\x8a\x10\x87\x54\x42\x98\xca\xe4\x78\x7f\xdf\xfd\xe5\xf5\xad\x2a\xb0\x9b\xb4\xcc\xfc\xf1\x56\x6d\xa8\x51\x1b\x6e\x9e\x79\x3e\x7e\xd6\x41\x9a\xc2\x5a\x11\x4f\x44\xad\xf2\x94\x86\x5f\xa4\x2b\x8c\xf7\x5a\x2f\x40\x34\x99\x2a\x6e\x28\xa8\xe8\x81\x76\x04\x27\xb3\xab\xef\x58\x2c\x83\x34\x38\x3a\x52\x6d\x2f\x57\x2e\xa9\x2d\x8b\x7c\x18\xbe\x31\x59\xee\x9a\xe8\xd2\xd3\x07\xa2\xb2\xa3\x49\x15\xcd\x7c\xf8\xfc\x61\x93\xcb\x0b\x02\xfa\x0d\x9e\x25\x57\x64\x67\x5b\x3b\x55\x3b\xd6\x2e\x9e\x3f\xf0\x1b\x6a\x8e\xc1\x7a\x51\x6f\x87\xfd\xc8\x8f\xc7\x3d\xf7\x24\x33\x9f\xb5\xc5\x41\xae\x59\x58\x8a\x03\x81\x7e\xa5\xf6\xb5\xb9\xe8\x25\xc2\xdb\x87\x33\xaf\xdc\xb4\xac\x8d\x8d\x4e\x5e\x7f\x4b\x98\xa4\x54\x7e\x62\x94\xdf\xab\x58\x3f\xf0\x25\x7b\x38\x94\xe3\x9b\x56\xb4\x58\x8c\x6d\x88\x21\x53\xd8\xbd\xee\x59\x50\x33\x8d\x55\x1e\x9c\x3b\x11\x34\xc4\x36\xd7\x93\xc5\x6b\x4d\xdf\x6f\x32\xaa\x91\xc7\x40\x61\xd8\x1a\xd5\xcf\x8a\x19\x06\xbb\x17\xf7\x05\xa1\xc8\x3c\x2f\xa3\x46\x52\xd0\x67\xd3\xc7\x0f\x54\x2c\x81\x02\xab\xac\x79\x96\x0f\x4f\x29\x1c\xd7\x38\x61\xfd\xa6\xf8\x74\x61\x9c\x9c\x5f\x52\x16\x8d\xcd\xa4\xfa\x99\x22\x7d\xd3\x2e\x9b\x90\xf7\xcb\x76\x6d\xf6\xb3\x71\x06\x67\xf5\x9c\xf1\xe4\xff\x52\xe0\x7a\xb0\xc2\xfc\x39\x60\xfa\xdd\xf4\x16\x06\x33\x31\xe7\xd5\xbf\xaa\x08\x5f\x03\x69\x5e\xcd\x77\x91\x41\x32\xa6\x5c\x34\x91\x18\xb1\x3d\xd0\xf1\xd2\xba\xcc\xb8\x31\xc2\xc5\xed\x19\x3c\x9f\x67\x8a\x30\x5c\x94\x42\x1a\xcd\x87\x64\x26\x93\x60\x34\x88\x1a\xd8\xc6\x31\x40\x48\x02\x26\xc1\x9c\x97\x69\x11\x0c\x0c\xeb\xad\xf2\xde\xa6\xc6\x0c\xf3\x31\x4c\xb3\xa4\xaf\xfe\x7a\x91\x24\xb0\xd4\x0f\x19\x01\xa9\x00\xd6\xe3\xc8\xff\x5e\xfe\x89\xcf\x91\x19\x9d\x1d\x41\x8a\x2a\xe8\x23\x4f\xdf\x09\x00\x72\xe3\xc4\xed\x1c\xa3\x39\xc5\x6f\xd6\x61\x93\x6a\x5f\xdb\x1a\x16\xbd\xdf\x7e\x8b\xec\xec\xa7\xc8\xa4\xb7\xfa\x74\xaa\x92\xc8\xc9\x68\xe0\x1d\x29\x3a\x90\x9a\x57\x9e\x04\x52\x71\xa0\x96\x50\xe4\xe5\x75\xb4\xb1\x5e\x26\xd1\xfd\xb2\x56\x29\x45\x3e\xdf\xf9\x19\xe2\x44\x08\x57\x08\x87\xc2\x25\xad\x5a\xaf\xc2\xad\x1f\x1b\x03\xff\x15\x4b\xd1\xdc\x62\xfd\x60\xff\x2f\x01\x48\x96\x37\xb9\x7b\x53\xd2\xa6\xf5\xd4\x20\xe4\xa0\x17\xd2\x71\x2b\x66\xba\xec\x3b\x41\x02\x69\xd8\x6c\xe1\x35\xed\x01\x2e\x5d\x74\x53\xff\xbb\xa9\xff\x03\xe5\x88\x88\x15\x47\x25\x5b\x24\x90\x6d\x1b\x37\x1e\xe4\x73\xbd\x5b\x83\xcf\xb7\x16\x3a\x82\x81\x7b\x3e\x66\x43\xa1\x1a\xb8\x83\xb3\x76\x29\xac\xdd\xae\xbe\x4d\x63\x89\xf9\x90\x06\x44\xdb\xdd\x9c\x78\x31\xfe\xe5\x3d\x62\x11\xc8\x5a\xb5\x22\x42\x8f\xfc\x58\x41\x39\x1c\x28\xea\x66\x74\x30\x24\x1c\x32\xbf\x55\x59\x89\x73\x18\xc1\xae\x30\x
bc\x7d\xb8\xe9\xb6\x2e\x6a\x6b\xd1\xd4\x9f\x8f\xcb\x1d\xb0\x14\x3d\x3b\x1d\x9f\x6a\x81\x53\xa3\xbd\x54\x7e\x47\x3f\x23\xa6\xef\x29\xe0\x30\x7f\xe8\xb7\x71\xb6\xee\x30\xa7\xc8\x5b\xb7\x7b\xac\x84\x2b\xfa\x02\xc9\xbe\x84\x61\x88\xff\x0a\x68\xf5\x12\xfa\xd5\xb7\x88\x7d\xbb\xb6\x9b\xe3\x66\xf8\x23\x20\x5d\xcb\xcb\x51\x14\x2a\x5b\x14\x2a\x57\x0e\x4b\x04\xe9\x0b\x61\x90\xe6\xbf\x5a\xe0\x27\xb3\xfa\xed\x56\xeb\x63\x2b\x7d\x37\xfb\x4d\x51\xa2\x70\x61\xc7\x5f\x6e\x4d\xd6\xcf\x9b\x53\xaa\x0a\xa2\x4e\x2e\x1a\x04\x81\x83\xf2\xc1\xa4\xcd\xe9\x8e\x3e\xf8\xba\x35\xf3\xbd\x1a\xd2\x7f\x95\x65\xd4\xc8\xe7\x8a\x67\x64\xd7\xb0\x48\x96\x84\x64\x4e\xca\x49\x44\xa1\xa1\x07\x5a\xb4\x3f\x07\x19\x24\x47\xe5\xa8\xbb\x93\x67\x1b\xae\x56\x0d\xbd\x77\x58\xc3\x3d\x5c\xa5\x2d\x7d\x13\xfb\xbe\x1a\xec\x2d\x09\xf9\xb6\x4f\x7c\x62\xb3\xd2\x1c\xb9\xf1\x58\xa6\x36\xf6\x05\xbe\x80\x07\xb7\x31\xa4\x5b\x31\xc8\x33\x7a\xd4\xef\x5d\x7f\xcc\x99\xd2\x34\x8b\x7d\x63\x70\x06\x25\x17\x15\x33\x2d\x74\xd6\x34\xff\x0c\x09\xff\x72\x5b\xb6\x69\xd2\x72\xcb\x1a\x53\x30\xb0\x95\xad\xcd\xa3\x01\xcd\x9e\x7f\x4d\x92\x5b\x69\x14\x49\x2e\x51\xe5\xb9\x80\x3b\x51\x86\xb0\x9e\x00\x1c\xe8\x03\x70\x0e\xff\x0a\x79\xfb\xc0\x48\x40\x1c\xef\xa5\x58\xd5\xa2\xf3\x01\xb4\x06\xc9\xe3\xc4\x27\x47\x39\xbd\x0b\xa7\xf8\xb9\x0a\xd8\xee\x46\x24\x0d\x05\x96\x9f\x1d\xc0\x5c\x45\xf3\xa1\xc1\xcf\x15\x57\x3d\x2e\x6e\x04\x8f\xfa\xd1\x50\xd5\x80\xda\x13\xfe\x1d\x0c\x32\x30\xb0\x3f\xfc\x94\x07\x39\x06\x11\x11\x37\x0c\x2f\xcf\xed\x2b\x9e\xd8\xa2\x01\xb1\x9d\x0a\xed\x75\xe9\x3d\x68\xde\x57\x53\x49\xba\x5d\x80\x5e\x2a\x2e\xce\x7a\xc5\xdd\x7a\x6a\xf6\x8a\x2c\xaa\x8e\x3a\x27\x85\xd8\x23\x5a\x4a\x07\x04\xb7\x7c\x64\xca\xa9\x67\x2c\x2d\x3f\xc6\xd3\xf2\x89\xd8\xca\xdc\xc3\x4a\x1c\xa5\xf7\x47\x38\xd2\x37\xfd\xa5\xfe\x21\x09\x42\x15\x42\x21\xd9\x64\xcb\x23\xb0\x97\x2d\x0a\x31\xcc\xdc\xbd\x15\x0c\xc8\xe9\x72\xb7\x66\x3a\x13\xdc\xc1\x41\x8e\xf9\x2f\xaa\x30\x1a\xe9\xd7\xff\x63\x58\x8c\x39\x21\x8b\x7f\xc4\x43\x7b\x6d\x56\xc6\xe8\x22\x43\xb4\xe0\xe7\xac\x08\xea\x6d\xbe\xb6\xd2\xbc\xb4\xb1\x47\x4f\xf0\x7e\x1e\x4d\x44\x9f\x44\xf0\x21\xef\x0f\x16\x37\x85\x1e\x26\x4e\x81\x80\x5f\x57\xe1\x94\x65\x6f\x11\x10\xdb\x27\xac\xa4\x1c\xfa\x70\x2a\xc6\x86\xee\x8d\xfc\x00\x2b\xb7\xaa\x14\x2f\x56\xee\x84\x73\x2f\xd5\x0d\xd4\xf5\x76\x61\xd1\x0d\x0c\x18\x90\xa2\xdf\x0b\x78\xc3\x97\x90\xca\x92\xec\xf1\x64\xb4\x24\x7c\xc8\xe5\xbe\x76\x11\x6a\xb0\xea\xea\x16\x51\x28\xf5\xfc\xd4\x2c\xd4\xc8\xdb\x0f\x62\x7c\x24\xf8\xcc\x29\xc8\x93\x78\x0f\x1f\x60\x48\xab\x0d\x81\x78\x9f\xa6\xe3\xb2\x0c\xb4\xb2\x4a\x06\x94\x42\x42\x56\xa2\x73\x56\x71\x17\x11\x59\x2f\xfb\xbd\x9c\x03\xcb\x93\x02\x6c\x14\x41\xeb\xc3\x1d\xde\x7d\x43\xa5\x66\xaa\x8c\x05\xbe\x00\x9b\x7b\x5c\xe5\x68\xab\x06\x14\x7f\xf4\x1e\x3b\x8a\xbc\x7f\x8f\xdd\x25\x26\xb6\xb0\xa3\x9e\x95\x93\x2c\xcd\x40\xd1\xde\x74\xfc\xf7\x22\x40\xb8\xdb\x5e\xc0\xc7\x41\xe8\xf5\x45\xa3\x0c\x1c\x63\x50\xa1\x3a\xbb\x62\xf8\xb9\x1f\x63\xa1\x0b\x6b\xfc\xce\x67\x1d\x74\x7a\x05\x46\xe2\x67\x4e\xd2\x88\xef\x37\xfe\x4a\x11\xb3\xf0\xbd\x55\x0d\xbe\x9b\xfe\xc3\xb9\x4d\x67\x9c\x0f\x11\xc4\x17\x21\x4b\xf6\xc3\x5d\x15\xe6\xf7\xc2\xbd\xee\xba\xdc\x05\x8b\xe2\x58\x20\xc0\x5c\xe2\x82\x5d\x1a\x8b\xdc\xf1\x0f\x6f\x3a\xec\xee\x20\xea\x71\x51\x87\x73\xaa\x15\x38\x42\x95\x0f\x76\x34\x09\xd0\xa2\x52\x85\xcc\xbc\x92\x62\x88\x58\x60\x4d\x80\x7d\xeb\x70\x23\xe9\x5e\x83\x60\x4d\x54\x56\x98\xf6\x08\x64\xde\xe2\xc2\x4d\xbe\x0d\xb2\xf3\xff\x2d\x7c\x23\xcf\x9c\x54\xec\x52\x2f\x07\x03\x63\xc6\x64\xc7\xc7\xbc\x39\x6d\xe1\x2d\xd4\x86\x9a\x2e\x5d\xce\x82\x8d\xdf\x01\x24\xcb\x16\xa4\x53\x13\x1c\x68\xcf\xf8\xdb\x2b\xc7\
x5b\x36\x30\x76\x35\x3f\x5c\xcf\xcc\xc9\x19\xd8\x97\xa4\xb1\xe1\x30\x6f\x38\xf1\x27\xfc\xbf\x97\xaa\x8b\x34\x54\x95\xde\x47\x0f\xcd\x81\xa8\xbf\xb5\x15\xd7\xa8\x4b\xec\x8e\x6e\x04\x7a\xf8\x3c\x96\x2b\xb6\xd7\xd2\xa6\x63\xea\xe9\xbb\x5b\x9b\x69\xb6\xfe\x35\x29\x64\xe4\xc6\x50\x89\x99\x75\x50\x5c\xaa\x62\x21\x1e\x8d\x47\x22\x6f\xb6\xb5\x7d\x71\x45\xbb\xe9\x6a\x96\x86\x63\xa0\x04\x14\x8d\x2f\xef\xc0\xe4\x5d\xf3\xe5\x1a\xc1\x77\xa8\xca\xde\xda\x14\xad\x8d\x71\x3a\x87\x73\xa7\x84\xca\xb9\x43\x8c\x9e\xc9\x25\x12\xf3\xf5\x8f\x70\xfe\x74\xb3\x76\x5f\xa4\xf0\xac\x80\x8b\x99\xae\xbe\x7a\xcf\xcc\x60\x03\x3f\x91\x7d\xaf\x06\x64\xf0\x12\x12\x47\x3c\x38\xdd\x27\x70\x44\x0e\x54\xf0\xea\x49\x4f\xe2\xc8\x1c\xdb\xaf\x23\x7b\x81\xbb\x6b\x98\x82\x9c\x37\x46\xb5\xfd\x93\x9a\x6c\xc5\x88\x04\x0c\xd0\x8c\x75\xec\xa8\xed\xfb\xbf\xff\x0a\x78\x4f\x2d\x25\xb9\xb7\x8a\x92\x3a\x68\x90\xa9\x08\xe8\xac\xa0\x08\x56\x7d\x72\x88\x09\x5b\xbf\x51\xeb\x03\xbd\x67\x11\xfb\xd4\xae\x48\x32\x07\x96\xe6\x6c\x0b\x63\x6e\x31\xf8\xdc\xfe\xd9\xaf\xe2\xc0\x2b\xe7\x72\xf0\x52\xf3\x4c\x25\xd2\x17\xd4\xc1\xfc\xd1\xaf\xc9\xce\x43\x31\x20\x6d\x67\x55\x88\x43\x73\x0a\x4c\xa2\xf8\xda\x5f\x1f\x23\xe2\x3d\x90\xe8\xdc\x54\x8a\x87\x22\xce\x80\xef\x04\xbc\x93\x35\x25\xe1\x56\xf4\x94\xf6\xea\x59\x82\x81\xfb\x94\x92\xfd\x9f\xe1\x31\xae\x78\x3d\x13\x4c\x4a\x24\x62\xcb\xd2\x81\x0e\x08\x36\xe3\x5d\x40\xc8\x62\x28\xe7\x7a\x47\x30\xb2\x4f\x53\x7d\x09\x3b\x02\x96\x22\x83\xb5\xe5\x61\x5d\xb6\x3d\x5b\x0f\x4b\x72\xcf\xa2\x43\xa3\xe7\xec\xaf\x87\x2a\x5a\x2d\xaa\x24\xe8\x7d\xe4\x8a\xf5\xe9\x7d\xc9\xfe\x23\x29\xe7\xc2\xfd\x27\x49\x9e\x58\x6e\xc6\x9f\x3a\xa0\xad\xa0\x98\xe6\x27\xc0\xb0\x6e\xec\x18\x4a\xea\xba\x69\x1b\xb1\x4a\x07\xb9\x3c\xed\x14\x4f\x1a\x4d\xbe\x43\xc0\x85\x4b\x54\x43\x44\x0c\x55\x04\x13\xb9\xcc\xa5\x0b\x31\xec\x41\xec\xd2\x4e\x6b\x3b\xe2\x1d\x9f\xa1\xa7\x8d\x6d\x08\x7a\x0b\xae\x8f\xd7\x4c\xd7\x3b\xb2\x7b\x55\x98\xd3\x26\xc4\x17\x9b\x72\xe8\x81\x6a\x86\xc8\xc6\x37\xa1\x8e\xcd\x24\x7c\x41\x55\x4a\x9d\xe9\x4c\xba\x6f\xf6\xbd\xcd\xd5\x4f\xf7\x81\x9d\xa3\x3d\x9d\x78\x1d\xb0\x53\xc5\x77\xd9\x21\xf6\xc7\xb7\x60\xf7\xf7\x6c\xb7\xd7\xb6\xd1\xe5\x6d\xcf\xd3\xb8\x66\x0a\xbf\x28\x4f\xe1\x63\x19\x0e\xa2\x79\x8e\xb0\x1b\xb5\x49\xf5\x3f\x4c\x1c\xfd\x38\x84\x23\x04\xef\xc7\x7a\x34\xf6\x32\xef\xbe\x83\x36\xb1\xf0\x92\x3c\x64\x81\xdd\x9c\x6a\xfa\x59\xaf\x3d\x49\x69\x4a\x2e\x95\xc8\x06\x3d\x8e\x46\x92\x42\x8b\x97\x9b\x9b\xfd\x22\xe2\x28\xec\x2e\x54\xe7\x37\xd3\x48\x34\x87\xac\x65\x3a\x3f\x03\x1c\x4e\x85\x18\x27\x1c\xe1\xc9\xc8\xc0\x79\xd2\xa5\xfd\xc2\x6d\x8b\x9b\xed\x6b\x45\x50\x61\xe0\x27\x82\x96\xea\x29\xf1\xc6\x84\xd1\xac\x22\xba\x30\xa4\x54\xc7\x6b\xc7\x8a\x4e\x95\xa2\x3d\x0d\x18\x98\x9f\x1d\x20\xb7\xd0\x33\xe1\x54\x4d\x83\x3e\x9d\xa5\x3c\xad\x51\xd1\xb8\xfa\xc7\x0e\x1b\x06\x2a\x03\xce\x9b\x6a\x41\x2b\xeb\x27\x7f\xb5\x78\x99\x2b\x09\xb1\x77\x80\xdb\xf9\x16\x56\x57\x46\x27\x0a\xd5\x41\x34\xc1\x77\xe8\x7b\xee\x16\x44\x11\xd6\x7e\x38\x72\xc8\x47\xd2\x6b\xd9\x05\x4f\xba\x93\x9d\x14\x8c\x72\x26\x1f\xf3\xd4\x31\xfd\x42\xc6\xfe\x99\xa5\x47\x67\x9d\x0e\x1d\xed\xee\xd3\xa0\x09\x7f\x1a\xe6\x00\x63\xbe\xde\x8c\x12\x63\xa5\xb2\xb8\x1e\xff\x68\x8d\x83\xf4\xec\xdd\x31\x1d\x75\xfc\xc9\xc4\x0b\xe1\x74\x54\xef\xb8\x91\x2a\x98\x34\x0c\xca\x9a\x06\xdc\xd9\xe9\xc5\xbe\x4e\xef\x08\x2b\x79\x87\x98\xf9\xaa\x7e\x3f\xf8\xaa\xe3\xf7\xe6\x18\x28\xdb\x2f\x79\xe5\x16\xed\x90\x9c\xc7\x79\xc6\xdf\x70\x4a\xc5\x9a\xc3\x93\x98\xe9\x7b\x4a\xd2\x3a\xa1\x1e\xa4\xd1\xdd\xfd\x9b\xc4\x40\xe9\xb3\x45\x99\x60\xbd\x1c\xe1\x13\x18\xf9\x48\x10\xb1\x43\xc4\x2d\xdc\x96\xce\xb2\x41\x24\xe9\x04\xd4\xec\x59\x25
\xd7\xb5\x91\x90\x5c\xff\xa1\x8d\x57\x57\x2b\xf0\x6e\x3a\x7f\xba\x94\x07\xdb\x01\x2e\x64\x45\x81\xb0\xdb\x68\x04\xe0\xf5\x33\x8a\x40\x62\xbd\x19\x8f\x0a\x59\x1e\x46\x00\xe3\x22\xce\x32\x82\x49\xdd\xd5\xd2\xaa\x75\x15\xf6\x5e\x56\xfe\x55\xb9\xfc\x3a\xbf\xce\x27\x9c\x3e\x42\x60\x85\xdf\x6a\x23\x08\x20\x64\x84\x10\x16\x5a\xb8\x4d\xc8\x05\x5e\xa4\x0e\xc8\x0c\x59\xb1\xe7\xc5\x79\x7e\x1d\x48\xe9\xa4\x41\x82\xb7\xcc\xb9\xe8\x26\x1a\x22\x26\x80\x7e\x99\x48\x3e\xe2\xdd\x87\x13\x8a\x6e\x7c\x8d\x93\x88\xbb\x41\xb3\x88\xe6\x77\x6a\xdb\x19\x26\x48\x3d\xcb\x2b\x54\xbf\x2a\xde\x46\xee\xea\x79\x40\xf5\xaa\x31\x56\xba\x56\x49\x14\xdc\x6b\x12\xa6\x43\x4c\xbd\xc0\xd4\x75\x69\x7c\x94\xdf\x11\x66\x2d\x08\xa4\xc1\x5a\x47\x7f\x92\x5b\xa7\x35\xe4\x3f\x96\xc4\x76\x6b\xeb\xa2\xeb\x01\x18\x8a\x50\xb5\x4b\x68\xde\xd7\xf1\x8c\x49\x08\xd4\x5b\x7e\xd3\x16\xbb\xbd\xbd\x4f\x27\x03\x93\xae\xb2\xec\x2f\x50\xa3\x8b\x5d\x0c\x30\xab\x53\x71\x58\x9b\x40\x23\x74\x9c\x6c\x4d\x22\x2f\xbf\x13\x8f\xa9\xcb\x2f\x9d\x84\xd7\xa4\x19\x4b\x96\x69\x83\xd5\x90\x2c\x25\xcf\x5f\xa0\xe5\x42\xa2\x7e\xf8\x60\xe8\x18\x85\xa3\x25\x6d\xb1\x86\x39\x60\xb7\x4f\x4b\xbc\xb1\xc3\xd2\x73\x5e\xc9\xca\x9a\x6e\xd1\xb2\x4a\x98\x37\x67\x45\x4f\xbc\xaa\x10\x8f\x57\xf8\xd2\x7a\x6a\x56\x11\xe8\x15\x5c\xb6\xf3\xa6\x95\xef\x99\x2f\xb5\x36\xbd\xc8\x2b\xe8\xc6\x27\x3e\x1e\xf9\x47\x48\x14\xf6\xb5\xd2\xac\xf0\x50\x92\x5c\x1b\x20\x31\x77\xdc\xc7\x26\xe9\x37\xa4\xdb\xeb\x6e\xc7\x35\x71\x5a\x9f\x7e\x3c\x5c\x79\x9c\x74\x5f\x05\x5e\x80\x9b\xe8\x33\x70\x6a\x31\x82\xfa\x49\xf2\x5f\x95\x55\x6e\x0d\xca\xfb\xd2\x4c\x29\x86\xbb\x7f\xc8\xe7\x85\xf9\xa8\x89\x74\xc1\x35\x3a\xef\x3e\x1c\xa1\x0f\x36\x24\x73\x55\x84\x89\x2c\x20\xff\xd2\x2f\xaf\x6b\x8c\xc9\x04\xfd\xcc\xa6\x50\xbe\x4f\x39\x45\x7e\x37\xf5\xc7\x23\x1b\x42\x90\xb6\x94\xa2\x6a\x77\x1a\x5b\x96\xf1\x97\x39\x39\xfe\xc0\xa9\x71\x89\x59\xf7\x13\xfb\xcc\x49\x46\xb6\x75\xfe\xe2\xc7\xc2\xcf\x8d\x96\xe0\xf0\xc0\xf1\xbb\x67\x39\xb5\xe5\xe6\x85\x42\xc0\x1c\xa5\x51\x16\x7d\x65\x38\xea\x2d\x04\x2b\xec\xdb\xca\x57\x86\x06\xf6\x4f\x1a\x0f\x12\x79\x67\xe0\x0c\xdb\x7d\xb4\x44\xc1\x45\x54\x51\x78\x87\x39\x54\x7d\x2b\x1d\x03\x6c\x6e\xfe\x9d\xe4\x75\xa7\x70\x3e\x19\x58\xf5\x38\x38\xd5\x71\xff\xee\x8e\x08\xf7\xc7\x5e\xab\xf2\x10\x52\xc1\x9e\xdc\x9b\xba\x28\x22\xa9\xab\xcd\xc8\x66\xfb\x51\x14\x73\x1a\x24\x87\xbd\x75\x5b\xd0\x69\xd8\x9b\xab\xef\xed\x6f\x24\xa8\x89\xba\x3b\x0e\xdb\x80\xe1\x55\xce\x3b\xa5\xf0\x78\xec\x35\x35\xfb\xe4\xdd\x4b\xfa\x8f\xcb\x1f\xde\x6d\x03\x84\xa7\xfa\xcf\xdd\x18\x80\xa8\x8a\x4b\xec\x28\x0f\x72\x20\xfc\xfe\xf4\x44\x86\x07\x01\x30\x21\x60\xe9\x08\x73\xb2\xab\x04\x52\x75\xe9\x8e\xd8\xc1\xe7\x0e\xc1\xb4\x7d\x10\x3b\x15\x52\x66\x97\x36\xa8\xe4\xf7\x54\x2f\xd4\xad\x23\x73\x15\x97\x00\xaf\xf2\x7c\x8a\x10\x71\x0f\x81\x0e\x14\x9a\xf0\x82\x21\xe1\x0b\xbc\x5a\x78\x11\x30\xea\x80\x22\xfe\xf6\x89\xe1\x7e\xbb\xb8\x6c\x5f\x1a\x51\x92\xd6\xe9\x76\x83\x89\xd8\xc6\xf8\xc0\xb5\x89\x5b\xad\xd6\xfa\x91\xd0\x72\x52\x61\xb8\x35\xe1\x0b\x62\x90\x78\xa3\x7d\xab\x8f\x36\x31\x13\x09\xef\xef\x2a\x27\x12\xb0\x25\xd8\x73\xb9\xa8\xcd\x21\x2b\xf0\xdf\xfd\x9e\xb2\x6b\x98\x37\x19\x52\x77\x83\x44\x51\xb7\x23\x91\xf3\x23\x98\xf1\x2f\xc2\xfa\xfe\xea\xef\xe4\xf3\x3b\x90\x6c\xdd\x63\x8c\xd1\xbb\xdf\x86\x25\xe8\xde\xd6\x83\x69\xde\x2e\xa7\xce\x49\x54\x77\x18\x04\x47\x49\x04\x5f\xe0\x16\x73\xdc\x35\xbb\xf1\xbb\x44\x6d\xa2\xb9\x4a\x79\xd0\x81\x56\xb7\x2a\xee\x50\xbf\x70\x9f\x07\x0f\xcf\x67\x8d\x2d\xd2\xca\xcb\xfe\xe6\xbc\xd1\xeb\x71\x67\x59\xca\x91\x84\x61\xe1\x6c\x16\xb3\x60\x96\x01\xe9\x48\x0f\x5a\x08\x1c\xe4\x1a\x65\xd8\x92\xd1\x75\x82\xaa\xf2\x87\x33\x39\x4
8\xb9\x3c\x40\x17\x21\x8c\xcf\x90\xd4\x60\x11\x85\xf3\x5d\xdf\xa6\xf9\x04\xd0\xb4\xb4\x3f\x65\x8b\xbb\xae\x59\x16\xa7\x6f\xcb\x54\x14\x43\x00\xe7\x18\x07\x8a\x2e\x4f\xea\x60\x5d\xfe\x10\xec\x76\xb2\x4c\x42\x30\xf6\xe6\x77\x4c\x97\x36\x99\x16\xc9\x7e\x03\x4d\x2a\x91\x34\x76\xa3\xf0\x49\xd6\x63\x14\x78\x08\x83\x38\x1c\x38\xff\x64\xf6\x62\x6f\x8c\xf6\x38\x7f\x43\xc1\x19\x2d\x9d\x67\x80\x39\x69\xb8\x2e\x73\x98\x5b\x43\xae\xa0\x3d\xc7\x7e\xd5\xa2\x44\x7d\xf2\x97\xce\x4f\x88\x94\xdf\x0d\xed\x3e\xaf\xdb\x76\xf0\x97\xf2\xf1\xec\x36\x9d\x49\x4f\x29\x41\x10\x18\x23\x2a\x68\x14\xb4\x6b\x77\x5c\x0b\x7a\xb5\x9a\x52\xb3\xfc\x66\x9b\x2c\x77\xc1\x12\x36\x74\x6b\x98\x77\xd8\xed\x06\xec\x5b\xe3\x97\xfd\x3e\xca\xd3\x07\x6d\x7b\xb3\x81\xb1\xed\xbd\x51\x91\x77\x7b\xc6\x4a\x7e\xdb\x53\x4b\x96\xa4\x3f\x51\x3d\x23\x3f\x3a\xc4\xb4\xdb\x67\x4c\xdc\x13\xb7\xfc\xee\x65\x40\x64\x77\x2a\x33\x63\x63\x20\x93\xe1\x36\xc8\x46\xce\xeb\x0f\xf1\x93\xc5\x82\x5a\xfb\xf5\x27\xfa\x48\xc1\xd7\xe3\x63\x58\xfc\x8e\xe2\x74\x2f\xb9\x45\x7d\xda\x55\xc7\x56\xed\xe0\x86\xe4\x86\x0a\xfc\xa7\x5a\x95\x43\xf1\x97\xd9\x51\xd9\xbd\x08\x16\x0e\x4b\x51\xa2\x7f\x2e\x45\xe5\x4d\x78\x36\x8f\xe0\x30\x1d\x95\x97\x3d\x44\xef\x07\x41\x85\xf1\x10\xd4\x89\x05\x6f\xff\x71\x49\x43\x54\x50\xf6\x1e\xd5\x67\x5a\x78\xe3\xb5\xb1\x8a\xbd\xd5\x9d\x5f\x65\x79\xf8\xd7\x91\x43\x02\x61\x7c\xea\x66\x39\xf8\x4e\xac\x05\x4f\xc9\xec\xad\x76\xe9\x25\x16\xb1\x3b\x53\x54\xd2\x2b\x2b\x30\xc9\x2e\x52\x87\x11\xfa\xda\xe4\xbe\xfd\x72\x33\x1f\x3b\x9d\xfa\xc6\xb4\xdf\xe1\xfc\xb5\x09\x05\xea\x13\x0c\x57\x65\xf6\x71\xea\x28\x3b\x82\x19\xdb\xd5\xe3\x4f\xca\xa6\xf0\x50\xdc\xf8\xe4\xe4\x74\xca\xfb\xa6\xe7\x2f\xfa\x26\x77\x4a\xd9\x65\xd3\x16\x3e\xa7\x91\x1c\x5d\x42\x71\xee\x37\x06\x1b\xb1\x49\x26\x78\x57\x8e\x3c\x10\x78\x6f\xe8\x50\xf9\x88\x1c\xe7\x59\x07\x7c\xe4\xc9\xe4\xb2\x6b\x9e\x55\x35\x6e\x89\x2b\x0f\x0d\x4f\x94\x31\xa3\xed\xdc\xee\x82\xf1\xf0\x21\x7d\x9c\x59\x82\x57\xc1\x96\xa6\xcb\x50\x84\xc7\xc2\x03\x24\xd2\xf9\xa8\x70\x55\xcb\x7c\xd0\xec\xd6\x9a\x03\xe5\x31\xef\x20\xac\x8a\x89\x7a\x2f\xa5\x72\xc5\x40\xb2\x3d\x32\xb5\x41\x4a\x55\xfb\xef\x36\xac\xb8\x48\xc8\x08\x4c\xda\x9a\x80\x90\x13\xa9\xaa\x34\xcb\x80\x56\x3b\x87\x5b\x55\xec\x32\xec\xd6\x38\xf5\xbc\xb7\x60\x6b\xff\x94\x20\xd0\x4a\xf1\xb4\xeb\xef\xae\xd3\x08\x14\x58\x96\x68\xfb\x87\x8e\xb6\x97\x4c\x20\x31\x91\xba\xc8\x05\x38\xe0\x42\xa5\x62\x72\xd5\x6c\xf3\xd6\xa6\x61\xb7\x10\x3b\x3a\x85\x1b\xce\x59\xd8\x2d\xd2\x1c\x48\x05\xb4\x67\xea\x0e\x38\x53\xd6\x50\x7a\x8a\x50\x84\x29\x8d\x3c\xbb\x06\x3d\x89\x91\xee\x6d\xc8\x3a\x72\xa7\x86\x48\x71\x3e\x5b\x64\xde\xa9\xc1\xa6\xef\x6c\x3c\xcc\x52\x0b\x2b\x7e\x3a\xd0\xf4\x2c\x9c\x27\x3d\x19\xdf\xcb\xdb\x09\xf3\xd4\x80\xd8\x6d\xa3\xdc\x28\x57\xc1\xd6\x27\x02\xc0\x7a\x44\xad\xb9\x84\x87\x05\x68\x11\x9c\x09\x15\x69\x59\x40\xce\xa4\x5c\x20\x0b\x8f\x94\x7d\xf7\xd1\xca\xa9\xbe\x2a\xf5\xbf\xce\x41\x92\x25\x6b\x76\x89\xb4\x0f\x4f\x63\xe2\xf9\x37\x25\x19\x56\x61\xa5\x00\x18\xd1\xd5\x7c\x75\xec\x25\xac\x33\xfc\xd6\x0b\x6b\x40\x0a\xd9\x7f\xce\x19\xf8\x88\x8a\xc2\x23\x3a\xa9\xae\x46\x4c\x4e\x07\x97\x78\x2a\x0a\x69\x16\x82\x0c\x45\x39\xd7\x76\xb0\xe4\xe5\x02\x3f\xf7\xe3\xbe\x59\x10\xbf\x07\x60\xbf\xaa\xd4\x02\x74\x8f\x69\x91\x96\x4c\xa5\x7c\xc7\xfd\x8e\x72\x52\x70\x12\x18\x07\xb4\x95\x78\x15\x1e\x61\x6e\x4b\xed\x37\xca\xbd\xcd\x1b\xed\xfb\x2a\xd1\xaf\x2b\xa9\xde\xba\x30\x2b\x73\xc6\x8c\x1d\x67\xbd\xb4\xad\x8b\xfc\x73\x4f\xf7\x9d\x52\x30\xc0\xba\x87\x91\xc7\xfd\x69\x64\x5b\x44\xaa\x86\x37\xcc\x27\xa8\x09\xdf\xa8\x2a\xd1\x65\xa6\x17\x29\x09\x22\xb8\xe5\x2d\x34\xf8\x2e\x11\x79\x38\x95\x2d\x30\xa1\x43\x41\x
be\xbb\x84\x32\x7b\xdb\xb7\xf7\x2a\xa4\x65\x21\xdc\xbf\xff\x3c\x01\x77\x2b\x0e\xfd\x4b\xe0\x5d\xcf\x13\x07\x76\xe6\x1f\x3f\x68\x9c\xdd\xa0\x62\x10\x0e\x20\x74\x56\x2e\x79\x95\xfa\xca\x8e\x3d\x22\x85\x6b\xa3\x52\xc0\x51\x3b\x29\x55\xa5\xcf\xd4\x60\x64\x99\x63\xe3\xd8\x3b\xb5\xfd\xc1\x91\x21\x4b\xc1\x93\xae\xf3\x16\x2d\x7c\x8d\x3b\x1b\xc0\x26\xe7\x5d\x1e\x31\x44\x5f\x1f\x1f\xd2\x02\x6e\xd2\x18\x21\xbf\x23\x2b\x65\x69\xa2\x34\xb7\x98\xf4\x53\x12\x61\xec\x0f\x2a\x2e\x82\x03\xb7\x0b\x4b\x99\x33\x09\x45\x33\x55\xa0\xb3\x93\x5f\xd8\x55\xe6\xa7\x35\x7a\x3e\xf2\x9a\x3e\x67\x84\x9d\xdd\x67\x06\x50\xcc\xbb\xdf\x6a\x82\x00\x71\x06\x99\x54\xa0\x74\xd5\x00\x2d\x98\x77\xae\x0b\x92\x31\xfb\x92\xbe\x01\x84\xf2\x67\x3c\x8f\xea\x93\xde\xe7\x72\xc7\x5c\xef\x19\xac\xaf\xa7\x70\x9b\xc0\x8a\x20\xf9\xa5\x47\xa2\x95\xce\x32\xc9\x23\xc7\x7e\x80\xea\x86\x75\xa6\xbb\xa5\x52\xe0\x7b\xe6\x9f\x00\x78\xd4\xbd\x2d\x46\x4f\x2f\x8b\x66\xec\x9f\xf6\xb2\x8a\x9f\x6c\x87\xf7\x2d\x4f\x3a\x3b\x2b\x23\x0f\xc5\x27\x49\x9d\xdd\x00\x9a\x10\x2c\x6a\x59\xad\xc3\x3e\x97\x36\x63\x93\x2d\x4a\x07\xdd\x0d\x9d\x0b\xe6\x59\xca\xf1\x60\x3c\x7a\xf0\x3d\xcb\x4d\x8c\x5f\x56\x8a\x31\xac\x05\x1a\xc2\xf0\x54\x3d\x26\xe7\x6d\xbc\x41\x8f\x1c\x34\xa1\x26\x0d\x7d\xc7\x04\x11\x23\x5f\xea\x1f\x5e\xc1\xd8\x45\x51\x99\x5a\x4e\xad\x86\x75\xd5\xee\x3e\xc2\x9d\xaa\xd2\xe0\x20\x5f\x62\x54\x97\x1d\x67\xb7\x73\x15\xdd\x11\x2d\x8f\xe3\x4d\xb4\x13\x98\xc1\x0e\x57\x0c\xe5\xec\x88\x9c\x82\xfa\x06\xf7\x62\x21\xdc\xa0\x12\x78\x9d\x0a\x99\x4f\x58\x67\xc4\x93\x38\x93\xfc\x1c\xe1\x5a\xec\xdd\x20\xf2\x46\x39\x86\x3f\xe4\xbe\xf0\x33\xd5\x3f\xe8\x10\xab\x94\xf9\xa3\xa9\x9f\x8f\xb4\x33\x7f\x05\x93\x67\x19\xa8\xd8\x81\xfb\x24\x6d\xc4\xfe\x0c\xab\x33\xe9\xa4\x4d\xa0\x63\x53\x63\xb6\x95\xb8\xc3\xf5\x79\x14\xfa\x2c\x31\x3f\x93\x22\xa3\xf5\x7a\x5a\x21\xfc\x57\x5e\x62\xa2\x8e\x8b\xbe\xca\x0e\x59\xd9\x10\xad\xe9\xbf\xb8\x25\xfb\x12\x46\xe8\x9b\xd7\x71\x88\x8e\xd1\x30\x9f\x08\x20\x6f\x46\x78\x22\xea\x76\x8c\xf1\xf8\x52\x22\x11\x11\x34\xd6\x34\x2d\xba\x57\x45\xa9\x70\x22\x45\x84\x74\x58\x5d\xfb\x6e\x66\x0b\x1c\x43\xcf\x9b\x68\x2a\x08\x54\xcd\x91\xbe\x35\x67\xb2\x89\x6c\x2b\x79\x09\x98\x95\x3f\xf2\x1f\x36\x66\xd6\xc1\x1f\x3e\x80\x67\xba\x09\x99\x0d\x53\xef\xe5\x27\x38\xc5\x69\x4f\x7b\xef\x85\x79\xd7\xb6\x18\x73\x34\xa6\xa1\xc0\x9f\x42\x28\xe2\x9e\xb6\xfc\x37\x59\x5e\xfd\x54\x4a\x63\x25\x08\x0b\x33\x69\xc4\xbd\x3f\xdc\xba\x61\x54\xde\x51\xba\x11\xf7\x7e\xfb\x92\xd9\x5a\x5e\x09\xc8\xdf\x8c\x5d\xf1\xb3\x7a\xab\xfa\x6c\xee\x02\x3e\x7a\xf4\xaf\x08\xda\xa1\xd5\x00\x14\x5c\xe0\x5d\xd5\xa1\x6e\x7f\xee\xfd\x65\xaf\x6a\x15\x09\x55\xa7\xf7\x15\x0d\x14\xf4\x2f\x9c\xbc\xab\xae\xef\x2b\x77\x07\x58\x7c\xe1\x4a\xd6\x29\x9f\xdf\x41\x0e\xc4\x5c\x47\x83\x91\xab\x9f\xa4\xd6\x73\xd7\x23\x7a\xe1\xdd\x8b\xe0\xd0\x3b\x6f\x47\x0d\x57\x3d\x82\xaf\x8c\x81\x38\x92\x8c\x57\xef\xe5\x4d\xb3\x5a\xec\x28\x64\x71\x75\x25\x9a\xe0\x72\xd0\xae\x08\xe2\xee\x10\x2b\xba\x13\xa3\x6f\x1a\xb7\xb5\x0c\x97\x94\x87\x0b\xaf\x48\xa8\x14\xfc\x33\x9f\x9a\x66\xa2\x44\x65\x81\x5f\x9f\x6f\x0f\x53\xd1\x3d\xf5\x96\x76\x88\x1f\x84\x8e\xf7\x95\x4e\xf1\xda\xa8\x4f\x1c\xc4\x1a\x51\x1c\x1d\xb4\x34\xe2\xd5\x56\xbe\xb5\x57\x1c\xa3\x57\xf1\xf2\x01\x14\x24\xb7\xca\x68\x36\x48\x32\x7a\xdd\x73\xb0\xab\x5c\x00\xad\x3e\x37\xbc\xb1\xad\x72\x27\xd6\x44\x18\xe7\x0c\x9f\xd6\x73\x0b\x67\xa0\x53\x9b\x4d\x78\xec\x63\x90\x76\x35\x4f\x1e\x38\xf0\xe2\x31\xbf\x4a\x10\x04\xc2\xab\xe6\xb4\xf7\xf4\xb5\x45\x01\x71\x45\x05\x51\xcf\x9f\x09\xa3\x27\x3c\x70\x26\x84\x29\xa3\x2c\x0d\x78\x05\x1c\xe1\x59\x77\x20\x5e\x50\x69\x69\xa4\x86\xd8\xe6\x13\x41\x60\x3b\xf9\x24\x56\xda\
xfd\x8a\x82\x3d\x38\x6d\xfe\xd4\x59\x63\xef\xc8\xde\xf3\xaf\x04\xe9\x57\x1d\x76\xe2\x1e\x24\x58\x64\xbd\xc9\x07\x5c\x1b\x98\xbd\xc2\x7a\x87\x75\x41\x38\xe7\xdb\x76\xa3\xf7\x09\xb6\x94\x94\x3b\xd8\x13\x73\x2a\xae\xc4\x98\x55\x93\x4f\x2f\x06\xca\xbe\x8b\xda\x38\x6b\x17\x21\x9b\xca\x6a\x37\x2f\x6d\xbd\x0e\xf0\x32\x6e\xce\xad\xea\x87\xe7\x11\xbb\x64\x30\x53\xed\x81\x05\x52\x51\x55\x8d\x6e\x87\xfc\x82\x03\x72\x36\x72\x0c\x28\xf0\x9a\x6a\x9e\x4f\x9a\xd5\x4e\xf9\x56\x5f\xbd\x48\x9c\x3c\xe5\x5f\x7c\x04\xc2\x20\x63\x1f\x21\xdf\x7f\x62\x35\xe3\x15\x02\x6b\x4e\xbb\x97\x7d\x47\xa8\x43\x27\xfb\x4b\xdc\xde\xe4\xd1\xc3\x56\x85\xab\x8e\x5b\x89\x5c\x9a\xc7\xb0\x1d\x1a\x3d\x28\xfb\x11\x9e\xe6\xfe\xe2\xc0\x57\xb9\xd0\xe4\x48\x4c\xca\x24\xad\x6c\x8e\x14\x9e\x2c\x28\x2f\xb7\x14\x65\xa6\x98\x73\xc4\x36\xdc\x3d\x4d\x48\x07\xbd\x8c\x83\x1d\x28\x7c\x51\x3c\x10\xa8\x17\xb1\xac\x05\x62\x82\xc8\xa7\x71\xcd\x7d\x8a\x8e\x70\x34\x8b\x9d\xfe\x28\x1a\x2e\xf5\x07\x3b\xf3\x6a\x07\x95\x3c\x17\x91\xd1\x25\x9b\xb4\x25\xfe\x16\xb4\x51\xca\x01\xee\xe3\x64\xef\x93\x20\xcd\x3e\xb9\x7f\x0d\x3b\x07\x3b\xdd\x7f\xfa\xd0\xf2\x4f\x70\x00\x44\x32\x67\x18\x68\x89\x5b\x4f\x98\x4d\x56\x38\x2d\x8e\x28\x85\xb1\x9f\xed\x53\x21\x53\x61\x5a\x4a\x57\x83\xdb\x8f\x12\x4f\x13\xa2\x2c\x8f\x26\xc9\x67\x7c\x40\x8e\x5d\x89\xc6\x1c\x48\x2a\x0a\x36\xd1\x81\x58\x1d\x02\x84\x80\x11\xa9\x50\x85\x4f\x38\x6f\x9f\x2f\x5a\x36\xa6\x61\xbf\x6d\xab\xd2\xfe\x85\x3e\xa5\xa8\x51\xb1\xba\xad\x70\x4c\x74\x5e\x71\x2b\xdb\xed\x11\x34\x69\x8c\xf9\x14\x8c\x5e\x44\x70\x7b\x01\xbd\x77\xb4\xcb\x56\x4a\xeb\x77\xad\x7d\x63\x30\x79\x4d\x44\x5a\x98\xe0\x6f\xfb\xec\x16\x38\x78\xfc\xc0\x73\x97\x4d\x31\xd8\xc8\xda\x8e\x68\x4f\x9c\x89\x75\x3e\xfe\x9f\x6c\x09\x7d\x91\x0a\x8d\x2c\x6e\x9f\x18\x03\x6c\x38\x78\x4a\x1c\x35\x24\x28\x47\xe1\x18\xd5\x76\x34\x9d\x25\x47\xb4\xac\x18\x39\x04\x17\x3b\x0d\xb4\xbc\x2e\x4a\x1a\xdc\xc3\x6a\x74\xff\xff\x65\xbf\x50\x18\xb8\x67\x6b\x5d\x19\x46\xb3\x63\x97\x20\xf4\xb0\xb9\x76\x0c\xa1\x43\xf4\x23\x73\xb6\x68\x48\xb6\xef\x63\xbc\x0b\xad\xa5\x6a\x78\xdf\xab\x0d\x94\x71\xa1\x32\x7f\xec\x28\x91\x29\xd6\xda\xfb\x8b\xa4\xd9\x71\xf8\x70\xa0\x48\x22\xad\xf4\x66\xc4\xc8\x61\xf5\x11\xa9\xd7\x47\x1f\xf1\x23\xdb\xcf\xa4\x7a\x4d\x1c\xc0\x02\x56\xf5\x9a\xe9\x1e\xb8\x23\x2d\x95\xfa\xdb\x42\x47\x58\x17\xe6\xaa\x3a\x5b\xec\xfe\xd4\xb7\xdb\xd6\xa6\x6c\x7f\xbd\xb6\x0b\x94\x63\x51\x18\xdd\x0b\x03\x2e\x62\xca\x3f\x33\xc8\x4c\x9c\xee\xf3\xfa\xe5\x9a\x7c\x2a\x39\xdb\xcb\x53\xbc\x1b\xf8\x4c\xb2\x67\x51\xd4\xd0\x58\x5b\x1b\x53\x6f\xfb\xfd\xbe\x5d\x2d\x81\x50\x8f\x0a\xcd\xc8\x92\xca\xc2\x1e\x91\xe4\x58\x7b\x73\x7e\x0c\x6e\xb0\x2e\x7c\xf5\x17\xa2\xdf\xe7\xf2\xf1\x3b\x1a\xde\x3d\x92\xcc\x05\x0b\xe2\xc9\x49\x61\x3e\x9b\xeb\x5a\xf6\x9a\xe4\xd0\xc0\xba\x9c\x94\xfe\x4a\x4d\xd7\xfc\x06\xc0\xad\x51\x33\xb4\x84\xfe\xe1\xa5\x46\x76\x48\x41\xef\x8e\xda\x65\x51\x02\x0d\xb9\x54\x42\x35\x5e\x6d\x75\x34\xef\x3d\x37\xf2\x51\x7e\xd9\x19\x29\x80\xae\x0e\x1e\x89\x26\x8a\xc1\x7a\x47\x56\xa7\xbd\x60\x82\x2f\x22\x86\xc7\x97\xd2\x89\x5a\x89\xb3\x09\x8d\x86\x64\xfd\x4c\xb2\x74\x0c\x46\x6b\x67\xad\xe2\x25\x7a\xec\x36\x6b\xa4\x90\xe8\xa5\xe9\xbd\xa9\x26\x68\x18\x2b\x0b\x77\x44\xd5\x47\xac\x23\x66\xab\x88\xbb\xe6\x03\xdf\x4d\xbf\xea\xb6\x7c\x3b\x15\xf6\x85\xa9\x11\x77\x85\x13\xbf\x9e\x5c\xac\x2d\xaa\x9e\x57\x1a\xb6\x44\x4b\xc0\x77\xe4\xcd\xc0\x89\xe9\xe0\x22\x70\x8c\x0b\x73\x7a\x61\x9b\x99\xf4\x8d\x2b\x2d\xbf\x9c\x3e\x50\xcd\xc1\x0c\x24\x7c\xf9\x96\x36\xfe\xdd\xa4\x7f\x17\x1c\x36\x7d\x11\x3e\x0f\x57\xea\x62\xda\x76\xb7\x04\xcc\x88\xd4\x25\xa6\x77\x96\x10\xa1\xc3\xf5\x78\x67\x8d\x54\x9e\x31\x06\x87\xae\x5f\x44
\xf3\x60\xb7\x27\xaa\x16\x09\xa3\x70\xf8\x75\x84\x2b\xda\x71\xc7\xeb\x75\x54\x46\x71\x1d\x31\x20\xa3\xdb\x93\x0f\x1b\xf2\x44\x9d\xa0\x02\x7f\xed\x84\xf0\x9f\x86\x82\xe4\x57\x53\xed\xcd\x5e\x6a\x5f\x04\xea\x60\xa6\xa7\x32\x48\x67\x40\xfd\xbf\xbe\x29\xf5\x9f\x64\x12\x2b\x4c\xa8\x4d\x23\x47\x33\x6b\x28\xb0\x05\x5c\x82\xdd\xa9\x85\x53\x55\x34\x78\x8a\xbf\xa9\x03\x8d\xf3\xd7\xf2\x49\x01\x9c\x04\x30\xbb\xe6\x2e\xcc\x15\x41\xc1\x94\x9f\x22\x86\xb1\xdc\xa2\xec\x56\xa2\xfd\xc4\x72\x33\xf6\x6f\xa9\xaf\x48\x82\x6f\xa4\x1a\x05\xc0\x63\x1f\x3a\xc3\x29\x65\x18\xb4\x09\xbd\xb4\x2f\xfd\xb3\x45\xf5\xe1\xf6\xe9\xf6\x13\x41\xf2\xdc\x6b\x51\x72\x39\x50\xde\xd0\x66\x69\x3c\xe7\xa3\xfc\xbb\x2b\x89\xf6\xc2\x48\x3d\xf3\x5d\x2b\x6c\x8e\xe7\xd4\x7f\x23\xbb\x09\x00\x4c\x58\xb1\x48\xaa\x88\x6c\xa8\xb0\xcc\x08\x71\xef\xd8\x58\x35\x57\x88\xae\x50\x62\x4d\x51\x62\x3d\xc3\x93\xe5\xe5\xe6\x42\x4f\x84\x4a\xf9\x23\xcd\x24\xc0\x4a\x73\xfb\x03\x6b\xbd\x65\x5d\xfd\xd4\x3b\xf5\x84\x51\x5f\x6c\x4c\x4f\x7b\xae\xcd\xc2\x93\x43\x6e\x87\x61\xdd\x3a\x63\x95\xa0\x23\x0f\x63\xbb\x2b\x72\xe3\x46\xfe\x42\x7f\x6e\x04\xf8\x90\x09\xfa\xe1\xa0\x71\x79\xe5\xde\xba\x2c\x16\x53\x7e\x3c\x80\xcf\x11\x29\x34\x8d\x0b\x53\xaf\xeb\x88\x41\xff\x99\x5e\x51\x22\xb5\x64\x1a\xdb\xa1\xc2\xc0\x78\x8f\xaa\x69\x0d\x7a\xe8\xf2\x54\x60\x1f\xbd\xea\x90\xbf\x79\xcb\x17\x9f\xd1\xd9\x00\x80\xec\x1d\xf4\xa3\x0b\xf8\xec\xbb\x95\x48\xe9\xd2\x46\x77\xcc\x0d\x0a\xf3\x58\xe2\xc6\xaf\xf9\x63\x5a\x11\xc5\x62\x44\x7e\x76\xdf\xbf\x90\x8c\xe3\xcf\xe3\xa3\xc8\xa0\x8f\x6f\xcc\x4e\xe3\x8f\x17\xd2\xae\x41\xc1\x40\xf6\x94\xac\x5e\xf1\x69\x39\xf3\x74\xdf\xd0\x8c\xcb\xde\x00\x45\xe8\xc6\x6d\x47\x2a\x30\x46\xa6\xf0\xc6\xb0\xdd\xe4\x08\x2a\xff\x1e\x5e\xb4\x4b\x93\xb9\x03\x10\x61\x88\xe7\x14\x57\xd9\x99\xed\x96\x2e\x22\xc4\x9c\xc9\xdb\xee\x63\xfa\xac\x82\x5b\xbd\x9b\xc6\x6c\x90\x33\x90\x85\xc2\x5d\x6b\x6c\x0e\x82\xa6\xe9\x8a\x19\x08\x08\x99\x3a\x60\xf7\x44\x3b\x49\x20\xa6\x21\x0c\x16\x6f\xc6\xae\x2b\x6a\x74\x37\x36\x2d\x6b\x30\xeb\x3c\x88\x2e\x29\xac\xa3\xf7\xa1\xce\xea\x94\x54\x61\xa5\xe1\x4f\x88\xab\x44\xa4\xec\x2e\x07\x25\x5b\x9d\xf2\x2a\x9b\xc5\x55\xa7\xf9\x9a\xac\x6d\x91\xd0\x40\x57\xe5\x72\xef\xd1\x1b\x09\x40\xf1\x27\x2a\x78\x79\xba\xb7\x33\xac\x02\xbc\x3e\x0c\xef\x1f\x40\x26\xdf\x9e\xad\xca\xc4\x6e\x1d\xfc\x57\x5e\x7b\xc8\x35\x8f\x69\x22\xf7\xef\x09\xb6\xfb\x4a\x88\xc9\x63\xf8\x2e\x64\x65\x83\x30\x22\xa6\x0d\xf7\x2f\x8c\xe6\x1d\x06\xb1\xa2\x91\x45\x88\xd5\x50\x72\x05\x07\xe8\xc5\x9b\x6e\x0c\x6b\x1f\xbb\xba\x07\x71\x73\x14\xb1\x37\x36\x44\x62\x94\x73\xe9\xf3\xf8\x24\xc2\xac\xb9\xf5\x77\x4f\x18\x15\xea\xa6\xfe\xef\x3f\x7f\x38\x27\x91\xe5\x90\x3f\xfe\x20\x57\xce\xef\x5d\x18\x2b\x33\x17\xf3\x45\xcc\x28\xab\xb8\xce\xcb\xdc\x6c\x71\x04\x25\x0f\xf7\xa1\xd2\x44\xcc\x64\xfb\x8d\xe7\xcc\x3b\x74\x88\x81\xc4\xe0\x48\xb5\x2f\x14\x54\x31\xe4\x75\xe7\x24\xd4\xfa\x57\x6d\x00\xb6\x47\xf2\x6b\xe8\x95\x30\xb0\xd1\x18\x00\x7f\xbb\x7c\xd9\xe5\xd9\x5d\xe2\xae\x05\x3b\xb2\xb8\xe2\xac\xa7\xda\x22\x09\x47\x7e\xa3\x56\xf5\x08\xfc\x9d\x02\xaf\xc5\x30\x21\x0a\x19\xd6\x34\xdb\x6f\x4d\x56\x9d\x13\x8e\x84\xf2\x6c\x41\x8f\x3d\x22\x44\xbb\xab\xb0\x76\x52\x7b\xb4\xdc\x61\x64\xd9\x2d\xce\x8c\x13\x57\x21\x2b\x11\x32\x7d\xa4\xbb\x00\x51\xfb\xe1\x54\xb9\xfb\x4f\x69\xda\x33\x0f\x8b\xa4\x77\x52\x49\x2f\xdf\xbb\x91\x46\x29\xfa\xe1\x35\xf2\x65\x65\xf8\x3e\x6a\xd7\x98\x29\x60\x05\x34\xb2\xec\x70\xd2\x72\xf5\xed\x6c\xc3\x1f\x5b\x36\x0c\xa9\xce\x6b\xa4\x3b\xee\x5b\xe4\x6a\x40\x41\x8a\x45\x3b\x54\x80\xdb\xdb\x23\x8d\x6a\x2a\xc2\x6f\xaf\x36\xde\x53\x2a\x93\x76\x2a\xab\x5d\xa2\xa1\x97\xd3\x01\x19\x46\xc0\x80\x7d\xe6\xa4\x4a\xc
6\x57\x25\x98\x20\xe7\xde\xd4\xa7\x7b\xe2\x9d\x7b\xda\xc9\xed\xe1\x85\xbe\xf6\x5f\xb4\x48\xda\x79\x4e\xc7\xa7\x45\xb2\xc3\xa9\x49\xbf\xcf\x4e\x36\xfd\x3a\x68\x78\xe5\x4c\x08\xc3\xcf\xf3\xd2\xb1\x7c\x04\x60\x73\x4c\x6c\xe4\x31\xb3\x23\x17\xa9\x6c\xf2\x1a\x62\x60\x9f\x1f\x90\x7b\x18\x49\x38\x35\xc2\x6f\xa7\xbd\x31\xdb\x27\x39\xe5\xc2\xa8\xb7\xca\x09\x01\x90\x87\xd7\xc5\x56\x35\x70\x5c\x5c\x23\xcd\x76\x63\x25\x42\x81\xc9\xc3\x85\x79\x8d\xbe\x44\xd6\x8f\xb7\x59\xb4\x55\xb5\x3d\xb7\x4b\xac\x57\x2e\xc1\x5d\xb6\x9a\xd8\x87\xae\xde\xe1\xf2\x73\x8b\x78\x60\x60\xef\xc2\x58\xc6\x83\x00\x6d\x1f\xfc\x64\x1a\xb5\x42\xe7\xa7\xe6\x0a\x83\x12\x6f\x9f\xee\x75\x9f\xa3\xa6\x79\x65\x52\x01\x96\xfa\x94\x35\x11\xe9\x7e\x4b\x89\x7e\xd0\x36\xbf\xbd\x79\x3b\x2e\xf9\x39\xff\xe3\x03\x2e\x89\x6c\xba\x49\xcf\x5b\x22\xc5\x15\x83\xcf\x48\xa7\xbb\x0f\x4f\xb5\xf0\xd2\x87\xcd\x56\xcb\xf4\x0d\x5a\xc4\x8c\x9e\x12\x8b\x3f\xa6\xcc\x72\xb7\x75\x84\x76\x3a\x8b\x4b\xcd\x26\xbd\x0a\xb6\xf1\xea\xd7\x4f\x9c\xc1\x4c\x35\x3f\xb9\xcf\x82\x1e\xf5\x70\x38\x7a\x56\x7a\x68\x0b\x33\x1a\xeb\x02\x98\x77\x21\x78\x64\x87\xb8\x9c\x94\xee\x45\x47\xa3\xb5\x7a\x97\x8c\xbe\x8e\xac\x19\x54\x20\xa2\x4f\x7b\xef\xc9\xc6\xf7\x8b\x65\xe3\x34\xba\x1e\x6d\x90\xf4\xb4\x77\xda\xb7\xbf\xe8\x2d\x7d\x02\xad\x0d\x30\x8b\xf1\x2c\x19\xf1\x70\x6f\x8c\x58\x1b\xea\x96\xc4\x14\x2e\xda\xd0\xca\xbf\xce\xb1\x03\xe0\x4b\xdd\xcf\xbd\xf0\xb6\xde\xaa\x5c\x56\x73\x7d\x8d\x44\x86\xfa\x0f\x93\xa4\x87\x14\xd9\x1f\x1d\xe1\xc0\xbb\xe0\x58\x72\x0f\x4f\xaa\xd8\xc1\x8a\xe6\xd1\xcb\x8f\x0e\xac\x3c\x37\xbc\x5e\x02\x7a\x5a\xe7\x15\x2a\xd9\x8e\x25\x7f\x97\x74\x0b\x76\x1c\xa2\x76\xbf\x3d\x31\xdd\x76\x20\x4e\x9a\x64\x7d\x93\x06\x25\x40\x73\x64\xde\xd4\x71\x17\xf2\x57\x05\x5d\x54\xef\xa3\x61\x41\x37\x3e\x64\x06\x36\x49\xd5\xe2\x47\x50\xd6\xeb\xa8\x21\x96\xd9\x7c\x93\x75\xac\xe6\xcf\x3b\x74\x10\xf9\x03\x16\x71\x6f\x53\xb2\xfb\xc5\x31\x72\x07\xbd\x96\x91\x2c\xa4\x1a\x43\x0e\x6e\x28\x20\x60\xf9\x75\x76\xc2\x0e\x90\xbd\x56\xf0\xdc\x6d\x47\xcc\xf5\x20\xac\x0c\xcd\xb2\x6f\x40\xc7\xef\xfd\x3e\x11\x20\xc9\xef\x69\x1c\x6f\xa4\x52\x65\xec\x84\xbe\x9e\xc3\xed\x1a\x3e\xac\xaf\x98\x67\xdf\xff\x3a\xa1\x1e\xb1\xa8\x6d\xcb\x52\x5f\xe8\xfe\xa2\x04\xa4\xce\xd4\x6a\x3b\x80\x86\x5e\x75\x45\x4a\x6c\x3d\x75\x2e\x21\x0d\xe0\x4a\x02\x41\x8e\x48\xaf\x3c\xf2\x28\x07\x4a\x8d\x60\xa0\x1d\xce\x9b\x26\x99\xb4\x74\x11\x99\x0b\xb9\x4e\xba\xbf\x3d\xf1\x82\x0c\x2f\x28\x9c\x15\xdc\x2d\x39\x4e\x7d\xff\x84\xf4\x5b\xea\xb9\x65\x05\xc3\x7c\x6d\x15\xea\xda\x03\x78\xf1\xc5\xf4\x9b\x96\x41\x52\xb6\x68\x1f\x0a\xc7\x35\xe9\x92\x45\xfb\x45\x13\xa0\xf6\xea\xcb\x71\x36\x3c\x19\x7c\xa3\xa9\x5b\x40\x05\x60\x76\x3c\xe8\x1c\xf8\xe6\xc9\x37\x97\x28\x6a\x67\x64\xc9\x3c\x98\xbc\x04\xfb\x16\xfc\x9d\x1e\xf9\xae\xba\x76\x02\xc9\x89\x44\x2e\x77\x9a\x4f\x6a\xa8\xa7\x7a\xfe\x58\x96\xb5\xa9\xec\x6d\x19\xfc\xcc\xda\xc9\xf0\x6a\x2a\x0b\xcb\xbb\xac\xfe\x6a\x64\x12\xde\x58\x47\xb6\xc0\x90\xc9\x09\xd0\x1b\xd3\xd0\xc9\xed\x05\x9b\x39\x6f\x2d\x1f\x5a\xad\xea\x9a\x7e\x8c\x4e\x34\x4d\xa2\xac\xf8\xae\x96\x8b\x80\x9f\xe1\x43\x8a\xa6\xed\x1b\xba\x25\x90\xf8\x16\x7a\x1f\x58\xfb\xb0\x80\x82\x99\x9e\x3f\xf9\xc1\x36\x1e\xcd\xbb\x72\xa0\x39\xcd\x9a\x99\xd0\xa4\xa7\x30\x93\x02\x3d\x45\x11\xfe\x4b\x1e\x81\x4d\xff\x3f\x36\x2f\x42\x9e\x53\x23\x4a\x87\xce\xdd\x89\xd0\x2d\x02\xb8\xab\x11\x72\x7e\xb4\x68\x8e\xa0\x63\x7a\xb7\x24\x08\x9b\xd9\x81\x3e\xe4\xb6\x3f\xc5\x6d\x51\xbe\x38\x2e\x0e\xca\xf0\xbe\x89\xdb\xe4\x27\xc9\x78\x9a\xb2\x7e\x0e\xc0\x1b\xd5\xe8\x9d\xc3\x23\xcc\xca\xc5\x2b\x52\xd2\x87\x7c\xf3\x7d\xbe\xee\x27\xa7\xd7\xee\xfb\xc6\x05\xf9\x59\x57\x84\xa2\xb9\xdc\x87\xff\x
[... embedded binary data omitted: a long escaped byte string ("\x.." sequences) from the patch, garbled by line wrapping and not human-readable ...]
\x4e\x4e\x0a\x1c\x2a\x34\x26\x27\x6f\xd9\xc1\xc1\x16\xbd\xf1\x0b\x8b\xf3\xcf\x3b\x3c\xe0\xe7\xd5\x33\x2f\x4d\x14\x20\x26\xbf\xe8\x6f\x31\x18\x45\x77\xe4\x15\x93\x75\xc0\xcd\xb3\x7d\x20\xf3\x72\xb0\x83\x6c\x8a\xf6\x4b\x35\x19\x8b\x32\x99\xeb\x94\xff\x8a\x8a\x31\xbf\x2f\x39\x3f\x4d\x8b\x2a\xb0\xbe\x5e\xcd\xb5\x6e\x67\x20\xd4\x10\x16\xcd\x43\x5e\x6f\x25\x4f\xd8\xdd\x2e\xf9\x00\xa4\x6e\x03\x67\xae\x4f\xe9\x6c\xe1\xe0\xed\x3f\x2e\x2d\xe1\x9f\x5e\xc6\x66\x1c\xde\x7f\xba\x5b\xbb\x27\xb8\x6b\x12\x16\xd6\x43\x52\xc1\x43\x49\xf0\x56\xb1\xd1\x10\xc0\xde\xd4\xce\x1f\xb9\xcd\xba\x22\xf0\xc3\x05\x76\xf5\x20\xb0\xd1\x3b\xb7\x4f\x97\x5b\x84\x73\xae\x8a\x4d\x61\xbc\x29\x35\x60\x45\x26\x69\x40\xc4\x4c\x59\x99\x07\x52\xc5\xd5\x8c\xf0\xfc\xf1\xaf\xc8\xf9\xe6\xc3\x87\xfb\x4b\x96\x67\xa7\xe3\xc6\x67\xb0\x0a\x20\x7d\xca\x1a\xe0\xf8\xe6\x6f\x40\x0d\xa0\xbb\xf9\x83\x5d\xe6\x5b\x14\x84\x5d\x00\x85\x7a\x6b\xba\x66\x60\x61\x65\x07\xdf\x06\xaf\xf8\xf4\x39\x78\x8a\x71\x2e\xab\x8a\xef\x1b\x06\xd8\xae\x74\x4b\xfd\xff\x6e\x95\x84\x8e\xdc\xba\xc3\x9e\x06\xb2\x6d\xc6\x82\x11\x70\xd4\x9e\xe9\x5e\x9b\x69\x8e\x7d\x30\xd4\x10\x13\x14\xe2\x82\x76\xcc\x0e\xb7\xa8\x71\x91\xa9\xb1\x09\x77\xe9\x52\x04\xf7\xf7\xa1\x4a\xc4\xd6\x54\x64\x03\xe4\xde\x05\xb0\x9a\x75\xbd\x01\x9d\x3c\xac\x99\xe8\x03\xf3\x7b\x38\xfd\x0d\x3f\xf2\x8b\x00\xf7\x77\x28\x0a\xe6\xca\x70\xff\x1a\x5e\x2c\x56\x93\x68\xe4\xfc\xc6\x6c\xfd\xe1\x70\xde\x45\x47\xbe\xf1\x57\x0f\x3a\x29\x8e\xf2\x5b\x44\x74\x84\x39\x50\xb1\x66\x57\x39\xc3\x6f\x17\xd2\xf9\x22\xd8\x19\x56\x52\xca\x57\x28\xa1\xe1\x08\xf7\x3e\x2b\x8e\xfe\x29\x34\x83\x1f\xe2\xf1\x51\x7a\x11\x5f\x56\x1f\xc2\xf2\x49\x4b\x62\x60\x2f\xbc\xd9\x3e\x07\x86\x27\x0f\x6b\xd2\xb4\x4d\x0e\x2e\x78\x4a\x6b\xd1\x5e\x63\x1f\xe0\x6a\xc0\xab\x78\xef\xd6\xce\x1c\xb4\xf9\xba\x3e\x1c\xa6\x21\xdf\x8e\x13\x3d\x19\x24\xd0\xa2\x28\x2c\xde\xaf\xa2\x9a\x03\xfe\x36\x42\x0b\x3d\x2d\x4c\xd6\x7b\x77\x98\x31\x80\x41\x27\xa6\x9c\x15\xa4\xa1\x3c\x38\x7e\x7c\x53\xf0\xe1\x0e\xd5\x86\x4a\xa3\x71\xa5\x81\xc2\xfe\xc2\x02\x70\x4d\x2d\x1d\xd0\x75\x06\x84\xf1\xe8\xcc\x74\xda\xad\xfa\x7c\x86\x17\x91\xf2\x9e\xdd\x7e\x48\xc5\x5c\x0a\x9f\x04\xa4\x1e\x79\x35\x75\xc3\x6e\x9e\xe0\xdb\x5b\x56\x42\xf2\x50\x70\xf3\x3f\x66\x1c\x1d\x95\x1c\x7c\x2b\x4a\xfe\x77\x8b\x99\x7f\xd4\xb1\x71\xdd\xf2\x7f\x61\xd3\x9e\xe5\x57\x67\x5b\xc6\x97\xff\x14\xf5\x5b\x9e\x74\x43\xc3\x5f\x9a\x15\xdd\xf3\xc9\x82\xce\xfd\x90\x7d\x95\x3c\xf8\x1b\xc6\x2d\xa7\x4a\x37\x4f\x38\xd9\xeb\x1d\x62\x95\x5d\x2e\x0b\xab\x07\xd3\xee\x0b\x15\x33\x52\x13\xe8\xda\xf0\x4c\x70\x1e\xc6\x7b\x58\x27\x62\xa3\x3c\xf5\xe8\x3f\x1a\xef\x52\x08\x1b\xad\x2d\x2d\x72\x71\xdf\x46\x05\xc1\x6e\x7d\x15\x70\x68\x37\xc8\xf7\x4b\x4b\x6d\xbc\x46\x1c\x23\xbf\xe0\xf1\xcc\x81\x32\x19\xf8\xf5\x68\x86\x93\x4a\x02\x61\x42\x15\x41\xba\x81\x8e\xf0\x89\xa3\x8f\x2d\xfd\x71\x3e\xfb\x39\x97\x02\xd1\x8d\xaf\x55\xbc\xdd\x92\x5b\xaf\x6e\xff\xaf\x29\x71\x70\x59\x57\xbb\x8a\xf8\x99\x05\x6d\xbd\x4a\x44\xda\x52\xac\x7a\x07\x51\xec\xfe\x69\x96\x5c\x07\xf9\x3d\x88\x1d\xba\x96\x19\xd3\x4c\x8d\x95\x85\xc0\xe5\xeb\xfb\x33\x38\x5c\x54\xdf\xfd\xef\x96\x7c\x61\xcf\x99\x85\xe1\x96\xd5\x3c\xac\xfd\x68\xc7\xfd\xa4\x2f\x1c\x27\xfe\x8a\x7e\x08\x0b\x53\xc0\x7b\x6b\x17\x78\x9e\x87\xcd\xf8\xb2\xcb\xf2\xa6\xff\x4c\x2c\x07\x56\xb1\x4b\xe1\x82\x48\xd1\x17\x8a\x3d\x0e\x87\x3f\x0c\x28\x5a\x6e\xf5\xbb\x0b\xb1\x68\xa8\x18\x54\xff\x2f\xee\x61\xb4\xd4\xa7\x88\x6f\xb5\xaf\x52\x33\x70\x72\x4c\x00\x0f\x64\x7e\x7f\xb4\xfb\x8c\x85\x5d\x86\x6e\xed\xc3\xda\xb0\x04\x7d\x18\x66\x10\x6f\x3d\xde\x99\xb1\xba\x3a\x23\xed\xa8\xff\xf2\x81\xe6\x4b\x18\x64\xc2\xbd\x4c\xa3\xe7\xe6\xbc\x36\xc
b\xa5\xc4\xea\x43\xb1\x6d\xe7\x43\x7e\x88\x47\xcd\x31\x87\xbe\xb3\xda\xea\x59\x20\x82\xeb\x1f\xe2\xb4\x9a\xcb\x12\x75\xf7\x2c\x67\x87\xf9\x67\x41\x0c\x96\x1f\x2b\xca\x10\x9a\x3b\xf8\x47\x4a\xe8\x2c\x27\x7a\x8c\x12\xfc\xef\x26\x05\x62\xfa\xf1\xe8\x6c\xd5\x71\xbc\x7c\x84\x25\xdf\x74\xc7\x9c\xf9\x1f\x77\x62\xdb\xb8\x5b\x3b\x3e\xa9\xb1\x06\xfb\xee\x97\x3d\x13\x33\x6f\x2b\x1e\x17\xd9\xee\x0b\xf4\x87\xeb\x68\xe5\x3c\x3a\x25\xc6\x7d\x5d\x71\x5f\xd0\xf5\xc1\x4b\xb8\xd6\xb8\x03\xc6\x13\x33\xee\xa0\x37\xac\xea\xc2\x98\xe4\x8f\x4b\xf8\x35\x5a\x71\x97\xc8\x78\x83\x43\x17\x89\xc5\xcf\xc3\xa3\xb9\x34\xfd\x05\x27\xe6\x41\xd9\x26\x2b\x94\x27\x90\x5f\xff\x94\xa8\x80\x4f\x95\xd4\x59\xba\x0a\x6c\x0f\x4f\x79\x38\xc8\x67\x38\x23\x0f\x45\xb3\x5c\x84\x75\x3c\x5c\x52\x50\xac\xae\x65\xb8\x2d\x7c\x93\x56\x6c\x6b\x72\x4e\xe2\x16\x8c\x32\xeb\x95\x47\x55\x59\x25\xb2\x2b\xdd\x7e\xc6\x47\xbc\x24\x0b\x0d\x76\xea\x32\xfc\xae\xbf\x77\x99\xc5\x42\xfc\x57\x7d\x56\x88\x98\x60\x15\x4b\x04\x74\x5d\x90\xf9\xf3\x20\xe6\x35\xd2\x07\x56\xec\x8e\x0e\x17\xbf\xb1\x36\x3a\x0b\xe1\x74\x8e\x58\xeb\x38\x2d\x4d\x50\x50\x54\xbe\x29\xdd\xbd\x36\xeb\xb0\xde\xef\x66\xfe\xde\xb2\xd7\x30\x72\xb2\xfd\xa1\x9d\xc2\x80\xf3\x07\xcd\x79\x71\x39\x5e\x21\x3c\xd8\x77\x29\x75\x29\x42\x70\x61\x42\x0e\xd7\xfe\xb3\x79\x0d\xf6\x71\xc1\x7a\xba\x3c\xf1\xbd\xa4\x49\x7f\xef\x06\x93\xa2\xd9\x15\x69\x3f\x8f\x5f\xfe\x71\x8d\x62\xa9\x57\x6c\xae\x3d\x22\xfc\xd2\x57\x6b\x47\x7d\xe5\x27\x48\x78\xe0\x88\x45\x0c\xbc\x62\xe4\xef\x9e\x25\x61\x3f\xe3\x5d\x67\xb1\xc6\x8a\xb5\x3b\x3e\x1c\x16\x97\xc6\x19\x44\x90\x32\xd3\x73\x8e\x9f\x8f\x44\xfd\x85\x21\x57\x77\xec\x17\x0a\xb8\x85\xf1\x63\x11\x9d\xdb\x6a\x41\xa6\x11\x00\x07\xf4\xdf\x9a\xc3\x3a\xd2\xb6\xd1\x78\x8c\x38\x4d\xaf\xfd\x58\xbe\xc1\x7b\x4a\xd3\xdd\x41\x04\xec\x46\x24\x4e\x88\x07\xd6\xcb\x81\x17\xc6\x5f\x04\x7b\x2d\x6e\xca\xc2\xd2\xd0\xc4\x99\x49\xba\x8a\x4f\x84\x98\x7d\xfc\x6a\x47\x64\x32\xa9\x76\x42\xe2\x31\xa6\x7d\xc5\xbb\x88\x0f\xec\x15\x18\x27\x0e\x6c\x9d\x65\x2b\xed\x02\xad\x3d\xa4\x10\xf4\xaa\x6e\xa1\x22\x14\x35\x08\xab\x5f\x3b\x73\xbc\xfc\x03\xce\xf9\x52\x69\x17\x86\x57\x6f\xf9\x84\xbd\xf1\x0e\xfb\xa1\xff\xaa\xf5\x10\x20\xf0\x72\xd7\xde\x07\x85\xa2\xc2\x63\x62\x5b\x7d\x65\x8a\x41\x5e\x09\xde\x60\x27\xe9\x7a\xdb\xfb\xa3\x60\x03\x4f\x7b\x96\xc3\xaf\x17\x75\x9c\x4d\xde\x6b\x58\x9e\x5f\xef\x90\x8e\x22\x19\x97\xb9\x3f\x79\xa4\xbc\x74\x8d\x60\xa0\x56\xd7\x4d\x35\x58\x7e\xdc\xb9\xae\x14\x16\xcf\x91\xb0\x43\x73\x7b\xbc\x0d\x81\xfa\x85\x76\x00\x1e\x8a\x2d\x9b\xc2\x9a\x99\x59\x9a\x86\xcb\x0f\x75\x40\x7f\xaa\xee\xf6\xa0\xe2\x58\x51\xd7\x10\x3a\xeb\xa8\x13\xb3\xe1\xe6\x9f\x5f\xb5\xab\x20\x54\x20\xce\xcd\xfa\x79\x8e\x81\xe8\x59\x22\xb6\x5f\x96\x7c\xf6\x0c\xad\x9f\x68\xda\x8c\x12\x91\xff\x82\x24\xa7\xb8\xcf\xec\x5a\x15\x17\xe9\x6c\xf6\x9e\x30\xb0\xf3\x3f\x20\x17\xa1\xfd\xa7\x9c\x12\x00\x0c\x36\x6b\x4a\xce\xd0\x81\x06\x36\x0a\xa1\x7b\x6e\x11\x0f\xbc\x5b\xe8\xdb\xe9\x1b\x34\x60\x41\x5d\xae\xb6\x94\x83\xa5\x95\x92\xd0\x22\x3d\x58\x4d\xf2\x71\xa3\xaa\x82\x9c\x9c\x03\x1b\x92\x30\x41\x57\xff\x41\xec\x42\x17\x3d\x41\xf2\xfb\x91\x6f\x19\x59\x58\x13\x42\xa1\xab\x27\xd4\x71\xb5\xd4\x9b\x1c\xa2\xdd\x36\x91\x9b\x8f\xb0\x54\x15\xf3\x17\x66\x17\x1f\x29\xe7\x77\x51\x12\x77\x3b\x52\x16\xe2\x8a\xf0\x9b\xcc\x84\x48\x76\x68\xd4\xb9\xf0\xd2\xef\xc1\x7a\x4f\xf7\x58\x1d\xed\xcc\xc5\xa3\xbb\xf6\xd1\xe4\x02\xde\xa0\x7e\x99\x6d\x75\x10\xc6\x27\xc4\x66\x0b\x6b\x3d\x5b\xb8\x5b\x84\xd3\x00\x0f\x4d\x6a\xf9\xc6\x58\x79\xe7\x90\x1c\xa4\x80\x2c\xe8\x36\xff\xa5\xee\xa9\x15\xfa\xd2\x2b\x6e\x97\x35\xc7\x76\x89\xda\xec\x8a\x29\xea\x8c\x99\x5e\x83\xf0\xb2\x6a\xb3\x
9b\x11\x7a\x3d\xa9\x48\x18\xc1\xca\x26\xc8\x7d\x2b\xf5\x49\x98\x48\x46\x36\x76\x55\xa6\xb0\x67\x3b\xfc\x72\x85\x3e\xe2\x63\xb8\x14\x22\xb0\xba\x4b\xe1\x66\x20\x07\x56\x65\xa2\x06\x41\xd7\xb5\xaa\x81\x0a\x2e\x9a\xd0\xb2\xd0\x4d\xe5\x6a\x6a\x89\xc9\x5f\x13\xeb\x24\x11\x09\xcd\x42\xec\x23\x91\x2a\xcf\xd5\x05\x45\x6d\xcb\x27\xaa\x27\x4b\x3e\x24\xcd\xb2\x22\x07\xd9\x30\xf5\x72\xbc\x0e\xbb\x8e\xf0\xff\x4a\xe5\x4b\xff\x2f\x99\xac\x2e\xd0\x95\x12\x00\xf4\x44\x7b\x0a\x5b\x9f\x32\xd4\x5b\x12\x0f\x0f\xf1\x0e\xef\xa3\x28\x2d\x42\x85\x9a\xe7\xff\xb1\xc2\x4c\x47\x4b\x9b\xd8\xce\x66\x1a\xb6\xf5\x8b\x0a\xc6\x95\x49\x82\xc9\x89\x84\xd6\x70\x34\x1e\xe5\xec\x17\xb7\x7f\xfa\xc7\x7b\xf0\x88\x56\x61\x85\x4b\x10\xf1\x67\x04\x22\x68\x59\x9a\xc4\xa2\x84\xe2\x13\x2b\x89\x2d\x72\xd8\xcf\xda\xdb\xce\xf9\xc8\x1c\x71\xe8\x33\x47\x65\x7f\xd5\xdf\x81\xbf\xb2\x6a\xef\x28\xfa\xc9\x98\xcc\x92\x71\x78\x58\x99\xe2\x52\x07\xda\x3b\x4c\x7e\x03\x4d\xf5\x54\xf0\xf5\xa4\x0e\x70\x52\xc6\xd9\x04\xe7\x50\xfd\x0c\xa7\x4f\x72\x01\xe7\x2e\xaf\x16\x8b\x0a\x45\x3c\x99\x70\x1a\xa1\x2c\x13\x87\xe7\xb7\xb2\x66\xdc\x84\x44\x6e\x7a\x02\x93\xc6\x1f\x53\x1e\x89\x43\x9e\x11\x4b\x7e\xec\x2c\x29\x0f\xc9\x31\x08\x58\x8e\x76\x21\x04\x43\x71\x15\x4d\x74\x14\xaa\x6d\xdd\xa9\x2c\x43\xde\xe1\xc6\x5d\x9b\x59\x77\x38\x75\xe7\x98\x13\x5b\x91\x29\x39\x6c\xa3\x3a\x16\xcf\xdb\xf5\xef\x00\xa5\x84\xb0\xe1\xb1\x05\x9b\x31\x78\xa2\x10\x5b\xb8\x69\xc8\x1d\xc4\xfa\x27\xda\x49\x87\x1e\x90\x31\x2c\x53\x24\xe4\x61\x88\x47\x78\x7b\x94\x08\x63\x2e\x60\xa7\xe5\xc5\x67\x8b\x09\xce\x07\x99\xec\x59\x29\x15\xf6\xe2\x94\xd1\xff\x81\x15\x86\xe3\x86\xfa\x52\x08\xcb\xbd\xc1\x84\x55\x04\x87\xc5\x4a\xed\xc9\xf2\x54\x1a\xac\x71\x66\xb0\xd3\xbd\xf5\x01\x4f\x0d\xc0\x31\x18\x95\x78\xf7\xf4\xf8\xae\x88\x5a\xe2\xd5\x38\xe8\x36\x02\x17\x31\x86\xc2\x3e\xac\x0f\x79\xc6\xaf\xe0\x85\x48\xe7\x64\x36\xd2\x90\xff\xd8\xdd\x01\x41\xa1\xa5\xdb\x90\x7b\xcd\x85\x73\x0a\xbf\x5e\x98\x9c\x88\xea\x98\x5a\xf5\x1b\xf6\xd9\xf0\x43\x08\xed\xec\xbb\x99\x96\x6d\x4d\xcc\x71\x74\xac\x89\x35\x36\xb5\xa2\x57\xc1\xe9\xd0\x45\xf0\x4b\xa4\x3e\xe9\xb3\xb8\x22\x3d\x0c\x5c\x06\xc1\x8b\xf0\xa3\x50\xd9\x28\xcf\xed\x96\xe7\xd3\x35\xf9\xb5\x4b\x62\x05\x18\xda\x5b\xfa\x8f\xcd\x58\xe0\x84\x35\x1c\xc4\xf8\xc3\xbc\x4e\xa4\x2d\x78\xbd\xce\x36\x48\x3e\x24\x88\xfd\x31\x0d\x23\x5a\xcd\x0e\x5a\x89\x66\xa3\xc9\xf2\x8c\x05\xc1\x23\x56\xa6\x87\xa8\xb6\x84\x2b\x49\x3e\x94\xc4\x7e\xfc\x32\xaa\x8e\xa8\x2a\x2f\x3d\x0e\x81\x27\x0e\x24\x39\x62\x57\x67\xd2\x64\x38\xbf\x83\xdb\xe2\xdf\x0c\xa4\x5f\x30\x63\x5c\x9f\x9e\x6d\x55\xe8\x88\x06\x6d\x11\x1f\xc1\x15\xf6\x55\xe3\x28\x16\x4d\x4c\x52\x40\x3e\x68\x07\xe4\xc1\x68\x5f\x3f\x53\xe4\x61\xaf\x3a\xac\x09\x89\xda\x82\x11\xf3\xfd\x45\xad\x26\x79\x38\xe3\x71\xe0\x48\x2c\x0d\xcc\x7e\x4a\x1d\x63\xe5\xbf\x99\x26\x2c\x30\x47\x8d\xb1\xf7\xea\x2f\xb8\x2d\x3f\xc0\x3a\x69\x99\x3a\x2a\x77\x9e\xf4\x40\x17\x8d\x9c\x3f\x00\x92\x7b\x07\xb1\x48\x8a\x85\xdf\xf9\xa3\x29\x75\xf6\xf0\xc5\x4a\x93\x33\xc5\x0d\xbf\xb9\x0b\x13\xaa\xcc\x6d\xf8\x9d\x39\xeb\x78\x67\x96\x6f\x72\x43\x09\x7a\x22\x88\x9e\x5e\x86\xf2\xaa\xf0\x92\x7d\xf1\xa7\xfd\x2a\x7a\xb9\x5f\xbe\x3f\xbc\xec\xd3\xa0\xe1\xe0\x9f\x3c\x64\xc4\x51\x15\x58\x9d\xd3\xf3\xec\xa8\x7f\x3f\xf5\xa8\x55\x95\x3b\xdb\xc4\xab\xde\x1a\x6c\xdb\x16\xe8\x5f\x58\x57\x0a\xa8\xac\xe2\xb0\x7b\xc8\x17\xc1\x53\x14\x69\xe9\xca\xe2\x63\x5f\x9c\x0e\x5c\x34\x4d\xb8\x1c\x49\xf7\xaa\x0c\x66\x6a\x78\x74\xd7\x34\x95\x95\x10\xfd\xca\xb6\x8c\x00\x32\xd4\x2c\x21\xf3\x2d\x0a\xbc\xcc\x41\x49\xcc\x63\xd8\xac\x25\xe1\xa8\xe7\xbe\xa4\xb7\x17\xa6\x56\xea\xb2\x32\xf4\x29\xf6\xcf\x3e\xfd\xe3\x9f\x5c\x15\x09\
xb6\xf8\xbe\x61\xbf\x7c\x0f\x58\xc1\x2a\xda\xaa\x69\xf6\xda\x38\x8b\x65\x70\x4b\x10\xe7\x22\x06\xb2\xe9\x94\x79\x01\xac\x13\x0c\xb4\x94\x62\x79\xc8\xcd\xd0\x6a\xd8\x68\xf8\x4f\xeb\x68\x8b\xe4\x2f\xef\x44\x2c\xc8\x45\x2f\xed\xa7\x41\xfd\xab\x2f\xc8\x51\xf1\x82\x82\xe5\x1c\xcb\xb4\xf2\x2a\x3a\xee\x41\x83\xc5\x03\x89\xd9\xfe\x78\xff\x13\x30\x76\xa9\xd6\x81\x97\x1b\x49\xa8\x47\x5d\x83\xd9\x72\x48\xbf\x11\x51\xc1\x18\x3a\xa0\xcd\xc3\x72\xaa\x52\x09\x4f\x9e\x48\x75\xb8\xa2\xce\x27\xe7\xd6\x12\xfa\xc3\xdd\x84\xc4\x69\x67\x39\x0c\x5d\x70\x94\x66\x47\xe2\x65\x89\x12\x99\x67\xfe\x0e\x75\x3f\xcb\x60\xd6\xae\x7b\xd9\xa8\x9a\xf7\x90\x4b\x2e\xc0\xd9\xd3\x14\x1b\xfa\x65\x89\x62\xe0\x3c\xeb\x78\x6b\x06\x45\x98\xfe\x2b\x12\xd7\x40\x48\xa3\x34\xcd\x9f\x0d\x2b\x48\x90\x2c\x45\xf7\xc4\x70\x18\x54\xcc\x4f\xbf\x97\xeb\x25\xfb\xa0\x7a\x5e\x52\xde\x70\xf0\xa8\x54\xee\x0a\xf7\x2a\xbe\x7c\x35\xfa\xad\xad\xaa\x9e\xc6\x88\xdc\x17\xf9\xad\xc2\x13\x60\x6a\xd1\x4a\xae\x68\x9b\x93\x12\x2f\x95\xd9\x5d\x4b\x92\x3e\x84\x52\xde\x34\x75\x43\xb8\x99\x8a\xd7\xaa\xa8\x95\xfa\x4f\xbf\x0f\xbf\x77\xee\x64\xb8\x78\x04\x8c\xda\x6f\xf0\x6f\x28\xa7\x0c\xea\xd7\x12\x5a\xe0\xdc\x24\xa0\xc0\x48\x8b\x6c\x8c\xd6\x5f\x01\x6b\x55\xf7\x41\xef\x1f\xf7\x9d\x53\x5f\x6d\x15\xe1\x3f\xd5\x34\xfc\x89\xf8\xd9\xfb\xe4\xa3\x8f\xc9\x63\x3e\xee\x1a\xe3\xac\xda\xba\xfd\x09\x84\x21\xad\x8c\x8e\xa3\x62\x75\xda\x8d\x2f\x86\xe3\x1e\x59\x2e\x93\x94\x87\xb2\x78\x0b\xdd\xd4\x52\x5b\x60\x1a\xfa\x79\xcd\xc7\x59\xef\x2d\xd8\x45\x6e\x55\xc7\xb6\xb7\xb7\x5e\x76\xc9\xaf\x43\x03\x74\x7b\xbf\xe3\x16\x05\x33\xc9\x1f\x9e\xcc\xb5\x5d\x32\x0f\x57\x38\xd2\x8b\xdb\x48\x65\x8c\xd4\xd9\xf3\xed\x66\xcb\xaa\x7d\xeb\x1a\x63\xf3\xa4\xa9\xf9\x06\x9b\x4a\xc2\x9d\x95\xf9\xd5\xb3\x95\x1f\x2c\x70\x65\x11\x39\x17\x82\xc4\x1e\xd6\xc6\x7f\x82\x47\xbc\x3f\x96\x6b\xe2\x9b\x6d\xcb\xa7\xc6\xe8\x83\x4b\x5a\xfe\xc0\x88\xf1\x8d\xfb\x0f\xb7\x94\x24\x3a\x56\x01\x69\x90\x7e\x6f\x57\x73\x94\x9a\x64\xca\x72\xb3\x53\x9b\xdc\x37\x90\x55\xee\xd9\xd4\x36\xc8\x75\x25\x9b\xca\x6b\x60\xff\x21\x55\x41\x63\xb6\x83\x9c\x01\xa6\xc1\xe0\x3e\x0a\x63\x7b\x3d\xe5\x43\x53\x3a\x7d\x2e\x7c\x10\x26\x52\xdd\x29\xd0\x09\xef\xdd\x1c\xf1\x70\x01\xa5\xda\x4f\x0a\x83\xdd\x08\x1c\xc0\xb5\xac\x8a\x66\xe4\x6b\xec\xe3\xf7\x44\x11\x6c\x78\xec\x79\x05\x0c\x8f\x80\x77\x62\xb4\x8d\x57\x59\xbe\x80\x80\x90\xb2\xe5\x83\xd0\xcd\x08\x2a\x9c\x72\x6f\xb6\xa1\x89\x8d\x37\x11\x48\x46\x7f\x25\xfd\x0b\xe9\xf1\xcc\x29\x29\x3e\xd0\x4a\x16\x7e\xc1\xe2\x1c\xed\x5d\x02\x5c\x46\xf6\x64\x5c\xbe\xb0\x71\xcd\xbb\xff\xd9\xa0\x31\x86\x74\x99\x75\x08\xe6\xf1\x11\xac\xf9\xbc\x35\x7a\x78\x1e\xaa\x44\x6d\x91\x17\x9e\x40\x1e\x32\x47\xe4\xb7\x56\xa2\xbe\x6b\x47\x81\x0f\x19\xbc\xcf\xe5\x73\x51\x03\x54\xfe\x87\x3d\x8c\x1a\x1f\x0d\xeb\x1c\x97\xf5\x47\x78\x22\x70\xa6\xcf\x8c\x39\xb7\xe9\xc5\x02\x09\xf9\x3c\x05\x9f\xe1\xf2\xfe\x14\xb5\xd7\xd9\x54\xf8\xa6\xac\xed\x01\x1c\x2a\xa7\x6a\xda\xdf\xa6\xe4\xb9\x70\xf8\xd3\x86\x36\x04\xff\xdd\x4b\xee\x93\x35\x37\xfc\x77\x55\x68\xdb\x0b\xdb\xcb\xd3\x66\x09\x8a\xe6\x70\x70\x9f\x43\x7c\xab\x1c\x3b\xe1\x55\xe3\x0f\xcb\x33\x62\x5c\x97\xbe\x90\x9b\x89\xf8\xb9\xac\x15\x77\xb4\x21\x67\xd9\x03\x2f\x2e\xb6\xec\x63\xa2\x04\xd2\x75\x20\xc3\xb7\x6d\x19\x6c\x94\xa0\x9f\x16\xf3\xc9\x96\x00\x2e\x20\x02\x7b\xb0\x32\xff\xd9\xde\x90\xa3\x69\xbb\x83\x78\x9b\x76\x52\xb1\x15\xea\xed\x94\xd7\x63\x44\xfa\x10\x92\xfe\xc9\xc4\xfb\x96\x62\x0f\x39\x91\x44\x1c\x3d\xf3\x9e\x91\x25\xd9\x09\xc0\xb3\xb5\x86\x39\x7d\x6b\x6f\xad\xc5\x79\x5b\x09\x70\x04\x9f\xa0\xea\x4f\xb5\x3b\x49\x90\x35\x88\x79\x2c\xf0\xc3\x3c\x14\x4c\x02\xc9\x92\x52\xb8\xec\x00\x38\xcd\xb2\x0e
\x8c\xe4\xd6\x23\x6c\x05\xeb\xff\x47\xd6\x9b\xa5\x3b\xcb\xf3\x4c\xa3\x73\xc9\xf1\x3f\x29\x03\x0e\xb0\x68\xcc\x0b\x38\x79\x92\xd1\x6f\x95\xaa\xe4\xdc\xdf\xb5\x8f\xf0\x4a\xb7\xd2\x80\x6d\x95\xaa\xf9\x2f\x6e\xb1\xaa\x9c\x8e\xff\x30\x14\x8e\xcd\xbf\x1c\x09\x61\x1a\xcc\x01\xa4\x44\x2d\x64\x1d\x84\xc9\xb0\xc1\xd9\x25\x30\x22\xd5\xdf\x77\xfa\x0a\x09\x0a\x42\x98\xed\xfa\x03\x83\x5e\xa5\x31\xa2\x12\xad\x5b\x45\x23\xee\x20\x8e\x75\xc3\x97\x2e\xf3\xbd\xa0\xe6\x73\x0a\x01\xb4\xe9\xa7\x60\xb2\x8e\xed\x49\xb7\xd0\x59\x69\xc1\x06\x69\x90\x5d\x0f\x31\xf2\xa3\xfb\x03\x77\x69\x95\x76\x68\xf1\xd3\x3e\x31\x23\x39\xdd\xbb\x5f\x0f\xb6\x3f\xf4\xcf\xfb\xa0\x6e\xc8\x0f\x9d\xc8\xaa\x88\x85\x21\x15\xd5\xa1\x7e\x0c\xd6\x21\x89\x39\xc0\x44\xe6\x7f\xb2\xa0\x52\x97\xb8\xf9\xf8\x7e\x70\x1b\x3e\xd7\x97\xf3\xd6\xa7\x68\x3e\x79\x73\xff\xf4\xae\xbb\x28\xa7\xc1\x16\x79\x2b\x34\xec\x05\x5f\x4a\xba\x0a\xfb\x6e\x9a\xbe\x82\xf3\x40\x86\xdd\x2b\xbd\x28\xef\x4e\x57\x17\x8b\xf2\x2b\xed\x30\xe6\x10\x4c\x3c\xb0\x26\xae\xa3\x2c\x27\xdd\x7a\xb8\x61\x59\x77\xf5\xb3\xc0\xde\xef\x38\xc8\x1b\xa7\x52\x0a\x77\xcb\x1e\xce\x63\xf9\x7e\x18\x31\x27\xb9\x5b\xe6\xb3\xa1\x77\xb3\xf2\x42\x89\x07\x9c\xac\xef\xa4\x6e\x2c\xa2\xfc\xf8\x72\x8a\x22\x42\x93\xc0\xb1\xd1\xb7\xac\xd5\xaf\x7a\xf7\x00\x62\x39\xde\x9b\xa9\x05\xbe\xc3\xd6\xdb\xc7\xf7\x29\x9f\x40\x18\xf9\x3c\xc4\x34\x75\x19\x0e\xf7\xdd\xf0\xa5\xd3\x80\xdc\x52\xd0\xde\x7d\xe0\xdb\x63\xa1\xd0\x63\xbc\x36\x3a\xdb\x18\x80\xfa\xa8\x30\x86\xa3\x91\x4d\x25\x1b\x2a\xff\xc7\xe4\x87\x5d\x6c\xc0\xa7\xfe\xef\xe7\xf0\x20\xb4\xf5\x91\x8b\x97\x8d\x08\x34\xcb\x9f\x47\x26\x54\x61\xd4\xd3\xb4\xe3\x53\x18\xfd\x30\x68\xee\xca\x49\x0b\x1e\x29\xa7\x3f\xa7\x64\x6d\xc8\xd3\xfa\x25\x76\x7c\x29\x62\x5e\xdb\x53\xe0\xca\x34\xcc\x40\xd4\x7c\x83\x9c\x69\x94\x81\x7d\x37\xb7\xd0\x11\x78\x79\x7e\x3a\x29\x6d\xc1\x72\x98\x83\xe6\x80\x82\x2d\xa8\x08\xe0\x38\x04\x16\x73\x16\x5b\x30\x44\x14\x3d\x7f\x3d\xb4\xb3\x0c\x84\x13\xdd\xb3\xda\x8f\xc3\x2a\x63\x9f\xc0\xab\xfb\x5e\xb2\xd7\x33\xcf\x61\xd1\xac\xfa\xe8\xcc\x0a\x94\xf8\x70\xe7\xee\x50\xb5\xcd\xbc\x2d\x2a\xf4\x7f\xd5\x5f\xcc\xb7\x92\x0f\xa7\x3c\x44\x3e\xf3\xe1\xbb\xc0\x07\x71\xec\x33\xb2\x21\x62\xc3\x9d\x43\x2a\x61\x85\xa2\x6d\xa8\xb8\xb5\x76\x5d\x89\x53\x10\x3e\xc1\x8e\xdd\xcb\x0f\xa5\x56\x38\x62\x52\xe3\x71\x4f\x12\xa6\xc3\x55\x6f\x16\xaf\x00\xf3\x6e\xf8\x9a\x39\x05\x61\xe7\x80\xd6\xcb\x1e\x0d\xe5\xfc\x72\x1f\x12\x3d\xc2\x50\x02\xa3\xc0\xc7\x00\xf7\x50\xff\x43\xd7\xc4\x87\x6f\xa1\x1b\x41\x35\x5e\xc3\x65\x2c\x0e\x81\xff\x08\x3f\xa2\x28\x70\x2f\x6c\x97\xc9\x1a\x69\x4c\xc2\x76\xb6\x4c\x2d\x4f\x96\xa3\xe2\x29\xbd\xd0\xe1\x2d\x3a\x3e\xcd\xb9\x0a\x3e\xb0\x2a\x80\xb7\x78\xe8\xbe\xff\xab\x7e\x88\x06\x2a\x36\xe8\xa5\x05\x35\xaa\xe6\x80\xf2\xa8\x92\x3d\xe2\x22\xa4\xb3\x59\x06\x6d\x47\x17\x5b\x71\x90\x5e\xe7\x06\x7f\xd7\x2f\x01\xee\xaa\x40\x27\xc5\x2c\x93\xb8\x48\xf6\x84\xbd\xb5\xa4\xbd\xba\x2b\xe9\xf9\xf6\x12\x8d\x47\xd7\xda\xab\x2b\x0c\x26\x1d\x2f\xb3\xb5\x10\x27\x5e\x35\xf1\xac\xb3\x58\x69\xee\xc1\x4b\x98\x59\xb6\x59\xab\x67\x61\x3e\x88\x46\xc7\xa6\x76\x0d\x49\xfd\xb3\xc8\x66\x74\x05\xbc\x42\xc0\x65\x61\x2c\xe6\xb2\x57\xe5\x3f\xe9\x22\x07\x11\x38\x16\x6d\xdb\x77\xf7\x12\x56\xda\x42\xc8\x1d\xcf\x32\xff\x92\x6b\x6c\xdc\x8c\x54\x96\xdc\x3c\x80\x8e\xd8\x90\x6f\x81\x45\xf3\xfa\x5c\xb4\x77\xa7\x93\x93\x93\x15\x14\xa7\xbc\xbb\xda\x81\xfa\xa5\x8d\xb6\x41\x6a\x07\xfe\xa1\x25\x44\x27\x21\x5b\x71\x98\x95\xa4\x2e\xbd\x6d\xc2\x47\xf6\x3a\x27\x9a\x57\x4e\xb3\x90\x6c\x86\xe4\x4f\x2d\x69\x71\xca\x32\x8f\xb6\x19\xe6\xdb\x76\xda\x7b\x78\x57\xda\xa6\x5b\xf3\x85\x47\x4a\xf9\x31\x94\x4b\xb2\xf2\x1c\x2b\xb1\x1b\x1
9\x56\xe7\x9d\x7e\x89\x67\x73\x9c\x2e\x59\x99\xb8\xe4\xad\xfa\x86\xdb\xdd\x6f\x39\x52\x14\xc4\x88\x22\x20\xa0\xe5\x9e\x3f\xc4\xf8\x4f\x00\x85\xad\x57\xdf\xe0\xa9\xd2\xfc\x3a\xad\xcd\xe4\x47\xbe\xd9\x29\x9f\xfa\xa6\x9f\x55\x0b\x10\xa2\x2e\xb8\x21\x3e\xa9\x14\x7e\xce\x25\xec\x53\x9f\x6a\x83\x3e\x49\x21\xdf\xc2\x28\xe1\x99\xe5\xa0\xff\x04\x37\xd4\x8f\x9d\x16\x36\xe4\x6b\xfb\x26\xb7\x8a\x0e\x01\x84\xd0\x07\xb0\xde\x26\x2f\x82\xe9\xd9\xa2\x44\x02\x7b\x8e\x90\xe5\x33\x44\xd1\x43\x95\x47\xf2\x10\x26\xec\x03\x88\xc1\xf4\x78\xcc\x6f\x65\x52\xb1\x56\x1d\xca\xe5\x33\x3b\xc7\x91\xff\x39\xcc\xa7\xbc\xb5\x87\xf9\x29\xab\x0b\xdb\x5a\x9c\x0b\xef\xc4\xbe\x5a\x7b\xec\x30\x0e\xda\x23\x0c\x2b\x83\x4f\xa4\x3d\xf1\xa6\x4c\x3a\xec\x98\x1d\xb3\x7f\x38\x17\x63\x8d\x38\x4b\xf6\xc3\x07\x5d\x88\xbd\x9d\xcf\x14\x2e\x60\xaf\xfc\xd1\x31\x18\x18\x95\x5e\xc6\x8e\x58\x6b\x9b\xda\xa6\x8f\x1e\xeb\xf5\x49\xaa\xa6\xbb\x39\x93\x6b\xf1\x12\x52\x5d\x95\x7f\x61\x9b\x62\xca\xef\x4b\xb0\x3a\x40\x58\xe6\xb6\x19\x00\xfd\x1e\xbc\x0d\x6e\x5a\x01\xb6\xfb\xd1\x49\x17\x1d\xb3\x2e\xfa\x19\xd6\x49\x3e\x9a\x6a\x38\x13\xf6\x48\x7e\xf9\x6d\x93\x3d\x28\x97\x9f\x1c\x05\x38\x36\xa6\xdc\xac\x7b\x07\xc3\xf7\xd5\xdc\xcf\x71\x93\xbc\x1d\x24\xdc\x01\x23\x9f\x15\x63\xee\x61\x1c\x1f\x29\xa9\x7a\x99\x70\x75\x9e\x41\x18\x2e\x91\x57\x20\xe0\xb6\x97\xca\x1a\x69\xbb\x0b\xd5\x39\x31\xf6\x53\xc2\xd4\xae\xbc\xb9\xff\xa5\xe7\x91\x00\xe5\xb2\xcb\x5c\x72\x8d\xc4\x4c\xce\xc2\xdd\xda\x78\xb1\xb8\x8e\xe4\x24\x29\x69\x2f\x93\x24\x7c\x40\x05\x63\x17\xf6\x66\x5d\x22\x5e\xd7\xb9\x0f\x39\xd1\xef\x73\x4f\x54\xf6\xa3\x0b\xa1\xa8\xaf\xb5\xab\x59\x36\x93\x11\xfd\xdf\xa5\x9f\x60\x2a\xd5\x4f\xa6\x31\x75\x0a\x57\x49\xd7\x38\x68\x6f\xcc\x9d\x9d\x4d\xb6\xb4\x03\x4e\xc1\x77\x48\x08\x3e\x91\x44\x14\x3e\xec\xfe\x3d\xd9\xb4\x3f\x3b\x71\x28\x59\xa1\xbe\xcb\x5a\x2a\x21\x91\x09\xa7\xc8\x17\x6d\x46\xdf\x26\xbb\x86\x1a\x83\x14\x06\xaf\x9f\x7a\xfb\xc2\xf2\x09\xcb\xc3\xf7\x67\xe4\xbf\x7e\xd7\xd3\x35\x5c\x95\x33\xfb\x7b\xf6\xec\x1a\x42\xd2\x90\x6d\x89\x3a\x6b\xcb\x38\x69\x1f\x13\x04\xb9\x82\xaf\xe7\xeb\xab\x63\xb3\x02\x07\x0d\x2e\x25\x0e\x7d\x0f\xf9\x4e\xff\x05\x27\xf7\x5f\x7e\xbe\xfd\x25\xe7\xef\x71\x94\x63\x85\x82\xc7\x1e\xdc\xac\x13\x8e\xb2\x3a\x59\xd3\x00\x33\xc7\x9c\xd9\x01\x8b\x91\x4f\xe9\x39\x1c\x3e\x61\xc1\x94\x77\x6f\xd1\xf2\xa2\xab\xc2\x38\xea\x4e\xb9\x5f\x75\xf6\xe9\x5d\x37\xed\xa8\x49\x2f\xb8\x4f\x8a\x60\x99\x0e\xea\x83\x00\x6e\x6f\x39\x0e\xdf\xf3\x11\x7e\x97\x6b\x23\xea\xd2\xcd\xc9\xc7\x68\x9b\x09\x77\xcf\x77\x10\x7e\xee\xcc\xa8\xcb\xdb\x49\xfd\x3d\x87\xae\x8d\x79\x00\x32\x5f\xb4\xf5\x7c\xff\xe8\xbb\x6f\x24\x94\x11\xde\xae\x8d\x5b\x51\xb5\x0b\x0f\xff\x4a\x7b\x9b\x1d\x8f\xe9\xa3\x76\x1d\xb6\xf4\x5b\x90\x7e\x5f\xaa\x35\xae\x3b\x24\xd5\xbe\x23\x6d\x00\xf6\x21\x78\xfa\x08\x77\x89\xeb\x20\x7e\x03\x76\x15\xb9\xc1\xfe\x15\x62\x1b\xaf\x6c\x1d\x1b\x32\xe7\xcb\x4e\x85\x21\x9e\xb5\x0a\x21\xbb\xb4\x88\x13\x69\xf7\xd2\xdd\xce\x84\x59\x06\x04\x6e\x40\xe5\x83\x71\x6f\xbd\x2b\xfb\x63\x6c\x9b\xff\x88\x41\x9d\x90\x6a\x1d\x9b\x70\xa7\x83\xb0\x18\x98\x3d\x2f\xcb\x36\xfd\x47\x0b\x45\x3d\xb1\xe1\xe4\x93\x52\xe0\x58\x57\x13\xa7\x45\x68\xee\x95\x37\x39\x0b\x6c\x9b\x82\x53\x15\xac\x71\xe5\xf8\xf7\x40\x08\xb8\x53\xba\xfa\x19\x4a\xfa\x5d\xd0\xbd\xed\x02\x03\xce\x0f\x7f\x85\xe6\x60\xa5\x3f\x1a\x9b\x25\xcf\xa1\x72\xcb\x59\x89\xab\x77\x18\x5a\x21\x71\x75\xe6\xe8\x24\x2f\xf1\x42\x42\x8f\x2c\x3c\xaf\x5f\x10\xdb\x95\x9a\x5c\xff\x92\x50\xfd\x94\xf7\xe6\x59\x23\xa8\x85\xec\x6a\xdf\xf9\x57\x0a\xa3\xdd\x83\x57\x78\x7e\xe9\xa2\x0b\x30\x33\x91\xef\xcc\xff\xc1\x6d\xd7\x47\x11\xe7\x9a\x77\xd6\x13\xfb\xed\xd9\xed\xbc\x2d\x
be\x44\x48\xe5\x34\x48\xcd\x11\x61\x63\xa5\x11\x9e\x59\xe1\xd6\x7c\x7c\x04\xbc\x1c\xa7\xac\xb3\xd0\x6e\x65\x0f\x00\xa3\x08\xb2\x63\x59\x62\x0b\x02\xbf\xe7\x83\x79\xe8\xf0\xd4\x4a\xea\x11\x7c\xc5\xa7\x19\x4e\x12\x79\x8e\xf4\x33\x3e\x98\x05\xbc\x1c\x49\xf5\xb6\x9d\x44\x6e\xac\x75\x76\x2a\xa8\xa8\x96\xf3\x27\x20\xe5\x8b\x26\x59\x36\xd5\xb7\x82\xc5\xdd\xd9\x3f\x3e\x6a\x6b\xef\x9e\xde\x60\x5c\x79\x7d\x20\x9a\xed\x56\x37\x06\xac\x96\xf3\x98\x58\x12\xb8\xca\x3a\x52\xbf\xb7\xf9\x93\x62\x63\xb9\xe1\x6c\xa6\x99\x80\x5b\xd2\x06\xce\x8f\xb0\x69\x92\x52\xe4\xf7\xb3\x29\x36\x0b\xd8\x7e\x4e\xf2\x02\xdd\x23\x78\xc6\xca\x8a\x39\xd2\x65\xd6\xbc\x36\x07\x51\xb7\xd9\x3c\x03\xfc\xcf\xf9\x15\x05\xc5\x1e\x49\x33\xf7\x1d\x22\xb8\xe6\x5c\x20\xa5\x07\xea\x01\xf9\x37\x42\xcc\xa9\xf2\x29\xa1\x56\x92\x52\xcf\x16\xab\x70\x6e\xde\x1a\x53\x27\x31\x5c\x7b\xfd\xec\xe4\x21\x7f\xd8\xa5\x74\xda\x0c\xcb\x87\x65\x11\x0b\x46\x47\xdb\xc3\x7b\x5d\x30\xaf\xc1\x8c\x06\xf8\x0a\xa3\x95\x87\xa3\xfd\x22\xc6\x8c\x51\x65\xe4\x81\x39\x38\x20\x46\xb3\x73\xb3\x7a\xd9\x23\x02\x8c\x6c\xd5\xbd\xf6\xe0\x72\x03\xdf\xc1\x3d\x58\x31\xb6\x3e\x3c\x89\x23\xc0\x64\x54\x7b\x3a\x30\x68\x95\x39\x8f\xbd\x98\x5a\x23\x56\xa9\xa4\x60\x53\x0f\x36\x73\xe5\xca\xd7\xb0\x9d\xc2\x4e\xb9\x5e\xc2\x1e\xa0\xe7\x48\xba\xea\xe5\xe3\xaf\xb3\x9c\x64\x23\x2d\xa7\x0e\x3a\xed\x97\x52\x9d\x6b\x66\x45\x0c\x69\x20\x70\x4c\x50\xd9\xb2\x05\x3b\x3b\x3e\xeb\x92\xc9\xf9\x59\x52\x44\x20\x2c\x11\x3d\xb9\x24\x01\x97\x7f\xde\x93\x54\xcc\x4e\xab\xbd\x6d\xb5\x67\x51\x62\xe5\xc9\x4b\xbf\xec\xc4\x96\x8e\x1d\xd4\x69\xb8\x44\xba\xde\x20\x12\xe6\xf0\xb9\xa7\x51\x41\x3c\x3b\xf9\xd3\xb6\xff\x61\xf9\x52\x82\xc6\x34\x51\x84\x3c\x15\xba\xed\x4f\x0e\x76\xad\x9a\x11\x11\x03\x91\x3b\x6d\x44\xa6\xbc\x89\x5a\xb3\x86\x13\xd8\x8e\x85\xa9\x39\xd6\xb7\xde\x41\x56\x10\xe4\x94\xec\x6e\x19\x9a\x56\x76\x15\x2e\x9c\x64\xa1\xf9\x8b\x34\x1f\x09\xef\x30\x83\x5f\xc1\xef\x8e\x44\xb0\x09\xb6\xb6\x61\xa6\xea\xcb\xf5\x58\xef\xa9\x45\xe2\xd7\xb5\x3b\x83\x70\x32\x0a\xdc\xa0\x2b\x2a\x4d\xc0\x4e\x36\x5d\x50\xcb\xda\x2e\xc1\xb1\x82\x30\x39\x1c\xcf\xc8\x06\x64\xcd\x88\xbe\x44\x94\xb5\x63\x91\xb3\xcd\x38\xdb\xd9\x91\x1e\xcd\xc5\x41\xf1\xfa\xab\xba\x17\x88\x34\xf1\xb5\x90\x85\xd8\x83\xb2\x41\xde\x09\x22\xa0\x3a\x16\xf4\x7a\xf8\x75\x23\x54\x1e\x55\x75\x78\x40\xe0\xf1\xb6\x80\xad\x05\x4a\xe4\xff\x4f\x0d\xed\xe7\x3f\xe2\x01\x28\x06\xc3\x3e\x35\xd4\x36\xcf\xf6\x51\x9f\xaa\x80\x9f\xc0\xe5\x6f\x95\x5b\xf6\xe6\xed\xe4\x73\xe8\x22\x8b\x00\x9f\x2f\x6f\x8f\xd0\x8b\xd2\xdd\xe7\xa2\xc0\x52\x25\x96\xae\x9b\x60\xe4\x50\x9d\x00\xed\x95\x4b\xf5\x95\xc3\x7e\x61\xd5\x0e\x43\x79\xb3\xcb\x10\x05\xd8\x3f\x61\xb3\xc3\xec\xde\x34\x3e\x7a\x06\xaa\x0d\x75\xb0\xce\x0f\x7b\xe7\x3c\xed\x59\x42\xb1\x34\x12\x29\x12\xfd\x85\x93\x09\xad\x18\xee\xac\xb9\xfa\x6c\x0b\x25\xef\x6e\x4e\x07\x03\x08\xfb\x72\x75\x85\x16\x81\x44\xcd\x21\xf1\x7d\x25\x2d\x1a\x43\x3a\x43\x79\xb8\x6a\x13\xe5\x55\x18\x2b\x2d\xda\xf1\x80\x23\xef\x75\x54\xb5\x0b\xce\x5f\xa5\xa7\xfb\x8e\x8b\xd6\x55\xbc\x9c\x11\x38\xec\x49\xe4\xe1\x25\x51\x69\x3a\x0d\xf3\x4b\x16\x5c\x9b\xf3\xf1\xfb\xf5\x13\x9d\x06\xc5\x9a\xf5\x39\xde\x88\x0b\x13\x67\xc9\x0f\xe9\xd7\xe5\x48\x0d\x49\x31\xb6\x43\x8d\xbc\xe2\xf0\xc2\xb1\x65\xea\x90\xea\x70\x15\x38\xd5\xd5\xf9\x3e\xc5\x23\x1a\x68\x0f\xdb\xad\x9c\x3d\x3b\x99\x61\x80\x73\x1e\xc5\x92\xf7\x8f\x83\x77\x5e\x14\x62\x54\x4a\x10\xc4\xc5\x90\xb5\x12\xea\xc7\x3f\x3f\xa1\x21\xe6\xbd\x60\xd9\x6c\x1a\x0d\x72\xb2\x88\x67\xda\xb2\xdf\xc8\xeb\x9f\xa8\xac\x7e\x2e\x0b\x1d\xdc\x25\xfd\xe8\x62\xfb\x87\x97\x59\x55\xfe\x5b\x5e\x71\x45\xf5\x35\x7b\x9d\xcd\xf2\x2b\xaf\xca\xe8\xec\x24\xc1\x75\
xa3\x33\x02\x1e\x36\x6c\xbc\xf6\x95\x65\x2f\xce\x88\xb2\x89\x05\xcf\xb5\xb2\x93\x6f\x73\x87\x28\x33\xd6\x67\x39\x28\x58\x9e\x24\x61\x05\xdc\x35\x45\xe1\x76\xb2\x9b\x22\x6f\x8f\x54\xff\x23\x0a\x02\x9d\x20\x7f\xbc\x24\xde\x51\xa1\xc1\x76\x02\xda\x4a\xe6\xb8\x4d\x3d\x07\x67\x75\x80\x28\x44\x31\x51\x92\xd1\x13\xcd\xe5\x2f\xfe\x54\x7e\x1d\x29\x84\x9e\xa9\x1b\xd0\x5f\xc5\xf3\xbe\xa2\x95\x7c\x23\x38\xf8\xeb\xb8\xde\xdb\x47\x7f\x73\x9f\xd9\xd5\x58\xfc\x27\xf8\x04\xf3\xec\x0d\xc9\xae\xbc\x32\x6c\x57\xd8\xdc\xea\xdf\x10\xf6\x68\x20\x03\x0e\xab\xce\xc2\x8a\x23\xd4\xdc\x1e\xba\x24\xee\xbc\x0d\xc3\xec\x36\x47\xe0\x21\x7e\x52\xc5\x9a\xbe\x73\x0b\xec\x79\x4b\xd2\x6d\xe5\x59\x78\xac\x59\xd9\x66\x67\x17\x26\xf8\xd7\x29\xdf\xd6\x17\x34\x01\x5e\x8a\x15\xbd\x15\x08\x44\x75\x15\xbd\x50\x5a\x53\xdb\xec\x74\xfc\x9e\x5c\xab\xd7\x1c\x62\x2b\xe4\x3f\x4a\x3a\xb9\xc7\x6e\x19\x32\xca\x7f\x54\x94\x36\x78\xb1\xf7\xe7\x7f\xa0\xd3\xac\x3f\x92\x15\xf7\x9f\x9f\xe9\x6e\x2f\x74\xaa\x5e\xbc\xa2\xec\x3e\xf2\x97\x70\x3d\x71\x20\xac\xd3\x26\x26\xed\x83\xef\xf2\xde\x9b\xe2\x52\x71\x55\x50\x26\xdb\x47\xf1\xd3\xcf\x4b\x3a\x9e\xf0\xb0\x71\x63\x94\x54\x8a\x4e\x1f\x4a\x3a\x36\x67\x4e\x7a\xe6\xbe\x4b\x47\xa6\xf6\xf5\x9e\xa5\xf6\xb9\x60\x39\xc4\x5a\xfc\x12\x13\xc0\x8a\x38\x15\x0d\xb7\x3a\xa4\x97\x67\x63\xb3\xb7\xd2\x74\xe9\xd0\xe8\x04\x19\x6a\xde\xa3\x93\x76\x15\xc2\x64\xb6\xb1\x66\x12\x14\x24\x68\x0f\xef\xab\xb0\xe8\x00\x13\xd8\x9f\x6e\x5b\x83\x44\x62\xd2\x14\x14\xa1\x6b\x2a\x6f\x15\x99\x53\xd6\xa3\x72\x64\x75\x5c\xf4\xaa\x45\x80\x94\x5c\x81\x5b\x7c\xab\x73\xf5\xef\x89\x10\x39\x43\xb2\x95\x91\x1d\xb9\x44\x50\x81\xea\x8a\xbb\x40\x45\xa7\x7b\x6f\x0d\x99\x02\xa9\xfb\x52\x13\x83\x07\x35\x37\x87\x5f\xee\x34\xaf\x14\xce\x21\xa8\x8b\x54\xff\x28\x89\xdb\x4a\x22\xef\x82\x7c\xe2\x72\x82\x0b\xb0\xf7\x4c\xb8\xb3\x38\x29\x27\xd1\x52\x8e\x37\xef\xbb\x5d\x26\x72\xcb\xe3\x6d\x97\x0d\xf0\x47\xa5\x4e\x25\x6a\x88\x85\x58\xb5\x4f\x13\xd7\xda\xf8\xdf\x3f\x3a\x9d\xcb\x1e\x4f\xcf\xda\xe7\x79\x73\xc7\x65\xd7\x00\xbd\x90\xce\x94\x88\xd4\x82\x2f\x85\x54\x2b\xaf\x64\xd0\x31\xf2\x8a\xa8\xfe\x32\x51\x0f\x04\xdd\xed\x7d\x52\x53\x26\x5c\x0d\x50\x2a\x85\xbb\xdf\xd1\x64\x53\x47\xa1\x1c\xb5\xa1\x58\x68\xdc\xab\xec\x82\x87\xda\xaf\x19\x76\xcc\x0a\x77\x3e\xb0\xec\xd2\xce\xc5\xd5\x08\xfe\xd8\xbc\xa7\xe7\x2a\xcf\x91\x95\xfe\x91\x18\xc8\x8e\x24\x49\x92\x6c\x03\xee\x91\x61\x3c\xc7\xc2\xcb\x63\xb9\xc3\x61\xa4\x5c\x61\x5e\x54\xce\x6f\x46\x03\x9d\x1b\xf4\x66\x74\xec\x6f\x00\x5e\x81\xf4\x9d\xf3\xb0\x5e\xee\x36\x19\xd7\xa9\xbc\xce\x33\x42\xa1\x11\x9c\xb5\x84\x8e\x7f\xe7\x7c\xbf\xcf\x5c\x59\xf7\x39\x98\x57\x7b\xae\x08\x45\xf2\x91\xae\xda\x3d\x89\x94\xb6\x7d\xe8\x75\xb8\x55\x5a\x32\x6d\xee\xec\x7e\x8a\x8d\x45\x28\x0e\x80\x25\x82\x24\x63\x38\xf2\xd6\xf9\xab\x6d\x2d\xba\x45\xfe\x5a\xce\xab\x61\x57\x06\xe7\xaa\x6e\x53\xd1\xb7\xc6\x67\xdf\xe6\x85\xf3\xdd\x06\x63\x57\x0e\x82\xd5\x89\xad\x3c\x83\x11\xb6\x1c\x6d\xa1\x3c\x8f\xea\x2b\x85\xca\xf5\x4c\x11\xd5\xd5\x2e\x75\xd4\x7a\xbd\x34\x75\x88\x2d\xcf\x11\x3b\xea\xdb\x3d\x0e\x87\xa2\xb4\x09\x10\x12\xe2\xb5\xac\x50\x60\x00\xf8\x96\xbe\x73\x3a\x59\xd8\x55\x49\x2a\x22\xe8\x1c\xff\xe4\x8a\xaa\x30\xd8\x60\x67\x78\xfe\x63\x19\x66\x07\x89\xe0\x14\x0a\x43\x3d\x66\xdc\x9b\x6a\x38\xfd\xfb\x0e\x13\x57\xa9\xb5\xf2\xdb\x5f\xab\x17\x51\x04\x62\xd7\xe2\xfd\x45\xaf\xb5\xca\x2e\x7a\xd8\x25\x7d\x33\xc2\x48\x22\x62\x62\xa7\x79\x0a\x1c\x84\x59\xc4\x01\xb2\xfc\xaa\xf3\x74\x93\xb7\xb8\xfe\x43\x21\xc0\x52\xdf\x52\x25\xdc\x75\x40\xc3\x9d\x7e\x04\x6e\x91\x73\x4a\x72\x31\x46\x93\x73\x11\x22\xee\x16\xcd\xf2\x6a\x5b\x38\xbd\x59\xc9\x76\xa8\xfd\xe4\x00\xdd\x32\xab\x6c\x3b
\xa5\x9b\x84\x64\x36\x85\xaf\x8a\x7d\xe5\x4e\x7f\x41\x33\x49\x35\x1c\x55\xb3\x7f\xf9\x08\x1a\xd8\x19\xdd\x9f\x58\x5e\x66\x49\x47\x27\xf7\xd5\x98\x00\x40\xb2\xb9\x84\xdc\x1b\x3a\xa8\xd8\xef\x37\xf2\x27\x06\x41\x4c\x75\xe6\x74\xd2\xd2\x03\xab\x8f\x6a\xac\x22\x68\x7b\x0a\xb3\x21\x54\x70\xa2\x86\x9d\x2c\x35\x3c\x75\x95\xcd\x2a\xb1\xa9\xa6\xac\x84\xe9\xf0\xd0\x63\x01\x96\x23\xc2\x12\xa5\xdd\x5b\x4a\xdb\x88\x29\x4b\x57\x96\x14\x69\x92\x4e\x6d\xfa\xa7\xaa\xb6\xba\xfe\x29\x61\xcc\x64\x93\x0a\x1b\x4f\x75\x8e\x7a\x6c\x97\x6e\xa2\xee\x3f\x86\x18\x89\x53\xe3\x29\x9b\x95\x33\x54\x24\xce\x0b\x63\xd1\x96\x44\xc2\x1c\x11\xdb\x4c\x4b\xbe\x12\xd0\xd1\x58\x5a\x3f\x2c\x5c\x78\x6d\xe4\x1b\x33\x18\xbb\xc8\xa1\xa5\x74\xf2\x72\xd9\xd5\x9f\x1b\xb1\x33\xba\x38\x18\x64\xe8\x12\x2a\x60\xd8\x49\x91\x47\x96\xc3\xb7\x1b\x96\xd2\xd1\x0c\x0b\xe1\xc0\xa8\xb2\xd6\x4a\xb8\xaa\x7f\x88\x5d\x5c\xb4\xd5\xd6\xc1\xce\x00\x6a\x34\xa2\x2a\x7c\x42\xdc\x13\x66\xd1\xef\x55\xb5\x9d\x26\x27\x9c\x55\x51\xd8\x31\x34\x55\x36\xd0\xe9\x11\x75\x5d\x69\x23\xda\xc1\xec\x73\xbc\x70\x56\x3b\xc1\x06\xb2\xf9\xa3\xff\xd3\x33\x35\x27\xe9\x14\x7a\x97\x27\x5d\xff\x9f\x20\x3c\x68\x34\x9f\x22\x98\x3f\x94\x48\xaf\xec\xb4\x5f\x7c\xf4\xf0\x81\x03\x09\x06\xa8\x8e\xee\x34\xb0\x0c\xdc\xf7\x28\x0d\x97\x30\xa0\x66\xbb\xad\x46\xec\xcf\x50\x3b\xbf\xfe\x87\xf2\x9f\x02\xd5\xf6\x4d\xa8\xe9\x50\x86\x51\xbd\xb6\x4b\x22\xdb\xa1\x39\xa5\x0f\x33\xf0\x2b\x9a\x0d\xda\x4f\xc4\x5c\x8f\xf9\x1f\xdb\xbf\xd9\xf1\x48\xba\xce\xfc\xd4\x1c\x43\xf6\xde\x32\x4a\xc9\x8b\x24\x36\xb7\xd6\xf0\x8c\x61\x52\xd7\x52\xb4\xfa\x32\x25\xc9\x43\x7a\x29\xe3\x63\x1e\xd5\xa8\x1b\x86\xc4\xbd\x5c\xff\x45\xc4\x85\x90\x26\x77\x0f\xa5\x9a\xb9\x76\x1d\x1b\xdd\x36\x5b\x47\x76\x01\x38\x50\x2c\x5e\x91\x58\xcb\xc9\x13\x52\xe8\x39\xe2\x6a\x3f\x3e\x55\xf9\xf3\xdd\xc3\x1a\x4b\x34\x81\xc8\x1e\x34\x5c\xca\x9f\xa5\x5d\x45\x99\x52\x98\xa0\xd4\xc3\x3f\xeb\x0e\x62\xd9\x27\x78\x65\x25\x22\xd7\x6c\x03\x3a\xb2\x01\x98\x90\xa3\x1d\xcd\xb5\x30\xd7\x44\xb4\xa3\xba\x7e\x14\x42\x76\x1f\x76\x9b\x90\x21\xe0\xb5\x9d\x0b\x02\x23\x09\x7f\xc9\xea\xc2\x11\x50\xe9\x20\x1a\x18\xa3\x1b\x37\x45\xe0\x9a\x9c\x4b\x70\x1c\xa5\x68\xf3\x94\x5c\x49\x4d\xd0\xae\xcb\x54\x3b\x75\x44\xa3\x7d\x54\xd2\x10\xa6\x9c\x98\x30\xb4\xa0\x78\x1d\xc9\xc7\x96\xba\xec\x9a\x48\xba\x9f\xd8\xd2\x87\x5b\x84\xbf\x5d\x49\x16\x87\xa5\x15\x95\x9b\x5b\xd3\x44\x85\xa9\x94\xdc\x35\xd2\xf1\xd7\xcc\x32\xad\x9b\xc5\xac\xb4\xc1\xd2\x0a\xc3\x1b\xef\x42\x32\x97\xa8\x23\xa3\x8c\xf4\x44\x68\x1f\xa6\x08\xd8\x17\x75\xcf\x21\xde\x87\x68\x70\xe4\xc8\x85\x88\x65\x9f\xe4\xb3\x6e\x63\xba\xee\x59\x25\xb9\xbe\x33\xad\xb5\xe7\x2b\xda\x7e\x3c\x09\xbb\xc4\xd6\x17\x08\x6c\xe4\x47\x41\x09\x48\xfe\x5d\x6a\x25\x2a\xba\x68\x4a\x90\xab\x5f\x54\xe2\x0f\x6f\x11\x7a\xe9\x78\x51\x06\x97\x04\x83\x00\xd5\xb4\xb9\x15\xa3\x41\x55\x26\xe6\x21\x96\x94\x65\x6d\x92\xc0\xaf\x1c\x2a\xbe\xe9\x5b\x69\xf7\xf1\x4d\xee\xf3\x86\x8f\x81\x28\x15\x99\xe2\x80\x9a\xc1\x34\x3a\x4d\x4e\xb8\x1c\x78\xa9\xbd\x5d\xca\xec\x2a\x95\xb9\xb9\xe9\x58\x79\xa9\x62\x90\xc2\x3c\xd6\x8f\x2e\xdc\x66\x51\x29\x7e\x1e\x06\xe2\x79\xa0\xfb\xa7\x44\xdf\x51\x48\xc2\xab\xd0\xde\xe4\x05\xa3\x24\xaa\x68\x5e\x2d\x22\xec\x25\x0e\x93\x12\x56\xd4\x07\x74\x97\x36\x4a\x54\xaa\xb6\x7f\x6e\xdd\xc8\xde\x5d\x5d\x59\x0b\xd8\x28\x64\x2b\x5a\x43\xd1\x7b\xed\x56\x6a\x5c\xea\x8f\xaf\x87\xc8\x44\xef\x99\xde\xf6\x31\xc8\x6e\xbc\xe7\x9f\x88\x7b\x24\x76\x78\x43\xc1\x38\x71\x90\x25\x7d\xc6\xee\x9c\x3e\xe0\xf9\xed\xba\x18\xc4\xff\x8b\x9b\x47\x8f\x9f\x14\xf6\xd4\x9f\x57\xac\x39\xd0\xc6\xe0\xab\x21\x3b\x0f\x5b\x88\x08\xb3\x43\xe7\xbb\xf9\x3f\x2a\xdd\x0f\xdf\x61\x44\x0b\x9f\x2
9\x40\x13\xe8\xc0\x1b\x03\x6f\xde\xf5\xed\xca\xdc\x3b\x58\x71\xc7\x1c\x12\x1a\x4f\x98\xf5\x42\x6a\x07\xac\xe5\xcf\x73\x58\x88\x8d\xb8\x3d\xca\x3e\x0f\xb4\xa6\xe2\x7b\x2b\x2c\xd0\x82\x14\x74\xcd\x6a\x1d\xca\x37\x14\x1a\x99\x25\x64\xe0\x4d\x4d\xde\x4c\xc7\x00\x27\xdd\x6a\xc8\x89\x75\x07\x6b\x3f\x8a\xd8\x6d\x83\x91\xd4\x40\x3b\x25\xe9\x07\x29\x7a\x0b\xed\x0c\x04\x8c\x91\x2d\x33\x51\x94\x60\x95\x64\x11\x1b\xcf\x2b\x49\xe9\xc2\x63\xe0\x67\xd0\x09\x29\xbe\x1f\x37\xf1\x07\xcf\x3a\x05\x55\xaf\x7c\x7d\x9b\x73\xa2\x74\x89\x66\x87\xad\xe9\x82\xd9\x4f\x45\x7f\xda\x91\x5f\xe5\x59\xd8\xfe\x08\x19\xee\x39\x2b\xc2\x19\x9d\x34\xaf\xf3\x26\x91\xfa\x00\xad\x88\x74\x97\xc3\x88\xea\x74\xcb\x3b\x1f\xd9\x2c\xee\xef\x8d\xee\x1c\x27\x98\xe9\xbe\x53\xb0\xe2\xe4\x9a\x5b\xa5\x7a\xb8\xed\x96\x55\x84\x7d\xa1\x98\xc8\x0b\x42\x0d\x28\x62\x3d\xce\x30\x87\x3d\x4e\x32\xd3\x8e\x93\xb1\x80\x07\x64\x8a\xfe\x2a\x2b\x63\x1c\x8f\x95\x6c\xbd\x03\x84\xdf\x3b\x7b\x25\x39\xef\xa4\xfe\x80\xc8\x35\x2b\xae\x0f\x8a\x43\x7f\x1e\xec\x8c\x5e\x5c\x60\xe1\x4e\xe8\x87\xe9\x0c\xd7\x4a\x0a\x00\xdd\xa3\xdc\x8f\xf4\x57\xb7\xc2\xb0\x6f\xbe\x8c\x47\x52\xef\x2e\x81\x8d\x47\xfb\xa3\xf0\x20\xd0\x0e\xc0\xdb\x17\x82\x22\xe1\xdd\xc9\xca\xb0\xea\x78\x02\x71\xc6\x60\x40\x72\x0f\x19\x1a\x85\xae\x35\x0f\x54\x84\x57\xa2\x6c\x87\x4e\xa2\x3b\x9c\xd1\x9a\xe8\xfd\x24\xd5\xf0\x9e\xa2\x1b\xb0\x55\xc2\x04\x5b\xf9\xb2\xd7\x66\x6b\x1e\x45\xec\xe5\xfe\x2c\xad\x24\x64\xad\x67\x85\x60\x93\xe7\x38\x2e\xf5\x88\x3e\xdf\xc3\x85\x3a\x6a\xa2\x29\xcd\x7f\x9b\x7f\x11\x9f\x1b\x3a\x11\xd2\xf8\xb8\x2b\x2f\x1b\x77\xee\x81\x19\x3c\x42\x39\x98\xcf\xc3\xcf\xff\x28\xed\xd1\xf4\x53\x48\x0e\x62\xc2\xfd\x2e\xab\x02\x97\xf0\x3c\xd2\x44\x80\x1a\x8f\x25\x59\xfd\x97\x0e\x38\xac\xe1\x7e\xf9\x95\xd5\xa5\xf6\x4f\x36\xb0\x39\x85\x7e\x3d\xb6\x2d\x22\x44\xec\x9e\x47\xac\x54\x93\x4e\x11\x1b\xcc\x5f\x3d\x63\xda\x94\xdd\x93\x5a\x98\xfe\x06\x23\x75\x14\x2a\x48\x7e\x9b\xb2\x6f\xaf\xe0\x7a\xa4\xee\x1b\xfc\x20\x58\x39\xd9\xce\x83\x9d\x3b\xa4\x37\xb7\x56\x1f\xfa\x8c\xec\xd8\x65\x7d\x6f\xab\x2c\xd6\x91\xbb\xa3\x1b\xdc\x65\xd5\xbb\x62\xf4\x5c\x70\x19\xbb\x1f\xc3\xb8\xf2\xa4\x2f\xff\x52\xc2\x9f\x88\xe0\xdf\x52\xa6\x75\x93\x90\x7b\x29\x21\xb3\x5b\x5a\x7c\x84\x6d\x2c\x9c\x06\x6c\xcb\x89\x5e\x79\x0d\xfa\x86\x8b\xda\x59\xe1\xc9\xcb\x68\x26\xef\x74\x99\x57\x87\x6f\x96\x38\xe5\x96\xdc\x08\xba\x0b\x1d\x57\x96\xe4\xcd\x9c\xbf\x22\x5f\x90\x3f\x44\x7a\xfb\xc8\x6a\x1d\xab\x39\x7c\x37\x35\x2b\x67\x61\xaa\x57\x78\x22\x39\x63\x52\xba\x9b\x4a\xfb\xdb\x49\xbd\x7a\xdb\x3f\x50\xa0\x8e\x7d\xc4\x02\xa9\x86\xdf\x6b\xd5\x01\xef\x1d\xb5\xfc\x4e\xd8\xd6\xf9\x4d\xde\x77\x7b\x38\xe7\xf0\x9a\xa3\x84\x13\xbd\x64\x82\x21\x25\xc5\xf4\xf6\x6b\xab\x86\x83\xda\x9f\x0f\x03\x4d\x4e\xd0\x93\x87\x8d\xad\xf1\xd8\xd4\xfa\x6d\xdf\x36\xc8\xab\xc4\x32\x53\xc4\xd6\x21\xda\x90\x37\xbc\x17\x1e\xaf\xd6\xc4\x7b\xeb\x43\x87\x11\x88\x47\xb5\x84\xe9\x67\xcc\x03\x53\x1a\x76\x2f\xa6\xbd\x38\xa7\x7e\x88\x09\x0a\xae\xa4\xf7\x02\xad\x34\x57\x9e\xb1\x28\x4f\x6b\x8c\x5c\x43\x20\xc5\x7c\xd4\xa4\xec\x1f\x30\x06\x58\xe1\xb5\x16\xc2\xc8\x84\xfe\x31\x5c\x38\xe3\x53\x7a\x03\x78\xd7\xda\x35\xc2\x44\x42\x75\xdd\x18\x64\xc7\x73\x54\x3f\xf5\x59\xd1\x76\xf4\x13\xe7\x59\xff\xe6\x37\x2d\x01\x30\x64\x60\xcf\x29\x06\xf6\x53\x11\x60\x30\x61\x97\x2d\x54\x51\xa1\xf9\x8c\x55\xf3\x09\x97\xe8\x55\x05\x5e\xa7\x9b\xec\x1d\x11\xe1\x7a\x12\x9f\x91\xcd\x07\x95\x40\x33\x94\xbb\xcc\x47\xd4\x2e\x11\x11\x82\xde\xb6\xbb\xa6\x70\x76\xcf\xe4\x91\xc2\x46\x98\x7c\xc9\x33\xcc\x26\xf0\x5f\x1e\x0c\xc9\xf6\xe7\x12\x69\xfb\x25\x0b\x01\x7f\x89\x27\x4d\xfa\xce\x50\x3f\xc6\xf3\xad\xe4\x03\x54\xc9\x36\x5b\x5d\x
dd\x80\x71\xa8\x30\x04\xc5\xe0\x94\x51\xd6\x50\xae\x50\x15\xf5\xa4\x4d\xbe\x66\xd5\x74\x98\xa8\x28\x20\x3a\x9b\x96\x1c\x56\xa1\xbc\x8d\x08\x14\x8b\x35\x65\xb6\x0f\xe1\x75\xeb\x46\xef\x7e\xf4\xce\x21\x96\x01\xfe\xc5\x64\xa5\xc1\x76\x5a\x61\x4a\xda\xb1\x14\xed\x3f\x92\xb2\xf6\x08\x81\xf2\x0a\xa8\x5e\x08\xa3\x67\x6b\xef\xe2\xc7\xe8\xcf\xc2\x0d\x94\x9b\x5f\x15\xde\x86\x3d\xdf\x2e\x72\x8f\xdb\x5d\x91\xf8\x58\x4e\x01\x43\xb6\x0e\x4f\x7a\xd2\x1e\x06\x57\x5e\xf7\xd2\x1f\x6b\x94\x56\xb7\x0f\x9c\x05\x46\x55\xe8\x5a\x4a\x3b\x84\x25\xe2\x8f\xd6\xa5\x90\xc2\xcb\xf7\xbb\x47\x2d\xa1\x4a\x0e\x64\xac\x4b\x71\x44\x87\x2a\x50\x6f\x1f\xb1\xbc\x5b\x95\xa0\xd5\x7d\x42\x2d\x61\x67\xac\xfc\x01\x6c\x1b\xaa\x3b\xc1\x9e\x8f\x9e\xde\xbb\xb9\x07\x74\x51\x76\xc5\x2d\xf3\x10\x65\xd9\x1c\xe9\x99\x4e\x9f\x94\x5a\x29\x69\x8d\xeb\xc4\x47\xea\x3c\x16\x38\x42\x8e\x9a\x43\x29\xfe\x83\x1f\xb9\xbd\xb6\x6d\x54\x25\x68\x63\x23\x61\x2f\x60\x81\xee\x4f\x42\xdd\x5d\xd1\x4d\xcd\x10\xbf\x34\xa8\xbf\x13\xd6\x8d\x3e\x20\x4a\xf4\xc9\x99\x02\x9d\x32\x0a\xed\xa8\x2a\xee\x96\x5e\xbc\x9b\x2f\x17\xb4\xb2\x8f\x29\x9f\x8f\x6e\x8e\x78\x4b\xd8\x49\x0d\x51\xd9\x31\x6b\x12\x33\x6e\xf4\x08\xcf\x06\xf7\x74\xed\x43\xa3\x83\xc8\xed\x3f\xda\x83\x11\x94\x94\x68\xc4\x6c\x83\x41\x9e\x5a\xdc\x48\x79\x64\x92\xcf\x6c\xc9\xea\x25\xac\x02\xe8\x8e\x7b\x0d\x56\xb7\xb8\xd6\x5d\x44\x42\xfa\x26\xe5\x3e\x76\x59\xbd\x79\x3e\xa4\x73\x13\xa2\x0f\x1f\xc2\x9b\x47\x5d\x86\xce\xd6\xf7\xbb\x76\x95\x77\xee\x94\xc1\x53\x33\x79\xd4\x9d\x0f\x9c\x35\x6a\x97\x73\xd4\x9c\x29\x9e\x3f\x52\xdf\xf1\x9d\x73\xc4\xc5\xda\xec\x9d\x49\x68\xfd\xa6\xe5\xfc\x86\xee\xe1\x53\x27\x3f\x25\x3f\x9e\xfe\x8b\x01\x5a\x1d\xb4\x36\x28\x9b\x2e\x81\xf7\x1c\xa9\xf5\xef\x86\xcf\xd0\xf8\x20\x47\xd1\x07\xe8\xd8\x75\x55\xb6\x34\x3c\x68\xd4\xcf\xce\x64\xb2\x2f\x7f\x27\x8c\xfd\xe2\x16\xee\x55\xb0\xf1\x60\x0c\xdd\xab\xd8\x05\xee\x9f\xf0\x35\xbf\x44\x0d\xb0\x9f\xe8\x96\xfa\x6a\x75\xe2\x1d\xef\x0e\x83\x55\x16\x83\xbb\x1a\x7b\xbb\x15\x8d\x65\x8f\x66\xa0\xfd\x91\xf7\x30\xe8\x5a\x73\x7d\xc5\x38\x55\xf2\x91\xec\x6b\x12\x09\xbb\x12\x55\xf5\x1a\x51\xdc\xad\x5b\x17\x0e\x0c\xf8\x07\x5d\x6b\x77\x04\x8c\xdf\xe5\x60\x61\xb8\xf5\xec\x9f\xdc\x65\x6b\x7b\x51\x1b\xa7\x6b\x96\x75\xc2\xaa\xce\xc4\x1d\x8e\xbe\x56\x42\x36\x13\xc3\x7b\x0e\x61\xcc\x2d\x52\xd7\x1d\xc9\x0c\x48\x5e\x6a\x56\x0c\x36\x5f\x55\x5a\x15\xb8\xdb\x03\x37\x3d\x17\xac\x82\xd8\x2a\x0c\x17\xda\x0b\xf1\xa1\x57\x0c\xfc\xc8\x8d\x8e\xc7\xa0\xab\xd0\xb1\x79\x65\xfb\xc7\x2e\x4c\xd8\x20\x54\x7d\x8a\x09\xbd\x7f\x54\x17\x1b\x4f\xaa\x14\x77\x31\x40\xa3\x71\x77\xc4\x72\x65\x23\xb2\xab\xed\x0a\x0a\x2f\xb1\xbd\x55\x79\x5b\x63\x26\x5f\x76\x7d\xcc\x4c\x1a\x2d\x30\xe7\xf4\x11\xd9\x1c\xb6\xbd\x20\x63\x73\x65\xa6\xd7\xc5\x66\xf2\x35\x33\x99\xd2\x8e\xa1\x8a\xba\x40\x41\x92\xa9\xac\xcd\x09\x61\xc0\xf0\x0f\x9d\x74\x0a\x8b\x50\xd4\x95\x7a\x87\x53\x6c\x56\x9d\xe8\x29\x7f\x86\x57\xe8\x98\x20\xc0\x6a\x5a\xa8\x4b\x61\x00\xb6\x3a\x86\x6d\x84\xeb\xca\xd5\xcc\x04\xdd\x49\xdd\x4c\x60\xeb\x2a\x4c\x73\x84\x9f\x82\xce\xf9\x93\x90\x31\xca\x81\x99\x5d\xde\xd5\x0c\xdb\x10\x11\x14\xed\xf0\x57\x59\xc5\x02\x16\xe0\xbb\x4e\x6a\x23\x5d\xe8\x99\x53\x24\x97\x00\x30\xb2\xf8\xfc\x24\x39\x40\xdc\x42\x60\x3c\x3a\xa1\x63\xb0\xff\x19\x7b\x4b\xcc\xe2\x59\x81\x3d\x67\x04\xe0\xdb\x49\xd2\xa2\xdf\xe7\x95\x8c\x4e\x85\x21\x79\xbb\xb2\x65\x57\xd1\xfa\x61\x4f\xdc\x66\xb1\x4b\xe9\x15\x1f\xe9\xe8\x87\xa8\x82\x56\x8b\x32\xb0\xca\xe6\x22\x68\xc2\xbc\x7b\xc8\xd0\x6d\xba\xa4\xb1\xc7\x08\x1a\x29\x5b\x9c\x00\xb9\xbd\x98\x2c\x68\x5c\x3f\xd8\x88\x64\xe2\xbb\x36\x32\x68\x48\x72\xdf\x70\xb8\xba\xc2\xdf\xf7\xc1\x8a\x0b\x30\xfc\x29\x8b\x88\
x9b\x97\xe3\x0f\xe1\x3c\x94\x39\x6b\x47\x45\xd2\xc3\xc1\xf4\x08\x6c\x05\x7f\xd9\xdc\x17\x7f\x04\x17\xed\x48\x9c\x8a\x8e\x5f\x10\x37\x12\xe7\x07\x99\xf3\x8a\x8d\x9a\x02\x6f\x3d\x12\xbd\xaf\xed\xa8\x58\xfa\x1b\x8e\x6c\x7e\x31\x16\x69\x22\xc1\x2c\xf5\x4f\x6b\x15\x88\xb8\xa6\xc8\x05\x18\x29\x53\x2c\xbd\xbb\x5a\x78\x19\x1b\x97\xd8\x5e\x82\x46\x64\x0f\xd3\x9d\x7b\x7e\xff\x43\x45\x75\x2c\xd9\x87\xb3\x60\x9c\xbd\xa5\x0b\xed\x99\x32\x3a\xdb\xae\xf3\xd3\x6d\x95\x1e\x4d\x9b\xe3\xd3\x94\xb8\xb9\x3f\xf0\x2f\xd4\xe1\x0e\x03\x0a\x15\x8b\x76\x65\xc5\x16\x62\xc3\x62\x12\x37\x87\xc5\xdb\x4c\x34\x1e\xc5\xac\x5a\x89\x73\x2f\xc3\x5d\x58\x2c\x2b\xef\x61\x78\x39\x8c\xb0\xd9\x62\xa9\x0b\x1e\x6d\x4c\x16\xb2\x57\xb4\x28\xff\x57\x93\x03\x88\xae\x8e\xa3\x76\xce\x76\xef\x03\x7b\x95\xe0\xc8\xb0\x3f\x69\x43\x45\x6a\xf5\x03\x3d\x77\xb7\xf4\xf9\x35\x25\xa8\x6e\xf3\xee\xb5\xfc\x29\x1a\x8f\x55\x9d\x03\xfb\xe9\xe6\xcb\x77\x4b\xfe\xc7\xfc\x65\xc9\x3a\x6d\x91\xc7\x35\x0c\x2d\x4d\x9f\xdd\x04\xd0\x5a\x59\xec\xfa\x0c\xb6\xd6\xa0\xae\x7a\x07\x61\x0d\xef\x6b\xf7\x6f\x22\x75\x95\x55\xad\xcb\x1e\x34\xb3\xc2\xc8\xa2\xd1\xbd\xad\xee\xf5\x34\x87\x1c\xc0\x27\xed\x05\x58\xf3\x6e\xea\x27\xaf\x0c\x86\x59\x73\x5f\x3f\x7a\x50\xd7\xaa\xe1\x70\x7e\x3b\x75\xba\xfa\xb6\x7b\xac\xec\x5f\xee\xc8\xf8\x24\xaa\xb5\x2a\x73\xdd\x96\xc7\x1c\xd9\xb5\x5d\x92\xdc\x4e\x5a\x68\xab\x94\xc7\x48\xf7\x9a\x58\x33\xef\xcc\xd8\x87\xf0\x8e\x76\x19\xab\x4c\x2f\xe0\xcf\x11\x61\x5f\x30\x95\x50\xea\xe0\x42\x46\x27\x47\x4d\x27\xb4\xcc\xd1\x9d\x04\xa7\x7f\xe4\x2c\x0d\xbf\xe0\x10\xe3\x29\xd5\x67\x49\x84\x75\x97\x74\x70\xb7\xfe\x57\xa6\x66\x85\xf1\x97\x7f\x91\xaf\x7f\xd1\xaf\x9f\x61\x57\xe6\xe7\x6a\x6b\x04\x4f\x1f\x5b\x3b\x9c\x32\x8a\xf2\x98\xc6\xb8\x6e\x85\x71\x8a\xae\x9a\xfd\x23\x4d\xd0\x0a\xf2\x68\xeb\xff\x92\x7c\x6d\x99\xc2\xca\x6a\x9a\xc7\xb0\xcc\x08\x9b\x2d\xfb\x81\x0a\xbb\xae\x59\x90\xa5\x4d\x07\x4f\x35\x43\xe7\x65\x69\x15\x32\xfe\x6b\x52\x9a\x45\x50\x61\xa6\xfc\x4f\xb0\xed\xe7\x97\x75\x1b\xd5\x6d\x0d\x9f\x63\xd4\xbe\xe4\xbb\xda\xd2\x4e\x07\x0e\xd7\xef\x28\x82\x4c\x4e\x62\x48\xb9\x90\x09\x08\x02\xad\x6b\x23\xc2\xe6\x67\xfc\x93\x41\xbe\x04\xb0\x37\xce\x8d\xdf\xfa\xd9\xd5\x57\xfd\x8a\xb1\xaa\x38\x7f\x9b\x9e\xdd\x39\x43\xce\x27\xa3\xcc\x75\x47\x45\x9f\x8f\xf2\x96\xc6\xb1\xa7\x34\xf0\x25\xb2\xaa\xc8\x19\xd8\x1c\xb3\x14\x96\xb8\x60\x84\x30\x81\x53\xf0\x98\xba\xa2\xea\x58\x3e\x21\xb0\x9e\xf3\xea\xf4\x84\x96\xc2\x9d\x2d\x4e\x72\xe4\x9e\x45\x41\x15\x4f\x60\x61\xfb\x8f\xa3\x0a\x00\xfd\xc1\x36\x25\xeb\xeb\xf4\x52\x82\x06\xea\x5f\xe6\xea\x82\x68\xf0\x80\x69\x32\xbd\xea\x22\xbe\x09\x57\x97\x40\x18\x86\x49\x91\xbe\x7a\xbf\x67\xb5\x42\x68\x04\xdb\x76\x5c\x5e\xde\xc6\x2e\x38\x0f\x71\xa2\x67\xad\x91\xd8\x0c\xc9\xc4\x0f\xed\xcc\x2c\x9d\xd9\x80\x75\xd7\x8f\x1e\x30\x1b\xa4\x56\x76\x0a\xe7\x6e\x66\x2c\x30\x72\x31\xa6\x1c\x91\xb9\xe7\x16\x76\x75\x23\x7c\x5f\x59\xc0\x0d\x50\x82\x89\x68\x4a\xee\x2c\xf8\xf6\xfa\xb0\xc8\xb3\x96\x74\x11\x36\x59\xf4\xdb\x48\xd4\xa1\xd8\x36\xf3\xdc\xc5\x7c\xb4\xb9\x62\x8d\x5a\xf6\xe6\xe9\x63\xfb\xa9\xad\xd9\xa2\xf4\xd0\x3b\x66\x0d\xa8\xf9\xab\xd2\x7f\xd2\x73\xe3\xe1\x94\x55\x6d\xdd\xfb\x33\x87\x43\x46\xfd\x59\x35\x9f\xa1\x1a\x3c\xef\xe8\xe6\xf7\xb0\x1d\x96\x5f\x07\xbe\x07\xe5\x6f\xec\x10\x7d\xb2\xbd\xb9\x0f\x72\x5d\x2e\x6b\xb9\x28\x38\xb3\x21\x82\xbf\x7d\xf7\xd2\x17\x82\x20\x7d\xb0\x49\xb0\x8a\xa8\xbf\xd9\x33\x47\x38\x7b\x36\x3e\xab\x62\x99\x96\x62\xb0\x47\x9d\x2c\x42\x0e\xca\xe4\xa0\xd9\xe2\xaa\xfa\x7e\x45\x99\x3d\x09\xc5\xa0\x60\xe6\x71\x53\x3c\x48\xa4\x65\xf7\x29\xdc\x47\xa0\xa7\xe1\xba\xde\x7d\x76\x4a\xfa\x6a\x70\x77\xc1\xaa\x25\xe3\xf6\x24\x8e\xe0\x91\x40\x6c
\x8a\x36\xfa\x6a\x8d\xa6\x66\xcd\x72\xc3\xb0\x72\x37\x3a\x9e\x5b\xab\xba\x61\x59\xa0\xe2\x79\xce\xb2\xed\xcb\xb3\xaa\xe8\x25\x8b\xcc\xd0\x85\x1f\x51\x57\xb4\x85\xef\xf0\xad\x45\xd1\x6c\x9f\x51\x83\xf0\xca\x73\xa3\x7b\x5a\xf9\xc5\xa3\x56\x6d\x70\x6c\x60\x25\x39\x87\xff\x98\x4a\xcf\x61\xbf\xdd\x4d\xaa\xe2\xed\xc8\x14\x5f\xbb\x1e\xd5\xd3\xb4\x4f\x37\xf1\x98\x9b\x8f\x60\xc0\x10\x1d\x35\x07\x0d\x6e\xf0\x2b\xe1\x2e\x4a\x15\x9e\xe2\x15\x7e\xa3\x40\x0b\x51\x85\x37\xef\x99\x4e\x56\x36\x1d\xe6\xf9\x30\x2d\x81\x45\x31\xab\xe9\xb5\xb0\x37\xea\xaf\x4d\xc2\x66\xfa\x2f\x2e\xd3\x74\x90\x8a\x92\x8a\x2c\xfe\xec\x92\x17\x9b\x36\xff\xd1\x55\x04\x92\x47\x16\xdb\xab\xa8\xab\x09\x25\x0b\xeb\xe6\x5e\x2d\xca\x2f\x16\x0c\x86\x64\x7d\x03\x5c\xfa\x12\x54\xfb\x26\x30\xaf\x59\x05\x97\x0a\x1a\x90\xaf\xe6\x9f\x42\xd8\xe0\x63\x35\x85\x6f\x3a\xde\x1f\x5e\x48\x3e\xb1\x44\x30\x49\x58\xda\xa3\x51\xba\xa9\x30\xc6\xfe\xe0\x17\x1e\x02\x95\xe3\xf9\xeb\x97\x46\x17\xd5\x7e\x4a\x2b\x7d\x2e\x8d\xf5\x4c\x8a\x05\x1f\xb2\x27\x69\x97\xb7\x4b\x2a\x99\xcf\xfb\xb6\x05\x32\x2c\x06\xb3\xf4\x6d\x0c\xd5\xd3\xdb\x7e\xbb\xe8\x92\x6d\xd7\xb1\x45\x9c\xfc\x0c\x36\x10\x73\x12\x0a\xcc\x74\xd2\x4f\xeb\x0d\x8e\xd7\xa6\x73\x09\x81\xc7\x22\xfb\x32\xe8\xca\xea\x75\x2f\xdb\xe7\xaf\x5f\x79\xdc\x6c\xbe\xe0\xca\x4d\xb2\xee\xba\x72\x03\xfe\x92\x08\xc8\x96\xc6\x4a\x4e\x6f\xe6\x14\x54\x57\x32\x6e\xeb\x44\xdb\xe7\xbb\x9e\x36\x21\x78\x6f\xd4\x56\x8c\x43\xe5\xb5\x62\x6a\x6e\xc0\x6d\xac\x9f\x27\x38\x9a\x2d\x73\xa3\xd7\x2a\xc1\xff\x86\x40\x6f\x8d\x60\x64\xd9\x0e\xd9\x74\xcf\x02\xfd\x42\x6a\x05\xc5\x74\x77\xbd\x55\xde\xba\xd9\x36\x87\x27\xed\x6f\x30\xa0\x55\x00\x58\xb6\x39\x24\x90\x67\x6e\xd5\xee\x5d\xe4\x13\x79\xb9\x0b\x8b\x9a\xc0\x94\x4e\x72\x70\xa8\xd0\x6e\x95\xef\x9d\x2a\x35\x7e\x28\xe5\xe3\xa6\x4d\x71\x75\x77\x04\xf4\xf5\x8b\xb2\x0a\x90\xc1\x17\xfd\xd2\x23\xf7\x73\xd2\x50\x05\x78\x79\x87\xf7\x09\xc3\xa1\xaf\x02\x46\x9d\x0f\x86\x96\xa1\x08\x49\x78\xc4\x38\xff\x15\x51\xc5\xae\x19\x5f\x26\x05\x8b\xb3\xad\x9f\x8a\x58\xee\x45\x4d\xbc\x58\x5e\x5d\x93\x9b\xd5\xf8\x08\x19\xbd\xac\xa5\xe7\x97\x3a\xb9\x92\x8a\x8a\x6b\x4a\x65\x25\x8b\xec\xac\xcd\xd7\x25\x34\xcd\xe6\x1b\x16\xd5\xa3\x2a\x11\x6f\xe6\x92\x04\x78\x41\x3b\xc1\x77\xdd\x4f\xf5\xd7\xc1\xad\xb2\xbf\x44\x55\x2d\x37\x5a\x28\x0d\x53\x0b\x71\x0e\x8e\x1c\x86\xf0\x55\xec\x72\x50\x8d\x69\x2e\x84\x82\x9a\x75\x74\x47\x8b\x8b\xb3\x32\x90\xc4\x8e\x76\x8b\x0f\xb2\xb7\x57\xcb\xbb\x85\x01\x16\x56\xd2\x3e\x3b\x88\x04\x7c\x51\xa1\x0c\x3d\x07\x1f\x65\x57\x87\xa0\xf8\x73\xd6\x6f\x7d\xfe\x5a\x75\xa7\x73\x19\x5a\xf0\xcc\xe6\xa7\xc0\xa9\x84\x49\x52\x7f\x1f\x4a\x99\xe1\x0c\xe4\xc5\xb4\xd7\x92\x6a\x91\xd6\x93\x12\xb7\x43\xd1\x30\xb0\xae\x53\x64\x20\x2b\x0f\x74\x77\xfb\xf9\x48\x2a\xb5\x95\x98\x86\xbc\x46\xb1\x7d\xd1\x48\x63\x55\xcd\xc2\x19\x7c\x48\x1c\xc1\x8d\x3d\xd5\xd8\xdd\xa9\x06\x38\x9e\xdc\xf2\xdb\x11\x1b\x63\x38\xb0\xb2\x93\x6b\x67\x47\x14\xbb\x75\x0d\x4b\x17\x77\x52\x7e\xb0\x8d\x3b\xc9\x59\xf1\x5c\x94\x64\xc3\x9f\xb6\x54\x3b\x31\xbc\xfc\x0d\x38\xc2\x6a\x62\x9d\x8b\xe5\xdc\x66\xf5\x7f\x4a\x0f\xaf\xf6\x99\xdc\xfe\xbd\x84\xb6\xdc\x6d\x61\x9c\xc7\x5b\xb8\xcd\xb2\x9a\x4d\x39\x9b\xfb\xac\xc8\x02\xd8\x30\x22\x50\x8c\x45\xae\x7f\xb2\xbd\xb1\x63\x3f\x11\xf5\xbb\x55\x92\x83\xe0\xb7\xc8\x96\x65\xb9\xdb\xc4\xe6\x96\x8b\xa2\xf4\x46\xdc\x20\xda\xfa\xba\x13\x6a\x9c\xca\x32\x79\xf8\xe6\x7f\x2c\x60\xd4\x22\xb5\x92\x8d\x74\xe1\x19\xc2\x37\xd5\xc6\xb3\x2b\xcb\xfc\xf2\xa6\x82\xd3\x81\xad\x0d\x04\xf1\x97\x0a\x63\x74\x3c\xd8\xda\x5d\xb9\x29\x41\x3f\xf7\x62\xc7\x36\xaf\x53\xd4\x4f\xee\xff\x22\xad\xe6\x49\x0e\x1a\xac\x5d\x9a\x6f\x79\x5d\xa3\x7b\x2b\xaa\x32\x8
a\x60\x71\x70\xef\x24\x41\x67\x2f\x76\x18\x98\xba\xfb\xcf\x11\x7d\xac\x52\x6f\xee\xef\x14\x01\xdb\x29\x9a\x3b\x68\xf9\x46\xf6\xe1\xe8\x08\xf2\xaa\x8c\xba\xb5\xf2\xbc\x5d\x09\x23\x5b\x29\x6c\xab\x04\x39\xbc\xb3\xd7\xbc\x75\x60\xc0\xfe\x5a\x85\x7e\xba\x43\x7a\xf4\x76\x1b\xfb\x0e\x51\x6c\xd1\x00\xc5\xd8\xbf\x7a\xc0\xd5\xcd\xc9\x11\x32\x43\x66\x5d\xd3\xfb\x61\xe5\x17\x84\xe0\xc4\x45\x14\x60\x56\xbe\xb6\x16\xde\xf4\x64\x27\x6c\xe7\x05\x76\x13\x0c\xaf\x59\xd6\xce\x36\xf0\x2a\xc1\xc1\x6d\xda\xd1\x00\x34\x60\x8a\x4f\xaa\x4f\x15\xc7\xbb\xfb\xf4\x3c\x54\x46\x47\x11\xbd\xfd\x6e\xf5\xc6\x33\xbf\x8b\xf4\xd4\x31\xb2\x76\xc0\x2e\x7c\x40\xfc\xf9\x8d\x16\xb4\xb7\xa4\xf5\xa6\xc2\x24\x69\x41\xd8\x14\x2f\x2f\x38\x23\xd9\xb5\xc9\xb3\x61\x39\xa5\xf3\x2c\x84\x89\x16\xbb\x60\x16\xf9\xe0\x0c\x11\xb1\xbd\x27\x5e\x0f\x0b\x5d\xe3\xbd\x5a\x5f\xd3\xa4\x8a\x5c\xfd\x19\x2b\xbc\xbf\xb2\xb9\x09\x80\xc0\xb3\x1d\x23\xe0\x3b\xe7\x96\xcf\x23\x5c\x1a\xe6\xbe\xe8\x22\xdb\xd0\x2e\x5b\xbc\x9b\xb9\xcf\x60\xa8\x79\x55\xf9\x89\x30\xc7\xa1\x88\x44\x5c\x0f\x76\x96\x57\x65\x1f\xd8\x28\x72\x1a\x0b\x1a\x30\x1c\x0d\xe9\x50\x79\x2d\x47\x4a\x64\x5b\xab\x87\x6c\xef\x85\xcc\xe2\x5d\xe5\x9a\x4d\xaf\xaf\xe8\x3a\xb3\xcf\x0d\xf3\xc9\x10\x89\x0e\xcd\xde\x32\x0f\xaa\xcb\xbb\x10\x8b\x7e\x5a\xb6\x8f\x3a\xca\x03\xa9\x42\x13\x84\x3d\x99\x25\xf4\xa4\x7e\xf4\x18\x26\x0b\x63\xfd\xce\x0c\x00\xa2\x89\xce\x59\xf2\xa5\xad\xd3\x08\x78\x9f\x95\xb3\x62\x39\xc6\x13\xb4\xf8\x5d\x2e\x94\xd7\x1d\xb4\x63\xe5\x7a\x57\x59\xb9\x5f\x57\x92\x87\x7b\x98\xe2\x8e\x45\xfa\x18\x8f\xfa\xf6\xda\x5a\xa7\xcb\x38\x97\x73\xa4\x2a\xd4\x7b\x36\xb4\x72\xc7\xef\xc5\x57\xc7\xc8\xcf\xf8\x51\x76\x99\xf9\xd7\x8d\xae\x5c\x12\xc7\x96\x5b\x32\xa6\x96\x14\x9e\x24\xc1\x1d\xed\x57\x18\x55\x92\x8f\x9a\xd1\x9e\xcd\x1f\xd1\x2a\x70\xac\xb6\xbd\x3c\x6c\xd0\x33\xb5\x5f\xf2\xe1\x5d\x68\xda\x34\xb1\x25\xed\x5e\xf0\xfe\xd4\xb5\x2a\xb2\x08\xb2\x2e\x94\xd8\xfe\x00\x78\xf9\xf8\xd7\xf2\x9c\xf7\xa6\x59\xb4\x1d\x64\xbf\xf0\xee\x3e\x5a\xda\xbd\x62\xe4\x9e\xbe\x3d\x7b\x44\xdf\xda\x07\x81\x96\x64\x61\x08\x19\xdc\xa0\x9e\x69\x45\x7e\x02\x7b\x75\x7f\xea\x41\xe4\x07\xe6\xb0\x21\x77\x57\x8a\xe8\x45\xaf\x17\xb3\x10\x9c\x7e\x21\x6a\x32\x3e\xb1\x1e\x2a\xc0\x6b\xa8\xa2\xde\xc3\x21\xc0\x2b\x6d\xcc\x91\x2c\x44\x31\x14\xdf\x14\x21\xe4\x2a\xe7\x21\x11\x93\x7c\xf5\x97\xad\x34\xdf\xee\xca\xce\xe1\x4d\x16\xce\x30\x4b\xb8\x31\xcc\x48\xcb\xd4\xbd\x0b\x21\x80\x57\x78\x20\x23\xb2\x9c\x5d\x69\xec\x52\x36\xf2\x90\xcf\x1f\xb1\x1e\x6d\x8c\xad\xc8\xc7\x7e\xbe\xc5\x5d\x06\x9b\x4d\xb8\x40\x66\x66\xd3\xb4\x85\x1e\xb5\x67\xf1\x3f\xfc\x15\x92\x8d\xc3\x31\xb2\x07\x7c\x1f\x99\xe6\x3a\x5f\xb1\x96\xa8\xaa\x7f\xb7\x9e\x74\xb3\x71\xee\xdd\xb6\x82\xa2\xd4\x93\xbe\x8e\x18\xf8\xe1\x08\xbf\xcd\xb2\xbf\xa2\xc6\x9f\x18\xa5\xbe\xd6\x10\xae\x46\xa4\x8d\xed\x4c\xce\x08\x93\x82\xa3\x94\x28\x69\xb6\xa3\x63\x1a\x27\x0a\x17\x5f\x2a\xb0\x97\x10\x54\x31\x01\x09\x6c\x80\x0c\xff\xec\xd4\x74\xc7\x17\x15\xe6\x9a\x69\xa3\xea\xb7\x47\xc8\xc2\x47\x65\x7f\xcb\xe9\x87\xdd\x5d\xf3\x0e\x42\xe2\x1f\x47\xfb\x3f\x69\xec\xf6\x05\xa8\xa7\xda\x7d\xa2\x96\xad\xd8\x15\xb1\x90\x17\x25\x03\x2d\xf2\x20\x35\xc3\x2a\x86\xaf\xd9\x9d\xdf\xf2\x8d\xfe\x5b\x77\x7e\xa2\x51\x5e\xb7\xb9\x29\x64\x05\xd8\xe1\xba\x6e\x9d\xf5\x66\xa1\x7f\xce\xff\xa9\x8d\x3e\xef\xaa\xfe\x47\x25\xb7\x5a\xfd\xbf\xb5\xc0\xcf\xb7\xfa\xea\xbb\x2d\xe2\x5b\x30\xa0\x43\x1a\x02\x5f\x7f\x11\x49\xbb\xf2\xcd\x6c\xaa\xd7\x55\x4e\xff\x4d\x14\x0b\xd0\xbe\xd0\x34\xb3\x73\xa9\xb5\x0f\xa2\x0c\x2f\x91\xcf\x59\xdc\x99\x22\x88\xd0\x8a\x10\x98\x5f\x85\x26\x44\xa8\x41\x5d\xc4\xdc\xcd\x5d\x14\xf0\xfb\x33\x06\x36\x93\x88\xce\x9d\xd7\xca\x0e\x
f8\xba\xa6\x96\x04\xb0\x36\xac\x40\x2e\xff\x3f\xbe\x08\x6a\x7b\x89\xab\xbb\xd6\x0b\xec\x90\x23\xca\x36\x12\xc8\xcf\x12\x07\x3b\x47\x8a\x0d\x7b\x1b\x8a\x5c\xcd\x99\xd4\xf6\x3f\xd1\x1f\xbf\xb8\x1b\x4c\x17\x59\xd6\xe0\x8b\x24\xea\x6d\x91\x66\xa6\x4a\x10\xaa\x41\xfa\x21\xf1\x9a\x6d\x05\x2d\x1a\xe4\x95\x09\xf7\x56\x7f\xea\x25\x57\x96\x56\x0a\x66\x4b\x02\x19\x70\xce\x2a\x8a\x25\x41\xd6\x1c\x1e\x14\x68\xc7\xf9\xc3\xd3\xa5\xbc\xd2\x49\x7c\xbf\x0f\xf7\xec\x1f\xa1\xfb\x1f\x10\xb5\xe9\x88\xf4\xd1\x67\xc4\x87\x70\x8f\x8d\xac\x3f\xa4\xb4\x2d\x32\x4e\xda\xc3\x87\x16\x38\xc0\x25\x39\x2f\x02\xe2\xf8\xfd\xbe\x83\xa8\xe1\xf6\x88\x81\x0d\xa0\x96\x92\x8f\x69\x92\x70\xd7\xca\x84\x7f\xac\x95\xf6\x26\xd2\x05\x43\xff\x6e\x99\xfa\x21\xe9\x4d\x08\xc7\x3f\x1e\x24\x5d\xf3\x96\xe0\x64\x59\x69\xef\xa1\xf4\x4c\xd1\x77\x33\xd4\x17\x53\x41\xfc\x0f\xcc\x24\x2c\xfc\x47\xa9\x75\xe1\x13\xf9\xe1\x88\xab\x06\x68\x7f\xf3\x16\x0c\x17\xe7\x69\xdb\xa7\x64\x57\xe0\x65\xaf\x56\x78\x4c\x5f\x41\x5b\xf5\xe6\xe7\xf6\x84\x02\xc6\x66\xd7\x3d\x78\xe9\x4e\xd0\x0e\x6a\xf7\x69\xb3\xed\x97\x94\xee\x1c\x26\x64\x18\x8a\x63\x73\x9f\x6e\x02\xe0\x0f\x28\x74\x7d\xbc\x35\xdb\xdf\x73\xe3\xee\x59\x11\x74\xff\xb1\xfb\x0f\x0f\x0e\x82\xe6\x37\x50\x7b\x3f\x6a\xf7\x78\x83\xc2\xc6\x9f\xf0\x46\x5b\x99\x9d\xe1\x0f\xa5\x0a\xd7\x7b\x16\xec\xa0\x49\x17\x28\x82\x68\xde\x34\x49\x6d\x1e\x40\xce\xc5\x16\xc3\x00\x7f\x84\xd7\x92\xfd\x54\x12\x41\x79\x23\xfe\xd7\xeb\xf2\x7e\x83\x0a\x65\x5b\xbd\xd5\x71\x87\x03\x6f\x74\xe4\xad\x12\x7c\xca\x17\x6a\x9a\x9f\x94\xaa\xda\xa8\x63\xf2\x85\x4d\x1f\x2f\xbd\x93\xa9\x6d\x14\x2f\xdb\x3a\x57\xbe\x65\x2b\x58\xb8\x01\xbe\x9a\x4f\x96\x9d\x9c\xcd\x0e\x29\x2b\x22\xc1\x76\xc6\x4f\x6e\x7c\x61\x9d\x1a\xd2\x15\xf0\x70\xb3\x3a\xe3\x6f\x69\x9d\xdc\x08\x69\xe3\x60\x83\x99\xff\xbf\x4d\x72\xff\xb8\x89\x76\x28\x57\x00\x51\x3e\x3e\xb5\x75\xb2\x4a\x3e\x3c\xb7\x5d\xfa\xcb\x81\x4b\xf7\x88\x53\xd0\xfe\xc8\x47\xdd\xcc\x52\xff\xba\xc5\xea\x16\x79\xea\xac\xe1\x7d\xa4\x44\xc8\x33\xf8\xa1\xee\xb6\xfa\x45\xf0\xaf\xff\x41\xef\xd4\x12\xf1\xff\x7a\xcc\xcc\x3a\x13\x36\xdf\x7e\x18\x74\x3f\xca\x64\x39\x1d\xb9\x11\x0b\x47\xba\x0f\xb1\xde\x0f\xb6\xd6\xc3\x7e\x75\x88\x86\xba\x2d\x34\xfe\x4a\xa9\x09\x2e\x20\x62\xc3\x3b\xb5\x53\x5e\xad\xa3\x03\xfd\x3e\x0a\x7d\x23\x38\xcf\x4d\x92\x58\xed\xc7\x5e\x1e\x29\xb4\x3d\xab\x7d\x89\x1a\x31\xe0\x44\x78\x34\x87\x6b\x9b\x06\xc9\xb5\x8e\x66\xe5\xe1\x60\xe3\x87\x23\xc9\xf9\x8e\xe8\x87\x1d\x53\x78\x93\x53\x04\xcc\x16\xfb\x89\x3e\x38\x6f\xc4\xe4\x36\x9e\x91\x44\x7b\x27\xf6\xc2\x3d\x88\x83\xda\x61\x58\x5d\x87\x4f\x97\xfd\xb1\xf0\x01\xe7\xa5\xcc\x83\x43\x21\xa6\x76\x6c\x64\x70\x2b\xfd\x3a\xf5\xd4\xd5\x6f\xb6\x25\xde\x49\x01\x58\x55\x57\x96\x20\x9e\x70\xcb\xed\xba\xff\x80\x6a\xa8\x07\x67\x86\x6a\x7b\xff\x80\xe5\x50\xb0\xc3\x22\x9a\x64\xe9\x04\x51\xec\x65\xa4\x2b\x15\x76\x74\xd7\xcc\xee\xfa\xed\x45\xe7\x0e\x7f\x00\xbc\xe2\x6e\x1f\xca\xdf\x3c\x6c\x68\xf5\x36\x76\x9b\x09\xfd\x41\x0a\xb7\xb0\x45\x67\x4e\x1c\x9c\x72\x47\xc7\x28\xd1\x92\x56\x3e\xd2\x5b\xb8\x28\x9d\x5a\xad\x21\x6e\x23\xad\xfc\x7d\x0a\x6f\x38\xe5\x13\x75\x26\x0d\xca\x19\x84\x2d\x44\xe4\xf6\xc2\x27\x06\x51\xcf\x7b\xba\xf6\x6e\x73\xcc\x22\x7c\x19\xfb\xc9\x3c\xa8\xd8\x06\x61\x2a\x05\x45\x32\x39\xdf\x33\x8c\xdc\xf9\x2c\x3b\x49\x92\x14\xc8\x54\x25\x6c\x50\xb5\x5f\xa1\x3b\x9e\xfb\x35\xe0\x0a\xd9\xca\xfa\xfc\xc4\x95\x7b\x6b\xf1\x3d\xb0\x9a\x0a\x94\xc2\xce\x04\x0d\xe0\x1b\xc4\xaf\x04\xf4\x62\x72\x71\xb7\x44\x4a\x25\x61\x8a\x70\x9e\xc2\x46\xb7\xf0\xe6\x06\x54\xf4\x44\x47\x30\xd0\x8b\x90\x59\x83\x4d\x8d\xb8\x3c\x9b\xad\x4b\x7a\xd5\x15\x12\x72\x1f\xcd\xa1\x4b\xda\x58\x11\xc3\xc0\x96\x6f\xd2\xc9\
xe9\x3e\x68\x06\xb4\x98\x1a\xd8\xce\xcf\x2a\x4f\x1d\x7f\x50\xfe\x44\x81\x45\xd4\x2f\xa1\xe2\x19\x6d\xfc\x62\x1b\x4d\x06\x8d\xac\xb1\x9b\xf7\x81\xd0\x06\xf4\xc3\x28\x4b\x7e\xe9\x85\xf2\x4b\x09\x14\xbb\xca\xe2\x48\xc9\x5d\xf3\x44\x7c\x03\xcc\x3b\x01\x09\x7b\x7f\x17\x36\xe7\x37\xf2\xdd\xa5\xf7\x80\x46\xa4\xe8\x31\x92\xb1\x3d\xc0\x75\xff\x4b\x2d\xb8\x4d\x9e\x9c\xb6\xc2\xd6\x30\x87\xc2\xd9\x13\xa9\x15\x3c\xe3\x97\x36\x0f\x2c\x65\xd2\x71\x8c\x96\xbf\x6f\x9d\x96\xf9\xe5\xaf\xe7\x9d\xfe\x87\x62\x2d\xd8\xfb\xb7\x19\x77\xd4\x40\x77\x29\xc3\x68\x91\xdd\xca\xa2\x2d\xc6\x92\xc3\x82\x86\xe1\xc0\x1a\x1e\xad\xed\x1f\x46\x46\x08\x02\x24\x7f\xc0\x7e\xb6\x81\x14\x00\xff\xe9\xff\x40\x36\xc4\x07\xff\x2b\x92\xad\xfc\x15\x38\x7f\xd2\x57\xe0\x8f\x82\x51\x89\x03\xec\x84\xf0\x84\x08\x67\x03\x6c\x89\x1e\x57\xb6\xef\x09\xb9\xa5\x9d\xa6\x51\x0b\x4e\x35\xa8\x60\x53\x55\xbb\x60\xaa\xf3\x17\xd1\xe8\x3e\x1c\x43\x11\x7d\xee\x9f\x44\xa0\x43\x13\xbc\xa3\xcc\x8f\x80\x32\x3a\x35\xab\x27\xb1\x40\xa7\x79\xac\x56\xcf\x3c\xfe\x1f\xe3\xe3\xa2\xf7\x7f\x0e\xc2\x3a\x5a\x16\x1d\x35\xa7\x53\x76\x8d\x1c\xef\x43\xd6\x0e\x3b\xfa\xf6\x3c\xed\x63\xa7\xf4\xe1\xda\x69\x83\x4c\xcf\xab\xf7\x22\xab\xab\xfa\xf3\x03\x8e\x58\xba\x14\xd6\xab\xcc\xbb\x7d\x50\x69\xbd\x7e\x62\x90\x83\x26\x10\xaa\xb9\xb1\x7e\xbf\x4c\x3c\x6e\xa2\x1a\x68\xae\x43\xf1\x6c\xe7\x92\xe3\x0c\xe7\xfc\xcb\xe4\x10\xf4\x40\x5a\xd7\x58\xae\x80\x3e\xd6\x38\x0e\x5d\x50\xed\xd7\x3c\x48\xcb\x3a\xce\x67\xc4\xd6\x45\xea\x88\x53\x88\x88\x59\x84\xd3\x8c\x0d\x9e\xff\xb8\x60\x71\x47\x62\x57\x5f\x3c\xc3\x9b\x86\xb7\x46\x0f\x87\x3d\x6e\xc5\x72\x48\xa8\x09\x20\x84\x91\x79\xe9\x0c\xc9\xb5\x40\x62\x44\x77\xd0\x2c\xe6\x59\x6d\xd3\xe7\x80\x03\x2d\x30\x3d\xab\x43\xdf\xe5\xb3\x79\x81\xf9\x88\x98\x88\x55\x53\x6e\x7f\x08\x48\x84\x4b\xf9\xb3\xc4\xe3\x57\x77\x03\xb2\x03\x1b\x1b\xcf\xf9\x94\x58\x18\x9f\x64\x16\x0a\x02\xca\x16\x91\x4d\x67\x32\xd0\x97\x8b\x56\xc7\x1c\x49\x0a\x00\x67\x23\xbf\xc5\x36\x52\x04\x2a\x30\x37\x39\x06\x91\x0f\xd9\x54\x06\xcc\x11\x43\x87\x14\xb2\x70\x8c\xbc\xba\x77\x1e\x44\xf8\x3c\x81\xbc\xd1\x44\x0c\x27\x93\x24\x91\x27\xba\x15\xb5\x78\xf2\x21\x56\xa8\xa1\x1e\xa7\x32\xf8\xba\xfa\xbf\x2a\xaa\xbe\x95\x3f\x0f\x47\x3e\x56\xe1\x1e\xab\xb4\x6d\xc3\x1c\xdc\xc3\xa1\x61\x33\x43\xde\x96\x46\xd1\x7f\x38\x95\xe1\xd7\xd7\xec\xbf\xe9\x60\xd9\x8d\xad\x61\xf0\x0a\x48\x5a\x04\x26\x51\x4e\x0d\x74\x57\x68\xf8\x7b\x49\xbf\xfa\xb0\x9a\xb1\xc1\x44\x62\x39\x7c\x92\xb8\xb4\x79\x02\xb5\x4a\xfd\x72\x45\x38\xf5\x45\xf4\xbb\xf7\x1d\x9d\xa0\x8c\x60\xf1\xa3\xcc\x8c\xf8\xe9\x00\x09\xd6\x16\x6a\xd7\xd3\x8c\x2c\xfe\x48\x6a\x70\xda\x76\x95\x90\x7f\x0f\x0b\x11\x49\xb1\x53\x1d\xa6\x33\xe8\xfb\x91\x06\x22\xa2\x4c\xcf\x2e\xe4\xc3\x39\x0a\xa2\x9e\xc0\x6f\x63\x09\xbe\x02\x5f\x36\x6d\x47\xb4\x5b\x9d\x8f\x10\x8d\x7b\xe7\x24\x88\xb1\x91\x86\x12\x1c\xde\xae\xde\x4b\x48\xed\x10\xdc\x1d\x18\x85\x24\xcd\xf5\x6c\x36\x5f\x7b\x0b\x9c\x46\x2f\x8f\xe9\x1c\xd0\x65\x07\xbb\xa0\x84\x11\x57\xb6\xd5\xcc\x6b\xfc\x33\xac\x89\xcb\xa7\x59\x1d\x93\xfa\x50\xf6\xb2\x11\x3f\xa0\x79\x3f\x58\x05\xca\x72\xed\xd6\x1f\xaa\x61\x97\x46\x33\xe5\x02\xbe\x12\x3e\xcc\x4e\xa9\x22\xbd\x7e\xbf\x57\xf6\xfc\x27\x08\x08\x78\x9b\xe2\x80\xbb\x3c\x10\xc8\x01\xa3\xfe\x2e\xa4\xd9\x5b\xa5\x15\x41\x80\x1f\x2a\x0b\x51\xc2\xab\x17\x89\x21\x22\x39\x84\x0f\x84\x1f\x6a\xe7\x73\x81\xd8\x04\xa1\x3e\x48\x10\xa7\x8b\xb4\x0a\x7c\x81\x2e\x9a\x9d\x8a\x41\xbb\x6f\x24\xc3\x40\x0a\x0a\x9b\x7e\x25\x4a\xb2\x0d\xba\x84\xe8\x20\xe8\x0f\x61\xeb\x75\xe3\x56\x1f\xa1\x22\xa0\xb7\x17\x4b\x5b\x1c\xc7\x2a\x06\x7f\x38\x31\xab\xc7\x9b\xce\xf9\xeb\xdd\x1f\x8c\x77\x61\x0e\x1b\xd5\xd8\xb8\x1d\xc7\xf5\x98
\x64\x06\xf6\x0b\xbd\x4e\xda\xbc\x7d\xeb\x91\x78\xfc\x99\xe6\x7c\xb3\x38\x5b\x5f\xf7\x3b\xfc\xfa\x68\x3a\xd3\xb0\xcf\xa4\x2c\x08\xea\x01\x61\xc1\x9f\x6c\xb5\x08\xbf\x32\xb7\x4b\xc7\x33\x3f\xff\x5e\xad\x9f\xd0\xef\xd8\x4c\x22\x9a\x42\x0b\xff\xb1\xd1\xb3\x21\x07\x25\xeb\x53\xbd\xad\x36\x22\xf7\x5f\xd0\xe7\x1b\x3c\x2a\xdd\x85\x47\x0b\xdd\x70\x2f\x39\x0e\x02\xd9\xf8\xc7\xf3\xf8\x1d\x72\x47\x04\x19\x2e\x91\xcf\xfd\xf9\x05\x7f\xdb\x5b\xd0\xde\xcc\x8a\x61\x5d\x46\x28\x8b\x19\x92\xf8\x2a\x5e\x97\xbd\x8a\x9f\xdf\xaf\x39\xd2\xb7\x6d\x0b\x7c\xce\xc1\x3e\x70\x37\x97\x87\x67\xb3\x10\x98\x48\x7a\xc7\xaf\x14\x6e\x3f\xb0\x2d\x4c\x91\xed\x43\xdd\x38\x99\x9b\xed\xcf\x7f\x7c\xc3\x7a\x49\x03\xdc\x27\xda\x5f\xd1\xe3\x52\x06\x0a\x6b\xef\x8f\x4e\xd4\xdb\xbe\x06\xa5\x7c\xbf\x43\xc3\x72\x57\x47\xf3\x65\xff\x05\x1c\xd9\x77\x5f\x77\xed\x42\x74\x1e\x95\x1e\x48\x07\xd5\x8f\xb6\x30\x55\x2a\x0b\xea\xa5\xf0\x16\xb7\x36\xe0\x72\x75\xc7\xe4\x7a\x13\x2b\x84\xd2\xc0\x0f\x2c\x3b\xee\x19\x79\x47\xfe\x3f\x62\x47\x77\xcf\x61\x5c\x1d\xc1\x59\x7e\xa2\xdf\xf3\x20\x42\xfd\x9d\x1d\xd6\xbb\x1b\x29\xfa\x4e\x7d\x64\xc5\x34\xd4\xa2\xca\x77\xab\xf1\x9a\xa8\x4a\x0f\xa9\xb9\xfd\x41\x6a\xc4\x19\xa1\x53\x1e\x1f\x37\x34\xd0\xc3\x6a\x54\xc5\xc7\x74\x0d\xf9\x90\x33\x75\xfa\x34\xb6\x44\xdc\x97\xc2\xca\xec\x28\x3b\xb6\x42\xe4\x41\x94\x96\x58\x0e\xf4\x23\xb0\x8f\x71\x17\x2d\xc3\xd3\xcb\x15\x23\xb9\x27\xea\xd7\x05\xf7\x5b\x41\xfa\x86\xd5\xb9\xa2\xcb\x59\x50\x82\xd3\xb0\x54\x29\x06\xa8\xa3\xb7\x85\x87\xf7\xa4\xb0\x32\xb0\xe1\x34\x33\x91\xdc\xfd\x74\xaf\xdc\xe4\xef\xe8\x5b\xed\x31\x08\x9d\xc0\xb9\x0a\x21\x89\x33\xda\x71\x91\x55\x68\x48\x95\x0b\xf4\xdc\xc8\x0e\x73\xc3\x46\xf8\x9e\xd2\xfb\x16\x74\xc8\x9c\x19\x8d\x88\xb5\xd8\x17\x1a\xa4\x70\x37\x42\x13\x32\x22\x71\x45\x4a\xe1\xa0\x70\x7e\x22\x5a\x06\xf4\x40\x07\x1e\x6a\xec\xed\xce\x3a\xec\x2d\x90\x86\x5a\x1a\xb0\xe7\x98\x4a\xf3\x56\xa8\xe4\x05\xbd\xe8\x83\xc0\x08\x09\x0f\xe2\xf2\x9f\xc1\x9c\x05\x93\xed\xd2\xd1\x9f\x33\x15\xee\xd6\x90\x97\xae\xbe\x10\x62\x07\x57\x0d\x1a\x25\xdc\xb6\x4a\x91\x49\xd3\x31\x5c\xf0\x4c\x61\xb7\x06\x8b\x7a\xce\x9b\x20\xfe\x3e\x18\x51\xf3\x70\x8c\x84\xd5\xdc\xff\xea\x4f\x1f\x0e\xbe\x84\xcc\xc4\xa1\x89\x9f\x76\xdf\x1a\x1e\xa7\x78\x12\x25\xd2\x15\x6c\x34\x9c\x61\x90\x06\xed\xb6\x72\x51\xec\x8f\x49\xb4\x02\x70\x28\xd8\x15\x3d\xca\x7c\xe9\xaa\x3b\xa0\x20\xe6\xac\x75\xac\xe5\x0e\xbf\xb4\x9b\x93\xf4\x41\xa1\x27\x60\x15\x09\xee\xe7\xbc\x44\xc6\x20\x24\x32\xfc\xdf\x4f\xda\xb5\x81\xa6\xce\x78\xf8\x7c\x37\x19\xea\x91\x01\x10\xea\x19\xc1\x19\x02\xef\x67\x2f\x2f\x11\x3a\x60\xb7\x49\xda\xc5\xdd\x74\x0e\x56\x4b\x93\xb7\x76\xa0\x2f\xc8\x27\xb9\x62\x4c\x9a\x85\x88\x00\x3c\xf8\xe3\x1e\xf4\x1d\x28\x81\xf9\x96\x30\xbd\x16\x48\xb5\x97\xb4\x88\x67\xe1\x97\x8a\xa7\x5e\x16\xef\xc5\xfb\xd0\x2f\xa4\x3d\xbf\xc9\x82\x74\x7d\x1d\x5e\x66\x67\x07\xce\xc3\xe1\x14\xe3\xb3\x10\x6e\xdc\x7f\x96\xe4\x36\xc1\xa8\x0a\x00\xd8\x31\x86\xbd\x9a\xa4\x1e\x50\x1a\x44\x4e\xcf\x3a\xdf\x0a\xea\x99\x04\x19\x30\xc6\x9b\x50\x07\x00\x0b\x9d\xda\x9b\x82\xf7\x40\xa5\xe0\x0d\xfd\x86\xb4\x78\x99\x9d\x8d\xb2\x15\xc3\xc8\x63\x7d\x42\x4c\x70\x36\x61\x41\x1f\x57\x26\xac\xb1\xcf\x57\x8b\xdd\x41\xdf\x88\x43\xd1\x13\x9d\x84\x41\xcb\xb3\xbb\x8a\x8b\x81\x73\x27\xa8\x54\x5b\x72\x84\x3d\x89\x9e\xd1\xe8\x15\x0d\x52\xa1\xfa\x6f\x6b\x3c\x8f\x24\xad\xcb\xd6\xfc\xe8\x20\xb1\xe1\xbf\x8a\x83\x1c\xd3\xec\xf4\xe1\x03\x86\x53\x20\x49\x6f\x53\x18\xa1\xf0\x4d\x54\xd9\x0d\x00\xf0\xad\x30\xfa\x1a\x70\x47\xff\xfd\x84\xae\xbe\xf6\x0a\xb1\xef\x1b\x36\x87\x9e\x8f\xdf\xe6\xde\x06\x1c\xc1\x36\x7a\x27\x31\x63\x22\xdd\x43\x70\xe8\x3a\x1f\x8c\x96\x58\xff\xf5\x10\xf5\xa
c\x4e\xa9\x28\xd6\xb9\xb9\x91\xe0\xb2\x74\x16\x85\xed\x43\x9d\x4c\xb3\x66\x27\xe7\xc3\xc2\x9b\xcf\xcb\x49\x09\xf5\x8a\x47\x5e\xd3\x5b\x72\x7f\xdb\x3a\x0f\x0a\x0d\x58\x53\x73\xa6\x5f\x93\xbc\x61\x16\x5b\x80\xc5\xb0\x20\xf1\x02\x2d\x73\x6e\x73\x96\xd3\xe5\x42\x91\xe8\x99\xbb\xab\x0b\x76\x85\xb2\x5c\x96\x5d\x11\x46\x80\x3e\x3c\x46\x48\xcd\xf4\xc5\xed\x08\x7c\x90\xc0\xd6\x14\xfc\x71\xcb\xdb\x1c\x34\x0b\x8f\x86\x16\x3c\xb2\x04\x4e\x32\x9e\x22\x5f\x34\x8f\x3f\x30\xf0\x5b\x1e\x91\x13\x30\x7c\xff\xbb\xc0\xc7\xe3\x01\x94\x03\xe8\xb4\xfa\x27\x7f\xfa\x2c\x7f\xe1\xa8\xf3\x57\x09\x1f\xc0\x32\x80\x3b\x0a\x44\x98\x33\x78\x88\x9a\x6f\x0c\xe7\x77\x5c\x53\x8d\x20\xe5\xaa\x0b\x3f\xd2\x43\x8e\xd6\x23\xd3\x87\x66\x6b\x7c\x67\x13\x73\xa7\xe0\x1a\x17\x4e\x03\xcf\x27\x18\xb3\xc1\xf4\x90\x6a\x6d\xaa\x91\x0c\x6a\xfb\x11\x62\x23\x4a\x04\x06\x1b\xef\xe2\x31\x52\xca\x6d\xa8\xe0\xd1\x71\xe4\x66\xdc\xd6\xb3\x44\xd5\xfc\x44\x9f\xfe\x49\x7a\x4c\x78\x0c\x34\x42\x07\x8c\x33\x9a\xa5\xdc\x6c\x55\xf1\xdc\xa0\x91\x5b\xc6\xf5\xc4\x49\x98\x97\x04\x79\x2d\xc9\x1c\xf5\xb8\x9b\x92\x62\x9c\xe4\x3e\x70\x87\xa0\x42\x0e\x05\xe9\x6c\x16\x02\xdb\x26\xd0\xc7\xb6\xdb\xbc\x65\x0d\x75\xd8\x44\xf1\xd1\x94\xc6\x86\xc1\x8c\xf1\x0e\xc7\xea\xad\xe3\x8b\x43\x30\x16\x9d\x97\x51\xd1\x98\x50\x4e\x12\x62\x97\x4a\x0c\x22\x59\xa9\xf0\x42\x02\x38\x22\x37\x02\x9d\x71\x23\x29\x4d\x0c\x2c\xbd\x62\x24\x02\x88\x14\xe8\x23\x5c\x27\x08\x77\xd4\x16\xc0\xe4\x66\x8b\x1c\xc2\x35\x4a\x8c\x8f\x96\x5d\xfe\xb4\x3d\x3e\x1d\xdf\xea\x3f\xe5\xc3\xb3\xca\x12\x1c\xbe\x88\x72\x16\x80\x09\x51\x34\xad\x9e\x72\x84\x79\xb6\xc0\x0a\x1b\x31\xcd\xea\x69\x57\x3e\xd9\x1f\x79\x8b\x52\xeb\x39\x13\xd0\xb6\x6d\x03\xe5\x17\x11\xde\xb4\x08\xd6\xb0\x4f\xab\xcc\xba\x67\x3a\x27\x17\x1b\x3d\x13\x82\x0a\xe5\x21\xf8\x4c\x23\x03\x4c\x79\xf9\xe7\xff\x56\x96\x61\xf6\x2f\x7c\xc2\x40\xfd\xe4\xd4\x0f\x59\x45\x35\x35\x0d\x1c\xc6\x15\x12\x31\xd4\x5d\x9b\x54\xa0\x16\xb3\xb4\x42\x36\x9e\x45\x79\x1c\xce\x14\xde\x71\x68\x58\x1d\xe2\x60\x94\xda\x45\x26\xf0\xa0\x7d\xc9\x10\xfa\x3c\xbb\xa4\x83\xe4\x31\xc5\x52\x3c\xcc\x17\xb1\x8e\x19\x69\xb4\x64\x73\xc0\xa6\x86\xb7\x49\xcf\x31\x5b\xed\x2b\x0f\x39\x94\x48\x3e\xb8\xd4\xd0\x47\x36\x6a\x9e\x48\xfa\x08\x83\x8d\x21\xcb\xe1\x78\xc8\xca\xd9\x82\xb5\x9c\x40\x15\x9f\xe6\x42\x82\xeb\xd1\xd7\xdc\xf5\xc1\x90\x9c\xe7\xd5\x90\x18\x95\x00\x96\x93\x64\x5e\x7d\x5d\xc3\x49\xfc\xac\xdf\x16\x9c\x3a\x78\x37\x1b\xb4\xb4\x66\x3c\x8e\x19\x95\x0b\xd3\x2f\x12\x00\xf9\x4f\xc4\x4b\xa2\x7b\x49\xaa\x87\x0f\xca\x1a\x13\xb9\x2d\x06\x81\xc8\xec\xa2\x5f\x00\xe6\x5f\x67\xf6\x98\xfa\xe2\x79\xd4\x64\x85\x74\x81\x79\xb8\xdf\x8d\xca\x9f\x1e\x93\x81\x84\x3f\xbd\x15\xf6\x76\x62\x8e\x14\x74\x4c\xe5\x6f\x17\xf3\x13\x7b\xe0\x30\x32\x00\xe5\x43\x9c\x15\xef\x23\x89\xe8\x91\x42\xd3\x71\xe9\x93\xda\x2a\xf6\xe6\xb1\x0b\x87\x84\xd4\xa5\xbf\x44\x11\xc8\xda\x83\x00\xe5\xc3\x99\x0a\x0e\xd1\x9b\x20\xa0\x22\xdf\xe3\x6c\xa2\xe7\xae\xee\x71\x54\x76\x49\x57\x83\x2d\xe1\x31\x53\xe1\x6b\x17\x1e\xe9\xb2\xa6\x9b\xe5\x92\x8e\x5e\x77\xcf\x91\xbe\x02\x7f\xe9\xe6\x7b\x2e\xf3\x70\xdb\xbf\x92\xdd\xd5\x95\x4f\xab\x03\xbb\x72\xd2\xf6\xa1\x2b\x41\xde\x98\x0a\x43\x7f\x9c\xb2\xc1\xdb\x60\x46\x42\xdf\xb7\x02\x6c\x47\xaf\xac\xb5\xcb\xf5\x1d\xa2\x48\x75\xb3\x14\xe9\x1d\x2d\x9b\xba\x68\x58\x38\x83\x51\x4e\x4b\x18\x87\xd3\x81\x48\x1c\xa4\x5d\x76\x9c\xd0\xe0\x6e\xa7\xf8\x0b\x1b\x86\x19\x7b\x68\xa2\x6c\xe0\xc8\x26\x86\xe9\x0b\xe2\x06\x9d\xe9\x3e\x52\x6c\x04\x0a\x65\x33\x97\xac\xea\xa4\xd3\x45\xcb\x43\x47\x3e\x63\x95\x60\xc6\x66\x91\x10\xda\x80\x72\x21\x92\x8d\xed\xf6\x57\x40\x24\x7c\x87\xf6\x11\xe5\xe4\x8b\xcb\xc6\x31\x8f\x54\xc9\xa1\x04\x17\x
36\x3a\x3c\x49\xf8\xbb\x5d\x2e\xf1\xce\x52\x30\x60\xd3\x26\xf0\x5c\x7e\x09\xb4\x4b\xb8\x9b\x3d\x82\xd0\x63\x3a\xb1\xcf\x31\x12\xfc\x33\xff\x84\x65\x0c\xcd\xf2\x0d\x2c\x8c\xd8\x39\x69\x20\x52\x16\xc6\x23\x17\x45\x24\xce\xab\xfe\xb2\x8c\xfb\xa6\x53\x4b\xd6\x97\xee\x35\x5f\xa4\x1e\xda\xa7\x74\x4e\x48\x99\x88\xd0\xe0\xcb\x98\xe8\x66\x07\xb8\x46\xc0\x4c\xc8\xc3\x3e\x89\x0e\xdd\x36\xd7\x28\x48\x6b\xfe\x4f\xc0\x4b\x33\x58\x87\x09\xb9\x23\x24\xd3\x7c\x5f\xac\xcb\xdf\x50\xbf\xbf\x1b\x9e\x92\x3f\x22\x29\xbf\x43\x1b\x82\x8d\x8a\xc3\x3c\x24\x18\xc4\xb9\xfe\x76\x87\x8f\x9f\x53\x7c\x5a\x4f\xd2\xc8\x6d\x24\x7d\x82\xfb\xb9\x3b\x49\x04\x96\x0d\x2e\xed\x98\xed\x4b\xdf\x89\xc5\x90\x54\xf5\xca\xa7\x7c\x19\xf6\xfc\x0d\xce\x48\x8e\x44\x8c\x17\xca\xdb\xf3\x21\xcf\x3e\x5f\x13\xdd\xa5\x21\xfc\xda\x0b\x51\x03\xff\xc3\x73\x61\x32\x87\x83\xa4\x24\x75\x3a\xa5\x18\xd9\x83\xbb\x51\xd1\x68\x70\x27\x05\xb0\xfc\x68\xd6\x87\x95\x4e\x4c\x0d\x5b\x1f\xe4\xa0\x7f\x97\x8f\x00\x13\x3d\xb3\x30\x07\xa7\xb4\xb1\x3f\x0a\x00\x0a\xf1\x91\xdd\x56\x5c\xbf\xa2\x6e\x42\x41\xb7\xbd\x21\xdf\x79\x61\xb7\xf2\x43\x75\x26\x38\xf3\x73\x20\xaf\x91\x7b\x4a\xcb\xe2\x08\xf9\x1d\x67\xf9\x1d\x62\x52\x0f\xee\xba\x82\x22\x92\xee\xe6\xd5\x87\xac\x84\x16\xaa\x4b\x74\xe0\x9e\x7f\x7e\x80\x77\x6e\x26\x0a\x07\x65\x62\x82\x3e\x28\x00\xa1\x07\xa6\x9d\xec\x52\x60\xac\x3f\x37\x83\x59\x9f\xf7\x9a\x18\xb3\x49\x71\xc7\x97\xa3\x3b\x54\x1e\x27\x71\x24\x20\x21\x4d\xd6\x21\xa3\xa6\xcb\x79\x1b\xf2\x53\x78\x7b\x69\x84\x70\x04\x17\xc4\xf9\xfb\x0d\x4f\xee\x2b\x8f\x61\xfe\x03\xda\xc8\x4c\xb0\x63\xf6\xd4\x3f\xb7\x5e\x00\x14\x72\xde\xdf\xa5\x1c\xcf\xf8\xb3\x6b\xa2\x8f\x60\x91\x04\xf7\xdc\x83\xa6\xfa\xc0\x50\x9c\x69\x34\xc7\x58\x3e\xb1\x8e\x9f\xfc\x02\xb5\xde\x7a\xad\xdc\xc7\x2b\xa4\xf3\x88\xf0\xb0\xfe\x67\x5c\x76\xa5\x7a\xff\x8c\xe6\xe3\x01\x88\x8d\x7a\x50\x3f\xe2\xcb\x8d\x82\x32\xce\x4f\x80\x58\xe7\xa7\x13\xd9\x64\x0a\xbd\x47\x75\xc1\x98\xad\xc0\x11\x80\x85\x5d\x33\x9f\xcd\xb5\xcd\xa1\x94\xc0\xfe\xc1\x3d\x16\xf6\xe1\x7a\x08\x7f\xdc\xac\x36\xae\x0b\x4e\xa8\x3c\xd9\x84\xb9\x88\x2d\x03\xe2\x74\xc4\x00\xcf\x5d\x27\x44\xe5\x23\x07\x74\x6c\xf1\xfc\xe0\x93\x0a\xd0\x71\xe2\x27\x61\x3e\x7f\xed\x74\xf5\x3f\xd3\x71\xe8\x19\xe2\x45\x40\x09\x01\xc9\x0e\x45\x23\x75\x65\x2b\xea\xf0\x40\x61\x79\x08\xc6\xbf\x3b\xca\x87\x0d\x9c\x63\x85\x0a\x49\x00\xc8\x37\x06\xf7\x15\xd4\x12\x7d\xaf\x87\x07\x9f\xbe\x35\x52\x34\xca\x81\x74\xab\x9d\xd6\x0c\xe4\x00\x20\x3a\x8e\x20\x05\x61\xd9\x83\x6c\xa8\x03\xdc\x7d\x62\x17\x6f\x38\x86\xf4\xb4\x12\xbc\x99\x5e\x79\x44\x5f\xf0\x08\x56\x3b\xaa\x54\x45\x56\x01\x09\x19\x32\x1f\x4f\x40\xc7\xce\x9a\x27\xdb\x57\x3a\xd9\x4b\xe4\x67\x94\x0b\x76\xd9\xb4\x71\x68\x8a\x15\xe7\x8a\x15\xed\x19\xe1\x37\xcf\x23\x39\x4a\x85\x45\xe7\x2f\x96\x19\x16\x0f\x59\x83\xa8\x00\xcb\x60\x5f\xd1\x03\xe8\x89\xed\x74\x3d\xd6\x6b\xcf\x21\x00\xdf\xf3\xd6\xc9\x48\x68\xcf\xba\x54\x76\xce\xc3\x0e\xb3\x20\x03\x59\x64\x8e\x1f\x47\xa3\x12\x4b\xa9\x22\x47\x54\x72\x05\xb6\xca\x2d\x03\x8e\x7e\xf8\x93\x2d\x61\x65\x3b\x77\xb3\xad\x8b\x4c\xee\x4f\xd5\xcd\x5b\x09\x34\x06\x18\x3e\x29\x23\x21\x82\xc9\xf7\x97\xa4\xa0\x8d\x73\xde\x16\xa6\x7c\x5b\x26\xc0\xb7\xf5\x98\xba\x88\xad\x20\x8d\x3d\xc7\x48\xc9\xc6\x87\xad\x3b\x62\x8f\x6d\xfd\x12\x70\xd6\xd6\xcf\xb1\x8e\x80\x2e\x22\x6c\x06\xfb\x60\x3e\xb0\xaf\x7a\x49\xec\x20\x6f\xc1\x24\xff\x71\xf7\xe4\x36\x85\x4a\x29\x0b\x9f\xaa\x0d\x54\x61\xd9\x29\x02\x28\xe7\x6d\xbb\x98\x70\x8e\x9f\xd0\xdd\xe1\x8f\x16\x8e\x49\x55\xcc\x96\x26\x49\x7c\xd2\x70\x36\x64\x85\x64\x93\xb0\xbb\xc2\xc8\x21\x88\x8f\xae\xe6\xd5\xa7\x09\x51\x3e\xea\x21\x03\x1c\x9c\x2a\x2c\xd4\x57\x04\xab\x8a\x40\xba\x16\
xd5\xf1\xa0\x94\xa8\x66\x59\x21\x54\x23\xf4\xf1\xca\x6c\xb9\xae\x4e\x7a\xd7\x3a\xb1\x46\x63\x77\xf5\xf4\x5a\x55\xa4\x6b\x7a\x87\x85\xc3\x5b\x86\xf9\x38\xf5\xe9\xf9\x40\x66\xd5\x9a\x84\xf1\x2d\x75\x3d\x88\x90\x34\x5e\x99\x0d\x19\xae\x5c\xbb\x72\x6b\x40\x41\xdc\x62\x1f\x58\xa0\x89\x7f\x72\x3b\x2c\xa1\xb2\x5c\x30\x71\xdf\xbc\x95\x4c\x75\x7b\x41\x2b\x20\xf4\x9a\x7b\xe9\xb4\xca\x2e\x6b\xb9\xda\x97\xa2\x3f\x7c\x04\xf5\x23\x11\x94\x53\xb0\xca\xec\x22\xd2\x36\x64\x22\x32\xf8\x18\x89\x5e\x12\x9e\x88\x4c\x22\xc9\x15\x9c\x91\xdc\x52\xd1\x04\xe9\x2d\x42\xd4\x17\x24\xbc\x11\xd0\x49\x41\x19\x58\x68\x60\xfd\x67\x9b\x81\xe2\xfb\xc4\x3f\x77\xe1\xf9\x10\x3b\x41\x76\x9e\xbb\x2d\x12\xdc\x06\x6e\xe2\xc7\x93\x7d\xfb\xf9\x29\x90\xa0\x48\x24\x37\x45\x1e\xf7\x84\x84\x0a\x62\x1e\x87\xe8\x18\x65\x09\xe2\xc0\xc4\xc0\x96\xa9\xf4\x55\xae\x8b\x58\x57\xfc\x9e\x99\xea\x52\xf8\xeb\x3b\x47\x64\x56\xf6\xec\xa4\xe0\x3e\x9b\x09\x1d\x82\xc4\xfa\xe5\x07\x51\x4e\x7a\xf9\x4d\x04\xe9\x5a\x5a\x19\x7f\xad\xbc\x21\xf2\x2c\x11\x11\x59\x45\x39\x04\x94\x12\x28\x09\x69\xa7\x53\x23\xf2\xda\x0a\x1d\x2e\x37\x36\xd1\xa0\xa2\x66\x94\xa3\xfd\x41\xcc\x60\x0a\xf0\x11\x83\xf4\xc3\x4c\x66\x21\x25\x37\xd1\x1a\xdb\x83\xea\x3e\x8f\x4d\x9b\x29\x4d\x76\xaa\x83\x98\x24\x08\x90\x72\xbc\x02\x6b\x5c\x2f\x32\x39\xf4\x35\xbb\x1f\x67\x39\x74\x3b\x54\x12\x71\x6c\xae\x83\xa4\x9b\x63\x15\x9a\x84\xeb\xc7\x6a\x10\x6e\x56\x88\xa1\x34\xef\xdd\xb1\x9d\x5f\x23\x75\xe6\x1c\xa9\x3f\x8b\x91\x2a\x95\x91\x44\xfe\xd1\xc5\x98\xfe\x46\x5d\xd1\x22\x8a\x89\x42\x83\x46\x78\x1d\xf9\xc3\x84\x19\x8c\xe9\xa5\xa3\xcd\xad\xc2\x5b\xee\x40\x75\x30\xe7\xf8\x86\x1f\xb6\xfe\x95\x9f\xf9\xa9\x28\x2c\x87\x58\x78\x43\x56\x26\xf9\x13\x32\x1f\xd9\xe7\xaf\x0a\xee\x79\x3a\x04\x1d\x1b\xb1\xa7\x00\x25\x17\xd3\xf8\x94\xfe\x54\xfe\x01\x8e\x61\xdc\x3f\x23\x1d\x3a\x18\x26\xb2\xff\x47\x33\xac\x38\xb7\xa5\xd9\x95\x3c\x73\x8b\xd0\x6e\xee\x02\x4f\x2f\x72\x1e\x61\x10\x39\x3b\x82\x22\xc5\x2a\xa6\x9d\x95\x34\xff\x2c\x52\xb9\xfb\x60\xc4\x97\xe4\xad\x7a\xed\x73\xb2\xd5\x00\xab\xf8\x21\x3b\xe1\x14\x85\xb8\x85\x2f\x34\x01\x10\xbf\x2b\x87\xec\x13\x62\x97\x41\x31\xa3\x2e\x78\x79\x08\x15\xe1\x9d\x3b\x93\xdd\x80\x8e\x3b\x98\x62\xef\x82\xd9\xae\xfe\x07\xb2\x30\x74\x5e\x0d\x39\xeb\x46\xe1\x42\x43\x72\xdb\xec\x21\x55\xf4\xcc\x09\x84\x0c\x34\x2c\x19\x92\x9c\xe4\x06\x57\xb3\xea\xfd\x5b\x11\xd9\xd1\xe9\xe2\x0c\x90\xbb\xaf\x6b\x40\x23\x55\x19\x9b\x80\x4b\x42\x3c\x53\xae\x8b\x26\x8f\x67\xf8\xca\x23\xd3\x88\x62\x9b\x33\x49\x1b\xf3\x09\xcc\x64\x97\x7f\x46\x59\x0f\xb1\x73\x01\xef\x50\x1e\x33\xfb\xe7\xec\x7d\x1f\x1e\xe9\xd9\xf9\x07\x86\x20\x86\x97\x8f\xb0\x69\x48\x3a\x99\x59\xec\x1a\xcf\xcb\x0e\xaa\x4a\x3e\x68\xfa\xde\x5b\xe5\x2d\xbf\x0b\xaf\x88\x4e\x72\x4d\x02\x06\xed\x93\x6d\xef\xbd\x80\xb4\x55\xe5\x67\x87\xb1\xca\xdf\x02\x89\xdb\x42\x6d\x12\x26\x78\x09\x64\xee\x48\x9a\x5b\xd7\x66\x40\x39\x06\x00\x63\x97\x88\x2f\x94\x48\x07\xd8\xb9\x6f\x80\x50\x26\x78\x26\x67\xb0\x4c\x58\x90\x75\x75\xf8\xb6\xf2\xbd\x62\x26\x93\xc9\x7f\xa8\x38\xce\x10\x61\x01\x4b\x19\x9b\x2c\x0f\x39\x73\xdf\x96\x2f\xc7\xff\x02\xae\xf3\xac\x9b\xe6\x4d\x82\x9c\x53\x3e\x7f\x6e\x4b\xd9\xd8\x2c\x41\xc4\x74\x1f\x8d\xdf\x59\xec\x84\xbd\x30\xac\x8c\x08\xf7\xae\x7c\x86\xcb\x0a\x46\x1f\x12\x3d\xeb\x90\x97\x4e\x41\x8c\xf0\x09\xbb\x7a\x9b\x94\xe6\x1c\xf4\xde\x6d\xdb\xf7\x4c\x64\x81\x76\x21\x4a\x77\xab\xca\x48\x14\x08\xca\xac\x2d\x8e\x65\xb8\xfc\x44\xe9\xfe\xc2\x5d\x72\x86\xe9\xb0\x0f\x7a\xb5\x08\x7d\x44\xe9\x51\xfe\x39\x4b\x9e\x12\xe9\x3b\xb3\x97\xb8\xcd\xfe\x0d\x00\x66\x8d\xd0\xf9\x2e\xcf\xa7\x32\xeb\xf0\x22\xfe\x72\xb9\xa3\x54\xc1\x29\x32\x22\xcb\xd8\x45\xe4\xaf\x61\xd3\x57
\x43\x62\xb2\xe2\xc6\xcf\x3e\x8b\x1a\x43\x03\x58\x17\xe3\x28\xdc\x2e\xd2\xee\xe8\x0b\x6a\x05\x12\x71\x8f\x8b\x8b\x7a\xba\x84\x18\xa6\xf3\xcb\x48\x1a\xe0\x04\x44\x90\x12\xf5\x7d\x76\x82\xd1\x91\x83\xab\x5f\x3b\xcb\x6c\x66\x1b\x32\xb1\x18\x7d\xc3\xc9\x63\x33\x42\xfd\x85\x5c\x14\x17\xcd\x27\x82\x8a\x56\x11\x2a\x8a\xfc\x9d\x67\x16\x40\xdf\x2c\x43\x3c\x8f\x24\xf0\x23\x5e\x81\x7e\x95\x5f\x60\x33\xcc\xf1\xf9\x00\xd6\x41\x65\xf9\x89\x6c\xec\xf7\x47\x2c\x96\xb2\xca\x65\xf3\x6d\x3b\x75\x3b\x0b\xe9\x9b\xff\x76\x85\x94\xc3\x22\x91\x09\x46\xc2\xca\x3e\x22\x77\xcb\x87\xdb\x95\xe4\x74\xb9\x86\x4d\x96\xa7\xe8\x69\x3f\x00\x9a\x4b\x68\x74\x86\xd8\x17\xc1\x0b\xb3\xb5\xe1\xde\x73\x38\x7f\x4c\x45\x19\x8f\xb8\x52\xe9\x27\xf8\x56\xb3\xdf\x3e\xab\xf4\xb1\xef\x9f\x1c\xd6\x87\x7e\x24\x65\xe3\x0d\xe1\xd3\x4e\xd3\x8e\x53\x1e\x45\xef\x90\x2c\x02\x9c\x69\xb1\x7b\xbd\x80\x9b\xf8\x40\x29\x82\xd2\xdf\x54\xc6\x3e\x90\xc8\x37\x67\x5f\xc3\x5e\x50\x6b\xfb\x2d\x85\xce\xa6\x2f\xf0\xd1\x19\xb7\x50\x40\x74\x99\xf4\x36\x5e\xf3\xc9\x1d\x95\x95\x8c\xdc\x4b\xc0\x49\xd1\xef\xc9\x36\xff\xfd\x8f\xf1\x56\x2f\x9c\x3b\x95\x18\xcf\xe9\x51\x96\x04\x7e\x4e\xae\x3e\x2f\x30\xa2\x1a\xa1\x06\xea\x8e\xfc\x08\xce\xcd\xc6\x04\x2a\xc8\x20\x7c\x8f\x4e\x9a\x0d\x8b\x06\x20\x3f\xfe\xa9\xea\xa4\x65\xf1\xb6\x33\x88\x90\x8d\x7e\xda\xfb\xdd\x2c\x8c\x6d\x98\xb9\xd6\xdf\x55\xcd\x8e\xbb\x9e\x61\xb9\x76\x53\x9a\x78\xc3\x38\xd1\xd9\x06\xf7\x29\xa4\x0a\x7a\x22\xf7\x88\xe7\x98\x95\x0f\xe0\x24\xc5\x3b\xd8\xa8\xa7\xc4\x28\x51\xab\x72\x97\x43\xea\x20\xdb\xf7\xfc\x51\x60\x84\x42\x9e\xeb\xc2\x2d\x4e\xcf\x54\xb6\x4a\xaa\xcd\x94\x28\xf3\xbc\x7f\x89\x00\xb7\xb7\xd4\x05\x26\x65\x89\xf3\x6e\xab\xd6\xf9\x2e\x5a\x5c\xe4\xcd\x7e\xca\xf5\xed\xbf\xf9\x08\xef\xf4\xeb\xa3\xfe\x97\x95\x42\x89\x16\x9c\x35\x92\xf1\xea\x78\x56\xde\x32\x86\x93\xe1\x55\x95\x80\x7e\x7f\xe4\x67\x79\x0b\x7c\xb3\x19\xf2\x58\x44\xe1\x51\x66\xc4\x29\x05\x94\xe3\x54\x1c\x14\x24\xe1\xf0\xe1\x56\x49\xfe\x63\x21\xfa\x27\x93\x2a\xcc\xc4\x9b\x6c\x54\xf2\x3f\x9a\x27\x14\x02\x92\x36\xfd\xcc\x51\x26\x86\x4c\xdc\x21\x1b\x3a\xea\xa9\xfc\xc0\x43\xc8\xd3\x51\x22\xa4\xc8\x26\x22\x1a\xb6\x1c\x69\x0e\xc1\xfa\x25\x03\x98\x6b\x9b\x09\x63\xd9\x0f\x4c\xe7\x9f\xcb\x4a\x33\xa6\x47\xac\xa9\x05\x58\xd8\xa6\xe6\x0e\x53\x53\x5b\x35\x75\xf3\x90\x99\x27\xe3\x7f\x74\x2a\x87\x3c\xa3\x62\x8a\xe8\x2e\xfc\x85\x86\x94\x7f\x2c\x20\x5f\xf4\x34\x39\x68\x73\x73\xe5\x48\xbd\xe8\x4b\xc4\x1f\xce\x0a\x49\x43\x6b\x9a\x1d\x64\x4a\xa1\x7e\x4a\xa8\xca\x63\x0a\xc1\xf4\x15\x51\xba\x17\xfa\x67\x7c\xe1\x74\xb7\x88\xc4\x7d\x28\x1c\x58\x3d\xc0\x99\xf8\x54\x3c\x45\x7d\xe6\xc0\x94\xfe\x4d\x35\xb4\x8b\xda\xbf\x42\x0c\xd4\x12\x3f\x9b\x2a\xe0\x44\x82\x02\x0b\x52\x0c\xb3\x5c\x22\x6c\xbc\xc8\x19\xc5\xdf\x80\x34\xdf\xe0\xfc\xc8\x68\x14\xf9\xef\x1c\x78\xd2\xab\x4d\x13\x11\x94\x48\x32\x10\xa2\x45\x02\x64\x0a\xcb\x90\xd3\x85\x5d\x3e\xe8\x6d\xa7\x83\xcf\xf8\x3f\x2f\x29\x1c\xba\x69\x5b\x0b\xf8\x86\x9e\x2c\x22\x0e\x65\x7f\x79\x20\x3c\x81\xa5\x33\xbb\x31\xf7\x83\x04\x1f\x4a\xa2\x50\x1b\x38\x5c\x84\x7a\x8d\x32\xa3\x08\xf9\x39\xbc\x4d\xef\xaf\x35\x03\x63\x72\x6a\x8c\xed\xcb\xdd\xd6\x24\x33\x56\x05\x47\xf8\x2a\x3f\xc4\xca\x59\x84\x4c\x2d\x61\x23\x3a\x2a\x6b\x12\xe0\x14\x71\x22\xd8\xa4\x50\xbc\x54\x57\xee\xcf\x0e\xcc\x7b\xa9\xa5\x2e\x1c\x29\xf2\xe9\xed\x8a\x43\x1f\x43\xf4\x9c\x48\x7c\x29\xaf\x99\xab\x49\x18\x87\x0e\xf1\xc7\x2e\x86\x63\xc1\x14\xff\x70\xbe\x8e\xec\x54\x4e\x31\xb2\x0b\xc4\x90\x38\xee\xe5\xc9\x6d\x76\x59\x5f\x3c\x53\x4a\xb4\xf7\x8b\x55\xd0\x4f\xdf\x98\x17\x65\xa7\xee\x15\x7b\x25\x16\x84\x7b\x79\x91\xf1\xf3\x7c\x36\xbe\x82\x0c\x49\xac\x42\x13\xd1\xa7\x61\x51\x79\x23\x4
1\x6f\xc7\xf4\x4f\xf4\x0a\xfd\x19\x3f\x12\x45\xaa\x57\x5b\xe4\xa0\x7f\x6a\xb0\xc0\x66\xdf\x8e\xfa\xed\x1b\x9a\xcd\xaa\x6d\x30\x26\x88\x63\xdb\xe1\x2c\xf3\x6a\xc6\x2d\xd2\x6e\x90\xe3\x4e\xda\x9e\x42\x0b\x89\x4d\x1c\xe6\x4d\x45\xc8\x36\xef\xa4\xd5\xd8\x29\x24\xf6\xee\x36\x07\x36\xee\x6a\xa7\xc8\xd5\xcf\x6f\xd1\xba\xb7\x1c\x1c\x43\xfb\x4d\x84\x07\xd9\x4e\xc8\x31\x9f\x3c\x46\xde\xe2\x10\xe0\xd5\x15\xcb\xf4\xd6\xdb\xe3\x45\xe2\x80\xfe\xc9\x0b\x69\x64\x2f\x56\x67\xb3\xf9\x1f\x63\x15\x36\x8e\x61\xdf\x92\x39\x9c\xc6\x12\x8f\x69\x66\x08\x9b\x6d\xe6\xef\x8f\xa0\xb3\xfe\x94\xb0\xaa\x4f\xef\xf8\x7f\xa0\x2a\x63\x90\x3e\x74\x8b\x45\x76\x87\x7c\x4d\xef\x40\xc7\x4e\x32\x78\x08\x8a\x71\xd0\x50\xaf\x73\x91\x0e\xea\x47\xb2\x4a\xfb\x93\xff\x30\x2d\x55\x50\xd8\x10\x30\x60\x02\x57\x9d\xe2\xa6\xaa\x26\xe2\x6a\x3b\x23\xde\xf0\xe4\x87\x59\xab\x2a\x10\x20\x5c\xbc\xa5\xdc\x13\x6d\x2c\xd7\x42\xd1\xf1\x5a\xa4\xa6\x4a\x03\xda\x49\x0f\xa7\x0e\x09\x6d\x5e\xe1\x09\x11\xb2\x29\x6f\x49\xfa\x28\x2d\x85\x6c\xa1\xb7\xc8\x88\xc0\xc8\xe8\x19\xe3\x26\xa9\x04\xcb\x3c\x13\x74\xcd\xba\x96\xd7\xf4\x9e\xf8\x23\xac\x0a\x10\xb0\xa3\xc8\x45\x69\x8f\xb0\xc7\x5d\x46\xa9\xb2\x88\x49\x4c\x2b\x5d\xce\xd2\xf9\x61\x0e\x01\x55\x98\x7c\x2d\x67\xfe\xeb\x29\xae\x0a\x72\xf2\x52\xe4\x72\x5a\x82\xf6\x03\xa7\x0f\xff\xcf\x4b\xc9\xa1\xb5\xea\xaf\x39\x08\x46\xfa\x42\x17\x98\xf0\xfa\xe3\xd7\x72\x3c\x03\x18\x73\x18\x2b\x22\xeb\x3c\xc0\xd4\x9f\x9f\xcf\xce\x9f\x9e\xb7\x63\x09\xcc\x2b\x7c\xe4\x96\xc4\x50\x9a\x25\x55\x02\x5d\x2d\x43\x32\xd1\x2a\xc6\xf7\xd0\x9a\xd7\xff\x94\x3a\xf4\x97\x3e\x7d\xf9\xcf\x07\x9a\x71\xff\x12\x7b\x6c\xf3\x2b\x5a\x34\xd3\xe7\xf0\x50\x58\x52\x85\xa8\x1f\x45\x5f\x48\xfe\xa4\x55\x50\x56\xed\x9a\x44\xea\x6a\x5a\x29\x49\xa5\xd6\xef\x5b\xe6\x99\x36\xde\x82\x24\xf4\x6b\x38\xc1\x19\x86\xce\xa0\x93\xd8\xa3\x53\xe1\x53\x67\x4f\x64\x15\x04\x16\x41\x92\xa8\x83\x7e\x42\xab\x71\x8a\x66\xc9\x94\xc3\xe8\x15\x09\x1e\x04\xdd\x6c\x96\x3c\x3f\x34\x69\x5d\xd7\x1a\xc8\x97\x4c\x64\x22\x06\x1f\xdc\xa2\xb0\x60\xfd\x5c\xeb\x4c\xbf\xd5\x4f\x43\xba\x2e\xa6\xdc\xc2\x03\x29\x76\xf9\xb6\xe1\xf3\xd9\x7c\xb2\x12\x86\xe8\x17\x4b\x47\xa6\x91\x38\x32\xe4\x7b\x51\x99\xa1\x56\xd9\x22\x5b\xe1\x31\x9c\xc5\x9d\x53\x6b\xb8\xf4\x8c\xbe\x66\xf1\x39\x67\xfa\x0c\xf2\x97\x49\x5b\xb8\xba\x9c\xd4\x2c\x8e\xa5\x1e\xde\xec\x80\x5f\x6e\xd8\xbb\x14\x2c\xc6\x5e\x7b\x62\xf8\x0f\x28\x46\xa3\xd5\x22\x6c\x4b\xc9\x4d\xe3\x4f\xef\xe9\x46\xad\x34\x8b\x99\x51\x67\x05\x67\x28\xea\x20\xdb\xfe\x5f\x11\x5c\xf9\x1d\x84\x6a\xcd\x90\x48\x38\x41\x49\x92\xc5\x67\x55\x80\x1a\x5d\x5d\xea\xc2\x64\xa0\x67\x1d\x85\x67\x55\xd9\xc0\x3e\xdb\xea\x84\x5c\x13\xd5\xaf\x4f\xd8\x7d\xf9\x72\xfa\x14\x0c\xf2\x0c\x7f\x1d\x24\x6d\x79\xf2\xa5\xbd\x47\x02\x69\xfc\xd5\x9e\x84\x9d\x84\x8b\xa5\x33\xe2\x27\xed\xed\x93\xaa\x94\xb4\x13\x78\x26\x36\xfc\x32\x75\xe3\x19\x3a\xf8\x55\x43\x9a\xc0\x3a\x85\x1f\xcf\xf6\xce\xc3\x27\x06\x8e\x7d\x2d\x01\x6c\x8d\xde\xe9\x63\xc8\x0d\x99\x39\x1f\xd9\x46\x0d\x55\xf4\x1b\x12\x3d\x87\x3a\xf1\x2f\x99\xa6\x0e\x9e\xce\x79\x71\x24\x16\xd1\x89\x94\x2e\xde\xc9\x88\xc9\x52\xf4\x1b\x0f\x92\x54\x80\xfb\xa1\x81\x5f\xae\xe4\x06\x0c\x40\x9e\x31\x3d\x3e\xa8\xb2\x3a\x77\x49\x75\xad\x8e\x2c\x44\xbe\x3c\xfc\x5c\xd4\x21\xa9\xc7\x86\xc8\xae\x1f\xf2\x73\x8e\x4b\x6d\xc8\xee\x68\x87\x2d\xac\xfb\xd5\xca\x55\x60\x48\x84\x39\x06\x8f\x8d\x18\x59\xf4\x0f\xc9\xaa\x63\xfa\xca\xd8\xe2\x27\x2b\x5a\xe7\x26\xa0\x9b\x21\x25\x54\x55\x47\xad\xaf\x6c\x82\xf7\x41\xa2\x46\x9f\xa6\x17\x6c\x76\x88\x6d\xe4\x67\x49\x1f\xfa\xf7\x5e\xf5\x47\x2f\x9a\x45\x5f\xce\xaf\x7c\x5c\x8a\xdc\xcb\xfb\x12\x0a\xaf\x35\x64\x5d\x2b\xdb\xce\xd4\x6b\x05\x27\x0f\x
7e\x6f\xac\xe8\xfb\xc2\x96\x4c\x5f\x84\x8d\xb7\xd8\x4c\x8c\xe7\xbd\x0f\x3a\xd4\x0c\xd2\x15\x51\x34\x45\x69\xfa\x78\xbe\x64\x3d\x83\x1f\x60\x23\xef\x08\xce\x06\xfe\x9a\x4c\x20\xf7\x21\x32\xe3\x15\xca\x02\x86\x9b\x0f\x9e\x69\xc5\x95\x68\x4b\x24\x7b\xa0\x7d\xf8\x31\xf4\x49\x4e\x33\x67\x88\xbc\xfa\xb4\xe9\x96\x4e\x6e\x34\x90\x63\x10\xb6\x3b\x7e\x16\x35\x81\xcf\xed\x7a\xab\x09\x0a\x5f\x9a\x21\xf6\xb6\x07\xe2\x11\x06\x28\x0d\x75\x6b\x5e\xb4\xc4\x8f\x6a\x03\xb5\x29\x02\xf3\x41\x38\xcf\xd8\xc5\x2b\x20\x09\x6e\xb4\xe2\x32\x21\xfa\x9c\x19\x2d\xf5\xa2\xa9\x0d\x12\xec\x1b\x5c\x16\xd1\x94\xb6\xe1\xa7\x60\x6c\xbe\x3a\xdd\x05\x02\xc9\x3f\x78\x1b\x31\x38\x47\xdb\x74\xbd\xda\xa4\xfd\x0d\x57\x97\x53\xeb\x15\x3c\x52\xde\xed\xb6\x4d\xea\x7f\xf7\xb0\x41\x39\x4d\xc8\xed\x13\x34\xa8\x77\xa4\x6e\x96\x08\x7e\xef\x3c\xbc\x97\x0f\xdb\x29\xc5\x83\xcd\xad\xdb\xe4\x72\x9c\x64\x19\x53\xb2\x88\x50\x9f\x4b\x1a\xc1\x0e\x29\x5d\x29\x94\x69\xc9\xff\xf1\x3c\x0c\xa4\x48\x89\x09\x96\x95\xc8\xd2\xd9\x36\x37\x32\x60\xee\xd8\x30\xe2\x52\x0c\xe2\xd4\xf8\x0c\x87\x5b\xb0\x53\xf8\x48\x71\x58\x5c\x17\x49\x63\x9a\x5f\xe1\xee\x90\x1a\x77\xe5\x9d\x7b\xc7\xf9\x60\x1c\x74\x67\xa4\x12\xa1\xe1\xc0\xb9\xd4\xae\xe0\xab\x31\x28\x12\x3b\x47\x60\x01\x12\x5b\xb3\x3d\x7e\x38\xcd\x9c\xcc\xb3\x4a\xb6\x98\x90\xda\x84\xeb\xd9\x6f\x01\x0e\xed\x13\x74\x92\xfd\xba\x5b\x8b\xfa\x0b\xac\xae\x2b\xb7\x43\xc8\x45\x12\x79\xad\x69\x46\x91\xed\x68\x9c\xb7\x75\x82\xdc\x34\x70\x8e\xb0\x2b\x98\x90\x1e\x4c\x78\xf1\xf8\xaf\xcd\x3f\x59\x84\xa6\xbc\x75\x14\xa0\xf5\x6e\x8f\xfb\x20\xde\x46\xc0\xed\x2d\x74\xe7\x7d\x32\xea\xf2\x5d\x34\xf3\x03\x79\xbb\x62\xb8\x6a\x9b\xf1\x6e\x85\xa8\x4d\xe7\xbd\x60\x36\x50\x09\x9b\xd9\x8d\x2a\x06\x8a\xc9\x6e\x62\x6e\x8b\xd2\x41\x67\xf1\xc9\xdf\x73\x00\xa1\x74\xc2\x51\x34\x4d\x8e\x08\x6c\x3b\x0f\x6f\xa2\x67\xca\xe9\x7b\xd3\xf9\x3b\x0c\x14\xde\x79\x0e\x98\xec\xa7\xd1\x7a\x27\x05\xab\x02\x15\x5b\x35\x3a\x89\x7e\x8d\xb4\xbc\x69\x06\x6d\xaf\xb9\x87\x4b\xad\x10\x30\x84\x13\xfa\x60\xef\x67\xc9\xca\xbe\xaa\xab\x5f\x2e\x35\xf3\xc1\xfe\x65\x06\xac\x8d\x5e\xc2\x5e\x51\xf7\x83\xb0\xa4\x9b\xff\x0f\x07\x0a\xab\xda\x2b\x8b\x31\xa5\x3f\x84\x9c\xd1\x3c\xa1\x7e\x58\x01\xd6\xed\x98\x9a\xa9\x2e\x51\x34\x9e\x84\x77\xbd\x83\x28\x75\x9e\x6d\x67\xed\x52\x34\xdd\x3f\x36\x05\x5a\xa5\xe5\x04\x9c\x6b\x98\x80\x71\x6b\x21\x63\x9c\xcd\xe3\xc7\x99\xe2\xb8\xe9\x5d\xee\x58\xe8\x6e\xe4\xaf\x11\xfa\x9a\xc0\x2f\xa3\x6d\xef\xbf\xc2\x43\x3b\xd3\xc5\x8c\x42\x2b\xdc\x31\x37\xfb\xec\x03\xb9\xaa\xb7\x5d\x08\xb4\xda\xd9\x9b\xd4\xfe\xf2\x4c\x43\x1f\x0c\x81\x1c\xd9\x67\x0a\x91\x59\xb3\x90\xbd\xc5\xcd\xc4\x44\x43\x92\xa1\xbb\xf7\x4a\x84\x65\x5f\xe9\xbb\x19\xf3\x84\x5f\x2a\x86\x13\xed\x77\x4a\x55\x68\x4e\x21\x9a\x76\xbb\xe2\x21\x72\x56\x61\xc4\x14\x34\xac\x10\xab\x85\x4a\xeb\x46\xc1\xbd\x5f\x91\x35\x78\x35\xbf\x20\xdf\xa3\xac\xba\x22\xaf\x43\x7a\x29\x48\xd7\x48\xa1\x42\x76\xab\x0a\xd8\x2b\x92\x81\xe0\xe4\xeb\xff\x67\xaf\xac\x4b\x3d\x46\x27\xb6\xa9\xd7\x16\xd9\x3e\x9b\x26\xb6\x0b\x3d\x3d\x25\xbf\x2e\x32\x13\x90\xd6\x0d\x23\xf8\x8f\xfa\x4d\x00\x7b\x66\xb6\x3a\xae\x59\xaf\x61\x2f\xa1\x4c\xd9\xa9\x28\xd1\x66\x6a\x69\x4e\xc8\xd3\x11\xe5\x6b\xee\x7e\x84\x30\xbd\x5d\x5b\x97\x76\xb6\xbf\x6d\x18\xaa\xc1\x0b\x5b\xeb\x2c\x45\xdc\x99\x95\xab\x13\x42\xb7\x42\xe2\xe6\x25\x26\xb4\x3b\xfe\xde\x77\x8c\xae\x9f\xb9\xef\x89\xfe\xc6\x3e\xd2\xb1\x8f\x71\x3b\x91\xb6\x43\x5b\x85\x0b\x09\x93\xf3\xf0\x4f\xfa\x6b\x73\x21\x73\x3e\x18\x7f\x96\x16\x10\xab\xc7\xc0\xb0\x41\xa3\x68\x95\xd3\x90\x88\x03\xc9\xd2\xdd\xa7\xaf\x6b\xe4\xb1\x66\x48\xf4\xca\xec\xf9\x21\x58\x56\x8c\x99\x2b\xac\x89\xe5\x67\x72\xa1\xfb\x1a\xa1\x3e\x2b\
xc0\x0b\x7f\x1f\x09\x22\x11\x59\x12\x55\x72\xc6\x94\xa4\x46\x49\x9e\x13\xb1\xea\x20\xc7\x4d\x57\xe0\x11\xcc\xab\x21\x4f\x75\x0a\x59\x33\x43\x38\xcb\xd6\xac\x87\xff\x11\xb9\xc3\xc5\x5a\xd0\x5e\x8e\x57\xf8\x05\x91\x9c\x20\x6a\x0f\x44\xee\x86\x51\x2f\x6b\xd5\x59\x25\x68\xf7\x11\xb5\xf5\xcc\xff\x9d\xf4\xe7\x38\xdd\xd7\x84\xa3\x4d\xb4\x34\xf0\xf6\x49\x59\xb3\x79\x34\xb4\x7c\x73\x53\xf5\x91\x3d\x62\xab\x94\x9f\x65\xe8\x36\xee\x97\x6d\x12\x9d\x39\xe9\x7f\xe9\xdf\x02\x71\x10\xb3\xe9\x74\x23\xb7\x36\x5e\x78\x20\x66\x38\xfa\xad\xe8\x29\x50\x96\xf5\xbf\x8a\x5c\x3c\x87\xe3\xaa\x13\xf4\x0f\xd0\x04\xe3\x12\x3d\x6e\xda\x10\xc9\xe8\xf1\xc0\x3e\xe6\x14\x72\xe8\xe6\xc9\x82\x15\x89\x0e\x3b\x84\xe8\xb7\xc0\x4b\x49\x81\x44\x51\xd1\x1c\x10\xa8\x15\xc9\x02\x91\xbb\xbc\x47\x4c\x2e\x8d\x68\x0f\x6f\xfe\x48\x5a\x08\x9b\x1d\x09\xf8\xa6\x88\xdc\xc7\x22\x57\x7d\xf7\x7c\x48\x2e\x6c\x5f\x60\xef\x07\xa9\x6d\x8e\xac\x14\xdd\xa4\xab\xed\xb0\xef\x67\x25\xfe\x98\x5e\x32\x57\x8a\xb7\x93\xae\x10\x18\x26\x37\x97\xe0\xa3\x70\x6a\x52\xef\x77\xb8\x65\xa9\x0f\x65\x30\x77\xa4\xa0\xc5\xe9\x0d\x25\xfb\x6d\x08\xc6\x78\x1c\x92\x5e\xcd\xce\x7e\x72\xdf\xe0\x97\xf6\x70\xee\x9b\xf0\x32\x2b\xeb\xe9\x67\x61\x33\xbc\x0c\x11\x8a\x3a\x70\xc5\xde\xb1\x68\xf5\x45\x3e\x3f\x7b\x0d\x4f\xfd\x3d\x3c\xb6\x76\xab\xb5\x3f\x7e\xe1\x7b\xa0\xd1\xcc\x44\xa3\x19\x7a\xe2\x41\xa8\x63\x23\xc4\x51\x32\x63\x3f\x72\xf1\x7a\x7e\x4f\xa2\x74\xec\x29\xa1\xb6\xd9\xaa\xe0\xbe\x0a\xce\xcb\xa2\x08\xa3\x2a\x6f\x24\x9a\x5d\xb8\x97\x33\x31\x42\xad\xde\x9b\xad\x5f\x91\x7a\xb0\x95\xfe\xfb\x19\xb5\x7b\xb1\x3f\xee\xfc\xad\x5e\x74\x6c\x73\x40\xd4\x1b\x99\xf3\x5b\x98\x0e\xcd\x6b\xa1\xaf\xf3\xea\xcc\x2c\x0e\x25\x51\xda\xf0\x4d\xf0\xe9\x99\x1f\x7b\x6b\x81\x89\x36\xa2\xe7\x53\xc6\xf2\x50\x09\x2c\x9e\x81\x38\x9e\x9c\xfa\x6c\xd0\xb3\x1b\xb4\x81\x9d\x40\x58\x52\x70\xf8\xf6\xb3\x60\xe2\x1b\xeb\xb5\x88\xf8\xd8\x7e\x8b\x9b\x8c\xb0\xad\x07\x8f\xca\xe1\x11\x0c\x51\x43\xf2\x21\x90\xac\x69\x3e\xb6\x3f\xd2\xa6\x47\xcf\xeb\x4b\x26\x08\x44\x2c\x75\xb3\xfb\x37\x85\x72\x32\x85\x95\x55\xff\xcc\x4d\x99\x02\xc4\x52\x7e\xa2\x5b\x4f\xab\x3b\x90\xf6\xee\x3e\xe2\x9c\xfa\x24\x49\xd3\xd6\x23\x58\xfe\xe1\xe0\x65\x1f\xf8\xe2\xa7\x13\x56\x69\x3f\x45\x93\x4c\xea\x28\x3e\xdf\xf5\x4b\x2c\x3e\xa3\xe0\xb1\xe1\xa6\x9b\x96\x2b\x5a\x1a\xf8\xa3\xdd\xdd\xd3\x69\x7a\x83\x55\x1c\xb9\x8d\xee\x65\x9d\x64\x2d\x45\x07\xb0\x2d\xe4\x06\x5b\xc2\xcf\x47\xcb\xeb\xf1\x43\x9a\x23\xf0\x5e\x3e\x38\x48\x44\x18\xa1\xb1\x4c\xd8\xb4\x7f\x36\x5c\x75\x95\x3e\x79\xad\xa4\x48\x42\x58\x49\x62\x5e\xed\x7b\x42\xa3\x4a\x2a\x59\x21\xbc\xd1\x57\xb7\x32\x4f\x68\x2d\x32\xba\xbe\x28\xb1\x04\x63\x28\x09\x50\x0d\x0b\x5a\xc0\x87\xc2\x61\x0b\x7e\x35\xda\x4d\x85\xbb\xd2\x3a\x37\xfc\x54\x0e\xd8\xb3\x60\x46\xc7\x56\xff\xd9\xd2\x82\xc2\x4b\xc4\x14\x6f\xa3\x97\x2c\x13\x4b\x87\x8f\x2e\xe5\x6f\xac\x5c\x9c\x57\xcc\x58\x72\xbc\x5b\x31\x37\xe1\xab\x02\xb2\x4a\x94\x37\xd5\x9d\x6e\x05\x6b\x0a\x5d\xd1\x9a\xae\x48\xa4\xba\xc8\x29\x84\xb1\x39\xc1\x3e\x8c\x82\xf7\x88\xd2\x84\x0e\xf8\x98\x35\xc3\xf4\x2a\x8c\xb1\x3c\x8d\xe3\x41\x63\x6d\x29\x42\xb1\x3e\xb7\xb4\x66\xb4\x12\xfc\x28\x7c\x96\xa6\x47\x40\xaf\x83\xcf\xe8\xd0\x68\xa1\xd6\x64\x09\xb6\xde\x82\xec\x0d\x3f\x46\x2c\xe1\x52\x74\x9a\x2e\x7b\xf3\xb9\x52\xee\xbf\x8b\x3f\x79\xe4\x75\xb1\xc8\x7f\x77\x09\x2f\x23\xd7\x7f\x92\xd0\xc8\x15\x00\xd2\x4f\xcd\x01\xf4\xc6\xa2\x1e\x74\x6b\x72\x50\xe5\x94\xd9\xa8\x8f\xfd\xfc\xe2\x84\xad\x71\x0a\x71\xa9\xe8\xd4\xf6\xc2\x2f\x05\x48\x1f\xeb\x4f\x39\x5a\x7c\x2d\xc3\x49\x5e\x48\x8b\xa4\x2b\xc1\x12\x6e\xf7\x4b\xfa\xda\x05\x7d\xca\x71\x7d\x09\xbf\x11\xa0\xc3\xfe\x5e\x30\xef\x13\x1d\xfe\xb1\x0f\xfe
\xe8\x7b\xfd\x17\xdb\xa5\x3f\xdb\x44\x45\xca\xd6\xdb\x11\xed\x3f\x59\xbb\xff\xa5\xff\xb9\x78\xe6\x0f\x60\xe8\x97\x03\xdf\xa0\x83\xdf\x43\x2d\xaf\xa4\x80\x2d\x9b\x78\xfa\xec\x22\x69\x4d\x1f\x56\x0a\x53\xe5\x7e\x77\x72\x79\xab\x63\xa8\x55\x1b\x2a\x4a\x4f\xeb\x3f\xd2\x53\x35\x0f\x27\x31\x67\xed\xe8\x74\xe9\xa9\xbc\xc2\xfb\xaa\x54\xa6\x78\x91\xe7\x62\x87\xe6\x8a\x45\x09\x01\x54\xa9\xee\xf9\xca\xb1\x90\x13\xcf\x03\x0b\x98\x02\xba\xf1\x9f\x81\xcc\x44\x7d\xef\x18\x44\x4d\x3f\xa3\x9a\x88\x60\x8a\xa0\x44\x68\x09\x1c\xa7\x86\xec\x29\x60\xe5\x73\x0b\xcf\x28\xec\x84\x77\xd2\xcc\xa7\xac\xa0\x31\xf7\xb6\x6c\xc0\xe2\x84\x75\x43\xd3\x20\x2c\xbf\x22\xce\xeb\x9b\x57\xb4\x53\x7d\xfc\x69\xf7\x9f\x04\x6e\xe0\x6c\x1e\x96\xe6\x2d\x79\xdb\xe6\xba\x5f\x5e\xb6\xbe\xf6\xb4\xaf\x11\xbc\xbd\x0b\xe9\x6e\x9d\x5f\x76\xad\x5b\x5a\xf7\x86\x7a\x98\xff\xe5\x6f\xa8\x1c\xd8\xa5\xd9\xfe\xb5\x9f\xb2\x53\x8b\x37\x9d\x92\x9c\xc3\xd6\xb0\xf9\xb2\xf5\xa7\x53\x76\x38\xf0\x6e\xe5\x36\x8d\x60\x78\x91\xfd\x19\xaa\xcc\xb1\xc5\x63\xc0\x7f\x85\x2e\x0c\x36\x0a\x5c\x5c\x56\xc9\x63\x61\xa3\x75\x5c\xab\x5c\xd8\xe7\xd6\x20\x06\x8a\x2d\x94\xda\xe9\x9b\x44\xae\x45\x70\x18\xb3\xd0\xef\xc4\x4c\xb3\xa4\x5e\xc5\x68\x1b\x24\x32\x96\xa1\x96\xfd\xd9\x82\x45\x84\x99\xc7\x08\x91\xd1\xc9\xcb\xe9\xe9\xb1\x88\x7a\x0e\xfe\xf0\x55\xcb\x6d\xc2\x82\x76\xfb\xb4\x8d\x2d\xe9\x98\x20\xaf\x2a\xc8\x3b\x63\x3f\x5d\x7d\x3f\xfd\x84\x98\x42\x5e\xeb\xe5\x2d\xef\xf3\xc2\x26\xd5\x73\xcd\x55\x86\x40\x36\xf4\x1e\x87\x1d\xc5\x66\x86\xad\x3a\x62\xb1\xe4\x21\x76\xb9\xd2\x82\xbe\xea\x5c\x18\x7c\x08\x44\x4f\xcc\x4f\x00\xc5\xbc\xdf\x83\x21\xfd\x69\xf1\x25\x3d\xa1\x59\xf3\xd7\xcf\xf2\x2e\x00\xce\xae\x92\xc7\x86\x07\x1f\xbd\xca\x44\xfb\x19\x3e\x04\xcf\x84\x20\x0b\xff\x57\xc9\xbe\x7c\x8a\x86\x6d\xed\x23\xc2\x7e\x5d\x6d\x46\x82\x72\x25\x86\x6e\xb9\xae\xe1\xd6\xba\xd5\xcc\xbf\xf7\x5b\xc1\xca\x20\xe3\x74\xd2\x54\x93\x15\x9b\x9e\x07\x17\x15\xfb\x4d\x83\x84\x16\xb9\xfb\x07\x82\x67\xda\x31\x76\x8c\x62\x7c\xd6\x38\xfb\x07\xac\x05\xba\xcd\x6a\xec\x90\x02\xd3\x80\x65\x10\xda\x7d\xe6\x50\xa6\x0f\xe5\xe3\xdf\xc3\x50\xc4\x1b\x80\x8e\x24\xcc\xdd\x3d\xb8\x6f\xd0\xe3\xba\x8f\xff\x04\x48\xe8\xe0\x91\xf6\x69\xc3\x3c\x04\xee\x7e\x41\x2f\x18\x63\x65\x54\x0c\x8a\xf8\x01\x5c\x76\x11\x80\xf7\xda\xd3\x47\xb0\x94\x23\xc9\xb5\x23\xb8\x2e\x04\x06\x8c\x99\x4c\xb6\x2a\x36\x73\x04\xe6\x77\x41\x57\x03\xbb\x41\xfd\x67\x14\x00\x7f\xcd\xbd\x1f\xcf\x67\xd8\xb9\xdb\xaa\x10\x23\xa2\xda\xd1\xc2\xec\x61\x7e\xdc\x38\xac\xc9\x69\xef\x3d\x4a\x2d\x12\x58\x93\xe8\xe7\xfd\x49\x3d\x6c\x8f\xa2\x4a\x47\xb5\x51\xfb\x02\x17\x77\x76\x9c\x7a\x30\xb2\x03\x9f\x47\x83\x97\xa3\x33\xcc\xd4\x6d\xab\x18\x98\xbd\xa0\xf8\x12\xf4\xb2\xbe\xa8\xf4\xe9\xad\x26\xd8\x63\xc4\x7f\x1e\xc2\xe0\x07\xb1\xfb\xe0\xa8\xae\x2a\xf0\xfa\xf9\xa9\x26\x94\xd5\xf7\x2e\x46\x21\x34\xef\xd8\x9e\xe4\xc4\x13\xbb\x4a\xb0\x03\xd9\x35\x13\xd9\xba\xfe\x4a\xdc\xa7\xf4\xb0\x02\x88\x91\xf6\x5c\x18\xad\x54\x4c\x03\x6e\x52\x70\x3a\x30\xcc\xef\x43\xf4\xd9\xb0\x90\xbf\x63\x87\xdf\xdb\x8a\xdd\xb1\x79\xdd\x47\x66\x81\xc7\xc9\x3d\x44\xa9\xa5\xf4\xf8\x88\xf0\xf5\x5d\xaf\xb5\x85\x2f\x9d\x0b\x16\x25\x54\x8e\x14\xb0\xee\xb3\x5e\x5e\x6b\xb8\x8d\x1b\x3d\xdb\xae\xf8\x9a\x81\xe1\xff\x78\xb5\x63\x73\x4b\xb7\x71\xdc\x5a\xf6\xd2\x8b\xff\x59\xf7\x9f\xeb\x5b\xbb\xbf\x6d\xf6\xbb\xba\xa9\xe0\xed\x2a\x75\x5c\x9d\x2d\xb0\x8b\xb0\x7c\x88\x9c\x45\xa7\xb5\xcb\xa0\x01\xf4\x14\x42\x38\xe7\x89\xe8\xbf\x4d\x71\xca\x40\x07\x46\x16\x36\x6f\x70\xa3\xe7\x72\xe1\x38\x7c\x0b\x7f\x03\xc5\x96\xe0\x74\x07\x1d\xb6\x02\xdd\xdf\x43\x91\xfb\xbc\xf2\x4c\x3b\x3b\x9b\x08\xde\x2b\xc0\x6e\x47\xae\x1f\xa1\xfd\x95\x22\xc1\x6e\x8
d\x8c\x4b\x90\x6a\x25\x70\x66\x05\xd4\x45\xdd\xdd\x85\xd9\x57\x37\x7b\xdd\xef\x74\x55\xa7\x59\xeb\x99\x39\xb6\x16\x9d\x13\xea\x04\xf3\x07\x85\xaa\x63\xd1\xe6\x78\xbe\x48\xb6\xe3\x15\xcf\xdc\xa9\xa1\xeb\x10\xb7\xee\xc7\xe1\x2a\x91\xfb\x1e\x6a\x64\x38\xe1\x92\x59\xeb\xaf\x4e\xe9\xb5\x1c\xc6\x90\x26\xb9\xa9\x6b\x03\x09\x41\xd6\xa0\x59\xb6\x74\x80\x09\x82\x97\xab\x2e\x81\x0b\x99\xf4\xec\x67\x29\xa4\xe1\xc2\x5c\x8a\x2f\xb3\x22\x8e\xc2\x4f\xa7\xf4\xed\x89\xae\x3b\x45\xcc\x61\xfc\xea\xf8\xa3\x8f\x14\xd3\x9e\x18\xcc\x60\xdf\x8b\x48\xb6\x75\xf0\x29\xde\xdf\x6d\xc4\xe0\x01\x52\xa2\xed\x9c\x03\x2f\x99\xc3\x49\x9b\x89\x74\xc9\x92\x11\x7d\x88\x2f\xa9\xbd\x7b\xa1\xb3\x9d\x0c\x30\xec\xbc\x93\xff\x1d\xfe\x6d\xf2\xf3\x15\x06\xd3\x77\x92\xb6\x7a\xbb\xf8\x23\x61\xdf\xc1\xb3\x3c\xad\x4c\x78\xfa\x17\x65\x4f\xa0\x7d\xf0\x7f\xae\x3d\xf7\xac\x7e\xc9\x90\x08\x9f\xa6\x55\x2d\xe5\x34\x56\xda\xec\x8f\xa8\x32\xfc\xad\x01\x18\xf4\xc7\x0f\x65\x3d\x26\x7d\x84\x5e\xcc\xe1\x7a\xaa\x7d\x21\xba\xf0\x3b\x0a\xa5\x2f\xa1\xc3\x6f\xe6\x56\xfe\xd3\xa9\x53\xf7\xf1\xb6\x8d\x7f\x39\x30\x7d\x16\xe4\x8c\xa1\x4b\x62\x6d\xfc\xae\x04\x0e\x7f\xf6\x61\xef\x12\x61\x29\xef\x52\x84\xe5\xbd\xcb\x3e\x7a\x35\xf2\x0e\x35\x35\x53\xb9\xc8\xff\xbd\xbf\x0b\xbb\x19\xf7\xa4\x0e\xc7\x95\x6d\x4a\x79\xa9\x8d\xd1\x52\xfd\xec\x2b\x69\xbe\x7b\x34\xe9\x56\xe3\xc2\xca\x0d\xf0\x2e\x7c\xb8\xea\x1f\x45\x34\xeb\x5b\x6e\x66\xef\x39\xe6\x82\xb7\x17\x1d\xa4\x81\x79\x6e\xe0\x8f\xd8\x9c\x2f\xba\x29\xbf\x5b\xb3\xc5\xe5\xe0\xd4\x87\xc7\xa2\xfe\xce\x33\xfd\x3d\xd1\x21\x61\x27\x05\xb3\xb1\x97\xd4\xef\x74\x9f\x04\xc7\xde\x49\x3f\xb5\xfd\x30\x11\x4d\x90\xe6\xfd\x67\xe4\x37\x35\xeb\x97\x37\x18\x47\x98\x3e\x5f\xa7\x83\x30\xaf\x02\x00\xf1\xe1\x19\xff\x92\x91\xbf\x98\x14\xd8\xe2\xb5\x9c\x1e\x7f\xf5\xba\x75\x85\x01\x6b\x8f\xce\x4a\xdc\x87\xac\x5c\xfa\xfd\xa5\x35\xa2\xc8\x5e\x20\xf4\x49\x70\xbe\x47\xc3\xe5\x67\xf2\x17\xf7\x25\x7b\x8f\x47\xb4\x54\x40\xcf\xf1\xcf\x88\xf6\x4a\xfd\xfc\x43\x46\x3e\x8a\x3a\x36\x76\xcd\xd9\xd7\xe5\xaa\x77\xa0\x03\x5f\x32\x04\xaa\x0a\xc0\xbb\x9e\x7b\x34\xd9\x6e\x16\x80\xf7\xd9\x18\xff\xf6\x86\x36\x96\x7c\x08\x6f\x64\x3a\x81\xb7\xba\xc4\x0e\xf6\x73\xc5\x15\xe4\x0f\x8f\x34\xec\x25\x38\xcf\xe1\xdd\x97\xf7\xa4\xd4\x84\xfb\xa7\x69\xb7\xa9\x43\xcd\x94\x34\x8b\x1a\xdc\x11\xf0\x8f\x7e\xc2\x3b\xf9\xcf\x7c\xd5\x0f\x31\x19\x34\x51\x98\x05\x50\x83\x08\x6c\xfb\x3d\x7a\xde\xdd\x55\x84\xd6\xdb\xa6\x8c\x20\xa1\x7a\x1a\xe2\xe3\xff\x45\x02\xa2\x8f\x4a\x19\xd4\x09\xb8\x4b\x58\x66\x7b\x6f\xe4\xad\xc5\x1c\x6e\xb6\x4b\x74\x45\xd0\xe2\x6b\xd1\x89\xd9\x36\x9f\x0c\xfe\xbf\x73\x6e\xaf\x02\x1f\x0f\xa9\xdf\xed\xcc\x91\x91\xa8\x07\x22\x06\xe5\x77\xe6\x91\x02\xd2\xab\xb8\xe3\x20\x57\x1f\x9b\xa0\x6f\xa6\xce\xd8\xc8\xc1\x98\xab\x50\xf3\x79\x15\x09\x1f\x6d\x30\x4b\xc0\x68\x43\x55\xcd\x17\x53\xd1\x31\x5a\x55\x5f\x5f\x2b\x52\x8d\xa3\x8b\x62\x55\xa4\xfa\x19\x8b\x6d\x6b\x14\xaa\x38\x53\x93\x75\xfd\xfa\x23\x54\x03\xf8\xeb\x4c\x67\x51\x03\xa4\xa8\x27\x32\x13\x49\xf3\xd0\x44\xde\xe3\xea\x48\xf5\x4e\xa0\x0b\x61\x8f\x25\xe7\x18\xa1\x89\x10\x5d\x94\x68\x6c\xd8\x26\x26\xf9\xba\x64\x7b\xd9\xa7\xa2\x08\x6c\x28\x82\xed\xe5\x84\x10\x3e\x3d\x3b\xc9\xc9\x0e\xbf\x87\x89\x2f\x9c\x83\xeb\x67\x9b\x1b\x09\x9f\xae\xcc\x7d\xe0\x25\x05\xe3\xd5\x83\x07\x4f\x36\x78\x3f\x27\x22\x5a\xe8\xca\xac\xd4\xb8\x79\xe7\xa5\x7d\x3f\x60\x16\xc8\x75\xcf\x3b\x2f\x41\x7d\x9e\xd6\x1f\x95\xb9\x87\x6c\x51\xad\x1a\xef\xd4\x3c\xd8\x4e\x81\xda\x06\x43\x79\xe6\x80\xd5\x3c\x39\x0f\xda\xb6\x4e\x7e\x96\x26\xa6\x39\x7c\xda\x27\x3b\x3f\x32\x02\x3a\xeb\x3d\xb7\x0c\x92\xb3\x16\x3a\xd9\x9f\x55\xb5\xdd\x59\x07\xb1\x64\x4f\xfe\x06\xde\x34\x51\xb5\x
c6\x71\x56\x83\x65\x6b\x68\x99\xbb\xe3\xaa\x99\x79\x96\xa9\xbd\xc0\xb4\x86\x55\x22\xe8\xee\x3a\x3e\x5c\xaa\x9f\xf4\x7f\x67\x65\x0c\x9c\x9a\x7c\xcf\x39\x30\x70\xbf\xf6\xfd\x5c\xe6\xff\xc8\xd7\x91\x82\x1b\x1d\xe2\x7f\x3a\x24\x72\x34\x6a\x07\x88\xb0\x38\xb2\x32\xce\xac\x66\xcc\x30\xb8\x5b\xdc\x09\x7a\x68\xf4\x54\xfc\xf9\x29\x90\xc1\xf3\xb7\x7d\x85\xc8\x5a\x1d\x20\x5b\x84\xe3\x93\xa7\x39\x6c\x00\x86\x30\x14\x86\x25\x0d\x8b\x0d\x24\xf1\x3b\x6b\xda\xf6\xa6\xcc\x83\xb0\xb5\x2e\xab\x53\x62\x27\xcb\x2a\xe6\xb5\x5d\xd6\xfc\x1f\x1c\x6b\x70\xf1\xee\xc2\x66\x4c\xf1\x74\x5a\xb6\x1f\x40\xf9\x6d\xa3\xc4\x63\x1f\xd9\x10\x70\x1c\x92\x52\xf9\x50\x7c\xf6\x81\xc6\x5e\x38\xdc\x1c\x45\xd0\x04\xb8\xdc\x9e\x7e\xb9\xfe\x3a\x30\x44\xd7\x0f\xae\xcc\x08\xb5\x14\xed\x7b\xde\x9b\xfc\xf9\xd0\xa4\x7a\xd8\x5e\x82\x1e\x02\xd3\xbc\xea\xc7\x07\x03\xdc\x93\x36\x41\x35\x95\xff\x00\x3b\x08\x87\x36\x06\x87\x78\xc2\x36\xe3\xac\x6a\xaa\x20\xe3\x82\x1d\x99\xc3\x4e\x76\x19\x6d\x00\xcc\xd8\xf8\x50\x0f\xab\xe0\x59\x18\x05\x63\x39\xc9\x6d\x2c\x64\xf8\x16\xc5\xaf\x17\x85\xe8\x95\xc3\xe7\xab\xd2\x4f\x3a\x21\x0a\xfc\xcf\x1c\xd5\xd8\x3f\xda\xda\xef\xa5\x39\x42\xed\x10\x4a\x71\xd3\xba\xcf\x2f\xb5\x4f\xfa\x30\xb1\xda\xf3\x5b\x69\x8e\x3b\x0c\x55\x33\xf9\xd9\x4c\xc9\x4c\xe1\x14\x80\xf6\x00\x5b\x21\x60\xbb\xac\x42\xc9\x91\xa2\x21\xf3\xc2\x72\x06\x1b\xdb\x8a\x81\x70\x4a\xb4\xcf\x1a\x59\x99\x56\x83\xfc\x93\x50\x31\x36\xc8\xbe\x68\x21\xda\xe6\xf3\xcb\x9e\x48\x58\x1c\xa2\x65\x7e\xa8\x3b\x87\xac\x4c\xb6\x47\x82\xa8\x3c\x47\x2e\x65\xbb\x6e\x3c\x43\x93\x0a\x7d\xe9\xca\xb6\xbc\xd6\x31\xba\x22\xb3\x6e\x6a\xde\xa2\x5b\x9e\xd8\x39\x49\x9c\x9a\xb7\x7e\xb7\x95\x5c\xbd\x91\x14\xad\x0e\x00\xd6\x62\x5e\xc3\xa4\x5e\x83\x55\xa9\x66\xe8\x64\x34\x9f\x82\xbe\xbc\xc3\x8a\x80\xf4\x32\x78\x12\x20\xb0\xb4\x7f\x44\x9f\x43\x04\xec\xef\x57\xac\x33\x1b\xf2\xf0\xe1\x37\x93\x44\xc6\xd8\xd2\x1d\x29\x8b\x5b\xba\xc3\x5c\xe0\xbc\x3f\x7c\xfe\x19\xe6\x8f\xe7\xde\xfa\x19\x6b\xcb\xe6\xac\xed\xc3\x42\x8b\xcd\xd6\x86\x7e\xeb\x2d\x4d\x6c\x9e\x3c\x9f\x91\x32\xe2\x25\x98\x0f\x38\xcb\xac\x9f\x9b\xe5\xc7\xfa\x51\xeb\x82\x9b\x34\x26\x78\x66\x79\x69\xac\xd5\xfd\xe1\xfd\x09\xe5\x97\xa6\x69\xc3\xd4\x62\x31\x6e\x71\xbd\x3b\xfa\x16\x14\x6d\x21\xd7\x99\x32\x21\x47\xb5\x3b\xe6\xdb\x7b\x1b\xc2\x07\xe8\x3b\x90\xc3\x8d\x18\x19\x7a\x47\xe6\x5f\x0e\xeb\x9a\x0f\x86\x85\xe6\xed\x9f\xb8\x4e\xa5\x7a\x92\xef\xb4\x2a\xc7\x0d\x56\x6b\x77\x50\xbb\x47\xde\xb2\x0f\x91\xf6\x69\x55\xf7\x29\x75\x06\x0c\xa3\xd8\x7f\x10\xe8\x5f\xd5\x25\xb0\xf3\xea\x4b\xdb\x83\xbc\xd3\xd8\x20\xcb\x44\xc2\x47\x3a\x06\x1b\x3c\x41\x88\xa2\x3c\xce\x9b\x6d\x96\xa5\x5c\x6a\x23\x9c\x7b\xec\x82\x96\x72\xfc\xb0\xf5\xc5\x66\x98\xde\x7d\x3a\xc5\x4d\x5a\x0a\xa3\x0b\x96\xd2\xcb\x56\x61\x4f\xd5\x69\xe7\x9b\xcc\x15\xec\x9b\xd7\xd2\xb6\x00\x84\x54\x0b\x22\x87\x4d\xc2\xfd\xdd\x22\xfc\x7b\x99\xcf\x8b\x0d\x89\x88\xf2\x98\x57\x36\xe0\x96\x19\x69\xd7\x97\x7f\xb6\xdc\xd8\x6b\x4b\xfe\xc7\x83\x72\xe2\xc1\xb9\x59\x4b\xaa\xdf\x35\xa9\x1d\x84\x14\x0a\x35\x0d\x26\xc7\x15\xfe\x0a\x09\x7f\x7f\x73\x00\xf6\x7f\x82\xe5\x61\xa6\x20\x5d\x0f\x5a\x03\xec\xe7\xfc\xa5\xf0\xf7\xfe\xb3\x2a\x0b\x34\x35\x7e\x15\x70\x57\x58\xac\x46\xc3\x90\x15\xa2\xbf\x19\x9e\xb7\xde\x2d\x99\x95\xc0\x8b\x0e\xc1\xb7\xc1\xe4\xb6\x73\x0e\x63\xca\x2c\x3f\x85\x77\x18\x2d\x54\x42\xe1\x45\x26\xf4\x13\x13\xae\xa6\x12\x5c\x50\x77\xae\x92\x13\xe5\x7a\x5f\xfd\xea\x82\xc8\x29\xa8\xa4\x8d\xa3\x2e\x4e\x7b\x9c\xc1\x36\x08\xa6\xfa\x2c\x83\xbe\x09\x41\x48\x20\x35\xfb\x78\x1c\x91\x4e\x26\x16\x8f\x55\x6c\x82\xfc\x7f\x81\x1f\x62\x73\xb9\xc9\xe5\x45\x35\x86\x53\xd3\xc9\x2b\xa7\x08\x91\xcf\x61\x32\x49\x16\x49\x79\x4a\
x41\x37\xe6\x88\x03\xe2\xf8\xaf\x55\x28\x3d\x01\xc9\x29\xfd\x96\x15\x99\xa0\xd9\x34\x2f\x0d\x82\x13\xaa\x22\xeb\x24\x9d\x7f\x95\xc7\x21\x74\x12\xde\x02\x08\x07\xcc\x66\x06\xb1\x06\x6f\x76\x6a\xba\xd0\xb1\x1e\xb7\x22\x3c\xec\x57\xe1\x6a\x45\xa6\x7b\x04\x88\xe8\x64\x1d\x6d\xa3\x75\x2a\x40\xc4\xa6\x2d\x5d\xda\xe3\x49\xdd\xc7\x78\x8a\xce\x8c\xff\xbf\xd2\xf4\x01\x84\x77\xfa\x67\x16\xf5\x52\x6c\xf0\xfb\xe1\xc7\x88\xbd\x03\xfc\xef\xbc\x98\x31\xcc\x52\xc6\xb9\x59\xb0\x8d\x40\xfc\x14\x8b\x1a\x1f\x69\xcc\xe1\xb4\x3b\x46\x7c\xca\x98\xa4\x4c\x19\x45\x8c\x97\x42\x6c\x4c\x57\x2f\xae\x3b\xef\xb5\x4f\xe3\x9f\x21\xc9\x3a\x15\x79\xaa\xea\xe3\xd8\x30\xb1\xb3\xe1\x05\x17\x19\xd9\xcf\xca\x1e\x3b\xa8\xf0\xd8\xcc\x3b\x66\x8e\x2d\xb6\xa3\xfb\x27\xaf\xe9\xa7\xe6\x17\xf4\x0a\xf4\xbd\x3e\xb1\x55\xa3\x2b\x84\x7d\xaf\xea\xbc\xdb\xf9\xc5\x68\x55\x59\xa1\x3f\x23\x33\xf2\xe9\x20\xbf\x23\xf3\xab\xfb\x85\xf1\xe3\x3d\x3d\xa0\x55\x0d\x07\x32\xc6\xf9\x3a\x98\x5b\x05\xfb\xaf\x8d\x5e\xaf\x8e\x40\x58\xe6\x3f\xc3\x99\xf3\x95\x65\x09\x51\xd7\x3e\x02\x59\x5b\x0c\x89\x38\xed\x4f\xc4\xe6\xba\xa2\xd3\xff\xe8\xf3\xa6\xee\x47\xf2\xfa\x5b\x94\xb7\xd0\xe7\xe0\x94\xf7\x9b\xb1\xa5\x67\x27\x19\xe5\xb6\x53\xee\xed\xe7\x72\xc2\x19\xb3\x4b\xf8\x39\x40\x20\x12\x1f\x7f\x86\x73\x9a\x23\xe8\xad\x08\xb2\x02\x45\x0f\x1b\x04\x22\x23\xb1\x84\x3f\xf5\x50\x01\x93\xcb\xac\x13\x8e\xe0\x5e\x25\x0c\x60\x40\x25\x0e\xe0\x1d\x40\x1f\xd0\xf2\x4f\x13\xa0\xe3\x94\x36\x44\x59\x03\x1a\xd7\xcd\x9d\xce\x30\x87\xd8\x71\x98\x49\x55\x05\xdb\xfe\x55\x34\x58\x3b\x3a\x1e\xd8\xd8\xbe\x30\x1f\x34\x55\xdb\x90\xaf\x26\xac\xb1\xf1\x94\x28\x4d\xb0\xa9\x4d\x9b\x60\x74\x05\xce\x20\xeb\xc7\x73\x56\x66\x23\x7b\xd4\xab\x18\x63\x43\x0e\xf6\x0b\xae\x58\xc5\xad\x00\xa8\x4a\x8c\x7b\x75\x81\x0e\x5b\x08\x3b\x0f\x70\xfb\xe4\x20\xd1\xd8\xbe\x07\x37\xbc\xb5\xfc\x70\x37\x91\xfe\x72\x75\x33\x47\x59\x0d\x05\x47\xf8\xc5\xc7\x3f\x93\x06\x81\xd1\x97\xa5\x81\x4f\x76\x72\x28\xe9\xd5\xcf\xbf\x5f\x38\x6b\x59\x45\x2b\xef\x6d\xa5\xd0\x6b\x52\x0e\xec\x21\xb0\x0e\xb2\x3b\xe7\xfe\x9f\xf7\x33\x6d\xa4\x6c\x62\x4b\x47\x98\x7c\xf2\x75\x87\x0f\x4e\x94\x16\x00\x99\xf7\x97\x69\x71\xad\x91\xf0\x0a\xd4\x63\xd0\x98\x01\xf4\xbd\x73\x64\x57\xf1\xf0\x85\xfd\x20\x4a\x65\x8e\xf4\x95\x75\xfe\x4f\xc0\xfc\xdc\x93\xbc\xd4\x7d\x02\xe4\xea\xdc\xc7\x89\xa3\x73\x20\x1c\x7f\xf6\x0a\x2f\xea\xaa\x52\x7f\x30\x10\x2e\x9f\xc5\x6f\xeb\x6a\xaf\xc8\xd5\xda\x2f\x42\xe0\x60\x0f\x1a\x8c\xfb\x46\xb3\xaf\x9d\x98\xf2\x44\x90\x80\xb8\x2f\xd0\x4e\x11\x56\xaf\xdb\xa5\x20\xd8\x1a\x5e\x18\x60\xe1\x0b\x69\x2f\xc3\xee\x29\x43\xdd\x99\x7f\x56\x1a\x08\xa2\x26\x8c\xec\x55\x5f\xf3\x15\xdd\x25\x6b\xeb\x68\x66\x08\xe6\xbc\x50\x71\x89\x85\xc0\xa0\x57\x77\xc0\x16\x4e\xc6\xc5\xf2\x97\x74\x8f\x8b\x1c\x98\x3c\xeb\x98\x0e\x19\x2a\x84\xca\xd7\x26\xd8\x05\xe4\xbe\x3f\x02\x83\xdf\xc3\x4e\xc8\x2e\x8d\x3e\x0b\x8c\xea\xa8\xb8\xbc\xd6\xcc\xbf\xec\xe2\xf2\x7f\x85\xfd\x9f\xc2\x67\xf3\x15\x18\x91\x9d\x16\x81\x90\x75\x79\x87\xeb\x0b\x47\x20\x05\xd2\x4e\xd5\x76\x07\x5f\x19\x48\x77\xad\x4c\xee\x5a\x23\x11\x76\x59\x00\xe4\x65\x40\xfa\x11\x0f\x1f\xd3\xbf\x50\xf8\x8b\x86\xa1\xd7\x2e\xa3\x0d\xdb\x3c\xae\x82\xd9\x85\x52\x76\x89\x6c\x63\x40\xec\xe7\x9f\x72\x61\x58\x93\x76\x30\x3f\x91\x17\xa9\xef\x67\xf1\x8d\x8b\x3c\x96\xce\x6f\xf5\x03\x89\xa1\x6a\x3f\x01\xca\x58\xb9\xd7\x4f\x8d\x83\x98\xc4\x26\xc0\xa9\xea\x13\x55\xda\x60\x9d\x10\x68\x78\xe0\xdd\x87\x82\x66\xd7\xa7\x90\x74\xcd\xcb\xb6\x28\xd3\xeb\x94\x5e\xa4\x0e\x6d\xb9\xed\x28\x5d\x88\x12\xe5\x83\x70\xb5\x4a\x80\x4b\x7d\xec\x14\x55\x1b\x7d\x3f\xa3\x2c\x66\xbe\x1f\x5f\x74\xbf\x25\x2c\x42\xbf\xf2\xa2\xfe\x2a\x47\xff\x0b\x76\x22\xf9\x37
\xdf\x84\x8e\xe0\x45\x52\xff\xca\x5b\x56\x40\x80\x78\xde\x27\x27\xb2\x32\xde\x9f\x20\xde\x97\xfa\x6f\x42\x0c\x77\x0b\x70\xd0\x68\xc1\xb4\x57\xcb\x85\x59\x7f\x38\xb7\x7b\x13\xbf\xdd\x8b\x3f\x28\x19\xef\xd9\xb6\x6b\x73\x83\xa7\x57\xf9\x33\x39\x8d\x9f\x40\xb1\x0f\xe3\x75\xb3\x80\xf2\x0c\x8e\xb7\x3c\x32\x82\xad\xf9\x0e\xa4\xf7\xdd\x02\xe6\x41\xf3\xc7\x55\xe5\x43\xab\x00\xdd\x90\xfc\xed\x3e\x49\x02\x93\xc2\xf9\x94\x4d\xd0\xb7\xb8\x3a\xef\xe6\x25\x0e\xbc\x3a\xd3\xa4\xfa\x25\x3c\xff\x45\x53\x3d\x41\xcf\x8f\xf0\xc0\x20\xd8\x7c\x6e\xb9\xae\xe2\xe5\x7f\x9b\x0d\x06\xad\xda\xec\xdb\x84\x59\x4f\xb0\xfb\x41\x71\x90\x13\xea\xfe\x6c\xc6\x1b\x76\x4e\xd2\x22\x51\xd0\xf3\x5a\xf2\xc8\xbf\x7a\xf1\x94\x00\x3d\x9f\xa4\x63\xd5\x0f\xda\x54\x8e\x41\xff\x5c\x54\x00\x51\x73\x25\xbc\xdb\xf2\x79\x8b\xa2\x21\x17\x3d\xf0\xfb\x15\x95\x6b\x8b\xc2\x90\xc2\x2a\x75\x8f\x7c\xdb\x13\x27\xb2\x54\x03\x05\x01\x38\xcd\x28\x55\xdb\x13\x67\xfc\xfb\x91\xb1\xab\xee\x74\xa1\xb0\x19\x46\x52\xdc\xd3\xec\xf3\xac\xbd\x24\xda\xed\x62\xfc\x9f\xaa\xac\x00\x6b\xfb\xbd\x1e\x9c\xe0\x03\x5a\xc3\x5c\x88\x63\x95\xb1\x44\xc5\x4f\xa3\xb8\xda\xba\xe9\xa3\x5c\xb4\x09\xbc\x6e\x66\xba\xd9\x89\xf0\xb5\x73\x8c\xc2\x61\x60\xd5\xd7\x3e\x37\x76\x7f\x66\x2e\x84\xfd\x77\x9c\x5a\x5b\xf0\xf5\xcb\x29\x9e\x01\x86\x42\x8c\xa1\x1c\x8a\x0c\x11\xfb\x63\xbc\x06\xae\x09\x58\x81\x08\xa5\xd8\x68\xd3\xbf\x86\x23\x04\x19\x0e\xd0\x4f\x0d\xba\xb7\x5f\xe4\x63\x0f\xe2\xff\x2d\x18\x3c\xed\x11\x59\x03\x84\x2e\x6c\x5b\xd9\xff\xbc\x8e\xd0\x24\x34\xcc\xcb\x46\xb4\xee\x88\x98\x54\x24\xfa\x16\x81\xdb\x35\x2c\xcd\xae\x88\xb9\xe9\xbc\x33\x71\x6d\x14\x92\x03\xbd\xfe\x79\xbd\xda\xbb\x96\x56\xc0\x2d\xb8\x09\x5a\x6f\xad\xf1\x76\x39\xf6\x01\xbf\x0c\x3e\xdc\x7e\x35\x92\x4e\x80\x5c\xf1\x73\x4e\xed\x5d\xd9\x59\x76\xb4\x10\x9c\x73\xf6\x2f\xf5\xc7\xd4\x87\x2a\x57\x78\xb2\xc0\x23\x1b\xa4\x27\x27\xae\x2b\x87\xbf\x46\x5e\xc9\x47\xb0\x32\x49\xed\x25\xac\x0b\xfa\x0f\x39\x49\xf1\x67\x73\x33\x1b\x16\xfd\x0c\x63\xd4\x5b\x3c\xfa\x3e\xbe\xf0\xde\x0d\xb2\x05\x2e\xef\xf3\x2f\x51\x78\xcd\xf5\xda\x7f\x68\xb4\x26\x18\xea\x00\x38\xc8\x9a\x70\x41\xed\x6f\xcc\x7e\xa2\xb7\xf0\x81\xdd\x83\xc4\x7f\xab\x2d\xee\x96\xb0\x67\xe4\xee\x0c\xe4\x63\x5d\x90\xe7\x25\x0d\x44\xd9\xb1\x61\xa8\x50\x82\x6d\x00\x52\xa4\x06\x59\x8a\x04\x7b\x4b\x74\xf5\xf0\x2b\xe0\xac\xd2\x10\x9e\xe0\xbd\x6f\x3e\x10\xec\x8c\xad\x81\x43\xca\xb5\xf9\x91\x20\x42\x56\x2c\xfc\xf2\xbe\xc5\xf3\xaf\x31\xb8\x19\xb4\x0f\xca\x3b\x23\x7e\x0a\x08\x0c\x61\x9d\x77\x96\x33\x2c\x69\xc9\xe2\x43\xd4\x31\x51\x6f\x34\x5a\xf9\xa2\x1d\x77\x60\x11\x32\x7c\x36\x97\x59\xd2\x5c\x4e\xb1\x90\xcf\x9f\x34\xf3\x6c\x64\x46\x0f\xfe\xe1\x20\xd4\x6d\x27\xfd\xad\xf8\x1a\x19\xbe\x03\xee\x40\x92\x5b\x76\x39\x70\x17\x96\xc2\x67\x56\xf7\x04\x97\xab\x5f\x46\x1c\x0f\x9a\x94\xcf\x3c\x3c\x33\xcb\x36\x00\xe1\x44\x47\xcf\xf8\x3f\x29\xfc\x6b\x9b\x46\x07\xd0\xaa\x9f\x41\x36\x70\x7a\xdd\x71\x16\xdf\xd5\xe3\xa8\x73\x9c\xc3\x30\xb9\x1d\x46\xc6\x8b\xda\x25\xfa\xe1\xa2\x73\x20\x8b\xcb\x8f\x21\x47\x3c\x5a\x6a\x0e\x54\x01\x4e\x94\x70\x16\x7d\x99\xcf\x96\x28\x34\x33\xf7\xf4\x80\xd1\x8c\x70\xe9\xff\xc4\xea\x47\x3a\x0b\x07\x32\x86\x07\x67\xec\x52\x22\x32\x4f\xd0\x83\x66\xbb\x04\xaa\x8b\x3b\xd8\x4e\xb2\xc8\xb3\x01\x17\x95\xc3\x23\x4e\xfc\x21\xb9\xfd\xdf\x9f\xab\xf0\x91\x15\xd7\x73\xe4\x2e\x14\x08\x89\x14\xff\x77\xe4\x61\x61\x38\xd3\x0e\xf7\x25\xb1\xf9\xa1\x12\x55\x6e\xd2\x07\x30\x4b\x3e\xeb\xea\x85\x2e\x62\xe8\x42\x05\xfa\xaf\x36\x8b\x93\xbc\xf0\x56\xb8\x75\x64\xde\x06\x6a\x59\xe6\x68\xe1\x97\x69\x73\xa0\xe4\x04\x47\x1a\x93\xc4\x0a\xd1\x06\xb0\xad\x31\x0f\x52\x19\x15\xa8\xe7\x5c\x71\x80\xeb\xdc\x41\xf2\x75\x56\xe
9\x6a\x8b\x0e\x4d\x4e\xb8\x4f\x2a\x70\xf8\xb8\x09\x74\xe0\x54\xe6\x37\xbb\x7f\x04\x96\xec\xa4\x83\xef\x25\x20\xd5\x3d\x30\xea\xdd\x56\x2d\xc5\x40\xf3\x02\x86\xf5\xc9\xac\x54\x15\x2b\x83\xd8\x84\xdd\xb3\x56\x4d\x9a\xf6\xca\x24\xc5\x5b\x5e\x04\xd9\xd9\x7e\xdf\x43\xba\xe1\xba\x04\xd1\xcd\xdb\x66\x1c\xd0\x3a\xd5\x06\xb6\xf3\xcd\x36\x79\xf9\xb8\xdc\x04\xba\x43\x35\x6b\x55\x36\x97\x6c\x80\x99\x9d\x40\xdf\x79\xd7\x9e\xce\x15\x07\x6b\x03\xcc\x19\x54\xb6\xcd\x7f\x40\x43\x03\x30\x8f\x7c\x24\x60\xe7\x6b\x28\x0f\xa6\x97\x40\x73\x44\x1e\x3d\x88\xa7\xf3\x61\xf6\x70\xc2\x33\x4e\xb5\xa6\x3a\x61\x8a\xd8\xa2\x2c\x18\xdd\xb6\x5e\x02\xca\xdf\x32\x23\xe9\x8f\x29\x49\xd8\x81\xd4\x25\x78\x75\x73\xe8\x19\x16\x3e\x04\x3a\x44\x44\xdd\x6a\x69\x3e\x70\x9c\x77\xd1\x71\xa1\x2d\xb0\x47\xf2\x7e\x8f\x48\xe5\xcb\x3e\x59\x46\xd2\xfb\x77\x17\x20\xaf\x1d\x26\x00\x77\x31\xa6\xa4\x31\x10\x8c\xaf\x20\x45\x84\x4b\xcf\xf2\x79\x49\xe7\xcf\x1a\x45\xe1\xd9\xd8\xaf\x13\x1f\x57\x52\xd3\x1e\xe6\x9c\xf0\x3c\x39\x53\x66\x22\xf4\xc1\x98\xa4\x9d\x7b\xeb\xf5\x67\xa2\xbb\xd6\x5e\x1a\xa0\xb5\xf4\xb3\xba\xf2\x6b\xe1\xea\x42\x66\x7e\xb8\xf5\x8a\xf8\x3f\xcb\x07\x6c\x9d\x25\xd8\x07\xb2\x44\xe6\x3b\xa6\xcc\x60\xba\xc7\xa4\xb9\x02\x56\x1c\x5a\xf0\xb4\x20\xb2\x15\x4c\x43\x31\xf4\xbf\x04\xcf\xf7\x24\x40\x7d\xfe\x07\x45\x8f\xc2\xc8\x26\xd6\xdc\x55\x71\xee\xbf\x11\xaf\x14\xbc\xe2\x35\x85\x8f\xb0\x4e\x69\x06\x2a\x91\x2d\x8f\x71\x7b\x41\x7c\xd5\x1c\x48\x61\x03\xe8\x29\xc0\xf7\x63\xca\x8d\xd5\xaf\x3b\xb7\x23\x12\x9a\x20\xca\x78\x00\x6d\x77\x13\xea\xa5\xb6\xdc\x69\xdb\xa0\x6d\x51\xcd\x2e\x70\x21\x0f\x28\x9d\x77\x17\xd5\xb1\x0b\x1c\xb4\x79\x94\xab\x4a\x84\x35\x81\xc0\x42\x08\x7d\x0e\x6f\x15\xef\x72\x2f\x3b\xd2\x8f\x7f\x9a\x87\x05\x7d\x42\xbf\x03\x70\x44\x84\x58\xdf\x21\x5c\x5a\xe6\xe6\x32\x0c\x40\xbd\x85\x5c\x87\x9d\x71\x8b\x84\x5d\x66\x2d\xc0\x70\x68\x09\x13\x62\x51\x05\x90\xad\xda\xa2\xac\x27\xde\x92\xce\x20\xe5\xdb\x2e\x2f\x18\xf9\xdb\x41\xc4\x7d\x8b\x34\xec\xc4\x8d\xc1\x22\xed\xc3\x92\xec\xc5\x83\xb5\xaf\x78\xed\xc4\x4f\xfc\xa7\x44\x92\xbf\x1a\xe4\x91\xbf\xf2\xd9\x79\x0c\xe0\xf7\x2f\xd2\xd7\xfe\xb2\xd2\xdb\xe8\x78\x3c\x13\xaf\x77\x8f\x62\x3f\x7f\x6c\x96\xc8\x6c\x96\xfe\x25\x7b\x00\x6e\xb2\x69\x87\x27\x08\x4c\x48\xfd\x06\x08\x3a\x75\x16\xcf\x07\x4f\x19\xf8\x96\x29\x34\x7a\xfa\xfc\x8c\x62\xea\x26\xd6\xc5\x54\x7c\xb1\x98\x6c\xab\x21\x50\x7d\x4f\x93\x5f\x62\xee\x0b\xf3\xc3\xdc\x87\x50\xc1\x60\xdc\xec\x60\x56\x55\xb1\x30\x51\xa6\x23\xcc\x1d\x64\xc8\x69\x8e\x08\xe4\xa9\xe5\xb6\x4d\xe1\x12\x36\xcd\x71\x57\x17\x59\xdb\x2f\xf1\xf3\xf9\x0f\xb3\xdc\x76\x1c\x92\x0f\xbe\xfe\x16\x24\x78\x26\xf2\x5c\xbf\xa1\x1e\x1b\x2c\x4d\x8e\xbc\x37\x60\x1f\x4b\x74\x7e\xed\x87\xa7\x90\x06\xe8\x62\x9a\x52\x15\x65\x3e\x44\x31\x13\xe8\x72\x57\x80\xf1\x77\x1b\x5d\x53\x3b\x47\x01\xd8\x7f\x22\xc2\xea\x94\xa7\xc9\x94\x44\xe1\xdf\x24\x9e\x9e\xd2\x38\xaa\x09\x02\x97\x46\xc9\x61\xa6\x14\xb3\xc4\x94\x06\x7d\x7a\x6c\x64\x17\x2e\xae\x63\x0d\x03\x5e\x8c\x66\xec\x5c\x69\x48\x73\x4f\xad\x95\xe4\x52\xff\xdc\xd8\xfc\x22\xe4\xa3\xb9\x4a\xd0\xbe\x82\xa3\x44\xe2\xbe\x5d\x75\xc4\xef\x73\xeb\xde\x38\x9b\x1f\x47\x06\xf7\x8e\xa5\x85\x68\x8f\x56\x60\x94\xa0\xe1\x17\xa4\xc1\x73\x40\xc6\xdc\x08\xba\x90\xe6\x53\x97\x6d\xf0\x39\xf3\x7a\xa9\x10\xf0\x8c\xf0\x91\xfd\x82\x79\x5d\xf4\x1d\x8e\xb3\xe2\xc9\x67\x24\x25\x73\x8f\x0b\x7b\x67\xf6\x03\x9c\x66\xe0\x6f\x35\x5f\xd1\x0f\x38\x15\xef\x3a\x26\xc1\x6b\x28\x25\x55\x38\x8e\x58\x13\x06\x82\xfe\x6e\xe8\xcc\x07\x62\xfe\xfc\x70\xb4\xce\xea\x19\xfc\x93\xd4\x3a\xda\x05\x4c\xc3\x9b\x33\x12\x44\x9f\x60\xa2\x13\x40\xaf\xea\x49\x3c\x4f\x6f\x75\x3f\x4f\xb5\x19\x40\x00\x8f\xec\x7a\x
f8\xe2\xf0\x24\x8e\x80\x98\xe7\x1a\xad\x60\x1b\x69\xf3\xf0\xa4\x3c\xfb\x39\x87\x55\xcd\xb3\x31\xc7\x9e\x12\x2b\x38\xb5\xdf\xbf\x23\x1b\xf6\x22\xf9\xd3\x2f\xa7\x29\xaf\x6d\x11\x90\x67\x74\xfa\xe6\x25\x9c\x73\xe4\x2d\x9d\xe6\x80\xf9\xf3\x9b\xb4\x7c\x4f\x8a\x21\x5c\x7f\x7b\xa9\xed\x80\xfc\xda\xb0\x04\xec\xd1\x9e\x64\xf7\xe7\x8e\xc8\xfd\x70\xf2\xed\xb8\x37\xe9\xc3\xc1\xfa\xdc\x97\xad\xf3\xa1\x0b\x8b\x91\x0f\xe8\x5e\x34\xf6\x2a\xa4\x1d\x0f\xf5\xfa\x41\xc2\x1e\x43\xce\xbd\xb7\x0d\xcb\x11\x3c\xff\xa7\x20\xff\x7d\xd4\x71\x50\xc2\xd7\x2f\xcf\x7e\xa8\x5d\x1c\x23\x6e\x4a\xae\x4b\x03\x0a\x94\x88\x37\x1b\xce\xe8\xcb\x0d\xf8\xc9\x68\xf2\x34\x94\x4b\x36\xeb\x43\x69\x5d\x85\x42\xbd\xd0\x50\x14\xe7\xe0\x44\x5b\x1a\xe2\xfc\x92\x1d\x86\xe2\x3d\x96\xa1\x65\xa9\xc1\xcc\x67\x8f\x51\xb4\x1a\xac\x88\xe1\x13\x63\x87\x3f\x80\xcf\xde\x47\x3b\x61\xd7\x40\x6d\x88\x0c\x49\x81\x77\x2e\x40\x78\x61\x7f\xa0\xa5\x8c\xe5\xdd\x4d\xf9\x87\xfc\xb3\x03\xda\x0a\x9f\xb6\x25\x35\x15\xc6\xb9\x0e\x64\x77\x23\x41\xc1\x0f\x48\xa6\xe0\x3e\x68\xd0\x36\x74\x08\x8b\x6c\xa5\x22\xf6\xf5\x06\x95\x9b\x68\x7e\xbd\x66\x05\xcf\xf5\xf5\x9f\xec\x76\x68\xb4\xd8\x37\xf8\x48\x6b\x50\xaf\xa2\x96\x82\xf2\x3e\xfa\x93\xd6\x4d\x3d\xbe\xe7\xd0\x26\xa4\x83\x1d\x85\x37\x3f\x61\x5f\xea\x41\x8d\x40\x0d\x17\x8c\xbe\xdc\x6c\x2b\xdc\x7b\xa1\x51\x35\x7e\x88\x59\x0e\xda\x87\x3a\x0b\xfb\x05\x76\x17\x5f\xcb\xde\x0a\x5f\x4b\x6a\x69\xec\x66\xce\x78\x31\xca\x16\x6c\xda\x08\xdd\x03\x82\x5b\x7d\x30\xe5\x4b\x9e\x04\x3d\xe7\xd8\x93\xc3\x41\xa0\x5f\x0f\x21\x94\xe0\xeb\xde\x76\x4d\x6b\x34\x1b\xce\x4a\x6a\x24\xc6\xf3\xda\x6e\x0e\x59\x02\x64\x01\x74\xe0\x40\x10\xd9\x18\xb4\x83\x5e\xa9\xda\x2e\x11\xe0\xec\x09\x43\xa0\x70\xd6\x56\x26\x55\x9f\x0e\xf9\xf4\xda\xe8\x8c\x9b\x8e\xf9\xe7\x09\xc4\x6d\x7f\x1f\xa6\xfc\xbd\x1b\x58\xab\xbb\x31\x5b\xa5\x22\x73\x20\x3b\xad\xe4\x19\xb4\x22\x49\x8a\x0f\x98\x41\x45\xf1\x51\x18\xed\xd4\xab\xa7\xcd\x11\xa8\xd0\xd1\xa5\x38\x47\x4d\xb8\xe8\x7e\x38\x06\x6e\x4b\x58\x5a\x99\xfa\x5e\x3d\x9e\x98\x7c\xfe\xb7\x3a\x12\x55\xd4\x7b\x78\x77\x97\xb0\x0f\xda\x18\x68\x56\x42\x46\xa0\xfd\x31\x86\x6c\xec\x33\xe0\x2a\x06\xea\x73\x60\xef\xdb\xf4\x06\x59\xc6\x68\x5d\x63\x62\xb8\x71\x90\x8c\xc2\x13\x13\xd7\xd2\xe7\xd7\xec\x18\x66\xa9\x0b\xa2\x6d\x62\x7b\x26\x75\x3b\x3e\x8e\xae\xa2\xdb\x41\x3b\xa0\x4b\x2c\x28\x74\x3b\xd8\x38\xf1\xd0\x16\xfa\x5d\x17\x84\xc9\xcb\x1d\xb9\x53\x1b\xa7\xb8\x44\x30\x9a\x26\x91\x4e\x8c\x51\x98\x12\x79\x7d\x45\x31\x02\x9d\xb3\xba\xb5\x28\x9a\xa4\x8b\xec\xf9\x55\xf1\x6e\xc8\x63\x8b\x99\xa8\xc3\xee\x4c\x2f\x22\xd3\x8d\xce\x4d\x6b\xf4\xc1\x82\x24\x8b\xb6\x49\xba\xbe\xb1\x46\xba\x27\xb8\x1f\xa3\x4b\x94\x7f\xa4\xba\x0e\x93\x9b\x1f\xcf\xe8\x71\x00\x76\xfe\x63\x2b\x45\xc8\xa6\xdb\x85\x87\x6f\x51\xa4\x29\x76\xe9\x3f\xc4\x56\x49\x93\x70\xc7\xa9\xe0\xdb\x7f\x0e\x26\x29\x4c\xd0\x21\xd9\xba\xd0\x2e\xac\x0d\x5b\x81\x78\x81\x47\xb7\x33\x89\x30\xfd\x49\x1d\x13\x76\x2f\xea\x2d\x61\x41\x1c\xe5\xe6\x1d\x53\xa5\xaf\xbe\xd4\x14\x74\x21\x3a\x06\xd2\xe6\x2d\x0f\xa7\x8a\x3c\x3c\xa5\x8d\x17\x2b\xea\x8a\xe6\x65\xf4\x9a\x07\x6e\x6a\x90\xdc\xa6\x40\x38\x58\x04\x29\x49\xff\x29\x36\x42\xf2\xd8\x3a\xbe\xf0\x12\xad\xbf\xb4\x70\xf3\xe2\xd9\xfa\xfe\x4c\xe2\xc4\xc9\xa6\x00\xb6\x09\xd9\x45\xa1\x6c\x80\xea\xac\xd4\xc9\x4a\x0e\x3b\x73\x1e\xbb\xe2\x1c\x85\xaf\x67\xe4\xe3\x65\xbe\x48\x5f\xe5\x1e\x9f\x81\x70\x0f\x6f\xad\xf8\x72\xf9\x0d\x04\xf5\x9b\xe3\xe1\x1f\xac\x43\xaa\x48\x3e\x85\x22\x0e\x44\xbe\xf9\x91\x94\x6b\x88\x0e\x76\xba\x3b\xbe\x03\xcc\x78\x57\xf7\x2b\xf5\xee\x44\xa4\x93\xd8\x60\x20\xd3\x05\xbd\x97\x87\xf7\x56\xae\x35\xcb\xa3\xfc\x67\x61\xbe\x6e\x5c\xb2\xe1\xa1\x24\xa6\xef\x7b\
x0e\xb3\x73\x79\xb6\x41\x29\xf0\x9e\x65\xb5\x14\x3b\x71\xef\xc9\xa8\x11\x32\xcd\x03\x17\x86\xb7\x1b\x48\x8a\x9c\xf8\x8e\xdc\x1a\x5c\xd9\xf2\x15\x87\xff\x22\x82\x5a\x39\xde\x43\x53\x90\xd5\x44\x4a\x9f\x70\x65\x72\xc1\x7f\x9c\xf3\xf0\xd6\xe2\xfd\x6b\xec\x3e\x64\xc7\xd4\x24\x05\x34\x90\x79\xa7\x1c\xf4\xe2\xd7\xfc\x9a\x59\xd6\xbf\x04\xe4\xb4\xd0\x7f\x2a\x02\xd8\x53\x41\xd6\xec\x83\x39\x74\xce\xdb\x78\x81\x6e\x77\x69\x60\x67\x2d\xbb\x2a\x67\xcb\xfc\xf7\x7c\x29\x8d\xcb\xef\x66\x3b\x5d\x68\x6d\xe6\xe3\xa0\x14\xfb\x1f\xdc\xdf\xbd\x64\xad\x85\xa3\xd6\x04\x06\xd6\xd9\xac\xa5\x3f\x92\xaf\x18\xe1\x54\x54\xe5\x4f\x78\x57\x6f\xbd\xdd\x95\xe1\x72\x30\x5d\x5a\x65\x46\xbe\x1d\xec\xcd\x60\x37\xe8\x0f\xb5\xef\x86\xec\xa7\x1b\xab\x27\x5b\x2e\xf0\x5e\x65\x1b\x86\x26\x2b\xf7\x5f\xe2\xbb\xbe\x23\x1a\x2f\x7c\x30\xef\xa9\x9e\x9b\xc2\xea\xe8\xef\x82\xc8\x9b\xca\x46\xec\x6d\xdb\xb2\x53\x36\xe8\x76\xdd\xf8\x07\xb8\x53\x78\x83\xdd\x12\xae\x5e\x9f\x9b\x42\xb6\xeb\x23\xa3\x6f\x88\x52\xe4\x1f\x64\xe7\x4e\x56\x0c\x12\x14\x08\x02\x12\x2f\xdb\xcc\x85\x02\xe4\xaa\xdb\x2e\x18\x3e\x1c\x05\x21\x37\xb8\xee\xe8\xb7\x9c\xc5\x89\x36\xde\x91\x09\x26\xfc\x0d\x1f\x4b\xf6\x69\x10\x85\x24\xe8\x16\x1d\x99\x8d\xc7\x79\x6f\x61\xb3\xfe\x57\x96\x35\x39\x1d\x99\xa2\x6d\x03\xe9\x41\x92\xb2\xe0\x0c\x89\x41\x38\x71\x1f\xf3\xa0\x4e\x0d\x1d\xe6\xae\xf2\x0e\xd4\xe2\x2a\x57\xa3\xb7\x5d\x45\x96\x02\xd0\x20\xb0\x0b\xb3\xca\x31\xd9\x46\x5a\x59\xae\xf2\xc7\xbb\xe0\x14\x1a\x1f\x5c\x51\xda\xd7\x4f\x77\x77\x15\x36\x7a\xaf\x55\x9f\xd9\x36\xb1\x31\x6d\x5f\x00\x21\xd8\x9b\x80\xd0\x87\xb8\xd3\x45\x44\xfb\xf2\xab\x96\x1d\x9d\x3c\x2a\x08\xe0\x9a\xbc\x0d\x7b\x81\x01\xc7\xce\xc3\xb4\xc0\x4f\xd4\x47\x79\x5e\x28\x43\x70\xc2\x78\x34\x72\x64\x8e\x85\x7e\x76\xb4\x8d\xa6\xa4\x47\x66\x71\x44\xd4\xca\x91\x85\x93\xf4\x1a\xe1\x78\x95\xd9\x5a\xc9\x43\x6c\xee\x2f\xab\x05\x28\x6e\xe8\x27\xca\xf7\x30\x88\x4e\x4c\x51\x77\xc6\xd6\xdd\xcc\xd1\x9e\x35\xd8\xc2\xcb\x8d\xb2\x82\xd6\xbb\xd9\x22\x1f\x30\xb3\x63\x44\xa3\x3d\xdb\xa8\xc8\xe2\x2b\x58\xbd\x57\xb2\x5a\xe3\x92\x65\xbb\xbb\x86\xca\xaf\x69\xff\x86\xfd\xd2\x4a\x65\x11\x2c\x99\xe2\x96\x79\xa0\x0a\x61\xe6\x4f\x9b\x26\x35\x64\x5a\x67\x26\x33\xd4\x2f\xf2\xf8\x2a\x01\x9a\xb3\xdc\xd3\x93\x84\x17\x0c\xff\x81\xa0\x60\x41\xf2\x8f\x57\x53\x90\x1d\xbc\x21\x23\xce\x1d\x6c\x9b\xee\xa2\x81\x3c\xd3\xcb\x2a\xc7\x26\x49\x05\x4a\x56\xba\x25\x2e\x80\x49\x36\xea\x85\x7a\x8c\xb3\x74\x55\x4a\x03\xbe\x03\xfb\xac\x2b\xbb\x20\xe4\x2b\xca\xe2\x3d\x43\xe2\x26\x4b\xf6\xd6\x6b\xca\xf0\x0d\xe0\x6d\xbb\x1e\xb6\xc9\xfa\x69\xe3\x65\x72\x46\x5e\x9b\x0d\xbc\x1b\xd5\xb8\xe4\x36\x8a\x57\x99\xa4\xc9\xb0\xdf\x4c\xef\x38\x29\x2c\x0e\x79\xb7\x4e\x79\x80\xb8\xc0\xaf\x52\xfb\x0e\x98\x2c\xf3\xbf\x1a\x2e\x44\x90\xb5\x5e\x6a\xb9\x90\x86\x7f\xda\xaa\x2a\xe5\xe3\xa1\x8b\xfe\xf0\xf6\x94\x3c\x9c\xf6\xdb\xc9\x03\x70\x86\x67\xd3\x03\x5e\x0c\x2f\x45\x05\xda\x67\x49\x4a\x29\x4c\x37\x61\x5f\x4c\xa0\x6c\x58\x94\x61\x29\x6f\x1a\x2a\xc1\x83\x98\x2a\x04\x38\x1a\xec\xd1\xac\xe1\x76\xca\x06\xd8\x47\xc8\xcc\xe9\xbe\x37\x8d\x50\x9e\x8d\xd1\xc4\x61\x8b\x63\x86\x24\x6c\xa7\x58\xa0\xd9\x29\x49\x6b\x80\x8d\x1a\x31\xae\xa3\x69\x4f\x61\x2f\x1f\xdd\x17\xf8\xe9\xf8\xa7\xcb\x6a\xe2\xa2\x27\xa9\x3d\xbb\x0d\xd7\x2d\xf2\x28\xed\xdf\xe6\x23\xeb\xbf\xe4\x17\x5f\x11\x46\xa5\x14\x3e\x64\xcd\xf0\x47\xfa\xfa\xb6\xf2\x48\xec\x4a\xa2\x51\xd3\x22\xfe\x61\x3d\xcf\xc3\xdc\x3a\x36\x4f\x72\xae\x50\x6c\xaa\x7b\x79\xc0\xa3\xac\xe3\x93\x57\x28\xeb\x38\x1c\xd3\x50\xe9\x24\xff\x9f\xb4\x09\xf6\x7d\xc7\xec\x84\x71\xcf\x28\x12\x70\x41\xd4\xb7\xf2\x3c\x45\x3a\xd4\x5f\x6e\xf2\x53\xb0\x79\xc3\xcf\x80\x2d\x36\x57\xfd\x42\xc7\xcc
\x42\xf4\x60\xff\x48\xdf\xb9\x23\x7b\xcd\xdb\x2f\x58\xfc\x95\x73\xb1\x17\x85\x82\xec\x60\xe5\x7a\xb3\x25\xab\x26\xd8\x49\x86\xd8\x73\x6d\x62\xc5\xdd\x36\x1e\xab\x9a\x34\x62\xe7\x58\x01\xdd\xcf\x4e\x22\xdf\x69\x6f\x61\x27\x3b\xbf\x1e\xf5\x64\xce\x7f\xbc\xa1\xce\x63\xf2\x2e\x25\xbc\x05\xe4\x21\xb5\xdf\x21\xbd\xc0\xb8\x3e\xe9\x17\x14\x9e\xf5\x57\xfc\x71\x9e\xb4\xf4\x81\x20\x82\xa7\xc3\x66\x0b\xc0\x29\x6d\x85\x5d\xed\xbc\x17\xf6\x16\x6a\x46\xcc\x91\xbc\x09\xaf\x5a\x12\x26\xe0\xc4\x40\x26\x3f\x69\x3e\xdb\x3c\x7c\xb8\x97\x43\x53\x87\x56\x44\xb9\x46\x12\xe3\x75\x25\xdd\xb4\x7f\x69\xba\x44\xea\x29\x0c\x19\xe6\xc8\x40\xcc\x9a\xe9\xdc\x54\xea\x92\xd9\xbd\x9a\x55\x9a\xc2\xa1\xa3\x60\x33\x2a\x43\xab\xe5\x83\x14\x42\x88\x37\xe3\xde\x1e\x6a\x06\xa9\x47\xe3\x7d\xd5\x14\xfd\x20\x3c\xe4\x60\x6f\xe7\xf8\x65\xa6\x41\x83\xa1\x4e\xf8\xd6\x6f\xa9\xb9\xe1\xbb\x19\xbe\xc6\x88\x02\x64\x57\x67\xb1\x39\x26\x5c\xa4\x10\xe1\x43\xcd\x85\x9b\x5a\xbf\xce\xc4\xc7\x00\xb4\xd3\x80\x09\x6f\xe8\x1a\xd5\x55\x83\x41\xaf\x99\x5b\x88\x64\x95\x35\x2b\x44\x1c\xbb\x7a\x41\xde\x53\x8a\x07\xc4\x93\x3d\xfb\xaa\x75\x97\xc2\x1d\x66\x4b\x5f\x1d\x3f\xa1\xb8\xf8\xe8\x2d\x7a\x3a\xff\xdc\x46\x3e\xb0\x2b\xe9\x3f\x0d\xf2\x9b\x5d\x29\x5b\xd4\xb5\x65\x88\x40\x61\xb4\xa7\x66\x3e\x3e\x9c\xc0\x12\xba\x82\x72\xad\x3a\xa3\x75\x15\xdb\x03\x0c\x65\x5a\x85\x49\xfe\x8c\x7e\xd6\xbc\xeb\xe5\x1c\x10\xa4\xcc\x44\xed\x89\x2d\xfd\xe5\xcc\x00\x80\x49\xbe\x61\x36\x21\x77\xfa\xaf\x23\xb5\x37\x89\x4e\x28\x3e\x1c\xb2\x22\x01\x40\xa6\xf0\x56\xd1\x27\x2c\xd8\xd7\xaa\xa0\xca\xea\x9a\x63\x6f\x69\xd5\xac\x6c\x04\xb4\xc5\xd8\x78\x62\x02\xbf\xb7\xa8\xca\xdb\x9b\x42\x85\xf6\x51\x36\x75\xaa\x0f\x15\xd6\x84\x68\x84\x59\x7d\xcc\xdb\xc6\xe9\xee\x93\x47\x07\x58\xb5\x7b\xb3\xfd\xb9\x06\x40\xb6\xe6\x12\xdd\xcf\x96\x06\x6a\x83\x91\xa6\x1e\x6b\x5b\x93\x7c\xab\xc0\xae\xd8\xaf\xc5\xd5\x45\xa5\x09\xd1\x64\x95\x37\x55\x0d\x31\x09\x6c\x7c\x9a\x39\xd5\x2f\xd4\x13\x7c\x4a\x1e\xa9\xa7\xc0\x72\x50\x83\x66\xb0\x26\xe7\x17\x6b\x44\xb8\x7d\x75\x7b\xc8\x9b\x4f\x92\x27\xd6\xb3\x8a\xc7\x68\xc3\xdc\xe2\xb6\xd6\xd4\x17\xde\xbc\x7c\xd4\xa9\xaa\x07\xf3\x60\x97\xba\x7f\x23\xbf\x33\x4f\xb4\x9f\x52\x8e\x01\xc2\x3a\xdd\xfc\xea\x94\x19\xd4\xd9\xec\x2a\x97\x33\xbd\xf5\xfc\x33\x31\xbf\xca\x06\xf7\x42\xfd\xc9\x7d\xcb\x30\x78\x29\x8e\xc3\x2e\xc5\x67\x83\xa5\x44\x43\x0e\x01\xcb\xd2\xb7\x64\xed\x6c\xd8\x4a\x63\x13\x0d\xa5\x73\x14\x8f\xcb\x1f\x83\x36\xdb\xe3\xc0\xeb\x56\x1f\x8c\xff\xc3\x66\x8b\x32\xd1\x6a\x4a\x89\x09\xa1\x6a\x5a\x12\x3b\x50\x4b\xda\xce\x16\x49\x6a\xe7\x9e\x4b\x43\xfe\xca\x64\x13\x28\x5e\xe9\x6f\x5e\xd5\x9e\x92\xe7\x31\x66\x4e\xe9\x4a\xe4\xce\xca\x04\xcf\x4d\x41\x06\x91\x6c\xd0\x17\x4f\xbd\x9d\xa1\x53\xf3\x6f\x17\xdc\x94\x70\xe6\x9f\xed\x67\xe2\x0a\x09\x5b\x3b\x62\xad\x53\xfd\x70\xfa\x9c\xe0\x9e\x2d\xa3\x29\x02\x1a\x90\xa2\x94\xb0\x9e\x8a\xec\xcf\x8f\x7c\x97\xca\x19\xca\x94\xd2\x1a\x07\xa0\x23\xb5\x74\xcd\x72\xce\xf2\x0b\x98\xca\x11\x39\xdf\xce\x53\xd2\x40\x5c\x5b\x6c\x7c\x5b\xcf\xa9\x88\x6c\x3c\xd9\xf4\x39\xfd\xda\x5d\xd1\x03\x5b\x7d\x56\x86\x04\x05\x41\x3a\xa2\x01\xc1\x9e\xea\xe1\x4a\x14\x36\x8c\x64\xd1\xc3\x61\x68\x52\x00\xd8\xb0\x57\x66\xeb\xb5\xa2\x0e\x64\x18\xc4\x3c\xd1\x25\xac\xab\xd8\x21\xd3\x98\xa5\xe8\xe4\xa2\xa4\xd0\x2f\xa3\x73\x16\x6e\xdd\x68\x88\xa9\x89\x16\x6e\xcb\x53\x8e\x46\x86\xdb\x59\xb1\x05\xf6\x55\x73\xeb\x03\xe3\x04\x86\x35\x58\x69\x16\x91\xa4\xe8\x8a\x65\x8d\x28\x84\x81\x0b\x86\x5a\x5d\x67\xf8\xd9\x80\x6f\x36\xab\x47\x36\xd8\x94\xfc\xd1\x50\x8e\x58\x2d\x4d\xd6\x87\x8c\x47\xa5\x89\x93\x2c\xaf\xc4\x6a\x43\x96\x69\x68\x63\xd6\x2b\xde\xe0\x2f\x9c\xcd\xdb\x6c\x0f\x76\xd1\x28\x7d\x0
9\xd3\x29\x68\x3c\x16\x45\x3c\x9c\xbc\x45\x40\x07\xfc\xb0\x94\xd1\x00\x33\x15\x45\x9c\x16\xce\xe7\x9e\x70\x2a\x60\xc0\xc6\x1d\x0f\x91\x2d\x7a\x4a\x93\x72\xa6\x3f\xe1\xc4\xa3\x4d\x31\x91\xe8\x70\xd3\x26\x2b\x12\x67\x50\x45\xdb\x66\xee\x66\x4f\x7f\x2c\x83\x7a\x61\xab\x9d\x6c\xdc\x4f\xd8\xd0\x0b\x2a\x48\x1b\xd5\xe7\x1f\x95\x70\x3b\xce\x27\x21\xf6\x71\x6e\x4c\x65\xd8\x64\x85\x36\xa6\xb6\xe6\x98\x6e\x40\x47\x84\x3a\x19\x59\xb4\x61\x89\x88\x8e\x1c\x41\x0b\x2b\x78\x5a\xb3\xcb\xbb\xec\x60\x2d\xb3\x77\x75\xba\x8c\x41\x72\x17\x14\x5c\x33\x87\x23\xb1\xc6\x67\xc4\x6b\xa2\xf7\x30\xf3\x28\x42\xc8\xb3\xf0\x3c\xb7\x23\x70\x54\x1a\x50\x15\x7e\xd4\xe7\xea\xe7\xfc\x53\x88\x16\x2c\xb4\x6e\x76\xb4\x6c\xea\xf5\x7b\xb8\xcd\x71\x97\xac\x9d\x83\x9e\x45\x30\x45\x32\xda\x05\x3f\xc3\xca\xed\x99\x2b\x17\x8b\x67\xa4\x08\x3e\x33\x12\x23\xbd\x8a\x78\xe6\x34\x4a\x30\x93\x42\x15\x53\xe1\xef\x1a\xa5\xad\x6d\xfe\x5a\xb0\xc9\x33\xc2\x25\x9f\xda\x2b\xd9\xae\xfa\x25\xa1\x8c\x44\x6f\x4f\x59\x91\x64\xed\x9e\x7d\x65\xa3\xf3\x95\xdc\x76\xe0\x4d\xe8\xc7\x43\x18\x5b\x06\xb9\x96\x83\xe9\x64\x15\x65\x4b\x5e\x24\x56\xa8\xb7\x93\xff\x8d\x47\x70\x1b\x73\x5d\xc2\xb9\xc1\x76\x0a\x81\xc9\x83\x08\x66\x43\x3d\x63\xa0\xc6\x3a\xe5\x34\x3e\xd8\x56\x2f\xf3\x87\xaa\xa9\x62\x08\x4f\xd0\xa1\x86\x1b\xe6\xa0\x36\xf5\x50\xaa\x42\x2d\xce\xc0\x6b\xd1\x30\x9b\x0a\x1b\x66\x5b\x6c\xf1\x07\xba\x11\xfb\x48\xed\xe4\x61\x3e\xd0\xa2\xf0\xf6\x96\x95\x70\x6c\x94\x65\x66\x4e\xcc\x60\x7a\xb6\xf6\xd8\x51\xa4\x87\xb9\x40\xd3\xe5\xf2\x0b\x08\x8d\xb1\x16\x39\xfa\xc0\x68\x85\x85\xe6\xe6\x9c\x23\x3c\x76\x2d\xee\xcf\xc5\x3b\x18\x37\xcc\x8e\x9a\xad\x68\x61\xaa\x44\x7b\xae\x5e\x96\xe3\xf6\xd7\x0f\xfc\x1f\x32\x1d\xcb\xe1\xdb\xb5\x2b\x9c\x36\xb1\x69\x69\xf3\x1b\x8f\xe7\xc8\x6f\x87\x3b\x02\xbe\x7c\x62\xc6\xc0\xa0\x68\x12\x25\x99\xf7\x27\x59\x45\x1e\x2a\xab\x2e\x99\xfd\x46\xb2\x8f\x3a\x15\x03\xde\x23\x51\x58\x1d\x33\x25\xd4\xda\xd7\x2b\x1d\x0e\xf8\xc3\x6c\xb5\x15\x39\xb6\xb9\xe7\x16\x3b\x46\x85\xe2\x99\x12\x41\xc0\x9e\x8c\x41\xf1\xba\x0d\xc3\x7b\xeb\x64\x9b\x2c\xfe\x09\x4e\x9e\xa4\x76\xd9\x76\x90\xb1\x64\xc3\x4c\xcb\xb0\xb9\xaa\xeb\x05\xa2\xb0\xbf\x77\x6f\xbd\x3f\x42\xd8\xc3\xb7\x30\x95\xc4\xe6\xda\xec\x34\x45\x34\x47\xe4\x2b\x36\xa5\x17\x7b\x41\x7d\xba\xd5\xcd\xba\xd3\xfa\x8c\x4c\x8c\xeb\xd7\x64\xee\x9d\xf8\xad\x2b\xb4\xf7\x70\x51\x35\xce\xec\x97\x4e\xea\x9c\xad\x52\xfc\xc0\x41\x34\xee\xd7\x46\xc4\xb6\xea\x6a\x82\x85\x1d\x57\x18\x8f\x81\xe5\x42\x0a\x5a\xf7\x61\xd4\xeb\x87\x8d\x9a\x7a\xb7\x3e\x18\x55\x30\xf0\x38\xd8\xd5\x94\xf3\x00\x06\x79\x6e\xb5\xce\x89\xcd\xbd\x6c\x77\x54\x36\x55\x6a\x84\xab\xd9\xa8\xf0\x59\x72\x22\xed\xe8\x98\x16\x4d\xb3\x68\x6e\xd5\xfc\xff\x8b\xbc\xf5\x3f\xe6\x3b\xfa\x64\x8a\x99\xb0\x3d\x69\x4b\xc8\x98\xe1\x55\xe2\x50\x0f\xfa\x64\x6b\xeb\x8e\x45\x2f\x2c\xfa\xd8\x36\xd4\x01\xe8\x6a\xf0\x81\xba\xf2\x65\xcf\xab\xc4\xcc\xd6\x45\x08\x75\x67\x13\x6a\x24\xcc\xfe\x10\x10\xc8\x7d\x66\x1e\xc3\x55\xeb\x97\x75\xdf\x49\xaf\x0f\xa7\x2d\x7a\x89\xb5\x4e\x1a\xfe\xa1\x37\xb4\x3c\xf8\x42\x1d\xa9\x33\x52\x33\xf6\xa4\xce\x54\x17\xf6\xb3\x1d\xbe\xeb\x52\x9e\x1c\x22\x57\xff\x6a\x21\x1a\x76\x11\xd6\x96\x82\x3b\xec\xe1\xa4\xdd\x65\x9f\x82\xfe\xed\x64\xe5\x9f\x10\x28\x3c\xbb\xe0\xff\x46\x3d\x0f\x12\x25\x69\xa3\x75\x7d\xd4\xc7\x8a\xe4\x5f\x60\xd1\x6b\x69\x2e\x5b\x77\xe3\xf7\xc1\x86\x4b\xee\xa6\x1d\x38\x6f\x52\x0d\x59\x15\xf0\x95\x5f\xd7\xc6\x97\xfa\xb9\x85\x74\xb6\x99\x08\xbd\xd1\xba\x86\x49\x97\xc2\xcb\xba\x34\xb0\xb9\xd3\x14\xbe\x20\x15\x9f\x3c\xe6\x5f\xd6\xed\x7f\xea\x4a\xfd\x97\x57\x2a\x54\xdc\x24\x90\x92\xa2\x78\x22\x52\xfa\x71\x3c\xe6\x50\xc6\xd9\x8e\x58\x5d\xb7\xb4\x2f\xc4\x95\x
d1\x32\x7b\xc5\x6d\xc3\x99\x65\xcb\x09\xcb\x00\x3f\x34\xef\x05\xd8\x23\xc5\x68\x53\x5c\xc8\xea\xaa\x57\x7f\x77\x6b\x96\x29\x05\xf3\x40\x38\x71\xa4\xf1\x54\x30\x08\xcd\x90\x93\xad\x7c\x89\x86\x32\xee\x6b\xa2\x0f\x61\x27\xea\xcb\x8f\x83\x7e\x81\x84\x6a\x96\xbf\xef\xf7\xb3\xfb\xff\xfa\x56\xce\xf2\xdf\x59\x06\x5c\x37\xb5\x9b\x5f\x02\x17\x5f\xf8\x91\x29\xc8\x17\xeb\x0c\xcf\xd3\x4f\xd1\x8a\xfe\xb1\x89\x58\x5f\xfa\x07\x5a\x34\x57\x09\xd9\x22\x68\xab\x96\xf7\x92\xca\xb4\xc2\xb6\xca\x87\x79\x65\x58\xf0\x1b\xc5\x3c\x8f\xd1\xe4\xda\xbf\x31\x58\x34\x68\x6c\xa3\x37\xd0\x53\xde\x06\xc2\x97\x63\x30\x6f\x70\xd1\xbc\x4f\x06\xc5\x91\x4f\x4b\x6f\x29\xb8\xdf\x3c\x27\xdf\x79\xee\x14\xdb\x2b\x1b\x28\xd7\x17\x71\x94\xde\xa7\x7d\x7a\xb6\x1c\xde\x52\x01\xbd\x71\xfd\xee\x11\x31\xc2\xe9\xfa\x9d\xb8\x0b\x7a\xd3\xa1\x94\xef\x82\x94\x5b\x98\x67\xa9\x2f\x66\xf3\xae\x3c\xe4\x5e\x25\xec\xb1\x61\x93\xc5\xfc\xdd\x59\xd1\x24\xab\x57\xa1\x2c\x88\xac\x9e\xfa\x9c\xf2\xc6\xc2\x4c\xfd\xb2\xf7\x1e\x42\x25\xd1\x22\x5e\x10\x3f\xb1\x79\x55\xaf\xd6\xdc\xb2\x33\x70\xfd\xa7\xed\x15\xfa\xc5\x97\x4d\xba\x73\xeb\x70\xf1\x18\x61\x9c\x2f\x21\x93\xf5\xfb\xf5\xef\x03\x1b\x0b\xff\x3d\x42\x4b\x59\xa3\x13\x6e\x5b\x5a\x52\xf6\x6e\x25\x0e\xde\x1e\x28\x43\xf1\x52\x73\x89\xbd\x91\x7f\xc0\xc7\x17\xcd\xba\x77\x21\xb6\x72\xcf\xfc\x09\xec\xe8\x93\xd3\x1d\x93\xd3\x3d\xcd\x08\x65\xf1\x3d\xfd\x8d\x70\x3e\x99\x03\xde\x61\xba\x7a\x2b\xb3\x37\x6c\xe1\xd0\x72\xef\xbf\x1f\xef\x53\xbc\x21\x32\x50\x6a\x6b\xe5\xb7\x89\xdd\x77\x6b\x41\x81\x73\xf4\x60\x83\x6a\x4f\xe1\x83\x55\x23\x0d\xd7\x8a\xf6\x2d\x9e\x84\x98\xa2\x48\xbb\xcd\xf4\x39\xb3\xc1\xdf\xb5\x24\x29\x85\x22\x1f\x24\x4b\xc2\x94\xb5\x41\xb5\x51\xb3\x7c\xb2\xe1\xd2\xdc\xb1\xf6\x60\xed\x60\x2c\x3a\xcd\x75\x2a\xfa\x16\x09\xb9\x7e\xc3\x91\x95\x74\x5b\xac\xc4\x27\x0e\xeb\x4d\x27\xbd\xe2\x2e\xdf\xdd\xab\xb9\x4c\x5c\x00\x4b\x9a\x60\x28\x52\xd8\xd1\x79\x8a\xde\x12\xdf\x31\xbc\x7e\x67\x0e\xc2\xf3\x6a\x68\xc7\x3d\x46\xc3\xfc\x3f\x36\x9b\xec\x7b\x08\xdd\x10\x85\xf2\xa8\x6f\x4b\x28\x89\x86\xa1\xfe\x7f\x54\x5d\xe9\x92\xb3\x3c\xaf\xbc\x97\xf9\x7d\x6e\xca\x80\x03\x0c\x8b\x79\x01\x27\x4f\x72\xf5\x47\xad\x6e\x39\xf3\x55\x4d\x15\x0a\x01\x86\xb0\x78\x91\x7a\x91\x0c\xd5\x9b\x47\x47\xca\x64\x55\xa4\x9e\xd3\xa2\x45\x15\xa7\x80\x53\x41\xd9\x4a\xd5\xa6\xac\x46\xfd\xca\x6d\x40\x7e\x61\x18\xc5\x32\x5f\x3f\xa1\xea\x7c\xc5\x1d\x00\xa8\xe5\xbc\x3f\xdf\x8f\x9c\xcf\x51\xd5\x8a\x41\xb8\x96\x40\xdd\x4a\x60\x64\xaf\x48\xdd\x31\x6f\x47\x4d\x6a\x0e\x05\x33\x4a\x5d\x29\x4c\xb1\x27\x49\x3d\xd6\x6d\x3f\x2b\x91\x38\xc8\x33\x1f\x2c\x41\x61\xf4\xd7\xd4\xae\x46\x45\x83\x0d\x95\x48\x32\x72\x1f\x5f\x46\x1b\x0b\x54\x84\x1c\xda\xe8\xfd\xd3\x5c\x44\xde\x89\xae\x1d\x5e\xc6\xf2\xe5\x3c\xb0\x66\xc5\xe4\xde\x95\x12\x39\x44\xfb\x4c\xae\x17\x26\xa7\xdf\xf2\x13\x2d\x44\xb2\xc7\x77\xdd\x77\xd1\x85\x64\x06\xbc\xed\xf2\xfa\x9d\xc2\x3f\x58\xfa\x57\xfd\x22\x6a\x34\xa0\x3d\x1b\xeb\xce\xe7\x2c\x4a\x1b\xca\x4d\x64\x0f\x39\x8c\xe5\xc1\x32\xd2\xbf\x93\xa3\xc3\x53\x0a\xd0\x67\x56\xa1\x2c\x53\x2d\x06\x2c\xc3\x07\x4b\x45\x48\x28\x90\x55\x94\x25\x6d\x6a\x01\xa6\xe7\x1e\x85\xfd\xc7\x2b\x7c\x45\x40\x12\xee\xcf\xd4\x6c\x44\xc8\x79\x82\x3b\xb6\x2f\xf7\xc1\xaf\x27\x34\xaf\xe6\x6f\x4d\x8a\x6e\xc6\xc9\x1e\xc4\x39\x02\x5f\xca\xcd\xef\xd4\x2c\xcd\x9a\x1d\x66\xf5\x30\xc7\x13\x73\xeb\x3f\x38\x39\x06\xa1\xe9\xa8\xbf\x85\x0a\x59\x20\x58\xa8\x98\x73\xba\x3a\x2e\x23\x9b\x35\xa9\x56\x35\x4a\x21\xab\x91\xa1\x0e\xbb\x16\x2c\x4a\x9d\xb7\x26\xfe\xd0\xc1\xe2\xaa\x03\x6a\x5c\x8c\x58\xd9\xb1\xe5\xaf\x1b\xd2\x1e\x65\x3b\xbc\x8a\x72\xac\x75\x50\x89\x2a\xaa\x56\x36\xcf\xaf\xd4\xad\x9a\x3f\x5a\x13\xe6\x67\
x47\x60\x7d\x51\xa7\x3a\x25\x84\x35\x89\x61\xa4\xc4\x8d\x4d\xc6\x2e\x71\x26\x8f\x89\x3e\x47\xc8\xe4\xb3\x67\x3e\xb2\x9c\x54\xe0\xad\x28\x7c\x1e\xe5\xb1\x58\x8c\x3a\x1f\x51\x38\xca\x2e\xe6\xe5\xd1\x3e\x15\x8d\xc4\xed\x2d\x95\xa2\xd6\xba\x86\xc1\xc8\x93\x82\x06\x30\x4b\x66\x19\x0a\xcc\xcb\x59\xb6\xc9\xa7\x32\x33\xc7\x17\x3f\x07\x66\xd1\x2f\xcb\x50\xed\x5f\x84\x0d\xfa\x91\x7e\xfd\x60\xe5\x55\x2f\x74\x6a\x61\xb8\x80\xdc\x1c\x2b\x4c\x8e\xf6\x42\x10\xe8\x55\x97\x27\xd6\x64\xb6\x64\xcd\x42\xfc\xc3\xf0\x92\x74\x57\x0f\x31\x08\x92\x8b\x00\xa7\x8a\x6b\xb8\x7f\x05\xe8\xf6\xba\x1e\xae\xa5\x85\x51\xa4\xdf\x62\x2f\x55\xf9\x6b\xb7\x97\xb3\xa9\x75\x0d\xde\xaa\xed\x24\x75\x51\x9e\xeb\x27\xc4\xb9\x7c\x0b\x37\x2c\xf5\x40\x6e\x28\x98\x18\xb3\xf6\x85\x49\x91\x97\xb2\x00\xca\x20\xf3\x6a\x77\x1b\xe4\x81\xc9\xf6\xad\x5e\x3e\xd1\xda\x50\x41\xe5\x32\x95\x85\xd1\xc6\xfc\xed\x56\x6d\x3c\x29\x0e\xd2\x7e\xff\x2a\x94\x08\xb6\xdb\xa4\xb0\xcc\x60\x11\x6b\x60\x60\xa6\xa9\x78\x65\x03\xfa\x21\x97\x3e\xd6\xa3\x10\x34\x9e\x51\xfd\x4a\xfb\xfb\x1b\x0e\x3a\xc8\xcc\x89\xed\x46\xfc\xd1\xe6\x15\x56\x16\xb3\x6e\x39\xa3\x68\x14\x68\xc1\xaf\x44\x97\xa1\x01\x56\xe4\xa1\xee\x7c\xa5\xaf\x4d\x33\x6a\xe9\x3c\xd1\x2c\x6a\xc2\xd6\xc4\x3c\x36\x38\x25\xb0\x52\x85\x7c\x0f\x7f\x7f\x5e\x47\x3c\x2a\x52\x04\x6b\xbc\x18\x54\xb5\x5a\xdc\x3b\x1a\x8f\xa5\x28\x7b\xfe\x55\x7c\x82\x4a\x8f\xea\x48\x76\x63\x54\x82\xeb\x47\x78\x74\xa8\x94\xd2\x8f\x73\xd7\x85\x64\x98\xb5\xd6\xb2\x52\x41\x82\x44\x3b\xda\xb4\xfa\x8c\xca\x56\x8f\x24\x67\x14\xbf\x80\x23\x53\x0d\xa7\x87\xb7\x63\xf3\x50\x01\x4c\x53\x3c\xa4\xb7\x0a\x36\xff\x82\xb2\xf4\x8a\x42\x93\xfd\x03\x55\xbd\x5c\xcf\x3a\x56\x5b\xac\x62\xcf\x7d\x55\xf9\x34\x6d\xe9\x62\xbd\xd6\xcd\xa0\xb9\x24\x31\x03\x81\xf8\x51\x71\x8e\x98\xde\x68\x9b\x7e\x66\x07\xcb\xd0\x83\xbd\x95\x3a\xad\x9b\x7a\x87\x63\x74\x78\xb3\xec\x32\x5e\xf1\xec\xb0\x6b\x39\xfe\xb0\xec\x94\x54\x0a\x5b\x51\x9c\x66\x24\x38\x38\x6a\x51\x34\xe6\xd8\x52\x0f\x35\x38\x86\xcc\x4c\xac\xe1\xc2\xbb\x72\x32\xbe\x56\xb4\x26\x3e\x38\x5d\xab\xf2\x58\xa8\x36\x51\x11\x0c\x6d\xa4\x17\xa7\x00\xf2\x94\x42\x99\x3d\xf7\xd1\xfb\x43\xa4\x4c\xd6\x23\x45\xbe\xcd\x6b\xf9\x22\xa4\x31\x16\x3a\x09\xf1\x5b\x9b\xe2\x13\x74\x26\x58\x81\x77\x7a\xa1\xd7\x76\x9c\xcd\x75\xf0\xe5\x5e\x23\xab\xb8\xce\x9a\xc3\xc3\x6b\xe5\x12\xed\x2b\x77\x41\xbb\x0a\x55\x35\x14\xb1\x52\x44\x2a\x77\xe5\xa7\x9c\xb4\x91\xd8\xe2\x32\x8a\x8d\x2b\x11\x9d\xfe\xff\x9d\x49\x2d\xd2\xd7\xe5\xb0\x0f\x3b\x8b\xf0\xbd\x16\x45\x7c\x95\x00\xaf\xbd\x91\xb4\xde\x5a\xe9\x5d\x1b\x76\x2c\x4e\xfe\xe1\x86\x8f\x1c\xbf\x27\x4b\x6c\x77\xcd\x29\x4a\x68\xd1\x9a\x59\xb4\x85\x2d\xf6\x47\x95\xad\xe7\x5c\xa2\x92\xbf\xe2\x21\x54\x0d\x2d\xc4\xd8\x6c\x12\xc0\xc2\x98\x2e\x42\x0a\x77\x98\x10\x77\x5c\xc3\x76\x6a\xa5\x26\xb4\xb7\xbd\x78\x27\x58\x23\x93\x25\x30\xea\x66\x57\x69\x15\xb4\x1f\x71\xc7\xe2\xbb\x50\x75\x4b\x23\x9d\x71\xec\x21\xaa\xbb\x20\xaf\xb0\x89\x41\xeb\x73\x33\x96\x27\x4e\xea\x08\x22\x59\x53\x5a\x3c\x31\x0e\x85\x26\x5f\x64\xc1\x5e\x60\x18\xc3\x82\xd7\xe9\xbe\x17\x0b\xec\x85\xf9\xb9\xb0\x78\xf6\xad\xac\xe5\xca\x0e\x75\x29\x4f\x82\x32\x97\x72\x7c\xcb\x57\x45\xea\x8c\xa0\xa2\x91\x40\xb9\x48\x30\x64\x89\x14\x1b\x1c\x61\xd4\x13\x2d\xf2\xe6\x5b\xbc\x4e\xf1\xe3\x22\x6d\x7d\xb0\xc7\xce\x2b\xd4\x63\x16\xc7\x55\xb0\xaa\x26\xf9\xa4\x25\xcc\x7e\x97\x1c\xf5\xb6\xed\x58\x65\x14\x83\xbe\x29\x34\x18\xed\x8d\x6e\xac\xb6\x9c\x48\x30\x8b\x0d\x31\x55\x67\x21\x6e\x23\xba\xfe\xb7\xde\xac\x03\x39\xc3\xec\x24\x55\x2c\xb4\x5e\x7e\x53\xc3\xb8\xfd\x26\x9a\x2e\xcf\xd7\xe2\xad\x38\x50\x88\x27\xa1\xdd\xf3\x42\xf1\x5a\x1b\xc7\x7a\xe5\xe4\xdd\x48\x56\x6f\x56\xa5\xa7\x7a
\x25\x39\x87\x07\xcd\x02\x23\xed\x18\x8a\x4e\x35\x46\x05\x93\x84\x32\x26\xcf\x7b\x90\x0b\xf6\x26\xc9\x0c\xf9\xd2\x39\xaa\x6c\x61\x51\x8b\x2a\x9b\x2f\xc4\x38\x44\x01\xed\xe2\xb2\xa9\xac\x95\xb5\x15\xef\xd6\x21\xb7\xb0\x93\x34\x5c\xbc\x23\x10\xe3\xf5\x13\x83\x34\x65\xf3\x74\x19\x44\x00\x5b\x55\xac\xfb\x3a\xc5\x64\xa1\x8e\x5c\xc2\x36\x28\x58\x79\x18\x8f\xe0\x9c\x59\xeb\x2e\x65\xe2\x49\x32\xe3\x28\x8d\xbd\xa8\x1c\x3f\x49\x49\x74\x6a\xa4\xc4\x09\x98\x68\x1a\xb9\x5c\x61\x49\x8a\x82\x98\x0d\x62\x1f\xaa\x84\x85\x4c\xdc\x79\x8a\x21\x76\x1e\x5a\x0e\x73\xe8\xc5\xad\x34\x28\x5f\x57\x49\xbd\x01\xf1\xa0\xba\x56\x98\x48\x4d\xae\x0a\x19\x94\xb6\x14\x6c\xba\x94\x0e\x9a\x36\x8d\x55\x86\x56\xa3\x9b\x07\x8b\x77\x55\x49\xa0\x1f\x6b\x64\xcc\xc6\x3a\x48\xac\xed\x94\x71\xf3\x78\x42\xbd\x47\x91\x7e\x33\xa8\x62\x45\x72\x70\x59\xcf\xed\x78\x46\xb1\xc9\xba\x7c\x2f\x60\x95\x53\x2c\xb1\x73\xe4\x80\x75\x2c\xe5\x0a\xe6\x18\xd1\x33\xa3\xa4\x56\x00\xa6\x69\x59\x37\x74\xcd\x90\x37\xf3\x33\x03\x6a\x4b\x1d\x88\xc5\xc3\x4b\xa6\x31\xe8\x39\xfd\x90\xf3\x6a\xf7\x43\x6f\xe1\x88\x5b\x4b\xa2\x98\x3a\x07\x97\x8b\x63\x95\x4b\x39\x77\x15\xc7\xa8\x25\x77\x4b\xb4\x66\x4c\x41\x6f\x01\x77\xac\x44\xa5\x2c\x82\xf0\x92\xb1\x57\x49\xa4\xcc\x31\xb2\xd1\x16\xb8\x90\xf6\x08\x86\x46\x94\xa1\xea\x1f\xd9\xfb\x50\xed\x94\x76\x8e\x2b\xc9\xb1\x86\x75\xfe\x29\xa5\xb1\x44\xa1\x72\x05\x2b\x69\x2f\x55\xd4\xaa\xda\x91\x07\xf0\x87\xe7\x38\xab\x72\x76\xaa\x24\x6d\xed\xe4\xa0\x60\xbe\xc8\x3d\xe3\x5c\xfe\x61\x43\x16\x96\xb3\xc0\xb4\x56\x8d\x13\x5e\x34\x28\x0d\xfb\xfa\xf9\x0a\xf9\xe1\x47\xe4\xa9\x1e\x73\xd8\xf0\xa2\xb0\xe6\x17\xec\x91\xd9\x79\x3e\x9c\x30\xa9\x3c\xcb\x03\x4f\x78\x50\xc1\x1f\x3e\x34\x61\x85\x0e\x30\x97\x29\xae\xa1\x97\xc8\x9c\x28\x76\x35\x73\xb9\x46\x24\x42\x81\x5a\xba\x39\x20\x68\xfa\x0d\xcc\x0b\x7f\x54\x5e\x06\x52\x0d\x72\x48\x45\x5a\x67\x18\xdc\x30\xa6\x8a\xb9\x5d\x22\xab\x22\x27\x9a\xb4\x0d\x95\xf2\x3c\x43\xdd\x91\x32\x24\xbd\x0c\x30\x45\x96\xb9\x64\x69\x3d\x54\xde\x01\x5b\xb2\xb2\x84\xe4\x3f\x8b\x64\xa0\xab\x88\x4e\xe6\x4d\x97\x8d\x7e\xd7\xc6\x17\x13\x72\x69\x28\x9b\x37\xa3\x43\x81\xa4\x57\xd1\x14\xd1\xcf\xc9\x57\xc3\x96\x9c\x3b\x07\xbb\x78\x98\xd7\xba\xae\x51\xf8\x8a\x4a\x97\x8f\x10\x3c\xba\x38\xa0\xb7\x60\x2f\x72\x7b\x85\xff\x4c\xbd\x59\xaa\x05\x87\xac\x4a\x82\xee\xec\xde\x9e\x76\xf1\x0f\x3b\xdb\x07\x6b\xff\x74\x0e\x39\xcc\xe9\x50\x2c\x0b\x26\xda\x2a\x99\xa8\x21\x10\x61\x43\xfe\xad\x32\x83\xf7\x9c\x26\xd7\x11\x93\x32\x40\x67\x88\x5f\xc9\xc9\x67\x80\x90\x07\xbf\xfa\xaa\xe2\xf5\x75\xe6\x77\x21\xeb\x62\x33\xe0\xeb\xad\x25\x37\x96\xbb\xea\x90\x5e\xbc\x0f\x78\x5a\x06\x71\xba\x86\xc6\x0c\x19\x42\x79\x78\x80\x06\x16\x03\x96\xf0\xfc\x94\x6d\x64\x21\x06\x55\x25\xc8\xce\x96\x60\x7c\x65\x86\x49\x7a\x28\xfd\x59\xe7\xe5\x9a\xe8\xc5\xe3\x55\xb8\xf0\x9c\x39\xa3\xc5\xe9\x31\x12\x78\x33\x78\x84\x7f\xce\x4c\x51\x91\xfe\xcc\xd6\xc7\x73\x96\xda\x17\x8d\xb3\x51\x88\xe3\xa4\xd2\x46\xb9\x36\xdf\x29\x32\xa7\x07\x1f\xc5\x9d\x01\xdc\xd1\x7e\xe7\xa6\xd1\x95\x48\x49\x4f\xc7\x2f\x5d\xd7\x3c\x72\xc8\xe8\xe8\x57\x11\x41\x7a\x52\xb1\x7a\xf8\x41\xb3\x32\x87\x4e\xcc\xbf\x41\xfd\xcd\x8f\x3f\x41\x96\x99\xc5\x39\x74\x32\x2c\x15\x42\x2b\x85\x92\x78\x13\x9b\x25\xd0\xdd\x7c\x09\xa3\x68\x52\xec\x6c\x30\xae\x72\x1b\xd2\x22\xfe\x0b\xf3\x10\x5c\xb2\x77\xbe\x23\x7c\x41\x9a\x51\x45\xba\x27\xac\x6c\x64\x4b\x6f\x5d\x01\x01\xb0\x20\xbc\xad\x73\x78\xe5\x9c\x7f\xab\x73\x5f\x42\xdf\xb7\xac\x62\xe1\x18\x72\x7d\xf8\x7f\x1f\x71\xda\x82\x8d\x97\xd6\x10\xc2\xf5\xe4\x8f\xbc\x7c\x6c\x1a\x91\x46\x9d\x11\x8d\x91\xc2\x97\xaa\x7b\x4f\xad\xa0\x17\x52\x7e\x6d\x49\x8f\x3b\xf8\xe9\xcc\x61\x94\xcf\xf1\xb2\x1
7\xf8\xa2\xfe\xd7\x26\x95\x80\x52\xc3\x82\x04\xe1\xf9\x6e\xb5\xba\xad\x15\xf0\xec\x14\xa8\x0c\x67\x61\x54\xef\xca\x15\x06\x86\x16\xeb\xd4\x41\x87\x53\x16\x17\x8a\x63\xd1\x95\xba\x8d\x7e\x44\xfb\xf5\x35\xcc\x91\x7f\x9e\x33\xe1\x32\x83\xf1\x8f\xb4\x5f\x14\xf8\x36\x11\x91\x3a\x38\x37\x35\x1b\x9e\x02\x99\x76\x0f\xae\x28\xfa\x9d\x55\xe5\xa7\xa6\x53\x23\x01\xa0\x8b\xe1\x18\x76\x3e\x45\xa6\xc5\x9d\x30\xbf\xce\x7e\xd3\x6f\x28\xaa\x6d\x58\x20\xf9\xbf\xf2\xf6\x7f\xa8\x6a\x57\xa3\xa0\xcd\x50\x90\xfd\x61\x59\x90\xdf\xc0\xad\x59\xba\x7e\x1f\x5e\xef\x4c\x51\x99\xae\x65\x8e\x9c\xa6\xc6\xaf\xc0\x43\x5c\xf5\x6d\xaf\xf3\x09\x9d\x2d\x0b\x9a\x7b\xcf\xba\x3b\x42\xb0\x73\xd0\x03\x2b\x82\x84\xb6\x41\x11\x90\xd5\x01\xd6\x0b\x95\xc4\xef\xb2\xa6\xc1\xac\xf7\x69\x1f\x8f\x19\x9c\x5f\xa3\x9d\x2e\xab\x6a\x68\xbd\x97\xa2\xfd\x29\x43\xfd\x0e\x25\x26\xd5\xf5\x00\x3b\x0c\x91\xc0\x60\xb8\x5d\x04\xcd\x27\xcc\xc8\xfd\x19\xb5\x48\xd2\x42\x4e\x73\xdb\xed\x76\xea\x26\xda\xb1\x59\x0e\x3c\xb7\xb0\x14\x77\xf3\x2f\xb6\xc3\xee\x58\x18\x08\x13\x7d\xf0\x4a\x9e\x2b\xe2\x7b\xef\x15\x80\x06\x2f\xee\x09\x77\xee\x56\x9a\x7e\xd4\x0d\x49\x3a\xb2\x56\x41\x8f\xe3\x41\x21\x81\x8c\xe1\x35\xeb\x7a\xf2\xf9\x59\xd9\x3f\x24\x70\xc4\x3c\x6d\x9b\x5c\xca\x17\x41\xf6\x04\xb7\x4c\x7a\xfa\x5e\xd7\x28\x75\x83\xe0\x00\x89\xa5\x8f\xcf\x8b\xed\xef\xa7\x62\x1c\x86\x60\x3e\x9b\xa4\xe0\xc8\x8b\xf8\x99\xdd\x34\xc3\xa5\x08\x91\xd8\xf7\x13\xfa\x10\xc7\xfe\xc9\xec\x22\x3f\xe9\x9d\x7a\x5f\x9e\x01\xc5\x26\x2b\x8e\x02\x3c\x28\xf5\x71\x96\xf5\x92\xe9\x3c\xac\x74\x82\xf2\xc6\x5f\x83\xa1\xf5\xc9\x1a\xe0\x3a\x90\xd0\xc6\xd9\x17\x96\xe2\xa1\x79\xb3\x44\x0d\xc1\x79\x97\x1d\x8a\x45\xb1\x2a\xa0\x6f\xa2\xbb\x79\x38\xcd\x41\x51\xa5\x02\x21\xc9\x75\x6e\xce\xe1\x49\xec\xd7\x14\xf3\xf7\x97\xad\x08\xbe\xdb\xfe\x91\xec\x20\xeb\x80\xae\x28\x28\xae\x1c\x35\x9d\x5f\x8d\x75\xf3\x92\x97\x95\x93\xd9\x54\xf2\x1b\x64\x3b\x53\xf6\x53\x43\x10\xba\xe2\x78\x35\xae\x50\x64\x08\x39\x4e\x1b\xe8\xb4\x12\xdf\x2a\xcb\x1b\x0b\xfb\x59\xe4\xb6\xbb\xaa\x5f\x50\xcc\xa8\x69\x0c\xae\xac\xc0\x58\x34\x15\xc9\x99\xbb\xd3\xcd\xa7\x85\x52\x57\x64\xcd\x8f\x26\x4b\xaa\xff\x45\x96\xe3\x09\x73\xc9\xbc\x44\xd4\x75\xda\x01\xa5\xbd\x9e\xd5\xc3\x55\xcc\x3a\x62\x90\xaa\xbd\x5c\x7e\xc3\xea\x71\x4d\x5e\xae\xaa\xdb\x63\x25\xa0\xf0\xbe\xea\x2f\x59\x6e\x6f\xd2\xa0\x6e\x78\x4b\x15\x06\x4d\xb7\xe7\xb6\xbe\xca\x5f\x2c\xe8\x13\x0a\x86\x7c\x17\x15\x10\x61\xa8\x3e\xee\xe2\xc5\x9d\xb1\x4c\x92\x28\x6c\x8d\xde\x5d\x36\x19\xee\x5c\xa1\xd3\x7a\xcf\xfb\xd2\xa2\x41\x41\xbf\xec\x85\x51\x27\xdc\x12\x22\xea\x18\xbb\x0f\x5e\x7b\x55\xd1\x45\x6e\x51\x9a\xbd\xa7\x50\xf1\xf5\x88\x23\x39\x7b\x08\x2a\x59\xa1\x37\xa6\xee\xfc\xc5\x79\x94\xcd\xcb\x0d\x8c\x99\x5a\xe2\x3b\x55\x65\x93\xee\x3f\x6e\x53\x77\x6a\xbc\x3b\x4f\xb2\xf5\x5c\xd9\x15\x56\xee\xaf\xf7\xa1\x7a\xd4\x5b\xff\xfb\x7a\x69\x90\x78\xd9\x93\x1f\x25\x0a\x1b\xf1\x0c\x51\xfa\x0a\x12\xd0\x55\x4f\xa7\xcc\x50\x02\xb1\x79\xe0\x54\xca\x82\x5d\x75\x1c\xb4\x42\x35\xbe\x2a\xeb\x5c\xf0\xe3\x5b\x09\xb4\x1c\x8a\x40\x95\x09\x4d\x28\x62\x45\xc9\xbc\x73\x09\x49\xed\x96\x42\xbc\xfa\xba\x09\xe5\xf5\xd3\xbc\xed\x26\x53\x52\x0f\x51\xcf\xe0\x11\x47\xb2\x2b\x25\x01\x3d\xb2\xfb\x82\xd0\x37\xad\xe1\x1e\xe4\xd4\x3e\x11\x08\xb3\x38\x58\xfc\xe4\x16\xa8\x2a\x9d\xae\xc1\xf6\x3a\xce\x34\xba\x5e\xc8\x75\xb0\x9d\xb9\x8e\xf9\xab\xba\xc8\x3a\xdb\x91\x50\x32\xe6\x19\x1c\xaa\x71\x5b\x80\x41\x80\x7f\x5f\x9a\xd3\x8f\x18\x5b\x2e\xc1\xc8\x13\xda\xf3\x3a\x9e\x52\x5c\xdc\xb3\xc6\x2a\xd7\x46\x08\xe4\xb5\x8a\xe8\x8c\x1c\xf1\x22\x6b\xa6\xf9\xf4\x6a\xb4\xbd\x18\x71\xfd\xec\x15\xb9\x55\xb3\xd0\xeb\x82\xc7\x81\x68\x07\x9b\x8c\xbc\x36\x51\x13\x
a1\x14\xc3\x65\x38\x28\xb9\x22\x0c\x9e\xbb\x6b\x3c\x45\xdb\x7b\xd0\x07\xe2\xca\x87\x26\xfb\x57\x16\xf8\x1a\x81\x7e\x5c\x4e\x8d\xba\x67\xc3\x65\xf1\x31\xad\x49\x71\x2a\x21\x4b\xa9\x3f\xdf\x2a\xaa\x7e\x3c\x3e\x09\x00\xe8\x7a\x8c\x51\x0d\x2d\x50\xcb\x0f\x99\x46\xe4\x0e\xd4\xae\x82\xc8\xd7\xd4\x22\xfb\xf6\x62\xb8\x24\xe3\xac\xa3\x20\xb5\x32\x86\x52\x62\xd5\xab\x61\x91\xf8\x06\x57\x9b\x8b\x5f\x40\xea\xdb\x88\x5a\x64\x3e\xbc\xbc\x0f\x5e\x2a\x60\x7c\x19\x70\xde\x77\x25\xe5\x30\xaf\x44\x52\xc4\x95\xbe\x44\xc5\x44\x9d\xe6\xf3\xed\xed\x96\x35\x41\x89\x54\x3f\x4d\x74\xa1\x72\x20\x22\x1f\xe6\x83\x51\x05\xd5\x15\x38\x69\x2b\x75\x16\xa9\x97\x82\xdf\xd7\x58\x7f\x5c\x6e\x7a\xed\xce\xf2\x2b\x66\x5c\xe9\x27\x6d\xdc\x79\x69\xf0\x47\xba\x8b\x34\xed\x29\xcc\x1f\x41\x30\x81\x5c\xbe\x9d\x7e\x4a\x08\x34\xed\x05\x64\x83\x15\xd8\xe8\xbb\xce\xb9\x4b\x2c\xae\x3e\xc3\x3b\xc8\xcd\xe2\x7e\x58\x5d\x95\x9c\xc3\x99\xc3\x0f\x28\x13\xb5\x0a\x87\x50\x99\x01\xdd\x4c\xfe\x41\xbe\xe3\xc3\x7a\x6a\x28\x7a\x9d\xcd\x30\xe0\x4c\xa4\xa1\x9c\x32\x65\x12\xdb\xe6\x4c\x54\x93\x4c\xfd\xc2\x01\xf9\x29\xc7\xa8\x13\xe2\xe4\xe1\xa6\x94\x3a\x07\xe1\xfd\x57\x9d\xc6\xef\x35\x3d\xe0\x3f\xbc\xca\x57\xf7\x41\x8c\xbf\x5b\x37\xf7\x80\xf2\x35\xe1\xcd\x70\x16\xaa\x77\x33\x19\x1a\x51\xf2\x65\xe9\xf4\x3d\x30\x8b\xe9\x9a\x8c\x2a\x95\xa2\xb2\x9a\x6e\x99\x08\xe9\xd9\x3c\xe0\xc1\xc1\x52\x28\xa0\x91\x94\x56\x64\x75\x1a\x96\x42\x49\x93\x37\x6b\x9c\xe7\x10\x65\x94\x1f\xbb\x3d\x95\x7d\x39\x58\x5f\x45\x77\xc2\x28\x7e\x13\x2a\xac\x89\x81\x3c\x57\xa0\xdd\xc8\xfd\xfa\x9e\xf3\x05\x94\x53\x43\xb2\xf1\xce\x8d\x11\x48\x12\xc9\x01\x2f\x32\x7e\x79\x42\x7b\x49\xc5\xd4\x18\x0c\x1d\xe8\x1a\x28\x4e\x99\x99\x03\x80\xb6\x6e\x33\x55\xca\x5f\x41\x9c\xa3\xbd\x15\x87\x3b\x47\xb3\xda\xba\x6b\x00\x77\x04\x3a\xcd\x02\x32\x20\xd9\xa2\x1e\x61\x24\x77\xa4\x41\x8f\x3b\xdc\x0b\x59\x84\xd3\xd0\xa0\x9c\xf1\xf2\xba\xda\x80\x73\xfe\xd0\x3d\x73\x29\x26\x1f\xb7\x5c\x60\xde\x8e\x60\x4a\x3e\xfd\x0f\xda\x26\x96\x34\xe6\xf0\xf2\x69\x79\x7d\x75\x19\x7d\x08\xe1\xdc\x7a\x91\xff\x9c\x6a\xba\x97\x51\x36\x47\x50\x0b\xf1\xd2\xe8\xdc\xb3\x7d\xb3\xa0\x57\xd1\xf4\xeb\x92\xb8\x43\x1e\x23\x92\xb4\xf6\x61\xcc\xd1\x78\xef\x9e\xf9\x8f\xff\x97\x45\x48\x01\x41\xf0\xd2\x92\x0b\x55\x76\x33\x67\x65\x40\xb1\x52\xb2\xb0\xba\x0d\x1d\x23\x5d\x60\x8b\x84\xc5\xdd\xc2\xa1\xd4\x02\x89\x3c\xd6\xfd\xad\x35\xd0\x47\x22\x89\xf0\x8e\x42\xab\x3d\x4a\x62\x07\xa2\x99\x6b\xd4\x43\xd7\x2d\xc2\xe5\x5f\x45\x24\x04\x16\xc1\x03\x91\xf6\xfc\x31\x4e\x7b\xab\xcd\x6e\xf5\x6c\x85\xda\x14\xdc\x43\x25\x1a\xb7\xd2\xea\xd4\xdb\x9a\xfb\x0f\x8b\x7b\xf3\x9b\xfd\xc5\x36\xdf\x91\xe5\x6f\x9a\x67\x5b\x7b\x9c\x11\x91\xd8\x88\x02\x60\xe8\x48\x4e\x59\x0c\x5a\x67\x66\x6f\x2c\xe6\x92\x51\x97\x75\x4b\x95\xc7\x72\x79\x49\x11\x02\x33\x69\xf5\x5b\xfe\x52\xd3\x32\xec\x68\x83\x8f\x98\x59\xe2\xdb\x90\xf5\xe6\x21\x42\x6e\x65\x8b\x77\x75\xcb\x83\x4d\xfe\x9d\xac\x99\x99\x1e\x01\x23\x51\x1b\x61\xf0\x10\x25\x62\x88\x51\x8a\xc3\xb7\x85\x90\x8a\x85\x59\xd5\xdb\x2d\x11\x25\xb4\xd9\x23\x24\x41\xcb\xb1\xbc\x9a\xe0\xa4\x3d\x1d\x8a\xe1\xfc\x4c\x9e\x65\x0f\xde\xa9\x8a\xc6\x48\xf4\x30\x08\xb3\xc5\xad\xef\xc4\x45\x14\x58\x6d\x4b\x6f\xbb\x69\x64\xea\xbd\x73\x15\x01\xf0\xaa\x2c\xac\x5e\x95\x95\xd3\xf6\x00\xa5\xf3\x13\x5c\xd3\xe4\xc8\xe3\xd0\xac\x54\x41\x04\xf5\x59\x6b\xe5\x1e\x3f\xdf\x0a\x6d\x23\x10\xda\x60\x18\x73\x52\x56\x50\xad\x2d\x50\x51\x96\x86\x25\x9b\x63\xc2\x3c\x08\x33\x65\x2f\x9f\xa9\xd0\x0c\xfd\x68\x56\x66\xa5\x69\x8d\x80\x2b\x6c\xa2\x16\x46\x50\xf6\xe1\x7f\x08\x97\xa9\x1f\x4a\xe3\x21\x72\x36\x03\x22\x60\xd0\xf7\xca\xf3\x29\x1a\x5c\x79\x4a\x87\x6d\x75\x70\xc1\x8f\x97\x4e\x3f\
x3f\xaa\xa0\xb2\x3a\x69\x4f\xd8\xb7\xee\xea\x68\xe5\xe1\xfe\x09\x3e\x60\x29\x54\xcb\xdc\x9a\xaf\x54\xee\x58\x05\xb5\xbb\x7a\xe7\x08\xa7\xaf\x79\xe9\xea\x8a\xeb\x08\xf2\xeb\x5b\x6b\x95\x08\xbb\x47\xbe\x14\xa3\x0d\x0a\x9a\x2a\xbf\x7e\xfd\x6c\x90\x58\x3b\xf9\xed\xac\x3a\x6f\x34\x07\x00\x3d\x84\xa3\x54\x58\x01\xad\x10\xd4\xad\x1e\xa4\x8b\x75\xd8\xd4\xb4\x35\x85\xd2\x5a\x61\x4c\xab\x92\xa8\xea\xbd\x09\x4e\x75\x51\x1a\xfd\x51\x61\x94\xe2\xa0\x10\x6f\x65\x89\xd7\x27\xf6\xb4\xec\x4a\x22\x14\xad\x5f\x47\xab\x72\xaa\x20\x3b\x9f\x97\x34\x37\x67\xa9\xbe\xac\x09\x25\x60\x0f\x6c\xe6\xa2\x62\x68\x9f\x3e\xdc\xbe\xab\x03\xff\x73\x47\x9c\x03\x5c\xb1\xf8\x4a\x42\x1c\xdb\xeb\x8e\x6f\x55\x47\xdf\xa2\xfe\xd5\x06\xc1\x45\xe8\x35\xc5\x3a\x49\x4b\x63\xf1\x8a\x83\x17\x31\x6b\x37\x13\xeb\xb9\x20\xeb\x14\x58\x47\x7c\x98\x88\xc1\x43\x3d\x55\x45\xd4\x5b\x5e\x59\xe7\x2e\xdd\xc8\xa5\x9c\x92\xf7\x2c\x92\xbe\x5b\x98\x0a\x5a\x58\x2d\x5b\x8a\x88\xab\xd0\x03\x65\xcd\xf5\x51\x32\xf7\x6c\x86\x38\x8b\x50\x03\x8b\x9c\x01\x97\xb5\x1c\x98\x0d\xa8\x54\xb8\x90\xfd\xf3\x13\x6c\x46\x0f\xf8\x92\xd9\x52\x7b\xb0\x13\x58\xe4\x70\xb1\x34\xf8\x23\x68\xc9\x11\x8c\x57\x8c\x17\xe1\xbd\x55\xb8\x76\x5d\x8f\xa4\xf2\x2e\xa0\x39\x7e\xf4\xf9\xb7\x95\x88\xc9\x95\x59\x20\xb0\x73\x53\xc1\x7a\x71\x6c\x7e\x14\x69\x81\xc4\xd3\x7f\xca\xee\xe1\xc1\x7e\xc2\x3e\x04\xc1\x02\xa6\x5d\x2a\x0c\x5b\xe3\xc0\xe5\x2a\x8a\xe5\xa5\x65\x92\xf0\xff\x22\x08\xf1\x92\xa2\xe8\x4b\x54\xf5\x6f\x25\x81\xee\xb7\xee\x10\xff\x67\x28\x83\x3c\xa8\x89\x2a\x60\x8e\xee\xb7\x78\x17\xf4\x3b\xbb\xea\x24\xcb\xc2\x02\xdb\x58\x60\xcf\x8d\xf3\x33\x71\xb6\x7c\xd9\x7e\x93\x77\x68\xbf\xc9\xdf\x73\x1b\x34\xf9\xa5\x85\x86\x32\x34\xa4\x70\x15\xa0\x1e\x2a\x20\xfa\xf4\x26\x53\x67\xaa\x42\xfb\x21\x33\xe5\x55\x4e\xd8\x7a\xed\x1e\x9c\xcf\xc4\x35\xe7\x75\xb7\x5a\xee\x41\x71\x3e\x0c\x21\x16\xb1\xfb\xca\x7a\xdd\xda\x65\xca\x7f\x2a\xc3\xf9\xa1\x4d\xe5\x1c\x3b\x05\x55\x71\x76\x02\xa3\x93\x10\xb3\xac\x18\x41\x90\x54\x05\xf7\x4b\x3d\xb4\x66\x21\x48\x91\xe7\xf0\xad\x08\xc7\x76\x9d\xf6\x5c\x1b\xdb\x72\x8d\x55\x73\x3b\xb9\x3c\xbb\xef\xc4\xa5\x58\xee\x84\x13\xb8\x66\x71\x44\x58\x1d\x91\x42\x99\x9e\xa7\x38\x9a\xa9\xd9\x77\xbd\xbf\x0e\x5d\xf5\xcf\xaf\x73\x56\xbd\x78\x8a\x54\x05\x71\x87\xaf\xfd\x6b\xe2\xb5\x2f\xda\x72\xd3\x2c\x13\xaa\xa1\xfb\xa5\x80\xa5\xd9\xa6\x35\x3d\xba\xa3\x67\x04\x5e\xfc\x3c\x83\xd5\x3b\x9e\xe2\x80\x8c\x85\x88\x8b\xb1\xe8\x22\xa0\x14\xab\x1a\x6d\x70\x3e\x47\x37\x66\x3e\xc8\x37\x5c\x81\x71\x62\xe9\x95\xff\x70\x0a\xd2\x9f\xab\x72\xaa\x8c\xfa\x07\xa2\x35\x52\x6f\x5a\xf1\x90\xb5\x6d\x48\x92\xa6\x7f\xf2\x19\xb7\x29\x6b\xf3\xdb\x1e\xad\xdf\x0d\x57\x69\x8b\xfb\xa9\x29\x7b\x8e\x33\x91\x85\x08\x7f\x68\xe1\xa5\x62\xee\xa0\x67\xf6\xf1\x96\xa9\x55\xb5\xc1\x35\xb9\x87\x95\x22\x5d\x8f\x1a\xfd\xd4\xe3\x9c\x3f\x11\xc1\x92\x4f\x51\x0e\xeb\x2d\xeb\x16\x16\x07\x5e\x9c\xfa\xc4\x14\xd5\xa3\xd4\x75\x54\xb5\xf5\xdc\x78\xcc\xa2\x59\xfb\x43\xcc\xef\x47\xb8\x74\x82\xe9\xa8\x61\xc0\x03\xfd\x04\xff\x9d\x7b\x7c\xfb\xd7\x33\x2d\xc0\xfc\xb8\xe0\x21\x78\xe7\xcc\x88\x75\x57\x3a\xcd\xf9\xb1\x81\x05\x92\xc7\x57\x9e\xa4\xf1\xf9\xf9\x88\x15\x69\xfd\x40\x63\x28\x3f\xa2\xce\xfd\x40\x59\xe6\x97\x41\xfc\x68\x0c\x40\xb8\xd1\xa9\x8e\xfe\x61\xf3\x85\x68\xcf\x1e\x89\x43\xb6\x66\x7a\x67\x43\xeb\xd6\x68\xe5\x3f\x8c\xae\xa8\x57\x66\x29\xfe\xe5\xf3\x1f\xb2\xd9\xbe\xd9\xee\x5d\x7f\xde\xc2\xe0\x6b\x6d\x75\x5f\x8c\x38\x83\xcc\xb8\x3e\x50\x16\x41\xb4\x4c\x44\x20\xa3\x60\xef\xcf\x43\x53\x5d\xcf\xae\x75\xbe\xd2\x4b\x31\x8f\xcc\xd7\x21\x71\x60\xbd\x1c\xc9\x9d\x6e\x23\xe6\xcb\x60\x4e\x92\xa0\x21\xf5\x5e\x7b\xdf\x82\x03\xf9\x62\xfd\x97\x93\x25\x5b\x9e\x59
\x0e\xdd\x03\xe0\xd2\xa2\xcd\x0e\x55\xb3\x07\xaf\x1e\x13\x85\x3f\x54\x61\xfd\x87\xba\xe8\xdd\x1e\xea\x43\x95\xcd\xaa\xad\x75\x28\x7b\x7c\x58\xfa\x2c\x9f\xa4\x1a\x72\x00\xed\x86\xf9\xd6\xf4\x0b\x1c\x67\x19\x95\x6d\xf5\x23\x1d\xd1\x55\xe2\x27\xc3\x0c\xe7\x31\x0e\xc6\x87\xf9\x5b\x5b\x9d\x07\x15\x1a\x87\x29\x34\xba\x6c\x3c\x48\x80\xa3\xe7\x72\xf3\x9f\x92\xb2\x07\xd4\x7a\xb5\x65\x8a\xff\x9b\x4f\x91\xc1\xa1\x03\xb1\xa9\xb2\x7b\x8a\x59\xb9\x5f\xab\x58\x97\xaa\x19\x33\xef\xc2\xb0\xd3\x57\x14\xc1\x02\xfb\x21\xea\xd6\x2b\x2b\x80\x10\xaf\x7c\x0d\x3a\xe4\x9a\xda\xba\x09\xf0\x42\x8f\x7f\x0b\xfb\xe6\xe1\x0f\x5a\x73\xc8\xc3\x2c\xfe\x26\x4a\x81\xe4\x93\xa2\x0e\x2b\x1e\x69\x66\xaa\xd8\x96\x67\x7d\xf3\xc4\x3b\xb4\x51\x1e\xc9\x35\xad\xa2\x52\x5c\x55\x67\xd6\xa8\x69\xc0\xa8\xd9\xb7\x4a\x61\xc8\x96\x24\x4e\xdb\x98\x83\x9f\xe4\x98\xe3\xfe\xdd\x31\xdb\xd8\xd7\x5b\x29\x1b\xd4\x99\xff\x48\xa4\xa2\xea\xc1\x0a\x71\xc3\x61\xf4\x78\x74\x39\x2c\xeb\x4f\x49\x15\xf4\xb0\x67\x66\x71\x39\x35\x8f\x06\x20\xd8\x55\xc7\xeb\xcb\x4b\x3b\xf3\x99\x98\x55\x1f\xae\xc8\xde\xeb\x0b\xf5\x55\x7d\xf9\x8a\xa1\xaa\x1e\x04\x7a\x68\x94\xb8\x30\x59\x71\xdc\x85\x8e\xb0\x13\x9b\x86\xba\x73\x21\x95\x34\xc4\x84\xfb\x36\x2c\xee\x5d\x32\x43\x97\xbd\x97\x13\x30\x96\xbc\x2c\x3d\x74\x15\x58\xbf\xb5\x6e\x42\xac\xd0\x9e\xf2\xca\xc0\x75\xb0\x55\x60\x84\x60\x7a\xdb\x0c\x9f\x78\xa8\x7e\x3a\x3f\x5f\x30\x19\x99\xa3\x73\x50\x47\x6d\xc8\x4e\x3e\xe9\x7c\xe8\xb2\xdb\xd3\x2b\xbd\x9f\x3e\x84\x0d\x20\xe1\x2a\xb7\x72\x97\xde\x94\x97\x6c\xef\x9a\x65\x8a\xc6\x22\xae\xea\x94\xba\x08\x91\xc3\x88\x5a\xf7\x2d\xc1\x54\x19\xc4\x55\xb2\x46\x21\x17\x96\x7f\x82\x7f\xaa\x80\x6f\x28\x44\x5d\x45\x5e\x85\x34\xd8\x8f\x0a\xdd\xb9\x15\xba\x07\x28\x44\x29\x16\x1a\x00\xf6\x5f\xfb\xce\x64\x1e\x3f\x68\x3f\x1f\x6c\xb3\x7a\x7d\x94\xcf\xa7\xe8\x78\x47\x73\xb8\xdb\xf7\x24\xd5\xd8\xed\x00\x31\x47\x52\xb2\xdb\x11\x07\xc3\xb4\x59\x41\x66\xda\xa8\x4f\x61\x57\x6c\x11\x3b\x6c\x0b\x36\x29\xce\xae\xc1\x7f\x5d\xd7\xb6\xd1\xac\x1f\xd6\xf7\x73\x89\xeb\x61\x71\xba\xa5\xf3\xe9\x89\xe7\x26\xee\x69\xbd\x5c\x54\xca\xd3\xa6\x82\xba\x04\x5b\x2b\xe7\x23\x5d\xdd\xba\x14\x3a\xd8\x5d\xd4\xda\xf3\x5f\x1b\xbc\x24\xa2\x32\x68\xb0\x5c\x16\x79\xdf\x95\x14\x12\x5f\x4e\x80\x55\xb1\x3b\x32\x97\x16\x0d\xaa\x74\xab\x0e\x6c\x13\xd1\xae\x90\x1d\x9b\xae\x28\x5a\xbf\x69\x33\x57\xfe\xb9\x4f\x1e\x13\x2b\xb6\x60\xc9\xb9\x9c\x54\x64\x47\x8b\xc0\xcd\x60\xf6\xf0\x94\xb4\x0b\x2a\xdd\xe7\x2f\x07\x88\x5d\x69\xd2\x65\xdd\x4a\xae\xa8\x4d\x25\x62\xc8\x85\x38\xc7\x84\xa5\xc3\x3c\xd6\x97\x8d\x91\xe5\xec\x59\xed\xcc\x9f\xf1\x0b\xa0\x22\x7f\xb7\x0d\x71\x30\xf7\x66\x79\x3c\xb4\x22\xba\xd9\x1a\x4f\x2a\xc8\x42\x82\x4f\x22\xaf\x6e\xe1\xf6\xc3\x1a\xb9\x96\x8d\xb3\xd6\xfd\x49\xf9\x75\xe8\xf4\xbf\x2c\x58\xd4\x79\xc9\xbc\x5d\xbf\x14\x5a\x59\xed\xd9\xe8\x48\xe7\x98\xb3\x63\x5a\x3c\x4c\x75\x38\x25\xae\x9b\xde\xbb\x2e\x78\xaa\x9c\x9c\x59\xa0\xb2\x7f\x8a\x25\x1e\x7b\x1d\x1a\x3a\x1d\x19\xe6\x59\xfc\x20\x4e\x6c\x30\x93\x3a\x57\x4c\x3a\x09\x11\xe9\xd2\x1c\x42\xb3\x76\x05\x64\xa3\xe7\x82\x85\xf6\x1f\x6e\x95\xce\x47\x0b\xbd\xe1\x4e\x8e\x5b\x65\x8d\x1a\x0a\x68\xd0\x78\xeb\x3a\xa7\xae\x9e\xbf\xaa\x6e\x9f\xb9\x23\xcf\xdb\x69\xdf\xba\x32\xae\x11\x7b\x2b\x58\x05\xa4\xc3\x24\xfa\xcd\xa5\x8d\x37\x79\xee\xe8\xe5\x64\xbf\x07\x2f\xdd\x9e\x45\x72\x55\x82\xf1\x2e\xf1\xd0\x16\xb0\x0c\xbe\xd6\xa6\x83\x08\x9f\xed\x18\x5f\xdb\x2f\x19\x45\xd9\x73\x34\x80\xbf\x09\xc9\x85\xd0\x3d\x5a\xe6\x37\xf5\x3d\xd3\xb4\x06\xcc\x35\x0d\x65\x3d\x26\x9d\xc6\x30\x28\x35\x91\x7a\x0e\x6b\x52\xbb\x73\x09\xea\xbb\x64\xc7\x02\x72\xfd\xe3\x35\x75\x20\x88\x11\x65\xb6\x1a\x1f\x8c\x9b\x7a\x4f\xe0\x7e\xd
2\x6b\xbe\xc8\xa5\xf9\x40\x3c\x9a\xeb\x22\xd5\xf6\xf6\x27\x43\xbd\x14\x34\x65\xd9\xd1\xbd\xf5\xfc\xbf\x4e\xde\x0c\x3b\x84\x40\xf4\x2f\xd4\x43\xc3\x5d\xe0\x35\x13\xc5\x0d\x15\xd8\xfd\x87\xba\xb1\xa5\x36\xe1\xd8\x39\x0a\xe8\x68\x5e\x6e\x45\xa9\x85\x2c\xc2\xdb\x93\x72\x44\x7d\xdc\x21\x01\x94\x7e\x05\x4b\x30\x0c\xff\x5c\x37\x5e\xe5\x76\x96\xca\xc3\xbe\x2f\x2d\xa1\x75\x0b\xe8\x6c\x51\xa4\xc9\x06\x39\x1c\x62\xcd\x6e\x71\xb0\x10\xef\x44\xa1\x83\x65\xfd\x94\x5c\x94\xb5\x9c\x07\x6b\x26\xcf\xb2\x2e\xfa\x77\xcf\x59\xb8\x30\x7b\x16\xe6\x1b\xc3\x5a\xae\xb5\x91\x71\x8c\xc8\x9f\xa0\xb4\x85\xab\x1f\x3b\xb3\xe7\x3c\x48\x4d\x96\xbf\xf5\x89\x32\x23\x2b\xee\xe7\x40\xe6\x09\x5c\xa5\x78\x2c\x8c\x73\x1c\xed\x8e\x64\x94\xca\xed\xa7\x2a\xf8\xe4\xd5\x7e\xed\x01\xf7\xc1\x13\x41\x4f\x60\xad\x83\x79\x2b\xf8\x7b\xfd\xba\x23\x03\xba\x68\xcd\xd2\xe8\xbe\x7d\xf5\x68\xaf\xcc\xfd\xe2\x24\xe6\xae\x5b\x93\xd7\x42\x92\x97\x53\x72\x18\xfc\x69\x78\x0f\x8b\x3f\xbf\xd9\x08\xac\x0d\x8b\x66\xc4\x3e\xfe\x8a\xb2\x0b\x87\x84\x42\x5b\x3e\x1b\xa8\x70\x14\x78\x63\x64\x23\xd7\x3e\x58\x34\xa9\x37\x03\x6d\xcb\xaf\xc6\x6d\x2d\x39\x29\xb5\xc5\x7b\xf9\xbb\x84\xf1\x06\xac\x00\x7d\xa4\xe5\x15\xf3\x2b\xc4\x66\xd9\x4d\xdd\x50\xa5\xf4\x03\xe5\x7f\x2c\x64\xdf\x00\xcf\xfa\xf3\x79\x03\x04\xaa\x8a\x39\x6a\x21\x0c\x56\x49\xda\xa0\x8a\xae\x75\x1b\x92\x90\x2c\xd4\x07\x90\xf7\xb6\x17\xc3\xbf\x54\x13\x76\x27\xfd\xbf\xb4\x6a\x43\x98\x26\xfa\x70\xc6\xe5\x6a\x1b\x29\xf6\x95\x73\x68\xd6\x5d\xaf\xe0\x75\xd6\x28\x24\x7a\x71\xdc\xeb\x8c\x95\x3e\x87\x60\x04\x8f\x62\xab\xda\xa5\x3f\xe4\x2f\x58\x31\xb1\xb7\x66\x96\x6a\xb4\xf5\xeb\x46\x38\x35\x0a\xa9\x5d\xe4\x4e\x24\x5f\x2f\x8d\x47\x49\xdd\xfa\xa3\x2e\x6c\x03\x1b\x39\x05\x72\x0e\x1f\x2e\xd7\x28\x88\xdb\x46\x51\x4e\x45\xac\xde\x09\xe4\x61\x95\xf5\xed\x32\x86\x87\x1e\xae\xa8\xb5\x13\x21\x59\x7b\x47\x6d\xc0\xa6\x68\x9f\x59\x15\xe9\x42\x67\xb7\x2b\xdc\x05\xbb\x72\x7b\x1b\x73\x6d\x44\x95\x5e\x84\x95\x63\x36\x27\x00\xd7\xf5\x1b\xc6\xe9\xb0\x8d\x98\x19\x60\x48\xc4\x3a\x02\x62\x5f\x60\x94\x32\xff\x09\x13\x43\x92\xb5\x51\x4a\x0d\x9e\xb1\xcd\xb1\x5b\xfe\xf9\xb2\x47\xa4\x70\xa9\xfa\x77\x09\x6a\x31\xd2\x4a\xbe\xcc\x32\x89\xb0\x48\x96\x29\x57\xfe\x27\x08\x40\x0e\xcb\x45\xa0\xa1\x7d\xb9\xba\x68\xe6\xe5\x69\x5d\x16\xc2\xf3\xd8\x88\x7d\xd0\x63\xe5\xef\xcb\xc9\x86\x39\x2a\x80\x97\x51\x04\x70\x1b\x42\xbf\x9b\x59\xa0\xb5\x2e\x1d\xa3\xda\x48\xc4\x6b\x4b\x46\xfb\x27\x49\xde\x9e\x02\x34\x80\xaa\xaf\x25\x0f\x37\xed\x55\x15\xef\x3d\xf0\xb6\x4e\x31\x1e\xee\x45\x3b\xb4\x21\x85\x3b\x16\x46\xe1\x1a\xa3\xdf\xa5\xf9\x17\xe6\xb6\x67\xb2\xc9\xab\x88\x08\x97\x3b\x2a\x7f\x14\xe1\x61\xd0\xea\xf4\xf0\x43\xa7\x77\xe6\x33\x65\x7d\xe0\x20\x0b\x43\xd5\xcb\x4f\x49\x01\xbb\x76\xee\x57\x3c\x77\x91\x71\xe1\xd9\x87\x50\xee\x58\x39\xdc\xf7\x8a\xba\xbc\x11\x63\xea\x70\x25\x3d\x80\xe7\x9b\x4f\xcc\x59\x9f\xf2\x23\x8c\x5e\xff\xac\x4c\xe5\x9d\x35\x3c\x35\x4e\x3a\xfa\x63\xbc\xc6\x2d\xc9\x50\xb5\x65\x50\x54\xcf\xd0\x61\xb3\x80\x2c\xe3\xd0\xc7\x80\x17\x62\x57\x64\x8b\xf8\x96\xa9\xaf\x45\x81\x41\x02\x2b\x17\x93\x0a\x39\x21\x92\x46\x5c\xae\x8c\x59\x98\x17\xcb\x8b\x37\xd1\x60\x37\x47\xb1\xbf\x74\x22\x9e\x53\x73\xd6\xbf\x9e\x65\x72\x7e\x36\xfd\xdb\xb9\x39\x1a\xce\xbd\x88\x11\x67\x54\x80\xcf\xfc\x71\x08\x0b\x50\x89\x82\x02\xe4\xf9\x6e\xc6\x86\xfb\xc0\x55\x2e\x09\x06\x1b\xc3\xb9\x57\x10\x1b\x87\xa8\x6f\x4e\xaa\x9b\x5f\x2a\xc7\xdf\x93\x17\xd0\xc1\x6b\xd6\x4f\x4d\x9b\xd0\x03\x09\x63\x0c\x0f\x46\x55\xe2\xf1\x18\x40\xc6\xe3\xd6\x94\x41\xb1\x22\x76\xa5\xae\xdf\xe9\xf7\x03\x95\x78\x49\x10\x1f\x00\x8f\xa9\xa8\x5c\xcf\x81\xaa\xbc\x75\xdb\x58\x31\x3a\x6a\x54\x8c\xab\x28\xbd\x77\x08\xd1\x
9e\x9f\x77\x47\xb6\xa0\xd3\xa4\xfc\xb0\x80\x82\xbe\xa4\xbf\x6b\x27\xe5\x69\x99\xa3\x54\x32\x3a\x8f\x36\x67\x3e\x7c\x76\xeb\xbb\x16\xaf\x3c\xb0\x94\xcf\x77\xc8\x35\x7a\xfd\xcc\x66\xcd\xaf\x0e\xf8\x16\x73\x14\x72\xcc\x1a\x3f\x1d\x73\xa4\x64\x8f\x39\x0c\xef\x2d\x62\x71\x9e\x08\xbb\x63\x1e\xa9\xa7\x64\xbd\x92\x18\x94\xa8\xdb\x13\x40\x2e\xd6\x73\x54\xec\x27\x78\x60\x23\x7e\xac\x55\xb6\x9d\xc7\xc3\x06\x19\x2a\xcb\xb9\xba\x6f\x27\x9a\x75\xd0\xa1\x51\xd3\x17\x42\xc0\x9e\x0f\x9d\x8c\x5b\xdb\x69\x5d\x69\x6c\x69\xe1\x37\x0e\x0e\x97\x6d\x41\x52\x22\xc8\x55\xc1\x99\xde\xf7\xf7\xab\x71\xaa\x77\xda\x32\xa2\xc5\x90\x1e\xca\xd1\x20\x02\x83\x34\x67\x9d\x53\x7d\x29\x10\x85\xe5\x48\x02\x40\x48\x78\xf4\xb0\x71\xfb\xc6\x8d\x8b\x80\xa7\x30\x65\xb4\x4e\xa2\x13\x78\x60\x67\xe5\xee\xb0\x31\xb0\xdf\xb6\xf2\x49\xde\x75\x94\xd7\x4e\xfc\xbd\x8b\xff\x12\xf5\x69\x37\x92\xcf\x75\x51\x26\xc1\x75\xaa\x1c\x21\xa0\x9e\xb4\x2c\xac\x3d\xc1\xff\xc9\x7f\x42\xc9\xd4\xee\xdf\x09\x26\xe0\xfd\x44\xfd\xdf\xbb\x04\x04\x1c\xad\xed\x25\x5a\xb5\xbd\x74\x9c\xd2\x58\x47\xf6\x2a\x6a\x6a\xdc\xa6\x91\x7c\x67\xbd\xaf\x7b\x56\xf2\x66\xcf\x71\xd4\xa4\x67\x0a\x22\x1d\xd2\x0a\x4e\x2c\x97\xd7\x4b\x15\xed\x2a\x9d\x0f\x57\xf8\x3c\x19\xec\xc1\x90\xb6\x7e\x44\x25\x7a\x87\xb5\x4f\x94\x53\x2d\x52\xef\xb0\xa0\x0b\x2a\x35\x73\x2f\x5e\xc2\xcf\xdf\x7a\x7c\x55\x8d\x3e\xbf\x8f\xf0\x32\x2c\x54\xc0\xd9\x8a\x44\xb7\xb7\xa2\x1b\xb4\xd1\xc5\x66\x43\xeb\x1b\xe9\x3e\xfb\xf0\x2d\xca\xce\x67\xab\xda\x41\x29\x58\x72\xc0\x3b\x0d\x0d\xec\xe5\xf4\xf7\x74\x03\x0a\x99\x95\xfc\x25\x26\x16\x20\x50\x8b\x77\xb6\xe1\x2e\x90\x38\x6d\x4f\xa5\x34\x8e\x6d\x60\x55\xb4\x4e\xc5\xef\xbc\xee\x6f\xd6\xee\x07\x11\xa0\xa5\xc8\xbe\xf5\x4f\x52\xc5\x31\x5b\x3d\xa9\xc3\xef\x71\x68\xfa\xd6\x3e\x45\xa1\xbd\x95\xca\x97\x39\xe8\x57\xce\xb5\xde\xc5\xa3\x76\x27\x48\x8f\xc6\xb3\x10\x3d\xd0\xbb\x5d\x43\x93\xff\x5d\x53\xc0\x0a\xfa\x71\x7e\x46\xa9\x7f\xb4\x91\x44\x28\x01\xf7\x35\x0a\x65\x1e\x2b\x38\xdf\x00\xfb\x31\x2e\x5f\x45\x5f\xeb\x31\x1b\x43\xdb\x7e\x56\xec\x96\x36\x6d\x60\x17\x4e\xf7\x04\xae\x89\x73\x96\xa3\xa6\xcd\x92\x55\x5a\xb7\x97\x97\xd3\xb0\x2d\x51\xfa\x45\x9e\x9c\xe9\x0a\x17\x49\x2f\xeb\x33\x6a\x0c\x70\x8c\xfa\x57\x56\x02\x11\x0b\xb5\xbc\x01\xa3\x24\x0d\x72\xc0\x13\x78\xad\xd2\x2c\xbc\xce\x06\xe7\x0a\x21\x0b\xb2\xd8\xdc\x83\x1a\x35\x28\xf5\xca\x41\x06\xda\x4d\xc4\xee\xa3\x80\x1f\xec\x70\x66\xda\xd7\x37\xe7\xf4\x28\xd5\x3b\x09\xcf\x63\xa7\x63\xaf\x75\x91\x69\x65\x15\x95\xd8\xda\xf2\x25\xc7\x00\x75\xad\x6e\xab\xa1\x61\xe5\x5a\x9d\x9e\x63\x0b\x15\xa4\xad\x77\x22\x95\x98\x72\xbf\xe4\x5a\xbf\x72\xd4\xfd\x43\xe0\x57\xa6\x8e\x6b\x71\xc7\x5b\x8f\x68\xe7\x0c\x14\x26\x39\xd6\x4f\x95\xb1\x67\xf9\xc2\xac\x52\xf1\x41\x02\x40\x45\x6c\xf8\x65\xbe\x7e\x1a\x3e\x40\xf0\x00\xd1\x95\x67\x9c\xa7\x28\xd4\xfe\x73\xf2\xb5\xcf\xe1\x22\x02\x99\x71\x12\xa8\xf1\xd2\x91\x04\x4d\x02\xdc\x1a\x45\xd5\x15\x9c\xa6\x9d\xc1\xb6\xd4\x89\xf4\xe9\x38\x56\x5e\xb8\x53\xe0\x84\x6d\x58\x7f\xfd\xf1\xd0\x64\x21\x1d\x52\x33\xd2\x81\xe1\xe7\x71\x8a\x82\xc6\xea\x88\x45\x95\x21\xfd\xc3\x7e\x85\x5e\xcc\xea\xd9\x47\x15\xf6\xcf\x22\x75\x65\xb4\xcd\x39\xa2\xfe\x2b\x20\x7c\xe4\xf7\x29\x92\x2f\xa4\xfa\xce\xa0\x4c\x8b\xdb\x2d\x54\x9a\x1d\x5b\x87\x79\x00\x73\x58\xe6\x20\x4a\x27\x51\xb2\x21\x2b\x48\xe8\x81\xeb\x47\x79\xb3\xb2\xbc\xa5\xff\x0b\x15\x61\x31\x8f\xc1\xf7\xf6\x5a\x73\xcd\x7c\x87\x9d\x24\xdd\x74\x80\x91\xc3\x0d\xc8\x0a\x3f\x10\x00\x20\xec\xc7\xe2\xb5\x44\xdf\x9f\x12\x70\xb6\xe8\x3a\x7e\x93\xe5\xd9\xb4\x9c\xe2\x11\x2c\x25\x30\x31\x16\xa5\xe5\x4b\xae\xbe\xe8\x9d\x59\x0e\x0e\x64\x96\xb2\x2f\x85\xd0\x80\x7c\x08\x60\xb9\x34\x89\x1e\x44\x3f\xce\xbf\
x76\x1c\x07\x14\x8c\x1d\x88\xeb\x44\x6c\xdf\x1d\x19\x64\xd5\xfe\x77\x7e\x03\x39\x3e\x0f\x12\x7d\x42\xe7\x56\x43\x5c\xc8\xa1\xc0\x38\x38\x4a\xfa\xe7\x12\x35\x4d\x8b\xfb\x49\xf3\x92\x25\x06\x54\x78\x31\xf9\x8b\x71\x0d\xf4\xe2\x2f\x21\x16\x60\x81\xd0\x0d\xa8\xcd\xc8\xea\x34\x2c\x5f\x17\x48\xc8\x24\x05\x3e\xb2\x70\x88\x00\x4b\xfd\x67\xe6\xb8\x65\xc9\x8d\xf0\x1d\x2f\x06\x1a\x52\xcc\x5b\x5e\x3f\xc2\x0b\x08\x2e\xb0\x46\x4a\x76\xc9\x1a\x34\x81\x0b\x2e\x11\xe6\xca\xe1\xcd\x22\x09\x80\x25\xd8\x45\x8b\x94\x1d\x17\x66\xea\xac\x71\xf8\x94\x3f\x3a\xcd\xfd\x67\x0b\x19\x3e\x00\x07\xfc\xc6\xfd\x96\x33\xca\xa9\x36\x15\xf4\x44\xd7\xaf\x4d\x28\x73\x6c\x97\x01\x9f\x3e\x29\xdd\x7c\x4a\x4d\xee\xd7\xc6\xa6\xc3\x27\x70\x25\xb0\x06\x72\x77\x51\xf9\x7f\xd9\x3c\xd6\x7d\xa4\x9c\x5a\xbe\x39\x59\xcb\x09\xe7\x12\x39\x95\x46\xf0\xbb\x09\xfa\xbe\xd5\xd0\x4c\xef\xe0\xd8\x4f\x50\xd0\xf5\xe5\x1e\x92\xce\x91\xfa\x98\xaa\x2b\x74\x6b\x8f\x02\x00\x9f\x9c\x4b\x6f\xc1\x05\xc4\xb2\x26\xfc\x61\x2a\x02\x1e\x4c\x2b\xc1\x08\x83\xe4\x9b\x6d\x1c\xc1\x3c\xd8\x34\xff\xfb\x8a\x85\x4e\x36\xc3\x76\xd7\x16\x0b\xbc\xee\x03\x59\x37\x02\x8d\x26\xd8\x15\x84\x2d\x01\xca\x1b\x61\x62\x3a\x46\x59\x3e\x9f\x5f\xde\xf9\x99\x04\x50\x80\x6e\x80\xa6\xb3\xfc\xb0\x70\x5b\xc9\x01\xca\xb1\x54\x0e\xaa\xf9\xaf\x7b\xe9\x18\x0e\xa5\xf3\x25\x7c\xc1\xdc\x38\xf1\x74\xa9\x9d\x32\xf5\x14\x26\x9f\x5a\xfa\x9a\xf4\xb2\x19\x1b\xcd\x57\xd1\xb4\x49\x64\xb9\x86\x02\xb3\xcd\xff\xc9\x2f\x0f\x9b\xb1\x89\xb6\x8e\x13\xdf\x1d\x64\x9a\x88\x7c\xb7\x48\xe7\x94\xc8\x1b\xb7\xa5\x68\xe8\xdb\xe9\xf3\x1c\xa8\x32\x37\x68\x03\x21\xa9\x8e\x50\xe8\xaa\x58\xee\x1d\x29\x30\x76\xc7\x3f\x03\x3d\x4b\x09\x59\x1b\x6b\x02\x29\x94\xe9\xd5\xf1\x6c\x62\x65\xa3\xcd\xce\xfc\x30\x6e\x53\xaa\x4c\x9d\x13\xd1\x7d\x19\x12\xdc\x22\xa4\x4b\x9a\x39\x7d\x98\xe8\xb4\x48\xb5\x60\x8b\x82\xa2\x3c\x86\x71\x61\xe8\x2e\x0b\xf5\x60\xcd\xb5\x9f\x59\x99\xf8\xf9\xf1\xd8\x25\xcf\x2c\x3b\xac\x71\xbe\x45\x5e\x0f\xf7\x09\xe0\x22\xde\xe1\x5c\x6a\xaf\x66\x09\x5f\xd3\xc6\x91\x03\x5c\xc2\xff\xe9\x0c\x90\x2a\x4f\x6f\xce\x5d\xec\x2e\xe8\x02\x0a\xe9\xbe\x3c\xed\xb7\xfa\xf4\x1b\x79\xef\x3d\xd0\x13\x9a\xf7\x8c\x59\xed\xf5\xc8\xe2\xd5\x88\x11\x4b\x90\xd5\x6f\x4a\x6e\x00\x37\x11\x66\xa9\x1c\x60\x80\xda\xae\x23\x21\xfb\x28\xcc\x05\x06\x9b\x52\x1a\x1a\xd3\xa6\x63\x6c\x9a\x4a\x8e\x2e\x44\xe4\x01\xe4\x7b\x44\xa4\x4f\x0e\x21\xa8\x85\x4c\x75\xcf\x97\xbd\x04\xfe\x7a\xd8\x24\x80\x1c\x71\xeb\x2f\x58\x2f\x0b\xe5\xdd\x49\x00\x87\x72\x6a\x68\x4c\x5d\x0f\x79\x2e\x3e\x30\xa2\x78\x47\xe0\xcb\xcc\x09\xeb\x23\x54\x66\x1e\x71\xb1\x1f\x48\x61\x2b\x1d\xfc\x98\xed\x66\xdf\xfc\x7a\xa4\x08\xe7\x63\x6e\x10\x89\x39\xf0\x6b\x0f\x55\x5b\x1e\xe9\x25\x17\xd6\xf4\xea\x28\x19\x02\x02\xbb\xd4\xa6\xc3\x23\x0c\xa8\x88\x91\x80\x0a\xeb\x05\x9a\x07\x6c\x9a\x66\x22\x25\x3a\x39\x90\xe5\x67\x78\xe1\xe6\x7a\xf3\x2d\xcc\xd7\x14\x0f\x6c\x96\x15\x61\x3e\x94\x1f\xcf\x10\x51\x0b\x49\x62\xa4\x52\x56\xc9\x43\xe5\xed\x7a\xa9\x8c\xd2\xe0\x46\x8e\x96\xe5\x30\x36\xab\xa3\xc9\x0f\x6f\x73\x72\xf3\xd5\x42\x99\x92\xb7\x2c\xf7\x02\x67\xf4\xf0\x46\xa6\x04\x58\xee\x02\x37\x93\xd3\x39\x65\xf1\xa8\x05\x03\x1b\xde\x2a\xa8\x0c\x6f\x14\x5e\x11\x40\xb4\x91\xb5\xf9\x3a\x11\xde\x60\xef\x30\x39\xf0\xb5\x3b\x03\xe1\xc0\x11\x39\xdc\x58\x45\x98\x7f\x73\xf1\x12\xc1\xfd\xcc\xcd\x71\xf5\x99\x08\x7e\x38\x75\x01\xa1\x21\x4d\x85\xe5\xb2\xf3\xfd\x71\x2e\xbd\x52\x89\x43\xa1\x34\x74\xc9\x37\x9b\xe8\xa1\x10\x84\x66\xdd\x4f\x27\x29\xe8\x66\x8f\x0f\x54\x6b\x42\xf3\xff\xfe\xfb\xc1\x63\xb1\xe6\x75\x16\x73\x8e\x52\x00\x1e\xf2\x31\x8b\x7b\x2f\x2c\xfe\x80\xe7\x32\x09\x72\xe1\x3a\xd3\x24\xcf\xef\x41\x78\xdf\xde\xe2\xb4\x37\xe6\x3c
\xdc\x6d\x08\x65\x58\xe7\x60\xf2\xdb\x10\xb2\x12\x19\x31\x33\xdf\x3b\x64\xb9\x62\x0e\xd9\xa5\x76\x15\x52\x90\xf6\xc7\xf1\x0a\xa8\xf5\x1d\x6c\x02\x07\x3c\x77\x42\x2e\xc8\x00\xd7\x1a\x5a\xc9\xe2\xc3\xe3\x95\x6c\x34\x9b\x01\x84\xed\xbc\xbd\x64\xd0\xd6\x66\xd4\xb0\x0d\x60\x28\xb0\x7e\x5c\x9d\xb0\xce\x70\x3b\xae\xaf\xcd\x2b\x89\xf3\xa2\xc1\xf6\x67\xfd\x48\xa3\xda\x46\x5d\xf2\x7a\xe9\xcf\x39\xee\x88\x85\x1b\x97\x59\x48\x25\x8b\xb4\x4c\xef\x90\xe5\x23\xd0\x41\x06\xad\x6f\xca\x53\xd7\xd3\x0d\x59\x33\x3f\x3c\x12\xbf\x3c\x9d\x45\x52\x18\x1f\x1c\xd2\xd8\xbc\x2d\xa0\x31\x7d\xa1\x58\x6c\xcf\xd9\x03\x64\xab\xfd\x39\xe8\x51\x09\xdb\x03\xb2\xf0\x54\xd9\xc6\xc2\xf0\xcb\x06\x6c\x01\xb9\xaa\x3d\x90\x0b\x30\xc5\xfd\x83\x43\xe0\xa4\x15\x67\x44\x06\x73\x3f\x85\x8d\x54\xaf\x17\xd0\xf1\xde\x3b\x8b\xd1\xee\xfe\xba\x33\x4b\xe3\x40\x01\xe1\x03\xce\xca\x9a\x50\xef\x77\x4b\x65\xfe\xa3\xa9\x59\x7b\x96\x86\xe5\x79\xdb\x99\xb5\xf8\xbd\xd7\x76\x4c\x41\x73\xc3\x35\x76\x66\xb9\xda\x86\x3f\xe1\x5d\x5a\xcf\x4e\x33\x8a\xae\xae\x0f\x55\xd7\xe9\x81\x5a\x87\xef\x7c\xc7\xc5\xa6\x59\x23\x57\x51\xde\xc5\xc6\x55\x51\x2f\x83\x35\xde\x8d\x94\xde\x2f\x4d\xa1\x7a\x1e\x9b\x9b\xab\x3c\x4a\xec\xa5\x6e\x52\xd5\x7c\x12\xbb\xb3\xd9\xb1\xd6\x7b\x1e\x49\xb9\x3f\xa3\x44\x7c\xd2\x03\xb1\x49\x22\x43\xec\xdf\x8f\x5d\x84\xa0\xeb\x6c\x28\x29\x8e\x7d\xa1\x88\x76\xd1\xe0\xae\x53\xeb\x8e\x1c\x6e\x24\xdc\x1c\x45\x11\x37\xdf\x3e\xf0\xbf\x95\x66\x9e\xaa\x79\x4d\x57\x68\xfb\xd4\xc1\x62\x5c\xff\xb9\xf8\x58\xca\x23\x6b\x3f\x68\xdd\x0a\x49\x2f\xd6\xec\xa3\xd3\xb7\x48\x06\x37\xac\xd5\x33\x61\x05\xa9\xeb\xa8\xda\xfb\xb5\x9d\x9b\x02\x34\x87\xa0\xd7\x4a\x7a\x7b\x1a\xe7\x22\x02\xba\x06\x8f\x18\x59\xaa\xf6\xec\x0a\xd8\xca\xab\x39\xc9\x5d\xb5\x7c\x49\x65\x50\x04\xfb\x87\x25\xfd\x28\xce\xef\x03\xc6\x92\x1e\xae\xb7\x0d\x70\x78\x9c\x20\x2b\x76\x5f\xdb\xd8\x3f\x30\xc9\x2e\x8f\x84\x06\xa4\x37\xeb\xe3\x9d\x75\xf0\xce\x86\x0f\x30\x0b\x04\xaf\xf5\x4e\xa8\x81\x11\xe3\xbe\xb5\x36\x5e\xcf\x2f\x52\x2f\x4f\xe7\x92\x54\xe4\x97\x79\x6d\x0a\xae\xbe\x8c\x50\x3b\x97\x69\xf0\x6b\xea\xb8\x92\x60\xd4\x87\xce\x36\x2d\x9e\xba\x06\x8c\x00\x83\x86\x47\xb4\x3e\x9e\x5d\x75\x87\x2e\xf2\x74\xd3\xd6\x37\x2d\x50\x43\xf2\x0e\xda\xd7\x58\xbc\x44\x8b\x87\x0c\xf2\x8f\x83\x05\x1a\x99\x25\xb9\x17\x9f\x98\xe9\x55\x7a\x30\x90\x5c\xf1\x05\xc6\x11\xaa\xa3\x87\xef\xa9\x5d\x00\x8e\x85\x5d\x48\x3e\xe6\x0d\x18\x43\xfa\x7f\x3e\x77\xb9\xe9\x41\x67\x77\x24\x32\x25\xf1\x71\x44\xb9\x83\xdb\xf4\x60\xf1\x72\x76\x9e\x1c\x3d\x4d\xfa\x7d\x58\x42\x03\x6a\xc0\xa3\x6d\xef\x7f\xbe\x98\x03\x6f\xc0\xd7\x19\xe8\x08\x19\xd6\x1e\x0d\x36\xc0\xe5\x4c\xea\x69\x9a\xf5\x6c\xa4\xf1\xb6\x3e\xe4\xf0\xc8\x6e\x2b\x87\x12\xc8\xef\xf8\x12\x8e\x89\x7e\x60\xf8\x5a\xc9\xa2\x56\x4b\x20\x72\xfc\xda\x27\x3e\x5b\x42\x6e\x42\x64\x3b\x88\xf8\x23\xd9\xfd\x7a\x01\x3e\xae\xd2\xb4\x2a\x22\x70\x60\xfb\xf8\x73\xf0\x61\xf2\xf5\x5d\x4f\xa7\x0d\xd9\x92\xd7\xd9\x9e\xb2\x85\x3d\xe8\x3b\xab\x9c\xfd\xce\x29\x40\x05\xe7\x17\x5d\xe0\x8b\x89\x0b\x76\x5a\xb6\x74\x5b\x09\xf4\x51\x4d\xfd\xd0\x55\x71\x14\x7c\xeb\x67\xaf\xf2\x7b\xf7\xd4\x5a\x7f\xed\x2e\xcf\x46\xe6\xfe\x2c\xed\xee\x5d\xb4\x65\x8b\xe4\x1b\x3b\x87\xbe\xb6\x17\x5f\x9c\x45\xef\x0f\x1b\x01\x0a\x28\x93\xf9\x95\x79\x21\x23\x41\x2c\x02\x26\x66\x12\xe7\xde\x6f\x3d\x01\x2f\xa7\x65\xff\xd0\xb0\x96\xae\x2d\xfc\x3f\x6d\x68\xfd\xca\xb1\x26\xf9\x4c\xec\xe5\xaf\x19\xc5\xbc\x1b\xd2\xe3\x95\x54\xa7\x7a\xa5\xc6\x14\x02\x0a\xc1\x2b\xfa\xd6\x2a\x90\xa8\xbf\xb2\x5a\x42\x79\xee\x70\xa7\x85\xcf\x79\xd8\xd6\xf2\x3f\x41\x2f\x50\xd6\xb4\x73\x62\xfe\xea\x89\x5f\xb1\x08\x80\x00\xa2\xf3\xc8\x90\xc0\x83\x73\x2a\xd2\x16\x7c\xa2\xf0\xa4\xa
3\x39\x0c\xc1\x37\x88\xa2\xe2\x13\x5a\x52\x00\x3a\x30\xeb\x0b\xe3\x5b\x5f\xd8\xcc\x4d\x4f\xa2\xc5\xcf\x52\xce\x49\xfa\xde\xfb\x24\xfc\xba\x6b\x04\xbc\x03\xb2\x60\x73\x91\x52\xa4\xf7\xed\x48\x1b\xc9\x41\x3d\xc3\x54\x13\x58\x53\x29\x8b\x43\xb1\xc4\x96\x35\x14\x8d\xea\x79\x39\xda\xa9\xa2\x46\xe6\xc1\xa1\x9c\xca\x5d\x05\x7f\xbd\x83\x5a\x67\xc1\xde\x69\xa8\x68\xb1\xdf\xf7\x1b\xf2\x35\x8e\x1d\xe0\xb4\xf1\x46\xd5\x90\xdd\xa5\x8d\x81\x8f\x4c\x91\x80\x12\x39\x68\xd8\xe8\x36\xf3\xdc\x46\xa0\x77\xbc\x19\x64\x46\x7f\xa4\x3b\xee\x4f\x08\x64\xc7\x6b\xfe\x09\xfd\x71\x04\xe5\x22\xac\x21\x39\xac\x20\x6a\x67\xb7\x8b\x1f\x13\xe6\xb0\x95\xdb\x1b\xd7\x7b\x16\x0d\xc7\x66\x5b\xfc\x6a\x0a\x87\xe6\xdb\x39\x06\x42\x12\xdf\x53\x16\x80\x21\xd2\x5f\x77\x5e\xea\xea\x1d\xda\x9d\x7f\x39\x0a\xb9\x95\x36\x93\x40\x80\xaf\x4a\xe7\xc1\x7f\xee\xe3\x3b\xbe\x4b\x30\xe8\x95\x5e\xc6\xf5\x71\x50\xdd\xf5\xb6\x43\x5d\x54\x02\x40\x02\x55\x86\xbc\x39\xb5\xfe\xe3\x72\xd6\xf6\xa2\x28\x56\x59\x13\xaa\xca\x79\xf5\x9f\x7c\x55\xaf\xf7\x7b\xe1\xb7\x52\xfd\x84\x58\x88\x61\xc8\x12\x21\xaf\x40\x34\x29\x54\x5e\x18\xf8\x06\x91\xf9\xad\x71\xfc\xf2\xfd\x0b\x07\xfe\x16\x08\xd0\x60\x8f\xc9\x24\x9c\xc4\x1c\x76\xb2\xb7\x3a\xfe\x0b\x4a\xee\xea\x05\xae\xdb\x26\x7b\x55\xd0\x07\x29\x86\xc3\x96\x42\x58\x87\x3e\xc6\xb7\x97\x74\x8b\xae\x5b\x19\x06\x0b\x94\xbb\xbd\x5c\xe7\x9d\x05\x7d\xf4\x5c\xf6\x12\xb0\xc4\xfd\x9f\x1b\x1c\xd0\x43\xf8\x38\x9f\xd2\x53\x87\x09\x69\x44\x3e\xf2\xb9\x8e\xa2\x5a\xfa\xc1\x79\xe8\x75\xb8\x42\xa8\x90\x14\xb9\xa9\x07\x90\x49\x86\x25\x53\x77\xd0\x11\x18\xb9\x1c\xf7\x1a\xda\x01\xd7\xd7\x4e\xf8\xe4\x8d\x2b\x9a\xd4\x21\x2b\xec\xb8\xa5\x0b\x0d\x23\x61\x07\x85\x33\xe6\x6b\xd7\xc1\xb7\x33\x2f\x64\xb5\x6f\x85\xed\x81\x8d\x4d\x42\x53\x00\x1c\x3c\x26\xe5\xed\x85\x24\xc5\x1f\x72\x9e\x0b\x83\x00\x16\x2c\xb3\x04\xe0\x97\xec\xa9\x1d\x94\xb4\x88\xcd\xb0\xe6\xf1\x62\x10\x2c\xfd\x39\x7e\xa7\x84\x62\xaf\x79\x13\x26\x66\xce\xe3\x1f\x11\x78\x39\x0d\xdb\x00\xbe\x51\xa5\xe0\xf2\x71\x84\x00\x7c\x49\x42\x73\xa0\x04\x1e\x70\x8e\x8d\xf8\xe8\x4b\xe2\xe2\xd7\x17\xe4\x30\xe5\x2d\xbc\x91\xa7\xac\xde\xef\xca\x1a\xa1\x5f\x98\x79\xf1\x44\x51\xe6\x22\x8c\xc3\xde\x20\x5d\x55\x81\xa4\xae\xdc\xf2\x2c\x2e\x60\x70\x46\xa0\x2c\x81\xc3\x38\x7e\x84\xbf\xc8\x57\xd3\x7b\x5f\x9b\x0a\x7c\x13\x44\x02\xe6\xa2\xfb\xca\x16\xa4\x80\x5b\x1c\x94\xe8\x00\x8e\x42\xcb\xd8\x21\x55\x21\x40\xfc\xa2\xd3\xbe\xd9\x3a\xf8\x59\x9a\xed\xc8\x37\x49\xd1\xfd\xf0\xd6\x9c\xd7\x21\xed\x5b\x58\x71\x59\xbc\xd6\x39\xa4\x0d\x6e\x2d\xc3\x84\x00\xee\x66\x5c\x47\x5d\xfd\xb4\x72\xf5\xc2\x6c\x3f\x54\x33\xfd\x12\xa4\xbe\x51\xf0\x60\x7c\x58\x37\x05\xb3\x96\x9e\x3b\x38\x4b\x50\x82\x4e\x6a\x72\xc1\x47\x40\x6f\x3e\x30\x14\x4d\xc0\x20\xf4\xeb\xe5\x62\xdc\xcb\xc5\x38\xe9\x71\xf6\x70\x6e\x2b\xb9\x6a\x0e\x3f\x64\xd9\x18\xd3\xea\x00\xca\x58\x7f\xcd\x92\xc1\x07\xf8\xa1\x34\x82\x30\xf1\xee\x32\xf4\x62\x20\x17\x74\x69\xc2\xa1\xc2\x10\x81\x6c\x98\x21\xbb\x42\x14\x46\x97\x2a\x2d\x90\x69\x64\x04\x55\x2d\xbb\x25\x0c\x25\xa5\x7b\x5a\x37\xaf\x94\x2c\x42\xb9\x1f\x4b\x79\xc5\xda\x2c\xf9\x20\xa7\x4b\x82\x08\x3b\xaf\x9a\x80\x64\x2e\xc5\x25\xf4\x85\xf6\xf8\x8d\x49\x18\x10\xfd\x42\xd5\x9d\x54\x5c\x75\xf3\x63\xff\x07\x89\xf2\xb9\x48\x86\xef\x9e\x31\x3a\x44\xb6\x3c\xea\xad\xde\xef\x60\x42\xe3\xc0\x45\xf2\x7a\xbf\xfd\x9c\x37\x83\xdc\xf4\x0f\xee\x00\x56\x74\x04\x49\xbc\x85\xa0\x78\x45\xc1\xd7\xc1\x14\x34\x5f\x3a\x50\x5a\xba\x25\x77\x20\x3e\xff\x51\x34\x25\x3d\xa0\xa1\xec\x4b\x02\xf1\xc1\x50\x65\x95\xe3\x98\x05\xa9\xd8\x35\xd0\x3a\xe0\x2c\x2d\x0c\xc5\x4a\xdc\xfd\xa1\x96\xda\x96\x49\xfa\xf3\x23\xd5\xb1\x80\xae\x68\xe5\x03\x48\xcf\x6f\x21\x53\x07\x
[binary payload: long run of hex-escaped bytes elided — not human-readable]
\x03\x33\x3f\xb1\x9d\x87\x59\x33\xbb\x61\x96\xa0\x32\x44\xbd\x39\x1e\x1b\xe6\xf8\x0a\xd9\x05\xb1\x74\xe6\xc3\x0b\x34\x7e\x0e\xf3\x16\x32\x35\xc3\xbc\xca\x6f\xc2\x35\x23\x9b\x65\xf7\xf0\xb5\x85\x70\x0c\x31\xb7\xed\xc2\xf7\x21\x3f\xc5\x9f\xb9\xef\x20\xb6\x5c\xa1\x1b\x6d\xa1\x4d\x6b\x77\x71\x6f\x2a\x69\xfe\x36\x62\xd7\x1c\x77\x00\x29\xae\xb3\x9e\x91\x07\xd8\x50\xef\xfd\x11\xc7\x86\x0d\x8c\x85\xf6\xec\x8a\x25\xb3\x16\x92\x71\x56\x2f\x2d\x0e\x41\x8f\xb5\xc0\x73\x18\x43\x9e\x83\x06\x34\xcb\x89\x6f\xc8\xf4\xc5\xe1\x89\x5a\xc3\x79\x73\xe8\x3e\xe4\x47\x95\x81\x45\xee\x4a\xb0\x7d\xec\xa9\x27\x23\x2a\xa7\x48\x71\x59\x38\x84\x48\xf0\x80\xa6\xc1\x81\x5d\x2e\x82\x73\x37\xdb\x08\x7d\x2b\x49\xf3\x01\x78\x54\x3d\x70\xca\x51\x0d\x50\x5a\xd5\x97\xeb\x87\x16\xda\x16\xf1\xe6\x26\x11\x70\xa8\xb0\x82\x8a\x58\x4c\x35\xfa\x7a\xbf\xce\x99\xaa\x69\x16\xaf\x34\x0d\xef\x2b\xe7\x1d\x7d\x95\x48\x52\x4f\x0c\x3f\xcf\xc7\x3f\xa8\x09\xe9\x6b\xb4\x8d\x3d\x2a\x77\xda\xe0\x9c\xd5\x95\xba\xed\x84\xa8\x3e\x00\xdf\xdb\x06\x9b\x68\x35\x3e\x33\xc6\xd2\x9f\x9d\xfe\x4c\xb3\xce\x0e\xee\xbb\x24\xc4\x88\x10\x51\x68\xf6\xd4\x17\xaf\xcb\xf7\xf6\x6f\xb2\x9c\x23\x9e\x8d\x85\x22\x58\x90\x2d\xc5\x88\xb9\xef\xa0\xdf\x5c\x04\x66\xad\x22\xe8\xfc\x3f\x55\x5f\x96\xe0\x2c\xcf\xf4\xba\x97\x77\x67\x06\x9c\xe0\x66\x30\xbf\x81\xe4\x4b\x56\x7f\x4a\x25\x95\xf3\x9c\x9b\xb6\xa0\x99\xc2\xe0\xa1\x5c\x92\x82\xa8\x73\x44\xdc\xdd\xf0\x46\xed\x72\x43\x62\x01\x1b\x5a\x98\x5f\xe8\x0c\x1d\x1a\x45\xec\xe2\x1d\x21\x03\x9e\x6c\x24\xfb\xb8\x2e\xae\x5a\xab\xf7\x2d\xc0\xd2\xb9\x75\x06\xb8\x12\xcb\xb4\x42\x43\x71\x0f\x2d\x5f\x5a\xa5\xfa\x03\x48\xde\x11\xd0\x86\x62\x97\x73\x8c\xa9\x80\x11\xfe\x89\xb3\xba\xb2\xe3\x1a\x42\x11\xe3\x9a\xc3\x6f\xdf\x20\x12\x15\xb4\x1f\x74\x2b\xc8\xca\xb1\xb7\xcb\x4f\x19\x0c\x96\xf2\x08\x8a\x4e\xb1\xce\x86\x87\x08\x49\xcf\x11\x25\x02\x79\x4b\x71\x43\x5c\xb4\xd0\x7b\x78\x80\x5a\x67\xbd\x36\xd1\x4d\xe6\x7f\x9b\x22\xc4\x2c\xd4\xd6\xc2\xb7\xe2\x15\x9b\xa4\xce\xd4\x89\xfc\x97\x11\xc9\x09\x7c\xd8\x39\x1c\x31\x92\x24\x3b\x5c\xcc\x79\xfd\x4f\xe4\x9a\x35\xd2\xbc\x9d\x5f\x43\x03\x8b\x24\x33\x4a\xf7\x8f\x10\xe3\x66\x63\xfc\x00\x3e\x12\xf1\x1e\x24\xb8\x86\x74\x4b\x8b\xd6\xed\x25\x50\xff\x68\x8b\xa3\xe8\x0b\x1d\x99\xf2\x2d\x94\x7c\x3c\x30\x86\x28\x32\x02\xe8\x9d\xfd\x83\xf1\x26\x8f\x84\x19\x06\xad\x4b\x9b\xec\x26\x98\x49\x3c\x6a\x7e\x00\xe5\x1a\x1c\x9b\x14\x13\xc8\xc0\x64\xdb\xbc\x25\x6a\x36\x7c\xd8\x33\x19\x3e\xf8\xc9\x38\xc2\x80\xa0\x59\x23\x98\xbd\xbb\x65\x20\x2c\x25\x44\x46\x47\x7e\x20\x5f\x65\xb0\x6f\x62\x73\xeb\xf8\x24\xfd\x57\xc3\x10\x43\xa2\xc4\xdc\xf4\x29\x85\x09\x05\x77\x13\x1d\x6f\xb8\x73\xa7\xe5\xa8\xd3\x39\xd8\x70\x2e\x34\xe3\x1d\x07\x83\xa4\x51\x0f\xd5\x4a\x45\x56\xac\x25\xfc\x5f\x50\x69\x38\xb4\x03\xf5\xc6\xa9\x09\x48\x92\x95\x37\x44\x61\x4e\x05\x38\x36\x4c\x3d\x71\x9f\xb9\xce\x0b\x6a\x65\x5b\x63\xcb\x50\xa9\x1a\x64\x0c\x3a\x84\x31\xd1\xa0\xec\x5b\x0c\xc1\x69\x7a\xa1\xee\x20\xfc\x26\x7c\x45\x7d\x73\x86\x6f\x80\xeb\xc0\xa9\x9f\x6f\xf5\x41\xe7\xd8\x9c\xb3\x7c\x23\xc4\x6a\x69\x93\x3e\xa3\xa1\xfe\x63\xc4\x80\xcc\xd4\x2c\xf0\x20\x9b\xc1\x27\xf5\x72\x16\x9b\x46\xb4\x97\x3a\x77\x02\x39\x98\x34\xd1\x03\x1c\xba\x36\xe4\x50\x47\x3a\x6e\x01\xe8\x6a\x06\xc9\x11\x0c\xdd\x52\x66\x60\x78\x09\x2d\x83\x2e\x74\xd5\x78\xc6\x1a\xc0\x6f\xfe\x8b\x30\xd1\x00\x1b\x19\xcd\xef\x39\xe6\x1d\x43\x16\x06\x9a\x05\xe2\xf2\x15\xe8\x5c\x2a\x40\xa9\xe0\x39\x16\x19\x46\x2d\xb5\xdb\x52\xe8\x3a\xf4\x82\x0e\xd0\xd6\xf0\xcd\x4b\x18\x38\xa1\x59\x85\x25\x04\x61\xb7\x12\xb7\x26\x4c\xec\x92\x02\x8d\x47\xd2\x6e\x3e\xa2\x6d\xe5\x8f\xdf\x2d\xb7\xf6\xe6\x7f\xc2\xaf\x68\x70\xd3\x2d\x12\x67\x9
0\x4d\x97\x88\xc4\x9b\x69\x41\xb4\xe9\xd2\x49\x8e\x45\xb2\xca\x9a\x4e\x1f\x82\xdc\xb3\xe7\x09\xc9\xda\xc4\x0a\x83\x5a\x6f\x41\x31\x2a\x47\x32\xb8\x58\xd7\x68\x67\x87\x1c\xbf\x3e\x2b\x7e\x31\xe4\x7f\x28\x38\xe3\x42\xdf\x12\x68\xb5\x34\x02\xde\x5e\x28\xc4\x4e\xa4\xbe\x38\xe1\x49\x2b\xf5\xea\x3a\x3d\x67\x0e\xb4\xfe\x8c\x25\x12\xeb\x6e\x2e\x05\xd9\x0d\x1a\x42\xe1\x37\x61\x5d\x2f\x31\x78\xfa\xfb\xe4\x55\x97\x38\x3c\x83\x6c\x4f\xa0\x34\x41\x3e\x58\xff\x34\xc3\x0b\x1b\x5c\x1c\x11\x7a\xd2\x79\x83\x7f\xc6\x34\xe5\x21\xf9\x48\x75\x48\x33\x37\xb7\x5e\x70\x0e\x1a\x90\x55\x37\xbc\x12\x11\x84\x98\x0f\x98\x3e\x9c\xe6\x85\x1c\x2c\xaf\xa5\x1b\xf2\x80\xa6\x13\xfd\x5e\x39\x2e\x5b\xd7\x44\xf5\x6d\xba\x16\xb5\x63\x40\x61\xdb\x80\xc1\xaf\x88\x29\x92\xd8\x82\xb8\xac\xd3\x74\xce\x3c\x70\x2c\xef\xd5\x38\x37\x97\x65\xa5\x95\x91\x14\x88\xf9\x49\xae\x72\x7e\xa7\xac\x32\x50\x5b\x67\xe5\x21\xd9\xa0\x56\x34\x39\xe8\xf1\x50\x42\x28\xc9\x9b\x10\x6e\x0f\xe4\x15\x6d\xac\xe8\xd3\x7a\x87\xaf\xc6\x6a\x2d\x30\x6f\x45\xfa\x31\xe5\xd5\x4d\x02\xb3\xc9\xc3\x82\x60\xc0\xa2\x98\x25\x7e\x97\x62\x14\x61\x1d\x28\x3a\x0b\xa5\x27\x7c\x69\xf6\x7f\xa0\xda\x9c\x14\x75\x4b\x9a\x4e\xd7\xb0\xc3\x14\x45\x7c\xbf\x69\x64\x25\x9e\xc6\x48\xd4\x43\x80\xda\x2f\x9d\x5d\x1e\x2b\xd8\xb3\xf8\xba\x6c\x38\x2b\x86\xef\x9d\x43\x93\xf4\x5b\x98\xc9\xf5\xed\x91\xd3\x2f\x63\x34\xce\xc5\xf9\x92\x8a\xc3\xa8\xec\xd7\x33\x67\xfc\x83\xfd\xa6\xe3\x50\xed\xff\xb5\x1b\xc2\xdb\x06\xeb\x0b\x1d\x3f\xe8\x21\x9f\x9b\xfc\xa3\xcf\xc9\xef\xfb\x63\x35\xbb\x78\x3c\x4c\x24\xfb\x60\xd4\xc3\x96\xe6\x23\x49\x5c\x78\x5f\xd0\x5a\xeb\x83\xb7\xc2\x0f\x83\xdb\x8e\xff\xfd\x8f\xd3\xab\xd6\xb7\x25\xaf\xa6\xa1\x1a\x64\x1d\xf2\xb6\xc1\xc6\x38\x87\x21\x05\x3c\x2d\xc9\xc6\xa9\x2b\xeb\x37\x18\x64\x20\x98\xa9\xfc\xbc\x77\xc5\xa8\x4d\x29\x9b\xef\xee\xbc\x0b\x94\x77\x89\x65\xdb\xc2\xe2\x0e\xef\xef\xae\x27\x04\x26\xcf\x97\xe5\x92\xd7\xee\x9f\xb1\x4f\x3a\xce\xae\xf7\xe4\x5d\x36\xae\x58\x5d\x74\x9f\x70\xd5\xd4\xb1\x1b\x69\x9f\xe1\x97\x11\x9d\x35\x43\x9e\x41\xeb\x57\x5d\xf2\xf4\x21\xbd\x68\xea\x27\xb1\xfe\x59\x38\x6f\x8c\x0c\x55\xbc\x4b\xb8\x15\xc9\x51\x83\x14\x20\x28\x3e\x6a\xcb\xb9\x4c\xde\x60\xbe\x99\x55\x03\xd2\xcf\xcf\x03\x03\xb9\x3c\x3e\x55\xf3\xf6\xfa\x99\x7e\x19\xec\xa6\xbf\xc9\x4f\x14\xf3\x28\xab\xa5\x72\xe1\xea\xdc\x7f\x63\xee\x32\x72\x80\xf2\x9f\x7d\x4b\x31\x00\x1e\x1c\xf3\x15\xd6\x1b\x45\xd3\x26\x6f\x28\x36\xc6\x49\x9f\x4d\x9c\xaa\xfc\x8c\x1f\x99\x4e\x36\xc1\x6f\x6b\x9a\xbd\xf2\x44\xd2\xfd\x4c\x22\xfa\xdb\x3b\x0f\x9a\xb5\xb0\x05\x05\xaa\xdc\x54\x43\x64\x9f\x18\xdb\x1b\x4a\xef\x08\xe8\x83\x03\xc4\x78\x0e\x50\x6c\x30\x8b\xe9\xff\xe6\x68\xe1\x65\xc3\xe8\x60\x0a\xbf\x60\x4c\xe5\x44\x53\xff\x0f\xf3\xc4\x5f\x1e\xde\x69\x8e\x20\x9a\xe3\x95\xee\x6b\xb5\x31\x31\x19\x3f\x90\xc0\xf6\x00\xdf\xab\x34\x79\x74\x1c\x5d\x63\xe4\x85\xc1\x02\xc3\x88\xaf\x32\x05\x1d\xe8\x94\xb9\xc6\x49\x7e\xc2\x4b\xb1\x74\x2b\x9f\x24\x00\xc2\x77\x43\x6c\x9e\x8c\x3c\x9d\x22\xc3\x8d\x15\x7d\xdb\xe7\xcd\x4d\x9e\x6c\x93\x5f\xea\x9a\xc2\x53\x83\xc7\x4d\xfb\x8b\xe6\x9e\x5a\xa0\xf2\xc8\xcd\x8d\xa1\xdc\x3a\x31\xf9\x0d\x0b\x5e\xd3\x00\x8c\xf3\xd5\xdd\x35\x74\xdc\x3d\x4b\x8b\xfe\x95\xc2\xa2\x63\x52\x72\x97\x9c\x37\xae\x4e\x44\x12\x67\x03\x28\x09\x29\xa6\xec\x60\x17\xa0\x33\x08\xc0\xca\x03\x43\xbd\xc2\x2b\xb2\x9b\xca\x71\x38\xbe\xb3\x8f\xe4\xc8\xb1\x2a\x56\x80\xe4\x45\xb7\x58\xf8\x6c\xd1\x79\xb8\xde\x55\x95\xae\xdd\x21\xa7\x5d\xc3\x24\x43\x44\x24\x0d\x8e\xae\x76\xff\x4f\x5d\x6f\xd8\x61\xb0\x0f\x65\x5f\xe6\x8b\x7a\x93\x86\x8a\x5c\x2e\x8a\xcf\x75\xbb\x2d\xc6\x49\x60\x55\xff\xaa\x75\x62\x16\x5e\x56\xf3\xc7\xf6\xf9\x41\x8e\x90\xd4\xfc\xc1\x2c\x
41\xd0\x56\x27\xb0\xc6\x2f\xd3\x26\xc2\x2d\xc8\x7b\x52\xf3\x55\x6f\x86\x50\x0c\x50\x21\x04\x2e\x1a\x35\xda\x6a\x43\x9b\xfe\x1d\x23\x0f\x43\xf6\xd9\x71\x5f\x51\x94\x2a\xb3\xe0\xe1\xab\x71\xf2\x77\xd7\x9f\x72\x9a\x61\xce\x31\x82\xa3\xeb\xc5\xc0\xf5\x50\xf1\x8a\xc1\xdd\x55\x2e\x46\x79\x2e\x68\x74\xf1\x87\x94\x4c\xb1\xaa\x6b\x0e\x12\x88\x21\xbf\xe5\x73\x63\x82\x35\xb9\x51\x31\xa7\x72\xcd\x65\x80\xdf\xbc\x37\xa8\x17\x03\x99\xbc\xfb\xf6\xcb\x69\x8d\x81\xc1\xdd\x25\xc4\xea\xff\xca\xa2\x8f\xe5\x51\x97\x9b\x35\x86\x31\x80\x3a\x45\xe4\x25\x3d\xf5\x0b\xae\xe4\xe2\x2d\x1d\x4a\x39\xbe\xfa\x04\xc3\x45\xfb\x74\x78\x68\x90\x62\x63\x5f\xbe\xa7\x76\x19\x98\xc2\xbd\xe0\xbe\x1e\x32\xc4\x60\x92\xbb\x1a\x26\x94\xe1\x99\xd1\x93\xd6\x0c\xc3\xf6\xe7\x76\xe7\xf2\xd3\xde\xbd\x1c\x9d\x1d\x5b\x28\x2f\xd1\x8f\xc2\xa1\x1f\x16\x1b\xdd\xf5\xc2\xed\xcf\x85\x9e\x59\x5e\x19\x90\x87\x23\xc8\x94\x26\x83\x7d\xc6\x18\x4e\xaf\x98\xf1\x13\x01\x09\x79\xe7\x8d\xc8\xad\xa3\xf7\x8e\x09\x72\x19\x02\xb0\x84\xc3\x62\xec\x91\x2e\x11\x8c\x5a\x9a\x49\x49\xaa\x4c\x6a\x3a\xdd\x99\xa5\x09\x29\xe5\xff\xaa\x73\xd8\x60\x84\x73\x46\xd5\xeb\x8d\xbe\xc7\xde\x2d\x42\xc4\xaa\xb9\x4a\xa7\x56\x61\xd6\xcf\x5a\xce\xa7\x2e\x2f\x3f\xac\xda\x08\x82\xd2\x95\x95\xc4\x70\xfa\x83\x0c\x9a\xd3\x75\xf3\x8a\xba\x29\xc3\x15\x53\xd5\x3e\xa7\x5a\x82\xb1\x80\x05\x1d\xd6\x3a\x61\xe2\x4b\xcd\x6b\xee\x46\x24\x69\x52\x43\x7d\x1e\x2d\x05\x6b\xec\x88\x01\x1c\x0c\x34\x4a\x18\x68\xc4\x08\xc5\x6a\x57\xca\x23\x1b\x20\x09\x07\x19\xc4\x72\xa0\x38\x42\x85\xd6\xd0\xaa\xae\xf6\x59\xef\x35\xc8\x52\x22\x4f\x1d\x24\x24\xb1\x9a\x71\xca\x94\x5f\x64\xe5\x84\x21\xb4\x71\xa5\xc8\x6f\x50\x94\xf8\xb3\x52\xa0\xc8\x4a\xc9\x2a\x9e\x36\xc4\xc8\xa2\x40\x19\x94\x0d\xc7\x56\xfb\x30\xc8\xb1\x6f\x69\x55\x26\x29\x53\xe9\xa5\x87\xbf\x40\xaf\x41\x9b\x2d\x55\x92\xc8\xb6\x0c\xa7\x53\x47\x59\x64\x24\xc8\x39\x0a\xb0\xa3\x6c\x80\xe7\x2c\xd6\xab\x18\x1d\xbc\xc4\x70\x3e\x83\x86\x53\xc8\x2f\x38\xe9\x79\x76\x86\x99\xd7\x29\x65\x31\x2b\x47\xd1\x93\x60\x9b\xde\x15\x33\xcf\x92\x7b\x35\x8c\x3c\x8f\x3e\x07\x74\x5a\xe5\x21\xde\xcc\xec\x73\x9a\x9e\x18\xe2\xbe\x80\xfc\x77\x99\xc2\x20\xa4\x3b\xd5\xcc\x1e\xb8\xd7\x17\x38\x63\xcc\xa6\x03\xdb\xd3\xe3\xeb\x83\x19\x60\x4d\x9b\x83\x31\x15\x8c\xab\xb4\xa7\x7f\xf9\x52\x2c\x83\xee\xe5\x6c\xf0\xe0\x2a\xc9\x03\xc5\xda\xd9\x85\x6c\x29\x29\x1d\xc0\xef\x57\x16\x28\x59\x09\x90\xd6\xcd\x79\x42\xf8\xd7\xe1\xfa\x8f\xfb\x87\xf8\x4f\xfd\x07\xe4\x0c\x19\x48\x1e\x3f\xbb\x96\x1a\x20\xa5\x7d\x88\xda\x37\x58\x3d\x63\x45\xa3\x2a\x92\x10\x7a\x8d\x81\xa2\x6d\xd0\x72\x50\xc8\xdc\x35\x24\xb8\x50\x6f\xfc\xa6\xd8\xe4\x0e\xfd\x52\xe0\x10\x5f\x74\x12\x95\xb4\x09\x9d\x46\xe5\x69\x9f\x3a\x5d\xab\xba\x1e\x43\xf9\x78\xc4\xce\xad\x4a\x8c\xc8\x29\x56\xdb\xf1\x20\x64\xb8\xc1\x41\xac\xf9\xc7\xf6\xa3\xe6\xbd\xfb\xcf\xf8\x52\x1c\x4d\xc9\xe0\xee\x3c\x12\x6e\x27\x6b\x56\x00\x06\xb3\x6f\xe8\x20\xe8\xf7\x94\x7e\x00\x43\x7d\x0a\x1c\x53\x11\x61\xfb\x03\x1d\xb1\x47\x6f\x33\x4f\x8f\x24\xd5\x40\x3a\x4a\x8a\x53\x93\x68\x35\x3a\x39\xc0\x51\x9f\xf3\x3e\xd3\x9b\x8f\x3b\x75\xbd\xe2\x33\xbd\xe8\xc4\x00\x4a\x96\x98\x58\xb7\xdc\x67\x52\x3b\x44\xcd\x2a\x5a\x76\x63\x69\xc2\xf4\x08\x46\xd6\x22\x12\xd5\xfe\x77\xb7\xbf\xa0\x61\xc5\xe4\x83\x61\x78\xe4\x5c\xda\x78\xbb\xa7\xc2\x4d\x36\xa6\xd3\x6a\x41\xc4\x2f\x9f\x75\x03\xed\x2a\x48\x57\x8b\x88\x6c\x69\x95\xfe\xcb\x99\x16\x6b\xb7\x78\xe5\x0b\x1b\xf4\x33\x49\xf6\xff\x14\x51\xd1\x3a\x18\xde\x8c\x80\x44\xb3\x7b\x19\xcc\x25\xcc\x4c\xbf\x05\xe4\x85\xf2\x0f\x25\x6b\x63\xbe\x1e\xfd\x51\x9c\xe4\x03\xef\x6f\xdd\x28\xc3\x24\x2a\x45\x8a\x33\x92\x1c\x1a\xe9\x56\x60\x3a\x30\x1c\x0d\xa7\x8c\x53\x86\x27\x37\
x8d\xcf\xdc\x3c\xa3\x11\x3c\xc5\x71\xb2\xca\x79\x24\xbf\x6b\x13\x33\x0b\xd1\x05\x8d\xfb\xed\xfd\x0c\x6a\x56\xf0\xbd\x9e\xb4\xff\x00\x31\xeb\xff\x6e\xf1\xb2\x5a\xfe\xbf\xa0\x6d\x4d\x3a\xe3\x28\xe7\x90\x2a\x0a\x15\x98\xec\x4d\x48\xcd\x58\xf8\xe4\x37\x17\xc3\xaf\xe2\x36\x42\xa8\x47\xf3\xaf\x5c\xb2\x7e\x0c\xa9\x5c\x1b\xeb\xee\x56\xfc\xb9\xa3\x72\xf0\x9b\x51\x94\xd6\x63\x40\x73\x6d\x4d\x99\x5c\x8d\xf9\x06\x2d\x7f\xea\x26\x60\x9d\x6a\x4d\x06\xb6\x7c\xeb\x14\x5d\xfc\xbe\x85\xd2\x8b\x5b\xe4\xd2\xb8\xc4\xc7\x2b\x95\xb0\x74\x76\x40\x73\xae\xa1\xd6\x8a\x49\x16\x62\x6c\x2d\x3f\x8b\x00\x6f\x59\xd6\xdc\x7a\x73\x09\x01\xf2\xc0\xd2\xad\x2b\x4a\x4f\x12\xd8\xa8\xdd\x09\xcb\x15\x29\x37\x45\x26\xa4\x95\x73\xa6\xf5\xca\x7e\x71\xc5\x2e\xb6\xd8\x3e\x29\xdc\xd8\xd2\x56\x78\xff\x53\x24\xf0\xb7\xa4\x08\x4a\x4b\x7f\x3a\x5e\x39\x35\x6e\x31\xb8\x69\x4a\xd5\xe0\x10\x3e\x2e\x61\xec\x02\x41\x46\xe6\x44\xfd\xdf\x1d\x9e\xd6\x44\x4f\x47\x7c\xd5\x60\xd6\xe2\x1b\x1d\x94\xf2\x38\xee\x06\xc3\x7a\x86\xae\x0e\x74\xfa\x1f\x8c\x22\x19\x0e\x96\xd6\xad\xfc\xb9\xe3\xe6\xec\xb7\x7b\xbc\x90\x51\x86\xcc\x31\xfa\xbe\xb4\x30\xaa\x38\x1a\xf3\xe3\xe1\xce\xe5\x7b\x35\x26\x00\x1e\x60\xc0\x2d\x04\x7a\x1e\x07\xb4\xb4\x88\xbc\xcb\x2e\x20\xf3\x92\xda\xe4\xfe\x72\x70\xbe\xcc\x00\xa6\xaa\x1d\x6d\x69\x15\x23\x6d\xad\x6f\x6e\xbd\xea\xae\x92\x79\x76\x06\x62\xa9\x23\xaa\x9f\xe6\x9c\x34\xd2\xd5\x26\x1b\x4c\xfa\x45\x55\xb7\x6f\x82\x27\x0c\x4f\xb6\xd6\xe0\x79\xad\xca\x27\x3c\xec\xc5\x67\x8f\xf7\x28\xa1\x20\xe1\x88\xa5\x8c\x58\x22\x85\xfe\x70\x3d\x3e\x07\xe8\x71\x71\xe2\x9a\x5c\xb4\xba\x05\x5f\x6d\xe4\x54\xfa\x51\x40\xbd\xf1\xcb\x98\xd9\xfb\x75\x46\xd9\x37\x1a\x3b\x5b\x62\xe2\x0e\x1a\xf5\x60\x6e\xc1\x3c\xfc\xd2\xca\x9f\x67\x4b\x8e\x74\x23\x43\x63\x89\x88\x1d\x98\xeb\x4c\x04\x39\xb2\x24\x18\x8f\xac\x5c\x45\x78\xc8\x5c\xe1\x26\x13\xee\x3a\xb9\xfc\xaf\xca\x4c\x46\xde\x35\x09\x5d\x6b\x92\xd0\x52\xeb\x31\xb3\x23\x47\x78\xea\x48\xaa\x2b\x0d\x90\xf4\xf5\xce\xab\x68\x64\x9c\x1b\x3a\xac\x4a\xe0\x7f\xae\x59\x86\x31\x2d\x24\xdc\x0e\xe8\x94\xf0\xf0\x76\x74\xef\xfe\x1d\x89\x63\xc5\x03\x39\x40\x83\x7a\xa5\xd6\xb3\xac\x8c\x1c\x00\xb1\xd8\xd4\xa8\x1d\x98\x8b\x8f\x3b\x96\x1e\x0f\x89\x77\x1e\xc9\xbd\x3d\xb8\xf1\x14\xd7\xe8\xce\x60\x4e\xd4\x7a\x69\x8a\x96\x86\x7b\x00\x67\xdc\xcd\xd0\x63\xb5\xca\xd6\x3f\x6a\x1b\xe7\x2b\xbf\xc9\xc9\x64\x7c\xd0\xb5\x49\x13\x15\x1f\x80\xdf\xe3\xaa\x2e\x41\x0d\xcb\xcb\x2a\xa7\x24\xe4\xb1\xfa\x17\x54\x97\x7b\xe3\xab\x54\x17\x0e\xec\x2a\x87\xae\x14\x6b\x5f\x93\xde\x3d\x8d\x61\xac\x7d\x65\xe7\x0a\x26\x33\xe4\x9c\x49\x8a\xbe\x52\xea\x62\xff\xcc\x3e\xf6\xdc\x3f\x3f\x17\x98\xdb\x53\xa7\x48\x30\xbb\xa1\x82\xc6\x3b\xb8\x83\xbb\xe0\x95\xe4\x4e\x0f\x1c\x78\xd3\x90\xa0\x66\xb7\x62\x09\x12\xca\x5e\xaf\x3c\xd4\x70\xa3\x79\xf1\x0b\xdc\x35\x0a\xda\x31\xa6\xde\x65\x5a\xa3\xf1\xef\x4e\xa2\x2d\xc2\x1f\x7e\x45\x45\x33\x50\x7b\xb9\xfc\xde\xed\x2e\x13\xff\x21\x9a\x8b\x52\x3c\xf7\x12\xbb\x23\x1f\xc3\x9e\xd4\xc6\x3b\x6f\x8b\x31\x4e\x44\xab\x16\xdb\x8c\x8b\x8e\x30\x42\x90\x9b\xac\xb5\x7c\x3f\x75\x2d\x99\xcc\x35\x7c\x19\xfc\x4f\x59\xd8\x09\xdf\x11\x3f\xf3\x52\x4a\xeb\x7b\xe6\x7c\xe2\x9e\xbe\x89\x31\xb0\x1d\x34\xf2\x2a\x40\x55\x03\xbb\xe5\xce\xd1\x27\x49\x8e\xd2\x49\xbb\xd5\x3b\x07\xff\xb9\xa4\xaa\xd2\x2f\x38\x41\xf9\xd4\x89\x30\x1f\x24\x9d\xd3\x17\x7a\xbb\xa5\x1e\x6f\x4f\x9b\x07\xc2\x63\x17\x45\xd5\xe1\x7f\x64\xce\x91\x5b\x74\x43\x9b\x41\x39\x2e\xdb\x6d\xa3\xc5\x50\x68\xdb\xee\xac\x24\x61\xeb\xfe\x90\x96\x73\x87\x44\xec\xd6\x6a\xef\x62\x80\x58\x47\xda\x9a\x93\x23\xfd\xfe\x6d\x35\xf8\x63\xb5\x49\x80\x1d\x68\x97\xb9\x4f\x70\x21\xb7\x8a\xf8\x08\x6f\xb4\x61\x71\xbf\xea\x7e\x45
\x29\x3e\x9b\xa3\x49\xfc\x39\xcc\xe2\x0b\x16\xfd\x97\x3f\x09\xea\x77\xf4\x0e\xa9\x72\x3f\xf1\xfa\x99\x20\x91\x5e\xba\xd5\x85\x04\xac\xda\x15\x95\xe1\xbe\x33\x88\x91\xf7\x4c\xaf\x38\xb8\x8e\x14\x9c\xac\xf2\x01\xbb\xd3\xd1\xf5\x95\xe1\x4a\x39\xe5\xd0\xd3\xb8\xa8\x21\x31\xd2\x1f\x32\xcb\xee\x65\x53\x56\xd1\x11\x83\xc4\x54\x22\xee\xbd\xe1\x6d\x94\xe1\x50\x11\xc9\x6f\xf6\xde\xbb\x95\x89\xe3\x49\x6b\xb2\xff\xb5\x38\xca\xae\x36\x29\xb8\xe8\xd1\x41\xb6\xdf\x27\x53\x7d\x41\x1a\x40\x1b\x66\xcd\xff\x35\xef\x69\x0b\x37\x56\xd6\xc8\x16\x62\x35\x9b\x38\x94\x59\xe3\x77\x03\x36\x40\xf3\x56\xd6\xda\xa6\x1e\xd7\xde\xf2\x42\x67\x21\xab\x89\xbf\xda\x31\x18\x7b\x57\x09\xb7\x9e\xff\x83\x42\x39\x89\x76\x90\x2a\x15\xcd\x0e\x36\x4e\x7c\x60\x48\x50\x10\x27\x0f\x6c\x12\xe5\x32\xb8\x61\xcf\x37\xd0\x30\x88\x85\x67\x1d\x82\x60\xd9\xc1\xc9\x9a\x07\x78\xde\x8d\xcc\x3a\x34\x05\x4d\xff\x9e\xea\x5b\x96\x3a\x4e\xf3\x2a\x41\xc4\xbb\x39\xf7\x05\x72\x9f\x0c\xe5\x9d\xdb\xb7\x04\x17\xd0\x3a\x18\x4b\x40\x3b\x85\xc8\x8c\x20\xf8\x51\xf6\xd3\x1d\x7c\x94\x05\xb2\xa5\xef\xf7\x1e\x45\x68\xfb\x7e\x6b\x87\xf7\xff\xc7\x9f\x83\xf7\x07\xd5\x62\xb7\xf4\x91\xee\xc9\x96\xfe\x37\x92\xb4\xf6\x62\xd2\xef\x96\xee\xaf\x68\x6c\x37\x02\xf4\x24\x9e\x5d\xf7\x8f\x10\xb0\xc1\x4e\x5c\x1f\x92\x41\x91\xf6\xae\xf3\x0e\xa2\xb1\x2f\x88\x7c\x27\x9b\xf1\xcd\x75\xba\x08\x66\x0f\x16\xd3\x57\x44\xcc\xcc\x14\x0a\x8d\x40\xec\x09\x10\x89\x4b\x38\x93\x76\x9b\xdc\xfd\x4e\xa4\x3f\x51\x70\x53\x7b\xd6\xee\x16\xc4\x46\x02\x8c\x40\x06\xfc\x21\x55\xac\xad\xac\x7f\x95\x48\x64\x4d\xfb\xfb\xb7\x77\x70\x76\x93\xd3\x68\x79\x16\xfa\x36\x80\x2e\xd8\x0d\x87\x3c\xb3\x77\x2f\x22\x0a\x06\x37\xd2\x5e\xc1\xec\x12\x83\x1b\x32\x29\x74\xf4\x25\x75\xb6\xa0\xee\x51\x39\x83\xfe\xc8\x5c\x13\x2b\xd7\xb9\x54\x19\x0b\xe9\x37\x4e\x7a\x23\x51\x7b\xf2\xa4\xe3\x12\x5e\x4c\xa3\x54\x76\xc1\x07\x6c\xe1\x00\xf3\xa1\x01\xe9\x0a\x65\xe2\xec\x20\xc4\x61\xdd\x31\x88\xe5\x73\xde\xc2\x85\x8a\x19\x3a\xde\x13\xb7\x6e\xce\x79\x28\x58\x2a\x86\x9d\x23\x6b\xe5\x09\x58\x55\xa0\x74\xe1\x2f\xed\xbf\x67\x11\xf7\xe6\x98\xed\xb1\xae\xa1\x4f\xe6\xad\xb0\x7e\x63\x3f\xc8\x60\x30\xef\x64\x24\x54\x42\xd3\x1e\xac\x3b\xb9\xa7\xac\x85\x1e\xa3\x78\xb5\xc9\xa9\xeb\x89\x04\x10\xf4\xe1\xaa\xac\x79\x17\xa2\x70\xc8\x81\x55\x10\x27\x9a\xd7\xfc\x1d\xc5\x64\xcc\x2f\x3b\x10\x25\x22\xf0\x4d\x78\x07\x1a\x73\x80\xf9\x26\x0a\x65\xd7\x35\xcb\x16\x6a\xcd\xdb\x21\x53\x20\x0f\xee\x40\x65\x91\x9b\x5a\x4d\xaa\x7f\x94\xfd\x1d\x66\x48\x20\x4a\xf3\x2e\xd3\xdd\x77\x95\xf6\xcb\x9a\xde\x68\x8b\xc9\x64\xd3\x5d\x4f\x9e\x67\xe2\x34\xbb\x24\x9a\xdd\xd9\xc9\x72\xe7\xfc\x16\x3b\xad\x61\x20\x99\x09\x19\xf5\x5c\x93\x68\x77\x7b\xbd\xbb\xf3\x0f\x83\x13\x78\xbd\x54\x49\x3b\xa1\x8e\x9b\x87\x70\xe2\x9a\x82\x39\xa7\xc1\x90\x73\xe3\xe4\xca\x94\x1e\x1a\x3e\x58\x6b\x73\xe7\x53\x2b\x47\x5d\xfe\x10\xa6\x41\x43\x55\xc9\x61\xf5\xf2\x4e\x27\x7a\x22\xa2\xa7\x51\x28\x97\x04\xb0\x9b\xbe\x37\x77\xa4\x24\x2c\x36\x6e\xd2\xbf\x1a\xe9\xa0\xec\x85\x2e\xad\x2a\x45\xc1\x50\x16\x31\xad\x91\xda\x60\xe5\x33\x18\x64\x8d\xb2\x7c\xce\xa6\x8b\xe3\xe4\x50\x5c\x36\xf8\xe0\x0e\xe9\xa4\x94\xe6\xc2\x19\xa9\x85\xce\xb5\x4b\xbd\x3f\x9c\x8d\x59\x34\x75\xb0\x54\xb9\x05\x21\x5a\x4e\x02\x9d\xa6\x1c\x00\x06\x4a\x4b\x2f\x9e\xeb\x1f\x3e\x44\xd6\x5f\xe5\x86\x87\x82\xaa\x4b\xdd\x79\x8c\xed\xa7\xf3\xb1\x04\x9b\x7e\xa9\x3d\x99\x66\xa9\xf9\x08\xfa\x5b\xcd\x94\xad\x05\x5f\x8f\xae\x47\xca\x22\x5b\x76\x9f\xb3\xe3\xaa\x94\x29\x6a\xb3\xac\xa4\x47\x80\xb7\x07\x09\x56\xde\x95\xb5\x6c\x71\x05\xa2\x4c\x5a\x53\xad\x0e\xe6\xb2\xa6\x03\x1a\x40\xff\x39\x3b\x2f\x7c\x86\xec\xd3\xe5\x1a\xce\x5f\x38\xeb\x8e\xbc\xb6\xa2\x10\x0c\x0c\x8
6\x58\xe8\xcc\x25\x38\x72\x98\x1a\xf6\x46\x00\x0d\x1b\x77\x12\x8b\xc3\x59\x76\x3c\x30\x24\xe9\xe9\x1d\x64\x8d\xf2\xcf\x52\x08\x46\x08\xfc\xdd\xc0\xe2\xb8\x00\x4f\x02\x7d\xd3\x6d\x17\x89\x2e\x6f\xba\x68\xbc\x0b\xdc\x6a\xdd\xe7\xfa\xd0\xca\x1f\x4b\x69\x89\xe4\x09\x00\xb2\xc6\x96\xc8\x67\x05\x9f\x0f\xb3\x00\x93\xe3\xbc\xf3\x38\xf2\x54\x36\xf0\xf3\x32\xca\xda\x13\xe2\x78\x7a\xad\x38\x2f\xbf\xd8\x28\x5c\x9d\xee\x25\x8c\xd7\x01\xc8\x2f\x74\x3a\xa0\x58\x96\x4c\x81\xb5\x0a\xfd\x2d\x66\xdc\x92\x0e\xd0\x8d\xe3\xbd\x01\x21\x4e\x44\xc0\x4e\x9a\x00\x29\x70\x12\xcd\x6f\x09\xba\x9f\x5f\x90\xb5\x0c\xa3\x78\x86\xe3\xd7\xdb\x28\xf9\x22\x35\x06\xb0\xfe\x94\xc5\x40\xa1\x34\xaf\xfa\xff\x40\x3a\x61\x28\x03\xee\x48\x8c\xb3\xfd\x95\x9d\x3d\xdf\x3f\x68\x54\xb8\xc9\x52\x50\x02\x7d\xd2\x37\x4c\x8e\x1a\xc9\xd8\x7f\x79\x6f\xfd\xff\x3b\x05\xa6\xff\xa8\x90\xfa\x97\x3e\xcd\x3f\x7b\x03\xa4\x00\x4a\x95\xf8\x2f\x59\xb3\x7d\x39\x60\xec\x08\xfe\x48\x99\x2b\x30\x2d\xe3\x8d\xeb\x9f\x8f\xcd\xf8\x5f\x4d\xf2\x94\xef\xbd\x39\x31\xd0\x3b\xd0\x00\xfb\xf7\xa6\x74\x4e\xb0\x49\x0b\x15\xf4\x40\x7d\x94\xa0\x6c\xf1\x19\xf2\x2c\x94\x64\xbf\x3c\xdf\x4a\x35\x46\x54\x3a\x31\x4c\x30\xf7\xb4\x41\xa0\xe1\xa6\x73\xce\x7d\x84\x4b\x11\xa6\xc4\xfc\xcd\x9a\xef\xf5\xec\x1f\xa9\xf3\xfd\x64\xec\x83\xc0\xa3\x48\x56\x06\xf9\x38\x66\xaa\x4b\xcc\x2d\x7b\x5c\x6e\xae\xc1\xa8\x9e\xeb\x9b\x86\x48\xd6\xb3\xa9\x6b\xf0\xf8\xee\x73\x0a\x13\x25\x6f\x94\xe7\x2a\x57\x5a\xfb\x76\x64\xa4\xc4\x9f\x08\x63\xba\x6e\x43\x84\x49\x9d\x88\x41\x38\x45\x90\xc9\xc8\xa4\x08\x0e\x32\x12\xb5\x1e\x76\xb8\x34\xcf\xf5\x69\x35\x1f\x0f\xf3\xec\xd7\x43\xf2\x59\x9d\x9e\x4b\x77\x55\xea\xc9\x8f\xf3\x9f\x4c\x3a\x67\x18\xf3\xf1\x20\x6e\xa5\xdc\x3d\x94\x20\x4e\x16\x5c\x41\x5e\x61\x59\x83\x85\xb8\x0e\xe4\x05\x0e\x91\x93\x32\xe7\x8f\x47\xd2\xe7\xc8\xe5\x35\x10\x64\xbc\xeb\xf6\xa2\x33\x17\x61\xca\xe9\x6c\x4a\xcc\xc9\x4e\x72\x3b\x52\xa5\x0b\x90\xc4\x57\xf4\x6b\xa1\x7e\x35\xf1\xa8\xd5\x72\x75\x98\x73\x18\x32\x5d\x61\xa4\x67\xb8\xc4\x4a\x1f\x66\xce\x98\x1c\x22\xe7\x70\xdd\x82\xc5\x69\x57\xd8\xfd\x9e\x70\x01\x6d\x09\xa2\xa1\x32\x44\x66\x06\x6f\x66\x7b\x63\x99\x54\x03\xf5\xbc\xf4\xcf\x59\xc2\x63\x65\x4e\x1f\xb6\x54\x06\x94\x1e\x39\xff\x74\x80\x90\xd9\xb3\x26\x3d\x7f\xab\x1d\x74\xab\x12\xd4\xf1\xb4\xad\x8d\x71\xc8\xcf\x6b\x74\xa5\x4a\x3a\x43\x8a\x0e\x0c\xb8\x85\x2c\x31\xdc\x21\x53\x71\x57\x30\x16\x33\x10\xf1\x9b\xd2\xd6\xc8\x1f\xdc\x82\xa9\xb8\xe5\x60\x3b\xba\xcf\x8a\x23\xab\xd6\xc9\x67\x7c\x8a\x0d\xf8\xd4\x26\x0c\x39\x5a\x29\xa3\x29\x0c\xfa\xe2\x72\x13\x63\xda\x10\xf5\xda\xbd\x2c\x2d\x28\x84\x12\xb1\x7e\xde\xcf\x67\xe6\x5c\xa9\x8d\x9f\xa0\x2e\x48\xbb\x25\x60\xab\x81\xab\x30\x7d\xe8\x9f\xf7\xc4\xa8\xc8\xf3\xee\x46\xd0\xcf\x3b\x4c\x8f\xda\x4d\x52\x9f\x63\xf1\xc8\x5a\xe9\xe4\xbd\x2c\xcf\xa7\xcc\x2c\x99\xa7\xe7\xd3\x8a\xb7\xa5\x2f\xe0\xd9\x72\x90\x05\x39\xb2\x71\x6e\xa0\x58\x88\xcd\x95\x46\x88\xc0\x35\xda\x04\x75\x78\xcc\x03\x13\xcc\x41\x21\xec\x8e\x92\xc0\x22\xa8\xc1\xf2\xfb\x43\x5e\xe0\x1d\x22\x06\x18\x57\x60\x0a\x88\x66\x54\xb5\xc5\x5d\x83\xc8\xa5\xc0\xb4\xec\x9a\xe2\x7b\xba\xd4\x33\x57\xc3\x89\x8a\x26\x4a\xee\x9a\x21\xee\x62\x45\xc4\x84\x4c\x42\x6f\x90\x9f\x35\x1e\x08\x48\x84\x97\x1d\x75\x16\x3e\x15\x61\x00\xe6\xe5\xc9\x48\xe3\x59\xe5\x82\x95\x7f\x2e\x56\xbc\xb8\x1c\xd7\x36\x65\xfe\x0a\x6e\x2f\xf1\x8f\xe7\xca\x00\x97\x95\xbf\x1f\xdf\x27\x88\x9f\xab\x7c\xb7\xec\xb9\x4e\x2c\xd5\x87\x78\x16\xdd\xbd\xd2\x2b\xac\x67\x97\x92\x7e\x96\x20\x2c\xe6\xb3\xfb\x3c\x63\xe6\x80\x57\xa2\x7b\x58\x06\x51\x85\x9e\x05\xfa\x8d\x94\x54\x72\x36\x63\xb2\xee\x16\x04\x79\xb9\x4c\xb9\xe2\x67\x37\x06\x06\x4a\x9a\x3a\x7f\x66\xc5\x8e\x
ec\x84\x90\x49\x22\xf2\x20\x9b\x95\xb2\xcd\x33\x94\x98\x5f\xfd\x8c\x3a\xf4\x89\x29\xc4\xec\x99\xb9\xc8\x03\x4d\x17\x0d\xba\x62\xc2\xe3\x19\x96\xf2\x4f\x70\xad\xbc\x84\x95\x20\x0f\x21\x36\xd3\x13\xb1\xdb\xff\xc8\x69\xdc\x45\x6a\x3c\xc9\x4d\xbc\xde\x9c\xb8\x7f\xa6\xab\x5b\x6d\x49\xb7\x05\xe2\x95\xe2\x34\xb6\x46\x59\xb9\x27\x2c\xd9\x58\x96\xa0\x3d\x2a\xa4\xf5\x84\x6a\x03\x5f\x32\xfb\xb0\x99\x3d\xe5\xc8\x4b\x90\xc9\x49\xd6\x4c\x13\x87\x0e\x06\x2a\xd3\x6a\x1e\xb7\x52\x30\x1e\x91\xc5\x85\x29\x0b\xd6\xd2\x8f\x7b\xb9\x49\x4b\xfc\xb8\x4b\x18\x26\x91\xc3\x02\xcb\xbd\xb9\x4a\x47\x01\xc2\x3f\xaa\x61\x74\x27\x7d\x13\x5b\x90\x25\x16\xd2\xd7\x68\xbb\xd5\x92\xa7\x71\x3f\x7a\x1e\xd8\x83\xef\xee\xa3\x8a\xca\xd8\x94\x54\x0d\xa6\xa3\x7f\x08\x0f\xa7\xae\xd0\x91\xab\xca\x52\xca\xc0\xb3\x93\x1f\x45\xc9\xac\xa1\x7c\xf3\xa8\x24\xae\xf9\xf5\xac\xf2\xfc\x7b\x90\x57\xc2\x53\xda\xe3\x0c\x50\x68\xc8\x75\xf1\x15\x70\x7c\x86\x71\x3f\x2c\xbd\xc4\x72\xdc\x95\xda\xf7\x88\x2c\x4b\xe2\x7e\x4a\x3e\xe0\x87\xdb\x2e\x28\x8b\xc6\xc9\x8c\xe1\xd8\x45\x9f\xa7\x07\xfa\xbb\xe7\xe9\xb7\x3f\xaf\xf2\x2f\x33\x40\xfe\xa2\x8d\x19\x75\x60\xf0\x40\x58\xf2\x51\xe1\x4d\xe0\xed\x4b\x1c\xb9\x3c\x5c\x32\x33\x85\x91\x17\x7f\x61\x8f\x23\x88\xd9\xc8\x5d\x0b\x18\x7d\x8a\x96\x3d\x42\xa2\xde\xc0\x93\x49\x19\x8f\x34\x8a\x06\x23\xd1\x8f\xfc\x1e\x48\x42\xc8\x2f\xb9\x7b\xbd\x34\x9b\x9e\x31\x7d\x2c\x6e\xe4\x3b\x18\x91\xa2\x3a\x52\x91\x2c\xbb\xe0\x6f\x78\x82\xdd\x57\x11\xea\x04\x45\x98\xaf\x2a\x2e\x04\x9f\x8a\xae\x4d\xd9\x7b\xc0\x99\xc9\xac\x3e\xd2\x9e\x83\x21\x39\x50\xa1\x3f\x6b\x06\x3f\xff\xf4\x09\x5c\xf5\x57\xdf\x92\x14\x80\x49\x54\x04\xe1\x57\x9e\x64\xe0\x82\xd2\x2a\xec\xb7\x1b\x26\x5d\x91\x2a\xff\xe5\xc2\x48\xd1\xf1\xa0\xb3\x83\xf6\x26\x60\x7d\x24\x19\x10\xe7\x99\x99\x52\x19\x2f\x97\x7c\xca\x9e\xe1\x2f\x66\xdf\x50\x18\x8e\x85\x20\x6d\x8e\x09\x82\xcc\xe1\x51\xfe\x71\xa3\x9d\x59\x29\xda\xdb\xbb\xc4\x93\x98\xde\x6c\x6e\x27\xd2\xd0\x4e\xc1\x24\xa1\xe9\xe9\xd6\x10\x68\x92\xb1\xca\x74\x33\x3a\x39\x59\x5b\xba\xf0\x67\x4f\xe8\x97\xd2\x72\xcc\x7e\x95\xb8\xe5\xd6\xdf\xa7\x81\x3c\x40\xf4\x16\x0d\x8b\xf4\x67\x2f\xd4\xb3\x92\x45\xf9\x26\x6b\x4f\xce\x4f\x53\xfd\x91\xf3\xea\x3d\x91\x7a\x89\x04\x7c\x17\xc4\x81\x85\x19\x8b\x33\xee\x10\xb8\x9a\xac\xb9\xa7\xda\x19\x97\x7e\xfd\x8e\x10\x4d\x5e\x02\x3d\xbd\x35\x9b\xa2\x67\x3c\xc9\x99\xab\x0e\x54\xbf\x9f\x8a\xc4\x3b\xa6\x88\x7f\x4d\x45\x29\x78\x53\x81\xf2\x27\x57\xc5\xbc\xe1\x64\xf5\x1c\x7f\x77\x59\xc5\xef\x5c\x11\x95\xd0\x6f\x2d\x41\xf5\x5c\x74\xf0\xe7\x33\x40\x06\xd1\x88\xb4\xc6\xd2\x3d\xd5\xf4\xd9\x41\x0f\x8a\x7d\x86\xa9\x88\xf8\xb0\x65\x2e\x26\xb1\xb2\x60\xb6\x46\xea\xa8\x7d\x2d\x22\x51\x5a\x7b\xa3\xa8\x97\xfd\xb6\x3e\x7f\x80\x89\x74\x9f\xc6\x10\x1d\xb2\x91\xa0\x79\x24\x37\xf5\x07\x10\x0b\x14\x2e\xd3\xba\x29\x39\x42\x65\x40\x34\x79\x84\x95\x85\xec\x95\xad\x46\xa5\x0b\xc0\x7f\xe2\x74\x66\x81\xb3\x95\xaa\x0d\xd0\xeb\xe1\x6e\x1e\x28\x12\x52\x18\x6b\xca\x7f\x56\x49\xfa\x1d\xca\x9d\xf9\x9a\x9f\xc5\xcd\x21\x85\x27\x21\x18\x2f\x8a\xd3\xf9\x90\x5b\xdb\x08\x83\x34\xf2\x50\x9d\xab\xa2\x59\x11\x30\x3d\xb3\x12\x04\x27\x5a\xdd\x4e\xe9\xfe\xfa\x84\x86\xf3\x3d\x7f\x43\x16\x79\xab\x4f\x89\x6e\x73\x36\x54\xe6\x74\xc3\x64\x37\xf7\xae\x7c\xec\x21\xcb\x36\xb9\xe7\x34\xef\x06\x3c\x47\x78\xdc\x95\xcf\x83\xb2\x16\x53\x1a\xe5\x99\x46\x97\xba\xf1\x26\xe1\x11\x53\x78\x74\xad\xe2\xcc\x50\xd0\x28\xef\xd1\x19\x0d\xce\x5c\xbb\x07\x9f\xe3\x86\x18\xdc\xc6\x72\x22\x85\xb3\xbe\xbb\x8b\x1a\xb3\xe6\x47\xd7\x02\x26\x71\x10\x73\xb9\x31\x30\x1b\x9b\xe4\xff\x47\x06\x22\x46\xf5\x57\x41\x09\xbd\xc2\xce\x0a\xf7\x45\x4d\xb2\x61\xcd\x08\x8c\xd6\x4c\x66\x71\xef\x5a\xb8\
x98\x41\xac\x96\x36\x25\x63\x45\x54\xa2\x33\x3d\xaf\x8b\x9c\x54\xa8\xfa\x91\x15\x2a\xe9\x38\xb8\xb1\x05\x3a\xe4\xe2\x55\x8f\x24\xaa\x27\xdf\x4e\x2b\x27\xd1\x3b\x2f\x8a\x5f\xc1\x91\x4d\xac\xd1\xdf\x4c\x27\x30\x7b\x02\x86\x7a\x5d\x05\x4e\x68\x13\x13\xf4\xd2\x76\xd0\x33\x13\x63\xb2\xae\xa1\x37\x62\xf0\xa1\xab\x05\x67\xa7\xf1\x56\xba\x14\x15\x79\xab\xf1\xc3\xd7\x5f\x87\x19\x73\x16\x27\x4b\x0d\x85\xc7\xb5\x7c\x5d\x61\x0c\xba\x84\xec\xe0\x8f\xab\xeb\x0d\x76\x58\x3b\xbe\xcf\x20\x8d\x26\xf2\xf1\xe0\xeb\x26\x3b\x3d\x28\x4d\x86\x59\x5b\x01\xab\x33\xc6\x19\xf6\xfc\x49\xf3\x1e\x3b\xb5\x65\x2c\x1b\x88\xb4\x7e\x43\x4a\x0a\xa3\x02\x91\x48\x49\x3c\xe2\x42\x30\x5b\x67\x0c\xe9\x48\x34\x2d\x5f\x86\x81\x91\x54\xb6\x97\x91\xdb\x96\x4e\x77\xb2\x9f\xff\x92\xd3\x04\x60\xa7\x99\xca\xfe\x0f\xe3\x57\x92\x50\x23\x3b\x70\x74\xb7\x5d\x82\x8d\xc5\x5a\x9e\xf5\x0f\x30\xaf\x39\xb8\x39\x63\xfa\xc8\x35\x2d\xc8\xa3\xaf\x14\x49\x0d\x23\xbe\x34\xf9\xda\xa5\x6b\x13\x6f\x38\x79\xce\x32\xf7\x95\xea\xe6\x98\xda\xdd\xd9\xa5\x37\x74\x0b\x09\xe1\x34\x21\xf4\x23\x9c\xb6\x7b\x10\x5d\xb5\x31\xad\xc9\x50\xa5\xaf\xcb\xe8\x59\x0d\xfa\x02\x12\x74\xbf\xb5\xe5\x8f\xbb\x3a\x24\xd1\x5e\x8f\x10\x5e\x1a\xa9\x4f\xf2\x9f\x98\xa9\x41\x26\xdd\x82\x54\xed\x39\x6a\x55\x3f\x68\x2b\x3a\xd0\x96\xff\x04\x92\x72\x63\xc6\x54\x4e\xd9\xd2\xd9\x23\x3e\x0e\xfe\x7b\x18\xa0\x59\x66\x9f\xe0\xc2\xc5\xe4\x11\x08\xe7\xad\x92\x1e\x26\x57\x9e\xe1\xfe\x9f\xa2\xca\xc3\x1d\x72\x45\xfe\x02\x04\x0b\x18\x6f\xde\x99\xb5\x7a\xef\x02\x13\xe0\xa3\xfe\xbb\xf0\x24\xcb\xb4\x5c\x8d\x1e\xe0\xc3\x9d\x49\x6d\x83\x28\x48\x27\xa3\x26\x71\x51\x17\xfd\x72\x50\x51\x87\x12\x3c\xcb\x7b\x14\x67\xf4\x3e\x3b\x3b\x75\x3f\xaf\x4e\x46\x2d\x9a\x06\x76\x87\xca\x7a\x93\x3b\x5a\xeb\x26\x16\xa9\x12\xd1\x00\x26\x1e\xa7\x8e\xdd\x48\x2e\xaa\x7a\xa0\xa0\x2d\xe2\xbd\x0c\x9a\x6a\xfe\xb2\x33\x6b\xe8\x0c\xcf\xb8\x1f\x27\xd0\x30\xc5\xe6\x86\x16\x33\xab\x30\x90\xd3\x61\x32\xb7\x8f\x38\x08\x4c\xe4\xee\xe0\xbc\x42\x40\x9e\x04\x72\x60\x5d\x60\x8a\x90\xb3\xc1\x60\x29\x3a\x31\x41\x80\xd3\x14\xe2\xc6\x0e\xe2\x7d\xca\xde\x7e\xa8\xff\x0b\x8a\xeb\x9b\x41\xe1\xa1\x86\x21\x1a\xc8\xdd\x62\x44\xd6\x2e\xf0\x45\xfb\x39\x12\x61\x2f\x49\x27\x0c\xd2\x41\xb6\x92\xd9\x58\x00\xa1\xff\x09\xe9\x17\xde\x2b\xa8\x4f\x88\x6a\x6b\xd5\xd9\x47\x20\xac\xee\xd0\x35\xe1\xf3\xa8\x0a\x75\x0e\x35\xec\xf8\xea\x1a\x6e\x76\x85\x3d\x7f\x03\x6b\x5c\xe5\xbc\xeb\xe4\x73\xf8\xd9\x75\xbe\x72\xb0\xe3\x01\x82\x30\x49\xcf\x98\x61\xfd\x90\x3e\x5c\x5e\x52\x50\x03\xa1\x8e\x25\xc7\xba\x70\x8a\xbe\x44\xcb\x55\x70\x1f\x14\x58\x8d\x46\x86\x22\x1d\x78\x03\xb3\xec\x3d\xc1\x7d\x4d\xc1\xc6\xfd\x35\x52\xe4\xb8\x0a\x90\x9c\xc6\x0b\xce\x8a\x7d\x1b\x08\x2f\x38\x4d\xd0\x83\xd6\x5e\xc9\x7b\x6d\xfb\x15\x5b\x85\x81\x1b\xec\x85\xd4\xa0\x0e\x68\x8a\xdf\x04\x4f\x5a\xbf\x58\x3f\xe2\x23\xbe\xeb\x27\x9c\xed\x38\x4b\x35\x60\x16\x4e\xc7\xda\xe9\xf4\x97\xe5\xd2\x01\xfa\x6b\x8a\x04\x3a\x58\x83\x51\x90\xc2\x4d\xc2\xc8\x1c\xed\xa9\xd1\xc3\xef\x0b\xcc\x4f\x99\xaa\x19\xe2\xa9\x1f\xec\xd1\x0e\xb4\x00\x02\x83\x53\xb7\x60\x9a\xe2\x5a\x7e\x5f\x2b\xcc\x35\x4e\xf1\xbd\x81\x83\x9d\xab\xa1\xff\x90\x3e\x2b\x99\xb3\xb4\x02\x1e\xe4\xb2\x63\x65\x50\xb6\x53\xf4\xe7\x07\x6a\x04\xcb\x84\x0e\xb7\xb9\x3b\xea\x59\xe3\x1d\x37\x2b\xb1\x66\x1b\x10\x38\xd4\x85\x59\x45\x1d\x46\x79\x56\x4d\xaf\xb4\xab\x0b\xa2\x75\x6a\x32\xab\x83\x8e\x15\x59\xb0\xa8\xa3\xf8\x13\x13\x85\x74\x87\xb4\x9e\xac\xb5\x9c\x6c\xab\xb7\x03\x7e\xde\xa2\xf4\xae\x52\x35\xb0\x1a\x54\x84\xac\xc1\xdd\x3e\x89\x46\xd6\x3b\x70\x83\x61\x39\xfa\xc1\x92\x52\x96\x90\x78\x12\xb6\x6f\x9c\x56\x01\xc9\x5b\x8e\x77\x77\x37\xb9\x43\x2e\x84\xd3\x42\xaf\x51\x39\x16\x98
\x62\x41\x61\x75\x76\x63\x52\x00\x1e\xe9\xca\xfe\x27\x6c\x7a\x78\x30\x1b\x13\x87\xdf\x80\x4f\x82\x0e\x91\x31\x8f\x9c\x99\x91\xf4\xdb\xb4\x7b\x1b\xe1\x4e\x78\x7a\xa3\xe0\x6b\x02\xa1\x39\xde\xfc\x84\x04\x43\xb2\x5d\x3d\x37\x47\x74\xdb\x5d\x1c\xd6\x9d\xaf\x2d\x14\x49\xe5\x9f\x37\x35\xfd\x67\x6a\x1c\xaa\x00\x65\xa9\x78\x28\x07\x0e\x71\x43\xfd\xcb\xda\xc7\x27\xad\xf4\x3e\x3e\x27\x02\xfa\x6e\x7d\x39\xc8\x2b\x55\x27\x3c\x34\xeb\xbb\x6d\x49\x77\xce\x3a\xf6\x34\xb8\x73\x0d\xc0\xda\x1e\x8e\xed\x3c\x8c\x8f\xdb\x80\xbf\xbc\xb4\x01\xd3\x04\xad\x3c\x3c\x81\x2a\x95\x5f\x7b\x95\x58\x8f\xa7\x42\x6b\xfa\x34\x33\xaa\x0c\xa2\xaf\xf7\x46\xd3\xb3\x69\x4a\x17\x6e\x57\x9a\x09\x48\xd9\x7d\x48\x82\x0b\xac\x18\x27\xba\xf4\x8a\x95\x25\xeb\x66\x7b\xda\x50\x1a\x17\xa6\xf2\xcb\x69\x3f\x0d\x13\xc6\xec\x9b\xeb\x17\xdb\x4b\xe7\x47\xe3\x2c\xd8\xb7\x36\xb5\xea\xdf\x48\x94\xfa\x76\x6d\xaa\x2f\x67\x3a\xbf\xe5\x78\xd0\xb0\x0f\xb3\x3c\xe4\x7c\x7e\x0b\xfd\xfa\x38\x34\xfb\xb2\x8f\x6c\xcf\x8e\x93\x64\xdf\x14\xdc\x5e\xa9\xe4\x7c\xed\x49\x36\xfe\x67\x24\xc1\xe6\x8b\x39\xea\xe8\xfc\xd9\x42\x84\x2d\x3f\xdf\xff\xbb\xad\xcd\xf2\xa7\xe5\x69\x50\x5f\xaa\x49\x7e\xa8\xf0\xf7\x29\x3d\x4c\xf1\x89\x7a\xed\xc3\x59\x00\xe7\x02\x53\x6a\xc6\xe0\x12\x8e\x7f\x5b\x22\x81\xe3\xa3\x58\xfa\x27\xc1\x44\x54\x3c\xe0\x0f\x93\x3f\xde\x6c\xb6\xde\x61\x77\xd7\xbe\xf9\x8c\x0b\x7a\x37\x1f\xbe\xbd\x5b\x67\xe1\xda\x10\xfa\x26\x63\x37\x06\xd3\x4e\x21\xe6\x07\x49\x36\xb1\x03\xe5\x3d\xc1\x1a\xb0\x5b\xe0\xd9\xbd\xf5\xfc\xc7\x67\x5f\xd4\x01\x56\x65\xe8\xbf\xa3\x87\xfe\x56\x8f\xe9\x5d\x82\xab\x5b\x9a\x12\xc2\xdf\x1e\xf4\xda\xb5\x83\x2d\x74\x04\x41\xaf\x22\x18\x5a\xc2\x6f\xa5\x88\xbd\xd1\xde\x64\x91\x8a\x83\x7c\x09\x16\xb2\xbe\x77\x30\x8d\xf5\x25\x02\xe6\xed\x94\x87\xe0\xba\xd4\x93\x3c\x66\xe8\x90\x2c\xdc\x76\x91\x47\x61\x90\xa8\x8b\xc8\x27\x00\x39\xd0\xef\xdd\x7c\x97\xf0\xf8\x7e\x43\xcd\x99\x74\x65\x64\x15\x74\xed\x7f\xb0\x92\x65\xc8\x0f\x56\xf2\x1e\x2b\x25\xaf\xfe\xee\xa4\x7e\x43\x13\x09\x54\x86\x12\x33\xb7\xdf\xca\xc9\x78\x5b\xeb\xd7\x6d\x0a\x7d\x4c\xef\xcf\x29\xff\x04\xf4\xc1\xf4\x6d\x0c\x6c\x3a\xe9\xd7\xcb\x6e\xf1\x98\x8b\xc6\xc7\x20\x29\x2f\x34\x1a\x05\x0d\x39\xa8\xc9\x59\xbe\x88\x9e\xe4\x26\x1a\x17\x2c\x08\xc7\xb0\x20\x9c\x28\x9a\xec\x8c\x63\xf6\xb5\xe1\x3d\x18\xef\xb9\xe1\x91\xd3\x9b\xef\xa4\x2a\xf1\x9d\x94\x65\x61\x40\xc3\xa2\x77\x9a\x74\x57\x5f\xf5\xbd\xca\x73\xb0\x6d\x9a\x6d\x79\x61\xac\x47\xa2\x70\x9d\xc9\x1d\xf6\xbd\x5e\x48\xc3\x71\xca\xaf\x98\xd8\x2f\x99\x05\xb9\x00\x85\xd7\x37\x1e\x94\xd8\xf3\xfd\xea\x0b\xae\xb5\xfc\x5b\x62\x75\x04\xb1\x36\xff\x8c\x5e\xc5\x46\xae\xce\xb9\x2d\x49\x07\x90\xe9\x60\x0b\xf5\xaa\x97\x8c\x80\x5e\x56\x73\xfb\x3d\xc0\x54\xb3\xc0\x2a\xca\x32\x59\x81\xaf\xac\xe4\x9a\x57\xd2\x06\x68\x3b\x76\xb1\x7b\xe1\x7d\x2f\xc6\x6f\x1b\x82\x69\xfc\x66\xa2\x10\xc2\x6c\xe1\x57\x68\xbb\xd4\xe0\x30\xef\x08\xbd\x68\xd3\x2d\xa5\x3d\x78\xc5\x73\x37\x30\xb4\xe7\xa4\x83\xee\xa4\xc8\xbf\xd8\x3a\x04\xed\xb8\xb6\x43\x28\x87\x2e\x0e\x59\xca\xaf\xac\x6c\xb0\x4e\x5a\x5e\xaf\xbe\x34\x57\x17\xa4\xe4\xc2\xd0\x1d\x14\x07\xa4\x5b\xe8\x10\x78\xfc\x25\x08\xd0\x49\xba\x8c\x70\x4e\x64\x1c\xd6\x17\x8a\x38\xcf\x9a\x9b\x73\x25\x75\xf5\x8b\x40\x53\xf0\xde\xf0\x2d\x61\xee\x1b\x59\x45\x2c\x33\xf3\xa8\x7c\x52\x9c\x79\x82\x12\xe7\xbf\xc3\x4c\xf7\x92\x1a\x0a\x5c\x43\xfc\x82\x2e\x7b\xe7\xb8\x06\x13\xe5\xce\xf0\xbd\xd3\x97\x6b\xc0\x66\x16\x63\xd9\x0d\x6f\x2e\x62\xf2\x8c\xbd\xb9\xd6\xaa\x3e\xe9\x7a\x49\x0e\xf3\x6a\x91\x66\x77\xd5\xaf\xca\xff\x05\x13\x19\xe4\xd1\x51\x5c\x64\x0e\xa3\xaf\x7a\x54\xf9\x29\x1b\x2c\xe4\x19\x33\x55\xdf\x4a\x38\xa9\xf8\xf5\x54\xba\x8e\x21\x07\x97\x26\x87\x55\x2
c\xe7\xcc\xc4\x2e\x6b\xfe\x55\x75\x5d\x4e\xa3\xcf\xb4\x66\x74\x9a\x71\x95\x9e\xb9\x2d\x6d\x4a\xd3\xbb\x66\x48\xbe\xf9\x8f\x99\xb3\x4c\x39\xf0\xed\x16\x19\x33\x26\x89\x12\x80\x8c\xcc\x2a\x1a\xce\x8c\x02\xed\x4b\x22\xf2\x0e\xef\x19\xd2\x94\x6d\xe4\xcb\x86\xf9\xca\x59\xee\x8c\xc9\xe7\x14\xae\xae\x58\x72\x61\x2e\xa9\x90\x6c\x1c\xf7\x3d\x29\xb3\xe7\xf2\x14\x45\xef\x0f\x5f\x49\x29\xaa\x20\x2c\xeb\x61\xa4\xee\x33\x74\x61\xe0\xfd\x21\x90\xa8\xce\x15\x19\x62\x34\x67\xe4\x56\x83\x68\xa8\x5f\xfb\x66\x9f\x1f\x27\x7d\x7d\x90\x07\x4b\x6e\x33\xe2\x5b\x0b\x11\x7c\x1e\xc2\x92\x0f\x72\x0b\xe1\xb1\x98\x3e\x7c\x76\x86\x68\xf6\xf7\xb2\xd6\x94\xac\xe7\x4b\x8e\x9f\x27\x0c\xd9\xbd\xa4\x61\xe0\x4d\x53\x51\x2b\xc9\x22\x75\xd1\xf2\x08\x01\xfa\x92\x76\x23\x3d\xe2\x44\xa0\xc3\x83\x39\x27\x0c\x08\x50\x5e\x37\xa7\xcd\x0c\x1c\xf2\x3c\x14\xe9\xb7\x1b\x13\xa0\xfa\x14\x9b\xb8\xd5\xb9\xaf\x63\xa6\x23\x4d\x19\x83\xe9\x8c\xfb\xcd\x73\x5f\x6e\x8d\xce\x53\x1c\xe9\x16\x55\x99\x31\x47\x67\x36\xb3\xdc\x36\x59\x40\x6e\x72\x56\xbb\xfe\x6e\x65\x30\xda\x67\x4b\xfe\xdf\x85\x50\x34\x6f\xe3\xd5\x2d\x18\x73\x30\x6c\xfb\xfc\xaf\xa1\x43\x53\x60\x06\xb7\xa0\x49\xc7\x44\x14\x51\xec\x83\x26\x85\x9c\x53\x77\x7e\x94\xd9\xdd\x35\xda\x37\xf8\x3f\xa2\x19\xe4\x10\x51\x99\xef\x6e\x31\xee\x8d\xbd\xa8\xd0\xd6\xa2\x75\x63\x3d\x2c\x91\xd6\xa7\x86\xf3\x3c\x10\xda\xe4\x3e\x07\x19\x53\x7e\x15\x56\xe7\xdc\xec\x07\x1b\x0c\xab\xcc\x23\x7f\x44\x11\x3d\x7a\x35\x66\xd0\x86\xfd\x8c\x15\x1a\xee\x0f\x03\x9e\x47\x37\xb7\xf5\xb7\x30\x68\x96\xd6\xb5\x25\xcb\xf9\xcd\x67\x55\x6f\x25\xb8\x9d\xd5\xd5\x88\xe8\x39\x07\x57\x7c\xfe\xbb\x89\xe6\xbc\xc7\x56\x5b\xd5\x5b\x0b\x5f\x4a\xf2\xa7\x29\x2a\x74\x56\x76\x1e\xfc\xb5\xd8\xad\xd2\x3f\x05\x16\x01\x52\x5d\xf7\xda\x0d\xa9\x4f\xba\x7c\xf8\xc8\x43\xa3\x9b\x13\x69\xb2\xe4\xd8\x6f\xf4\x2a\xdc\x52\x74\x6e\xad\x9f\xf0\xa3\x78\xae\x9a\xdd\x32\x40\x93\xc4\xe8\xa1\x9d\x4b\x15\x77\x7d\xf1\x04\x90\x13\x44\xf1\x44\x00\x91\x56\x32\xa3\xdf\x7c\x01\xca\x6f\xb8\x78\x22\x2a\xc9\x52\x2f\xa4\x55\xfb\x07\x47\x6a\x67\xb1\xcb\x0a\xd2\xf4\x46\x65\xd4\x13\x49\x5f\xb1\xab\x3b\x58\x2a\x73\xc0\x96\xc8\x6d\xef\xb3\x51\x27\xdc\xe8\xfd\xda\xca\x40\x8b\x8a\xb3\x68\xd8\x08\x2a\x75\xb8\x44\xde\x9c\x7f\x30\x90\xc7\x96\x1e\xa4\x3c\xb7\x90\xc8\x3b\x95\xa2\xd4\x9d\x0f\xe7\x2a\xff\x49\x90\x57\xf8\x04\xe0\x70\x79\x0a\x68\x27\xeb\x52\xc8\x79\x32\xb7\x6e\xa1\x38\x33\xd1\xc0\xfd\x2a\xd9\x80\x38\xfc\x51\xae\xe3\x2d\xb2\xe6\x8a\x9f\xb9\x35\xb0\xe2\x1a\x7b\x53\xbb\x92\x44\xdd\xe8\xf2\x71\x4a\x75\xfc\xec\x9f\x13\x72\x25\x65\x59\x29\xc3\xb2\x33\x26\x8c\x4f\xf5\x32\x40\xb2\x16\xb3\x58\xc2\x07\x79\x7e\x26\xbf\xd4\x9c\x43\xb5\xff\xcc\x53\x79\x71\xeb\x41\x1e\x88\xd6\x2b\xf5\x22\x6e\x60\x4e\x1e\x2a\x3f\x03\x6b\xab\x34\x30\x54\x4b\xc2\xf5\xd5\x39\xcc\x6f\x51\xa3\x9c\x72\xdd\xbe\xb1\xba\xdd\xbb\x08\xcd\x30\xf0\xec\x6b\x7f\xce\x92\x52\x93\x03\x21\x9a\xb5\x2e\x6c\x28\x1f\x3f\x4b\xca\x6c\x7d\x9f\xeb\x8a\xad\xe3\xa6\x03\xfd\x93\x89\xab\x65\xa2\xed\xee\x0e\x9e\x20\x54\xff\xd8\xdf\x36\xea\x7b\xf6\xfd\xd7\x5b\x67\x5b\x23\x75\x97\xde\x96\xc1\xa6\xee\xe2\x14\xc0\x0f\xb1\xb9\x73\xbc\x29\x5c\x4c\xc1\x37\x4f\x79\xea\x67\x94\xa9\xa3\xb5\xc8\x0a\x8e\x9d\xc9\x87\x4d\x67\xba\x29\x7d\x75\xba\xcc\xe0\x4e\xb4\xeb\x35\x01\xbf\x99\xa5\xdc\x24\xe5\x92\x01\x7f\xcb\xfa\x11\x80\xf5\xbf\x38\xd3\x56\xbf\x8c\x5a\x3d\x05\x8d\x5a\xf5\x25\x44\x64\x5c\x82\x43\x09\x56\x60\x4c\x8b\x88\xbd\x96\x6e\x62\xe9\x61\x10\xe4\x1d\x4a\x50\xc1\x9a\x7f\x35\x1d\x29\x5f\x59\x46\x95\xd3\x2d\x27\xd8\xd4\xeb\x3f\xc4\xa6\xb5\x1d\xfd\x22\xef\x6b\xa6\x8e\x79\xbb\xcf\x1f\x5b\xa6\x81\xbb\x9b\xff\x23\x53\xda\x49\xb6\xf7\xcc\xfd\x0c\xe4\xa8\x
fd\xdb\xfd\x70\x8e\xeb\x2d\x42\xf4\x3d\x71\xc4\xd8\x50\x8b\x0a\x30\xc8\xdd\x7e\xb3\x30\x4e\x8d\xf6\x40\x45\x83\x71\xd1\x26\x1f\xca\x8b\x24\xda\x2a\x13\x20\x04\x9d\x83\x22\x5d\xa5\x69\x61\x68\xe2\x51\xf7\xa7\x72\x25\xc0\xef\x2f\x5c\x27\x8b\xd3\x56\x99\x42\xd8\xaa\x3d\x1a\x91\x89\xab\x06\x6a\x08\x51\x8b\x46\x6d\x0f\xda\x2f\xa1\xbc\x38\x09\x0c\x8f\x61\x44\x58\xe5\xc5\x19\x43\x64\x88\x30\x92\x1c\x8d\xae\x41\xe7\xeb\xb7\x42\xea\xaf\xdb\x5d\xca\x10\x33\xcb\xf9\xb3\x64\x0e\xcf\x91\xb1\x42\x3a\xb5\xab\x1e\x64\xc1\x4d\xc7\xb5\xb7\x8b\x36\x8a\x80\xea\x2d\x23\xd1\x36\xb4\x94\xdb\xec\x6e\x3d\x2d\xbf\x82\xec\xbc\x9d\x61\x0e\x9a\xb7\x7f\xfc\x2f\x6c\x49\x03\xd2\x98\x05\x02\x79\x9a\x89\x2f\x86\x34\xcb\xd0\x60\x55\x4f\x1a\xb5\xcf\x3e\xfb\xaf\xc8\xb3\x0c\x36\x95\xb9\x63\x60\xa7\xea\x94\xa1\x22\x37\xcc\x3c\x49\x4e\x88\xfa\x50\x9a\x57\xb5\x05\xcd\x92\x59\xbf\x7e\x92\xad\xe6\x67\xb8\xc5\x7f\xbe\x67\x26\x9f\x36\x31\x49\x9b\x44\x00\xc1\xf4\xfc\x23\x38\xa4\x8a\x82\xf8\x3c\x77\xdf\x42\xab\xc2\xe1\x1d\x68\x4a\x22\x50\x6f\x24\x54\xc3\x68\x8b\x5c\xec\x22\x5b\xce\xe0\xb2\xc3\xb1\x50\x1c\xeb\x99\x7b\xdb\x57\xc1\xd3\x3f\x92\xba\xec\xcd\xae\x76\xa7\x41\x67\x16\x1b\xcb\xd0\x5f\x62\x99\x45\xaf\xfe\xf8\x17\x74\x58\x55\x24\xd6\x33\xdc\x12\xc8\xa8\xfe\x67\xb6\xe6\xa0\xb4\xf7\x71\x93\x3e\x7c\x6b\xa6\xfc\x68\x9c\xfd\x3d\x14\x08\x86\x9d\x27\x23\xd8\xb0\xf1\xd4\x44\x81\xc1\x3b\x87\xa3\x27\xa4\x75\x7d\x25\x8c\xa9\xc8\x3f\x36\x94\x84\xae\x4b\xe9\x3e\x07\x3e\x18\x99\x76\x92\x31\xab\x21\xe4\x61\x9f\xcd\x4c\x5f\xf4\x43\x93\x0d\x47\x6d\x32\x6d\x3d\xea\xc1\x77\xfc\xa8\x55\x12\x81\x68\xbf\x27\xd2\x59\x0d\x8e\x1f\xae\x5b\x18\xd9\x3a\x2a\x8f\xb9\xde\xaf\x42\x8d\x9e\x43\x21\x21\x7b\x62\x3e\xe2\x3e\x56\xe5\x1a\x1e\x45\xcc\xdf\xf2\x9b\xc2\x3d\x90\xc8\x2b\x11\x06\x78\x7b\x86\x4f\x26\x08\xd5\x62\x62\xff\xd3\x85\x77\x57\x70\x92\x8e\xe7\x42\xf1\xbe\x63\x4e\x5c\x56\xa0\xfb\x78\x50\xcd\xe8\xd0\xdb\x71\x78\x83\xa2\x5f\x19\x8d\x8b\xd3\xb0\xc7\x1f\x0b\xbb\x31\x79\xef\xd4\x52\x12\xf0\xee\xe9\x11\x71\x68\xa7\x58\xb7\x30\xdd\x0c\xa3\x20\x43\x0c\xd7\x3a\xdf\x9a\xa5\x4c\x3d\xa5\x1a\x62\xfd\xc9\x59\xdc\xee\xe4\x8f\x28\x7d\xb7\x30\xcb\x04\x31\xc1\xff\x95\xac\x7b\xd3\x86\xff\x48\x9e\xe6\x29\x52\x84\x28\xa1\xe2\x42\xa2\x75\x1b\x15\xda\x39\x92\xbf\xad\x42\x93\x88\xd5\x87\x8e\xc5\xf9\x08\x74\x59\x6d\x88\x55\x02\x23\xdf\x56\x47\xde\x27\x71\xb9\xf7\x51\xc1\xf3\x43\x73\x8f\xe0\x5d\x6b\x56\x06\x6a\x3f\x62\x7b\x62\x9c\xba\x2f\xb1\x81\xd8\xdf\x3e\x6e\xe7\xc1\xc7\x85\x93\x8d\x70\xff\xe4\x03\xac\xfa\x14\x20\x23\x11\xb5\x80\x55\x16\x41\xbc\xbe\xcb\xf6\x33\xfe\xd4\xcc\xb5\xf5\x75\x6f\xd2\xb1\x35\xc7\x5b\x4f\x37\x9f\x24\x6e\xf5\xeb\xc5\x93\x83\x12\xce\x70\x51\xa8\xb4\x36\xf2\x1f\xac\x4f\xe1\xc4\xed\x43\xea\x79\xd5\x86\x90\xce\xdb\x5e\x41\xd9\xf2\x4e\x64\x0d\x45\x3c\xeb\xa5\x3f\x74\x64\x58\xe7\xeb\x62\xd7\xfc\x67\x8f\x55\x2e\xa5\x4b\x30\xb9\x6f\x0f\x02\xd5\xd9\xab\xc1\x3a\xab\xd5\xae\x70\x0f\x21\xc3\x5c\xe9\xa1\x61\x0a\xea\x47\x0d\x62\x6f\xb5\xa7\x7f\xd2\xc3\x74\xaa\xa1\x29\x6d\xbd\x77\xbf\x49\x48\xcb\x11\x39\x1d\x81\xa5\x0a\xeb\x11\x6f\x41\xea\xd0\x3e\x22\x84\xa3\x2a\x27\x77\xfa\x23\x01\xb5\xfd\x23\xc5\x2e\x03\x5a\xa1\x11\xc3\x7e\x93\x9b\x7d\x4b\x00\x6c\xc7\x67\xed\xaf\x29\xbc\xdf\xb5\x46\x03\x21\x43\xd2\x33\xdb\x91\x8b\xd1\x1c\x88\xbd\xbe\x73\x0c\xba\x87\x40\xda\x6e\x37\x2b\x46\x65\x7b\x1d\xa9\x1f\x05\x13\x53\x68\xa0\x12\xa2\xb5\x91\x01\x6a\xf1\x2c\x0c\xbf\xe4\x2c\x41\xec\x3d\xdf\xc3\xad\x3e\x37\x9c\x4a\xad\xdd\x65\x67\x03\x0b\xcc\x46\x07\x67\x5b\x61\x0d\x7b\x15\xaf\x56\x04\xbc\x78\x56\x45\x75\x61\x56\x8a\x02\x22\x0a\xe4\x5d\x5f\x67\x50\xb7\x90\xe4\x29\x60\x1d\xee\x4c\xeb\
x46\x78\x50\x25\x5a\x2b\xde\x68\x14\x48\x94\x36\x38\xa9\xa4\x37\xe3\xed\xaf\x87\x15\xe4\x36\xde\xfb\x88\x2e\xac\x43\xfb\x88\xc8\x35\x0e\x67\xc1\x4d\x71\x9f\xad\x7e\x44\x7d\x56\xb8\xd2\x3d\x4e\x49\x44\xad\x9a\x55\x33\x30\x52\xed\x7a\x83\xae\x18\x77\x67\xe2\xd3\x06\x4b\x0a\x57\x9b\x02\x22\x3f\x7b\xd7\x71\xf6\x91\xb4\x52\xe7\x5b\x3b\xdf\xb5\xae\x5f\x0a\xcb\xda\xd0\x91\xe9\x20\xe0\x4f\xf3\xe2\x99\x42\x63\x45\x09\xe2\x69\xb9\x54\x3d\x61\x54\x28\x5d\xaa\xad\x78\x6d\xc3\x4b\xb7\x2e\x0b\xe9\xc1\x88\x9a\xca\x05\x60\xb3\xee\xcc\x50\xc4\x9d\xde\x5d\xe5\x5d\x98\x94\xe9\xbd\x33\xad\xf7\xae\x40\x01\x0c\x72\x95\xd8\xe0\x3d\xc0\xbe\x21\x0a\xcb\xd8\x16\x46\x7d\x59\x3b\xae\xeb\x9b\xf4\x62\xb7\x71\xfa\xf9\xa7\x32\x0a\xb1\x95\xa5\xdd\xbc\xfe\x39\xad\xbc\xbc\x18\xfe\xb9\x81\xaa\x77\xec\xb6\xec\xf1\x29\xab\x40\x82\x24\x9e\x4f\xd0\xa1\xfd\x27\x20\xc5\xd5\xc6\x10\x3f\x2e\x36\x0b\x8d\x31\x36\x88\x67\x68\x95\x9b\x51\xd2\x92\x16\x86\x85\x2c\x27\xeb\x33\x53\x32\x7c\x0b\x69\x4c\x37\x63\x25\x79\x39\xaf\x61\xcd\xee\xba\x2e\x64\xcb\x6b\x54\x8a\x72\x67\xbe\xf5\x96\x17\xca\x57\x6f\xb9\xd0\x05\x6a\xcb\xfe\x33\xc4\x17\x17\xf9\x19\xa1\x7d\xf1\x75\xc7\xb7\xb2\xac\xb6\x11\x31\x07\xd1\x79\x47\x06\x41\xb7\x71\xa7\x74\x0d\x40\x12\x5d\x7a\xf3\x9e\x85\xc3\x25\x27\x72\xaf\x67\xe4\xb2\xf9\xe9\xc6\xe7\x5d\xa4\xbb\x61\x38\x46\xdd\xb0\x6a\x6d\x99\xea\x19\xc0\xb3\x08\xc1\x80\x59\x71\x37\xb7\x73\x0d\xc3\x4b\x10\xbb\xdb\x95\xc2\xab\xf5\xe6\xa7\x47\x62\x77\x13\x6c\x41\xf6\x6e\x9b\xfb\x77\x92\xe1\xad\x0d\x57\xe9\x31\xb8\x7b\x2b\x72\x3b\xb4\x71\x6a\xfd\x04\xe0\x8d\x38\x4a\x61\xef\x9b\xbe\x5f\x59\xb2\x7e\x45\xb2\xfe\xca\xc6\x6c\xb3\xc1\x95\x5c\x84\x35\x7d\xbd\x91\x9e\x0a\xe6\x36\x3d\x74\x9d\x90\xdd\xb9\xd9\x20\x66\x10\xbb\xe9\x92\x60\xf0\xb0\x8b\x76\xc1\x5c\x6e\x15\x0a\xbb\x5e\x83\xe9\x19\x3c\xed\x2f\xe7\xb2\x0d\xc9\x06\x0b\xdc\x6d\xdd\x96\xd4\xce\xaf\x9a\x44\xe4\x04\xc5\x86\x9b\x8e\xdd\x38\xb7\x00\xef\x57\x65\x05\xbb\xf3\xab\x14\x10\x92\xd2\xaf\x00\xc2\x23\x76\xac\x9b\x40\x89\xf7\x13\x82\xcc\xc1\x2d\x6f\xb2\x0d\xda\x90\xb7\xa4\x7d\xf6\x6f\xa7\x76\x5f\x62\xbb\x63\xe6\x47\xfc\x70\x7c\x36\x64\x67\xef\xa9\xca\xe2\x76\xdd\x9e\xc1\x19\x77\x49\xbf\x2c\x58\x62\x9d\xc4\x03\xd2\xda\x6f\xf8\xd2\x5f\xe0\x30\xb2\x74\x76\xf8\x8f\x90\x5f\xc0\x44\x0f\x8a\x7d\x61\x52\x2e\x18\xe1\xbc\xda\x67\xbd\x3f\xfc\x1a\x23\x0d\x6e\x4b\x39\x18\xee\xd3\x47\x8f\x71\xba\xb3\x2e\x3b\x58\xe9\x93\x06\x59\x08\x27\xc6\x69\xc5\xcf\x73\xd3\x59\xd5\x98\x49\x26\xce\x69\x7c\x62\x14\xf9\x1f\x28\xe5\xcc\x6e\x01\x29\x6c\x25\x48\xec\x3c\xad\xb7\xba\x81\xeb\xad\xb7\x7e\xbd\x17\xef\xf9\xaf\xf7\x94\xbb\x41\x6c\xf1\x36\x7a\xd5\x34\xc3\x5a\xdf\x93\xdc\x5a\xd5\x5b\x70\x0a\x7a\xf7\x60\xad\x67\x10\xd0\x79\x97\xc8\x44\x17\xa1\xbb\xb6\xd8\x57\x23\xfd\xb5\x1e\x1a\xc5\x81\xa7\x1e\xc7\xab\x9b\x80\x8d\x58\x35\xc9\x0e\x8c\x6c\x3b\x8f\x48\xd9\xc2\x48\x5e\x77\x0d\x16\xfb\x53\x6f\xd5\x5a\x21\x39\xb9\xca\x9e\x08\x3d\xf0\x1f\x55\xbe\xbc\x2a\x19\xe6\x65\xd7\x04\x1b\xbe\xd0\x5f\x10\x0c\x29\x93\x2b\xc7\x0c\x74\x83\x55\xd6\x19\xc8\x1a\x4f\xf9\xc5\x8a\xf1\x8d\xd9\x3b\x85\x85\x1d\x77\xd5\x0a\xab\xc1\x4f\x11\xcd\xc9\xa1\xcf\x3e\xd1\x24\x12\x7c\xe6\x48\x6f\xcd\x07\xd3\xf3\x60\x15\xab\x61\xe5\x2a\xaa\x14\xfc\x9e\xb8\xa3\xc2\x5f\x2b\xd3\xe3\x89\x30\x52\xe3\x56\x33\x26\x98\x04\xf9\xa4\x72\xbe\xe4\x3f\x1b\x64\xba\xd5\xc6\xd2\xe1\x40\x3b\xbd\x99\x91\xb1\xe6\x7f\x1c\x67\xf3\xb8\xc6\x85\xe8\xc2\x82\x9c\x9f\x93\x52\x4a\xd7\xe4\x32\x4d\x8e\x3e\x24\xad\x7b\x64\x67\x4d\xff\x13\x5f\x3d\x74\x45\x41\x38\x6a\xca\xf6\x59\xd3\xab\x72\x6f\xc4\x32\xef\x40\x13\xff\x77\xd5\x9b\x71\x12\x83\xe1\x64\x7b\xcd\x3a\x9c\xf2\xcb\x41\xaa\x8f\x0e\x33
\x32\x87\xd8\xc1\x72\xcf\x5a\xa6\x86\xae\x29\xdc\x84\xad\xe7\x5f\xf8\x4f\x1b\x7d\x6d\xfc\xdf\xfe\x2d\x6b\x0d\xae\xbc\x75\x66\x68\xb0\x8b\xd9\xbf\xa7\xfc\x72\xf7\x89\x8a\x3e\xce\x28\xf5\x26\x62\x4d\x5b\xb7\xa6\x5d\x75\xba\x95\x67\x58\xe2\x73\xb0\x4f\xfd\xaa\xdc\xc4\xe3\x8c\x6b\x9a\xc5\xfc\x77\xfa\x1a\x51\x6e\xd1\x3e\x82\xa7\xcf\x7f\x3f\xec\x16\xeb\x37\x87\xc8\xc0\xd0\xc4\x29\x86\xd8\x66\xa7\x1f\xdf\xba\x25\x60\xe7\x2b\xf8\xb3\xdc\xeb\x3f\xbe\xb3\xf7\x9f\xf7\xcb\x97\x3b\xcf\x3b\xb9\xe0\xf7\x60\xaf\x08\x57\xb6\xef\x4f\xed\x18\x14\x7e\xa7\xaa\x37\x56\x77\x8b\x0d\x6d\xcb\x52\x05\x69\xde\x8a\x99\x30\x9e\xa3\x85\x27\xe4\x02\xf7\x3e\xa7\x86\xb4\x58\x62\x79\x86\x51\x6e\xb0\xb6\x1b\x19\x1a\xc2\x22\xa2\x37\x66\x0d\x59\x19\xdc\xe9\x96\x90\x5a\x4e\xab\xc1\xa5\xb2\x87\xb9\x54\x65\xa0\x2d\x55\x8a\x83\x4b\x3d\x10\x4e\x72\xb4\x33\xbd\x60\x89\xdc\x17\x03\x64\xe3\xcb\x60\x70\xa9\x73\x90\xff\x31\x18\xe5\x71\xf2\x5e\x9e\x3c\xb4\xa8\xfb\x48\xd3\x0d\x96\xff\x40\x92\x38\xc4\xa0\x74\x55\xbb\x98\xbe\x06\xb2\x42\xb7\xe4\xf4\x93\xc8\x5f\x69\xfb\xe0\x3f\x7d\x75\x5d\x3d\xd1\xf5\xd5\x42\xb9\xcf\x2e\x5b\xd5\xa5\x9c\x29\x88\xf1\x45\xd4\xfb\xc6\x7e\xdb\xa2\x18\x7a\x6f\x34\xe0\xa7\x9b\xc3\x73\x37\x22\x22\x0b\x3b\x0a\x20\xf3\x6b\x76\x7a\x91\x2c\xc4\x52\x86\x3a\x69\x78\x87\xb1\xe5\x0b\x55\xa2\xdf\xa3\xfc\x61\x26\xf9\x12\x4e\xc0\xe2\x5e\x2e\xb2\x27\x5f\x5c\xb6\xff\x95\xfc\x68\x48\x7a\xf0\x41\x3c\x10\x0f\x9c\xb7\xe3\x52\xa9\x15\x99\x44\x74\xeb\x46\x49\xa1\x6e\xc1\x00\x3d\xb7\xa0\xc5\x7f\x45\x80\xff\xfe\x98\xf4\xef\x04\x43\x61\xae\x0e\xef\xe5\x74\xde\xfc\xe7\xa9\x24\x0e\xa0\x1c\x2c\xfc\xdf\xe1\xfa\x40\xca\xa0\x6c\xb1\x16\x37\xe8\x74\xb0\x6b\xe2\x77\x61\xe6\x01\xf8\xf7\xdc\x6d\x8d\xb0\xce\xa2\x6f\x14\x4c\xfc\x9d\xff\x9b\x29\xc7\x90\x66\x4d\xc9\x2d\x31\xa9\x0b\x07\x5e\x8a\xf3\xfd\xdd\x08\x00\x9c\x8e\x48\x3a\xf9\xbb\x33\xd5\x24\xff\x6e\xf9\x8c\xff\x31\xfa\xf6\x57\x31\x77\xe3\x51\x9e\xbf\xba\x2b\xad\xd2\x10\x69\xf2\x55\x9c\xfa\x3a\x04\x33\xff\x49\xd3\x42\xf8\xf3\xfe\xa5\xd1\x2a\x29\xff\x7f\x7a\xff\xf2\x9c\xfe\x78\x03\x1c\xed\x32\x3d\xff\x4b\x92\x45\xfa\x43\xb8\xa2\x93\xf0\x77\x15\xb1\x3c\x2e\xbf\x83\xc4\xbc\xde\x5f\x1a\x06\x06\x1e\x0b\x63\x3c\xe5\xa5\x10\x42\xe4\xcf\xd8\xa0\x44\x12\x98\x05\xd6\xdd\x6a\x92\x0d\xcb\xc6\xd7\xde\xb7\xfa\xa4\xd5\x2f\xdf\xb7\xb2\x64\x99\xa3\xfc\x63\x4d\x39\x7f\x82\x5c\xff\x3f\xb5\x9b\x90\xb1\xa5\x35\xeb\x2d\x42\x74\x58\x17\xfa\x0c\x14\xd7\x74\xb2\xd4\x7c\x6f\xdd\x4b\x61\x16\x3b\x19\x2a\x6e\x85\xa4\x38\xef\xae\x8b\xf4\xff\x64\x2b\x30\xdf\x53\xc8\x04\x0c\xbc\x03\x30\xd0\xf3\x68\xce\x8c\x48\x8a\xc2\xdd\x33\xc6\x99\x24\xcf\x9f\x1c\xdb\x22\x6b\x56\xde\xbf\x9d\x99\x6d\x97\x4c\xf2\x7a\xdd\xd5\x92\xfd\x04\x6a\xc1\xe6\xd7\xa1\xd6\xa0\xf7\x5b\x9f\x44\xde\x35\xa4\xf4\xf7\x09\x57\x5f\xfc\xb1\xfa\x3b\x39\xbd\xce\xfd\x54\xf6\x52\x28\xf1\x63\xae\x53\x9c\x6c\x92\xaf\xaf\x0e\x5a\x1a\x7f\x48\xd9\xbf\xb1\x5f\xd9\x51\xab\xf0\x85\x98\x7f\x16\xbf\xd2\x75\xb0\x8e\xc6\x97\x85\x94\x3d\x66\xeb\x72\x84\xe5\xef\x33\x51\x2b\xcc\x9a\x0a\x72\x52\xe9\x02\x0c\x90\x43\xbb\x79\xce\xdd\xda\x57\x16\x97\x48\x11\x98\x97\x70\xc2\x06\xc3\x3f\x33\xda\xa7\x39\x20\xc1\x27\xf9\xe8\xd9\x2e\xef\x79\x8b\xe1\x2f\xc6\xc7\x1c\x9a\x3b\x60\xec\x77\xd1\x63\x2c\x14\xf1\xf8\xa7\x90\x1c\x90\x69\x37\x0c\x82\xe3\x7c\xe5\x11\x3a\x02\x88\xa8\x72\xb3\xa7\x78\xf0\xd6\x81\xd3\xc3\xcf\x43\x6c\xd5\xd9\xb8\x06\xcf\xb5\x90\x4a\xff\x0d\xf0\xe1\x84\xbe\x01\x0e\x0b\xe7\xf4\x66\xa5\x38\xa3\xd3\xb1\x25\x91\xe8\x5f\xfc\x85\x9e\xe2\xea\xe0\xa4\xfb\x38\x38\xff\x7b\x18\x3d\x83\xe1\xb9\x12\x5c\x7b\x37\x13\x8e\x55\xf4\xf7\x33\x70\x94\x10\x00\x58\x9a\xe6\x50\xe6\xf0\x8d\x81\x16\x80\x5
6\xa0\x7f\xc5\xe9\x17\xab\x93\xbe\x59\x9e\xc1\x3b\x75\x2f\x52\x37\x66\x9c\x9d\x52\x41\xb0\xb9\x11\xf3\x56\xa8\xd3\x90\xd8\x0a\x23\x0b\x9d\x49\x87\x73\x8c\x4e\x30\x11\xb7\xc6\x55\x14\x2a\x0e\x3c\x9f\xec\x4c\xc2\x47\x98\xdb\xe8\x42\xa6\x22\x86\xb2\xc1\xc4\x6d\xc7\x25\x3e\xe6\x34\xe4\xec\xd6\xb4\xf7\x87\x81\xee\xa7\x0f\x0c\x9c\x95\x7c\xef\x9a\xac\x7b\x92\xda\x10\x56\xc2\x61\x11\x0e\x38\xa8\x81\x32\x2c\xdf\xd9\x1b\xf6\x84\x8d\xda\x03\x4a\x2f\x7c\xde\xb4\x74\xbd\x47\xd1\xaa\x6f\x5c\x94\x23\xeb\xb3\x0c\x3f\xf6\x3e\xb2\x48\x9d\xed\xee\x12\xc4\x91\x64\x6b\x4b\x9b\xbf\x8c\xae\x3c\x10\x8a\xf2\xcf\x7f\xf5\x87\x3d\x68\xa8\x2f\xca\x30\xf7\xd2\x4c\x14\x80\x5c\x4f\x0d\x3e\x1e\x85\x56\xe4\x2e\x59\xa0\x5e\xfe\x33\x26\x82\x20\x56\xa0\x89\x2c\xef\xd3\xe9\x26\xbb\xea\x80\x22\x20\x54\x20\x88\xa4\x64\x5b\x12\xf7\x5f\xa6\x6a\x36\xf4\x92\xd7\x6e\xbd\x99\xe4\xf3\x8c\x38\xd8\x93\xb9\xec\x88\xe3\xc5\x71\x6b\x8b\x59\x16\xc8\x0d\x44\x34\x08\x98\x32\x2e\x40\x6b\x5f\x07\x51\x8b\x90\x26\x98\x03\xe2\xd5\x92\x0a\xc1\xfe\x13\x3a\x58\xa7\x9f\xc0\x23\x96\x68\xd9\xf8\xc4\x14\x5f\x08\x0c\xb4\x55\xda\x0d\xd4\xf9\xe6\xd1\xb2\x3a\x57\xcf\x3a\x48\x99\x60\xc8\x6a\x1f\xec\x1b\xf5\xa6\xfc\xb9\x4a\x68\xcc\x4d\x91\x79\xaf\x3c\xbf\x9f\x1b\x75\x1f\xa2\x67\xb9\x4f\x89\x03\xb4\xae\x30\x70\xb0\xc7\xf1\x8c\x81\x19\xb4\x06\x74\xbf\x8a\xdd\xc9\x4a\x4a\x3e\x6a\x70\x09\x16\x64\x4d\xc0\x3c\x73\x37\xab\x7a\xa2\x72\x12\xf3\x1f\x63\x49\x5d\x4d\x3e\x74\x12\xb6\xce\x56\xe8\x66\x64\xaa\xb7\x3c\xff\xa1\x28\x43\x32\x40\x0a\x01\xc3\x20\x6f\xe3\x8f\x76\x4f\xca\xaa\x7c\x26\x37\x9b\xfd\x8f\x32\x00\x65\xe7\x84\xd4\x13\xb3\x14\xf1\xc2\xe2\x57\x17\xca\x4f\x43\x18\xa0\xb2\x94\x6d\xf1\x2e\xff\x6c\xf1\xbf\x9f\x69\x3d\xf4\x9f\x75\x95\x51\xf2\x5a\xbe\x3a\xf9\xdf\xc4\xd2\x2d\x19\x9e\xf1\x8a\x27\x7b\x5b\x9f\xe1\x6d\x2c\x39\x80\xc6\xbc\x65\x98\x1d\x2b\x6f\xf2\xd1\x6c\x4c\xcc\x80\xfd\xa3\x75\x0b\x22\x97\x07\x38\x05\xda\xc4\x9b\xe4\x8c\x51\x89\x51\x3c\x42\xbe\xd9\x69\xff\x21\x8e\x0a\x5b\x63\x16\x2d\x77\x80\xfb\x82\x0c\x51\x66\x87\x3c\x38\xb9\xf3\x08\x0d\x95\x87\x3c\x0a\xad\x64\xde\xc5\xa3\x87\x49\x9d\x99\xbf\x77\x0a\x95\x2d\xe6\x87\x12\x96\x1e\x65\x75\x2e\x98\x95\xe4\x90\x3c\x4a\xee\xcc\xfe\xde\x82\x80\xaf\x9f\x3f\x8d\x3c\x7e\xeb\x67\x48\x6c\x06\x9e\xc3\x39\x2c\x87\xf5\x5f\xf9\xdb\x3e\xa4\xbd\xff\xc8\x4a\xb3\x7d\x64\xa7\xbe\x96\x80\xbc\x12\xcc\x4b\x4b\x3c\x21\xf8\x21\x8f\x74\x9f\xbb\x9a\xf8\x87\xb7\x14\xbe\x3b\x69\x6c\x8f\x20\x71\x3c\x3c\x76\xb5\xca\xd4\xd8\xee\xa0\xd2\xab\x0c\xb3\xf7\x84\x89\x53\x49\x00\x40\xbf\xd0\xdf\x9a\x47\x82\x67\x2b\x37\x1b\xfc\x3d\xc9\xff\x53\x48\x3a\xbf\xc5\x5b\x27\x51\x10\x0d\x77\x66\x0d\x65\x3d\x78\xe6\xaf\x79\x2c\xb7\x8f\x18\xdc\x04\x59\x29\x53\xf9\x47\xfe\x4f\x3f\x8f\x60\xe5\x20\xe6\xf3\x60\xdd\x01\x11\x29\xe8\x88\xd3\x74\xc0\x6e\xe3\x97\xcc\xfa\x83\xb9\x3d\x56\xb2\xc2\xce\xfb\xf5\x2e\x9c\xa8\xc9\x91\x45\x49\xb3\x63\x07\xe8\x06\x28\x32\x9c\x43\xd2\x1d\x09\x36\xe2\xeb\xcb\x43\x59\x4a\xe7\xa8\x23\x82\xa4\xdf\xed\xa7\x34\xd1\x9f\x9f\x32\x73\x44\x48\x38\x54\x08\x28\x8c\x92\x7f\xd3\x41\x18\x33\xaf\x3f\x31\x02\x4e\x75\x66\xe8\x20\x00\x58\xdd\x4f\x3b\xdd\x0f\x87\xd6\x13\xf9\x96\x93\x24\xd4\xa7\x5b\x92\xce\x53\xa4\x45\x4d\x90\xa4\xe5\x26\xbf\x6e\xdd\x74\x53\x3f\x60\x52\xc2\xe2\xc4\x91\xea\x04\xd7\x96\xdd\xc1\x43\x2d\xe5\x74\xf7\x33\xde\x63\xcb\x87\xfe\x3d\xd0\x7b\xda\x01\xcb\xa2\xd2\x3b\x1c\x93\xa7\x67\x38\x28\xd4\xcd\x9c\x6c\xac\xad\x9e\xfd\x54\xff\x77\x25\x4a\x83\xc3\x63\x99\x47\xac\xef\xdd\x9b\x24\xb0\xfa\x33\x09\xfc\x72\xb4\x9d\x22\x48\x36\x49\x22\x76\xfa\x51\xfc\xcf\x1c\x5c\xff\x21\x6e\x00\x1a\x06\x96\x14\x23\x9b\x6a\xb7\xca\x00\x7c\x32\xa8\x32\x
[hex-escaped binary blob omitted]
\xa0\xfc\xa1\x1b\xbc\x3f\x02\x9c\xe6\x18\xb2\x31\x42\xf7\xa9\x40\x6b\x03\xe3\x2f\x82\x76\xc8\x39\x1e\x4d\x4f\x22\x98\xc1\xe3\xd0\x1c\x74\x95\x27\x71\x82\x1d\x47\xa1\x25\xeb\x14\x85\x24\xf0\x31\x28\x26\x44\x1b\xb3\x0a\xbf\x6a\xc9\x02\x0e\xa2\x9e\x74\xc7\x96\x5e\x38\x03\xa7\x86\x24\x4e\x42\x36\x09\xeb\xea\xc4\x6d\x58\x29\xfe\x33\x90\xef\x63\x48\x4d\xfe\x1a\x4c\x86\xfc\x3c\xa8\x0f\x37\xa4\xc6\xc1\x60\x90\x62\xaa\x03\x88\x27\xbc\x7c\xea\x62\x6a\x0a\x29\x42\x8e\x07\x19\x92\x43\x8c\xab\x06\x26\xd6\x91\xae\x81\xfe\x8b\xb5\x62\x5d\xd3\x36\xf6\xc6\x9f\x0c\x7a\x30\x52\xbc\x99\x79\xc2\x14\xaa\xde\x0d\xaa\xaa\x9d\x9d\x93\x5e\xf5\x9c\x35\x7a\x4c\x5d\xfc\x0b\x29\x3f\xe2\x3d\xf4\xc4\x67\x1d\xd1\x75\x1a\xbb\xfb\x13\x09\x6b\x78\x72\xfd\xf1\xb9\x82\xb2\x30\x42\x62\x7a\x74\x78\xc1\xdb\xef\x1b\x6d\x37\xed\x94\x3e\x74\xc6\xfa\xa3\xb4\xdf\x20\x08\x5a\x64\x83\x98\x1e\x92\x43\x31\xbb\x4f\xd6\x4a\xd1\xa3\x21\x44\xa9\x27\x28\x53\x0d\xc0\xbb\x34\xd4\xb1\x74\x05\xad\x45\xff\xe6\x24\xa5\xaf\x1f\xf8\xa6\x83\x55\xf0\x1d\x64\x86\x11\x50\xd5\x23\x1c\x9f\x24\x6b\xce\xaf\x48\x70\xb6\x5d\x67\x12\x0d\xe3\x9f\x49\xb5\x6d\x78\x12\x04\x69\x18\xc5\x05\xc7\x00\x26\x2b\x98\x5e\x0d\xd6\xc5\x74\x73\x17\x82\x97\x44\x7d\x78\x40\x6c\xf1\x20\x14\xf5\x5e\x3d\x9e\xb7\x7e\x91\xc9\x1d\x67\x63\x30\x17\xe8\x7b\xd0\x7e\xea\xa0\x14\x19\x69\x7d\xdd\xa4\xac\xd9\x23\xb2\x28\x7e\xb3\x9d\x37\xbe\x26\xe1\x4b\xa4\x4c\x86\x2b\x03\x27\x40\xf4\x41\xd2\x39\xd0\x5c\xf1\x37\x2b\x13\xee\xa0\xe7\x79\x16\x01\x56\x65\x41\xd0\x0b\x67\xc4\xa0\x74\x0c\xdd\x05\xa7\x77\x0c\x06\xc9\x85\x61\xf8\x08\x72\x7b\xab\xbc\x8e\x5b\xd4\xdb\x64\x79\x3c\x7e\x50\x92\xfa\x3d\x42\x8f\xbc\x9c\xe2\x26\x86\xe0\xa9\x1c\x86\x20\x9d\xec\x9f\x22\xd4\xab\x58\xe5\x0c\x01\xe5\xbe\xca\x3f\xd1\x2f\x11\xc1\x05\x24\xee\xc7\xaa\x44\x16\x43\x4f\x1e\x04\x6d\x80\x95\x68\xc3\xca\xd3\x42\x3c\x8e\xfc\xb7\xcf\xab\xd9\x84\xac\x79\xc7\x97\x54\x8e\xe0\x03\x2c\x47\x52\x63\x2a\xd6\x6c\xbd\xd2\x90\xd0\xe5\x33\x36\x9b\x83\x2d\xb9\x74\x41\x34\x3a\xdf\xd4\x6c\x82\x7e\x9c\xdf\x86\x73\x39\xea\xd3\x37\x83\x6a\xe5\x3f\x11\x69\xae\xef\x8d\x8c\x8e\x27\xa1\xaa\x32\xbb\x2a\x73\x11\xe4\x4f\xf2\xfd\x3a\x48\xed\x81\x98\xdd\x45\x3b\x4f\x3d\x15\xd0\xaf\x71\xf0\x85\x41\xcc\x93\xd8\x8c\x94\xc4\xf8\x10\x9f\x27\x4d\xb8\x53\x3f\xea\xf9\xcd\x69\x0f\x5a\x4f\x83\xca\x38\x07\xdf\xa3\xb2\xf5\xfb\x7c\xf4\x2c\x17\x2e\xa5\xf7\x79\x09\xf2\x42\x66\xb4\xf4\x08\xee\x0d\xf6\xc3\xeb\x0a\x42\x44\x6b\x77\x49\x74\x90\x57\xb2\x8e\x48\xe4\x82\x8d\xee\xb1\xfe\x36\xb9\x12\x0c\x26\xc7\x70\x9a\xf5\x64\xc8\x4a\x82\x94\x1c\x33\xb4\xeb\xc0\x39\x5a\xa1\xf5\xc9\x72\x3c\x81\x48\x6c\x13\xe3\xe2\xc1\x58\xf8\xd8\x30\x13\x66\x09\xf8\xa2\xeb\xc2\x99\x1f\x15\xba\x01\x9c\x24\x44\x87\x20\x92\xe0\x8c\x70\x4e\xc8\xd8\xbb\x54\x55\xf2\x78\xc2\x29\x4f\x38\xe8\x63\x01\x62\x69\xcf\x61\x08\x1e\xc8\x1f\x8d\xe3\x11\xdf\x60\xf2\xd8\x32\x36\x6d\x7c\x14\xea\x8c\xd2\xb6\x8b\xa5\x92\xf4\xbd\x88\x72\x2a\x90\x9e\xe3\xcf\x25\x7a\x05\xa0\x07\xb1\xa9\x39\x63\xa4\x5e\x4b\x20\x96\xce\x62\xa9\x5e\x26\xc2\x7a\x7a\x9b\xe6\x1d\xfa\x2d\x72\x0a\xf5\x6e\x13\x02\x45\x82\x1c\x12\x5f\x81\x98\x34\xa5\xfa\x06\x9a\x48\xcd\x7a\xfa\x14\x6c\x60\xce\x0f\xf9\xe2\xf8\xd9\x43\xe7\x2d\x08\x36\xbb\x23\x0e\xe8\x24\x30\xd9\xc7\x02\x56\x77\xab\x8f\x77\x42\x49\x96\xc1\x28\x16\x92\x2d\xa0\x95\x2c\xa4\x29\xbb\x35\x5c\x18\x20\xaf\xd7\x7d\xca\x2a\x06\xd2\xbf\x8e\xb6\x2e\x02\xc2\x49\xcd\x71\x3a\x67\x8f\x38\x85\x1a\xe3\xe4\xa4\x20\x23\xc0\x83\xe9\x6e\xe0\xa1\x5c\x58\xaa\x1e\x9c\xab\x75\xf7\xaa\x99\x4f\x77\x2b\x56\xa7\xbb\x1f\x6d\x48\xef\xec\x83\x26\x3b\x5c\x48\x17\x76\x68\xcc\x22\x4a\xbb\xd5\x16\xbb\xe3\x5b\x29\x71\xd0\x1
d\x9f\x61\x12\xdd\xe3\xbd\x32\x7c\xcc\x79\x25\xc5\xd3\x47\x8a\x60\xfd\xa8\x3e\x49\x59\xd8\x52\xa8\x9c\xd8\x85\xbc\x7c\xe8\x40\x82\x3e\x92\xdf\x5f\x0b\x06\x75\x7d\xda\xc8\xe5\xe1\xf6\xb9\x89\x4d\x12\xa1\x30\x35\xe0\x46\x06\x97\xae\xf1\x84\xd9\x8b\xfa\xea\xa4\xf6\x95\x15\x51\x4c\x9e\x2d\x40\xc8\x09\xc0\x35\x39\x04\x6e\x3b\xa3\x7a\x56\x01\x91\xc4\x3b\xd6\x3d\x25\x59\x89\x86\x4a\x43\x74\x0e\x74\x07\x27\xd8\x9d\xd8\x30\x6c\xa2\x24\xbe\xcb\xfa\x0e\x0e\xc7\xa0\x02\xe9\x68\x36\x76\x9e\x17\x4e\x0e\x47\x9a\x94\x56\x4a\x4e\xc1\xe3\xea\xc5\x7e\xc9\x4c\x87\xae\x4d\x82\xc0\x37\x39\x1f\x8c\xe3\x35\xcc\x77\x5a\xc9\x85\xd0\x55\xe5\x55\x00\x60\x6e\xc4\x9d\x4b\x7e\x88\x89\xb1\x2e\xa2\x43\xed\xaa\x88\xae\xba\x3a\x71\x8a\x62\x20\xf5\xba\x02\x02\xac\xf9\x74\x6a\x9e\xb7\xe0\x50\x44\x36\x3a\x6f\x72\x48\xa4\xe8\xec\xc8\x10\x5a\x45\x11\x79\x8b\xe0\xbb\x43\x0b\xe3\x7a\x44\xb7\x14\xda\x1c\x06\xb6\xde\x8d\x58\xfb\x78\xd4\x9c\x96\xf4\x19\xf8\x48\x16\x7b\x3b\xa4\xf8\x2b\x92\x22\xb1\x81\xf0\x94\x5a\x59\x57\xb4\x0e\xdd\x95\x08\x86\x75\x22\x48\x71\x68\xba\x92\xb1\x37\xc0\xe2\x8a\x76\x68\xac\x67\xb0\x49\x5a\xaf\xce\x83\x7e\x41\x03\x5d\x89\x98\x3b\xb0\x2a\xfb\x9e\x99\x0e\x97\x6e\xe6\xc1\x59\x46\x54\x97\x25\x1a\x63\xe0\x0a\x16\xaa\x2e\xa8\x0b\x63\xee\xe6\x94\x92\x24\x66\x3c\x83\x5a\x30\x8b\xfd\x11\x54\x87\x3c\xb8\xb1\xcc\x23\x46\xbe\xe4\x1f\x12\x10\x13\x2a\x12\x07\x04\xf2\x36\x89\xd8\x35\x44\x1b\x91\x3d\x20\xc2\x4c\x1b\xd7\x07\x77\xee\x74\x79\xe5\x15\x97\x22\x1a\xc7\xb2\x34\x26\xc8\x7b\xd3\x25\xe7\xf0\xc0\x74\xf6\x33\x31\x10\x66\xfa\xd8\xe0\xf4\x0e\x02\xc8\x1b\x89\x24\x34\xfd\x3a\x10\x9b\x68\xf7\xa4\xfb\x4b\x9f\x9b\xc4\x88\x1f\x31\x16\x7e\xc4\xc7\x8b\x29\x4c\x50\x46\x82\x1e\xf2\x0a\xfa\xc3\x4b\x14\xb5\xa9\x25\x79\x18\x9c\x52\x50\x38\x5e\x30\xc7\xb2\xc8\x1a\xc5\x17\x00\x14\xff\xf6\x20\x16\x00\x2c\xc8\xae\x29\x98\x24\x45\x9f\x01\x38\x89\x83\x12\x6f\xfc\x0c\x18\xf4\x8f\x91\x03\x05\x76\xc8\x60\xdb\x6e\x24\x4f\x5d\x92\x94\x06\xa8\x24\xdb\x7d\x1c\xdd\xcd\xd2\xe6\x2d\x97\x50\x9f\xb6\xe0\x93\xec\x74\x8e\x4d\xc4\x93\x9b\x6a\x18\x3c\x41\x5d\x62\x42\x17\xca\xe9\xe6\xb1\xb4\x4e\xad\x1c\x4a\x90\x42\x8a\x74\x72\xe9\xf9\x5a\xc1\x4d\xc4\x5f\x17\x3e\x2d\x79\x23\xbb\xc4\x06\x9d\xc8\xae\xdf\xa5\x29\xd8\x40\x13\x48\x5b\x74\xde\x81\x6d\xc8\x8c\x09\x3e\xb6\xfe\x79\xf3\xa3\x4a\x5a\x67\xc0\x57\x26\x1d\x93\xf4\xee\x18\xa8\xe7\xb4\x91\xa2\x27\x7c\x25\x34\x2b\x87\x37\x79\x8e\xd2\xad\xf5\xcb\x74\x3f\xfc\xcb\xc0\xfb\x6d\x5e\x53\x84\xa0\xd0\xd0\x01\x45\xa4\x76\xb1\xdf\x49\x71\xce\xe3\x2e\xdf\xfb\xbf\xa0\x80\x74\x10\x7c\x89\xd6\x0d\x86\x13\x9b\x64\x8f\x1b\x11\xfa\xf7\x2b\x76\x43\x86\xd5\x2b\x7c\x3c\x83\x17\x32\x66\xd2\x66\xb5\x9b\xe5\x43\xe6\x43\xf9\x10\xcc\x0e\xb9\x69\xcb\x03\x25\x8a\x3e\x9b\x21\xf2\x26\x67\xa3\xf3\xe3\x6f\x84\xd2\x1a\xf7\x65\x37\xe7\x4c\x44\x88\xbe\x5f\x69\x9b\x38\xaf\x73\x01\xfd\x55\x84\x8b\xee\x3a\xe5\xd9\x26\x0d\x50\xee\xca\x65\x03\xc7\xd8\xf0\xe1\x7f\x7b\xb6\x74\xc4\x19\xb3\x71\xa4\x75\x2d\x07\x79\x0b\xa1\xe3\xd3\xbb\x10\x30\x28\x47\xab\xba\xba\xd4\xe8\x02\x80\xe6\x40\xc1\xeb\xb9\xbc\x8a\xbb\x4c\xdc\xb0\x61\x86\x9a\x41\x5a\x20\x56\xe3\xda\xc9\x8a\x0f\x4c\xd8\xd2\xe7\x11\x9a\x13\x04\x96\x0b\x58\x89\x68\xfc\xa5\xe5\x74\x8e\x2d\x24\x7d\x7d\x78\x63\x2d\xfd\x2b\x2d\x71\x3c\x72\x15\x98\xee\xeb\x51\x3c\xda\x5b\x44\x1f\x96\x3c\xa0\x55\x1c\x94\x20\xdb\x1a\x08\xbb\xd2\x18\x2b\xbb\xe6\x8b\x46\x26\x59\xe1\x0d\xe9\xf1\x95\x20\x9d\x3c\x79\xde\xd9\x49\x95\x2b\x53\xf8\x93\x02\x36\x92\xdd\xb3\x9f\xb5\x0f\x1b\xc8\x1e\x11\xdf\x5c\x2f\xf6\xd0\x3e\x7b\xa4\x6e\xea\x4e\x05\x65\x9a\x81\x76\x93\x68\xd2\x90\x0f\x5f\xa9\x13\x31\x66\xc7\xa9\x28\x32\x62\xb9\x9d\x
7d\x02\x03\x79\x1a\xfb\x09\xa7\xe0\x29\x69\x81\xfc\xfb\x71\x15\x80\xef\x2d\xa3\xff\x7b\xaf\xd1\xf4\xbf\x37\x03\x6e\xbf\xf7\xcc\x15\x92\xef\x6d\x83\xe2\x39\xa8\xf6\x5f\x3a\x4a\xad\x50\x7a\xfd\xb7\x1e\xe2\xb4\xfa\x52\x94\x0c\x21\x72\x22\xa4\x34\x34\xa9\xe4\x27\xf2\xe5\x6c\xe8\x8b\x0e\xda\x7d\xa3\x86\xfc\x88\x7c\x90\xfa\xfe\x1b\x91\x12\x5f\x4a\x23\x7e\xf3\xc2\xa0\xdd\x2f\x38\xfc\x58\x0e\x9c\x1f\x7c\xad\x9f\x0f\x52\xcb\x97\x4d\x40\x9d\x15\xd3\x1a\xcf\xd7\x9b\xe1\x17\xd9\x4b\x41\x8b\xb9\xd3\x50\x05\x41\x26\x05\x9b\xbf\xd6\xdf\x89\xc0\xf6\x0b\xba\x5c\x82\x75\x47\x52\x98\xc3\x60\xea\x4c\x36\x89\x7d\x30\x18\x13\x1f\x24\xf7\xd9\x40\xfb\xd5\xe5\xdc\x0e\x28\x82\xb8\xda\x87\x79\xd2\x9f\xdb\xab\x0f\xf6\xcc\x89\x79\x35\x9f\x08\xde\xf9\x38\xd9\xf8\x49\x34\x7b\x77\xe8\x24\x9a\x2c\x19\xeb\xfd\x51\xf7\xfb\xfe\xfc\xb8\x11\x83\x63\xf0\x88\xf8\xc8\xb7\xd5\xa2\xf0\xea\x58\xd2\xfb\xb2\x54\xbc\xdf\xdb\x6c\x28\xd1\x59\x56\x91\x6a\xd6\xa5\x17\x5f\xa3\x35\x19\xb2\x27\xc2\x7e\x92\xd7\xe3\x5d\x17\x8e\x4e\xf0\xb1\x86\x50\xe1\x9b\x4b\xd3\x44\xee\x3d\xb3\x32\x56\x17\xc1\xa6\x65\x36\x39\x09\x22\x1b\xf3\x01\x20\xa9\xec\x0c\x85\x62\xfa\x1b\xcb\xd8\xb7\xb8\x24\x0f\x45\xb0\xbc\x8b\x07\x23\xbc\x0b\x93\xa8\xde\x3c\xe7\xa6\x65\x35\xe8\x0f\x8a\x84\x32\x94\x08\x88\xfa\x80\x24\x61\x2c\xc8\x76\xd0\xf5\xe0\xa4\x0b\x42\xcb\xa0\xed\xb4\x96\xdf\xa0\x59\x86\xa5\xe1\x9f\xbd\xe2\x5b\xdb\x9c\x75\x2b\x39\x88\x1e\xed\xe1\xbe\xe9\x85\x06\x7c\xca\x9c\x04\xd6\x33\x06\x1b\xa6\xe6\xb3\xc0\xa2\xd6\x04\x1b\xe6\xc4\x85\x4a\xe0\xf6\x8c\x7c\xf1\x4c\x84\x9a\x86\x49\x6f\x39\x9b\x8d\x15\x67\xcb\xba\x61\x2c\x9f\x05\x89\xa6\x56\x3c\xdf\xe8\x2c\x82\x1a\x34\x37\x0a\x10\x90\x5e\xa2\xf3\xd0\xee\x9e\x1c\x9b\xe5\x74\x65\xb7\x83\x1b\xdb\x8f\x5d\x53\x89\x49\x06\x94\x6c\x06\xa4\xdf\xf4\x33\xe8\xf1\x79\xa2\xf9\xf0\x7e\xd7\x00\x0f\xa7\x0a\xd2\x5b\x99\xc2\xef\x14\x1a\xd0\xef\xf4\x7b\xed\xe9\xf8\x66\xba\x53\xdf\x18\x08\xff\x09\xe8\xde\x6c\x48\xa4\x7d\xfe\x4e\x62\xd3\x7a\x83\xc8\x24\x4e\xd3\xe8\x2b\x91\xe1\x52\x88\x78\xdf\x50\xb2\x60\xa5\x13\xe8\x5d\x37\x21\x57\xfb\x61\xff\x6d\x9b\x76\x3b\x07\xaf\xbc\xa8\x4f\x76\xa4\x9f\x89\x11\xe4\x9d\x34\xc1\x44\xb4\x40\x10\x77\x16\x5e\xb6\xd0\xb1\xf5\x4e\xf3\x6e\x13\x38\x2e\x45\xda\xc6\x12\x87\xc9\x3c\x7c\x83\xdb\xfa\xb9\xc9\xac\x04\xf1\x26\x0f\x1c\x86\x7a\xef\xa7\xa0\x78\x22\x25\x8e\xf2\xba\x9d\xa0\xc9\xad\xc9\xd7\x3d\x41\x19\x4c\x04\x9c\x27\xe5\x1b\x5e\x9e\xf0\xfb\x24\x62\xde\xd7\xab\x22\x1e\xdf\x01\x95\x1c\x5f\xb0\x9e\x9c\x2a\xb2\x44\x26\x3b\xf8\xb1\x05\xce\x3e\x35\xf2\xcd\x4c\x3a\x4d\xb5\x14\x77\x90\x4e\xe9\xbf\x60\xe1\x74\x4e\x4f\x64\xc6\x93\xe7\xb2\x64\xa9\xbe\xbf\xca\xa0\x3c\x95\x57\xe9\xdd\x8f\xf1\x6a\x81\xc3\x2f\x38\xc9\x5e\x64\xd7\x94\x24\x07\x46\xeb\xe0\xde\xb4\xf7\x3a\x2d\x62\xd7\x3c\xb8\xb0\xfc\x72\x05\xb7\x2c\xd4\x0e\xc4\xa4\x9f\xe4\x5f\x4e\xce\xf9\x9f\x93\x6f\xd2\x12\x78\x71\x92\xf2\x42\x2b\x19\xf8\x3d\xbd\xe0\xf5\xf0\x51\xf0\x05\x17\xcc\x76\x05\xbb\xe6\x79\xaf\xf9\x68\x1b\xfd\x7c\x7f\xb8\xd6\xfe\x82\xfa\x49\xbe\xdf\xf1\x2f\xb0\x94\x05\xaa\xdf\x38\x24\x07\x75\x27\x94\xa7\x96\x67\xec\xc6\x32\x12\xd2\x8b\xdb\x56\xfe\xfd\xe7\xf8\xbe\x53\x9a\xda\xd6\x5b\x73\x75\x6e\xcd\x9f\xdf\x86\x82\xbb\x0d\x9b\x0d\x44\x9a\x38\x60\x46\x8d\x3a\x9b\x27\xff\xbd\x90\xa3\x13\xb1\x10\x34\x97\x0d\xf7\x4e\xf6\xfe\x9f\x58\x3c\xfb\x2a\x34\x24\x22\xd9\x4e\xf7\x61\x0f\xd0\x73\xd1\x6e\xef\x33\xb3\xb4\x65\xef\x0d\x51\xc1\x01\x55\x32\x50\xda\x13\x21\xfa\x16\x89\x79\x8f\x63\x78\xc0\x6d\x2a\xcb\xd5\x83\x9b\xcc\x10\xd7\xe7\xfc\xba\xad\x6f\x80\xdc\x99\x9f\x55\x94\xa0\xbe\xfe\x70\x7d\x12\x43\x48\xcc\xe6\xdc\xdc\x89\x75\xdd\x0c\x52\xb9\xee\x53\x93\xb6\xeb\xd6\
xe4\x0e\x64\xa0\x31\x1b\x05\x8e\x9d\x1e\x02\xe1\x27\xbf\x77\x1a\xcb\xd7\xfd\x14\x7d\xe8\xe8\x33\x99\x8b\xe2\x3c\xd7\x2d\xba\x50\xb6\x42\x27\x26\x94\xb1\x77\x9d\xb7\xfd\xb9\xf3\xcc\x4a\x1b\x42\xdd\x3e\xf5\xfc\xdc\xe0\x32\x3d\xca\x8b\xeb\x45\x86\xf6\xbd\x04\x00\x87\x57\xe6\xc6\xfa\x3b\x94\xbe\x3e\x07\xfe\x8c\x0c\x71\x2c\x07\xe8\x48\x3a\x9a\x5f\x85\xb6\xdc\x15\xca\x6f\xd7\x91\x3c\xc7\x97\xd0\x6d\x23\x2b\x9d\xc9\x12\x6b\x0f\xfe\xb3\x7a\xb1\x09\x5f\x2e\xc0\xa1\x10\x2a\x6c\x68\xe9\xe8\x92\x5f\x1f\xd9\x26\x0b\x4b\x4e\xc4\xcc\xde\x6d\x2a\x7d\x57\xa9\x93\x75\x90\xde\x26\xc4\xa5\x74\x81\x2d\xee\x24\x98\xc2\xcd\x73\x05\x03\xe5\xe5\xe1\x15\x0e\x98\x5a\x82\x92\xf7\x5d\xbc\x7f\xb8\x20\x68\xce\x43\x21\x5b\x14\xcc\xa2\x47\x8d\xd8\x42\xa8\x91\x6c\xa7\xd6\xf7\xaf\xa6\x44\x8e\x04\x4b\xed\x2a\xd1\x8a\x00\xff\x13\x19\xa9\x4e\x49\x82\x19\xb3\xd3\x2f\x95\xc1\x4e\xba\xc4\x6c\xc0\xf0\x20\x92\xd1\x3b\x22\x40\xd0\x9e\x6d\xce\xe5\xe6\x4f\x5b\x91\x41\xfe\x26\x1b\x67\xde\x47\x1d\x86\x5b\x67\x2d\xf3\xaa\x80\x3f\x43\x34\xb2\x2e\x0f\xdd\x28\xfc\x85\x72\xab\xc1\x5f\xaa\x9f\x26\xb9\xa3\x6c\xb2\x90\x9e\x74\x5b\x5c\x50\xb1\x3b\x09\xa2\x26\x98\xd1\x4d\xca\x12\xb6\x8d\x9d\x3f\xd9\x82\x3d\x55\xaf\x88\x3c\xcb\x56\x88\xfe\x35\x31\x4c\xef\x4a\xd2\x75\x82\x77\x34\x14\xef\x2e\x9b\x6b\x7f\x18\xb0\x03\xc8\xcf\x02\x74\x65\x5e\x72\x31\xe3\xb2\x67\x72\x9e\x81\xbc\xe8\x62\x55\x0d\xbc\xa7\xce\xab\xf5\xe5\xba\x88\x95\xce\x3b\xf7\xa5\x61\x6b\xa5\x2f\x0b\x9d\x1f\x91\x40\x7e\x16\x11\xcc\x7d\x44\x29\x66\x96\x5e\xe3\x40\x9d\x64\x61\x9d\xd6\x57\x9e\x8d\xc9\x94\x5c\x5e\x36\xd6\x1f\x01\xfc\xb5\x19\x48\xdb\x27\x40\x0e\xb6\xd3\xeb\x5d\x6f\x9e\x43\x5c\x5e\x6f\x11\x30\x9c\x91\xb3\x73\xde\x47\x80\x6d\xe8\x16\xff\x32\x48\x64\x7a\x92\x01\x91\xa4\xa6\x89\xdc\xa5\xe8\xd4\x93\xf8\x4d\xc9\xe1\x76\x4f\x91\x48\x73\xde\x20\xe5\x0f\x79\x86\xd3\x7b\x11\x82\x9d\x1c\xa4\x66\x6b\x07\x43\xe9\xbd\x6a\x9f\xcd\xce\x9f\xcc\x86\x20\x11\xf9\x29\xa4\x59\xaf\xc3\xc6\xca\x78\x89\x14\x09\x29\x50\xdf\xaf\x7e\x94\x6e\x27\x57\xbb\xa0\xad\xc6\x39\x38\xf0\xa3\xce\x41\x0c\x5a\x19\x93\xe2\xae\x20\x9d\x14\xbc\x08\x1f\xf1\xd6\xf8\x56\x8b\xfc\x06\x7b\x29\x23\xb2\x0d\x3d\xc5\x03\x5a\x9f\xba\xa5\x0a\x37\xdc\x22\xd8\x28\x58\x6b\xd0\x02\x3b\x0c\x33\xce\x37\x82\xbf\xd4\xe0\x20\x5e\xd8\xab\x8a\x7d\xcf\x49\x50\x83\x0e\x35\x2f\x24\x29\x2d\xc7\x24\x6e\x53\x30\x86\x10\x59\x33\x88\xf3\x23\x32\x22\x60\xf0\x7f\x42\xe5\x82\xf1\x9b\xa0\x02\xe2\x63\xc5\x12\xd0\x25\x12\xd5\xb2\x35\x2e\xbf\xeb\x8f\x99\xe9\x1b\x0a\x03\x3f\x9d\x4e\xf8\x78\xd3\xff\x80\x2d\xf1\xfa\xd9\x17\x78\x90\x1f\xd2\xbe\xbb\xf1\x4f\x52\x0d\x62\x31\x4e\x5e\xc0\xac\x7a\x55\xb9\x49\x11\x43\xbf\x6a\x8d\xe3\x9e\x99\x8f\x3f\x89\x60\xee\x32\x83\x6d\x0b\x8a\x0c\xdb\x9a\xb6\xc2\xb1\xc6\x70\x56\x1c\x97\xc1\xe4\x8f\xe6\x10\x8b\x1d\xc8\x97\x48\x80\x2a\xed\x28\x50\x77\x6d\x4f\x31\xa7\x3a\xa3\xd1\x22\xd8\xb8\x65\xf7\x43\x5d\x0b\xd0\x16\xe4\xaa\x62\xd9\xdc\xcb\xab\x11\xb1\x26\x96\x66\x6a\x68\x0f\x83\xee\x01\xa4\x54\xed\x54\xac\x6f\x02\xfe\x2b\xfb\x9b\x26\x14\x95\xe0\xce\xe4\x0c\x04\xa5\xe8\x21\x81\xe8\x26\x10\xdf\xfe\x8f\x93\xf5\x9a\x35\xa3\x3b\xeb\xe9\xbd\xe9\x29\x19\xa7\xb3\x56\x2d\xc9\x9e\x1e\x3d\xe4\xcf\xa3\x32\x31\xfc\x24\x67\x89\x15\x3c\x9b\x07\x92\xfa\x53\xad\x43\xcb\x31\x3b\xab\x1c\x14\x67\xed\xd8\x6d\xac\x4c\x7d\xb5\x32\x24\x07\xce\xb5\x5c\xf3\x83\xc0\x2b\xdd\x42\xa2\xa1\xaa\x27\x90\xb7\x86\x74\x43\x0b\x83\xea\xcf\x67\x15\x8f\xcc\xa9\xf4\x39\xc2\xae\x11\x3b\x3f\x33\x3f\xe5\x27\x47\x15\x2b\xc7\x42\x76\xaa\xb3\x90\x95\xe2\x2c\x7f\xb8\x9f\x83\x12\xec\x2c\xe2\xc9\xa5\xea\xd0\x69\xef\xa6\xa7\x00\x1a\x74\x15\x7b\xd1\x9f\xae\x7b\x1c\xbd\x8a\x66\xd6\xec
\x04\x9a\xc8\xa4\x52\xd5\xcc\xf0\x2c\x41\xe0\x59\xf2\xf4\xd6\x94\xf3\x2c\x43\xf0\xb0\x0e\x9c\x18\x9c\xe0\x82\x15\xdd\xe9\xbd\xaa\x8b\x9a\x6f\x2e\x1d\x39\x96\xb6\x26\x16\xc9\x3a\x96\xfb\xe5\xee\xcf\x13\x69\x3b\x1f\x02\xe4\xd4\xe8\x2d\xfb\xc2\x73\xa3\x81\x01\x69\x54\xfb\xea\xb0\xd0\x2c\x4a\x03\xc3\xe3\x21\x6a\xd6\xd2\x58\x35\xe7\x88\x1b\x3c\x83\x74\x73\xce\x22\x79\xcd\x20\x81\x62\xed\xf2\xd2\x7a\xc5\x39\xc7\x2f\xd5\x15\xce\xae\x9d\x2c\xb4\x6b\x42\x87\xf5\x79\xdc\x31\xa7\x35\xe7\x6f\xfd\xd9\x65\x20\xff\x7e\xd9\x3f\x82\xcb\x59\x5e\xe6\x73\x14\xcb\xd5\xc9\x34\x9c\x33\xff\x8b\x8e\xc3\x1e\xf0\x47\x25\x59\x4b\xf3\xa5\x96\x4a\x8d\x9d\x33\x8b\x6b\x39\xc7\x97\x8c\x53\x29\xba\xcd\xf0\x40\x79\x66\xb0\x2f\x42\xfa\xd0\xe1\x32\xd4\x35\xd6\x38\xce\x0c\xb6\x5c\xf5\x25\x39\xe8\x98\xa1\x32\xc5\x5c\x82\xb3\x09\xab\x9e\x66\x1e\xfb\x47\x83\x05\x7a\x5e\x49\x7c\x9f\x99\xf1\x7d\x67\x0e\x85\x22\x27\xb7\x4a\x07\xa9\x3e\x8f\xaa\xfb\x66\xa0\x8a\xa3\x02\xc6\xab\x24\x78\x04\xd7\x68\x1b\x74\x9d\x8c\x35\xb8\x58\xef\xb5\x51\x8d\xde\x12\xd1\x01\xec\x23\xa7\xbf\x6d\x10\x1f\x25\x37\x1a\xd2\x83\xce\x33\x47\x76\x6b\x8b\x7e\x5b\x39\x73\xd6\x86\x9d\xf4\x2b\xf8\xe3\x5d\x1d\xa3\xa6\x22\x5d\x72\x02\x56\x09\xda\xf8\x42\x65\x1b\x27\x20\x6c\x79\x05\x9d\xea\x5a\xe8\x22\x70\xbe\x55\x32\x83\x3b\xd5\x62\x3c\x01\xa8\xf0\x9c\x8d\xb2\x75\x09\xba\x7d\x60\x9b\x52\xff\xf8\x59\x39\x3d\x73\x34\xb6\x9d\xa0\xcb\x21\x85\xad\x35\xd8\x6f\xec\xce\xe2\xba\x04\xb4\xf9\x78\x3b\x79\x70\x90\xe3\xd1\xec\x6d\x6f\x6a\x07\xa7\xc4\xea\xc1\x2b\x78\x15\x11\x22\xf7\xd6\x34\xe9\xf1\x3a\xed\x6a\xa1\x17\x71\x42\x45\x94\xe4\xa3\xf1\x82\xcd\x70\x9c\xb8\x00\x7c\xa6\xfb\xc5\xd0\xe7\x33\x18\xce\xd1\x76\xb5\xa8\x7a\x8a\x9c\x37\x41\xda\x59\x88\x5c\x52\x8e\x37\xe5\x21\x9d\x14\x6b\x12\x4a\xcb\xad\xd6\x81\x8d\xfe\xb8\xc5\xa1\xba\x81\x60\xbb\x08\x8b\x5b\xe4\x54\x26\xc4\xe9\x32\x4d\x02\x31\x43\x3a\x13\x38\xe2\x1b\xdd\x2b\x34\x65\x64\xe7\xc1\x15\xae\x3a\xc0\x80\xe7\x19\xac\xcd\x7e\x09\x44\xe8\xfa\x64\x4c\xad\x81\x46\x0d\x9d\x1e\x03\x3f\x3f\x2e\xd8\x9c\x69\x42\x14\x94\x23\xeb\x6b\x48\x27\xab\x78\x9e\x13\xf2\x5e\x2a\xc9\x04\xf9\x91\x96\x52\x38\xaf\x0f\x1b\xa4\x62\xda\x62\x38\x79\x71\x32\xe3\x05\x20\x89\xc1\x95\x14\xaa\xb7\x98\x89\x0c\x04\x7b\xda\xd1\xa6\x0d\xc7\xbd\x93\xc7\xf2\xfe\x65\xd9\x1d\xb7\x48\xde\x0c\x58\xff\x27\x6e\xcc\x3b\xdc\x39\x07\x0c\x0e\x85\xdf\x1f\xb7\x02\x67\xc0\xfe\x2a\x8f\x07\xa0\x17\x6c\x1c\xc7\x6d\x1d\x1a\x0f\xf1\xd5\x4e\x21\xb2\x8b\xda\xb0\xeb\x1d\x3c\xac\x88\x1f\x9d\xd8\x21\xbd\x44\x04\xb3\x4c\xb9\x0b\xa4\x4c\x00\x86\xb8\x34\xf6\xd5\x93\x9e\x70\x4c\x5f\xd9\xda\xb0\x0c\x1e\xe1\x9a\x58\x4a\x21\x7d\x29\x03\x56\x10\xe3\xa2\x78\x7f\x64\x6e\x89\xc0\x54\xc4\x99\x95\x6b\xa1\xf6\x5c\xf3\x29\x07\x04\xf0\xa2\x3a\xb5\x53\xfe\x54\xe1\x1d\x8f\x0d\xd3\x3f\x76\x54\x19\x4b\x47\x15\xad\xed\xcc\x0c\x1f\x80\x2e\x7c\x11\xb6\xd1\x28\x6c\x67\x71\xbc\x86\x60\xa1\x04\x93\xad\x38\xa2\xf6\x79\x96\x81\x00\x95\x00\xef\x36\x8e\x3a\xd8\xa4\xd8\x93\x25\x0d\xfe\x61\xba\x75\x03\x88\x61\x32\x47\x8d\x67\x5f\x3b\x31\xd1\xa6\xa1\xf7\x2f\xd8\x99\x63\x49\xe1\x6b\x03\x7f\x56\xc3\x2a\x2f\x86\xf0\x02\x1c\x62\x88\x3d\x45\x2f\x1b\x69\x6b\xf4\x04\x48\xc1\xc0\x36\xc8\x1c\x4c\x41\x2a\xd4\x37\x04\x55\x98\x11\x13\xec\xb1\x9b\x7c\x5a\x87\x2f\xd5\xfb\xed\x95\xac\xc4\xf7\xc3\x5d\xcb\x9b\x10\x8f\x52\x2e\x14\xba\x65\xb2\xf3\xba\x55\xd0\x12\x93\x8f\xb9\x7a\xa4\x0e\x78\x64\xfd\x21\xda\x74\x59\x59\xa4\xc7\xcc\x20\x9e\x03\xd6\xb5\x5f\x08\x71\xcf\x22\x1c\x86\x66\x18\x9f\x48\x6e\xbb\x7e\xe2\xe8\xee\x9c\xe0\x75\xf3\xff\x7c\x1d\xe4\x50\x0e\xf1\x91\xeb\xae\x76\x83\xb5\xf6\xf8\x14\x14\x7e\x7c\x84\x45\xa6\x91\xe3\x00\x81\x40\xd
b\x33\x88\xab\x37\xcb\xd6\x3d\xf2\x4a\xd5\x3c\x92\xda\xf2\x04\xab\x8d\x6d\x5f\xa2\xf4\x54\x93\x03\xf4\xb2\x5c\x9c\x8b\x91\xfc\x36\x78\x69\xcb\x49\x92\x10\xa0\x59\xbf\x28\xca\x92\x3b\x98\xbf\x6b\xc5\xf0\xbb\x39\xb5\x20\x38\xab\x79\xd1\x89\x5d\xd8\x81\x17\xc1\xef\xd0\xcc\x43\xb2\x36\xe7\x4c\x02\x1d\xeb\xa6\x76\x66\x1d\x82\x0a\xb5\xe8\x28\xfb\xfa\xbf\x65\xd2\xdd\xa8\x2b\x80\x0d\x24\x8e\xda\xcf\x56\x3d\xd9\xfc\x48\xff\x44\xcb\x9b\xae\xd6\x17\x1e\xc9\xe6\xa8\xfc\xef\x09\x27\xee\xa9\x1f\x29\x65\x28\x12\x1f\x0f\x57\xea\x73\x36\xc6\x43\x02\x28\x98\x2d\x58\x87\x25\x14\xbc\xfd\x4d\x2f\xf6\xc0\x0a\x94\x7f\xa2\x69\xf8\x36\xea\x4c\xb3\xc7\x8e\x44\x42\x38\xf0\xd9\x5e\x1f\x51\xd7\x16\xcd\x13\x49\x68\xcb\xae\xc3\xf0\xb3\xd1\xf3\xda\x06\xdd\xab\xfb\x47\xa4\xa3\xb7\x16\xd7\x77\x69\x7d\x81\xe5\x96\x81\x06\xfb\xcd\xe8\xf5\xfd\x16\xa7\xc1\x7e\x4f\x2b\xa9\x59\xf6\x3b\x68\x34\x6d\xf0\x7d\xfa\x47\xb3\x1f\x94\x40\x02\xc1\x6d\x16\x33\x6d\x7d\xf1\x44\xf6\x59\xcd\xe2\x02\xb5\x4f\x19\x8a\x03\x1b\xe9\x6c\x69\x57\xed\x70\xb3\xa5\x20\xbe\xd5\x80\x6f\x93\xab\x4b\xf5\xc0\x0a\x39\x09\x3c\x0f\x08\x89\x0b\xf4\xbe\xc6\x09\x29\x44\x91\xdd\x3a\x59\x18\x11\xec\x4a\xa2\xa0\x94\x65\x80\xd2\x8e\xc8\x5e\x35\x1d\xc3\x8c\x3f\x00\x93\x2f\x79\x70\x2b\xde\x4e\x50\xfc\x56\xdd\x7d\x55\x06\xec\x5e\xd7\x35\x78\x70\x23\xdb\x0d\x9c\xb5\x43\x25\x37\xaf\x8d\xe3\xcc\xe1\xdf\x39\xa2\xef\xb5\x50\x0c\x68\x97\x54\xf6\x1e\xd4\x29\x7b\x1d\xea\x12\xc7\x4a\x73\x7a\x5f\x6e\xcd\x7b\xf6\x85\x5e\x6a\x2b\xf9\x74\xac\xe4\x2a\x9c\x21\xbe\x22\x08\xb6\xfb\x25\x96\x26\x55\xb7\x2f\x8c\x4a\x06\x6b\xee\x37\xf6\x90\x79\x17\x1c\xda\x7c\x0a\xc5\xba\x63\x2f\x9b\x40\x14\x18\x75\xf9\xaf\xca\xef\x67\x47\x28\xec\x49\x20\x8f\xc2\xae\x25\x84\xbd\xc0\x39\xde\xa8\x76\x59\x95\xb2\x90\x14\x89\x94\xba\x22\x67\x75\x7a\x4d\xbb\x00\x37\xfa\x78\xf8\x2d\x02\x09\xa8\x55\x20\xc8\x0a\xf6\xb9\x20\x80\x91\x54\xae\xb3\x08\x68\x67\x86\x57\xee\xa3\x0f\x24\x7e\xd8\xe8\xb4\xd8\xc4\xde\xcd\x05\x8f\x6f\xbe\x0e\x31\x60\x89\x5f\xb7\x17\x54\xe8\x92\xf3\xbe\x34\x5a\xd7\x1c\x79\xdf\xbb\xc7\x5f\xc4\x02\xe6\x9e\x65\x73\xec\xf2\xe9\x62\xfa\xc7\x57\x65\x93\x89\x9b\x9e\x20\x40\xb1\x2f\x23\x98\xa9\xa1\x76\x21\xe4\x7e\xeb\xc0\xaa\xf4\xa8\xa6\xaf\xb2\x6b\x10\x72\xee\x5e\x96\x83\x07\x72\x19\x48\x1c\xb2\x76\xb1\x0b\xec\x0c\x74\xdc\x7d\xc6\xc4\xfc\x4c\xd0\xf6\x4e\x7c\x41\xca\x57\x46\xa9\x9a\x96\x5d\xf7\x26\x6a\x90\xdd\x07\x23\x7d\x58\xe8\xe9\x78\x52\x50\x96\xef\x1a\x5a\x6d\x96\xe7\xff\x4d\x9f\xcc\xf2\xb5\x28\xe2\x0e\x90\x0c\xff\x76\xac\x16\x2a\x76\xbc\xd5\x53\x0c\xbb\xe7\x0f\xe4\x8f\x00\xfc\xb9\x1c\x64\x6c\x0b\x5d\x13\xcf\x74\xb2\x5b\x82\xa0\x8c\x28\x84\xed\xb1\xe9\x09\x72\x41\x30\x09\x92\x93\xd8\x23\x64\x78\xce\x83\xf1\x20\x3b\xc2\x99\x83\x61\x18\xcc\xc1\xc7\xc4\x53\x5b\x73\x7d\x7c\x03\xb1\x0c\x9a\xe2\x4d\xb3\xbc\x3d\x2d\x3e\x95\xdc\x53\x08\xfa\xef\xa9\x65\x55\xec\x49\xd4\xc3\x8b\x18\x2c\xc1\xa4\xb5\xf0\xf0\x1e\x79\xa4\x64\x3a\xee\xf9\x99\xa4\xbe\xd5\xd5\x4d\xcc\xfa\xed\x3c\xde\x09\x6c\x1d\x6f\x0f\x5b\x00\x02\xab\x86\xbf\x89\x8a\x25\x1e\x7f\x4b\x48\x42\xa0\xc7\xcb\x93\x0b\x3a\x4e\x09\x14\x5a\xe2\x94\xa3\x52\xba\xa9\x0a\x20\x35\x4b\x8e\xd4\xc2\xbe\x86\xea\x40\xf1\x79\xc8\x3b\x70\xcf\x7f\x75\x27\xa6\x9f\xe0\xa0\xfc\x60\x3d\x9a\x0e\x5a\x05\x13\xae\x37\x47\x24\x8b\xf9\x9e\x95\xd3\x8e\x1a\x0b\xa2\xf5\x19\x4c\x9c\xb5\x31\x14\x99\x05\xa7\x49\x54\x9d\x13\xc3\xa2\xea\x74\xb3\xb2\x5a\xf1\xac\xe3\x95\x83\x2a\x18\x99\x3c\x5e\x86\xb8\x7d\xcd\xb1\x24\x5b\x39\x45\x66\x4c\xe4\x40\x54\x28\xf8\x46\xa6\x60\x11\x22\x23\x68\xa6\x1e\xce\x76\xdc\x25\xc9\xa3\xd6\x4e\x61\x78\x95\x73\xd9\x9a\x9e\x1c\x86\xb7\x0f\xd3\x67\xb6\x0f\xcf\xbe\xdd\x
ab\x68\x7f\x79\xb6\xa0\xb8\xb1\x72\x8e\xa5\x7c\x30\x84\x44\x84\xbc\x8d\xe1\x3a\xce\x67\xe4\x1b\x94\x81\x50\x82\x3b\x59\xb9\xd9\xc1\xa3\x6c\x7d\x2e\x52\xfd\x1d\x29\x64\x61\xb3\x91\xed\xde\x83\x11\x58\xeb\x25\x06\x66\x11\x06\x0f\xc1\x7a\x84\x69\x9d\x04\xf0\x39\xc3\xfb\xc4\xb1\xd6\x05\xfa\xa0\xe6\x48\xa5\xbe\xa8\xad\xc9\x52\x01\x55\x1f\x16\xb6\xe9\xe3\xed\x6b\xcb\x6f\x25\x3d\x18\x0a\xea\xe2\xfc\xca\x4e\x7e\xb9\xe5\xfb\x4b\xa2\xdf\x7c\x83\x12\xd3\x4f\x91\x63\x25\x7d\xcb\xb2\x46\xb6\xc6\x80\x12\x5d\xe6\xa6\x84\xe7\x0d\x9c\x77\xcc\xdf\xd8\xc0\x72\xe7\xff\xb3\x49\x2f\x49\x7c\x0d\x49\x56\x17\xc8\xcf\x14\x6c\x05\x1b\xc9\x05\xb7\x14\x9b\xd4\x2e\xde\xd2\xf1\x4a\x85\xbf\x38\x86\xb8\x58\x72\x37\x3c\x28\x5c\xe2\x49\xa7\x42\xa6\xe3\xe4\x5e\xd6\x2d\x4d\x3a\xcb\xc4\x59\xea\x96\xc6\x6f\x89\x7d\xe3\x1f\x3c\x44\xf2\xe3\x8a\x0e\xff\x0f\x97\xb0\x4a\x91\x00\x1f\x4c\x99\x5e\x21\x9f\x47\x40\x72\xca\x5b\x8e\xd2\xf5\x5e\x6c\x20\x24\x57\xa9\xf7\x70\xc5\x5d\x70\x86\x15\x15\x60\xc8\x07\xde\xf5\x9e\xb9\x09\xc7\x19\xd9\x82\x3f\x9b\x12\xb9\x56\x84\xf3\xf0\x74\xf5\xba\xe2\xdf\x24\x5e\x5e\xeb\x29\x7b\x0e\xa8\xe3\xbf\xfe\xcc\xa0\xd7\x26\x92\xb4\xe2\xa3\x3e\x0f\x8f\x14\x06\x16\xb1\xae\x7d\xd7\x22\xa6\x6d\xa2\xb7\x4e\x4b\x7c\x04\x00\xdf\xfc\x12\x9b\xbc\x13\xe7\x2b\xd5\x79\xd2\xef\x42\x62\x7a\x06\xf2\x8f\xdf\x01\x4f\x58\xb9\x0d\x71\xe1\x8f\x90\x38\x52\x81\xb2\x80\xa8\x3b\x81\xa6\x80\x27\x8b\x1a\x8f\x22\x18\x15\x0c\x3d\x83\xf3\xd0\x70\x2f\x2a\xd1\xba\xee\x73\x23\x2f\xfe\x6c\xc1\xdd\x5a\xd5\x47\x3a\xd8\xc4\x47\xb2\xd6\x88\x63\x05\xbd\x31\x8b\x35\x6a\xac\x30\xb4\xf5\x27\xd5\x0b\x9e\x63\x1e\xd5\x73\x85\xc0\x00\xa3\x35\xb1\x16\x27\xbe\xe2\xef\x37\x1e\x4d\x09\x9f\x95\x21\x36\x1b\x04\x6c\xdf\x1a\xd2\x7c\x43\x74\xc5\xf0\x49\x5c\x42\xf1\x93\xe0\x29\x06\x79\x35\x1b\x9f\x19\x4b\x62\x4a\x6e\xde\x57\xa7\x47\x2e\x3a\x72\x8b\xa8\xa7\xb5\xfc\x5c\x32\x8e\x05\xa2\x35\x95\x60\x02\x5d\x23\x7a\x13\x60\x6e\x16\xe7\xea\x4c\x46\xed\x1f\xee\x7e\x6e\xff\xa0\xef\xc9\x10\x24\x44\xae\xc0\xd2\xca\x33\x38\xd7\xb8\xc8\xf0\xc7\x09\xbb\x36\xe6\xb9\x15\x5d\x5e\x7a\x88\xc2\x0b\x9c\xc5\x64\xaf\x2d\xcf\x27\x6d\xda\xf5\x8f\x40\xeb\x5a\xc6\xf3\xe6\x95\x33\x89\x2c\x0d\xc4\xea\x23\x98\x9f\xc3\x87\xbe\x96\x81\x27\x07\xf3\xb3\x40\xa9\xcf\x24\x48\xd7\x99\xd3\x39\x37\xa2\x67\x8e\xc9\xae\xd6\xc6\x52\xca\xe7\x6b\xe3\x99\x59\x65\x9e\xad\x5a\x2b\x41\xa9\xe6\x9b\xff\xdf\xef\x22\x73\x3d\x78\xcd\xdb\x1d\xb4\xcf\x21\x25\xb2\x36\x36\x96\x95\x14\x8c\x56\x4c\x62\x83\x05\xd3\x25\x7b\x8d\x2c\xa7\xeb\x8a\x70\x62\x96\x88\xd3\x20\x11\x9b\x93\x65\x88\x86\xba\x81\xf2\x4f\x8d\xd2\x85\x85\x5b\xae\xf2\x8a\xd8\x0c\x36\xdc\x5c\x62\xb5\x65\x75\x1d\x61\xa1\x85\x3c\xe6\x59\xc9\x76\x6b\x1e\xce\x60\xa8\x56\x6a\xb7\x03\x66\xd6\xac\x39\x75\x22\x67\x7e\x07\x19\xdc\xda\xff\x41\x39\xd8\x9f\x0f\x0e\xfc\x2b\x88\xd7\x0e\xf1\x24\xf7\xd0\xa3\x6f\xbb\xe3\x80\x7d\x16\x5f\xf3\x06\xee\x33\x9e\x7c\xcb\x8d\x3b\x7a\xcb\xfa\x6f\x2a\xfa\x01\x28\x86\xd7\x80\x41\x3b\xfd\x14\x57\x2e\x88\xab\x5f\xba\x9c\x7d\x1f\x8b\xc8\xb0\x9f\x39\x89\xed\xbd\x2f\xc7\x2b\xd0\x76\x1d\x1f\x9e\x1d\xea\x2d\x42\xe0\x18\x26\x9c\x0e\xb3\x05\x44\x56\x3d\x89\x4b\xb9\x9f\x70\x7a\xfe\x3e\x6f\x51\xa5\xbc\xcc\x25\x18\xb4\xed\x15\x1e\x5c\x74\x02\xd9\x35\x1c\xf1\xa7\x70\xd7\xa9\x8f\xef\xed\x91\x35\x92\x6b\x58\x6f\x41\x88\x1d\x1a\x80\xe0\xbe\x66\xf2\x8a\xf3\x5d\xcb\x67\xe5\x38\x07\x23\xf6\x12\x0f\xab\x53\x96\xc3\x0a\x66\xec\x99\xa4\xce\x9f\x2a\xd6\xe3\x0f\xf9\x45\xd7\xf4\x2f\x91\x1c\xf9\x3e\x92\x68\x9e\xcd\x1a\xe7\xbf\x2e\x0a\x8f\x62\xfe\xc4\xff\x5c\xfa\xcd\x35\x8b\x8f\xf9\xa2\x27\x1a\x4c\xd6\x59\xff\x4a\x2f\xee\xe1\x84\x13\x25\x7b\x3c\x00\x4e\xde\x56\x4f\
x04\x4a\xa2\x92\x76\x39\x31\x47\xc7\xef\x43\x81\x43\x81\x97\x3c\x74\x9a\xe3\x9c\x1b\x57\x75\x78\x82\x57\x27\x20\xd1\xff\xe9\xc2\x75\x40\xbe\xe9\x48\xd8\x32\x34\xdd\xed\x07\x43\x81\xab\x32\xce\x04\x4f\x7a\x9c\x40\x82\x31\x4e\x91\x2f\xfe\x0f\xc7\x04\xdb\xad\x0a\xa9\x33\x43\xa4\x57\x50\xcd\x62\x43\xe5\xa6\x33\x37\xa1\x0f\xe4\xbc\x07\xa7\xb5\x22\x34\x30\x74\x14\x71\x61\x7b\x52\x9d\xa3\x95\x3b\x96\x70\x3c\x1b\xac\x37\x69\xba\xed\x85\x8e\xc1\x37\x8e\xee\x92\x7c\x30\x6b\x72\x62\x5c\x44\x3a\x2f\xb1\x2d\x12\x6d\xac\x5f\xa9\x22\x4f\x0d\xda\xc9\xa3\x3b\x5f\xac\x48\xd9\xaa\x48\xb9\xdb\x7f\x1b\x4d\x77\x8b\xc4\xb3\x6f\x68\xbd\x05\x06\xde\xde\x6c\xd3\x9d\x4b\x94\xe5\x78\x57\x1f\x21\xf1\xd4\xdb\x6c\xd0\x3e\x6a\xd6\x01\xfc\x6e\x1c\x3c\xcc\x84\xba\x54\x7a\x61\x9f\x11\xc1\x60\x33\xac\x1a\x28\xb8\xba\xe9\x86\x61\x60\xfe\x15\x48\xa1\x09\xc2\x44\xb3\xfe\xdb\x47\x13\xec\xff\x34\xf3\xe4\xc3\xf4\xf2\xa1\x78\xc0\x72\x33\xa1\x1c\x25\x17\xf8\x97\x5b\x33\x83\x25\x2c\x26\x03\xa4\x01\xe6\xda\xc2\x72\xab\xf5\x2c\xf7\x2c\x1b\x77\x91\x9c\xe8\x72\x0f\xaf\x22\x69\x5c\xc3\xf4\x47\x81\xdd\xa3\x1d\xf7\x87\xc5\x3b\xb3\xb9\x2c\x77\xa7\x5b\x5c\x44\xe1\xb1\xdc\x9d\xae\x2b\x5f\xbd\xd9\xe2\x22\x90\xae\x2f\xb1\x7b\xbb\x2b\xce\x0a\x91\x75\x33\xee\x6e\x31\x43\xed\x64\x29\x1b\x04\xb4\x46\xdb\x57\x13\x7e\xb0\x8b\x28\xcf\x10\x50\x94\xc6\x86\xac\x02\xa4\xa5\xae\x4f\xbb\xa3\xab\xbe\xfe\x23\x9d\xb7\x17\x39\x2e\x19\xf4\xd7\x95\x6c\xb1\x56\x36\x71\x22\x1b\xc9\x7d\xa0\x04\x89\x8e\xca\xba\x31\xbb\x7b\x71\xd1\xfa\x80\xc1\x1a\x5d\x0e\xf1\x65\x40\x05\x9c\x9e\xa9\xa5\xec\xb2\x61\x60\x0b\x44\x95\xed\x49\x37\x96\xf0\xad\xa9\x67\x2d\x6e\xbf\x04\xa1\xb8\xbd\x26\xd0\xe8\x12\x6b\xa1\x16\x88\x34\xce\x25\xd2\xef\x91\xe3\xe6\x96\x33\x2c\x2d\x25\x23\x3a\xf4\x32\xe2\x1e\x81\x8e\x60\x08\x8f\x50\x3b\x30\xce\xf3\x97\x36\x6a\xcd\x71\xba\x78\x96\xe0\x0c\xe1\xf3\xc3\x42\xe6\x7f\x41\x32\xce\xf0\xe7\xc5\xdd\x49\x1f\xa2\x39\x48\xc8\xd3\x4e\xb2\xec\xcc\x87\x00\x3d\x16\x2f\x45\x07\x67\x20\x9f\x4c\x5e\x04\xd4\x92\xc7\x92\x5f\xad\xde\xf9\xfe\x65\x93\x98\x05\xda\x05\x73\xa5\x59\x5d\x77\x80\xb5\x49\xb3\xd9\x86\xfb\x29\x16\x79\xa4\x96\xbc\xef\x52\x35\x33\x18\x3e\x86\xa5\x89\x5a\x82\x32\x2e\x1e\x02\xd2\x69\x32\x51\xf0\x11\x2e\xca\x68\x40\xee\x61\x94\x53\x12\xc5\x79\xf9\x99\x6c\xbe\xa1\x27\x98\xcb\x93\x5f\x03\xdc\xe2\x3c\x72\xbe\x6a\xd4\x21\x56\xac\x17\xf1\xf0\x2e\x79\x12\x97\x77\x1e\x21\x6e\xe9\xa8\xe7\x2d\xf4\x8a\x5d\x6c\x6c\x5b\x4b\xee\xc2\x13\xbe\xe4\x44\xe7\xd2\x92\xde\x57\x80\x83\xa1\x40\x4b\x7a\x91\x06\xfc\xe6\xfe\x1b\x82\xfc\xf2\x2f\xf9\xd6\x26\x20\x36\xf8\x14\x09\x04\x86\xb8\x9e\xb7\x84\xbc\xe3\x02\x67\x92\xd8\xbf\x0d\x15\xce\x34\x16\xe4\x8a\x93\xf0\x3e\x9d\x3c\xfe\x24\xb1\x27\x7c\xae\xf5\xad\x77\x9b\x48\xa9\x08\xc9\xfb\xad\x63\x8d\xce\x3e\xd8\xca\x61\x34\x09\xf5\x8c\xa5\x5b\xd2\x2e\x12\xf8\xe4\x7e\x3a\x31\xaa\x6f\xf7\x97\xc7\x6d\xad\x96\x6d\xca\x63\xf0\x1c\x38\x88\x1a\xfc\xd1\xa2\x83\x62\xbd\x88\x58\x7d\x92\x87\xc8\x79\xe9\x74\xa4\x8d\x18\xfe\x3a\xd3\xba\x33\x5c\xda\x99\xd4\x09\xca\xf7\xd6\x0d\x97\x4b\x64\x01\x06\x27\x5d\x64\x26\x05\xbb\x35\x59\x9f\x2b\x19\x0a\x65\x01\x43\x18\x96\x8b\x08\xdf\x27\x4d\xf0\x80\xf4\xa8\x46\x46\x7c\x1b\x90\x96\x41\x1a\x31\xa9\x17\x92\x1c\x41\x1a\xee\x5e\x77\x31\x6c\x71\x3f\xe0\xb8\xe1\x39\xfa\xea\x7e\x5d\xe2\x67\xa3\x8f\xef\xf3\xa1\x47\xd7\x31\x7f\xde\x40\x3d\xc4\x45\x2f\x36\x07\x8c\x88\xb7\x0f\x7e\x4f\xd1\x04\x3d\xef\xe6\xba\x35\x18\x44\xde\x37\xd7\x48\x9e\x77\xf0\xb4\x1f\x8c\x20\x32\x30\x7c\x48\x5f\x2e\x06\x86\xe7\xbd\x89\x43\x08\xac\xee\xf1\x73\x6f\x38\x90\x4a\xf9\xb0\x74\x5a\xf0\x5b\xa2\x66\x00\x13\x7f\xbb\x24\x81\x22\x26\xea\x9b\x16\x95
\x95\xd4\xa8\x7b\xde\x6e\xea\x2f\xc2\x4f\x5d\x69\x50\x1d\x40\x0f\x47\xd4\x42\x8f\x00\xfb\x40\x89\xac\xef\xdf\xcf\xf7\x47\x1a\x03\xaa\xf8\x42\x6e\x64\x83\xeb\x5b\x4b\x81\xc0\x24\xb0\x3f\xea\x5e\x86\x1f\x63\xfb\x51\xd9\x1f\x3e\x91\x08\xcd\xf3\x95\x46\x13\x5f\x3a\xfe\x2b\x17\xad\x8c\x3c\x7d\x95\x4c\xff\x4d\x9f\x4d\xec\xf4\xe9\xe4\x71\xa9\x3d\x2d\xe8\x27\xa2\xac\xdf\x8f\x08\xdd\xbf\x51\x6f\xb8\x28\xc5\x1c\x8f\x98\xf3\xb9\xe1\x5e\xfe\xd0\x67\xbd\x6e\x91\xc8\x5f\x47\x27\x20\xdf\xb3\x55\xf6\x0f\xd9\xfc\x33\x07\xd9\x7c\xd1\x71\x67\xdf\x9e\x54\xd5\xdb\x85\x80\xa9\x97\x58\x6b\x11\x60\x25\x01\x96\x60\x94\x07\x76\xb0\xbd\x7f\xa7\x58\x1a\x8d\x7d\x38\xa9\x9f\xbe\x40\x1d\x3b\xff\x1c\xa9\x01\x1e\xf3\x56\x51\xdd\x67\x77\x2b\x3c\x49\x46\xfd\x84\xbf\xeb\x95\x8e\x80\x24\x76\xaf\x7c\xc0\x1b\xf3\xb5\xac\xcc\x51\x0d\x32\x39\x90\x92\x9e\x8c\xea\xfa\x56\x9e\x3e\x3b\x22\xc2\x8a\x1e\x4f\xa4\x80\x21\x10\xd1\x5f\x8d\xec\x1d\xe3\xec\x8f\x77\x7c\x91\x18\xa9\x53\xd2\x23\x3b\x99\xd7\x5c\xb2\xc7\x06\x58\xa9\xb5\xd6\xe7\xc3\x2e\x10\x3f\xc2\x9a\x69\x84\xa4\x3f\x0b\x57\x35\xad\xa4\x86\xd6\x93\x5c\x43\xcf\x42\x59\x75\x30\xd3\x8b\x3b\x7e\x9b\x4e\x0d\xa9\xc0\x22\x99\x5f\x77\x11\x94\x97\x55\x3f\x5f\x96\x55\x7b\x82\x40\xd4\xd0\x14\x9e\x5d\x90\xd5\xab\xad\x95\x99\x3b\x82\x9e\xee\x59\x5a\x6d\x75\x04\x0c\x55\x9e\x35\x7f\x4b\x22\xed\x7c\xb0\x89\x90\x96\xde\xcf\x90\xcf\x19\xde\x56\x11\xc3\x1f\xe7\x15\xfc\xf3\x87\x5a\x0b\xd6\xda\x4f\x82\xb8\x78\xde\xf2\xa8\xda\xe5\x75\x9f\x55\xfa\x4b\xcd\xcb\xd9\x26\xdb\x30\x5a\x37\x95\x7f\x09\xda\xf3\x33\xcd\xf7\xc6\xc3\xcb\xd9\x98\xf2\xcb\x21\x92\x77\xfb\x7d\xe1\x2e\xde\x64\xe6\x67\x9d\x18\x58\x6c\xe5\x38\x73\xb2\x03\x38\x06\x03\x3e\xa6\x6d\x3f\x4a\x79\x7b\xb0\x7e\x81\x74\x7e\x3f\xd1\x24\x0d\xdf\x9d\xf6\x9e\x60\xaa\x24\x4b\x3c\x1b\x27\x66\xb6\x59\xa0\x69\x5d\x24\x24\xa3\xf1\xee\x9d\xea\x5e\xcd\xbc\xad\x8d\x3c\xd3\x21\x9e\x7a\x2c\x2f\xfb\x03\x49\xfb\xa1\xea\xea\x9b\x82\x50\x10\x3b\x14\x1b\x14\x66\xae\xcb\x23\xc9\xb9\xd5\x7b\x15\xf9\xfd\xc2\xbb\x5b\xb8\x4a\xef\xac\xf6\x2f\x3e\x87\x98\xdb\x3e\x31\x01\x6c\xea\xca\x4f\x28\x2d\xce\x34\x3a\x9f\xe8\x37\xda\x3c\xf2\xe9\x31\xee\x02\xd6\x9d\xf0\x68\xc9\x13\xd8\xbc\xe5\x9d\x88\x48\xf0\xfa\x00\x51\x88\xf7\x57\x8f\x3f\xe9\x21\x86\x91\x79\x1d\x6b\x04\x0f\xeb\xb2\x5d\x8e\xf8\x51\x77\xbd\xb1\x07\x98\x23\x7d\x0d\xe2\x51\xb9\xf6\xf1\xa8\xe4\xc4\x62\x34\x76\x38\xbd\x1f\x2d\x76\xfa\x91\x5b\xe5\x01\x49\x98\x7f\x8a\x4d\xf8\x01\x36\x49\xaf\x8f\x7d\xf2\xfa\x25\x9c\x43\x64\xfb\x70\x36\x7d\x52\xde\x43\x7a\x46\x68\x64\x95\xcc\xea\x51\x0e\xd3\xc3\x66\xa0\x6c\xb7\x40\xa4\xe4\x17\x3d\xac\x95\xc9\x6c\x00\x3c\xad\xf2\x4e\xe1\x19\xfb\xb3\xea\x50\xce\x3f\x8d\xb4\x9c\x5d\x18\x98\x05\x7a\x22\x3e\xa7\x2b\xa2\x43\x28\xdb\x87\xae\x44\xb7\xe0\x7d\x04\x2b\xdb\x56\x7b\xb1\x0b\xd9\xe3\xd1\xd0\x57\x7c\x02\x1b\xaa\xaa\x08\x56\x79\xd1\x88\x2a\x10\xe8\x43\x89\x85\x19\xff\xcd\x6a\x1d\x90\x0f\x81\x25\x54\x93\x42\x1e\xa0\x80\x11\xcd\x63\x9e\xc0\x04\xac\xf0\xa7\xf9\xa6\x57\x67\xbe\xaf\xe0\xc4\x57\xf4\xde\x7c\x47\xf2\xdf\x7c\x7b\x7f\x6b\xdf\xda\x75\xfe\xb8\xce\x6f\x7b\x90\xc1\x99\x7d\x5b\x63\x73\x3e\x76\x2a\xe2\x5a\x41\x46\x7b\x33\x1c\x83\x5d\x5f\x74\xfc\x4e\x20\x2f\xd2\xed\x3b\xc7\x95\x06\xb1\x48\xce\xf5\x15\x9c\xf9\x0a\x14\xf6\x18\xda\xab\xd1\xca\x9f\x67\xf0\xdb\x2b\xd6\x01\x0e\x26\x75\x88\xa0\xcd\x27\xe3\xfd\xb1\xb5\x38\xa6\xd9\x35\x50\xec\xa2\x1e\x54\x09\x16\xfd\xd2\xd8\xec\xc9\xfd\x03\xd6\x27\xde\x66\x5d\xbe\xbf\xe9\x80\x6d\x7d\x7a\xd1\x43\x1a\xfe\x43\xe1\xbf\x5c\xdf\x25\xe6\x18\xb6\x25\x26\x13\xf0\xec\x2f\x8c\x9b\x41\xaf\x92\x83\xd2\x95\x1b\x8d\xf9\xbe\x2e\xaa\xff\x63\x13\x59\xc9\x5c\xe7\x65\x8c\xf
f\xce\x7f\x9a\xce\x8c\xc4\xa0\xb8\xa8\xc1\x5e\x8c\xec\x88\x18\x26\x21\x7c\xcd\x4a\x63\x98\x6b\x3c\xbf\x2c\xc1\xf9\xb9\x0e\x7c\x23\xc1\xbb\x5e\x5d\xdd\xfa\xd6\x13\xa2\x7b\xd9\x7a\xec\x33\x1c\xdb\x33\x7a\x50\x3f\x3f\x32\x78\xfd\xf4\x45\x8f\x8e\xca\x00\x45\x62\x32\x90\xd9\xff\xd1\xf3\x33\x4d\xd7\x40\x3f\x07\x07\x05\xd4\xf7\xf5\x13\x1f\xee\x78\xc9\xb2\x0c\x0a\xb6\x72\xa2\x7e\x56\xaa\xe4\x91\x0e\xfd\x19\xb1\xd4\x7c\x8b\x45\x9a\xe2\x73\xfe\x84\xfc\x29\x20\x0b\x11\xe1\xbf\xdc\x67\x31\x3b\xa9\x89\x48\xf2\x79\xd7\x90\xda\xe5\xbf\x30\x08\x0c\xa4\xea\x57\x7e\xda\x9c\x95\x35\x3b\xbb\x1c\x38\xd3\x4b\x67\x4f\x38\xfa\x10\x2d\x2a\x73\xe3\xfb\xcf\x54\x73\x9b\xa1\x74\xea\xe5\x2a\xf5\x2b\x43\x2b\x8b\x50\x09\x68\x3c\xfb\x8b\xf2\x33\x40\xf9\xef\x04\xeb\xdc\x78\x6c\x3c\x53\xd9\x16\x95\x14\x72\xf8\x13\x02\x35\xdb\xc1\x41\xd1\x8f\xe5\xc7\xc6\xe7\xcf\xf5\xc7\x39\x77\x4c\x21\x06\x9f\x6b\xc8\x04\x24\x89\x17\xa4\x89\xa7\x4f\x83\xc8\xc6\x01\xfd\x0e\xc1\xf1\xdf\x49\x1e\x71\xf6\x48\x00\x32\xe7\xc3\x48\xb7\xb7\x72\x71\x83\xf7\xe9\x59\x69\xa3\x82\x79\x66\x9f\x7d\x05\x19\xbf\x99\xf6\x5a\x79\x75\xae\xff\xd0\x8e\x30\xac\x9f\x1e\x57\x7c\xc5\x18\xee\x82\xcc\x7f\xaf\x55\xfb\x56\xf1\xf4\x1f\x43\xfb\xc6\xb0\xa4\xf8\x09\xe0\xde\x21\xd0\xff\xd3\x15\x09\xd4\xbe\x55\xeb\x59\x43\x35\x80\xb9\x19\x36\x30\xb4\xee\x29\x29\x29\x72\x4e\x5b\xc8\x12\x6c\x67\x6b\x78\x49\x39\x1e\x70\x36\xea\xbd\x62\x28\x8d\x5b\xe1\xa8\x09\xf5\x1d\x36\x7a\xcf\x40\xf9\x09\x09\xb4\x59\xc4\x2c\x9d\xaa\xd9\x83\x6f\xb5\xc2\x8f\x50\xbe\x25\x6e\xf5\x8f\x0e\x44\x6a\xc1\xfa\x36\x94\xee\x87\x9e\xd4\x12\x22\x04\x66\x93\xcd\x01\xa5\xff\x33\xa7\xa2\x05\x49\x68\x14\xb0\x26\x13\xb2\x65\x79\xd8\xf0\xd6\x39\xba\xc2\x49\xbc\xf3\x4e\xaf\x14\xb6\x9c\x44\xd7\x3c\xdd\x21\x36\x64\x28\x63\x5e\x75\xfd\x27\xa1\x82\x10\x15\x60\x78\x69\xfe\x8f\xda\x04\x0c\xf6\x70\x95\x02\xee\xda\x1a\xe5\xc6\x74\xaf\x1c\x61\x26\x2c\xa7\x72\x8c\x30\xe8\x6c\x52\x71\xb2\x82\x19\xa0\x13\xc0\xdf\x30\xee\x2a\x11\x35\xa9\xa7\xbb\x7f\xf3\xe7\x9e\xe9\x7b\xd0\x8e\xb0\x2d\xa4\x13\xeb\xd0\x04\xe6\x4c\x87\xc7\xcd\x55\xc0\xe9\xa8\xd4\x00\x01\xf0\xa5\xa6\x49\x42\x2e\x56\x66\x8c\x37\x13\x31\xa2\xdd\x1d\x49\x81\xcd\x40\x53\x39\x81\xaa\x01\x23\x40\x79\x70\xb6\x16\xcd\xfe\xd5\xb1\x1a\xd2\x74\xd0\x0f\x35\x1d\xc9\x66\xb4\x24\xaf\x3f\x12\x4f\x9a\x34\xba\x4e\xce\xbe\xa5\xa3\xb6\xaf\x2e\x69\x4f\xe0\x91\x9b\xe4\x41\xc7\x93\x81\x01\xc5\x51\xfd\x9a\xf1\x44\xb9\x02\x5a\x3c\x53\x65\xa0\xcc\x54\x4f\x71\xda\xdb\x4b\xe5\xa1\x87\x12\xb9\x21\x5e\x30\x31\xd6\x63\xaa\x9b\xe7\xbb\xba\x5e\x41\xbc\x0b\x70\x00\x1f\x3c\x50\x06\x01\xc0\xd4\xa4\x15\x9c\x13\x8b\x47\x16\x29\x17\x20\x7c\xd4\xc1\xa0\xa5\xd9\x09\xa2\x77\x2c\x15\xc9\x08\xe4\xc9\x8f\x53\x1b\xce\xa6\x82\x44\x16\x6a\x5d\x94\x9f\x3c\x00\x3c\x95\x6e\x5d\x18\x5a\x1b\x3c\x5e\xfa\xef\x71\x49\x7e\xe0\x10\x07\xb8\xdd\x0b\xe8\x7a\x79\x1a\x7d\x7e\x13\xe4\x39\x74\x11\x76\xb8\x53\x69\x37\x08\x5f\x20\x05\x1d\x8a\xab\x82\xf0\x02\xf6\xc1\xb2\x79\x30\x9e\x61\xca\x52\x16\x56\x82\xe3\x14\x52\x60\xd3\x4f\xa0\x23\x1f\x8a\x50\xc1\xba\x21\xaf\x92\xc9\x09\x32\xd1\xf0\xcb\x84\xcc\x13\x02\x38\x59\x0e\xa1\x4c\xc0\xb4\x89\x49\xde\x3b\x28\x15\x48\xd0\x60\xfa\xb3\xfc\x37\x79\xf7\xcc\xfd\x59\x33\x9d\x49\xa9\x09\x53\x7a\xc5\x75\x0c\xe9\x81\xd8\x1c\x41\xf5\x4a\xf7\x50\x18\x7d\x3a\x25\xb9\x44\x01\x78\xd4\x75\x9d\xf2\x9c\x4e\x89\xdd\xca\xe4\xbc\xf2\xdc\x83\x6c\xb2\x42\xf4\xe4\xf1\x67\xa9\x8d\x15\x6c\xc2\x7a\x56\x3c\x11\x60\x66\x04\x4e\x29\xb2\xf9\x1d\x51\x4c\x82\x0f\x39\xf6\x62\x49\x44\x87\xb2\x0d\x43\x7d\x8b\x15\xc3\x4a\xce\xc8\xb3\xfb\x78\x37\xa5\x55\x95\x59\xe3\x6e\x63\xaa\xe9\x2a\x0b\xa2\xcb\x70\x99\x85\x8b\xbf\x2b\x5b\x
55\x45\x8a\x4b\x1f\x7b\xf3\x4d\xb3\x8c\x81\x09\xe9\xb1\xbc\x68\xee\x42\xae\x61\xf8\xc6\xf4\xdf\xb0\x6c\x96\x29\x75\xba\x79\x37\x78\xc7\x0f\xf9\xfd\x41\xef\xff\x26\xe0\x4d\x18\xe8\x19\x95\x37\xde\x9b\x0a\x4a\x1c\xdc\xcb\x9b\x64\x3a\x86\x5e\xee\xc4\x32\xb0\xa8\xc6\xe3\xfd\xbc\x15\x37\x35\xde\x0c\x32\xb7\xd2\xaf\x3c\x1e\x1f\xc5\xca\x8e\x07\x43\x6f\x47\x44\xc7\xd3\x6c\x82\xc4\x03\x53\x8e\x0c\x05\xa0\x8e\x9d\xdf\x8c\x0b\xfd\x08\xc1\xd4\x72\xe7\x3a\x64\x1f\x36\x95\x14\x72\x00\x3d\x3c\xd7\xf1\xc6\x43\xfc\xcc\xc8\xd1\xf3\x10\x48\x57\x77\xb0\xd9\xc8\xcd\x9f\xda\xdc\x0d\xdf\xf3\x58\xdf\x4c\x63\x1f\x3d\xdf\xa0\xc5\xd4\x8f\x15\xfe\xd4\x8d\x88\x4b\xc0\x23\x39\x7c\xb8\xef\x60\xa7\x6f\x40\xd9\xa7\xa3\x59\xc8\xea\x92\x47\xc6\x86\x30\x92\x6d\x44\x30\x48\xa6\x0a\x41\x95\xd4\xc8\x58\x23\xee\x73\x5c\x6e\x5f\xc1\xb5\x52\xdc\xfe\x86\xfc\x5e\x16\x2e\x55\xc0\x51\x59\xf9\xfc\x16\x8f\x98\x10\xcc\x7c\x02\x4b\xfa\x27\xd1\x0c\xef\x72\xfc\x2a\xf6\xd8\x18\x3c\xe3\x1b\x36\x65\xeb\x6f\x2a\x43\x6c\xcf\x50\x7b\xd8\x64\x14\x21\x91\x29\x94\x12\x6c\x3e\xea\x1c\x00\x63\x79\x24\x57\x4a\x80\xad\xcd\xef\x60\x2c\x41\x9e\x31\xba\xcd\xa9\x4b\xc2\x34\x68\x50\x4f\xc1\xc6\x6e\x8d\x92\x63\xa4\xa7\x8f\xe8\x32\x0e\xee\x52\xe0\x00\x06\x92\x88\x3b\x1d\xb3\xe6\x2f\x23\x46\x15\x1e\x36\x64\x19\xa4\x06\x13\xcb\xce\x5d\x38\x63\x86\x19\xe6\x67\x45\x64\x28\x55\x2c\x12\x75\x1a\x47\x2c\x52\x5c\x04\x07\xc7\x3f\x48\x4b\xa8\xd9\xa4\xbb\xe3\xd0\x30\xa6\x3f\xcd\x2a\x49\x28\xce\xe6\x85\x93\x13\x75\x8f\xce\xcc\x25\xd1\x8c\xb4\xf0\x29\x73\x5a\x3d\xa6\x54\xfc\x95\xe6\x7f\x7c\x79\xf9\x2d\xd9\x03\xe4\x41\xfb\x62\x68\xd6\xaa\x05\x4a\x01\x9b\x08\xbf\x7c\x20\xcb\xd6\xef\x3c\x78\xd0\x5a\xdc\x98\xf3\x75\x17\x3e\x57\x71\xec\x66\x12\x14\x99\xe5\xb4\x2b\xeb\xce\x1e\xdc\xc9\x45\x68\x90\xa8\x4b\x14\xa1\x09\x25\x1c\x1a\x95\xf2\xd1\x27\xc8\x5f\x51\xaa\xe0\xa0\x58\x67\x96\x3f\x11\x5d\xf8\x1a\x77\x8c\x01\xbd\xd0\xd6\xb6\xca\xd3\x69\xda\xe6\xf2\x79\x9b\x22\xaf\x11\x5a\x11\x59\x33\xdf\xbc\xfd\x86\x36\xc4\x8a\x6b\x66\xeb\x61\xe3\xfa\xbf\xec\xed\x4c\x96\x56\xf0\x97\x4b\xdc\x61\x55\x8a\x5c\x5e\xf9\x9f\x45\xdd\x3c\x7c\x51\x3c\x4b\x38\xa3\x7e\x58\x5f\x7a\x66\xc8\x87\x6b\x49\xf8\x65\x16\xb3\x39\xfc\x1f\x50\x74\x90\xe8\x43\x48\x6d\xf8\x88\x7a\x11\x49\x09\x2c\xb6\x54\x35\x38\x22\xf4\xbe\xe6\x63\xa2\xcb\x22\x2b\x80\x33\xc7\x9d\x20\xfb\x64\x0d\xdd\x88\x76\x13\x0e\x75\x64\xff\x8c\xca\xc2\x5f\x2a\xeb\xd8\x9f\x94\x9f\x1a\x8b\xbf\xba\x20\x14\x23\xf8\x6d\x61\x76\xc1\x5d\x3d\x03\x23\x87\x8f\x34\x0e\x6e\xb7\x98\x5c\x08\x42\x72\x0c\x66\xe6\x6d\x0e\x36\x79\x71\x0c\x85\xb8\x91\xcd\x45\x59\xc8\x01\xe5\x20\x11\x50\x6a\xe0\x8e\x14\x8f\xe1\xee\xf8\xe3\x4e\x1c\xfd\x87\xa8\xe4\x41\x99\x2a\xe1\x85\x99\xfa\x0e\x47\x12\x13\xd9\x50\xdf\xcc\x8d\x02\x60\x37\x68\x88\x52\x01\xf5\x3d\x2b\x79\x7a\x08\xfa\xe9\xc1\x1d\xe5\x6e\xe1\x0d\x75\xe7\x22\xfa\x10\x39\x74\x4e\x3a\x4a\xb0\xc6\x28\x34\x78\x72\xff\x4c\x66\x98\xa1\x46\x14\x9a\x21\x25\xac\x0d\xf0\x52\xf3\x9f\xcc\x94\x1c\x22\x76\x6d\xa8\xdd\x53\x55\x03\xe7\x07\x40\x79\x31\x20\x14\xd2\x0d\x5c\xe9\x18\x42\x17\x6a\x08\xea\xa2\xc1\x69\xb6\xa4\x47\xe1\x9f\x8f\x3f\xe4\x72\x70\xb9\x7c\x00\x43\x08\x9f\x24\x8d\x25\x4c\xfa\xde\x49\xa7\x75\x5d\xea\x4a\xb8\x86\x20\x44\x4b\xcb\x00\xad\x5a\xa0\xd2\x24\x1d\xa6\x59\xf2\x04\x25\x02\x23\x87\x92\x6f\x57\xa7\x20\x0e\x3d\x89\x7c\xfc\xea\x65\x5f\xef\x93\xb7\x94\xa3\xc5\xc0\xfb\x74\xa4\x76\xc0\x08\x91\xe1\x83\x5a\x10\xcc\x98\xb2\x32\xad\x0a\xf4\x1c\xc0\xcc\xad\x9a\x76\x74\xb4\x0e\xf9\x0b\x9a\x1f\x47\xef\x50\xa6\x73\xf5\x87\x33\x80\x44\x03\xee\xfd\xe0\x23\x47\x68\x99\x64\x6a\x06\x57\x9f\xa9\x44\xab\xd4\x45\xf2\xf9\x48\xf6\xdc\xa4\x1c\x11\
x62\x45\x83\xf2\x4f\xac\xb4\xe1\x35\x74\x25\x8e\x23\x84\x27\xb0\x8a\x18\xda\x0f\xd5\xd9\x02\x75\x04\x92\xa1\x79\x02\x1b\xb5\x79\x29\x25\x16\x0d\x19\x4b\x82\xbb\xb4\x35\xb0\x31\x4a\x8a\xc1\x46\x58\x9d\xd6\xf3\x0d\x1c\x6d\x17\xef\x63\xab\x1b\x74\x18\x78\x9f\x1b\x16\xe6\xa4\xf6\x20\x89\x8d\xbc\x42\xa4\x56\x22\x08\xd9\x23\xee\xf8\x2a\xf2\xca\x88\x71\x00\x89\x69\xac\x49\x5c\xb8\x43\x46\x64\x86\x40\x68\x58\xa0\x31\xa4\x45\x30\x9d\x97\x2a\x02\x4c\x80\xf5\xb5\x4b\x7a\x1b\x92\x1b\xb1\xbe\xeb\xec\x7e\x32\x11\xc7\x1a\x7b\x8f\xe9\x23\x84\x59\x50\xa8\x50\xf8\x42\x65\x88\xae\xe4\xa2\xb7\x47\x5b\x7a\x40\x2e\x83\x6e\x63\x0e\x9d\x0e\x9f\x7e\xf1\x2e\xa6\x90\xf1\xf8\xa3\xbf\x31\x22\x80\x56\x67\xa1\xe5\x00\x6d\x09\x1f\x3a\xa0\x2d\x81\xa4\x46\xee\xec\xee\x99\x75\x54\x27\x37\x20\x17\xef\x08\xc0\x5b\x82\x7c\xd8\x26\x94\x55\x5a\xbf\xc5\x9a\xa4\x8f\x68\x2c\x06\x85\x5d\x0f\x58\x60\xe5\x7b\x4d\xce\xaa\x15\x90\xef\xc7\xc6\xee\x7c\x53\xe6\x05\x10\x19\x89\xc4\xdc\x75\x6c\xfa\x8a\x40\xc4\xfa\x20\xda\xe9\xd5\x1e\xe0\x7d\x95\x28\xdb\x00\x3f\xb3\xce\xbc\x45\x08\xea\x00\x52\x70\xfd\x7b\x51\xee\xd2\x10\xbc\x33\x58\xb3\xe6\xe2\x38\x50\x0c\x40\x03\x63\xb4\xad\xe0\x77\x82\x57\x40\x30\x69\xe9\x7a\x20\x87\x8c\x15\xa4\x18\xff\x7e\x9a\x0f\x1d\xd2\x14\x54\x3c\x80\x46\xc5\x9f\xbd\x0f\x92\xe5\x7f\x26\xd4\x4d\xf9\x6a\x90\xae\xe0\x27\x66\xe8\xe0\x47\xdb\x7b\x76\xef\x49\x94\x22\x24\xbd\xbf\xad\xb1\x93\x9a\xfe\xde\xa8\x6b\x70\xaf\xe5\x94\x2a\xc0\x1d\x12\x07\x77\xf9\x4a\xda\x42\x91\x4d\x00\x59\x25\x5f\x1d\xa4\x2e\xc4\xb9\x0e\x26\x6f\x94\xc7\x87\x5f\x61\x0f\x57\xbc\xff\xfc\x70\xfd\x07\xa2\x55\x3a\x0a\x47\x53\xad\xd0\xc1\x66\xa0\xdd\x42\x7e\xa9\x0a\x7e\x4b\x02\x31\xca\xd7\xb7\x04\x17\xde\x03\xcd\x7f\xd7\x9a\x38\x08\x8e\x8b\xce\x03\xc0\x16\x5c\xd3\xd7\x2b\x44\x27\x4e\xad\xe6\x02\x85\xe8\x44\x28\x09\x40\x6d\x42\x62\x0b\xae\x36\x51\x02\x4a\xee\x82\x5c\xf7\xda\x1b\x3f\xc9\x2a\x3b\xed\xa7\xb5\xd6\xd7\x9d\xcf\xd5\x9d\xdb\x93\x6a\x57\xed\x71\x6b\x86\x63\x1b\x9b\x1b\x64\x00\x94\x17\x71\x21\x8a\x10\x9f\xd8\x24\xd4\x0c\xc5\x09\x55\x73\xbd\x98\x0e\xdc\xbb\xb3\x81\x3f\x59\x7f\x32\x14\xab\xbc\x8a\x06\xf3\xf4\x21\x5a\x3e\xa4\xa5\x97\x04\x03\x24\x94\x34\xc5\x77\xac\xfb\xb2\x8e\x44\xb4\xfd\x22\x22\xb0\xb2\xa3\x5f\xab\xaf\x73\x28\x55\x4c\x5d\x68\x4d\x28\x0f\xa5\xaf\xbd\xd2\xc9\x7b\xc4\x97\xfb\x15\x97\x1a\xe7\x87\xf2\x3e\x4b\x84\x07\x4e\xd1\xd7\xf7\x10\x08\xd7\x0d\x9b\x61\xa1\xd7\xb2\x44\x7e\x53\xbf\x48\x88\x42\xe1\x9e\xfd\x22\xf6\xf3\xbe\x68\xcd\xbf\x47\x0c\x39\x5b\xb7\x99\xcc\x91\xcd\xd4\x3b\xa9\xa5\x03\xa7\x1d\xe5\xbb\x2b\x48\xcd\xa7\x92\x02\x95\x9a\xfb\x42\x5e\x59\x62\xb8\xa8\x42\xb0\xc2\x03\x4c\x25\x3a\x80\x90\x68\x3f\xe9\x2c\x75\x97\xf9\xee\xba\x5b\x4a\x0f\x87\x22\xbc\x29\x4c\x81\x58\x29\xdf\x28\x5c\xe4\xea\xc1\x48\x19\xce\x73\xdb\x30\x9b\x97\x17\xa3\x22\x85\xe0\x32\x70\x60\xf4\xe0\x4c\x89\xb3\x98\xc5\xd7\x14\x03\xe6\x4c\x59\x8c\xd9\x95\xb8\x29\x83\x81\x18\x4f\xab\xd6\xc8\x8d\x7d\x95\xf8\x05\xa4\x10\x3e\x44\xcc\xbf\x42\x18\x6f\x66\xcb\xca\x52\x99\xb1\x6f\x1c\x41\x9d\x27\x71\xf0\xad\xf5\x39\x62\x4b\x7b\xd0\x10\xe6\x21\x0e\xa0\xe2\x4e\x8f\xd8\xa5\x45\x8f\x27\x79\x6a\x95\x34\x0a\x98\x05\xe8\x0f\x06\x21\xaf\x21\x35\x41\x0f\x78\x1f\xf4\xcc\x00\x11\xba\x88\xe5\x38\xbe\x7d\xf8\xa2\x9b\x2e\x82\xbc\x52\xe8\x38\x7e\x1a\x12\xdb\x41\x07\x19\x60\x68\x3c\x20\x57\x5f\xaa\x09\xc7\x22\x35\x0c\x03\x5c\x1f\x33\xc8\x17\x62\x25\x83\x4e\x7a\x77\x9e\xe4\x40\xca\xea\x00\x0e\xb9\x8c\x03\xb6\x8c\x84\x16\xf6\x5b\xe5\x1e\x3a\x15\x88\x43\x68\x4a\x1d\x48\x58\xd5\xb5\x37\x2e\x4b\xbb\xfc\x44\x5e\x84\xd4\xec\xf0\x80\x8f\xf0\x68\x4b\x84\xa2\x0a\xaa\xc6\x58\x1d\x16\x60\xf3\x40\x94\x6c\xa8
\x58\x0c\x6c\x10\x69\xdd\x3b\x9d\xda\x53\x1a\xf8\x83\x55\x32\x3c\x69\xa1\xc8\x25\x42\xa0\x4b\x88\xe5\x90\xce\xb0\x4f\x6a\x68\x11\xc4\xdd\x8b\xe1\xcd\x4a\x1b\x11\xa9\x64\x41\x39\x12\xcc\x14\x54\x56\xbd\xc6\xee\xc8\x0f\x21\x51\x27\x1b\x0a\xe9\x10\x58\xa5\x38\x75\xf7\x61\x57\xd0\x31\x53\xad\xbb\x3f\xd2\x78\x88\xe0\x22\x20\x29\x3c\x84\x0d\xd7\xdd\xb1\xd0\xd0\x61\x22\x72\x06\x0c\xa6\xf4\x1b\x42\xff\x9a\xb9\x77\xe2\xd7\xb6\xd2\x4d\x3f\x67\x21\xbf\xd7\x9d\x11\x36\xdd\xfd\xd3\x7f\x03\xe6\x62\x84\xa1\x89\x2e\xc7\xee\x9e\x48\xde\x7e\x73\x5a\x6f\x1f\x6c\x8b\x07\xc0\xc7\xdb\xcc\x5c\xa9\x4e\x70\x7f\x47\x62\x85\xee\xf8\x6c\x91\x6b\xd9\x1d\xb7\x07\xda\xa1\x0c\x47\x02\xf0\x53\x0a\x14\xf7\x26\xaa\x4e\x87\x1c\x48\xa1\x50\xa1\x95\x56\x83\x93\xec\x24\x83\x52\x3b\x40\x0c\xf3\xbd\xe8\xa2\x87\x0d\x69\x8a\x00\x35\xac\x98\x58\x43\x57\xfc\xaa\xf6\x21\x12\x51\xd3\x00\x6e\x34\x77\x3d\x74\xe4\x74\xed\x9a\x0b\xd1\x10\xfd\x66\x06\xd6\x95\xe6\x35\x22\xe2\xa9\x00\x68\x68\xc0\xda\xa0\xa4\x36\x8e\x0c\x85\x26\xd5\x06\x16\x3c\xc1\x8b\x7c\xf8\x87\x18\x6d\x75\x59\x6c\x49\x7c\x02\x84\x61\x11\xfd\xd8\x1d\xb1\xbc\xef\x91\x8a\x5e\xa6\x6f\x93\xc1\xb8\x63\x86\x02\x1c\xff\x6e\x22\x1c\xd0\xb5\x6d\x72\x17\x73\x0a\x14\x8a\x21\x10\x4a\x3d\x04\x9a\xc2\x06\xd9\xe7\x8f\x84\x15\xba\x10\x17\x80\xa8\x9f\x2b\x9b\xd4\xcf\xc5\x22\x74\x27\xde\x1c\x8b\x3b\x7b\xb8\xd2\x9f\xb8\x65\xcd\xba\xfa\x85\x80\x2e\x53\xcf\xff\x89\xc2\x1e\x39\x9f\x89\x80\x0f\x4e\x63\x3b\x46\x40\xac\x5d\xf3\x2a\x47\xa3\x79\xec\x6a\x0c\x3a\x1d\x42\x38\xd7\xbd\x12\x6e\x25\x5a\x40\xdd\x42\x85\x63\x53\x5f\x8b\xee\x86\xdf\x90\x8d\xb0\xac\xed\x5c\x79\xec\x9c\x7c\x96\xd1\xd9\xa8\xca\x73\x7a\xc0\x0e\xeb\xa8\xa4\x47\x03\xf2\xb7\x50\x1f\x43\xff\x6c\x34\x54\x1d\xc2\x2e\xbc\x6d\x54\x77\xfc\x77\x35\xf9\xb9\x17\x33\x35\xce\x88\x88\xf2\xad\x89\xc6\x47\xb7\x90\xfb\xc2\xc9\xb3\x39\xa9\xef\x96\xf2\x8f\x25\x32\x79\x63\x9f\x3d\xda\x98\xe1\xf9\x06\x49\xc2\xba\x07\xd6\xef\x69\xd6\x76\x11\x31\xe7\xaa\x1a\x5b\xd7\xf4\x51\xca\x15\x02\x18\xc8\x3b\xe0\x83\x71\x8f\xa1\xea\x0c\x6a\x72\xa6\x20\xc1\xae\xc8\x4d\x67\x43\x5d\x68\x57\x26\x29\x30\x94\x50\x21\xb0\xe1\xa8\x13\x98\x42\xc0\xe2\xeb\xe6\x42\xa7\x00\x49\x94\x58\xb0\x5a\x42\x10\xe3\x20\xdf\x1b\xd2\xb6\x34\xfb\x45\xb5\xa9\x17\x91\xff\x28\xbc\xd8\xfc\xf0\xc3\xd2\x86\xbd\xcc\x7e\xc4\x3f\x02\x01\x9b\x3a\x4c\x14\x3a\xf1\x64\x27\xfe\x1e\x11\x21\x92\xd1\xc9\x1b\x95\x73\xf2\xb6\x85\xac\xc7\xf6\xf0\x1e\x9c\x38\x7f\x03\xf0\x3e\xb3\x52\x93\xf0\xb8\x18\x90\x0a\x24\xb1\x8d\x6d\x68\xe7\x48\x77\x91\x7a\x49\x6e\x8a\x1b\x8a\x1a\x02\x90\x3a\x09\x66\x82\xae\xf7\xab\x0d\x3d\x93\xe5\x11\xbf\x98\xe8\x55\xe9\xa4\xe6\x89\x7c\x68\x39\xc7\x3b\x74\x70\xba\xcd\x99\x6d\x22\x67\x71\x2d\x19\xfa\x49\x7a\x64\x84\xfc\x53\x16\x64\x78\xc7\xe9\x86\x96\x69\x62\x58\x1f\x96\x35\x4b\xbd\xe1\xac\x65\x11\x03\x34\x6b\xa1\xfe\x81\x4e\x93\xf0\x18\xe4\xf9\x80\x0c\xc8\x9a\x25\x3f\xf1\x65\x1f\x91\xd4\xa8\xd2\xbd\x22\x86\x42\x70\x4a\xf2\x05\xdb\x46\x63\x81\x86\x1c\x88\x5e\x23\x7a\x1b\xce\x45\x3b\x50\x84\xb0\x97\x6e\x5a\x47\xa9\x7d\xf2\xe9\x54\x84\x3f\x50\x28\x3e\x75\xa2\x78\xee\xc0\x00\xf6\x11\x60\xc1\x8c\x3b\x07\xd4\x30\x03\xcc\x52\x12\xd1\xc2\x96\xa3\xf8\xe7\x56\x9b\x40\x08\x9d\x85\x06\x9e\x4d\x51\x24\x44\xc0\x0d\xfe\xd4\x43\x54\xb3\xc3\x49\x89\x63\x5f\x04\x06\x41\x34\x44\x95\x84\x91\xa1\x53\x77\x48\x9e\x4f\xba\x91\x43\xaa\x1d\xb1\x38\x01\xf4\xab\xc6\x7e\xd1\xc5\x00\xa6\xf6\x78\x58\xdb\x53\xb7\x19\xec\x9d\x86\x24\x8f\x82\x00\x1a\xfd\x53\xfe\xc0\x0e\x64\xcd\xda\x23\x19\x1d\x64\xea\xb4\xd1\x3d\x3d\x19\xb7\x6a\x20\x2d\x7a\x97\xe3\x78\xab\x2a\x99\x1a\xc4\x1d\x04\x83\x58\xa1\xfe\x59\x75\x16\xa4\x13\x2c\x42\x4
7\x48\x9c\xc0\x1c\xe7\x81\x5d\x3c\x0d\x28\xf3\x6c\x85\xa7\xf1\x0b\xa4\xaf\x66\x24\xe9\xeb\xab\xca\xe9\xd3\xd7\x7f\x5e\x26\x09\x51\xbc\x2a\xfd\xbc\xe9\x3e\xff\x58\x67\x3e\x50\x09\x8d\xa0\x90\x16\xa1\xb4\x06\x2d\x0c\x54\x9b\xf4\x48\x93\x7d\xec\x24\x98\x4e\xd1\x2b\xfb\xda\x2c\x35\x2a\xce\x10\x26\xa1\xcb\xcf\xda\x8f\x26\x77\x86\x0e\xb7\xd4\x0d\xb4\x38\xa7\x74\x76\xc1\x7d\x87\xd0\x03\x4e\x2d\xac\x89\x49\x66\xc4\xec\x72\x2e\xff\xa2\x1d\x7d\x55\xfe\xef\x96\x72\x89\xbd\x4b\x77\x02\xc3\x14\xe6\x28\xed\xd1\x1a\x97\x24\xc4\x5c\xda\xa4\x34\x69\x13\x0f\xf3\xc7\x84\x61\x25\x45\x18\x20\x69\xab\x21\x48\xa3\x18\xb5\xa4\x44\x6d\x04\x71\x50\xd1\xe2\x48\x2f\x1e\xc4\x78\x46\xd0\x7e\x2f\xfc\xfd\x76\xdd\x2c\xcc\x38\x67\x8d\xdc\xf0\xbd\x7c\xe8\xf2\xd8\xfd\x7a\xe8\xc0\xf9\xa7\x82\xa2\x00\x36\x08\x61\x71\xdd\xdf\x43\x05\xfd\x01\x6c\xcf\xd0\x32\x81\x1c\xd0\xa9\xdf\x0c\x9a\x3d\x43\xf3\xc4\x79\x16\x1d\xdb\x07\xcc\x9b\xdc\x24\xf1\x90\xd6\x73\xaf\x21\x6b\x72\xb0\x7d\x18\x90\x72\xcc\xaa\xd0\x78\xad\x83\x4a\x0e\x85\x4a\x1f\xcb\xd9\xc9\x59\x9d\x96\xed\xa6\x5c\x0d\x9c\x6b\xfc\x27\x08\x5e\xf8\xda\xb1\x36\x9a\xb3\xbf\x41\x7b\x02\x94\x52\x41\x6b\x27\x4f\x3c\x42\x42\xfc\x0d\x99\x15\xeb\x97\xd0\xf7\x93\x24\x01\x0a\x31\x7c\xff\x89\x07\x0d\x72\xc1\x27\x15\x7e\x3e\x2e\x45\xe2\xb2\x5d\x60\xb7\x47\xe1\x51\x16\x0e\xc8\xa5\x94\x86\x66\xec\xa4\x01\x8c\xc4\x5e\xa3\x41\x41\x11\x09\xae\xa7\x42\x57\x27\xc2\xd3\xf4\xe8\xba\x5b\x9a\x23\xf6\x0c\xa9\xa6\x9d\xba\xc1\x07\x04\x2b\x19\xdf\x63\x76\x3c\xe5\x05\xbe\x9f\x77\x71\x13\xea\xfb\x86\xfb\x77\x99\x2b\x79\x14\xbe\x20\xdc\xf7\x8f\xfd\x7b\x6b\x5d\xdf\xa5\x47\x50\xae\xb7\xd7\xf1\xbb\xca\x7e\xf9\x16\x92\xb3\x7c\x45\x8e\x83\xe5\x60\x9e\x04\xc9\xd9\x59\xe6\x6d\x5b\x0c\x73\x01\x92\xd0\x84\xfc\x16\x24\xf5\x70\x37\xd6\xec\x47\x72\x19\x7d\x41\xd4\xe5\x17\x73\x8e\xc3\x55\xc1\x19\xdf\x2c\xb5\x2e\x48\x94\x78\x3b\x80\x32\x49\xf6\x72\xed\xb4\x70\xf0\x75\x5a\x96\x98\xcd\x7f\xb3\x67\xaa\xfc\x47\xd1\x12\x1e\xca\x5a\x33\xde\xfb\x0b\xd9\x26\xaa\x99\x19\x94\xb6\x08\xc5\x07\xbf\x69\xde\xbc\x9b\x06\xe0\x55\x40\xa0\xe0\x8f\xfb\x9b\x7a\x2e\xba\x7f\x93\xdb\xe8\x3c\xde\xcc\xc1\x84\x87\xf9\xb9\x23\xbf\xf2\x73\x63\x55\x8b\xa3\x25\xf4\x47\x7c\x4a\xf4\x21\x73\xc1\x49\x48\x43\xf1\xc3\xf1\xfb\x83\x55\x60\x7f\xdc\x9f\x7a\x3c\x79\x04\xed\x11\xe8\x93\x64\xe9\x93\x90\x85\xe4\x93\xa5\x28\xf5\xc9\x52\xc3\xf8\xd8\xc8\xc9\x16\x0a\x44\xcb\xe1\x13\x4c\xd1\x9f\x4e\xb1\x22\x1f\x1b\x0f\x2f\xa6\xc7\x7d\x12\x39\x77\x3e\x69\xd3\xf6\xc6\x88\x95\x8f\xba\x7f\xf0\x64\x7f\x19\x1b\x05\xe8\x55\x84\xd7\xda\xca\x7f\xf0\x62\x78\xef\xf4\xfe\x7c\xd9\xe8\xdf\x1f\x59\x7c\xef\x8f\x9b\x69\xef\x8f\x34\x2e\x6e\xc5\x4e\xbc\x95\x19\xf7\xbe\xb5\x04\x69\xa0\x93\x52\x87\xe7\x4f\x33\xd6\xc5\xec\x54\x51\xcc\x19\x62\xbe\x91\x01\x4d\x0f\xde\xa2\x86\x81\x86\xca\x19\x6a\x2a\xc3\x41\x2b\x16\x90\x97\x06\x90\xfc\x44\xdd\xc8\x4f\xf5\x0e\x29\x87\x77\x5d\xcc\x58\x67\x68\xa0\xe1\x85\x2d\xf0\xfd\xcb\x63\x30\xa8\xd5\xc0\x77\x7d\x5c\x7a\x16\x0e\x09\xe6\x65\x0c\x6d\x93\x0a\x0f\x8f\x83\xce\x7a\xa4\x4b\x2a\x29\xbc\x39\x28\x92\x28\x92\xfa\xed\xcc\x06\x5e\x33\xfb\x4e\xf1\x38\x84\xb7\xf0\xe0\xbe\x63\xdd\xf5\x5d\x0e\xc9\xaa\x44\x5a\xc3\xbb\x6c\x3f\x1d\x94\x75\xf7\x09\xdb\xbb\x98\xed\xc7\xab\x2c\xd7\xa9\xb3\x2d\xcc\x63\x77\x90\x0e\x21\x29\xcf\x60\x4d\x69\x38\x25\xf6\xb2\x0c\xb3\x08\x10\x81\x13\x7f\xad\x98\x04\xeb\x09\xde\x7c\xf8\x25\x37\xc1\x99\x7c\x79\x3b\xff\x2f\x54\x5c\x02\xcc\xaa\x32\x98\x5f\x3d\xfa\xe0\x5d\x62\x95\xf3\x5d\x86\x4d\xf5\x1a\x56\x12\xb1\x82\x48\x47\xef\xac\x38\xb3\x20\x6b\x16\x32\xab\xef\xf9\xe2\xcb\x86\xe7\xac\xa1\x36\x19\xc5\x86\x38\x51\x5d\xd7\xc5\xcc\x20\x1f\x4a\x20\x
e0\xb2\x37\xdd\x16\x69\x35\x1b\xca\x12\x0a\x71\x74\x36\xc4\xff\xda\x20\x2e\xad\x9d\xfc\x09\xe9\x13\x91\x9f\xba\xe6\x8b\xa7\xa3\x02\xed\x88\xaf\xdf\xb4\xc1\xb6\x05\x10\x3f\x09\x5e\x04\x43\x55\x5e\xd4\x77\xfe\xed\x93\x7c\x89\xcc\x8d\x37\xed\x43\x08\xc8\x88\x92\xd0\xe0\x73\x56\x0e\x2c\xe6\x8c\xe0\x97\x7e\x0a\xc7\xf3\xc9\x4b\x34\xfd\xdc\x16\xc8\xc1\x11\x7f\x4e\xf9\xa7\x2c\x83\x04\x0c\xde\x8e\x84\x55\xb2\x9e\x94\xe7\xf2\xf1\xa0\x26\xcb\xe7\xaa\x32\x7c\x38\x30\xf5\xa5\xc3\x13\xca\x32\x6a\x30\x8d\x49\xf7\x9d\xae\x23\x94\x5c\xce\xd3\xc7\x6a\x89\xcd\x08\xf9\xc2\xca\x3b\x39\x95\x30\x87\x38\xdb\x90\xa9\x13\x8c\xcc\x6f\x0f\x10\x95\x13\xfe\x8d\xf8\x68\x2f\xd7\x35\xa4\x68\x56\xa8\x45\xfa\x1d\xfc\xd1\x12\x83\x2e\xc8\x9f\x5f\x99\xdd\xd9\xe6\xe1\xef\x1f\x95\xd4\x3b\x3d\x9b\x26\x0c\xab\x29\x72\xb7\xd7\xdd\xb7\x0c\xdb\x57\xad\x93\x6b\xae\x54\xe5\xf1\xbc\xea\x53\x0a\x2f\xcc\xf9\xb3\x32\x2f\x41\xe3\xf9\xaa\x83\xd2\xc7\x5e\x0c\xbb\xa6\x90\x4c\xd9\x19\x18\x86\x80\xb9\x42\xb5\x16\xfb\xac\x16\x2e\xac\xbc\xca\x4f\x17\xe3\x15\x43\x95\x01\x2e\xef\xbe\x10\x99\xd1\xfe\x2b\x59\x91\xec\xd3\x88\x42\x78\xba\xd9\xfe\xca\x4c\xf6\x7a\x79\x0c\xc5\x49\x94\x7b\xc9\xba\x80\xf5\x51\x48\x01\x47\xaf\x6c\x1f\xf3\x5d\xa8\x1c\xf9\xb2\x17\xeb\x3f\xc1\x7b\x90\x02\x8a\xbd\x9e\xba\xd7\x9b\xe9\x28\x2f\x64\x66\xf6\xb7\xa3\xe3\x25\x95\x93\x83\xea\x38\xf0\xa3\x9e\x01\x78\x9b\x5a\xd9\x02\x0f\xd1\x49\x65\xae\x57\xda\xde\x1f\x6f\xd4\x40\xf6\x3d\x0d\x3f\xfd\x97\xd2\xd4\x5f\x36\x2d\x5f\x43\x08\xe6\x7e\xc5\x11\x4b\xad\x02\x08\x16\x54\x77\xf3\x02\x8d\xee\xfa\x53\x8a\x71\x36\xbd\x97\x6b\xf3\xee\x4b\x9c\x6f\xaa\xc7\xde\x50\xbb\xa2\x7c\x2f\x2f\xb7\x1c\x4f\xc9\xd8\x40\x96\x4a\x20\x7f\x94\x39\x4f\x8d\x19\x84\x96\xc7\x31\xf9\x75\xc6\x59\x06\xe7\xc4\x5f\xda\x86\xd4\xf0\xb9\x11\xcd\x21\x31\xd2\x9f\x3f\x59\x9e\xbc\x8f\x05\x71\x37\x3e\x04\x02\x87\xa2\xec\x2b\x85\xf6\x0c\xc8\xa2\x93\x10\xca\x9b\xbe\xb6\xbb\x09\x9f\xdf\x10\x55\x3e\x08\x2a\x77\x30\x9c\xfe\xde\xaf\x39\x87\xf2\x3b\x96\x1f\xff\x28\xca\x78\x55\x6f\x39\x17\xee\xb5\x05\xc7\x63\x8d\xce\x0b\x04\xbf\xfa\xc1\x74\x38\xdd\x8b\xb2\x5e\xee\x79\x71\x89\xa5\x7b\xa0\x8c\xdb\xf5\x91\x5f\xe6\xfa\xac\x21\xe3\xf1\x61\x57\x7b\xd9\x68\x34\xd0\xd7\x7d\xbd\xb3\xbe\x26\x20\x92\xa9\x5f\xc1\xf5\x74\xc9\x4f\x8b\x52\xda\x30\x9e\x11\x76\xdd\x92\x7c\x41\xe2\x7f\x48\xc3\xdb\x44\x8b\x3d\x11\x64\x3a\x77\x99\x6b\xc0\x1d\x27\xff\x88\x01\x96\x6e\xc9\x51\x25\xe2\x52\x9b\xc2\x4b\x79\xc1\xc0\xa5\x0e\x08\xa8\xff\x1a\xd0\x2f\xca\x15\x3a\x31\xc8\x04\xff\x10\x82\x79\xa3\x38\x44\xdc\x01\xcd\x66\xc3\xbc\xd7\x23\x3f\xfc\x9b\xb8\x10\x1a\xa0\xc8\x98\xeb\x68\xc1\xa2\xd7\x61\xb3\x48\x84\xf6\x13\xbf\xe2\x4c\x8a\x03\x03\x80\xf6\x12\x5b\x18\x22\x85\x4e\x17\x51\x00\xca\x2a\xfd\x51\xd8\x4b\xf4\xab\x54\xfa\x19\x2e\x36\x65\x48\x59\x44\x14\xd4\xd5\x44\x5d\x2a\x48\x3a\xfc\xce\xaa\xbf\xc2\x4b\xec\xf4\xd6\x01\xaa\x5c\xa1\x6c\xab\x97\x55\x57\x2e\x4e\x41\xce\xa2\x48\x39\xef\x0a\x52\xa7\xab\x3e\xd9\xf1\x5d\x55\x62\x3e\x55\xbc\xb8\xae\x49\xe1\xe5\x2a\x59\xbe\xcb\x75\xb1\xfc\x54\x05\x16\x35\x8f\xea\x16\x0e\x0c\xd7\xfc\xd1\x67\x70\xc1\x03\x48\x15\x9e\xf9\x0e\x3d\x31\x40\xd7\xbc\x99\x8f\x3b\x74\x68\xd0\x47\xe7\x41\x90\x7d\xf3\x05\x92\x5d\x9e\xae\x4a\x9c\xa6\x84\xaa\x4e\x0b\xd5\xc5\x1c\x9b\x7b\xd2\xb6\xa7\xcc\x57\xe2\x41\xfc\xaf\x6a\xa3\x61\xe2\x66\xc8\xdd\xe4\x7f\x85\xde\x24\xa0\x81\x0f\x2c\xbf\x7e\x39\xf0\x36\xf5\xa2\x78\x92\xbd\x27\x7d\x0c\x57\xde\x45\xae\x68\xc8\x6f\x9d\x54\x8f\x17\x06\x96\x85\x63\xeb\xe5\xac\xad\x7c\x76\x08\xf2\xe2\x4c\xcf\x2c\x46\xde\x79\x7e\xe4\xb8\x56\xb9\x9a\x4f\xf5\x02\x11\xae\x86\xd7\x4b\x66\xd1\x95\xa7\x66\x00\x5e\x59\xea\x38\x6e\xc4\x5f\
x94\x5a\xb9\xd2\xad\xe0\x37\x43\xf2\xfb\x5c\xe9\x0c\x66\x4e\x17\xbf\x59\xd5\xcb\xd9\x86\xf3\x3a\x56\x62\x68\x80\xb3\x15\x60\x8d\x2a\x16\x34\x6c\xa3\x3c\x78\xf4\xc6\x61\x19\x5a\x39\x69\xa6\x2d\x70\x21\x6f\xc2\xe6\x0c\x54\xb1\x59\xd2\xfc\xb9\xf2\x9b\x47\x3f\xd3\x29\x75\xfc\x2b\x85\x2f\xf9\xf2\x09\x0d\x81\x64\x75\x3a\x55\xf7\xfc\x7e\xd6\x1f\x1f\xf9\x17\x6b\xb7\x6d\x0b\x1a\x60\x92\x01\xf8\x1c\x92\x5c\xf8\x6c\x9b\x74\x30\x3e\x83\xb8\xd2\xcd\x92\x75\xf2\xf2\x77\x0e\x25\x1b\x04\x7a\x4c\xa1\x74\x63\xb7\x2e\xe1\x94\xf7\x4f\x3d\xfb\x7c\x65\x76\x5d\xe7\x7d\x7c\x24\x68\xb3\xe3\x85\x66\xc2\x4d\x6a\x36\xb5\x63\xb6\xb1\xcb\xdd\xf8\xc9\xc1\xcf\x97\x04\xba\xc4\xa3\x16\x8e\x99\xe7\x8d\xac\x6b\x0a\x3c\xdc\x4f\x5f\x0f\x3b\xef\xa9\xd1\xb7\xdf\xd6\x85\xe8\xce\xee\xe1\xad\xb8\x16\x83\xa1\x45\xe1\xbc\xff\x3a\xe0\xba\x2f\x99\x61\x50\xbf\x19\xab\xc4\x60\x9c\xbe\xce\xd1\x71\xbb\xfc\x1d\x00\x09\xff\xbd\x2b\xa4\xb6\xcd\x51\xdf\xcd\x2b\xe4\xe4\x67\x8a\xf0\x03\x66\x3a\x12\xd0\xec\xfd\x7d\xec\x1f\xc7\x70\xe1\x9c\x2e\x90\x15\x97\xc9\x2f\x89\xce\x1c\x9a\x20\x03\x79\x58\x80\xf0\x1c\x47\x26\x26\xaf\x03\xad\x2a\xfb\x50\xc4\xb1\x89\xbe\x62\xa9\xa1\xb6\xc3\x1b\xbf\xaa\x44\x7d\x20\xa8\x63\x06\x78\x28\xbc\x54\xa9\xca\x54\x7e\x5d\x06\xc6\xf8\x4f\xff\x9c\x75\x6e\x83\xa3\x32\x35\xcf\x0b\x19\xa8\x3c\x77\x09\x21\x72\x83\x71\xcf\xe4\xd3\x3c\x03\xa7\x98\xde\xdb\x96\xbc\xd4\xae\x8e\x93\xfa\x80\xbc\x0b\x64\x91\x12\x6c\x3f\x45\xa1\xbc\x35\x69\x9d\x65\x89\x0b\x64\x05\xdd\x38\x8a\x1e\x43\x1b\x7d\xa8\x06\x99\x11\x1d\xcf\x0c\x9e\x41\x3e\xd3\x2c\x05\x2f\x08\xe7\x14\x70\xf0\x09\xeb\x37\xe9\xbc\xf4\xb6\xa0\x32\xce\x6a\x35\xf6\x2a\xc0\x45\x74\xb1\xc4\xac\x8d\x19\x44\x32\xf2\x81\xe3\xdf\xb0\x6b\x02\x85\x08\x03\x70\xfa\x61\x09\x2a\xba\x2a\x49\x04\x56\x9c\xd7\x2f\x71\x07\x7a\x3b\xf1\xb2\x5b\xd0\xbe\xc1\x41\x55\x1c\xae\x96\xd8\x6e\xc6\xcc\x87\x7d\xd0\xb9\xc3\x10\x61\x85\xf7\x83\xf2\xa9\xae\xaf\x13\x92\x37\xd5\x2c\x5e\xb9\xec\x4f\x50\x29\xfb\x4d\xec\x85\xa4\x9e\x66\xe7\x32\x5e\xca\xa3\xeb\x4e\x01\x18\x66\xda\xd9\x6b\x90\xc3\x64\x47\xda\x28\xbb\xe2\xb8\x5c\x43\x87\xd1\x9e\x06\xed\x46\xd5\xba\xcd\x8a\xf7\x53\x83\x01\x81\xfa\x4f\x08\xd5\x39\x04\xe2\x20\x5d\xcd\x4a\xea\x2a\xd4\xa3\x6b\x87\x47\x1b\x40\xd6\x8b\xdf\xbb\x02\x5f\x6d\xda\x56\x25\x68\x54\xa5\x4b\x54\x5b\xd8\xb0\x2b\xee\x60\x89\xcc\x31\x7b\xc1\x73\x2b\x4a\x63\xb6\xc9\xbb\x04\x25\x56\x3a\x3c\xa0\xc1\xb3\x85\xf6\x4e\xad\xee\xa6\x35\xc0\x79\x0d\xde\xce\x57\xa5\x3d\x66\xf6\x18\x6b\x38\xd5\x0d\x71\x79\xe7\x04\x39\x53\x48\xf2\xdc\x3e\x7e\x21\x80\x7a\x63\xf9\xd2\xd9\x97\x2a\x12\xa1\xf3\xa7\xec\xb1\x14\x8f\xc4\xb2\xc9\x67\xa8\x78\x2c\x29\xc4\x7d\x9e\xf7\xc8\xef\x12\xcc\x19\x7e\x99\x67\xf8\xad\xce\xa7\xd3\x23\x9d\xcf\x47\xe6\x66\x51\x92\x9c\x0d\x9e\x94\xc2\x35\x40\x06\x0e\x00\x76\x17\x4f\xaa\xec\x9d\xe5\xdf\xb3\x35\x1f\x99\x15\x67\xd1\xd5\x8b\x54\x61\x4a\x74\xa5\xe5\xa0\xfa\x49\x09\xb6\x58\x20\xfe\x67\x93\x1c\x2e\x50\x6e\x5f\xa3\x0d\x11\x75\xe5\x4f\x40\xa8\x28\x19\xa1\xe5\xc5\x55\x87\xb3\x55\x14\x4b\xab\x7a\x87\xe5\xf9\xa4\x25\x73\xc2\x36\x08\x31\xa1\x2c\x01\xa5\xa2\xb0\x6b\x00\x55\x32\x8f\xcf\x40\x3f\x45\xa1\x4e\x2d\x41\xf4\x47\x67\xe9\xef\x58\xe9\x3d\x9b\x72\x88\xf3\x20\x01\xcc\xf7\xaa\xda\x1b\xb2\xd9\x16\x1f\xcd\x7c\xff\x15\x0b\xba\xc1\xbf\xcf\x0b\xcd\x37\xd7\x1f\xcf\xf9\x88\x43\x0f\xac\xb4\x39\x82\x95\xc8\x53\x55\xd1\xc2\x01\xf1\x77\xe5\x15\x6a\x3e\x65\xb3\xa3\x6e\x41\x4a\x7f\xd8\xf0\x5d\x25\x22\x69\xb8\xb8\x8f\x0e\x1c\x25\x54\xb4\x42\x3a\xed\x4d\x51\xac\x39\xfa\xc8\xd9\x19\x1d\x98\x6c\x07\x81\xa1\x97\x64\x8a\x6c\x16\x7c\xa5\xd8\xb9\xb3\x2c\x71\x94\x26\x76\xd0\x21\x92\x42\xc9\xec\xeb\x5e\x42\x7b\xec\x92
\xca\xdb\x6c\xd6\xc9\x47\x40\xe2\x41\x20\x2f\xd0\xe1\x0c\x34\x3c\xf3\x27\x9a\x50\x16\xad\xe0\x09\x5f\x37\x85\x7e\x14\x11\x7f\x62\x1e\xa1\x1e\xc4\x03\x7f\x09\xb8\x2d\x46\x0d\x00\x9d\xa7\xd9\x1b\x19\xa2\x8c\x7a\x3b\x79\xe3\xe3\xce\x5a\x7a\x38\xf3\xba\x8b\xac\xff\x44\xc3\xd0\x3e\x04\x28\x38\x5a\x9c\xdd\xef\x8c\x17\x9f\x97\xce\xdd\x6b\x67\x48\x7d\x42\xab\x88\x05\x3f\x70\x9b\x7c\xb3\x8c\xf4\x62\x43\xf6\x1b\xb9\x6c\xce\x3c\x30\x7c\xfe\x8c\x8c\x87\x33\x27\x24\xa2\x13\xf5\xea\xfe\x73\x12\x6f\xd7\xd9\xdf\x1b\xe3\x83\x5d\xbf\x68\x62\x07\xd8\x1f\xf0\x24\xb6\xd7\x06\x0e\xc5\x5d\x54\x34\xb6\x91\xa2\xd7\xb6\xb3\xf1\x7b\xef\x6b\xc7\xe7\x67\xb3\xf3\x44\x85\x4b\x48\x1b\xdd\x54\x42\x05\xa4\xd8\x86\x23\xcd\x02\x5d\x06\xe9\x88\x43\xc5\x89\x4e\x41\x24\xa7\xdb\xb8\x79\x35\xb3\xfb\x25\x5a\x74\xd3\x94\xc3\x7c\xd7\xff\x3d\x69\x2b\x64\x8b\x6e\xa7\xe6\xd6\xc1\x87\x7a\x7a\x2c\xf1\x35\x4d\xa2\x2a\xe2\x11\xc0\xed\x8f\xaa\x52\x65\x3a\x68\x3b\x2c\x6e\xcf\x50\x47\xb0\x95\xeb\xdb\x64\x90\xa2\xf3\x35\x94\xee\x90\x54\x22\xaf\x08\x40\xfe\x49\x2e\xad\x2e\x5c\xb1\x70\xc3\xba\xdc\xfc\x93\x47\x6a\x5a\x2e\xbe\xd1\xd5\x59\x37\xb7\x48\x80\x0f\x28\x78\xd4\x7c\xa3\xfc\x95\x53\x0a\xd1\x0c\x6e\xc4\x13\x58\x9a\xda\x1e\x16\xcf\x55\xc7\x12\x8f\xe4\x27\x0c\xe6\x12\x4b\x85\x28\xdf\xb1\x0f\x8d\x53\x52\x55\x73\xfe\xa9\x2b\x2d\x7a\x04\x0c\x63\x76\x60\x0f\xcb\x1d\x15\xda\x6e\x62\x51\xbf\x27\x9a\x8e\x5f\xd5\x53\x17\x2a\xa0\x67\x4f\x93\xa6\xc7\x44\x4b\x48\x52\x6a\x06\xc6\xb6\x6f\xd1\x97\x93\x3e\xd2\x4c\xfa\xf8\x34\x0f\xea\x4c\x7c\xdc\xce\x50\x2d\x6a\x05\x6c\x24\xac\x9e\x10\xb3\x27\x42\x28\x3b\x7f\xca\x5e\x2d\x1d\xc8\xcf\xbd\x08\x25\xa7\xe4\xdc\xd5\x7e\xbf\x58\x5c\xfc\x34\x24\x01\x28\x04\xad\x13\xfc\xd1\x44\xda\x46\x9e\xc3\xba\xc3\x77\x80\x2c\x20\x29\xa0\xf3\x8f\x19\x15\xf2\x1e\x0e\x24\xda\x84\x5c\x71\xf7\xcb\x19\x94\xf6\xd3\xca\x81\xde\x7a\x3d\x1d\x9c\x24\xea\x64\x7d\x83\xd4\x91\x0d\x5f\xa2\x8f\x30\x78\x4e\xf2\x82\x9d\xa1\x5b\x0f\x6e\x7e\x49\x3a\x85\x08\x94\x0d\xa3\x2a\xbd\xb0\xde\x7c\xa9\xb7\xf4\x9e\x28\x13\x72\x5a\xcf\x40\x6d\xb5\x34\x4d\xf1\xcc\xa7\x84\x69\x05\x71\x08\x4d\x85\x12\x94\x35\x07\x5e\x24\x2d\xae\x17\x28\xcc\x62\x70\x3d\xc3\xe3\x43\x59\x9d\x8f\xb4\x87\x3e\x54\x8b\xf9\xb8\xcf\xf4\xc0\xfa\x90\x8c\xc9\xe3\xfe\x4a\x10\xe9\xfe\x48\x72\xe5\x7e\x55\x2f\x48\xcb\x74\xdc\x9c\x05\x98\x69\x38\x28\x5e\x07\x30\xab\x4c\x92\x6e\x62\x48\xda\x61\x46\x8c\x52\xb4\x11\x0f\x27\xbb\xf4\xb8\x3d\x53\xf5\xb8\x7d\x69\xf5\xf0\xe8\x3b\xa9\x40\x31\x07\x0f\xd3\x21\xc4\x6a\x51\x7a\x48\xb1\x5a\x2d\x32\x0e\xd3\x23\xb8\xb8\x63\xef\x6c\x83\xd1\x4f\x73\x0a\x29\x09\x1e\x56\x41\xb4\x09\x25\x96\x3c\xbb\x8d\xed\x92\x7a\x62\x60\x2a\xd4\x9c\x36\x70\x77\x9d\xb1\x11\xe7\xca\x5b\x8f\x35\xf7\xef\xdf\xad\x38\x88\xd6\xe5\x51\x15\x4e\xe2\x6a\x82\x17\x17\x59\x91\x5f\xcd\x7f\x82\x5f\x80\x79\xcd\xc0\x52\x5e\x7a\x4a\x09\x6a\x86\x7f\x8d\x57\xca\xd7\x1c\x37\x64\xf3\xdc\xb8\x97\xa1\x69\x4c\x0d\x91\x26\x7c\xd4\xa1\x71\x3d\x00\x8f\x71\x68\x92\xe4\x33\x6c\xbb\x78\x50\x36\xfd\x92\xe2\x57\x24\xe2\x03\xa8\xba\xdd\x22\x99\xaa\x6e\x91\xfa\x13\xe7\x90\x9e\x45\xc3\x43\x7c\x30\x3c\xb4\xc8\x6b\xa5\x77\x4f\x56\xfa\xc5\xcb\x3f\x0a\xfb\x58\xa3\x2f\x3e\x18\x1f\xb4\xcd\x10\x99\x18\x82\x78\x86\x0f\xcc\xb0\xf9\x6f\x4a\x67\x61\xbd\xc2\x4b\x84\xda\x3a\xa0\x83\x0b\x2e\x3b\x35\x4f\x8f\x0a\x8b\xd8\x32\xcf\x63\x2a\x52\xf7\xc1\x24\x91\x8f\xb6\x84\x8e\x56\xa1\x2b\xef\x28\xf9\x19\xa5\x8e\xcd\xfd\x1c\x72\x50\xf0\x5e\xf2\x68\x0d\xb6\x98\x94\xfd\xc4\xae\x4a\xdf\xd6\x42\x80\xb3\x34\xd0\xf2\xc7\x0c\x78\x3f\x69\xfe\xc7\x9e\x13\xc1\x18\x59\x32\x4c\xfb\x1e\xd2\x64\x79\x27\xd1\x21\xc8\x19\x6f\x2e\x6f\x1f\x59\x1a\x36\x47\xf6\x4
c\x7c\x4a\x8a\x21\xf4\x4a\x7b\xed\x69\x48\x60\x69\xb4\x69\xc5\x10\x7b\xc7\x00\xdd\x29\x65\xaa\x5c\x64\x33\xd8\xd4\x5d\x9a\x4a\xe1\x6d\x32\xdb\x71\x90\x60\xd3\xa0\x50\xd9\x23\xf7\x9c\xa1\x18\xb8\x65\x77\x19\xd4\x0f\x7b\xc5\x28\x1c\x41\x37\x69\xd3\xf8\xa5\x63\x7c\xce\xa1\x68\xab\x23\xe2\xf1\xe1\x04\x3d\x92\xf7\xad\x06\x7d\x7d\xe0\x48\x6f\xb6\x91\x04\xbd\xfb\xc2\x5e\xf9\xf0\x44\xfa\x2a\xb4\x85\x92\x9b\x9c\xa5\x56\x52\x69\xe9\xba\xc4\x3b\xe3\x50\x02\x4b\xd7\x3c\x4a\x73\xe9\x4a\x0f\x2f\xed\x61\xf4\x0e\x28\xdb\x94\x88\x11\x80\xcd\x8a\x6c\x41\x23\x74\xe8\x19\x78\x2f\x7e\x26\xee\x5a\x29\x95\x96\xc0\x63\x7d\xe8\x2e\xca\x36\x27\x81\xc8\x6b\x3f\x22\xcc\x02\x40\x72\x6b\x50\x30\xe5\xed\x20\xc5\x91\xd5\x1b\xee\x5e\xbf\x1d\x48\x05\x76\x84\x78\x26\xf4\x8b\xd8\x2f\xa4\xfe\xfb\x47\x93\xca\xa6\x0f\xe4\x2a\xfb\x9f\x13\x4d\xbb\x63\x16\xce\x3b\x85\x2f\x82\x01\x9c\x0b\x3a\x20\x00\x6f\xde\xad\x3d\xc8\x9c\xf6\x7b\xf7\x48\xd2\x3d\xb4\xf3\x76\xd9\xda\xfb\x3d\x49\x3c\x8a\xfa\x30\x07\x09\xd5\x76\xeb\x2b\x39\x84\x42\xfa\x49\x7e\x0d\x83\x3e\x0a\xc0\xc2\x7d\xc6\xca\x34\x36\xd4\x3d\xef\xf8\x8c\x78\xa0\x07\x61\xd0\x2a\xdb\xdd\x03\x25\x19\x29\x10\xe5\x52\xdb\xe5\xc8\x14\xd8\x39\x22\x2b\x17\x68\x8b\x13\x21\xb3\x9a\x2a\x36\x58\x86\x39\x03\x78\x19\x52\x30\x1e\xfe\x7b\x0a\x95\x10\xb1\x12\x37\x82\x01\xaf\xaa\xd2\x40\x77\x4f\xe7\xa1\x0e\x0d\xb2\x74\xf2\x3b\xb9\x53\x8d\x1b\x04\xbd\x14\xa4\xa0\x5a\x4e\x24\x09\x9b\xba\x37\x21\xa9\x9d\xd3\x2b\x03\x08\x02\xf1\x1b\x80\x1d\x75\x85\xd0\x4e\x0d\x57\x03\x10\xcf\xbb\xc8\x55\xba\xbb\x0a\x82\x8e\x6a\x3f\x7e\xc2\x51\xcf\x6a\xd9\x98\x3d\xe4\x7f\xa1\x50\x85\x90\x17\xe9\x2a\x99\xf9\xf8\xf5\x56\xbc\x2f\x12\x28\x5a\xb2\xf4\xa4\xf2\xa5\x6a\x23\xf1\x65\x21\xb0\xc9\x32\xff\x99\x3e\x6a\xd2\x06\xff\xb9\x15\x02\x49\x29\xaa\xe0\x2c\x29\xa4\x6f\x77\x68\x0d\x78\x53\x35\xd4\x17\xef\x05\xf7\x72\x86\x07\x72\x2f\xdb\x9d\xb9\x8a\xb7\x97\x46\xa1\xb1\x17\xd9\xc5\x7b\x51\x06\xd1\x8e\xf5\xcb\x10\x8d\xda\xf8\x86\x68\x8d\x5a\x71\xa9\x94\xa0\x0d\xe4\xa7\x20\x4e\x45\xfc\xbc\x79\x30\x48\x5b\x64\xe9\xdb\x86\x6c\x5e\x43\x21\x2a\x55\x32\x52\x7f\x99\xc9\xbe\xa3\x13\x96\xb7\x01\xac\x9c\xad\x12\x08\x21\x3f\xe5\xfa\xdf\xa5\x1e\x04\x12\x80\xc8\xad\xdb\xcd\xcc\x77\x0f\x1a\x34\xaa\x96\xb2\x4b\xf8\x68\xf6\xd8\x60\x7f\xb6\x20\x53\x98\x78\xa8\x58\x42\x77\xcc\x21\xa5\x7f\x94\xaf\x43\x52\x63\x40\x2b\x17\xb4\x1c\xc3\x3f\xe6\xd8\xa7\x39\xbb\xe7\x60\x73\xe4\xda\x99\x56\x7a\x85\x56\xd4\xa1\x06\x91\x0f\xc9\x3b\xa3\x3f\xbd\x58\x86\xc2\x93\x9d\xaf\x48\xd3\x6a\x8b\xd0\x00\x83\xdb\xa7\xe7\x14\x68\xa7\xde\x01\xef\xd3\x70\x92\x03\x0c\x8e\xfb\xd6\x88\x6c\x44\x91\x52\xbc\x19\x59\xf4\x7d\xef\x31\x61\x31\x40\x49\x36\xe6\x8d\xec\x59\xca\x5b\x45\xf4\xe8\x7b\xd6\xd0\xb6\x5b\x87\xcf\x8a\x88\xa2\x6e\x87\xe4\x2e\x4f\x91\x8a\x8f\xe8\x7b\x52\x86\x96\x01\xda\x62\x7b\xb2\x4f\x86\x72\x4b\x2f\x49\xaf\x52\x84\x8a\x72\x4e\xaf\x06\xa4\xc9\x74\x6b\xdd\x10\xfa\x54\x14\x48\x4b\x97\xbc\x81\xbb\x33\x68\x27\x21\xed\x09\xc3\x17\xe9\x1b\x4d\x99\xea\xa4\xd5\x4d\x8d\x2a\xbf\x59\xa7\x41\x91\xd2\x94\x3d\x5a\x5e\xc1\xf5\xff\x1c\xed\x2d\x6a\x0c\x92\x53\xe4\xf4\xdb\xc5\x50\xef\xe2\x53\xda\x31\xdc\x87\x50\x08\x76\x91\xce\x68\x87\xf6\x2a\xcb\x45\xfa\x4f\x2e\x47\x35\x11\x24\xe5\x06\xed\x52\x6f\x4b\x8f\x8b\xdf\x91\x59\xe4\x58\xc5\xa2\x30\x58\x8a\xf6\x8d\x6c\x76\x96\xe8\x43\xde\x29\x36\xa4\xc8\x35\x51\x62\x2c\xe9\xd3\x4f\xda\x8c\xb7\x94\xfa\xc5\xe7\xaf\xd0\xb7\xd2\x2b\x45\x0a\x53\x88\x64\x71\xca\x5e\x15\x78\x5b\xbf\x85\xd3\x90\xfa\x4d\xbe\x5c\x53\xdf\xec\x85\xea\x2d\xaf\xbd\x4d\x1e\x96\x50\x6d\xa9\xb7\x47\x5d\xfb\xf1\x18\x59\x38\x64\x79\x2e\x5e\xf3\xf4\x56\xd1\x6b\x6c\x81\x97\x3a\x07\x
14\xfb\x52\x75\x6e\x31\x8e\xb7\x66\x32\x1f\xfe\xb9\x40\x23\xcb\xdf\x55\x0d\xfe\x63\x9f\xb7\x67\x27\xfc\x74\x32\x10\x49\x67\x55\x3a\x0e\x99\x1e\xa1\x03\xf3\xe3\x66\xec\x82\x75\x73\x95\xd3\xc7\xba\x9f\xd2\xb3\xda\xf5\xd5\xd4\xad\x8e\xde\x90\xea\xc6\x60\x58\x33\x98\xe7\xea\xfe\xf4\xba\x9c\xdf\xfa\xf6\x6f\xdc\x20\x57\xd8\xaa\x22\x9e\xaa\xac\x8a\x2a\x27\xa1\xf5\xde\xff\xf8\x9b\x21\x96\x54\x0c\x8e\x49\x86\x97\x75\xe9\x74\x55\x9b\x15\xbe\xb1\x08\x19\xae\x27\x1c\xf9\xbd\x23\x8a\x6f\x31\x14\xbe\x62\xce\x1f\xeb\x02\x20\xba\xcc\x8a\x76\xaf\xc3\x46\xa6\x52\xf0\xc9\xf8\xf7\x05\x97\xf1\x93\xca\x00\x75\xa0\x6b\xbc\xf6\xcc\x77\xa9\x1d\xa2\x5a\xf5\x3d\xd5\xc4\x20\xb0\xed\x96\x8e\x9b\xe7\x19\xd3\x4c\x36\x58\xbf\x2c\x37\x9b\x95\x90\x4b\xd0\x36\xa8\xac\xb3\xc1\x1b\xce\x1e\x72\x73\xc2\x60\x4a\x6a\x41\x79\x2b\xd4\xb6\xcc\xc2\xf1\x40\x9d\xad\x6a\x2d\x6e\xa3\x90\x91\xd7\x1f\x22\x5c\x54\xce\xaa\xc7\x94\x68\x0c\x6e\x48\x5f\x4d\x02\xdc\xd1\x41\xd3\x13\xa8\x7c\x8b\x62\x3d\x37\x0c\x2a\x21\x83\x55\xf2\xcb\xbf\x10\x70\x13\x41\x92\x93\xd0\xbf\xa7\x0d\x7c\xcb\x4d\x6a\x4c\x5b\xa9\x68\x0b\x69\xa6\xfe\xc3\xfc\x6e\xda\x5a\x3d\x0d\x5d\x43\x6d\x1a\xb7\x65\x44\x0e\xf0\xc0\x17\x07\xbd\x4d\x0b\x5b\x10\xdc\x8a\xc6\x1c\xcc\xfa\x5b\x8b\xc5\x42\xce\xa7\x57\xdc\x86\xd9\xcb\x4b\x32\xe9\x62\xdc\x69\x40\x47\x96\xd0\xe8\x2a\x74\x8e\x6d\x61\x7d\x6f\x8c\xaa\x40\x82\x1e\xfb\x08\x43\x21\xad\xb4\x31\x20\xd8\x8a\x21\xce\x83\x5a\x73\x7c\x40\x7b\x70\x33\x66\x03\x8f\x94\x37\x4b\x43\x63\x0b\x80\xdd\x12\xe3\x9d\xb7\x14\x32\x5f\x20\x5c\x63\xf7\x89\x4c\x44\xbd\xc2\x44\x2f\xed\x96\x1a\xa9\xe3\x06\xc3\x96\xcb\x75\x1e\x5f\xe3\x5f\x05\x8c\xb0\xc2\x1f\x3c\xb9\xce\xb3\x29\xfd\x6f\x6b\x91\xf3\x5b\xea\xd0\xd9\xac\x9f\x23\x31\x77\x72\x85\x9a\x9b\xab\x5d\xbc\xd3\xb3\x5c\x6f\xfa\x1c\x20\xea\xf5\xf1\x12\xec\xed\x12\xcb\xb8\x19\xa9\x84\x52\xf2\x0e\x80\x14\xb9\x00\xa0\x40\xc6\xfd\x4b\xf0\x5c\x29\xe5\xef\xb2\x60\x3e\xf6\x1a\x1a\x29\xa5\x62\xc6\x30\xcf\xbf\xe1\x05\x08\x4a\x58\xe3\xde\x44\x60\x03\x39\x5b\x69\x88\xdc\x41\x35\x6b\x28\x49\x64\x0c\xcb\x25\x5b\xfe\xd7\xb0\x9c\x30\xd0\x0e\x9b\x75\x26\x68\x29\xe8\x80\xd0\xfa\xb9\x75\x1a\x30\x43\x52\xb6\xe8\x6e\x6b\x3c\xeb\x3d\xb0\xb7\x76\xc0\x3d\xc8\x11\x54\x2c\xb2\x6d\xcc\xa1\xf4\xf5\xe5\x27\xb2\x56\xad\x40\xac\xf5\x5e\xb8\x20\x8d\xee\x83\x2a\x2a\x35\x46\xb6\x55\x1e\x87\xd5\xdd\x04\x5e\x49\x52\x2c\xad\xf8\x26\xff\xa8\x8f\xcd\x44\xa1\xc5\x75\x14\x1d\x3b\x29\x59\x1e\x6d\x43\x6f\xa1\x1e\xd4\xc0\x01\x0b\x14\xff\x55\xcf\x10\x3a\xb3\x31\xb1\xea\xc2\xb0\xd6\x6a\xa0\x09\x71\x17\x1f\x6d\x51\x19\xc6\x85\xc6\x4a\x6d\x90\x00\x4e\x2d\x5e\x7a\xfb\xfb\x9b\x29\xe9\xff\x43\x88\x6e\xd5\x0d\xce\xe4\x38\x55\x08\x8b\xb9\x7c\x9c\x23\x8a\xd3\xb8\xa8\xd8\x9c\x6e\x9d\x1e\x89\xce\xab\x50\x0d\x55\x97\xaa\xd1\x61\xad\xcf\x78\xc6\xe5\x94\xfa\x4b\x05\x5d\xbc\xa4\xd5\xe6\x50\xee\xaa\x60\x6b\xe4\xf5\x42\x8d\x8d\x53\xa2\x35\xf2\x86\x1d\xa8\xec\x59\xfa\xec\x6d\x45\x62\x3a\xe5\xb5\xdc\xa1\x08\x29\x32\x32\xd4\x03\x49\x9f\xcb\xe9\xdb\x2e\x66\x64\xdb\xc7\x86\xb1\xf0\x22\x94\x3b\xc8\xe9\x3d\xb0\x02\x4b\x2d\x9c\x72\x84\xce\xe4\x5a\x2a\x15\xf8\x0d\x70\xc8\x35\x7b\xe2\xca\x02\x3c\x62\x83\xfd\x13\x0a\x64\x92\x46\x5b\x43\x23\xac\x49\x8b\xd5\x8f\x40\x69\x72\x63\xfc\xa0\x0d\x68\x40\x86\x72\x98\x8d\x24\x84\x60\xda\x97\xda\xd9\x33\x64\xc3\x16\x49\x99\xd9\xdd\xf4\x7c\x0a\x05\xd2\x56\xf2\xbe\xc4\x86\xc3\xd0\xf3\x2a\x03\x17\x8f\x3c\xbf\x5a\x02\x70\xee\x22\xd7\x4e\x49\x18\xe5\xaf\xc4\xbc\xde\x27\x0b\x89\x7c\xc1\xed\xeb\xf5\xc9\x4d\x39\xeb\x54\xb5\x6c\x7a\xa8\x81\x73\x85\x9a\x2a\x9b\xb7\x7d\x98\x7a\xa5\x88\x16\x39\x24\x46\xe6\xaa\xae\x14\xcc\x70\xcc\x45\x9c\xb5\x31\xbd\x80\x3b\
x85\x24\x7d\xab\x13\x93\xeb\x04\xc8\xe7\xae\x84\x24\xb2\x43\x3e\x0c\x4d\x68\x20\x3e\x62\x74\x5f\xda\x53\xaf\x38\x8a\xbf\xda\x26\xdd\x1c\x18\xf4\x24\x3e\x16\x9c\x08\xae\x3a\xa6\x6a\xe0\x41\xf3\xbd\x21\x55\x82\x02\x58\x88\x28\x1e\x42\xea\x0c\x3c\xfe\xca\x1d\x5d\x73\x5b\x98\x58\xf3\x7c\xa9\xc1\x60\x89\x28\x54\xc7\x42\xfe\xc7\x06\xc7\x22\xf9\x20\x84\xde\x38\xe8\x2f\x26\x86\x3b\xf0\x72\x6f\x51\x99\x6b\xef\xe4\x23\x44\x39\x94\x89\x5c\x11\xec\x2d\xa5\xaf\x25\x4b\x58\x2b\xa4\xbf\x96\xf4\x0a\xe9\xaf\xa7\x7d\xe5\x4d\x25\x6c\xd3\xeb\xec\x9f\xb1\x2b\xc7\x45\x9e\x7a\xe3\x7d\x59\xde\x36\x2a\x49\xd2\xac\x80\x42\xd9\xd1\x74\xd0\xf0\x81\x2c\xd8\xad\x4f\xbf\x87\xef\xa0\xb4\x8d\x26\xd4\x35\xfa\x92\x34\xef\xcc\x1e\x1d\xe3\xf3\xd6\x7e\x50\x3a\x85\xab\x7e\xfd\x50\xa7\x5b\xef\xb9\x38\xe6\x60\x69\xaa\x61\x55\x9a\xb8\x84\x29\x8e\xa0\x6b\xce\x35\xc3\x42\x49\x0c\x43\x45\x08\x8c\xb9\xe9\x17\xd8\x5a\x61\x53\x10\x13\xfb\x16\x25\xc6\xd8\x40\x00\xd5\xeb\xf5\xbd\x8b\x83\xeb\xd2\x18\x80\x75\x15\xad\xa5\x01\x51\xfc\xaf\x47\x34\x4c\x08\xa5\xa1\xe3\x0a\xf4\xd3\x28\x43\x36\x5c\x08\xa4\x21\x84\x83\x07\x4b\x7b\xd8\x4a\x6d\xde\x8d\x25\xd2\x36\x0a\x19\x74\x21\x66\x16\x12\x53\x1f\x89\x17\xa6\x7f\x91\xc6\xb6\xa6\x7b\x09\x31\x31\x9b\x19\x95\xd4\x94\xcc\x42\xa8\x2c\x6e\xd8\xec\xb4\x10\x0a\x83\x9c\x59\xfb\x3f\xf5\xfe\x92\xbc\x29\x50\x36\xe3\x6c\x7a\x45\xf2\xa4\x14\xb7\x6c\x38\xe3\x39\xce\x10\x0f\xc3\xc2\xa4\xbb\x06\xa1\x61\x56\x34\x2c\x3b\x97\x83\x1e\x2a\x74\x52\x34\xd5\x06\xd6\xd4\x10\xba\x65\xf7\xa0\xbb\x39\x14\x0b\xb0\x46\x42\x27\x72\x05\x42\x0f\x4d\x12\x6a\x47\x0e\xdd\xb2\xdc\xc4\xcc\x3c\xb9\x48\x34\x37\x2e\x68\xc6\x77\x07\x4e\xfe\x68\x80\x69\xfb\xde\xf9\x4a\x82\x09\x19\xd8\xda\x40\x44\xa0\x1a\x4c\xda\xa2\x6f\x49\x1b\x1c\x73\xbc\xd0\x56\x43\xec\x2c\xe4\xcc\x7e\xe2\xa1\x69\x7b\xa0\xa9\xb0\x8a\xdb\xa4\x47\xb8\x85\xf2\xd7\xf6\x1b\xde\x52\x48\x19\x82\x13\x52\xf7\x1b\x7e\xfb\x35\xad\x0c\x57\x59\x11\xef\xce\xbb\x58\xe2\x51\x35\x0d\xb0\xa8\xb1\x9a\x50\xfa\x29\xf2\x00\x4b\xbb\xd2\xd0\xa4\x9e\x1d\x3a\xd0\xf3\xaf\xa6\x5e\x69\xbe\xb4\xa7\x34\x6e\x10\x86\xb0\x67\xc6\x4d\xad\xe9\xf1\xe7\xb6\x1e\xd4\xeb\x4b\x0f\x78\x27\x79\x68\x39\xd5\x6c\x8a\x88\x0b\x5d\x32\xad\x34\xf9\xb4\x29\x94\xd7\xe6\x25\x04\xeb\x38\xd8\xa6\x09\x6b\x67\xdc\x33\xed\x66\x12\xf2\x79\x4c\x5b\x68\x9f\x4d\x21\xa8\x36\x65\x1b\x4c\x79\xfb\xbf\xbc\x30\x60\x44\x29\xf3\xd8\x88\xdf\x5a\xa1\x94\xa3\x87\x37\x0c\xea\x5b\x10\x24\x24\xe6\x21\x9b\xa0\x6d\x3f\xea\x01\xdb\x5a\x83\xc2\x0e\xab\x2e\x71\x2b\xfd\x73\x0b\xa9\x46\xc5\x87\x58\x69\x56\x87\xa7\x6e\x01\xe6\x87\xd6\xc1\xb0\x91\xd8\x34\x30\xd6\xe8\x53\xeb\x67\x91\x26\x1a\x9c\xee\x22\x71\xc0\x9e\x94\x50\xc4\x3d\xe4\xca\x54\x51\xeb\x20\x3a\x1e\xd2\x75\x1a\xce\x68\x8e\x9b\x3d\xc2\xed\xe5\xd3\x4b\x8d\xed\x22\x71\x34\x58\x3a\xc5\xc5\xb7\xc8\xfe\xb6\x12\xfc\x25\x8e\x38\xb9\x59\xee\xe0\xfe\xc4\x6a\x6b\xf8\xc3\xa1\xe8\x76\x7a\xc9\x06\xb1\xdc\x90\xd8\xa2\xf4\xd1\xed\x02\xe4\x22\xf3\x5a\xcc\xd8\x4d\x92\x6b\x1b\xf8\xcb\x7e\x0e\xc7\xd9\x72\x77\xce\xeb\xb7\xd4\x2f\x5b\x32\x74\x18\xa5\x9b\x04\xe2\xa6\xe8\x77\x20\xd7\x36\x1f\xb9\xc1\xa4\xbd\x0a\x4d\x5d\xea\x19\x0a\x6b\x48\x7e\x95\x30\x95\x99\xc3\xd2\x66\xc3\x7a\x32\xe5\xc2\x2a\xd3\xc1\x16\xb0\x22\xd1\xc6\x5e\x14\x3e\x65\xe5\x49\x49\xa6\x5a\xd9\xc4\xa0\xef\x76\x5e\x8c\x3f\x02\xae\x5d\x3c\x19\x08\xbf\xc5\x92\xe1\x52\xb9\x64\x8b\xf2\xdc\x54\xc5\x96\x00\x63\xb0\xcd\x1f\x97\x6a\x13\xb7\x8e\xd5\xc8\xaa\x06\x42\xfc\x16\x47\x83\xc4\xbf\x17\xae\xbe\xb1\x26\xfd\x93\xb6\x2c\xce\xe0\x95\x05\x47\x11\xab\x6d\xb6\x24\x47\x11\xa0\xb8\x5a\x89\xe7\x80\xd8\x74\x96\x9b\x76\xfc\xe6\xd3\x70\x45\x85\xbe\x5b\x5e\xb9\x6e\xe2\x98
\x1a\x52\x91\xdc\xb5\x94\x10\xeb\x2b\xf9\x06\xd9\x6c\x26\x3e\xdb\x69\xb2\x2c\x2b\xf8\x42\xf8\xf6\x4a\x08\x0c\xc2\x80\x59\x4a\x48\xa8\x7d\x38\x7f\x37\x10\x22\x6a\x6f\x7f\x4e\xf9\xee\x54\x3d\x4c\xa9\xa9\xf7\x95\xcf\x2d\xe4\xd3\x90\xb1\x59\xc8\xf9\x66\xf7\x4f\x19\x2c\xa4\x27\x53\x4d\x0b\xa4\x79\x89\x80\xdd\xc4\x02\xb2\x11\xfe\x6b\xa3\x99\xb7\xe4\x95\xba\x52\x0b\x98\x5a\x37\x01\x16\x12\x21\xcb\x25\x58\x1a\x16\x04\x6a\x44\x75\x4a\x13\x48\x2b\x9d\x6e\x32\xcf\xab\xd6\x08\x97\x1c\xba\x38\xe0\x14\x1d\x78\x01\x2c\x46\x89\xea\xc2\x36\x34\x59\x01\xbb\xb6\x62\xaf\x16\x1c\x70\x85\x12\xd8\x2b\x49\xb9\xeb\x0e\x9d\x39\x9b\xc3\xfb\xf2\x13\x18\x92\xdd\xd1\x0c\x25\x34\xe9\x72\x9d\x73\xfc\xee\x14\xb5\x24\x84\xcf\xb0\xe4\xe3\x35\xc3\x98\xa8\x16\x83\x01\x8f\xa9\x60\x4b\x62\x38\x8d\x95\x76\x6e\x5d\x6d\x57\x1c\x2b\x44\xd0\xa8\x3b\xb6\x9d\x4a\xba\x70\xb9\x33\x55\x65\xab\xd2\x5e\x8b\x2c\x7d\xe4\xd1\xe8\x03\x45\x66\x8c\x7a\x72\x60\x59\xa7\x80\x11\x2b\xe4\x58\x13\x9a\x45\xc4\xa5\x74\x09\xe9\x49\x7a\xaa\x35\x09\x36\x21\x98\x16\xa5\xb4\xdd\xd6\x90\xa1\xd3\xcc\xd0\xd7\x24\x04\x10\x8d\xc0\x4b\x78\x1e\x81\xb4\xca\xca\xd9\x04\xd6\xf4\xe4\x26\x29\xf9\xa5\xe9\x77\xd1\x20\x75\x5d\xbc\xfb\xf7\xb7\x97\xfa\x7a\x4a\xc1\x0d\xfc\x56\xbb\xa6\xcf\x2e\x75\xb6\x09\x70\x5c\x86\xc0\x59\xf2\xd2\x7b\x52\x2b\x3c\xe8\xfd\x79\x1f\x1f\x37\x5a\x9e\x3f\x5b\xca\xa0\x96\xd4\x9f\xf7\x6e\x63\xa4\x83\x45\x12\x5f\xf7\xc2\xf5\x2a\x07\x5e\xce\x2a\x78\x99\x50\x46\x23\x43\x1e\xc4\xc5\x24\x2d\x56\x9b\xf8\xd0\x51\xd7\xd8\xc7\x78\x8d\xe7\x21\x22\x30\x03\x48\x56\x12\xdc\x38\x5f\x75\x09\x31\x9e\x2c\x5d\x75\x64\xb5\x0f\x4a\xb5\x5b\xd9\x06\xee\x67\xc4\x63\x3f\xeb\x57\xe2\x63\x55\xda\x39\xcf\xca\xe5\x02\x17\x10\xcb\xa1\xc0\x75\x99\x71\xc6\x06\xfa\xac\xe2\x09\x86\x44\x98\x46\x7f\x68\x84\x29\x19\xf7\x59\x19\x6e\x63\x65\x27\x41\x2e\xc4\xd2\xf3\xa8\x06\x28\x62\x54\xb7\x0f\x37\x37\x69\x54\x3f\xab\xbb\x24\xad\xf0\x07\x09\x46\x55\x9e\x62\xe1\x82\x0b\xc0\xf3\x27\x51\xe6\x21\x54\xcf\x3a\xa9\x0e\xf9\x54\x3c\x87\x41\x91\x55\x43\x1f\x8c\xea\x45\xb5\x97\x14\x5a\x3f\xcf\x91\xd9\xfc\xac\x5d\x1c\xd6\xd1\xe5\xf4\x04\x91\x8a\x3c\xa9\x4f\x38\xed\xfc\x01\x6e\xc9\x5e\x2d\xe9\xd9\x9e\xf2\x7a\x3d\x97\x5b\x1c\x4c\x50\xd5\xd6\x65\x97\xf6\x1a\x96\x62\xf3\x53\xc9\x84\xf9\x37\xa8\x54\xc2\xe7\xa2\xb8\x37\x03\xeb\x53\xa0\x6c\xa1\xdf\xc4\x0e\x8a\x22\x51\xa1\x10\x49\x3c\x8e\x71\x8d\xdc\xb7\x73\x25\x29\x84\x9d\x3e\x91\x7f\x82\x2f\x56\x3a\x5f\xa0\x90\xd0\x3a\xda\xb3\xc0\x99\xae\xba\x16\xc6\xdb\xa3\xd4\xf9\x0c\xf1\x6c\x70\x41\xc4\x51\x5b\x24\x57\x19\xdc\x6e\xbd\x63\x48\xba\x7c\x02\x48\x56\xaf\x80\x38\x8c\x3b\x97\x2b\x4a\x5e\x01\x09\xd0\x1f\x5d\xc3\x06\x19\x72\xfd\x02\x26\x1e\x47\x1a\x95\x27\x08\x64\x79\xaa\x1c\x72\x6b\xa5\xe9\xc6\xcd\x69\xf1\x1e\xe0\x99\xad\x71\x7a\x79\x86\x06\x5e\x16\x9f\xdf\x33\x1c\x02\xcf\xc6\x7c\x88\x75\x0a\xc5\xe1\x3c\x3d\x11\x96\x0d\x32\x4b\x48\x0d\x41\xe3\xd2\x2b\xab\xeb\x6d\xdd\x17\xe3\xbe\x9e\x11\x55\xf1\xcc\x5b\xd3\xc8\x7e\x86\x0e\x35\xd8\xbd\xbe\xc8\x47\x66\xd3\xb3\xa1\x46\xcf\x0f\xf9\x0f\x27\x41\x3e\x9a\xfe\xd8\xc2\x49\xcd\x13\x53\xf7\x38\x90\x6a\x7a\x08\xd3\xa0\xde\x57\x2e\x6c\x09\x79\xe2\x6f\x74\x58\xe2\x9c\xea\x99\x3e\x71\x3b\x89\x92\x37\x4f\x9b\x87\x4b\x5e\xea\x69\xf3\xa7\xae\x36\x29\xb2\x0b\x8c\x3e\x52\xc8\xf3\xc5\x30\x82\xfd\xa0\xc1\xf9\x84\x2b\x58\x82\x6e\xc8\xca\x1c\xe3\x87\xea\x2d\x9f\x29\xd4\xd4\xa4\x22\xf9\x6c\xac\x87\x4f\x10\xd9\x35\x8d\xb0\x7d\x9f\x19\x76\xf7\xc4\x04\xc6\x5f\x88\x4d\x37\x98\xf1\xfe\x24\x9d\x1c\x51\x6a\xea\x66\xcb\xcd\xd3\x70\xf9\xd7\xca\xdd\x3e\x60\xbe\x0f\x9b\x0b\x82\x08\x2c\x7e\xe4\x81\xc7\xcf\x54\xfc\xeb\x4a\xb1\xe2\xfb\x4c\xad\xb9\x2b\xb
5\x1c\x09\x1e\xd5\x4b\xce\xf2\x51\x52\xa5\x8b\x2a\x63\xde\x2a\x0c\xfb\xe4\xe7\x71\x33\x2a\xf8\x71\xcb\x8c\x7e\x20\x7c\x4c\xe7\x99\x78\x1a\x2d\xfa\x3e\xaa\x56\xf8\x1f\x15\xb6\x2d\xd5\x12\x1f\x35\x7a\xbb\x87\x75\x52\x6e\xd7\x3c\xaa\x77\x28\x8f\x3a\x6f\xbe\x58\x48\x81\x32\xfe\x67\xa8\x85\xc7\x3a\xe9\xb6\x23\x7e\xcd\x8f\xb2\x35\x51\xf7\x07\x3a\x4d\xd6\xae\x44\x86\xe4\xc3\x99\xa4\x2e\x22\x2d\xc5\x3f\x82\x53\xd8\x41\xfe\x8f\x6a\x66\xc1\xeb\xfd\x80\x7f\x88\x15\x32\x03\xc4\xf5\x01\x48\xde\xf9\x48\x5f\x5f\xce\x7b\x24\x90\xe5\x39\x90\xd5\x61\xf3\xcb\x2f\x26\x00\x54\x30\xb3\x47\xcf\x75\x9f\x87\xdd\x2d\x43\x51\x1e\xe9\xe9\x6a\x8e\x8f\x04\x0d\x4b\x52\x78\x3c\x6c\xd8\xec\x28\x1f\xfa\xe0\xdc\xb6\x7c\x13\xcc\x32\x4a\x95\xbd\x13\xa5\xff\x6d\x2c\x62\xd0\x45\xb9\x90\xe4\x01\x70\x1e\x29\x8b\x4b\xa1\x9c\x92\x1f\x43\x44\x21\xbd\x03\x98\x1a\xa1\x22\x25\xfc\x4f\x2e\x88\xa6\x10\x0e\xe4\x22\x4e\x12\x37\x9b\xf2\x24\xb6\x4d\xcf\xbb\x3c\x79\x72\xd4\xdd\xbf\xb5\x02\x07\x31\xe5\xd7\x7a\x9b\xcf\xd3\xe5\x36\x7f\xf8\x30\xad\xa4\xa2\x90\x98\x04\xe7\x9b\x2e\x3a\x94\x14\xef\xb9\x2f\x2e\x7d\x40\xf2\x8c\x2a\x54\xf7\x21\xb1\x9f\x3b\xb8\x21\xe6\x9b\x44\x31\xf3\x2d\x6d\xcb\xf9\xde\xb8\x8c\x07\x90\x75\xb4\xa4\x9c\xee\x26\x98\x36\x5f\xc9\xee\xf1\xe0\xf5\xa4\x00\x75\x8f\xd2\x40\x0b\x49\xa8\xc3\x1e\xae\x7f\x2a\xb3\xd4\x84\xaa\x84\x7c\xe6\xfa\x3a\xf3\x4e\xbf\xae\xa7\xf2\xbb\x53\x79\x46\xc4\x5b\x88\x93\x61\x49\xd6\x01\x33\x24\xa0\x8c\x26\x55\x35\x43\x4d\x4e\x0c\xba\xbb\x3e\xfd\xf2\xd0\xff\x4d\x80\x05\x0f\x56\x06\xf2\x5c\xa3\x0a\x75\x2f\xfc\x81\x84\xc7\xea\xa4\x77\x34\x8b\x52\x76\x46\xca\xac\xce\xbf\x66\xf5\x53\x73\x5d\xa4\xcf\x54\x97\x6b\x3a\xf2\x3b\x0b\xeb\x1e\x9a\x2c\xbb\x43\xd2\x02\x18\xfc\xfd\xc8\xa9\x09\xe6\xac\x73\x2d\xce\x0a\xf8\xd4\x46\x92\xbc\x9b\x87\x68\xa8\x5a\x0a\x49\x9a\xe1\xab\x8f\xfa\x8d\x4a\x32\x98\x6d\xd2\x15\x12\x6a\xba\xab\x34\x85\x36\x5b\x39\x68\xca\x20\xe1\x01\xeb\x89\x4d\x8b\x0a\xf3\xa6\x8d\x20\xb4\xa8\x1c\xc5\x7f\xcd\xd8\xf6\x5a\x43\x63\x73\xb4\xae\x91\x58\x8a\x62\x45\x66\x9f\x07\x95\x48\xe5\x2c\x78\x32\x1c\x51\xf8\x0f\xe4\xac\x4d\xec\xab\x4c\xe1\x35\x30\xa8\xe1\x17\x32\xc4\x88\x6b\xe7\x11\x99\xec\x50\x60\xa0\x90\x48\x1b\x5c\xe1\xd2\x1f\x3b\xbe\xf1\x9a\x0d\xea\xf8\x03\x19\x18\x92\xcc\xc2\x86\x6c\x08\xa9\xa1\x55\x42\x90\xac\x4c\x65\x89\xcd\x76\x3e\x29\x3a\x00\x0d\x02\x9d\x74\xc7\x0e\x26\x33\x02\xf0\x5d\xe4\x9d\x47\xb8\x90\xda\x42\x88\xae\x58\xa2\x7d\x59\x1c\x85\x0e\x7e\xb2\x58\x1e\xd4\x23\xd3\x04\x1b\x2a\x87\x43\x35\xde\x86\x26\x91\x32\xe7\x95\xa4\x84\x00\x2a\x9d\x83\x38\xd4\xd6\xd4\xda\x91\x63\xc2\x7f\x2f\xab\xe4\xdb\x96\xb5\x49\x5d\xcf\x39\xde\x46\x0e\x09\xb5\x65\xa4\x77\x01\xb1\xee\xba\x80\xcd\x26\x0e\xa2\xf2\x0d\xf9\xb5\x98\x5f\x23\x33\xf2\xa3\x32\xfc\x0d\x4e\xe9\xd5\xe4\xde\x1c\x13\xfd\xb9\xd3\x50\x7d\xcb\x8c\x92\x37\xc0\x6a\x64\xd9\x72\x73\xfe\x31\x3b\x38\x8e\x7c\xdb\x39\xa7\x97\xaa\x9a\x86\x10\x0c\x43\x78\x4d\xa4\x05\xdb\x96\x22\x6a\x0c\xa5\x27\xe3\x37\xe6\xf4\x7e\x9e\xa1\x1f\xf6\x72\xf9\xd5\x50\x79\xbb\x58\xe8\xc1\xa5\x9b\x5d\x55\xba\x3b\x7d\xc4\x49\x4b\x55\x0e\xda\x2d\x9a\x29\x11\x9a\x89\x66\x7e\xbb\x6b\xc2\x25\xe0\x82\x2b\x03\x29\x3a\x25\xd0\x1e\x00\x61\xad\xa1\xf2\xb6\x96\xa6\xdd\xa6\xfc\x29\xa7\x3a\x8a\x7d\x85\x9d\x63\x3a\xa8\x6b\xe3\x60\x15\xc0\xbf\x59\xfb\xed\x1b\xda\x6d\x2f\xdd\x9c\x3a\x21\x97\x7a\xcb\x42\x7f\x84\xd7\x36\xb5\xaa\xa4\x9c\xf3\x39\xb5\xdc\x0f\x83\x23\x0b\xad\x20\x52\xb2\x6d\x09\x75\x36\x11\x0f\x1a\x3a\x93\x74\xec\x16\x56\x6d\x59\x22\xf3\x73\x6e\x72\x47\x33\x99\x71\xad\x50\xf2\x83\xa1\xc4\x43\x1e\x74\x44\xcf\x88\x00\xfd\xbd\x35\xdd\xd1\xcc\x9b\x9c\xa2\xc2\x13\x97\x05\xe6\x24\x9d\xc6\x34\x
e8\x8e\x87\x21\x5e\xa1\x07\x5e\xf3\x9a\x29\x04\xee\x9c\x00\x6e\xba\xbf\x5f\x79\xec\x0c\x7e\xbc\xb8\xfe\x48\xb8\x85\xb7\xc8\x50\x9b\x39\x41\x0e\x8e\xcb\x9a\xd3\x7d\x70\xf8\x71\x01\xb8\xc3\x81\x75\x1e\xb2\x7e\xa7\xbb\x89\x8e\xdd\xb4\x07\xa7\xbb\x2c\x2b\x1d\x11\x80\x85\xeb\x4f\x06\xf5\x5b\xf6\x76\x28\x25\xc6\x75\x87\xe7\xcd\x90\xe6\x58\xa0\x23\x2b\x49\xe2\x6a\x77\xa8\x8e\x21\xda\x9e\x86\xa1\xc1\x53\xfd\x39\xe0\xcc\xd2\x67\x8c\x13\x32\x95\x55\xc6\x7d\x1d\x35\x6b\x58\x9e\x0e\xb2\x93\x4e\x1e\x56\x3e\x8e\xc2\x3f\x15\x0d\x97\x8a\xd3\x5e\xe6\x15\x18\x50\xda\xed\xe4\x5c\x0e\x3c\x2a\x6f\xbf\x5f\x64\x7a\x19\x26\xb0\x39\xc8\xfa\x07\x8e\xff\x4e\xbc\x8d\xfc\x47\xb3\x13\x72\x06\xdc\x1b\x9f\x95\x6b\xc8\xf1\x22\xd2\xbd\x83\x82\xf7\x87\x60\xd0\xfa\xc1\x54\xf9\xcd\x4f\x88\xbf\xca\x2b\x19\x4b\x6c\x83\x8b\xae\x93\x8d\x58\xfe\x83\x1a\x22\xc7\x56\x1d\xbe\x24\x33\x52\xf4\xea\x6a\x15\xf5\x01\x5c\x53\xef\x24\xa9\x35\x68\x9c\xb2\x9a\x36\xdf\xae\x2c\x25\x1e\x86\x65\xf8\x85\x44\x12\x53\x5d\x5b\x6c\xa6\xe1\x8e\xcf\xbb\x2e\x75\xd3\xa1\x91\xe3\x0c\xa9\xb9\xeb\x48\xbb\x60\x70\x2b\x4f\x35\x2a\xba\xb8\x0d\x39\x55\xdd\xd5\xa4\x5f\xb9\xba\xb2\x23\x4a\x61\xe8\x57\xc3\x53\x77\x3d\x84\x62\xda\x22\xfd\xb0\x85\xa9\xa8\x13\x1c\x5a\x3c\xd8\x65\xeb\x3e\x42\xda\x57\x5e\x45\x92\x7a\xc5\x8c\xab\xa3\x48\x7c\xee\x64\x80\x30\x54\xe8\x9a\x18\xc1\x54\xb6\xca\x37\x53\x36\x29\xcd\xf9\x2a\xdc\xde\xde\x5c\x41\x52\x0f\x1b\xbc\x73\x57\x10\xc4\x3f\x27\x6a\x1c\x4f\x25\x77\x71\xf1\xae\x93\xd8\x5a\x31\x33\xd9\x9d\x16\x8e\x16\x86\xdc\x01\xf7\x7f\xe0\xdc\xb4\xec\x78\x87\x1c\x4b\x25\x75\xa7\x0d\xc0\x31\x02\x89\xb0\xee\xe9\x15\xb5\xb9\x6d\x91\x28\xdd\xa1\x66\x82\x0e\x9e\x2a\x8e\x59\x91\x30\x06\x32\x33\x8c\x26\x2c\x5e\xbb\xca\x9b\x02\x0c\xac\xec\xdb\x32\x08\x04\xed\x24\x9a\xd7\x5c\xd7\x54\xb3\xe3\xb5\xe8\x89\x9b\x72\x34\xa0\xf4\x66\x3c\xa6\xeb\xd6\xf1\x0e\xb0\x02\x97\x08\x0e\xad\x17\x39\x04\xb9\xf2\xc5\x8d\x3a\x52\x13\xc0\x60\xf9\x09\x3e\xa6\x83\x52\x5e\xc4\x79\xf9\x89\xd1\x29\x92\x17\xc2\x73\xa3\xda\x4e\xa2\x5f\xdb\x5a\x90\x1c\x3b\x40\x73\xa0\xc9\x89\x05\xbd\xc7\x80\x58\x06\xfb\x96\x14\x59\xa6\x93\x96\x52\x51\xaa\xf6\xcb\x12\x72\x77\x8b\x19\x6d\x3a\x9c\xd7\x44\xcb\x22\x2a\x13\xeb\x36\x90\x91\x64\x4a\x1d\x0d\x20\x68\xce\xb9\xd8\xd6\x87\x03\x03\xc4\xe6\x28\x48\x76\x73\x32\x07\x6d\x39\xe5\x5a\x18\x94\x8c\xd8\xfd\xbc\x15\x71\x38\xde\x8f\xa2\x81\x7a\x04\x0f\x44\xfe\x8f\xea\x72\xcc\xc6\x1d\xc9\x7e\x38\x1e\x1f\xfd\x10\x04\x0e\x5e\x5a\xb7\xf8\xd3\x75\x3b\x82\x50\x06\xa8\xd7\x64\x89\x4a\x74\xb1\x5b\xe6\xde\x88\x0c\x1b\x3f\xb3\x67\x76\x4a\xe9\xec\x28\x21\xd4\x76\x20\x92\xba\x19\x6b\xbe\x19\xf2\x70\xae\x6e\xc3\x7b\x3c\x42\x9a\xee\x94\xd0\x5d\x0b\x80\x18\xb5\x78\x6a\x25\xfd\x87\xd0\xb1\xd3\xf2\x82\x4b\xda\xc5\x51\x22\x2d\x01\xf2\x07\xea\x80\x39\xc3\x06\x21\x30\x24\x64\x66\xdf\x33\x6a\x93\xb5\x3c\x31\x1e\x4d\x31\xed\x48\x1f\x29\xad\x1d\xa1\x99\x38\x92\xfc\xbb\xfc\x27\xa1\x3c\xed\xdb\x42\x95\x0f\xab\x18\x67\xaf\xe7\xa9\xd5\x35\x03\x53\x3c\x0c\xce\x2e\xc6\xfa\xcf\x7f\x58\xdf\x8b\xa8\x66\xc7\x96\xaa\x89\xc0\x18\x1f\xa8\x0c\x6c\xe9\x92\x3a\x5f\x70\x45\x8d\x5a\x4c\x1a\xa5\x53\x3a\xd6\xe9\x62\xc1\x05\xd7\x11\xc1\x44\x97\xfe\x13\x53\xe1\x71\xf9\x4c\x94\x9e\x19\xed\xc1\xb1\x2c\x72\x82\x8c\xec\x72\xc6\x85\x2e\x15\x94\xda\x9f\x60\xd1\xf1\xe7\xde\xba\xc7\x07\xed\xfe\xb1\x20\x04\x84\x29\xfe\x23\xd4\x22\x78\x7c\xf9\x2d\x8c\x8d\xee\xfe\x53\xf3\x28\xb2\xa5\x46\x84\xee\x27\x21\x1a\x1f\x63\x59\x98\x68\x04\x2d\xbd\x5e\xc1\x8f\x86\xf9\xd2\xe0\xf9\xba\x04\x24\xd4\x07\xbf\xcc\x46\xc4\x4f\x12\xfd\x47\x13\xd2\x43\x22\x1e\x1b\x08\x30\x73\x99\x46\xda\x04\x23\x9c\x87\x0c\xb7\x31\xf8\xd4\x2e\xef\
xcf\x38\x3f\x1c\x95\x6a\x3a\x4a\x1f\x09\x25\x8b\xad\x84\x54\x63\xe6\x4a\xc3\x98\x49\x1f\x69\x3d\xa6\x0b\x02\x8d\xe1\x3a\xc4\xbc\x48\xf9\x99\x23\xe6\x77\xb3\xc8\x5a\x6d\xe3\x51\x79\xc9\xe2\x5c\xde\x0e\xe7\xa6\x0a\x48\xb1\xbc\x9c\xa3\xd9\x59\x63\x4d\x21\xe6\xa7\x00\x00\x43\xfa\xcc\x73\x38\xf7\x46\x31\x36\x8c\x29\xbe\x5f\x18\xe8\xfe\x1f\xd7\x08\xfa\x47\x74\x46\x6b\x4d\x5c\xc2\xb0\xb2\xd7\x36\xe7\x90\x23\x1a\xb4\x57\xc9\xfa\xb3\x5b\x3a\x4e\xc0\xf4\x08\x19\x4a\xfa\xc1\xd6\x4e\xb5\x91\xff\xde\x00\x9f\x77\xf2\xa4\x00\x1e\x2e\xa7\xd7\x88\x05\xff\x37\x41\x28\x0a\x3a\xe9\x5d\xe2\xcc\x7f\x4c\x0f\x95\x73\x48\x21\xa6\x4e\xbf\x6c\xb4\x25\xf9\x83\x30\x36\x89\xca\x7d\x34\x63\xce\xff\xe4\x23\xc9\xff\xfc\xf8\xfc\xd2\x88\xe5\xa1\xb5\xec\x89\xb3\x73\xb1\x01\x80\xd6\xcb\xdb\x55\xd6\xfd\xe6\xab\x2f\xa4\xf8\xcd\xd1\x76\xf3\xa9\x46\x94\x7f\x76\x9b\x0d\xd1\x0b\x8d\x4e\x0c\xd6\xdb\x09\x03\x99\x1b\x88\x1b\xf0\x06\x90\x91\xb3\xde\x71\x90\xc9\x9e\xa7\xee\x9f\xe0\xdf\xac\x8b\xe6\x62\x73\xa9\x58\xfe\x77\x09\xf2\x11\x98\xa9\xf2\x23\x1a\x14\x9d\x64\x10\x21\x3a\x63\x76\x48\x10\x36\x73\x3e\x07\x6b\x5a\xe6\x93\xcf\x72\x1e\x90\x9b\x95\x40\x24\x1f\x40\x4d\x90\xcf\x3a\xee\x8d\x61\x1b\x79\xe3\xe2\x84\xf2\x76\xf2\xf2\xf5\x79\x2c\xf8\x02\x19\x2c\x18\xad\x19\xf6\x00\x14\x12\x7d\x42\x8b\x8c\x14\xee\x1c\xe3\x8b\x01\xd1\x8d\xce\xbe\x64\xe5\xda\x01\xf1\x7f\x58\x62\xe6\xd9\x44\x87\x01\x32\xc7\x3d\xb9\x58\x56\xa6\x16\x8a\x75\xd8\x48\xa8\xf7\x33\x94\x48\xf2\x64\x2e\x7b\x0e\x84\x72\xa6\x46\x73\x9e\x6e\xf2\x82\xf9\xda\xa8\x9f\x72\xa8\xb7\x74\x1d\x07\x33\x2b\x98\xdd\x12\x71\x67\x59\xf9\xa0\xb9\xfb\x49\x0d\xbe\xc2\x49\x61\x9f\x0e\x2d\x1a\x10\xb6\x4b\xa7\x50\x69\x10\x59\x13\xa2\xe1\x7b\x73\xf0\x1d\x3e\xb4\x95\x87\x4f\xd4\x71\x78\x73\xde\x3e\x40\x88\xea\x4d\xc0\xb8\xe3\xe1\x3e\xdd\xd8\xb3\x32\x04\xf5\xee\xb3\x09\x15\x7e\xa8\x1b\x77\x1f\xf7\xae\xff\x1d\x97\xb8\x08\x05\xf5\x7f\xbe\xe2\xe1\x66\x5c\x34\x4a\x29\xc3\x0f\xb7\xa6\x97\x03\x79\x0a\x20\x63\xe8\x75\xbc\xf9\x89\xa1\x5c\x54\x25\x05\x58\x1a\x28\x8b\xaf\x11\xba\xd6\x21\xaf\x10\x92\x87\xf4\xdf\x0f\xf7\x4f\xf8\x76\x40\x34\x53\xe1\xde\xfe\x19\xd2\x7e\x58\xf1\x25\x27\xdf\x70\x77\x71\xcf\x64\x5f\x41\x29\x31\xc2\xdb\xed\x9b\xe1\xb8\x29\x9c\x06\x9d\x10\x3d\x84\x23\x9c\x5c\x03\x5c\x74\xbc\xf8\x21\x6e\xd6\xe1\xf8\x85\x17\x0e\x48\xe9\x8c\xdf\x24\xa6\x0e\x0e\x9e\xc0\xa8\x5d\x1d\x4d\xbd\xa1\x7e\xb8\xa7\xbe\x07\x3d\xde\x7a\x77\x5e\x1c\x21\xa7\x88\x21\x48\x92\x7b\xf5\xcf\x17\x39\x44\xe2\xdb\x50\x5b\x1c\xff\xa0\xf5\x43\x2b\x7b\xfe\x6b\xa3\x16\xfa\x50\x23\xbc\xdb\x91\x97\x88\x8f\xf2\xa5\xf0\xa1\x2e\x3c\xc6\xda\x07\x48\x5c\x89\xa5\x7a\x56\xe9\xe7\x1a\x6a\x91\x1e\x5b\x0b\xfc\x35\xa4\x82\x81\x75\x66\x84\x32\x25\x71\xa8\x14\xfd\x47\xf9\x53\x80\xec\x24\x8a\x08\x9e\xce\x2c\x4d\xb6\xc2\xd1\x95\x75\x2f\xf6\xbc\x4f\xee\x3e\x6a\xdf\x13\x85\xb2\xee\x50\xea\xee\xc5\x26\x9e\x41\x84\x16\x37\xc9\xc4\x35\x98\x11\x5d\x4e\x88\x3d\x91\x41\x24\x39\xf9\xe4\xde\xa7\x89\x2f\xe7\x02\x34\x18\x2a\x71\x65\x54\x24\x0f\x34\x1b\x55\x3b\xbc\xa1\x01\x19\x37\x47\x6c\xf2\xb4\x63\xd6\x5a\xa4\xc1\xd4\x49\xf3\x31\x87\x5c\xbc\xcb\x3b\x36\x88\x34\x3c\x31\xf3\xfa\xca\x92\x6c\x43\x17\x6e\xdc\xdd\x66\x1e\x4a\x68\xea\x95\xf4\x35\x6b\x2f\x4b\x3a\x32\xa9\x66\x59\x13\x16\x00\x69\x15\x86\xe6\x25\x00\x4b\x7f\x8e\x97\x30\xeb\x8b\x81\x23\xe4\x22\xf3\x8b\x0e\x96\x21\xe8\x77\x87\x3f\x49\x76\xd0\x7c\xac\x21\x5b\x78\x85\xab\x70\x08\x96\x5c\x17\xf8\x31\xa3\x2d\x24\x1f\x53\x68\x2d\xb6\x17\x03\x2f\x28\x8f\x3c\xa4\x2a\x18\x42\x94\xd0\xa8\x5d\xdb\x3f\x53\xa8\x4d\xfe\xef\xb6\xe1\x39\x84\x1d\x93\xf4\x0c\xf7\x34\xdc\xac\xce\xf6\x91\x60\xe1\x26\xf1\x40\x1f\x36\x78\xf8\xd6
\xf8\x94\x86\xac\xcc\x9e\x21\x2b\x70\x1f\xa0\x31\xa9\x62\xc3\x7e\x36\x6d\x9f\xb6\xc1\xdb\x5a\x33\x7b\x33\xd8\x91\xfc\xf9\x02\xf2\x01\x9e\x7e\xd9\xcb\x25\x90\x76\x1d\x56\x7f\x9a\x8f\x52\x48\xcd\x7a\x33\x0b\x52\x20\x02\x4a\x1b\x14\xeb\x7d\x7b\xaa\x8a\x3c\xb2\x4d\x4e\x29\x1d\x34\xb9\x4a\xbc\x64\xcc\x51\x4f\x6d\x29\x76\xc7\x20\xd4\xb2\x6a\x40\x06\x79\x08\x12\xa5\x77\x3c\x13\x04\xad\xf4\xba\x46\x5a\xee\xaf\xd0\x7c\xab\x1d\x20\x9c\x03\xd7\xe0\x21\xcf\x2c\xab\xdf\x20\x45\x62\xf3\x23\xe5\x78\x92\x0f\x88\x3c\x48\x8d\x72\xd6\xbf\xe7\x78\x31\x13\xfb\x2a\xcc\x6c\xb8\xfd\xd3\x27\x8d\x1e\xdc\x85\x48\x08\x86\x23\xee\x05\xd9\xa3\x90\x02\xe1\xc6\x5c\xd4\x58\xfa\x1c\xcd\x06\xba\x63\xbc\x44\xfb\x44\x72\x53\x2e\x85\xf0\x86\xc4\x7c\x29\x3f\xa9\xb7\x6a\x66\x14\x0f\x80\x00\x65\x28\x4f\x62\x0a\x4b\x85\x58\x17\x28\xa1\xef\x62\x48\x5a\x4f\x19\xd2\x4b\x2d\xcf\xe6\xdd\x7e\x42\x30\x26\x7b\x79\x4e\xf7\xce\x5e\x21\x35\xb5\x52\xcc\x80\x45\x3e\x3d\x84\x47\x75\x48\xf5\xe6\xa9\xb6\x53\x1a\xea\x83\x2f\xdd\xf2\x9f\x5b\xd3\x5f\x33\x9c\x67\xfd\xbb\xd7\x45\x57\x49\x7e\x42\xdf\x80\xd7\x5a\xfa\xd8\x35\x2f\x8d\x5c\x73\x48\x43\x30\xd5\x0c\xa9\xbf\x99\xda\x6d\x28\x49\xd2\x7b\x60\xa0\x62\xff\x39\x94\xd6\xd9\x7f\x10\x7a\x84\x7a\xf6\x6f\xee\xb8\xe5\xb6\xea\x6f\xa5\xab\x18\xf0\x06\x6d\xa5\xcf\x3f\x50\x56\x2f\x29\xf0\x8e\xd2\x8d\x0d\x00\xa9\x9f\xd9\x84\x59\x2a\x8b\xb7\x35\x3c\x9f\xcd\xf7\x66\x5e\x6f\x4a\xca\x84\x7e\xa4\x9f\xe2\x94\x2d\xd1\x43\x31\xae\xf0\x2c\x3f\x71\x2d\xc0\x95\x19\xd6\x80\xa1\x21\x59\xef\x36\x83\x80\xb2\x3c\xd5\x05\x1b\xed\x86\x99\x9e\x67\x72\x77\x23\x88\x93\xa8\x52\x07\x53\xb7\x0a\xc4\x60\xd1\x3b\x61\x83\xe4\xf8\x40\xb0\x20\xd5\x4a\x48\xa6\x90\x62\xa1\xaf\xdf\x6f\x96\xde\xe2\xcb\xbb\x09\x2a\x4e\xc6\xda\x31\xb6\x7a\x05\x2e\x19\x8e\xfc\xc6\x1e\x22\x00\xb3\x64\x14\xef\x58\x2d\xb3\xd6\x70\x84\xb4\xe3\x39\x81\x40\xc1\xa1\x8d\xb9\x14\x2d\xe9\xab\xd2\x84\xc0\x5b\x51\x29\x52\x07\x99\x49\x5e\xc8\x97\x9a\x43\x53\xb1\xd6\x2d\x02\xbe\x6c\x42\x1d\xd1\x21\x14\x94\xd4\x15\xa0\xc7\xc1\x3b\xae\x5b\x88\x65\x6e\x87\xa4\x28\xb1\x5c\xa8\x5f\x04\xff\x30\x1c\x63\x12\xd7\xc4\x4a\x36\x91\x52\x95\x0d\x34\x3d\xcd\xc5\x66\xc5\xaa\xe5\x22\x82\x0d\x50\xa5\xbd\xa4\x46\x57\x17\x09\x50\x16\x5d\x60\x9a\xf4\xc3\xbc\x49\x44\x32\x28\xfb\x7b\xd2\x77\xc2\x08\x56\x80\xb9\x43\xbe\x55\x5f\xb2\x0f\xfd\x49\x6a\xf0\x80\xe4\x4a\x34\xb3\xfd\x82\xd5\xd6\x8d\xc8\x5f\xcf\xa2\x47\xb7\x80\x62\x44\x69\x2c\xfd\xd2\x84\x19\x97\x18\x40\x30\xc1\x49\x43\xd3\x93\x0c\x41\x44\xb3\x5a\xd9\x44\x8b\x34\x5d\x9b\x40\x24\xf7\x42\x23\xac\x49\x47\xaa\x73\x05\x18\x14\x37\x0f\x71\x48\xc5\xc3\xf6\x33\x69\x73\xac\xaf\x3a\xa4\xea\x58\xec\x49\xf9\xb4\x08\xa2\x8f\x89\xa9\x5f\x98\xbc\xd1\x42\x75\x62\x7c\x21\x3e\xf7\xd9\x17\x95\x1f\x92\x8c\x4c\x1a\x6d\x81\xd2\xb4\x71\x60\x76\xa9\xc8\xa2\xdd\xfb\x7e\x4b\xa2\x12\xbd\x89\x4e\xb1\xc6\x24\x0d\xe4\x0c\xf7\xce\x75\x05\xe0\x31\xf6\x8e\x63\x9c\x6a\x90\x96\x25\xf4\x47\x42\x1c\x35\x9f\x21\x75\xe8\x4f\xb1\x0a\x49\xfe\x35\xab\x2b\xed\x7d\xf1\x92\xff\xdc\x94\x1d\xd7\x43\x27\x4a\x2a\x89\xd0\xbe\x08\xf9\xc7\x97\x74\x11\x41\xb2\x1c\x4a\x87\x27\x73\x2d\x7b\x10\x46\x95\x3e\x7e\x05\x26\xd2\x1c\xaa\x92\x21\x1a\x09\x22\xcc\x90\x28\x3c\x29\xfb\xa5\x8d\xf3\x8a\xd4\x51\x17\xa8\x44\xba\x2c\xf1\xae\xda\x26\x91\xa7\x01\x24\xa9\x28\xd2\x50\xf3\xd1\xe3\xd4\xe1\x89\xaa\x89\x8d\xcb\x1a\x39\x24\xf1\x49\x23\xe1\x61\xe2\xda\x09\xd2\x49\xf8\x94\x93\xf2\x6e\x7b\xef\xf7\x6b\x20\xc9\x3e\x1e\xbb\x27\x87\xe9\x4e\x8f\x75\x89\x1f\x85\x67\xcb\xe1\x19\xb0\x0c\x43\x11\xea\xeb\xa8\xeb\x4b\x1b\x92\xf2\x20\x28\xef\x26\x82\xd9\x69\x97\x8f\x5b\xbc\xc6\xee\xc9\x54\x25\x70\x12\x10\xc1\xbb\xc1\xa6\x4
8\x79\xf3\xbc\xdb\x25\x31\xcc\x2d\x64\x72\x95\x71\x6a\xe5\x64\xc6\x63\xe5\x57\x61\xc3\xd1\xae\xe3\x86\xf6\x40\xcc\xae\xd0\xd9\xd6\x9d\x47\x21\xe7\x91\x20\x1e\xc6\xda\xba\x29\x74\xd7\xda\x97\x8e\xa6\x73\x99\xf2\x4c\xb4\xbc\x42\x7f\x93\x92\xb5\x84\xc3\x4f\xef\xd5\x46\xbb\x3b\xf4\x2f\xfb\x38\xb6\x48\x7f\xb2\x17\xc7\x5c\x8f\x40\x7c\x0a\xbf\xa6\xae\x5e\x4c\xda\x31\xc8\x65\xe5\xee\x13\x22\x94\x5f\xea\xca\xfd\x59\x83\x83\x94\xa5\xf4\xeb\xee\x93\x46\x06\x44\x2d\xeb\xab\xf0\xc3\xb1\x8d\x4e\xb2\x93\xa7\xc4\xb3\x6e\x70\x7d\xf5\x9c\xa4\xda\x68\x7e\x35\x85\x4b\xc9\xb3\x00\x29\x2d\x1d\x50\x27\x67\x28\x75\x77\x87\x88\xe5\x31\x85\x24\xd8\xed\x4e\x77\xa1\x12\x87\x83\x57\x57\xd7\xdb\xf8\x25\x18\x08\xe5\xcb\x6d\x90\x0a\xe4\xbd\xea\x8e\x5c\xf3\x84\xa8\x34\x31\xcc\x45\x35\xc3\xa9\xb8\x07\x9c\x38\xfa\xa7\x99\xdb\x7c\x3a\xb7\xce\x4e\xa7\x5b\x67\x33\xdf\xbf\x72\x9a\x12\xc1\x3b\x3c\x84\xcd\x8a\x21\xb4\x33\x3f\x52\x53\x3b\xee\xed\x7c\x71\x01\x1b\x38\x94\xef\x40\x2c\xcb\x7b\x31\x14\xad\x01\xf2\x99\x7a\x27\xc7\xcd\x02\x6a\xcf\x89\xa8\xc9\x8b\x82\x7b\xeb\x43\x50\x9f\xfc\xc0\x0d\x7a\x52\x21\x61\x98\x89\x6e\xf9\xfd\xb4\x3b\x9d\x0d\x8b\x1d\x0b\x54\xb2\xf8\x02\x9d\x21\x27\x50\x9f\x7f\x55\x29\x10\x51\xf0\x8f\xa6\x73\x6a\x8a\x4b\xbf\x63\x07\x04\x30\x68\x8f\x2b\x63\xd2\x8f\xdb\xe1\xca\x71\x8b\x52\xff\x46\xf8\x9c\x6a\x80\x14\xe4\x9f\xa2\xe6\x65\xa3\x1a\x63\x8b\x7d\xab\x93\x32\xe1\xd1\x22\x5d\x00\xb7\xd0\x05\x85\x26\x7b\x3b\x8f\x3c\xf7\x1d\x18\xa1\x36\x01\x4e\x8e\x3d\x13\x6d\x10\x78\x86\x16\x69\xfa\x96\x40\x9f\x96\xfc\x00\x7d\xce\x55\x25\x8b\x50\x5b\x75\x42\x7b\xfd\x6b\x6b\x4a\x61\xd8\xe8\xd3\x15\x50\x29\xbf\xdd\xc1\x74\x8c\xce\x55\xb1\x59\xd7\xd4\x94\x4b\x81\x36\x19\xb5\xee\x1b\x8b\xfd\x5a\x1f\xef\x10\x49\xb5\x11\x78\x27\x0e\x21\xcf\x2d\x54\x3a\xdf\xd2\x38\x84\xd3\xe2\xd2\xf1\xf7\xe5\x0c\x2c\x00\xf3\x8f\xf9\x02\xca\xf7\x67\xf9\xdf\x1d\xff\x3b\x1e\x95\x6e\x08\xe0\xac\x9d\xd0\x69\xe8\x75\x34\xeb\x81\xc8\xfc\x22\x62\x00\x6e\xf0\xdf\x21\x57\x5a\xc1\x02\xed\x7d\x11\x02\x39\x55\x86\xd0\xe8\xf1\x61\xe5\x0e\x15\x12\x03\xb5\x4b\xf3\xb7\xc7\xc4\x4e\xa0\x1e\x89\x55\xaf\xfa\x52\xab\x04\x0f\xeb\xe6\x33\xab\x08\x68\xef\x6a\x48\x43\x75\x10\x55\xe3\xd9\xb6\xa4\x29\xba\x6b\x87\x52\x35\x4f\x64\xfa\x5d\x0d\x51\xbe\x16\x7a\xe5\x88\x65\x61\x6d\x4b\xd0\xe1\x19\x94\x1a\xa9\xe2\x01\xa0\x3d\x7a\x48\xc3\xa2\xab\x13\x62\xd0\x42\x30\x35\x9f\xfb\xb8\xdc\x93\xb0\x2e\xa6\x0e\xa0\x86\xea\xe6\xef\x33\xf3\xe5\x61\x9e\xdb\xb5\x66\xb3\x1e\xde\x10\x42\xa8\x7d\x84\xab\x1b\xec\x79\x6d\x26\x19\x41\xa2\x54\xe5\xcc\xdf\xa4\x81\x91\x66\x1d\xb2\x99\x58\x86\x0a\x29\x65\x11\xe0\xac\x55\xeb\x5a\x62\x7c\x87\xfa\x12\x13\x82\x0d\x35\x39\xd4\x05\xd1\x38\x3c\x57\xd2\xa7\x88\x60\x6c\x8e\x84\x2e\x70\x4a\x85\x4c\x28\x9a\xea\xcb\x7a\xd4\xe3\x19\x11\x24\xdd\x83\x4c\x2a\x5d\xf9\xba\xa7\xac\x43\x48\x4c\x25\xa0\xb3\xcb\x40\x6a\xad\xa7\x78\x9c\x9e\x15\x14\xfa\xea\x8a\xa4\x1c\x6d\xf2\x72\x25\x0f\xf1\x07\x94\x8a\xa1\x3a\x35\xfc\x98\x8f\xb0\xfc\x51\x35\x8e\x6c\x1f\x0a\xa2\x3a\x98\x34\xdb\x34\x44\x67\x0b\x54\x50\xe5\x45\xec\x4a\x04\xa0\x74\x66\x9d\xf2\x8b\x2e\x69\x54\x1b\x9b\x13\x73\x13\x3b\x7a\x26\xba\xfc\x61\x63\x0c\x57\x4e\x07\x56\x0d\xaf\x9d\xf5\x45\xfe\x8b\x1c\x4a\xae\xf6\xa4\x3c\x1c\x49\x1b\xb3\xf4\x50\xaf\x18\x79\x9a\x38\x33\x32\xbb\x6d\x06\x20\xc8\x79\x2b\xd2\xc4\x62\x98\x43\xa2\xd0\x25\x90\x9a\x12\xab\xcd\x38\x0b\xe1\xb3\xc9\xaf\x42\xb1\x87\xad\x1d\x3c\x2a\x92\x26\x85\xe6\x6e\xd6\x01\x64\x6b\x80\x68\xeb\x97\xdd\x5d\x3e\x98\xf2\xdf\x65\x24\xa1\xa9\xae\x1b\x96\x63\x89\xe6\x54\x24\xd3\x6a\x56\x1a\xdf\xbc\xc1\x90\x44\x5d\x99\x87\xd1\x99\x11\x92\x74\xf7\xcb\x22\x5b\x4a\xca\xaa\x01\x43\x
f6\xd4\x90\xbb\x65\x5c\x61\xb5\x1c\xf1\x23\x9b\x59\x86\x8c\x69\x7c\x0c\x59\x51\xa7\x5d\xe6\x2c\xa4\xcb\xb3\x9c\xd1\x90\x54\x65\xef\x94\x07\x9d\x0c\xc3\x46\x00\x2f\x43\x1c\x34\x43\x8f\x32\x85\x84\xac\x6d\x68\x6c\xcf\x69\x68\x75\x6a\xfd\x6a\xfa\x4a\x73\xf3\x73\xba\x03\xe3\xff\xff\x3f\xff\xdf\xff\x0d\x00\x00\xff\xff\x9b\xc6\xef\xe7\x7d\x0c\x06\x00") + +func dataSurnamesJsonBytes() ([]byte, error) { + return bindataRead( + _dataSurnamesJson, + "data/Surnames.json", + ) +} + +func dataSurnamesJson() (*asset, error) { + bytes, err := dataSurnamesJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/Surnames.json", size: 396413, mode: os.FileMode(420), modTime: time.Unix(1452717629, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "data/Dvorak.json": dataDvorakJson, + "data/English.json": dataEnglishJson, + "data/FemaleNames.json": dataFemalenamesJson, + "data/Keypad.json": dataKeypadJson, + "data/L33t.json": dataL33tJson, + "data/MacKeypad.json": dataMackeypadJson, + "data/MaleNames.json": dataMalenamesJson, + "data/Passwords.json": dataPasswordsJson, + "data/Qwerty.json": dataQwertyJson, + "data/Surnames.json": dataSurnamesJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "data": &bintree{nil, map[string]*bintree{ + "Dvorak.json": &bintree{dataDvorakJson, map[string]*bintree{}}, + "English.json": &bintree{dataEnglishJson, map[string]*bintree{}}, + "FemaleNames.json": &bintree{dataFemalenamesJson, map[string]*bintree{}}, + "Keypad.json": &bintree{dataKeypadJson, map[string]*bintree{}}, + "L33t.json": &bintree{dataL33tJson, map[string]*bintree{}}, + "MacKeypad.json": &bintree{dataMackeypadJson, map[string]*bintree{}}, + "MaleNames.json": &bintree{dataMalenamesJson, map[string]*bintree{}}, + "Passwords.json": &bintree{dataPasswordsJson, map[string]*bintree{}}, + "Qwerty.json": &bintree{dataQwertyJson, map[string]*bintree{}}, + "Surnames.json": &bintree{dataSurnamesJson, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+}
diff --git a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go b/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go
new file mode 100644
index 000000000..8f57ea0a4
--- /dev/null
+++ b/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go
@@ -0,0 +1,216 @@
+package entropy
+
+import (
+	"github.com/nbutton23/zxcvbn-go/adjacency"
+	"github.com/nbutton23/zxcvbn-go/match"
+	"github.com/nbutton23/zxcvbn-go/utils/math"
+	"math"
+	"regexp"
+	"unicode"
+)
+
+const (
+	numYears  = float64(119) // years match against 1900 - 2019
+	numMonths = float64(12)
+	numDays   = float64(31)
+)
+
+var (
+	startUpperRx            = regexp.MustCompile(`^[A-Z][^A-Z]+$`)
+	endUpperRx              = regexp.MustCompile(`^[^A-Z]+[A-Z]$`)
+	allUpperRx              = regexp.MustCompile(`^[A-Z]+$`)
+	keyPadStartingPositions = len(adjacency.GraphMap["keypad"].Graph)
+	keyPadAvgDegree         = adjacency.GraphMap["keypad"].CalculateAvgDegree()
+)
+
+// DictionaryEntropy calculates the entropy of a dictionary match
+func DictionaryEntropy(match match.Match, rank float64) float64 {
+	baseEntropy := math.Log2(rank)
+	upperCaseEntropy := extraUpperCaseEntropy(match)
+	//TODO: L33t
+	return baseEntropy + upperCaseEntropy
+}
+
+func extraUpperCaseEntropy(match match.Match) float64 {
+	word := match.Token
+
+	allLower := true
+
+	for _, char := range word {
+		if unicode.IsUpper(char) {
+			allLower = false
+			break
+		}
+	}
+	if allLower {
+		return float64(0)
+	}
+
+	//a capitalized word is the most common capitalization scheme,
+	//so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy.
+	//allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe.
+
+	for _, matcher := range []*regexp.Regexp{startUpperRx, endUpperRx, allUpperRx} {
+		if matcher.MatchString(word) {
+			return float64(1)
+		}
+	}
+	//Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or
+	//less. Or, if there's more uppercase than lower (e.g. PASSwORD), the number of ways to lowercase U+L letters
+	//with L lowercase letters or less.
+
+	countUpper, countLower := float64(0), float64(0)
+	for _, char := range word {
+		if unicode.IsUpper(char) {
+			countUpper++
+		} else if unicode.IsLower(char) {
+			countLower++
+		}
+	}
+	totalLenght := countLower + countUpper
+	var possibililities float64
+
+	for i := float64(0); i <= math.Min(countUpper, countLower); i++ {
+		possibililities += float64(zxcvbnmath.NChoseK(totalLenght, i))
+	}
+
+	if possibililities < 1 {
+		return float64(1)
+	}
+
+	return float64(math.Log2(possibililities))
+}
+
+// SpatialEntropy calculates the entropy for spatial matches
+func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 {
+	var s, d float64
+	if match.DictionaryName == "qwerty" || match.DictionaryName == "dvorak" {
+		//todo: verify qwerty and dvorak have the same length and degree
+		s = float64(len(adjacency.BuildQwerty().Graph))
+		d = adjacency.BuildQwerty().CalculateAvgDegree()
+	} else {
+		s = float64(keyPadStartingPositions)
+		d = keyPadAvgDegree
+	}
+
+	possibilities := float64(0)
+
+	length := float64(len(match.Token))
+
+	//TODO: Should this be <= or just < ?
+	//Estimate the number of possible patterns w/ length L or less with t turns or less
+	for i := float64(2); i <= length+1; i++ {
+		possibleTurns := math.Min(float64(turns), i-1)
+		for j := float64(1); j <= possibleTurns+1; j++ {
+			x := zxcvbnmath.NChoseK(i-1, j-1) * s * math.Pow(d, j)
+			possibilities += x
+		}
+	}
+
+	entropy := math.Log2(possibilities)
+	//add extra entropy for shifted keys (% instead of 5, A instead of a).
+	//Math is similar to extra entropy for uppercase letters in dictionary matches.
+
+	if S := float64(shiftCount); S > float64(0) {
+		possibilities = float64(0)
+		U := length - S
+
+		for i := float64(0); i < math.Min(S, U)+1; i++ {
+			possibilities += zxcvbnmath.NChoseK(S+U, i)
+		}
+
+		entropy += math.Log2(possibilities)
+	}
+
+	return entropy
+}
+
+// RepeatEntropy calculates the entropy for repeat matches
+func RepeatEntropy(match match.Match) float64 {
+	cardinality := CalcBruteForceCardinality(match.Token)
+	entropy := math.Log2(cardinality * float64(len(match.Token)))
+
+	return entropy
+}
+
+// CalcBruteForceCardinality calculates the brute force cardinality
+//TODO: Validate against python
+func CalcBruteForceCardinality(password string) float64 {
+	lower, upper, digits, symbols := float64(0), float64(0), float64(0), float64(0)
+
+	for _, char := range password {
+		if unicode.IsLower(char) {
+			lower = float64(26)
+		} else if unicode.IsDigit(char) {
+			digits = float64(10)
+		} else if unicode.IsUpper(char) {
+			upper = float64(26)
+		} else {
+			symbols = float64(33)
+		}
+	}
+
+	cardinality := lower + upper + digits + symbols
+	return cardinality
+}
+
+// SequenceEntropy calculates the entropy for sequences such as 4567 or cdef
+func SequenceEntropy(match match.Match, dictionaryLength int, ascending bool) float64 {
+	firstChar := match.Token[0]
+	baseEntropy := float64(0)
+	if string(firstChar) == "a" || string(firstChar) == "1" {
+		baseEntropy = float64(0)
+	} else {
+		baseEntropy = math.Log2(float64(dictionaryLength))
+		//TODO: should this be just the first or any char?
+		if unicode.IsUpper(rune(firstChar)) {
+			baseEntropy++
+		}
+	}
+
+	if !ascending {
+		baseEntropy++
+	}
+	return baseEntropy + math.Log2(float64(len(match.Token)))
+}
+
+// ExtraLeetEntropy calculates the added entropy provided by l33t substitutions
+func ExtraLeetEntropy(match match.Match, password string) float64 {
+	var subsitutions float64
+	var unsub float64
+	subPassword := password[match.I:match.J]
+	for index, char := range subPassword {
+		if string(char) != string(match.Token[index]) {
+			subsitutions++
+		} else {
+			//TODO: Make this only true for 1337 chars that are not subs?
+ unsub++ + } + } + + var possibilities float64 + + for i := float64(0); i <= math.Min(subsitutions, unsub)+1; i++ { + possibilities += zxcvbnmath.NChoseK(subsitutions+unsub, i) + } + + if possibilities <= 1 { + return float64(1) + } + return math.Log2(possibilities) +} + +// DateEntropy calculates the entropy provided by a date +func DateEntropy(dateMatch match.DateMatch) float64 { + var entropy float64 + if dateMatch.Year < 100 { + entropy = math.Log2(numDays * numMonths * 100) + } else { + entropy = math.Log2(numDays * numMonths * numYears) + } + + if dateMatch.Separator != "" { + entropy += 2 //add two bits for separator selection [/,-,.,etc] + } + return entropy +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go b/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go new file mode 100644 index 000000000..d056e4d4e --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go @@ -0,0 +1,50 @@ +package frequency + +import ( + "encoding/json" + "log" + + "github.com/nbutton23/zxcvbn-go/data" +) + +// List holds a frequency list +type List struct { + Name string + List []string +} + +// Lists holds all the frequency list in a map +var Lists = make(map[string]List) + +func init() { + maleFilePath := getAsset("data/MaleNames.json") + femaleFilePath := getAsset("data/FemaleNames.json") + surnameFilePath := getAsset("data/Surnames.json") + englishFilePath := getAsset("data/English.json") + passwordsFilePath := getAsset("data/Passwords.json") + + Lists["MaleNames"] = getStringListFromAsset(maleFilePath, "MaleNames") + Lists["FemaleNames"] = getStringListFromAsset(femaleFilePath, "FemaleNames") + Lists["Surname"] = getStringListFromAsset(surnameFilePath, "Surname") + Lists["English"] = getStringListFromAsset(englishFilePath, "English") + Lists["Passwords"] = getStringListFromAsset(passwordsFilePath, "Passwords") + +} +func getAsset(name string) []byte { + data, err := data.Asset(name) + if err != nil { + panic("Error getting asset " + name) + } + + return data +} +func getStringListFromAsset(data []byte, name string) List { + + var tempList List + err := json.Unmarshal(data, &tempList) + if err != nil { + log.Fatal(err) + } + tempList.Name = name + return tempList +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/go.mod b/vendor/github.com/nbutton23/zxcvbn-go/go.mod new file mode 100644 index 000000000..61b9a67ea --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/go.mod @@ -0,0 +1,9 @@ +module github.com/nbutton23/zxcvbn-go + +go 1.14 + +require ( + github.com/davecgh/go-spew v1.1.0 + github.com/pmezard/go-difflib v1.0.0 + github.com/stretchr/testify v1.1.4 +) diff --git a/vendor/github.com/nbutton23/zxcvbn-go/go.sum b/vendor/github.com/nbutton23/zxcvbn-go/go.sum new file mode 100644 index 000000000..656d00476 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/go.sum @@ -0,0 +1,5 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.1.4 h1:ToftOQTytwshuOSj6bDSolVUa3GINfJP/fg3OkkOzQQ= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/nbutton23/zxcvbn-go/match/match.go b/vendor/github.com/nbutton23/zxcvbn-go/match/match.go new file mode 100644 index 000000000..dd30bea04 --- /dev/null +++ 
b/vendor/github.com/nbutton23/zxcvbn-go/match/match.go
@@ -0,0 +1,44 @@
+package match
+
+//Matches is an alias for []Match used for sorting
+type Matches []Match
+
+func (s Matches) Len() int {
+	return len(s)
+}
+func (s Matches) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s Matches) Less(i, j int) bool {
+	if s[i].I < s[j].I {
+		return true
+	} else if s[i].I == s[j].I {
+		return s[i].J < s[j].J
+	} else {
+		return false
+	}
+}
+
+// Match represents different matches
+type Match struct {
+	Pattern        string
+	I, J           int
+	Token          string
+	DictionaryName string
+	Entropy        float64
+}
+
+//DateMatch is specifically a match for a date
+type DateMatch struct {
+	Pattern          string
+	I, J             int
+	Token            string
+	Separator        string
+	Day, Month, Year int64
+}
+
+//Matcher is a func and ID that can be used to match different passwords
+type Matcher struct {
+	MatchingFunc func(password string) []Match
+	ID           string
+}
diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go
new file mode 100644
index 000000000..8dfdf2410
--- /dev/null
+++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go
@@ -0,0 +1,209 @@
+package matching
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/nbutton23/zxcvbn-go/entropy"
+	"github.com/nbutton23/zxcvbn-go/match"
+)
+
+const (
+	dateSepMatcherName        = "DATESEP"
+	dateWithOutSepMatcherName = "DATEWITHOUT"
+)
+
+var (
+	dateRxYearSuffix    = regexp.MustCompile(`((\d{1,2})(\s|-|\/|\\|_|\.)(\d{1,2})(\s|-|\/|\\|_|\.)(19\d{2}|200\d|201\d|\d{2}))`)
+	dateRxYearPrefix    = regexp.MustCompile(`((19\d{2}|200\d|201\d|\d{2})(\s|-|/|\\|_|\.)(\d{1,2})(\s|-|/|\\|_|\.)(\d{1,2}))`)
+	dateWithOutSepMatch = regexp.MustCompile(`\d{4,8}`)
+)
+
+//FilterDateSepMatcher can be passed to zxcvbn-go.PasswordStrength to skip that matcher
+func FilterDateSepMatcher(m match.Matcher) bool {
+	return m.ID == dateSepMatcherName
+}
+
+//FilterDateWithoutSepMatcher can be passed to zxcvbn-go.PasswordStrength to skip that matcher
+func FilterDateWithoutSepMatcher(m match.Matcher) bool {
+	return m.ID == dateWithOutSepMatcherName
+}
+
+func checkDate(day, month, year int64) (bool, int64, int64, int64) {
+	if (12 <= month && month <= 31) && day <= 12 {
+		day, month = month, day
+	}
+
+	if day > 31 || month > 12 {
+		return false, 0, 0, 0
+	}
+
+	if !((1900 <= year && year <= 2019) || (0 <= year && year <= 99)) {
+		return false, 0, 0, 0
+	}
+
+	return true, day, month, year
+}
+
+func dateSepMatcher(password string) []match.Match {
+	dateMatches := dateSepMatchHelper(password)
+
+	var matches []match.Match
+	for _, dateMatch := range dateMatches {
+		match := match.Match{
+			I:              dateMatch.I,
+			J:              dateMatch.J,
+			Entropy:        entropy.DateEntropy(dateMatch),
+			DictionaryName: "date_match",
+			Token:          dateMatch.Token,
+		}
+
+		matches = append(matches, match)
+	}
+
+	return matches
+}
+func dateSepMatchHelper(password string) []match.DateMatch {
+
+	var matches []match.DateMatch
+
+	for _, v := range dateRxYearSuffix.FindAllString(password, len(password)) {
+		splitV := dateRxYearSuffix.FindAllStringSubmatch(v, len(v))
+		i := strings.Index(password, v)
+		j := i + len(v)
+		day, _ := strconv.ParseInt(splitV[0][4], 10, 16)
+		month, _ := strconv.ParseInt(splitV[0][2], 10, 16)
+		year, _ := strconv.ParseInt(splitV[0][6], 10, 16)
+		match := match.DateMatch{Day: day, Month: month, Year: year, Separator: splitV[0][5], I: i, J: j, Token: password[i:j]}
+		matches = append(matches, match)
+	}
+
+	for _, v := range
dateRxYearPrefix.FindAllString(password, len(password)) { + splitV := dateRxYearPrefix.FindAllStringSubmatch(v, len(v)) + i := strings.Index(password, v) + j := i + len(v) + day, _ := strconv.ParseInt(splitV[0][4], 10, 16) + month, _ := strconv.ParseInt(splitV[0][6], 10, 16) + year, _ := strconv.ParseInt(splitV[0][2], 10, 16) + match := match.DateMatch{Day: day, Month: month, Year: year, Separator: splitV[0][5], I: i, J: j, Token: password[i:j]} + matches = append(matches, match) + } + + var out []match.DateMatch + for _, match := range matches { + if valid, day, month, year := checkDate(match.Day, match.Month, match.Year); valid { + match.Pattern = "date" + match.Day = day + match.Month = month + match.Year = year + out = append(out, match) + } + } + return out + +} + +type dateMatchCandidate struct { + DayMonth string + Year string + I, J int +} + +type dateMatchCandidateTwo struct { + Day string + Month string + Year string + I, J int +} + +func dateWithoutSepMatch(password string) []match.Match { + dateMatches := dateWithoutSepMatchHelper(password) + + var matches []match.Match + for _, dateMatch := range dateMatches { + match := match.Match{ + I: dateMatch.I, + J: dateMatch.J, + Entropy: entropy.DateEntropy(dateMatch), + DictionaryName: "date_match", + Token: dateMatch.Token, + } + + matches = append(matches, match) + } + + return matches +} + +//TODO Has issues with 6 digit dates +func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { + for _, v := range dateWithOutSepMatch.FindAllString(password, len(password)) { + i := strings.Index(password, v) + j := i + len(v) + length := len(v) + lastIndex := length - 1 + var candidatesRoundOne []dateMatchCandidate + + if length <= 6 { + //2-digit year prefix + candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[2:], v[0:2], i, j)) + + //2-digityear suffix + candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-2], v[lastIndex-2:], i, j)) + } + if length >= 6 { + //4-digit year prefix + candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[4:], v[0:4], i, j)) + + //4-digit year sufix + candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-3], v[lastIndex-3:], i, j)) + } + + var candidatesRoundTwo []dateMatchCandidateTwo + for _, c := range candidatesRoundOne { + if len(c.DayMonth) == 2 { + candidatesRoundTwo = append(candidatesRoundTwo, buildDateMatchCandidateTwo(c.DayMonth[0:0], c.DayMonth[1:1], c.Year, c.I, c.J)) + } else if len(c.DayMonth) == 3 { + candidatesRoundTwo = append(candidatesRoundTwo, buildDateMatchCandidateTwo(c.DayMonth[0:2], c.DayMonth[2:2], c.Year, c.I, c.J)) + candidatesRoundTwo = append(candidatesRoundTwo, buildDateMatchCandidateTwo(c.DayMonth[0:0], c.DayMonth[1:3], c.Year, c.I, c.J)) + } else if len(c.DayMonth) == 4 { + candidatesRoundTwo = append(candidatesRoundTwo, buildDateMatchCandidateTwo(c.DayMonth[0:2], c.DayMonth[2:4], c.Year, c.I, c.J)) + } + } + + for _, candidate := range candidatesRoundTwo { + intDay, err := strconv.ParseInt(candidate.Day, 10, 16) + if err != nil { + continue + } + + intMonth, err := strconv.ParseInt(candidate.Month, 10, 16) + + if err != nil { + continue + } + + intYear, err := strconv.ParseInt(candidate.Year, 10, 16) + if err != nil { + continue + } + + if ok, _, _, _ := checkDate(intDay, intMonth, intYear); ok { + matches = append(matches, match.DateMatch{Token: password, Pattern: "date", Day: intDay, Month: intMonth, Year: intYear, I: i, J: j}) + } + + } + } + + 
return matches +} + +func buildDateMatchCandidate(dayMonth, year string, i, j int) dateMatchCandidate { + return dateMatchCandidate{DayMonth: dayMonth, Year: year, I: i, J: j} +} + +func buildDateMatchCandidateTwo(day, month string, year string, i, j int) dateMatchCandidateTwo { + + return dateMatchCandidateTwo{Day: day, Month: month, Year: year, I: i, J: j} +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go new file mode 100644 index 000000000..4ddb2c3b0 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go @@ -0,0 +1,57 @@ +package matching + +import ( + "strings" + + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" +) + +func buildDictMatcher(dictName string, rankedDict map[string]int) func(password string) []match.Match { + return func(password string) []match.Match { + matches := dictionaryMatch(password, dictName, rankedDict) + for _, v := range matches { + v.DictionaryName = dictName + } + return matches + } + +} + +func dictionaryMatch(password string, dictionaryName string, rankedDict map[string]int) []match.Match { + var results []match.Match + pwLower := strings.ToLower(password) + + pwLowerRunes := []rune(pwLower) + length := len(pwLowerRunes) + + for i := 0; i < length; i++ { + for j := i; j < length; j++ { + word := pwLowerRunes[i : j+1] + if val, ok := rankedDict[string(word)]; ok { + matchDic := match.Match{Pattern: "dictionary", + DictionaryName: dictionaryName, + I: i, + J: j, + Token: string([]rune(password)[i : j+1]), + } + matchDic.Entropy = entropy.DictionaryEntropy(matchDic, float64(val)) + + results = append(results, matchDic) + } + } + } + + return results +} + +func buildRankedDict(unrankedList []string) map[string]int { + + result := make(map[string]int) + + for i, v := range unrankedList { + result[strings.ToLower(v)] = i + 1 + } + + return result +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go new file mode 100644 index 000000000..610f1973f --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go @@ -0,0 +1,234 @@ +package matching + +import ( + "strings" + + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" +) + +// L33TMatcherName id +const L33TMatcherName = "l33t" + +//FilterL33tMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterL33tMatcher(m match.Matcher) bool { + return m.ID == L33TMatcherName +} + +func l33tMatch(password string) []match.Match { + permutations := getPermutations(password) + + var matches []match.Match + + for _, permutation := range permutations { + for _, mather := range dictionaryMatchers { + matches = append(matches, mather.MatchingFunc(permutation)...) + } + } + + for _, match := range matches { + match.Entropy += entropy.ExtraLeetEntropy(match, password) + match.DictionaryName = match.DictionaryName + "_3117" + } + + return matches +} + +// This function creates a list of permutations based on a fixed table stored on data. The table +// will be reduced in order to proceed in the function using only relevant values (see +// relevantL33tSubtable). 
+func getPermutations(password string) []string { + substitutions := relevantL33tSubtable(password) + permutations := getAllPermutationsOfLeetSubstitutions(password, substitutions) + return permutations +} + +// This function loads the table from data but only keep in memory the values that are present +// inside the provided password. +func relevantL33tSubtable(password string) map[string][]string { + relevantSubs := make(map[string][]string) + for key, values := range l33tTable.Graph { + for _, value := range values { + if strings.Contains(password, value) { + relevantSubs[key] = append(relevantSubs[key], value) + } + } + } + + return relevantSubs +} + +// This function creates the list of permutations of a given password using the provided table as +// reference for its operation. +func getAllPermutationsOfLeetSubstitutions(password string, table map[string][]string) []string { + result := []string{} + + // create a list of tables without conflicting keys/values (this happens for "|", "7" and "1") + noConflictsTables := createListOfMapsWithoutConflicts(table) + for _, noConflictsTable := range noConflictsTables { + substitutionsMaps := createSubstitutionsMapsFromTable(noConflictsTable) + for _, substitutionsMap := range substitutionsMaps { + newValue := createWordForSubstitutionMap(password, substitutionsMap) + if !stringSliceContainsValue(result, newValue) { + result = append(result, newValue) + } + } + } + + return result +} + +// Create the possible list of maps removing the conflicts from it. As an example, the value "|" +// may represent "i" and "l". For each representation of the conflicting value, a new map is +// created. This may grow exponencialy according to the number of conflicts. The number of maps +// returned by this function may be reduced if the relevantL33tSubtable function was called to +// identify only relevant items. +func createListOfMapsWithoutConflicts(table map[string][]string) []map[string][]string { + // the resulting list starts with the provided table + result := []map[string][]string{} + result = append(result, table) + + // iterate over the list of conflicts in order to expand the maps for each one + conflicts := retrieveConflictsListFromTable(table) + for _, value := range conflicts { + newMapList := []map[string][]string{} + + // for each conflict a new list of maps will be created for every already known map + for _, currentMap := range result { + newMaps := createDifferentMapsForLeetChar(currentMap, value) + newMapList = append(newMapList, newMaps...) + } + + result = newMapList + } + + return result +} + +// This function retrieves the list of values that appear for one or more keys. This is usefull to +// know which l33t chars can represent more than one letter. +func retrieveConflictsListFromTable(table map[string][]string) []string { + result := []string{} + foundValues := []string{} + + for _, values := range table { + for _, value := range values { + if stringSliceContainsValue(foundValues, value) { + // only add on results if it was not identified as conflict before + if !stringSliceContainsValue(result, value) { + result = append(result, value) + } + } else { + foundValues = append(foundValues, value) + } + } + } + + return result +} + +// This function aims to create different maps for a given char if this char represents a conflict. +// If the specified char is not a conflit one, the same map will be returned. In scenarios which +// the provided char can not be found on map, an empty list will be returned. 
This function was +// designed to be used on conflicts situations. +func createDifferentMapsForLeetChar(table map[string][]string, leetChar string) []map[string][]string { + result := []map[string][]string{} + + keysWithSameValue := retrieveListOfKeysWithSpecificValueFromTable(table, leetChar) + for _, key := range keysWithSameValue { + newMap := copyMapRemovingSameValueFromOtherKeys(table, key, leetChar) + result = append(result, newMap) + } + + return result +} + +// This function retrieves the list of keys that can be represented using the given value. +func retrieveListOfKeysWithSpecificValueFromTable(table map[string][]string, valueToFind string) []string { + result := []string{} + + for key, values := range table { + for _, value := range values { + if value == valueToFind && !stringSliceContainsValue(result, key) { + result = append(result, key) + } + } + } + + return result +} + +// This function returns a lsit of substitution map from a given table. Each map in the result will +// provide only one representation for each value. As an example, if the provided map contains the +// values "@" and "4" in the possibilities to represent "a", two maps will be created where one +// will contain "a" mapping to "@" and the other one will provide "a" mapping to "4". +func createSubstitutionsMapsFromTable(table map[string][]string) []map[string]string { + result := []map[string]string{{"": ""}} + + for key, values := range table { + newResult := []map[string]string{} + + for _, mapInCurrentResult := range result { + for _, value := range values { + newMapForValue := copyMap(mapInCurrentResult) + newMapForValue[key] = value + newResult = append(newResult, newMapForValue) + } + } + + result = newResult + } + + // verification to make sure that the slice was filled + if len(result) == 1 && len(result[0]) == 1 && result[0][""] == "" { + return []map[string]string{} + } + + return result +} + +// This function replaces the values provided on substitution map over the provided word. +func createWordForSubstitutionMap(word string, substitutionMap map[string]string) string { + result := word + for key, value := range substitutionMap { + result = strings.Replace(result, value, key, -1) + } + + return result +} + +func stringSliceContainsValue(slice []string, value string) bool { + for _, valueInSlice := range slice { + if valueInSlice == value { + return true + } + } + + return false +} + +func copyMap(table map[string]string) map[string]string { + result := make(map[string]string) + + for key, value := range table { + result[key] = value + } + + return result +} + +// This function creates a new map based on the one provided but excluding possible representations +// of the same value on other keys. 
+func copyMapRemovingSameValueFromOtherKeys(table map[string][]string, keyToFix string, valueToFix string) map[string][]string { + result := make(map[string][]string) + + for key, values := range table { + for _, value := range values { + if !(value == valueToFix && key != keyToFix) { + result[key] = append(result[key], value) + } + } + } + + return result +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go new file mode 100644 index 000000000..4577db8a4 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go @@ -0,0 +1,82 @@ +package matching + +import ( + "sort" + + "github.com/nbutton23/zxcvbn-go/adjacency" + "github.com/nbutton23/zxcvbn-go/frequency" + "github.com/nbutton23/zxcvbn-go/match" +) + +var ( + dictionaryMatchers []match.Matcher + matchers []match.Matcher + adjacencyGraphs []adjacency.Graph + l33tTable adjacency.Graph + + sequences map[string]string +) + +func init() { + loadFrequencyList() +} + +// Omnimatch runs all matchers against the password +func Omnimatch(password string, userInputs []string, filters ...func(match.Matcher) bool) (matches []match.Match) { + + //Can I run into the issue where nil is not equal to nil? + if dictionaryMatchers == nil || adjacencyGraphs == nil { + loadFrequencyList() + } + + if userInputs != nil { + userInputMatcher := buildDictMatcher("user_inputs", buildRankedDict(userInputs)) + matches = userInputMatcher(password) + } + + for _, matcher := range matchers { + shouldBeFiltered := false + for i := range filters { + if filters[i](matcher) { + shouldBeFiltered = true + break + } + } + if !shouldBeFiltered { + matches = append(matches, matcher.MatchingFunc(password)...) + } + } + sort.Sort(match.Matches(matches)) + return matches +} + +func loadFrequencyList() { + + for n, list := range frequency.Lists { + dictionaryMatchers = append(dictionaryMatchers, match.Matcher{MatchingFunc: buildDictMatcher(n, buildRankedDict(list.List)), ID: n}) + } + + l33tTable = adjacency.GraphMap["l33t"] + + adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["qwerty"]) + adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["dvorak"]) + adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["keypad"]) + adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["macKeypad"]) + + //l33tFilePath, _ := filepath.Abs("adjacency/L33t.json") + //L33T_TABLE = adjacency.GetAdjancencyGraphFromFile(l33tFilePath, "l33t") + + sequences = make(map[string]string) + sequences["lower"] = "abcdefghijklmnopqrstuvwxyz" + sequences["upper"] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + sequences["digits"] = "0123456789" + + matchers = append(matchers, dictionaryMatchers...) 
+ matchers = append(matchers, match.Matcher{MatchingFunc: spatialMatch, ID: spatialMatcherName}) + matchers = append(matchers, match.Matcher{MatchingFunc: repeatMatch, ID: repeatMatcherName}) + matchers = append(matchers, match.Matcher{MatchingFunc: sequenceMatch, ID: sequenceMatcherName}) + matchers = append(matchers, match.Matcher{MatchingFunc: l33tMatch, ID: L33TMatcherName}) + matchers = append(matchers, match.Matcher{MatchingFunc: dateSepMatcher, ID: dateSepMatcherName}) + matchers = append(matchers, match.Matcher{MatchingFunc: dateWithoutSepMatch, ID: dateWithOutSepMatcherName}) + +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go new file mode 100644 index 000000000..a93e45935 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go @@ -0,0 +1,67 @@ +package matching + +import ( + "strings" + + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" +) + +const repeatMatcherName = "REPEAT" + +//FilterRepeatMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterRepeatMatcher(m match.Matcher) bool { + return m.ID == repeatMatcherName +} + +func repeatMatch(password string) []match.Match { + var matches []match.Match + + //Loop through password. if current == prev currentStreak++ else if currentStreak > 2 {buildMatch; currentStreak = 1} prev = current + var current, prev string + currentStreak := 1 + var i int + var char rune + for i, char = range password { + current = string(char) + if i == 0 { + prev = current + continue + } + + if strings.ToLower(current) == strings.ToLower(prev) { + currentStreak++ + + } else if currentStreak > 2 { + iPos := i - currentStreak + jPos := i - 1 + matchRepeat := match.Match{ + Pattern: "repeat", + I: iPos, + J: jPos, + Token: password[iPos : jPos+1], + DictionaryName: prev} + matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) + matches = append(matches, matchRepeat) + currentStreak = 1 + } else { + currentStreak = 1 + } + + prev = current + } + + if currentStreak > 2 { + iPos := i - currentStreak + 1 + jPos := i + matchRepeat := match.Match{ + Pattern: "repeat", + I: iPos, + J: jPos, + Token: password[iPos : jPos+1], + DictionaryName: prev} + matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) + matches = append(matches, matchRepeat) + } + return matches +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go new file mode 100644 index 000000000..e0ed05229 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go @@ -0,0 +1,76 @@ +package matching + +import ( + "strings" + + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" +) + +const sequenceMatcherName = "SEQ" + +//FilterSequenceMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterSequenceMatcher(m match.Matcher) bool { + return m.ID == sequenceMatcherName +} + +func sequenceMatch(password string) []match.Match { + var matches []match.Match + for i := 0; i < len(password); { + j := i + 1 + var seq string + var seqName string + seqDirection := 0 + for seqCandidateName, seqCandidate := range sequences { + iN := strings.Index(seqCandidate, string(password[i])) + var jN int + if j < len(password) { + jN = strings.Index(seqCandidate, string(password[j])) + } else { + jN = -1 + } + + if iN > -1 && jN > -1 { + direction := jN - iN + if direction == 1 || 
direction == -1 { + seq = seqCandidate + seqName = seqCandidateName + seqDirection = direction + break + } + } + + } + + if seq != "" { + for { + var prevN, curN int + if j < len(password) { + prevChar, curChar := password[j-1], password[j] + prevN, curN = strings.Index(seq, string(prevChar)), strings.Index(seq, string(curChar)) + } + + if j == len(password) || curN-prevN != seqDirection { + if j-i > 2 { + matchSequence := match.Match{ + Pattern: "sequence", + I: i, + J: j - 1, + Token: password[i:j], + DictionaryName: seqName, + } + + matchSequence.Entropy = entropy.SequenceEntropy(matchSequence, len(seq), (seqDirection == 1)) + matches = append(matches, matchSequence) + } + break + } else { + j++ + } + + } + } + i = j + } + return matches +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go b/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go new file mode 100644 index 000000000..fd858f5d1 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go @@ -0,0 +1,88 @@ +package matching + +import ( + "strings" + + "github.com/nbutton23/zxcvbn-go/adjacency" + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" +) + +const spatialMatcherName = "SPATIAL" + +//FilterSpatialMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +func FilterSpatialMatcher(m match.Matcher) bool { + return m.ID == spatialMatcherName +} + +func spatialMatch(password string) (matches []match.Match) { + for _, graph := range adjacencyGraphs { + if graph.Graph != nil { + matches = append(matches, spatialMatchHelper(password, graph)...) + } + } + return matches +} + +func spatialMatchHelper(password string, graph adjacency.Graph) (matches []match.Match) { + + for i := 0; i < len(password)-1; { + j := i + 1 + lastDirection := -99 //an int that it should never be! + turns := 0 + shiftedCount := 0 + + for { + prevChar := password[j-1] + found := false + foundDirection := -1 + curDirection := -1 + //My graphs seem to be wrong. . . and where the hell is qwerty + adjacents := graph.Graph[string(prevChar)] + //Consider growing pattern by one character if j hasn't gone over the edge + if j < len(password) { + curChar := password[j] + for _, adj := range adjacents { + curDirection++ + + if strings.Index(adj, string(curChar)) != -1 { + found = true + foundDirection = curDirection + + if strings.Index(adj, string(curChar)) == 1 { + //index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc. + //for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted. + shiftedCount++ + } + + if lastDirection != foundDirection { + //adding a turn is correct even in the initial case when last_direction is null: + //every spatial pattern starts with a turn. + turns++ + lastDirection = foundDirection + } + break + } + } + } + + //if the current pattern continued, extend j and try to grow again + if found { + j++ + } else { + //otherwise push the pattern discovered so far, if any... + //don't consider length 1 or 2 chains. + if j-i > 2 { + matchSpc := match.Match{Pattern: "spatial", I: i, J: j - 1, Token: password[i:j], DictionaryName: graph.Name} + matchSpc.Entropy = entropy.SpatialEntropy(matchSpc, turns, shiftedCount) + matches = append(matches, matchSpc) + } + //. . . 
and then start a new search from the rest of the password + i = j + break + } + } + + } + return matches +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go b/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go new file mode 100644 index 000000000..4f68a6dca --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go @@ -0,0 +1,177 @@ +package scoring + +import ( + "fmt" + "github.com/nbutton23/zxcvbn-go/entropy" + "github.com/nbutton23/zxcvbn-go/match" + "github.com/nbutton23/zxcvbn-go/utils/math" + "math" + "sort" +) + +const ( + //for a hash function like bcrypt/scrypt/PBKDF2, 10ms per guess is a safe lower bound. + //(usually a guess would take longer -- this assumes fast hardware and a small work factor.) + //adjust for your site accordingly if you use another hash function, possibly by + //several orders of magnitude! + singleGuess float64 = 0.010 + numAttackers float64 = 100 //Cores used to make guesses + secondsPerGuess float64 = singleGuess / numAttackers +) + +// MinEntropyMatch is the lowest entropy match found +type MinEntropyMatch struct { + Password string + Entropy float64 + MatchSequence []match.Match + CrackTime float64 + CrackTimeDisplay string + Score int + CalcTime float64 +} + +/* +MinimumEntropyMatchSequence returns the minimum entropy + + Takes a list of overlapping matches, returns the non-overlapping sublist with + minimum entropy. O(nm) dp alg for length-n password with m candidate matches. +*/ +func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntropyMatch { + bruteforceCardinality := float64(entropy.CalcBruteForceCardinality(password)) + upToK := make([]float64, len(password)) + backPointers := make([]match.Match, len(password)) + + for k := 0; k < len(password); k++ { + upToK[k] = get(upToK, k-1) + math.Log2(bruteforceCardinality) + + for _, match := range matches { + if match.J != k { + continue + } + + i, j := match.I, match.J + //see if best entropy up to i-1 + entropy of match is less that current min at j + upTo := get(upToK, i-1) + candidateEntropy := upTo + match.Entropy + + if candidateEntropy < upToK[j] { + upToK[j] = candidateEntropy + match.Entropy = candidateEntropy + backPointers[j] = match + } + } + } + + //walk backwards and decode the best sequence + var matchSequence []match.Match + passwordLen := len(password) + passwordLen-- + for k := passwordLen; k >= 0; { + match := backPointers[k] + if match.Pattern != "" { + matchSequence = append(matchSequence, match) + k = match.I - 1 + + } else { + k-- + } + + } + sort.Sort(match.Matches(matchSequence)) + + makeBruteForceMatch := func(i, j int) match.Match { + return match.Match{Pattern: "bruteforce", + I: i, + J: j, + Token: password[i : j+1], + Entropy: math.Log2(math.Pow(bruteforceCardinality, float64(j-i)))} + + } + + k := 0 + var matchSequenceCopy []match.Match + for _, match := range matchSequence { + i, j := match.I, match.J + if i-k > 0 { + matchSequenceCopy = append(matchSequenceCopy, makeBruteForceMatch(k, i-1)) + } + k = j + 1 + matchSequenceCopy = append(matchSequenceCopy, match) + } + + if k < len(password) { + matchSequenceCopy = append(matchSequenceCopy, makeBruteForceMatch(k, len(password)-1)) + } + var minEntropy float64 + if len(password) == 0 { + minEntropy = float64(0) + } else { + minEntropy = upToK[len(password)-1] + } + + crackTime := roundToXDigits(entropyToCrackTime(minEntropy), 3) + return MinEntropyMatch{Password: password, + Entropy: roundToXDigits(minEntropy, 3), + MatchSequence: matchSequenceCopy, + 
CrackTime: crackTime, + CrackTimeDisplay: displayTime(crackTime), + Score: crackTimeToScore(crackTime)} + +} +func get(a []float64, i int) float64 { + if i < 0 || i >= len(a) { + return float64(0) + } + + return a[i] +} + +func entropyToCrackTime(entropy float64) float64 { + crackTime := (0.5 * math.Pow(float64(2), entropy)) * secondsPerGuess + + return crackTime +} + +func roundToXDigits(number float64, digits int) float64 { + return zxcvbnmath.Round(number, .5, digits) +} + +func displayTime(seconds float64) string { + formater := "%.1f %s" + minute := float64(60) + hour := minute * float64(60) + day := hour * float64(24) + month := day * float64(31) + year := month * float64(12) + century := year * float64(100) + + if seconds < minute { + return "instant" + } else if seconds < hour { + return fmt.Sprintf(formater, (1 + math.Ceil(seconds/minute)), "minutes") + } else if seconds < day { + return fmt.Sprintf(formater, (1 + math.Ceil(seconds/hour)), "hours") + } else if seconds < month { + return fmt.Sprintf(formater, (1 + math.Ceil(seconds/day)), "days") + } else if seconds < year { + return fmt.Sprintf(formater, (1 + math.Ceil(seconds/month)), "months") + } else if seconds < century { + return fmt.Sprintf(formater, (1 + math.Ceil(seconds/century)), "years") + } else { + return "centuries" + } +} + +func crackTimeToScore(seconds float64) int { + if seconds < math.Pow(10, 2) { + return 0 + } else if seconds < math.Pow(10, 4) { + return 1 + } else if seconds < math.Pow(10, 6) { + return 2 + } else if seconds < math.Pow(10, 8) { + return 3 + } + + return 4 +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go b/vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go new file mode 100644 index 000000000..1b989d194 --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go @@ -0,0 +1,40 @@ +package zxcvbnmath + +import "math" + +/* +NChoseK http://blog.plover.com/math/choose.html +I am surprised that I have to define these. . . Maybe i just didn't look hard enough for a lib. +*/ +func NChoseK(n, k float64) float64 { + if k > n { + return 0 + } else if k == 0 { + return 1 + } + + var r float64 = 1 + + for d := float64(1); d <= k; d++ { + r *= n + r /= d + n-- + } + + return r +} + +// Round a number +func Round(val float64, roundOn float64, places int) (newVal float64) { + var round float64 + pow := math.Pow(10, float64(places)) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + newVal = round / pow + return +} diff --git a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go b/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go new file mode 100644 index 000000000..9c34b1c8c --- /dev/null +++ b/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go @@ -0,0 +1,22 @@ +package zxcvbn + +import ( + "time" + + "github.com/nbutton23/zxcvbn-go/match" + "github.com/nbutton23/zxcvbn-go/matching" + "github.com/nbutton23/zxcvbn-go/scoring" + "github.com/nbutton23/zxcvbn-go/utils/math" +) + +// PasswordStrength takes a password, userInputs and optional filters and returns a MinEntropyMatch +func PasswordStrength(password string, userInputs []string, filters ...func(match.Matcher) bool) scoring.MinEntropyMatch { + start := time.Now() + matches := matching.Omnimatch(password, userInputs, filters...) 
+ result := scoring.MinimumEntropyMatchSequence(password, matches) + end := time.Now() + + calcTime := end.Nanosecond() - start.Nanosecond() + result.CalcTime = zxcvbnmath.Round(float64(calcTime)*time.Nanosecond.Seconds(), .5, 3) + return result +} diff --git a/vendor/github.com/nishanths/exhaustive/.gitignore b/vendor/github.com/nishanths/exhaustive/.gitignore new file mode 100644 index 000000000..24bde5301 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +*.swp +tags + +# binary +cmd/exhaustive/exhaustive +exhaustive diff --git a/vendor/github.com/nishanths/exhaustive/.travis.yml b/vendor/github.com/nishanths/exhaustive/.travis.yml new file mode 100644 index 000000000..bd342f558 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.x + - master + +# Only clone the most recent commit. +git: + depth: 1 + +notifications: + email: false diff --git a/vendor/github.com/nishanths/exhaustive/LICENSE b/vendor/github.com/nishanths/exhaustive/LICENSE new file mode 100644 index 000000000..32befa68f --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2020, Nishanth Shanmugham +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/nishanths/exhaustive/README.md b/vendor/github.com/nishanths/exhaustive/README.md new file mode 100644 index 000000000..633e19f01 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/README.md @@ -0,0 +1,70 @@ +# exhaustive + +[![Godoc](https://godoc.org/github.com/nishanths/exhaustive?status.svg)](https://godoc.org/github.com/nishanths/exhaustive) + +[![Build Status](https://travis-ci.org/nishanths/exhaustive.svg?branch=master)](https://travis-ci.org/nishanths/exhaustive) + +The `exhaustive` package and command line program can be used to detect +enum switch statements that are not exhaustive. + +An enum switch statement is exhaustive if it has cases for each of the enum's members. See godoc for the definition of enum used by the program. 
+ +The `exhaustive` package provides an `Analyzer` that follows the guidelines +described in the [go/analysis](https://godoc.org/golang.org/x/tools/go/analysis) package; this makes +it possible to integrate into existing analysis driver programs. + +## Install + +``` +go get github.com/nishanths/exhaustive/... +``` + +## Docs + +https://godoc.org/github.com/nishanths/exhaustive + +## Example + +Given the code: + +```diff +package token + +type Token int + +const ( + Add Token = iota + Subtract + Multiply ++ Quotient ++ Remainder +) +``` +``` +package calc + +import "token" + +func processToken(t token.Token) { + switch t { + case token.Add: + ... + case token.Subtract: + ... + case token.Multiply: + ... + } +} +``` + +Running the `exhaustive` command will print: + +``` +calc.go:6:2: missing cases in switch of type token.Token: Quotient, Remainder +``` + +Enums can also be defined using explicit constant values instead of `iota`. + +## License + +BSD 2-Clause diff --git a/vendor/github.com/nishanths/exhaustive/enum.go b/vendor/github.com/nishanths/exhaustive/enum.go new file mode 100644 index 000000000..ed0df642b --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/enum.go @@ -0,0 +1,146 @@ +package exhaustive + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +type enums map[string]*enumMembers // enum type name -> enum members + +type enumMembers struct { + // Names in the order encountered in the AST. + OrderedNames []string + + // Maps name -> (constant.Value).ExactString(). + // If a name is missing in the map, it means that it does not have a + // corresponding constant.Value defined in the AST. + NameToValue map[string]string + + // Maps (constant.Value).ExactString() -> names. + // Names that don't have a constant.Value defined in the AST (e.g., some + // iota constants) will not have a corresponding entry in this map. + ValueToNames map[string][]string +} + +func (em *enumMembers) add(name string, constVal *string) { + em.OrderedNames = append(em.OrderedNames, name) + + if constVal != nil { + if em.NameToValue == nil { + em.NameToValue = make(map[string]string) + } + em.NameToValue[name] = *constVal + + if em.ValueToNames == nil { + em.ValueToNames = make(map[string][]string) + } + em.ValueToNames[*constVal] = append(em.ValueToNames[*constVal], name) + } +} + +func (em *enumMembers) numMembers() int { + return len(em.OrderedNames) +} + +func findEnums(pass *analysis.Pass) enums { + pkgEnums := make(enums) + + // Gather enum types. + for _, f := range pass.Files { + for _, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + if gen.Tok != token.TYPE { + continue + } + for _, s := range gen.Specs { + // Must be TypeSpec since we've filtered on token.TYPE. + t, ok := s.(*ast.TypeSpec) + obj := pass.TypesInfo.Defs[t.Name] + if obj == nil { + continue + } + + named, ok := obj.Type().(*types.Named) + if !ok { + continue + } + basic, ok := named.Underlying().(*types.Basic) + if !ok { + continue + } + + switch i := basic.Info(); { + case i&types.IsInteger != 0: + pkgEnums[named.Obj().Name()] = &enumMembers{} + case i&types.IsFloat != 0: + pkgEnums[named.Obj().Name()] = &enumMembers{} + case i&types.IsString != 0: + pkgEnums[named.Obj().Name()] = &enumMembers{} + } + } + } + } + + // Gather enum members. 
+ for _, f := range pass.Files { + for _, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + if gen.Tok != token.CONST && gen.Tok != token.VAR { + continue + } + for _, s := range gen.Specs { + // Must be ValueSpec since we've filtered on token.CONST, token.VAR. + v := s.(*ast.ValueSpec) + for i, name := range v.Names { + obj := pass.TypesInfo.Defs[name] + if obj == nil { + continue + } + + named, ok := obj.Type().(*types.Named) + if !ok { + continue + } + + // Get the constant.Value representation, if any. + var constVal *string + if len(v.Values) > i { + value := v.Values[i] + if con, ok := pass.TypesInfo.Types[value]; ok && con.Value != nil { + str := con.Value.ExactString() // temp var to be able to take address + constVal = &str + } + } + + em, ok := pkgEnums[named.Obj().Name()] + if !ok { + continue + } + em.add(obj.Name(), constVal) + pkgEnums[named.Obj().Name()] = em + } + } + } + } + + // Delete member-less enum types. + // We can't call these enums, since we can't be sure without + // the existence of members. (The type may just be a named type, + // for instance.) + for k, v := range pkgEnums { + if v.numMembers() == 0 { + delete(pkgEnums, k) + } + } + + return pkgEnums +} diff --git a/vendor/github.com/nishanths/exhaustive/exhaustive.go b/vendor/github.com/nishanths/exhaustive/exhaustive.go new file mode 100644 index 000000000..bee01b108 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/exhaustive.go @@ -0,0 +1,207 @@ +// Package exhaustive provides an analyzer that checks exhaustiveness of enum +// switch statements. The analyzer also provides fixes to make the offending +// switch statements exhaustive (see "Fixes" section). +// +// See "cmd/exhaustive" subpackage for the related command line program. +// +// Definition of enum +// +// The Go language spec does not provide an explicit definition for enums. +// For the purpose of this program, an enum type is a package-level named type +// whose underlying type is an integer (includes byte and rune), a float, or +// a string type. An enum type must have associated with it one or more +// package-level variables of the named type in the package. These variables +// constitute the enum's members. +// +// In the code snippet below, Biome is an enum type with 3 members. (You may +// also use iota instead of explicitly specifying values.) +// +// type Biome int +// +// const ( +// Tundra Biome = 1 +// Savanna Biome = 2 +// Desert Biome = 3 +// ) +// +// Switch statement exhaustiveness +// +// An enum switch statement is exhaustive if it has cases for each of the enum's members. +// +// For an enum type defined in the same package as the switch statement, both +// exported and unexported enum members must be present in order to consider +// the switch exhaustive. On the other hand, for an enum type defined +// in an external package it is sufficient for just exported enum members +// to be present in order to consider the switch exhaustive. +// +// Flags +// +// The analyzer accepts 4 flags. +// +// The -default-signifies-exhaustive boolean flag indicates to the analyzer +// whether switch statements are to be considered exhaustive as long as a +// 'default' case is present (even if all enum members aren't listed in the +// switch statements cases). The default value is false. +// +// The -check-generated boolean flag indicates whether to check switch +// statements in generated Go source files. The default value is false. +// +// The -ignore-pattern flag specifies a regular expression. 
Member names +// in enum definitions that match the regular expression do not require a case +// clause to satisfy exhaustiveness. The regular expression is matched against +// enum member names inclusive of the import path, e.g. of the +// form: github.com/foo/bar.Tundra, where the import path is github.com/foo/bar +// and the enum member name is Tundra. +// +// The behavior of the -fix flag is described in the next section. +// +// Fixes +// +// The analyzer suggests fixes for a switch statement if it is not exhaustive. +// The suggested fix always adds a single case clause for the missing enum members. +// +// case MissingA, MissingB, MissingC: +// panic(fmt.Sprintf("unhandled value: %v", v)) +// +// where v is the expression in the switch statement's tag (in other words, the +// value being switched upon). If the switch statement's tag is a function or a +// method call the analyzer does not suggest a fix, as reusing the call expression +// in the panic/fmt.Sprintf call could be mutative. +// +// The rationale for the fix using panic is that it might be better to fail loudly on +// existing unhandled or impossible cases than to let them slip by quietly unnoticed. +// An even better fix may, of course, be to manually inspect the sites reported +// by the package and handle the missing cases if necessary. +// +// Imports will be adjusted automatically to account for the "fmt" dependency. +// +// Skipping analysis +// +// If the following directive comment: +// +// //exhaustive:ignore +// +// is associated with a switch statement, the analyzer skips +// checking of the switch statement and no diagnostics are reported. +// +// No diagnostics are reported for switch statements in +// generated files (see https://golang.org/s/generatedcode for definition of +// generated file), unless the -check-generated flag is enabled. +// +// Additionally, see the -ignore-pattern flag. +package exhaustive + +import ( + "go/ast" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// Flag names used by the analyzer. They are exported for use by analyzer +// driver programs. +const ( + DefaultSignifiesExhaustiveFlag = "default-signifies-exhaustive" + CheckGeneratedFlag = "check-generated" + IgnorePatternFlag = "ignore-pattern" +) + +var ( + fDefaultSignifiesExhaustive bool + fCheckGeneratedFiles bool + fIgnorePattern regexpFlag +) + +func init() { + Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "indicates that switch statements are to be considered exhaustive if a 'default' case is present, even if all enum members aren't listed in the switch") + Analyzer.Flags.BoolVar(&fCheckGeneratedFiles, CheckGeneratedFlag, false, "check switch statements in generated files also") + Analyzer.Flags.Var(&fIgnorePattern, IgnorePatternFlag, "do not require a case clause to satisfy exhaustiveness for enum member names that match the provided regular expression pattern") +} + +// resetFlags resets the flag variables to their default values. +// Useful in tests. 
+func resetFlags() { + fDefaultSignifiesExhaustive = false + fCheckGeneratedFiles = false + fIgnorePattern = regexpFlag{} +} + +var Analyzer = &analysis.Analyzer{ + Name: "exhaustive", + Doc: "check exhaustiveness of enum switch statements", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + FactTypes: []analysis.Fact{&enumsFact{}}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + e := findEnums(pass) + if len(e) != 0 { + pass.ExportPackageFact(&enumsFact{Enums: e}) + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + err := checkSwitchStatements(pass, inspect) + return nil, err +} + +// IgnoreDirectivePrefix is used to exclude checking of specific switch statements. +// See package comment for details. +const IgnoreDirectivePrefix = "//exhaustive:ignore" + +func containsIgnoreDirective(comments []*ast.Comment) bool { + for _, c := range comments { + if strings.HasPrefix(c.Text, IgnoreDirectivePrefix) { + return true + } + } + return false +} + +type enumsFact struct { + Enums enums +} + +var _ analysis.Fact = (*enumsFact)(nil) + +func (e *enumsFact) AFact() {} + +func (e *enumsFact) String() string { + // sort for stability (required for testing) + var sortedKeys []string + for k := range e.Enums { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + var buf strings.Builder + for i, k := range sortedKeys { + v := e.Enums[k] + buf.WriteString(k) + buf.WriteString(":") + + for j, vv := range v.OrderedNames { + buf.WriteString(vv) + // add comma separator between each enum member in an enum type + if j != len(v.OrderedNames)-1 { + buf.WriteString(",") + } + } + // add semicolon separator between each enum type + if i != len(sortedKeys)-1 { + buf.WriteString("; ") + } + } + return buf.String() +} + +func enumTypeName(e *types.Named, samePkg bool) string { + if samePkg { + return e.Obj().Name() + } + return e.Obj().Pkg().Name() + "." + e.Obj().Name() +} diff --git a/vendor/github.com/nishanths/exhaustive/generated.go b/vendor/github.com/nishanths/exhaustive/generated.go new file mode 100644 index 000000000..19b4fb12b --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/generated.go @@ -0,0 +1,34 @@ +package exhaustive + +import ( + "go/ast" + "strings" +) + +// Adapated from https://gotools.org/dmitri.shuralyov.com/go/generated + +func isGeneratedFile(file *ast.File) bool { + for _, c := range file.Comments { + for _, cc := range c.List { + s := cc.Text // "\n" already removed (see doc comment) + if len(s) >= 1 && s[len(s)-1] == '\r' { + s = s[:len(s)-1] // Trim "\r". + } + if containsGeneratedComment(s) { + return true + } + } + } + + return false +} + +func containsGeneratedComment(s string) bool { + return strings.HasPrefix(s, genCommentPrefix) && + strings.HasSuffix(s, genCommentSuffix) +} + +const ( + genCommentPrefix = "// Code generated " + genCommentSuffix = " DO NOT EDIT." 
+) diff --git a/vendor/github.com/nishanths/exhaustive/go.mod b/vendor/github.com/nishanths/exhaustive/go.mod new file mode 100644 index 000000000..4db5aeb01 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/go.mod @@ -0,0 +1,8 @@ +module github.com/nishanths/exhaustive + +go 1.14 + +require ( + golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect + golang.org/x/tools v0.1.4 +) diff --git a/vendor/github.com/nishanths/exhaustive/go.sum b/vendor/github.com/nishanths/exhaustive/go.sum new file mode 100644 index 000000000..20d958ec4 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/go.sum @@ -0,0 +1,28 @@ +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/nishanths/exhaustive/regexp_flag.go 
b/vendor/github.com/nishanths/exhaustive/regexp_flag.go new file mode 100644 index 000000000..3a9ef7353 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/regexp_flag.go @@ -0,0 +1,35 @@ +package exhaustive + +import ( + "regexp" +) + +type regexpFlag struct { + r *regexp.Regexp +} + +func (v *regexpFlag) String() string { + if v.r != nil { + return v.r.String() + } + return "" +} + +func (v *regexpFlag) Set(expr string) error { + if expr == "" { + v.r = nil + return nil + } + + r, err := regexp.Compile(expr) + if err != nil { + return err + } + + v.r = r + return nil +} + +func (v *regexpFlag) Get() interface{} { + return v.r +} diff --git a/vendor/github.com/nishanths/exhaustive/switch.go b/vendor/github.com/nishanths/exhaustive/switch.go new file mode 100644 index 000000000..1a88eec0c --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/switch.go @@ -0,0 +1,444 @@ +package exhaustive + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/ast/inspector" +) + +func isDefaultCase(c *ast.CaseClause) bool { + return c.List == nil // see doc comment on field +} + +func checkSwitchStatements( + pass *analysis.Pass, + inspect *inspector.Inspector, +) error { + comments := make(map[*ast.File]ast.CommentMap) // CommentMap per package file, lazily populated by reference + generated := make(map[*ast.File]bool) + return checkSwitchStatements_(pass, inspect, comments, generated) +} + +func checkSwitchStatements_( + pass *analysis.Pass, + inspect *inspector.Inspector, + comments map[*ast.File]ast.CommentMap, + generated map[*ast.File]bool, +) error { + inspect.WithStack([]ast.Node{&ast.SwitchStmt{}}, func(n ast.Node, push bool, stack []ast.Node) bool { + if !push { + return true + } + + file := stack[0].(*ast.File) + + // Determine if file is a generated file, based on https://golang.org/s/generatedcode. + // If generated, don't check this file. + var isGenerated bool + if gen, ok := generated[file]; ok { + isGenerated = gen + } else { + isGenerated = isGeneratedFile(file) + generated[file] = isGenerated + } + if isGenerated && !fCheckGeneratedFiles { + // don't check + return true + } + + sw := n.(*ast.SwitchStmt) + if sw.Tag == nil { + return true + } + t := pass.TypesInfo.Types[sw.Tag] + if !t.IsValue() { + return true + } + tagType, ok := t.Type.(*types.Named) + if !ok { + return true + } + + tagPkg := tagType.Obj().Pkg() + if tagPkg == nil { + // Doc comment: nil for labels and objects in the Universe scope. + // This happens for the `error` type, for example. + // Continuing would mean that ImportPackageFact panics. + return true + } + + var enums enumsFact + if !pass.ImportPackageFact(tagPkg, &enums) { + // Can't do anything further. + return true + } + + em, isEnum := enums.Enums[tagType.Obj().Name()] + if !isEnum { + // Tag's type is not a known enum. + return true + } + + // Get comment map. 
+ var allComments ast.CommentMap + if cm, ok := comments[file]; ok { + allComments = cm + } else { + allComments = ast.NewCommentMap(pass.Fset, file, file.Comments) + comments[file] = allComments + } + + specificComments := allComments.Filter(sw) + for _, group := range specificComments.Comments() { + if containsIgnoreDirective(group.List) { + return true // skip checking due to ignore directive + } + } + + samePkg := tagPkg == pass.Pkg + checkUnexported := samePkg + + hitlist := hitlistFromEnumMembers(em, tagPkg, checkUnexported, fIgnorePattern.Get().(*regexp.Regexp)) + if len(hitlist) == 0 { + return true + } + + var defaultCase *ast.CaseClause + for _, stmt := range sw.Body.List { + caseCl := stmt.(*ast.CaseClause) + if isDefaultCase(caseCl) { + defaultCase = caseCl + continue // nothing more to do if it's the default case + } + for _, e := range caseCl.List { + e = astutil.Unparen(e) + if samePkg { + ident, ok := e.(*ast.Ident) + if !ok { + continue + } + updateHitlist(hitlist, em, ident.Name) + } else { + selExpr, ok := e.(*ast.SelectorExpr) + if !ok { + continue + } + + // ensure X is package identifier + ident, ok := selExpr.X.(*ast.Ident) + if !ok { + continue + } + if !isPackageNameIdentifier(pass, ident) { + continue + } + + updateHitlist(hitlist, em, selExpr.Sel.Name) + } + } + } + + defaultSuffices := fDefaultSignifiesExhaustive && defaultCase != nil + shouldReport := len(hitlist) > 0 && !defaultSuffices + + if shouldReport { + reportSwitch(pass, sw, defaultCase, samePkg, tagType, em, hitlist, file) + } + return true + }) + + return nil +} + +func updateHitlist(hitlist map[string]struct{}, em *enumMembers, foundName string) { + constVal, ok := em.NameToValue[foundName] + if !ok { + // only delete the name alone from hitlist + delete(hitlist, foundName) + return + } + + // delete all of the same-valued names from hitlist + namesToDelete := em.ValueToNames[constVal] + for _, n := range namesToDelete { + delete(hitlist, n) + } +} + +func isPackageNameIdentifier(pass *analysis.Pass, ident *ast.Ident) bool { + obj := pass.TypesInfo.ObjectOf(ident) + if obj == nil { + return false + } + _, ok := obj.(*types.PkgName) + return ok +} + +func hitlistFromEnumMembers(em *enumMembers, enumPkg *types.Package, checkUnexported bool, ignorePattern *regexp.Regexp) map[string]struct{} { + hitlist := make(map[string]struct{}) + for _, name := range em.OrderedNames { + if name == "_" { + // blank identifier is often used to skip entries in iota lists + continue + } + if ignorePattern != nil && ignorePattern.MatchString(enumPkg.Path()+"."+name) { + continue + } + if !ast.IsExported(name) && !checkUnexported { + continue + } + hitlist[name] = struct{}{} + } + return hitlist +} + +func determineMissingOutput(missingMembers map[string]struct{}, em *enumMembers) []string { + constValMembers := make(map[string][]string) // value -> names + var otherMembers []string // non-constant value names + + for m := range missingMembers { + if constVal, ok := em.NameToValue[m]; ok { + constValMembers[constVal] = append(constValMembers[constVal], m) + } else { + otherMembers = append(otherMembers, m) + } + } + + missingOutput := make([]string, 0, len(constValMembers)+len(otherMembers)) + for _, names := range constValMembers { + sort.Strings(names) + missingOutput = append(missingOutput, strings.Join(names, "|")) + } + missingOutput = append(missingOutput, otherMembers...) 
+ sort.Strings(missingOutput) + return missingOutput +} + +func reportSwitch( + pass *analysis.Pass, + sw *ast.SwitchStmt, + defaultCase *ast.CaseClause, + samePkg bool, + enumType *types.Named, + em *enumMembers, + missingMembers map[string]struct{}, + f *ast.File, +) { + missingOutput := determineMissingOutput(missingMembers, em) + + var fixes []analysis.SuggestedFix + if fix, ok := computeFix(pass, pass.Fset, f, sw, defaultCase, enumType, samePkg, missingMembers); ok { + fixes = append(fixes, fix) + } + + pass.Report(analysis.Diagnostic{ + Pos: sw.Pos(), + End: sw.End(), + Message: fmt.Sprintf("missing cases in switch of type %s: %s", enumTypeName(enumType, samePkg), strings.Join(missingOutput, ", ")), + SuggestedFixes: fixes, + }) +} + +func computeFix(pass *analysis.Pass, fset *token.FileSet, f *ast.File, sw *ast.SwitchStmt, defaultCase *ast.CaseClause, enumType *types.Named, samePkg bool, missingMembers map[string]struct{}) (analysis.SuggestedFix, bool) { + // Function and method calls may be mutative, so we don't want to reuse the + // call expression in the about-to-be-inserted case clause body. So we just + // don't suggest a fix in such situations. + // + // However, we need to make an exception for type conversions, which are + // also call expressions in the AST. + // + // We'll need to lookup type information for this, and can't rely solely + // on the AST. + if containsFuncCall(pass, sw.Tag) { + return analysis.SuggestedFix{}, false + } + + textEdits := []analysis.TextEdit{missingCasesTextEdit(fset, f, samePkg, sw, defaultCase, enumType, missingMembers)} + + // need to add "fmt" import if "fmt" import doesn't already exist + if !hasImportWithPath(fset, f, `"fmt"`) { + textEdits = append(textEdits, fmtImportTextEdit(fset, f)) + } + + missing := make([]string, 0, len(missingMembers)) + for m := range missingMembers { + missing = append(missing, m) + } + sort.Strings(missing) + + return analysis.SuggestedFix{ + Message: fmt.Sprintf("add case clause for: %s", strings.Join(missing, ", ")), + TextEdits: textEdits, + }, true +} + +func containsFuncCall(pass *analysis.Pass, e ast.Expr) bool { + e = astutil.Unparen(e) + c, ok := e.(*ast.CallExpr) + if !ok { + return false + } + if _, isFunc := pass.TypesInfo.TypeOf(c.Fun).Underlying().(*types.Signature); isFunc { + return true + } + for _, a := range c.Args { + if containsFuncCall(pass, a) { + return true + } + } + return false +} + +func firstImportDecl(fset *token.FileSet, f *ast.File) *ast.GenDecl { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if ok && genDecl.Tok == token.IMPORT { + // first IMPORT GenDecl + return genDecl + } + } + return nil +} + +// copies an GenDecl in a manner such that appending to the returned GenDecl's Specs field +// doesn't mutate the original GenDecl +func copyGenDecl(im *ast.GenDecl) *ast.GenDecl { + imCopy := *im + imCopy.Specs = make([]ast.Spec, len(im.Specs)) + for i := range im.Specs { + imCopy.Specs[i] = im.Specs[i] + } + return &imCopy +} + +func hasImportWithPath(fset *token.FileSet, f *ast.File, pathLiteral string) bool { + igroups := astutil.Imports(fset, f) + for _, igroup := range igroups { + for _, importSpec := range igroup { + if importSpec.Path.Value == pathLiteral { + return true + } + } + } + return false +} + +func fmtImportTextEdit(fset *token.FileSet, f *ast.File) analysis.TextEdit { + firstDecl := firstImportDecl(fset, f) + + if firstDecl == nil { + // file has no import declarations + // insert "fmt" import spec after package statement + return 
analysis.TextEdit{ + Pos: f.Name.End() + 1, // end of package name + 1 + End: f.Name.End() + 1, + NewText: []byte(`import ( + "fmt" + )`), + } + } + + // copy because we'll be mutating its Specs field + firstDeclCopy := copyGenDecl(firstDecl) + + // find insertion index for "fmt" import spec + var i int + for ; i < len(firstDeclCopy.Specs); i++ { + im := firstDeclCopy.Specs[i].(*ast.ImportSpec) + if v, _ := strconv.Unquote(im.Path.Value); v > "fmt" { + break + } + } + + // insert "fmt" import spec at the index + fmtSpec := &ast.ImportSpec{ + Path: &ast.BasicLit{ + // NOTE: Pos field doesn't seem to be required for our + // purposes here. + Kind: token.STRING, + Value: `"fmt"`, + }, + } + s := firstDeclCopy.Specs // local var for easier comprehension of next line + s = append(s[:i], append([]ast.Spec{fmtSpec}, s[i:]...)...) + firstDeclCopy.Specs = s + + // create the text edit + var buf bytes.Buffer + printer.Fprint(&buf, fset, firstDeclCopy) + + return analysis.TextEdit{ + Pos: firstDecl.Pos(), + End: firstDecl.End(), + NewText: buf.Bytes(), + } +} + +func missingCasesTextEdit(fset *token.FileSet, f *ast.File, samePkg bool, sw *ast.SwitchStmt, defaultCase *ast.CaseClause, enumType *types.Named, missingMembers map[string]struct{}) analysis.TextEdit { + // ... Construct insertion text for case clause and its body ... + + var tag bytes.Buffer + printer.Fprint(&tag, fset, sw.Tag) + + // If possible and if necessary, determine the package identifier based on + // the AST of other `case` clauses. + var pkgIdent *ast.Ident + if !samePkg { + for _, stmt := range sw.Body.List { + caseCl := stmt.(*ast.CaseClause) + if len(caseCl.List) != 0 { // guard against default case + if sel, ok := caseCl.List[0].(*ast.SelectorExpr); ok { + pkgIdent = sel.X.(*ast.Ident) + break + } + } + } + } + + missing := make([]string, 0, len(missingMembers)) + for m := range missingMembers { + if !samePkg { + if pkgIdent != nil { + // we were able to determine package identifier + missing = append(missing, pkgIdent.Name+"."+m) + } else { + // use the package name (may not be correct always) + // + // TODO: May need to also add import if the package isn't imported + // elsewhere. This (ie, a switch with zero case clauses) should + // happen rarely, so don't implement this for now. + missing = append(missing, enumType.Obj().Pkg().Name()+"."+m) + } + } else { + missing = append(missing, m) + } + } + sort.Strings(missing) + + insert := `case ` + strings.Join(missing, ", ") + `: + panic(fmt.Sprintf("unhandled value: %v",` + tag.String() + `))` + + // ... Create the text edit ... + + pos := sw.Body.Rbrace - 1 // put it as last case + if defaultCase != nil { + pos = defaultCase.Case - 2 // put it before the default case (why -2?) + } + + return analysis.TextEdit{ + Pos: pos, + End: pos, + NewText: []byte(insert), + } +} diff --git a/vendor/github.com/nishanths/predeclared/LICENSE b/vendor/github.com/nishanths/predeclared/LICENSE new file mode 100644 index 000000000..946212315 --- /dev/null +++ b/vendor/github.com/nishanths/predeclared/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2017, Nishanth Shanmugham +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/nishanths/predeclared/passes/predeclared/go18.go b/vendor/github.com/nishanths/predeclared/passes/predeclared/go18.go new file mode 100644 index 000000000..4083efc74 --- /dev/null +++ b/vendor/github.com/nishanths/predeclared/passes/predeclared/go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package predeclared + +import "go/doc" + +func isPredeclaredIdent(name string) bool { + return doc.IsPredeclared(name) +} diff --git a/vendor/github.com/nishanths/predeclared/passes/predeclared/pre_go18.go b/vendor/github.com/nishanths/predeclared/passes/predeclared/pre_go18.go new file mode 100644 index 000000000..5780e0b56 --- /dev/null +++ b/vendor/github.com/nishanths/predeclared/passes/predeclared/pre_go18.go @@ -0,0 +1,53 @@ +// +build !go1.8 + +package predeclared + +func isPredeclaredIdent(name string) bool { + return predeclaredIdents[name] +} + +// Keep in sync with https://golang.org/ref/spec#Predeclared_identifiers +var predeclaredIdents = map[string]bool{ + "bool": true, + "byte": true, + "complex64": true, + "complex128": true, + "error": true, + "float32": true, + "float64": true, + "int": true, + "int8": true, + "int16": true, + "int32": true, + "int64": true, + "rune": true, + "string": true, + "uint": true, + "uint8": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uintptr": true, + + "true": true, + "false": true, + "iota": true, + + "nil": true, + + "append": true, + "cap": true, + "close": true, + "complex": true, + "copy": true, + "delete": true, + "imag": true, + "len": true, + "make": true, + "new": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, +} diff --git a/vendor/github.com/nishanths/predeclared/passes/predeclared/predeclared.go b/vendor/github.com/nishanths/predeclared/passes/predeclared/predeclared.go new file mode 100644 index 000000000..67c0e0a00 --- /dev/null +++ b/vendor/github.com/nishanths/predeclared/passes/predeclared/predeclared.go @@ -0,0 +1,202 @@ +// Package predeclared provides a static analysis (used by the predeclared command) +// that can detect declarations in Go code that shadow one of Go's predeclared identifiers. +package predeclared + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// Flag names used by the analyzer. 
They are exported for use by analyzer +// driver programs. +const ( + IgnoreFlag = "ignore" + QualifiedFlag = "q" +) + +var ( + fIgnore string + fQualified bool +) + +func init() { + Analyzer.Flags.StringVar(&fIgnore, IgnoreFlag, "", "comma-separated list of predeclared identifiers to not report on") + Analyzer.Flags.BoolVar(&fQualified, QualifiedFlag, false, "include method names and field names (i.e., qualified names) in checks") +} + +var Analyzer = &analysis.Analyzer{ + Name: "predeclared", + Doc: "find code that shadows one of Go's predeclared identifiers", + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + cfg := newConfig(fIgnore, fQualified) + for _, file := range pass.Files { + processFile(pass.Report, cfg, pass.Fset, file) + } + return nil, nil +} + +type config struct { + qualified bool + ignoredIdents map[string]struct{} +} + +func newConfig(ignore string, qualified bool) *config { + cfg := &config{ + qualified: qualified, + ignoredIdents: map[string]struct{}{}, + } + for _, s := range strings.Split(ignore, ",") { + ident := strings.TrimSpace(s) + if ident == "" { + continue + } + cfg.ignoredIdents[ident] = struct{}{} + } + return cfg +} + +type issue struct { + ident *ast.Ident + kind string + fset *token.FileSet +} + +func (i issue) String() string { + pos := i.fset.Position(i.ident.Pos()) + return fmt.Sprintf("%s: %s %s has same name as predeclared identifier", pos, i.kind, i.ident.Name) +} + +func processFile(report func(analysis.Diagnostic), cfg *config, fset *token.FileSet, file *ast.File) []issue { // nolint: gocyclo + var issues []issue + + maybeReport := func(x *ast.Ident, kind string) { + if _, isIgnored := cfg.ignoredIdents[x.Name]; !isIgnored && isPredeclaredIdent(x.Name) { + report(analysis.Diagnostic{ + Pos: x.Pos(), + End: x.End(), + Message: fmt.Sprintf("%s %s has same name as predeclared identifier", kind, x.Name), + }) + issues = append(issues, issue{x, kind, fset}) + } + } + + seenValueSpecs := make(map[*ast.ValueSpec]bool) + + // TODO: consider deduping package name issues for files in the + // same directory. + maybeReport(file.Name, "package name") + + for _, spec := range file.Imports { + if spec.Name == nil { + continue + } + maybeReport(spec.Name, "import name") + } + + // Handle declarations and fields. 
+ // https://golang.org/ref/spec#Declarations_and_scope + ast.Inspect(file, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.GenDecl: + var kind string + switch x.Tok { + case token.CONST: + kind = "const" + case token.VAR: + kind = "variable" + default: + return true + } + for _, spec := range x.Specs { + if vspec, ok := spec.(*ast.ValueSpec); ok && !seenValueSpecs[vspec] { + seenValueSpecs[vspec] = true + for _, name := range vspec.Names { + maybeReport(name, kind) + } + } + } + return true + case *ast.TypeSpec: + maybeReport(x.Name, "type") + return true + case *ast.StructType: + if cfg.qualified && x.Fields != nil { + for _, field := range x.Fields.List { + for _, name := range field.Names { + maybeReport(name, "field") + } + } + } + return true + case *ast.InterfaceType: + if cfg.qualified && x.Methods != nil { + for _, meth := range x.Methods.List { + for _, name := range meth.Names { + maybeReport(name, "method") + } + } + } + return true + case *ast.FuncDecl: + if x.Recv == nil { + // it's a function + maybeReport(x.Name, "function") + } else { + // it's a method + if cfg.qualified { + maybeReport(x.Name, "method") + } + } + // add receivers idents + if x.Recv != nil { + for _, field := range x.Recv.List { + for _, name := range field.Names { + maybeReport(name, "receiver") + } + } + } + // Params and Results will be checked in the *ast.FuncType case. + return true + case *ast.FuncType: + // add params idents + for _, field := range x.Params.List { + for _, name := range field.Names { + maybeReport(name, "param") + } + } + // add returns idents + if x.Results != nil { + for _, field := range x.Results.List { + for _, name := range field.Names { + maybeReport(name, "named return") + } + } + } + return true + case *ast.LabeledStmt: + maybeReport(x.Label, "label") + return true + case *ast.AssignStmt: + // We only care about short variable declarations, which use token.DEFINE. 
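+ // (A plain assignment uses token.ASSIGN and cannot introduce new identifiers, so it is skipped.)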
+ if x.Tok == token.DEFINE { + for _, expr := range x.Lhs { + if ident, ok := expr.(*ast.Ident); ok { + maybeReport(ident, "variable") + } + } + } + return true + default: + return true + } + }) + + return issues +} diff --git a/vendor/github.com/olekukonko/tablewriter/.gitignore b/vendor/github.com/olekukonko/tablewriter/.gitignore new file mode 100644 index 000000000..b66cec635 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/.gitignore @@ -0,0 +1,15 @@ +# Created by .ignore support plugin (hsz.mobi) +### Go template +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + diff --git a/vendor/github.com/olekukonko/tablewriter/.travis.yml b/vendor/github.com/olekukonko/tablewriter/.travis.yml new file mode 100644 index 000000000..366d48a35 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/.travis.yml @@ -0,0 +1,22 @@ +language: go +arch: + - ppc64le + - amd64 +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - "1.10" + - tip +jobs: + exclude : + - arch : ppc64le + go : + - 1.3 + - arch : ppc64le + go : + - 1.4 diff --git a/vendor/github.com/olekukonko/tablewriter/LICENSE.md b/vendor/github.com/olekukonko/tablewriter/LICENSE.md new file mode 100644 index 000000000..a0769b5c1 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/LICENSE.md @@ -0,0 +1,19 @@ +Copyright (C) 2014 by Oleku Konko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md new file mode 100644 index 000000000..f06530d75 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/README.md @@ -0,0 +1,431 @@ +ASCII Table Writer +========= + +[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) +[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) +[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter) + +Generate ASCII table on the fly ... 
Installation is simple as + + go get github.com/olekukonko/tablewriter + + +#### Features +- Automatic Padding +- Support Multiple Lines +- Supports Alignment +- Support Custom Separators +- Automatic Alignment of numbers & percentage +- Write directly to http , file etc via `io.Writer` +- Read directly from CSV file +- Optional row line via `SetRowLine` +- Normalise table header +- Make CSV Headers optional +- Enable or disable table border +- Set custom footer support +- Optional identical cells merging +- Set custom caption +- Optional reflowing of paragraphs in multi-line cells. + +#### Example 1 - Basic +```go +data := [][]string{ + []string{"A", "The Good", "500"}, + []string{"B", "The Very very Bad Man", "288"}, + []string{"C", "The Ugly", "120"}, + []string{"D", "The Gopher", "800"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Name", "Sign", "Rating"}) + +for _, v := range data { + table.Append(v) +} +table.Render() // Send output +``` + +##### Output 1 +``` ++------+-----------------------+--------+ +| NAME | SIGN | RATING | ++------+-----------------------+--------+ +| A | The Good | 500 | +| B | The Very very Bad Man | 288 | +| C | The Ugly | 120 | +| D | The Gopher | 800 | ++------+-----------------------+--------+ +``` + +#### Example 2 - Without Border / Footer / Bulk Append +```go +data := [][]string{ + []string{"1/1/2014", "Domain name", "2233", "$10.98"}, + []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, + []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, + []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) +table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer +table.SetBorder(false) // Set Border to false +table.AppendBulk(data) // Add Bulk Data +table.Render() +``` + +##### Output 2 +``` + + DATE | DESCRIPTION | CV2 | AMOUNT +-----------+--------------------------+-------+---------- + 1/1/2014 | Domain name | 2233 | $10.98 + 1/1/2014 | January Hosting | 2233 | $54.95 + 1/4/2014 | February Hosting | 2233 | $51.00 + 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 +-----------+--------------------------+-------+---------- + TOTAL | $146 93 + --------+---------- + +``` + + +#### Example 3 - CSV +```go +table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true) +table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment +table.Render() +``` + +##### Output 3 +``` ++----------+--------------+------+-----+---------+----------------+ +| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA | ++----------+--------------+------+-----+---------+----------------+ +| user_id | smallint(5) | NO | PRI | NULL | auto_increment | +| username | varchar(10) | NO | | NULL | | +| password | varchar(100) | NO | | NULL | | ++----------+--------------+------+-----+---------+----------------+ +``` + +#### Example 4 - Custom Separator +```go +table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true) +table.SetRowLine(true) // Enable row line + +// Change table lines +table.SetCenterSeparator("*") +table.SetColumnSeparator("╪") +table.SetRowSeparator("-") + +table.SetAlignment(tablewriter.ALIGN_LEFT) +table.Render() +``` + +##### Output 4 +``` +*------------*-----------*---------* +╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪ +*------------*-----------*---------* +╪ John ╪ Barry ╪ 123456 ╪ +*------------*-----------*---------* +╪ Kathy ╪ Smith ╪ 687987 ╪ +*------------*-----------*---------* +╪ 
Bob ╪ McCornick ╪ 3979870 ╪ +*------------*-----------*---------* +``` + +#### Example 5 - Markdown Format +```go +data := [][]string{ + []string{"1/1/2014", "Domain name", "2233", "$10.98"}, + []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, + []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, + []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) +table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) +table.SetCenterSeparator("|") +table.AppendBulk(data) // Add Bulk Data +table.Render() +``` + +##### Output 5 +``` +| DATE | DESCRIPTION | CV2 | AMOUNT | +|----------|--------------------------|------|--------| +| 1/1/2014 | Domain name | 2233 | $10.98 | +| 1/1/2014 | January Hosting | 2233 | $54.95 | +| 1/4/2014 | February Hosting | 2233 | $51.00 | +| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 | +``` + +#### Example 6 - Identical cells merging +```go +data := [][]string{ + []string{"1/1/2014", "Domain name", "1234", "$10.98"}, + []string{"1/1/2014", "January Hosting", "2345", "$54.95"}, + []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, + []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) +table.SetFooter([]string{"", "", "Total", "$146.93"}) +table.SetAutoMergeCells(true) +table.SetRowLine(true) +table.AppendBulk(data) +table.Render() +``` + +##### Output 6 +``` ++----------+--------------------------+-------+---------+ +| DATE | DESCRIPTION | CV2 | AMOUNT | ++----------+--------------------------+-------+---------+ +| 1/1/2014 | Domain name | 1234 | $10.98 | ++ +--------------------------+-------+---------+ +| | January Hosting | 2345 | $54.95 | ++----------+--------------------------+-------+---------+ +| 1/4/2014 | February Hosting | 3456 | $51.00 | ++ +--------------------------+-------+---------+ +| | February Extra Bandwidth | 4567 | $30.00 | ++----------+--------------------------+-------+---------+ +| TOTAL | $146 93 | ++----------+--------------------------+-------+---------+ +``` + +#### Example 7 - Identical cells merging (specify the column index to merge) +```go +data := [][]string{ + []string{"1/1/2014", "Domain name", "1234", "$10.98"}, + []string{"1/1/2014", "January Hosting", "1234", "$10.98"}, + []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, + []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) +table.SetFooter([]string{"", "", "Total", "$146.93"}) +table.SetAutoMergeCellsByColumnIndex([]int{2, 3}) +table.SetRowLine(true) +table.AppendBulk(data) +table.Render() +``` + +##### Output 7 +``` ++----------+--------------------------+-------+---------+ +| DATE | DESCRIPTION | CV2 | AMOUNT | ++----------+--------------------------+-------+---------+ +| 1/1/2014 | Domain name | 1234 | $10.98 | ++----------+--------------------------+ + + +| 1/1/2014 | January Hosting | | | ++----------+--------------------------+-------+---------+ +| 1/4/2014 | February Hosting | 3456 | $51.00 | ++----------+--------------------------+-------+---------+ +| 1/4/2014 | February Extra Bandwidth | 4567 | $30.00 | ++----------+--------------------------+-------+---------+ +| TOTAL | $146.93 | 
++----------+--------------------------+-------+---------+ +``` + + +#### Table with color +```go +data := [][]string{ + []string{"1/1/2014", "Domain name", "2233", "$10.98"}, + []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, + []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, + []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) +table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer +table.SetBorder(false) // Set Border to false + +table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, + tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, + tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, + tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) + +table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) + +table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, + tablewriter.Colors{tablewriter.Bold}, + tablewriter.Colors{tablewriter.FgHiRedColor}) + +table.AppendBulk(data) +table.Render() +``` + +#### Table with color Output +![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png) + +#### Example - 8 Table Cells with Color + +Individual Cell Colors from `func Rich` take precedence over Column Colors + +```go +data := [][]string{ + []string{"Test1Merge", "HelloCol2 - 1", "HelloCol3 - 1", "HelloCol4 - 1"}, + []string{"Test1Merge", "HelloCol2 - 2", "HelloCol3 - 2", "HelloCol4 - 2"}, + []string{"Test1Merge", "HelloCol2 - 3", "HelloCol3 - 3", "HelloCol4 - 3"}, + []string{"Test2Merge", "HelloCol2 - 4", "HelloCol3 - 4", "HelloCol4 - 4"}, + []string{"Test2Merge", "HelloCol2 - 5", "HelloCol3 - 5", "HelloCol4 - 5"}, + []string{"Test2Merge", "HelloCol2 - 6", "HelloCol3 - 6", "HelloCol4 - 6"}, + []string{"Test2Merge", "HelloCol2 - 7", "HelloCol3 - 7", "HelloCol4 - 7"}, + []string{"Test3Merge", "HelloCol2 - 8", "HelloCol3 - 8", "HelloCol4 - 8"}, + []string{"Test3Merge", "HelloCol2 - 9", "HelloCol3 - 9", "HelloCol4 - 9"}, + []string{"Test3Merge", "HelloCol2 - 10", "HelloCol3 -10", "HelloCol4 - 10"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Col1", "Col2", "Col3", "Col4"}) +table.SetFooter([]string{"", "", "Footer3", "Footer4"}) +table.SetBorder(false) + +table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, + tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, + tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, + tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) + +table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, + tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) + +table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, + tablewriter.Colors{tablewriter.Bold}, + tablewriter.Colors{tablewriter.FgHiRedColor}) + +colorData1 := []string{"TestCOLOR1Merge", "HelloCol2 - COLOR1", "HelloCol3 - COLOR1", "HelloCol4 
- COLOR1"} +colorData2 := []string{"TestCOLOR2Merge", "HelloCol2 - COLOR2", "HelloCol3 - COLOR2", "HelloCol4 - COLOR2"} + +for i, row := range data { + if i == 4 { + table.Rich(colorData1, []tablewriter.Colors{tablewriter.Colors{}, tablewriter.Colors{tablewriter.Normal, tablewriter.FgCyanColor}, tablewriter.Colors{tablewriter.Bold, tablewriter.FgWhiteColor}, tablewriter.Colors{}}) + table.Rich(colorData2, []tablewriter.Colors{tablewriter.Colors{tablewriter.Normal, tablewriter.FgMagentaColor}, tablewriter.Colors{}, tablewriter.Colors{tablewriter.Bold, tablewriter.BgRedColor}, tablewriter.Colors{tablewriter.FgHiGreenColor, tablewriter.Italic, tablewriter.BgHiCyanColor}}) + } + table.Append(row) +} + +table.SetAutoMergeCells(true) +table.Render() + +``` + +##### Table cells with color Output +![Table cells with Color](https://user-images.githubusercontent.com/9064687/63969376-bcd88d80-ca6f-11e9-9466-c3d954700b25.png) + +#### Example 9 - Set table caption +```go +data := [][]string{ + []string{"A", "The Good", "500"}, + []string{"B", "The Very very Bad Man", "288"}, + []string{"C", "The Ugly", "120"}, + []string{"D", "The Gopher", "800"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Name", "Sign", "Rating"}) +table.SetCaption(true, "Movie ratings.") + +for _, v := range data { + table.Append(v) +} +table.Render() // Send output +``` + +Note: Caption text will wrap with total width of rendered table. + +##### Output 9 +``` ++------+-----------------------+--------+ +| NAME | SIGN | RATING | ++------+-----------------------+--------+ +| A | The Good | 500 | +| B | The Very very Bad Man | 288 | +| C | The Ugly | 120 | +| D | The Gopher | 800 | ++------+-----------------------+--------+ +Movie ratings. +``` + +#### Example 10 - Set NoWhiteSpace and TablePadding option +```go +data := [][]string{ + {"node1.example.com", "Ready", "compute", "1.11"}, + {"node2.example.com", "Ready", "compute", "1.11"}, + {"node3.example.com", "Ready", "compute", "1.11"}, + {"node4.example.com", "NotReady", "compute", "1.11"}, +} + +table := tablewriter.NewWriter(os.Stdout) +table.SetHeader([]string{"Name", "Status", "Role", "Version"}) +table.SetAutoWrapText(false) +table.SetAutoFormatHeaders(true) +table.SetHeaderAlignment(ALIGN_LEFT) +table.SetAlignment(ALIGN_LEFT) +table.SetCenterSeparator("") +table.SetColumnSeparator("") +table.SetRowSeparator("") +table.SetHeaderLine(false) +table.SetBorder(false) +table.SetTablePadding("\t") // pad with tabs +table.SetNoWhiteSpace(true) +table.AppendBulk(data) // Add Bulk Data +table.Render() +``` + +##### Output 10 +``` +NAME STATUS ROLE VERSION +node1.example.com Ready compute 1.11 +node2.example.com Ready compute 1.11 +node3.example.com Ready compute 1.11 +node4.example.com NotReady compute 1.11 +``` + +#### Render table into a string + +Instead of rendering the table to `io.Stdout` you can also render it into a string. Go 1.10 introduced the `strings.Builder` type which implements the `io.Writer` interface and can therefore be used for this task. 
Example: + +```go +package main + +import ( + "strings" + "fmt" + + "github.com/olekukonko/tablewriter" +) + +func main() { + tableString := &strings.Builder{} + table := tablewriter.NewWriter(tableString) + + /* + * Code to fill the table + */ + + table.Render() + + fmt.Println(tableString.String()) +} +``` + +#### TODO +- ~~Import Directly from CSV~~ - `done` +- ~~Support for `SetFooter`~~ - `done` +- ~~Support for `SetBorder`~~ - `done` +- ~~Support table with uneven rows~~ - `done` +- ~~Support custom alignment~~ +- General Improvement & Optimisation +- `NewHTML` Parse table from HTML diff --git a/vendor/github.com/olekukonko/tablewriter/csv.go b/vendor/github.com/olekukonko/tablewriter/csv.go new file mode 100644 index 000000000..98878303b --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/csv.go @@ -0,0 +1,52 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. +// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "encoding/csv" + "io" + "os" +) + +// Start A new table by importing from a CSV file +// Takes io.Writer and csv File name +func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { + file, err := os.Open(fileName) + if err != nil { + return &Table{}, err + } + defer file.Close() + csvReader := csv.NewReader(file) + t, err := NewCSVReader(writer, csvReader, hasHeader) + return t, err +} + +// Start a New Table Writer with csv.Reader +// This enables customisation such as reader.Comma = ';' +// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 +func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { + t := NewWriter(writer) + if hasHeader { + // Read the first row + headers, err := csvReader.Read() + if err != nil { + return &Table{}, err + } + t.SetHeader(headers) + } + for { + record, err := csvReader.Read() + if err == io.EOF { + break + } else if err != nil { + return &Table{}, err + } + t.Append(record) + } + return t, nil +} diff --git a/vendor/github.com/olekukonko/tablewriter/go.mod b/vendor/github.com/olekukonko/tablewriter/go.mod new file mode 100644 index 000000000..484ab01f1 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/go.mod @@ -0,0 +1,5 @@ +module github.com/olekukonko/tablewriter + +go 1.12 + +require github.com/mattn/go-runewidth v0.0.9 diff --git a/vendor/github.com/olekukonko/tablewriter/go.sum b/vendor/github.com/olekukonko/tablewriter/go.sum new file mode 100644 index 000000000..4a94bf58b --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/go.sum @@ -0,0 +1,2 @@ +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go new file mode 100644 index 000000000..f913149c6 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/table.go @@ -0,0 +1,967 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. 
+// The protocols were written in pure Go and works on windows and unix systems + +// Create & Generate text based table +package tablewriter + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +const ( + MAX_ROW_WIDTH = 30 +) + +const ( + CENTER = "+" + ROW = "-" + COLUMN = "|" + SPACE = " " + NEWLINE = "\n" +) + +const ( + ALIGN_DEFAULT = iota + ALIGN_CENTER + ALIGN_RIGHT + ALIGN_LEFT +) + +var ( + decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`) + percent = regexp.MustCompile(`^-?\d+\.?\d*$%$`) +) + +type Border struct { + Left bool + Right bool + Top bool + Bottom bool +} + +type Table struct { + out io.Writer + rows [][]string + lines [][][]string + cs map[int]int + rs map[int]int + headers [][]string + footers [][]string + caption bool + captionText string + autoFmt bool + autoWrap bool + reflowText bool + mW int + pCenter string + pRow string + pColumn string + tColumn int + tRow int + hAlign int + fAlign int + align int + newLine string + rowLine bool + autoMergeCells bool + columnsToAutoMergeCells map[int]bool + noWhiteSpace bool + tablePadding string + hdrLine bool + borders Border + colSize int + headerParams []string + columnsParams []string + footerParams []string + columnsAlign []int +} + +// Start New Table +// Take io.Writer Directly +func NewWriter(writer io.Writer) *Table { + t := &Table{ + out: writer, + rows: [][]string{}, + lines: [][][]string{}, + cs: make(map[int]int), + rs: make(map[int]int), + headers: [][]string{}, + footers: [][]string{}, + caption: false, + captionText: "Table caption.", + autoFmt: true, + autoWrap: true, + reflowText: true, + mW: MAX_ROW_WIDTH, + pCenter: CENTER, + pRow: ROW, + pColumn: COLUMN, + tColumn: -1, + tRow: -1, + hAlign: ALIGN_DEFAULT, + fAlign: ALIGN_DEFAULT, + align: ALIGN_DEFAULT, + newLine: NEWLINE, + rowLine: false, + hdrLine: true, + borders: Border{Left: true, Right: true, Bottom: true, Top: true}, + colSize: -1, + headerParams: []string{}, + columnsParams: []string{}, + footerParams: []string{}, + columnsAlign: []int{}} + return t +} + +// Render table output +func (t *Table) Render() { + if t.borders.Top { + t.printLine(true) + } + t.printHeading() + if t.autoMergeCells { + t.printRowsMergeCells() + } else { + t.printRows() + } + if !t.rowLine && t.borders.Bottom { + t.printLine(true) + } + t.printFooter() + + if t.caption { + t.printCaption() + } +} + +const ( + headerRowIdx = -1 + footerRowIdx = -2 +) + +// Set table header +func (t *Table) SetHeader(keys []string) { + t.colSize = len(keys) + for i, v := range keys { + lines := t.parseDimension(v, i, headerRowIdx) + t.headers = append(t.headers, lines) + } +} + +// Set table Footer +func (t *Table) SetFooter(keys []string) { + //t.colSize = len(keys) + for i, v := range keys { + lines := t.parseDimension(v, i, footerRowIdx) + t.footers = append(t.footers, lines) + } +} + +// Set table Caption +func (t *Table) SetCaption(caption bool, captionText ...string) { + t.caption = caption + if len(captionText) == 1 { + t.captionText = captionText[0] + } +} + +// Turn header autoformatting on/off. Default is on (true). +func (t *Table) SetAutoFormatHeaders(auto bool) { + t.autoFmt = auto +} + +// Turn automatic multiline text adjustment on/off. Default is on (true). +func (t *Table) SetAutoWrapText(auto bool) { + t.autoWrap = auto +} + +// Turn automatic reflowing of multiline text when rewrapping. Default is on (true). 
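+// When reflowing is disabled, existing line breaks inside a cell are kept and each original line is wrapped on its own.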
+func (t *Table) SetReflowDuringAutoWrap(auto bool) { + t.reflowText = auto +} + +// Set the Default column width +func (t *Table) SetColWidth(width int) { + t.mW = width +} + +// Set the minimal width for a column +func (t *Table) SetColMinWidth(column int, width int) { + t.cs[column] = width +} + +// Set the Column Separator +func (t *Table) SetColumnSeparator(sep string) { + t.pColumn = sep +} + +// Set the Row Separator +func (t *Table) SetRowSeparator(sep string) { + t.pRow = sep +} + +// Set the center Separator +func (t *Table) SetCenterSeparator(sep string) { + t.pCenter = sep +} + +// Set Header Alignment +func (t *Table) SetHeaderAlignment(hAlign int) { + t.hAlign = hAlign +} + +// Set Footer Alignment +func (t *Table) SetFooterAlignment(fAlign int) { + t.fAlign = fAlign +} + +// Set Table Alignment +func (t *Table) SetAlignment(align int) { + t.align = align +} + +// Set No White Space +func (t *Table) SetNoWhiteSpace(allow bool) { + t.noWhiteSpace = allow +} + +// Set Table Padding +func (t *Table) SetTablePadding(padding string) { + t.tablePadding = padding +} + +func (t *Table) SetColumnAlignment(keys []int) { + for _, v := range keys { + switch v { + case ALIGN_CENTER: + break + case ALIGN_LEFT: + break + case ALIGN_RIGHT: + break + default: + v = ALIGN_DEFAULT + } + t.columnsAlign = append(t.columnsAlign, v) + } +} + +// Set New Line +func (t *Table) SetNewLine(nl string) { + t.newLine = nl +} + +// Set Header Line +// This would enable / disable a line after the header +func (t *Table) SetHeaderLine(line bool) { + t.hdrLine = line +} + +// Set Row Line +// This would enable / disable a line on each row of the table +func (t *Table) SetRowLine(line bool) { + t.rowLine = line +} + +// Set Auto Merge Cells +// This would enable / disable the merge of cells with identical values +func (t *Table) SetAutoMergeCells(auto bool) { + t.autoMergeCells = auto +} + +// Set Auto Merge Cells By Column Index +// This would enable / disable the merge of cells with identical values for specific columns +// If cols is empty, it is the same as `SetAutoMergeCells(true)`. 
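+// For example, SetAutoMergeCellsByColumnIndex([]int{2, 3}) merges identical values only in the columns with index 2 and 3 (see Example 7 in the README above).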
+func (t *Table) SetAutoMergeCellsByColumnIndex(cols []int) { + t.autoMergeCells = true + + if len(cols) > 0 { + m := make(map[int]bool) + for _, col := range cols { + m[col] = true + } + t.columnsToAutoMergeCells = m + } +} + +// Set Table Border +// This would enable / disable line around the table +func (t *Table) SetBorder(border bool) { + t.SetBorders(Border{border, border, border, border}) +} + +func (t *Table) SetBorders(border Border) { + t.borders = border +} + +// Append row to table +func (t *Table) Append(row []string) { + rowSize := len(t.headers) + if rowSize > t.colSize { + t.colSize = rowSize + } + + n := len(t.lines) + line := [][]string{} + for i, v := range row { + + // Detect string width + // Detect String height + // Break strings into words + out := t.parseDimension(v, i, n) + + // Append broken words + line = append(line, out) + } + t.lines = append(t.lines, line) +} + +// Append row to table with color attributes +func (t *Table) Rich(row []string, colors []Colors) { + rowSize := len(t.headers) + if rowSize > t.colSize { + t.colSize = rowSize + } + + n := len(t.lines) + line := [][]string{} + for i, v := range row { + + // Detect string width + // Detect String height + // Break strings into words + out := t.parseDimension(v, i, n) + + if len(colors) > i { + color := colors[i] + out[0] = format(out[0], color) + } + + // Append broken words + line = append(line, out) + } + t.lines = append(t.lines, line) +} + +// Allow Support for Bulk Append +// Eliminates repeated for loops +func (t *Table) AppendBulk(rows [][]string) { + for _, row := range rows { + t.Append(row) + } +} + +// NumLines to get the number of lines +func (t *Table) NumLines() int { + return len(t.lines) +} + +// Clear rows +func (t *Table) ClearRows() { + t.lines = [][][]string{} +} + +// Clear footer +func (t *Table) ClearFooter() { + t.footers = [][]string{} +} + +// Center based on position and border. 
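+// i == -1 refers to the junction before the first column; when the adjoining border is disabled, the plain row separator is returned instead of the center separator.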
+func (t *Table) center(i int) string { + if i == -1 && !t.borders.Left { + return t.pRow + } + + if i == len(t.cs)-1 && !t.borders.Right { + return t.pRow + } + + return t.pCenter +} + +// Print line based on row width +func (t *Table) printLine(nl bool) { + fmt.Fprint(t.out, t.center(-1)) + for i := 0; i < len(t.cs); i++ { + v := t.cs[i] + fmt.Fprintf(t.out, "%s%s%s%s", + t.pRow, + strings.Repeat(string(t.pRow), v), + t.pRow, + t.center(i)) + } + if nl { + fmt.Fprint(t.out, t.newLine) + } +} + +// Print line based on row width with our without cell separator +func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) { + fmt.Fprint(t.out, t.pCenter) + for i := 0; i < len(t.cs); i++ { + v := t.cs[i] + if i > len(displayCellSeparator) || displayCellSeparator[i] { + // Display the cell separator + fmt.Fprintf(t.out, "%s%s%s%s", + t.pRow, + strings.Repeat(string(t.pRow), v), + t.pRow, + t.pCenter) + } else { + // Don't display the cell separator for this cell + fmt.Fprintf(t.out, "%s%s", + strings.Repeat(" ", v+2), + t.pCenter) + } + } + if nl { + fmt.Fprint(t.out, t.newLine) + } +} + +// Return the PadRight function if align is left, PadLeft if align is right, +// and Pad by default +func pad(align int) func(string, string, int) string { + padFunc := Pad + switch align { + case ALIGN_LEFT: + padFunc = PadRight + case ALIGN_RIGHT: + padFunc = PadLeft + } + return padFunc +} + +// Print heading information +func (t *Table) printHeading() { + // Check if headers is available + if len(t.headers) < 1 { + return + } + + // Identify last column + end := len(t.cs) - 1 + + // Get pad function + padFunc := pad(t.hAlign) + + // Checking for ANSI escape sequences for header + is_esc_seq := false + if len(t.headerParams) > 0 { + is_esc_seq = true + } + + // Maximum height. + max := t.rs[headerRowIdx] + + // Print Heading + for x := 0; x < max; x++ { + // Check if border is set + // Replace with space if not set + if !t.noWhiteSpace { + fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) + } + + for y := 0; y <= end; y++ { + v := t.cs[y] + h := "" + + if y < len(t.headers) && x < len(t.headers[y]) { + h = t.headers[y][x] + } + if t.autoFmt { + h = Title(h) + } + pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn) + if t.noWhiteSpace { + pad = ConditionString((y == end && !t.borders.Left), SPACE, t.tablePadding) + } + if is_esc_seq { + if !t.noWhiteSpace { + fmt.Fprintf(t.out, " %s %s", + format(padFunc(h, SPACE, v), + t.headerParams[y]), pad) + } else { + fmt.Fprintf(t.out, "%s %s", + format(padFunc(h, SPACE, v), + t.headerParams[y]), pad) + } + } else { + if !t.noWhiteSpace { + fmt.Fprintf(t.out, " %s %s", + padFunc(h, SPACE, v), + pad) + } else { + // the spaces between breaks the kube formatting + fmt.Fprintf(t.out, "%s%s", + padFunc(h, SPACE, v), + pad) + } + } + } + // Next line + fmt.Fprint(t.out, t.newLine) + } + if t.hdrLine { + t.printLine(true) + } +} + +// Print heading information +func (t *Table) printFooter() { + // Check if headers is available + if len(t.footers) < 1 { + return + } + + // Only print line if border is not set + if !t.borders.Bottom { + t.printLine(true) + } + + // Identify last column + end := len(t.cs) - 1 + + // Get pad function + padFunc := pad(t.fAlign) + + // Checking for ANSI escape sequences for header + is_esc_seq := false + if len(t.footerParams) > 0 { + is_esc_seq = true + } + + // Maximum height. 
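+ // t.rs stores the number of text lines per row; footer rows are kept under the footerRowIdx sentinel key.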
+ max := t.rs[footerRowIdx] + + // Print Footer + erasePad := make([]bool, len(t.footers)) + for x := 0; x < max; x++ { + // Check if border is set + // Replace with space if not set + fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) + + for y := 0; y <= end; y++ { + v := t.cs[y] + f := "" + if y < len(t.footers) && x < len(t.footers[y]) { + f = t.footers[y][x] + } + if t.autoFmt { + f = Title(f) + } + pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn) + + if erasePad[y] || (x == 0 && len(f) == 0) { + pad = SPACE + erasePad[y] = true + } + + if is_esc_seq { + fmt.Fprintf(t.out, " %s %s", + format(padFunc(f, SPACE, v), + t.footerParams[y]), pad) + } else { + fmt.Fprintf(t.out, " %s %s", + padFunc(f, SPACE, v), + pad) + } + + //fmt.Fprintf(t.out, " %s %s", + // padFunc(f, SPACE, v), + // pad) + } + // Next line + fmt.Fprint(t.out, t.newLine) + //t.printLine(true) + } + + hasPrinted := false + + for i := 0; i <= end; i++ { + v := t.cs[i] + pad := t.pRow + center := t.pCenter + length := len(t.footers[i][0]) + + if length > 0 { + hasPrinted = true + } + + // Set center to be space if length is 0 + if length == 0 && !t.borders.Right { + center = SPACE + } + + // Print first junction + if i == 0 { + if length > 0 && !t.borders.Left { + center = t.pRow + } + fmt.Fprint(t.out, center) + } + + // Pad With space of length is 0 + if length == 0 { + pad = SPACE + } + // Ignore left space as it has printed before + if hasPrinted || t.borders.Left { + pad = t.pRow + center = t.pCenter + } + + // Change Center end position + if center != SPACE { + if i == end && !t.borders.Right { + center = t.pRow + } + } + + // Change Center start position + if center == SPACE { + if i < end && len(t.footers[i+1][0]) != 0 { + if !t.borders.Left { + center = t.pRow + } else { + center = t.pCenter + } + } + } + + // Print the footer + fmt.Fprintf(t.out, "%s%s%s%s", + pad, + strings.Repeat(string(pad), v), + pad, + center) + + } + + fmt.Fprint(t.out, t.newLine) +} + +// Print caption text +func (t Table) printCaption() { + width := t.getTableWidth() + paragraph, _ := WrapString(t.captionText, width) + for linecount := 0; linecount < len(paragraph); linecount++ { + fmt.Fprintln(t.out, paragraph[linecount]) + } +} + +// Calculate the total number of characters in a row +func (t Table) getTableWidth() int { + var chars int + for _, v := range t.cs { + chars += v + } + + // Add chars, spaces, seperators to calculate the total width of the table. 
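+ // Roughly: every column contributes its content width plus two padding spaces and one separator, in addition to the outer border.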
+ // ncols := t.colSize + // spaces := ncols * 2 + // seps := ncols + 1 + + return (chars + (3 * t.colSize) + 2) +} + +func (t Table) printRows() { + for i, lines := range t.lines { + t.printRow(lines, i) + } +} + +func (t *Table) fillAlignment(num int) { + if len(t.columnsAlign) < num { + t.columnsAlign = make([]int, num) + for i := range t.columnsAlign { + t.columnsAlign[i] = t.align + } + } +} + +// Print Row Information +// Adjust column alignment based on type + +func (t *Table) printRow(columns [][]string, rowIdx int) { + // Get Maximum Height + max := t.rs[rowIdx] + total := len(columns) + + // TODO Fix uneven col size + // if total < t.colSize { + // for n := t.colSize - total; n < t.colSize ; n++ { + // columns = append(columns, []string{SPACE}) + // t.cs[n] = t.mW + // } + //} + + // Pad Each Height + pads := []int{} + + // Checking for ANSI escape sequences for columns + is_esc_seq := false + if len(t.columnsParams) > 0 { + is_esc_seq = true + } + t.fillAlignment(total) + + for i, line := range columns { + length := len(line) + pad := max - length + pads = append(pads, pad) + for n := 0; n < pad; n++ { + columns[i] = append(columns[i], " ") + } + } + //fmt.Println(max, "\n") + for x := 0; x < max; x++ { + for y := 0; y < total; y++ { + + // Check if border is set + if !t.noWhiteSpace { + fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) + fmt.Fprintf(t.out, SPACE) + } + + str := columns[y][x] + + // Embedding escape sequence with column value + if is_esc_seq { + str = format(str, t.columnsParams[y]) + } + + // This would print alignment + // Default alignment would use multiple configuration + switch t.columnsAlign[y] { + case ALIGN_CENTER: // + fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) + case ALIGN_RIGHT: + fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) + case ALIGN_LEFT: + fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + default: + if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { + fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) + } else { + fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + + // TODO Custom alignment per column + //if max == 1 || pads[y] > 0 { + // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) + //} else { + // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + //} + + } + } + if !t.noWhiteSpace { + fmt.Fprintf(t.out, SPACE) + } else { + fmt.Fprintf(t.out, t.tablePadding) + } + } + // Check if border is set + // Replace with space if not set + if !t.noWhiteSpace { + fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) + } + fmt.Fprint(t.out, t.newLine) + } + + if t.rowLine { + t.printLine(true) + } +} + +// Print the rows of the table and merge the cells that are identical +func (t *Table) printRowsMergeCells() { + var previousLine []string + var displayCellBorder []bool + var tmpWriter bytes.Buffer + for i, lines := range t.lines { + // We store the display of the current line in a tmp writer, as we need to know which border needs to be print above + previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine) + if i > 0 { //We don't need to print borders above first line + if t.rowLine { + t.printLineOptionalCellSeparators(true, displayCellBorder) + } + } + tmpWriter.WriteTo(t.out) + } + //Print the end of the table + if t.rowLine { + t.printLine(true) + } +} + +// Print Row Information to a writer and merge identical cells. 
+// Adjust column alignment based on type + +func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) { + // Get Maximum Height + max := t.rs[rowIdx] + total := len(columns) + + // Pad Each Height + pads := []int{} + + // Checking for ANSI escape sequences for columns + is_esc_seq := false + if len(t.columnsParams) > 0 { + is_esc_seq = true + } + for i, line := range columns { + length := len(line) + pad := max - length + pads = append(pads, pad) + for n := 0; n < pad; n++ { + columns[i] = append(columns[i], " ") + } + } + + var displayCellBorder []bool + t.fillAlignment(total) + for x := 0; x < max; x++ { + for y := 0; y < total; y++ { + + // Check if border is set + fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) + + fmt.Fprintf(writer, SPACE) + + str := columns[y][x] + + // Embedding escape sequence with column value + if is_esc_seq { + str = format(str, t.columnsParams[y]) + } + + if t.autoMergeCells { + var mergeCell bool + if t.columnsToAutoMergeCells != nil { + // Check to see if the column index is in columnsToAutoMergeCells. + if t.columnsToAutoMergeCells[y] { + mergeCell = true + } + } else { + // columnsToAutoMergeCells was not set. + mergeCell = true + } + //Store the full line to merge mutli-lines cells + fullLine := strings.TrimRight(strings.Join(columns[y], " "), " ") + if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" && mergeCell { + // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty. + displayCellBorder = append(displayCellBorder, false) + str = "" + } else { + // First line or different content, keep the content and print the cell border + displayCellBorder = append(displayCellBorder, true) + } + } + + // This would print alignment + // Default alignment would use multiple configuration + switch t.columnsAlign[y] { + case ALIGN_CENTER: // + fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y])) + case ALIGN_RIGHT: + fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) + case ALIGN_LEFT: + fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) + default: + if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { + fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) + } else { + fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) + } + } + fmt.Fprintf(writer, SPACE) + } + // Check if border is set + // Replace with space if not set + fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE)) + fmt.Fprint(writer, t.newLine) + } + + //The new previous line is the current one + previousLine = make([]string, total) + for y := 0; y < total; y++ { + previousLine[y] = strings.TrimRight(strings.Join(columns[y], " "), " ") //Store the full line for multi-lines cells + } + //Returns the newly added line and wether or not a border should be displayed above. + return previousLine, displayCellBorder +} + +func (t *Table) parseDimension(str string, colKey, rowKey int) []string { + var ( + raw []string + maxWidth int + ) + + raw = getLines(str) + maxWidth = 0 + for _, line := range raw { + if w := DisplayWidth(line); w > maxWidth { + maxWidth = w + } + } + + // If wrapping, ensure that all paragraphs in the cell fit in the + // specified width. + if t.autoWrap { + // If there's a maximum allowed width for wrapping, use that. + if maxWidth > t.mW { + maxWidth = t.mW + } + + // In the process of doing so, we need to recompute maxWidth. 
This + // is because perhaps a word in the cell is longer than the + // allowed maximum width in t.mW. + newMaxWidth := maxWidth + newRaw := make([]string, 0, len(raw)) + + if t.reflowText { + // Make a single paragraph of everything. + raw = []string{strings.Join(raw, " ")} + } + for i, para := range raw { + paraLines, _ := WrapString(para, maxWidth) + for _, line := range paraLines { + if w := DisplayWidth(line); w > newMaxWidth { + newMaxWidth = w + } + } + if i > 0 { + newRaw = append(newRaw, " ") + } + newRaw = append(newRaw, paraLines...) + } + raw = newRaw + maxWidth = newMaxWidth + } + + // Store the new known maximum width. + v, ok := t.cs[colKey] + if !ok || v < maxWidth || v == 0 { + t.cs[colKey] = maxWidth + } + + // Remember the number of lines for the row printer. + h := len(raw) + v, ok = t.rs[rowKey] + + if !ok || v < h || v == 0 { + t.rs[rowKey] = h + } + //fmt.Printf("Raw %+v %d\n", raw, len(raw)) + return raw +} diff --git a/vendor/github.com/olekukonko/tablewriter/table_with_color.go b/vendor/github.com/olekukonko/tablewriter/table_with_color.go new file mode 100644 index 000000000..ae7a364ae --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/table_with_color.go @@ -0,0 +1,136 @@ +package tablewriter + +import ( + "fmt" + "strconv" + "strings" +) + +const ESC = "\033" +const SEP = ";" + +const ( + BgBlackColor int = iota + 40 + BgRedColor + BgGreenColor + BgYellowColor + BgBlueColor + BgMagentaColor + BgCyanColor + BgWhiteColor +) + +const ( + FgBlackColor int = iota + 30 + FgRedColor + FgGreenColor + FgYellowColor + FgBlueColor + FgMagentaColor + FgCyanColor + FgWhiteColor +) + +const ( + BgHiBlackColor int = iota + 100 + BgHiRedColor + BgHiGreenColor + BgHiYellowColor + BgHiBlueColor + BgHiMagentaColor + BgHiCyanColor + BgHiWhiteColor +) + +const ( + FgHiBlackColor int = iota + 90 + FgHiRedColor + FgHiGreenColor + FgHiYellowColor + FgHiBlueColor + FgHiMagentaColor + FgHiCyanColor + FgHiWhiteColor +) + +const ( + Normal = 0 + Bold = 1 + UnderlineSingle = 4 + Italic +) + +type Colors []int + +func startFormat(seq string) string { + return fmt.Sprintf("%s[%sm", ESC, seq) +} + +func stopFormat() string { + return fmt.Sprintf("%s[%dm", ESC, Normal) +} + +// Making the SGR (Select Graphic Rendition) sequence. 
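+// For example, Colors{Bold, FgRedColor} becomes "1;31", which format() emits as "\033[1;31m<text>\033[0m".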
+func makeSequence(codes []int) string { + codesInString := []string{} + for _, code := range codes { + codesInString = append(codesInString, strconv.Itoa(code)) + } + return strings.Join(codesInString, SEP) +} + +// Adding ANSI escape sequences before and after string +func format(s string, codes interface{}) string { + var seq string + + switch v := codes.(type) { + + case string: + seq = v + case []int: + seq = makeSequence(v) + case Colors: + seq = makeSequence(v) + default: + return s + } + + if len(seq) == 0 { + return s + } + return startFormat(seq) + s + stopFormat() +} + +// Adding header colors (ANSI codes) +func (t *Table) SetHeaderColor(colors ...Colors) { + if t.colSize != len(colors) { + panic("Number of header colors must be equal to number of headers.") + } + for i := 0; i < len(colors); i++ { + t.headerParams = append(t.headerParams, makeSequence(colors[i])) + } +} + +// Adding column colors (ANSI codes) +func (t *Table) SetColumnColor(colors ...Colors) { + if t.colSize != len(colors) { + panic("Number of column colors must be equal to number of headers.") + } + for i := 0; i < len(colors); i++ { + t.columnsParams = append(t.columnsParams, makeSequence(colors[i])) + } +} + +// Adding column colors (ANSI codes) +func (t *Table) SetFooterColor(colors ...Colors) { + if len(t.footers) != len(colors) { + panic("Number of footer colors must be equal to number of footer.") + } + for i := 0; i < len(colors); i++ { + t.footerParams = append(t.footerParams, makeSequence(colors[i])) + } +} + +func Color(colors ...int) []int { + return colors +} diff --git a/vendor/github.com/olekukonko/tablewriter/util.go b/vendor/github.com/olekukonko/tablewriter/util.go new file mode 100644 index 000000000..380e7ab35 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/util.go @@ -0,0 +1,93 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. +// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "math" + "regexp" + "strings" + + "github.com/mattn/go-runewidth" +) + +var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]") + +func DisplayWidth(str string) int { + return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, "")) +} + +// Simple Condition for string +// Returns value based on condition +func ConditionString(cond bool, valid, inValid string) string { + if cond { + return valid + } + return inValid +} + +func isNumOrSpace(r rune) bool { + return ('0' <= r && r <= '9') || r == ' ' +} + +// Format Table Header +// Replace _ , . and spaces +func Title(name string) string { + origLen := len(name) + rs := []rune(name) + for i, r := range rs { + switch r { + case '_': + rs[i] = ' ' + case '.': + // ignore floating number 0.0 + if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) { + rs[i] = ' ' + } + } + } + name = string(rs) + name = strings.TrimSpace(name) + if len(name) == 0 && origLen > 0 { + // Keep at least one character. This is important to preserve + // empty lines in multi-line headers/footers. 
+ name = " " + } + return strings.ToUpper(name) +} + +// Pad String +// Attempts to place string in the center +func Pad(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + gapLeft := int(math.Ceil(float64(gap / 2))) + gapRight := gap - gapLeft + return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight) + } + return s +} + +// Pad String Right position +// This would place string at the left side of the screen +func PadRight(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + return s + strings.Repeat(string(pad), gap) + } + return s +} + +// Pad String Left position +// This would place string at the right side of the screen +func PadLeft(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + return strings.Repeat(string(pad), gap) + s + } + return s +} diff --git a/vendor/github.com/olekukonko/tablewriter/wrap.go b/vendor/github.com/olekukonko/tablewriter/wrap.go new file mode 100644 index 000000000..a092ee1f7 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/wrap.go @@ -0,0 +1,99 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. +// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "math" + "strings" + + "github.com/mattn/go-runewidth" +) + +var ( + nl = "\n" + sp = " " +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapString(s string, lim int) ([]string, int) { + words := strings.Split(strings.Replace(s, nl, sp, -1), sp) + var lines []string + max := 0 + for _, v := range words { + max = runewidth.StringWidth(v) + if max > lim { + lim = max + } + } + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, strings.Join(line, sp)) + } + return lines, lim +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, +// WrapString will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each rune as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. 
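To make the minimal-raggedness objective concrete, a small usage sketch (illustrative, not part of the vendored file): with a limit of 6, a greedy wrapper would break after "bb" and leave a very short middle line, whereas minimising the squared slack prefers the more even split.

```go
package main

import (
	"fmt"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Greedy wrapping would give "aaa bb" / "cc" / "ddddd"; the squared-error
	// objective instead fills the lines more evenly.
	lines, width := tablewriter.WrapString("aaa bb cc ddddd", 6)
	fmt.Printf("%q %d\n", lines, width)
	// Output: ["aaa" "bb cc" "ddddd"] 6
}
```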
+func WrapWords(words []string, spc, lim, pen int) [][]string { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = runewidth.StringWidth(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j]) + } + } + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + var lines [][]string + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} + +// getLines decomposes a multiline string into a slice of strings. +func getLines(s string) []string { + return strings.Split(s, nl) +} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore index 565f0f732..c57100a59 100644 --- a/vendor/github.com/opentracing/opentracing-go/.gitignore +++ b/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -1,13 +1 @@ -# IntelliJ project files -.idea/ -opentracing-go.iml -opentracing-go.ipr -opentracing-go.iws - -# Test results -*.cov -*.html -test.log - -# Build dir -build/ +coverage.txt diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml index 0538f1bfc..8d5b75e41 100644 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -1,14 +1,20 @@ language: go -go: - - 1.6 - - 1.7 - - 1.8 - - tip +matrix: + include: + - go: "1.11.x" + - go: "1.12.x" + - go: "tip" + env: + - LINT=true + - COVERAGE=true install: - - go get -d -t github.com/opentracing/opentracing-go/... - - go get -u github.com/golang/lint/... + - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi + - go get -u github.com/stretchr/testify/... + script: - - make test lint + - make test - go build ./... 
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi + - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md index 1fc9fdf7f..7c14febe1 100644 --- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -1,14 +1,46 @@ Changes by Version ================== -1.1.0 (unreleased) +1.1.0 (2019-03-23) ------------------- -- Deprecate InitGlobalTracer() in favor of SetGlobalTracer() +Notable changes: +- The library is now released under Apache 2.0 license +- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) +- 'golang.org/x/net/context' is replaced with 'context' from the standard library +List of all changes: + +- Export StartSpanFromContextWithTracer (#214) +- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) +- Use Set() instead of Add() in HTTPHeadersCarrier (#191) +- Update license to Apache 2.0 (#181) +- Replace 'golang.org/x/net/context' with 'context' (#176) +- Port of Python opentracing/harness/api_check.py to Go (#146) +- Fix race condition in MockSpan.Context() (#170) +- Add PeerHostIPv4.SetString() (#155) +- Add a Noop log field type to log to allow for optional fields (#150) + + +1.0.2 (2017-04-26) +------------------- + +- Add more semantic tags (#139) + + +1.0.1 (2017-02-06) +------------------- + +- Correct spelling in comments +- Address race in nextMockID() (#123) +- log: avoid panic marshaling nil error (#131) +- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) +- Drop Go 1.5 that fails in Travis (#129) +- Add convenience methods Key() and Value() to log.Field +- Add convenience methods to log.Field (2 years, 6 months ago) 1.0.0 (2016-09-26) ------------------- -- This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec) +- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE index 148509a40..f0027349e 100644 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -1,21 +1,201 @@ -The MIT License (MIT) - -Copyright (c) 2016 The OpenTracing Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile index 2f491f157..62abb63f5 100644 --- a/vendor/github.com/opentracing/opentracing-go/Makefile +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -1,26 +1,15 @@ -PACKAGES := . ./mocktracer/... ./ext/... - .DEFAULT_GOAL := test-and-lint -.PHONE: test-and-lint - +.PHONY: test-and-lint test-and-lint: test lint .PHONY: test test: - go test -v -cover ./... + go test -v -cover -race ./... +.PHONY: cover cover: - @rm -rf cover-all.out - $(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;) - @grep mode: cover.out > coverage.out - @cat cover-all.out >> coverage.out - go tool cover -html=coverage.out -o cover.html - @rm -rf cover.out cover-all.out coverage.out - -cover-pkg: - go test -coverprofile cover.out $(PKG) - @grep -v mode: cover.out >> cover-all.out + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... .PHONY: lint lint: @@ -29,4 +18,3 @@ lint: @# Run again with magic to exit non-zero if golint outputs anything. @! (golint ./... | read dummy) go vet ./... - diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md index 1fb77d227..6ef1d7c9d 100644 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -1,4 +1,5 @@ [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) # OpenTracing API for Go @@ -7,8 +8,8 @@ This package is a Go platform API for OpenTracing. ## Required Reading In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](http://opentracing.io) and -[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically. +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. ## API overview for those adding instrumentation @@ -26,7 +27,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call import ".../some_tracing_impl" func main() { - opentracing.InitGlobalTracer( + opentracing.SetGlobalTracer( // tracing impl specific: some_tracing_impl.New(...), ) @@ -34,7 +35,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call } ``` -##### Non-Singleton initialization +#### Non-Singleton initialization If you prefer direct control to singletons, manage ownership of the `opentracing.Tracer` implementation explicitly. @@ -133,6 +134,21 @@ reference. 
} ``` +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + #### Goroutine-safety The entire public API is goroutine-safe and does not require external @@ -145,3 +161,11 @@ Tracing system implementors may be able to reuse or copy-paste-modify the `basic ## API compatibility For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go index c67ab5eef..52e889582 100644 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -1,6 +1,6 @@ package ext -import opentracing "github.com/opentracing/opentracing-go" +import "github.com/opentracing/opentracing-go" // These constants define common tag names recommended for better portability across // tracing systems and languages/platforms. @@ -74,7 +74,7 @@ var ( PeerHostname = stringTagName("peer.hostname") // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = uint32TagName("peer.ipv4") + PeerHostIPv4 = ipv4Tag("peer.ipv4") // PeerHostIPv6 records IP v6 host address of the peer PeerHostIPv6 = stringTagName("peer.ipv6") @@ -196,3 +196,15 @@ type boolTagName string func (tag boolTagName) Set(span opentracing.Span, value bool) { span.SetTag(string(tag), value) } + +type ipv4Tag string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag ipv4Tag) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go index 8c8e793ff..4f7066a92 100644 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -1,7 +1,12 @@ package opentracing +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + var ( - globalTracer Tracer = NoopTracer{} + globalTracer = registeredTracer{NoopTracer{}, false} ) // SetGlobalTracer sets the [singleton] opentracing.Tracer returned by @@ -11,22 +16,27 @@ var ( // Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` // (etc) globals are noops. func SetGlobalTracer(tracer Tracer) { - globalTracer = tracer + globalTracer = registeredTracer{tracer, true} } // GlobalTracer returns the global singleton `Tracer` implementation. 
// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop // implementation that drops all data handed to it. func GlobalTracer() Tracer { - return globalTracer + return globalTracer.tracer } // StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.StartSpan(operationName, opts...) + return globalTracer.tracer.StartSpan(operationName, opts...) } // InitGlobalTracer is deprecated. Please use SetGlobalTracer. func InitGlobalTracer(tracer Tracer) { SetGlobalTracer(tracer) } + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go index 222a65202..08c00c04e 100644 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -1,6 +1,6 @@ package opentracing -import "golang.org/x/net/context" +import "context" type contextKey struct{} @@ -41,17 +41,20 @@ func SpanFromContext(ctx context.Context) Span { // ... // } func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) } -// startSpanFromContextWithTracer is factored out for testing purposes. -func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - var span Span +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// It's behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. +func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { if parentSpan := SpanFromContext(ctx); parentSpan != nil { opts = append(opts, ChildOf(parentSpan.Context())) - span = tracer.StartSpan(operationName, opts...) - } else { - span = tracer.StartSpan(operationName, opts...) } + span := tracer.StartSpan(operationName, opts...) return span, ContextWithSpan(ctx, span) } diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go index d2cd39a16..50feea341 100644 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -20,6 +20,7 @@ const ( errorType objectType lazyLoggerType + noopType ) // Field instances are constructed via LogBool, LogString, and so on. @@ -152,6 +153,25 @@ func Lazy(ll LazyLogger) Field { } } +// Noop creates a no-op log field that should be ignored by the tracer. 
+// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + // Encoder allows access to the contents of a Field (via a call to // Field.Marshal). // @@ -203,6 +223,8 @@ func (lf Field) Marshal(visitor Encoder) { visitor.EmitObject(lf.key, lf.interfaceVal) case lazyLoggerType: visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank } } @@ -234,6 +256,8 @@ func (lf Field) Value() interface{} { return math.Float64frombits(uint64(lf.numericVal)) case errorType, objectType, lazyLoggerType: return lf.interfaceVal + case noopType: + return nil default: return nil } diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go index 9583fc53a..b0c275eb0 100644 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -72,18 +72,18 @@ const ( // // For Tracer.Extract(): the carrier must be a `TextMapReader`. // - // See HTTPHeaderCarrier for an implementation of both TextMapWriter + // See HTTPHeadersCarrier for an implementation of both TextMapWriter // and TextMapReader that defers to an http.Header instance for storage. // For example, Inject(): // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) // err := span.Tracer().Inject( - // span, opentracing.HTTPHeaders, carrier) + // span.Context(), opentracing.HTTPHeaders, carrier) // // Or Extract(): // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // span, err := tracer.Extract( + // clientContext, err := tracer.Extract( // opentracing.HTTPHeaders, carrier) // HTTPHeaders @@ -144,15 +144,15 @@ func (c TextMapCarrier) Set(key, val string) { // // Example usage for server side: // -// carrier := opentracing.HttpHeadersCarrier(httpReq.Header) -// spanContext, err := tracer.Extract(opentracing.HttpHeaders, carrier) +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) // // Example usage for client side: // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) // err := tracer.Inject( // span.Context(), -// opentracing.HttpHeaders, +// opentracing.HTTPHeaders, // carrier) // type HTTPHeadersCarrier http.Header @@ -160,7 +160,7 @@ type HTTPHeadersCarrier http.Header // Set conforms to the TextMapWriter interface. func (c HTTPHeadersCarrier) Set(key, val string) { h := http.Header(c) - h.Add(key, val) + h.Set(key, val) } // ForeachKey conforms to the TextMapReader interface. diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go index f6c3234ac..0d3fb5341 100644 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -41,6 +41,8 @@ type Span interface { Context() SpanContext // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. SetOperationName(operationName string) Span // Adds a tag to the span. 
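Since both setters are now documented as returning the receiver, calls can be chained. A minimal sketch of that usage (hypothetical operation and tag names, not taken from the vendored files; with no tracer registered the span is a no-op, which keeps the snippet runnable):

```go
package main

import (
	"github.com/opentracing/opentracing-go"
)

func main() {
	// StartSpan uses the global tracer (a no-op by default); the chaining
	// works identically with a real tracer implementation.
	span := opentracing.StartSpan("fetch-feed")
	defer span.Finish()

	span.
		SetOperationName("fetch-feed-v2").
		SetTag("component", "example")
}
```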
@@ -51,6 +53,8 @@ type Span interface { // other tag value types is undefined at the OpenTracing level. If a // tracing system does not know how to handle a particular value type, it // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. SetTag(key string, value interface{}) Span // LogFields is an efficient and type-checked way to record key:value diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go index fd77c1df3..715f0cedf 100644 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -30,7 +30,7 @@ type Tracer interface { // sp := tracer.StartSpan( // "GetFeed", // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag("user_agent", loggedReq.UserAgent), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, // opentracing.StartTime(loggedReq.Timestamp), // ) // @@ -44,8 +44,7 @@ type Tracer interface { // and each has an expected carrier type. // // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). + // used by `context.Context` (see https://godoc.org/context#WithValue). // // Example usage (sans error handling): // diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore new file mode 100644 index 000000000..7b5883475 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore new file mode 100644 index 000000000..e6ba63a5c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -0,0 +1,5 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md new file mode 100644 index 000000000..98b9893d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal is the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or +small, is more than welcomed! + +### Ask questions + +Any question you may have, somebody else might have it too. Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and thorough the +source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. 
To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on Github, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug! Sorry to hear that :(. Help us and other track them down and +fix by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. +* Bug fixes need to be accompanied with regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like that: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potential ask for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When creating a pull requests, all tests will be ran on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. 
+ +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 000000000..fffdb0166 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000..f414553c2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,247 @@ +The bulk of github.com/pelletier/go-toml is distributed under the MIT license +(see below), with the exception of localtime.go and localtime.test.go. +Those two files have been copied over from Google's civil library at revision +ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache +2.0 license (see below). + + +github.com/pelletier/go-toml: + + +The MIT License (MIT) + +Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +localtime.go, localtime_test.go: + +Originals: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go +Changes: + * Renamed files from civil* to localtime*. + * Package changed from civil to toml. + * 'Local' prefix added to all structs. 
+License: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 000000000..9e4503aea --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..041cdc4a2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 000000000..6c061712b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,176 @@ +# go-toml + +Go library for the [TOML](https://toml.io/) format. + +This library supports TOML version +[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) + +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) +[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) +[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) + + +## Development status + +**ℹ️ Consider go-toml v2!** + +The next version of go-toml is in [active development][v2-dev], and +[nearing completion][v2-map]. + +Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], +and [much faster][v2-bench]. If you only need reading and writing TOML documents +(majority of cases), those features are implemented and the API unlikely to +change. 
+ +The remaining features (Document structure editing and tooling) will be added +shortly. While pull-requests are welcome on v1, no active development is +expected on it. When v2.0.0 is released, v1 will be deprecated. + +👉 [go-toml v2][v2] + +[v2]: https://github.com/pelletier/go-toml/tree/v2 +[v2-map]: https://github.com/pelletier/go-toml/discussions/506 +[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 +[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed +[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using Tree +* Marshaling and unmarshaling to and from data structures +* Line & column position data for all parsed elements +* [Query support similar to JSON-Path](query/) +* Syntax errors contain line and column numbers + +## Import + +```go +import "github.com/pelletier/go-toml" +``` + +## Usage example + +Read a TOML document: + +```go +config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword"`) +// retrieve data directly +user := config.Get("postgres.user").(string) + +// or using an intermediate object +postgresConfig := config.Get("postgres").(*toml.Tree) +password := postgresConfig.Get("password").(string) +``` + +Or use Unmarshal: + +```go +type Postgres struct { + User string + Password string +} +type Config struct { + Postgres Postgres +} + +doc := []byte(` +[Postgres] +User = "pelletier" +Password = "mypassword"`) + +config := Config{} +toml.Unmarshal(doc, &config) +fmt.Println("user=", config.Postgres.User) +``` + +Or use a query: + +```go +// use a query to gather elements without walking the tree +q, _ := query.Compile("$..[user,password]") +results := q.Execute(config) +for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) +} +``` + +## Documentation + +The documentation and additional examples are available at +[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). + +## Tools + +Go-toml provides three handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also available as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +`go test ./...` + +### Fuzzing + +The script `./fuzz.sh` is available to +run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). 
The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 000000000..4af198b4d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,188 @@ +trigger: +- master + +stages: +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.16: + goVersion: '1.16' + imageName: 'ubuntu-latest' + mac 1.16: + goVersion: '1.16' + imageName: 'macOS-latest' + windows 1.16: + goVersion: '1.16' + imageName: 'windows-latest' + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' 
+- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.16 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' + tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100644 index 000000000..a69d3040f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -ex + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. -benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. 
-benchmem | tee -a ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000..a1406a32b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 000000000..780d9c68f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 000000000..f45bf88b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000..14570c8d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100644 index 000000000..3204b4c44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! -e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod new file mode 100644 index 000000000..7d29a0a66 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/go.mod @@ -0,0 +1,3 @@ +module github.com/pelletier/go-toml + +go 1.12 diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000..e091500b2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,112 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "errors" + "fmt" +) + +// Convert the bare key group string to an array. +// The input supports double quotation and single quotation, +// but escape sequences are not supported. Lexers must unescape them beforehand. +func parseKey(key string) ([]string, error) { + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' 
{ + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '.' { + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") + } + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) + } + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || isDigit(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 000000000..313908e3e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,1031 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + brackets []rune + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format 
string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '}': // after '{' + return l.lexRightCurlyBrace + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + return l.lexLeftBracket + case ']': + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue + } + return l.lexVoid + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + if next == '+' || next == '-' { + return l.lexNumber + } + + if isDigit(next) { + return l.lexDateTimeOrNumber + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { + // Could be either a date/time, or a digit. + // The options for date/times are: + // YYYY-... => date or date-time + // HH:... => time + // Anything else should be a number. + + lookAhead := l.peekString(5) + if len(lookAhead) < 3 { + return l.lexNumber() + } + + for idx, r := range lookAhead { + if !isDigit(r) { + if idx == 2 && r == ':' { + return l.lexDateTimeOrTime() + } + if idx == 4 && r == '-' { + return l.lexDateTimeOrTime() + } + return l.lexNumber() + } + } + return l.lexNumber() +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') + return l.lexVoid +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + + // we already know those two are digits + l.next() + l.next() + + // Got 2 digits. At that point it could be either a time or a date(-time). 
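+	// For example, after consuming "07" of "07:32:00" the next rune is ':', so
+	// the input lexes as a time; after "19" of "1979-05-27" it is a digit, so
+	// the input lexes as a date or date-time.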
+ + r := l.next() + if r == ':' { + return l.lexTime() + } + + return l.lexDateTime() +} + +func (l *tomlLexer) lexDateTime() tomlLexStateFn { + // This state accepts an offset date-time, a local date-time, or a local date. + // + // v--- cursor + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + + // date + + // already checked by lexRvalue + l.next() // digit + l.next() // - + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid month digit in date: %c", r) + } + } + + r := l.next() + if r != '-' { + return l.errorf("expected - to separate month of a date, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid day digit in date: %c", r) + } + } + + l.emit(tokenLocalDate) + + r = l.peek() + + if r == eof { + + return l.lexRvalue + } + + if r != ' ' && r != 'T' { + return l.errorf("incorrect date/time separation character: %c", r) + } + + if r == ' ' { + lookAhead := l.peekString(3)[1:] + if len(lookAhead) < 2 { + return l.lexRvalue + } + for _, r := range lookAhead { + if !isDigit(r) { + return l.lexRvalue + } + } + } + + l.skip() // skip the T or ' ' + + // time + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' { + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + + return l.lexTimeOffset + +} + +func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { + // potential offset + + // Z + // -07:00 + // +07:00 + // nothing + + r := l.peek() + + if r == 'Z' { + l.next() + l.emit(tokenTimeOffset) + } else if r == '+' || r == '-' { + l.next() + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time offset: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time offset hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time offset: %c", r) + } + } + + l.emit(tokenTimeOffset) + } + + return l.lexRvalue +} + +func (l *tomlLexer) lexTime() tomlLexStateFn { + // v--- cursor + // 07:32:00 + // 00:32:00.999999 + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r := l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' 
{ + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + return l.lexRvalue + +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. +func (l *tomlLexer) lexKey() tomlLexStateFn { + var sb strings.Builder + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue + } else if r == '.' 
{ + // skip + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + sb.WriteRune(r) + l.next() + } + l.emitWithValue(tokenKey, sb.String()) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return sb.String(), nil + } + + next := l.peek() + if next == eof { + break + } + sb.WriteRune(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
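+// Escape sequences (\n, \t, \", \\, \uXXXX, \UXXXXXXXX, ...) are decoded into
+// their corresponding runes as the input is consumed.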
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return sb.String(), nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + sb.WriteString("\"") + l.next() + case 'n': + sb.WriteString("\n") + l.next() + case 'b': + sb.WriteString("\b") + l.next() + case 'f': + sb.WriteString("\f") + l.next() + case '/': + sb.WriteString("/") + l.next() + case 't': + sb.WriteString("\t") + l.next() + case 'r': + sb.WriteString("\r") + l.next() + case '\\': + sb.WriteString("\\") + l.next() + case 'u': + l.next() + var code strings.Builder + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code.String()) + } + sb.WriteRune(rune(intcode)) + case 'U': + l.next() + var code strings.Builder + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code.String()) + } + sb.WriteRune(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + sb.WriteRune(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the 
key till "]" but only bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' { + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 000000000..9dfe4b9e6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,287 @@ +// Implementation of TOML's local date/time. 
+// +// Copied over from Google's civil to avoid pulling all the Google dependencies. +// Originals: +// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go +// Changes: +// * Renamed files from civil* to localtime*. +// * Package changed from civil to toml. +// * 'Local' prefix added to all structs. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. 
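+// For example, 2021-09-03 is 2 days since 2021-09-01 and -2 days since 2021-09-05.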
+func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. +type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). 
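+// For example, LocalTime{Hour: 7, Minute: 32} marshals to the text "07:32:00".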
+func (t LocalTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseLocalTime. +func (t *LocalTime) UnmarshalText(data []byte) error { + var err error + *t, err = ParseLocalTime(string(data)) + return err +} + +// A LocalDateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type LocalDateTime struct { + Date LocalDate + Time LocalTime +} + +// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. + +// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. +func LocalDateTimeOf(t time.Time) LocalDateTime { + return LocalDateTime{ + Date: LocalDateOf(t), + Time: LocalTimeOf(t), + } +} + +// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. +// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseLocalTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. +func ParseLocalDateTime(s string) (LocalDateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return LocalDateTime{}, err + } + } + return LocalDateTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalDate. +func (dt LocalDateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt LocalDateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the LocalDateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then +// both +// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.LocalDateTime{ +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, +// civil.LocalTime{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt LocalDateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000..3443c3545 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,1308 @@ +package toml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagLiteral = "literal" + tagDefault = "default" +) + +type tomlOpts struct { + name string + nameFromTag bool + comment string + commented bool + multiline bool + literal bool + include bool + omitempty bool + defaultValue string +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +type annotation struct { + tag string + comment string + commented string + multiline string + literal string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + literal: tagLiteral, + defaultValue: tagDefault, +} + +type MarshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. + OrderAlphabetical MarshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. + OrderPreserve +) + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + +// Check if the given marshal type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return isTimeType(mtype) + default: + return false + } +} + +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return 
isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) + default: + return false + } +} + +// Check if the given marshal type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). + +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). 
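+
+As an illustration only (this hypothetical struct is not part of the package),
+the annotations above can be combined like so:
+
+    type Postgres struct {
+        User     string `toml:"user" comment:"database user"`
+        Password string `toml:"password,omitempty"`
+    }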
+ +Tree structural types and corresponding marshal types: + + *Tree (*)struct, (*)map[string]interface{} + []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} + []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) + interface{} (*)primitive + +Tree primitive types and corresponding marshal types: + + uint64 uint, uint8-uint64, pointers to same + int64 int, int8-uint64, pointers to same + float64 float32, float64, pointers to same + string string, pointers to same + bool bool, pointers to same + time.LocalTime time.LocalTime{}, pointers to same + +For additional flexibility, use the Encoder API. +*/ +func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. +type Encoder struct { + w io.Writer + encOpts + annotation + line int + col int + order MarshalOrder + promoteAnon bool + compactComments bool + indentation string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, + indentation: " ", + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string type keys with quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +// ArraysWithOneElementPerLine sets up the encoder to encode arrays +// with more than one element on multiple lines instead of one. +// +// For example: +// +// A = [1,2,3] +// +// Becomes +// +// A = [ +// 1, +// 2, +// 3, +// ] +func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { + e.arraysOneElementPerLine = v + return e +} + +// Order allows to change in which order fields will be written to the output stream. +func (e *Encoder) Order(ord MarshalOrder) *Encoder { + e.order = ord + return e +} + +// Indentation allows to change indentation when marshalling. +func (e *Encoder) Indentation(indent string) *Encoder { + e.indentation = indent + return e +} + +// SetTagName allows changing default tag "toml" +func (e *Encoder) SetTagName(v string) *Encoder { + e.tag = v + return e +} + +// SetTagComment allows changing default tag "comment" +func (e *Encoder) SetTagComment(v string) *Encoder { + e.comment = v + return e +} + +// SetTagCommented allows changing default tag "commented" +func (e *Encoder) SetTagCommented(v string) *Encoder { + e.commented = v + return e +} + +// SetTagMultiline allows changing default tag "multiline" +func (e *Encoder) SetTagMultiline(v string) *Encoder { + e.multiline = v + return e +} + +// PromoteAnonymous allows to change how anonymous struct fields are marshaled. +// Usually, they are marshaled as if the inner exported fields were fields in +// the outer struct. However, if an anonymous struct field is given a name in +// its TOML tag, it is treated like a regular struct field with that name. +// rather than being anonymous. +// +// In case anonymous promotion is enabled, all anonymous structs are promoted +// and treated like regular struct fields. 
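+//
+// For example: toml.NewEncoder(w).PromoteAnonymous(true).Encode(v).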
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { + e.promoteAnon = promote + return e +} + +// CompactComments removes the new line before each comment in the tree. +func (e *Encoder) CompactComments(cc bool) *Encoder { + e.compactComments = cc + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { + // Check if indentation is valid + for _, char := range e.indentation { + if !isSpace(char) { + return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") + } + } + + mtype := reflect.TypeOf(v) + if mtype == nil { + return []byte{}, errors.New("nil cannot be marshaled to TOML") + } + + switch mtype.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Ptr: + if mtype.Elem().Kind() != reflect.Struct { + return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") + } + if reflect.ValueOf(v).IsNil() { + return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") + } + default: + return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") + } + + sval := reflect.ValueOf(v) + if isCustomMarshaler(mtype) { + return callCustomMarshaler(sval) + } + if isTextMarshaler(mtype) { + return callTextMarshaler(sval) + } + t, err := e.valueToTree(mtype, sval) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) + + return buf.Bytes(), err +} + +// Create next tree with a position based on Encoder.line +func (e *Encoder) nextTree() *Tree { + return newTreeWithPosition(Position{Line: e.line, Col: 1}) +} + +// Convert given marshal struct or map value to toml tree +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToTree(mtype.Elem(), mval.Elem()) + } + tval := e.nextTree() + switch mtype.Kind() { + case reflect.Struct: + switch mval.Interface().(type) { + case Tree: + reflect.ValueOf(tval).Elem().Set(mval) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef, e.annotation) + if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + val = e.wrapTomlValue(val, tval) + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + Literal: opts.literal, + }, val) + } + } + } + } + case reflect.Map: + keys := mval.MapKeys() + if e.order == OrderPreserve && len(keys) > 0 { + // Sorting []reflect.Value is not straight forward. + // + // OrderPreserve will support deterministic results when string is used + // as the key to maps. 
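+			//
+			// Keys are therefore sorted as plain strings and converted back with
+			// reflect.Value.Convert so that named string key types are preserved.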
+ typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { + mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + val = e.wrapTomlValue(val, tval) + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.SetPath([]string{key.String()}, val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): + return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { + return fmt.Sprint(mval), nil + } + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return mval.String(), nil + case reflect.Struct: + return mval.Interface(), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } 
+ return nil +} + +// Create a toml value with the current line number as the position line +func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { + _, isTree := val.(*Tree) + _, isTreeS := val.([]*Tree) + if isTree || isTreeS { + e.line++ + return val + } + + ret := &tomlValue{ + value: val, + position: Position{ + e.line, + parent.position.Col, + }, + } + e.line++ + return ret +} + +// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. +// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for +// sub-structs, and only definite types can be unmarshaled. +func (t *Tree) Unmarshal(v interface{}) error { + d := Decoder{tval: t, tagName: tagFieldName} + return d.unmarshal(v) +} + +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + _, err := t.WriteTo(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Unmarshal parses the TOML-encoded data and stores the result in the value +// pointed to by v. Behavior is similar to the Go json encoder, except that there +// is no concept of an Unmarshaler interface or UnmarshalTOML function for +// sub-structs, and currently only definite types can be unmarshaled to (i.e. no +// `interface{}`). +// +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// default:"foo" Provides a default value. +// +// For default values, only fields of the following types are supported: +// * string +// * bool +// * int +// * int64 +// * float64 +// +// See Marshal() documentation for types mapping table. +func Unmarshal(data []byte, v interface{}) error { + t, err := LoadReader(bytes.NewReader(data)) + if err != nil { + return err + } + return t.Unmarshal(v) +} + +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts + tagName string + strict bool + visitor visitorState +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + tagName: tagFieldName, + } +} + +// Decode reads a TOML-encoded value from it's input +// and unmarshals it in the value pointed at by v. +// +// See the documentation for Marshal for details. +func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +// SetTagName allows changing default tag "toml" +func (d *Decoder) SetTagName(v string) *Decoder { + d.tagName = v + return d +} + +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. 
+func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) + if err != nil { + return err + } + if err := d.visitor.validate(); err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. + if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } + baseKey := opts.name + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + // Check if the provided number has a non-numeric extension. + var hasExtension bool + if len(opts.defaultValue) > 0 { + lastChar := opts.defaultValue[len(opts.defaultValue)-1] + if lastChar < '0' || lastChar > '9' { + hasExtension = true + } + } + // If the value is a time.Duration with extension, parse as duration. + // If the value is an int64 or a time.Duration without extension, parse as number. + if hasExtension && mvalf.Type().String() == "time.Duration" { + val, err = time.ParseDuration(opts.defaultValue) + } else { + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + } + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + d.visitor.push(key) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array 
with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + switch t := tval.(type) { + case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + + if isTree(mtype) { + return d.valueFromTree(mtype, t, mval11) + } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + d.visitor.visit() + mvalPtr := reflect.New(mtype) + + // Check if pointer to value implements the Unmarshaler interface. + if isCustomUnmarshaler(mvalPtr.Type()) { + if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + // Check if pointer to value implements the encoding.TextUnmarshaler. 
+ if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get(an.comment); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + commented: commented, + multiline: multiline, 
+ literal: literal, + include: true, + omitempty: false, + defaultValue: defaultValue, + } + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 000000000..792b72ed7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml 
new file mode 100644 index 000000000..ba5e110bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000..f5e1a44fb --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,508 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) + default: + p.raiseError(tok, "unexpected token %s", tok.typ) + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + 
newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) 
+ + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var errInvalidUnderscore = errors.New("invalid use of _ in number") + +func numberContainsInvalidUnderscore(value string) error { + // For large numbers, you may use underscores between digits to enhance + // readability. Each underscore must be surrounded by at least one digit on + // each side. + + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscore + } + } + hasBefore = isDigit(r) + } + return nil +} + +var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") + +func hexNumberContainsInvalidUnderscore(value string) error { + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscoreHex + } + } + hasBefore = isHexDigit(r) + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err 
= strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalTime: + val, err := ParseLocalTime(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + // a local date may be followed by: + // * nothing: this is a local date + // * a local time: this is a local date-time + + next := p.peek() + if next == nil || next.typ != tokenLocalTime { + val, err := ParseLocalDate(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + localDate := tok + localTime := p.getToken() + + next = p.peek() + if next == nil || next.typ != tokenTimeOffset { + v := localDate.val + "T" + localTime.val + val, err := ParseLocalDateTime(v) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + offset := p.getToken() + + layout := time.RFC3339Nano + v := localDate.val + "T" + localTime.val + offset.val + val, err := time.ParseInLocation(layout, v, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + default: + panic(fmt.Errorf("unhandled token: %v", tok)) + } + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey, tokenInteger, tokenString: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + + value := p.parseRvalue() + tree.SetPath(parsedKey, value) + case tokenComma: + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + tree.inline = true + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(newTree()) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if reflect.TypeOf(val) != arrayType { + arrayType = nil + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } 
+ } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. + if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 000000000..c17bff87b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid(), +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether or not the position is valid (i.e. with negative or +// null values) +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 000000000..b437fdd3b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,136 @@ +package toml + +import "fmt" + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenLocalDate + tokenLocalTime + tokenTimeOffset + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + "Float", + "Inf", + "NaN", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "LocalDate", + "LocalTime", + "TimeOffset", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func 
isAlphanumeric(r rune) bool { + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' +} + +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 000000000..6d82587c4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,533 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + literal bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + inline bool + position Position +} + +func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: pos, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
+func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// SetPositionPath sets the position of element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree position is set. 
+func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool + Literal bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). 
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for i, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch v := value.(type) { + case *Tree: + v.comment = opts.Comment + v.commented = opts.Commented + toInsert = value + case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } + toInsert = value + case *tomlValue: + v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline + v.literal = opts.Literal + toInsert = v + default: + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + literal: opts.Literal, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} + +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { + case *Tree: + delete(node.values, item) + return nil + } + return errors.New("no such key to delete") +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for i, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree.position = pos + tree.inline = subtree.inline + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + + tree = parseToml(lexToml(b)) + return +} + +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go new file mode 100644 index 000000000..4136b4625 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomlpub.go @@ -0,0 +1,71 @@ +package toml + +// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
+type PubTOMLValue = tomlValue + +func (ptv *PubTOMLValue) Value() interface{} { + return ptv.value +} +func (ptv *PubTOMLValue) Comment() string { + return ptv.comment +} +func (ptv *PubTOMLValue) Commented() bool { + return ptv.commented +} +func (ptv *PubTOMLValue) Multiline() bool { + return ptv.multiline +} +func (ptv *PubTOMLValue) Position() Position { + return ptv.position +} + +func (ptv *PubTOMLValue) SetValue(v interface{}) { + ptv.value = v +} +func (ptv *PubTOMLValue) SetComment(s string) { + ptv.comment = s +} +func (ptv *PubTOMLValue) SetCommented(c bool) { + ptv.commented = c +} +func (ptv *PubTOMLValue) SetMultiline(m bool) { + ptv.multiline = m +} +func (ptv *PubTOMLValue) SetPosition(p Position) { + ptv.position = p +} + +// PubTree wrapping Tree in order to access all properties from outside. +type PubTree = Tree + +func (pt *PubTree) Values() map[string]interface{} { + return pt.values +} + +func (pt *PubTree) Comment() string { + return pt.comment +} + +func (pt *PubTree) Commented() bool { + return pt.commented +} + +func (pt *PubTree) Inline() bool { + return pt.inline +} + +func (pt *PubTree) SetValues(v map[string]interface{}) { + pt.values = v +} + +func (pt *PubTree) SetComment(c string) { + pt.comment = c +} + +func (pt *PubTree) SetCommented(c bool) { + pt.commented = c +} + +func (pt *PubTree) SetInline(i bool) { + pt.inline = i +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000..80353500a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,155 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
+// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, 
err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000..c9afbdab7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,552 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string, commented string) string { + var b bytes.Buffer + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n" + commented) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. 
+ tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } + if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + case string: + if tv.multiline { + if tv.literal { + b := strings.Builder{} + b.WriteString("'''\n") + b.Write([]byte(value)) + b.WriteString("\n'''") + return b.String(), nil + } else { + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil + } + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return string(b), nil + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(commented + value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + commented + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ", ") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func getTreeArrayLine(trees []*Tree) (line int) { + // Prevent returning 0 for empty trees + line = int(^uint(0) >> 1) + // get lowest line number >= 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + 
vals = make([]sortNode, 0) + m := make(map[string]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) + } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + if !compactComments { + writtenBytesCountComment, errc := writeStrings(w, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + } + + return bytesCount, nil +} + +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + b, err := t.Marshal() + if err != nil { + return "", err + } + return string(b), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. 
+// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = tomlValueToGo(node.value) + } + } + return result +} + +func tomlValueToGo(v interface{}) interface{} { + if tree, ok := v.(*Tree); ok { + return tree.ToMap() + } + + rv := reflect.ValueOf(v) + + if rv.Kind() != reflect.Slice { + return v + } + values := make([]interface{}, rv.Len()) + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + values[i] = tomlValueToGo(item) + } + return values +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go new file mode 100644 index 000000000..fa326308c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go @@ -0,0 +1,6 @@ +package toml + +// ValueStringRepresentation transforms an interface{} value into its toml string representation. +func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) +} diff --git a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml new file mode 100644 index 000000000..d9284b6b4 --- /dev/null +++ b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml @@ -0,0 +1,15 @@ +build: + dependencies: + before: + - 'source <(curl -fsSL https://raw.githubusercontent.com/phayes/go-scrutinize/master/install-golang)' + + tests: + override: + - + command: 'cd $PROJECTPATH && go-scrutinize' + coverage: + file: 'coverage.xml' + format: 'clover' + analysis: + file: 'checkstyle_report.xml' + format: 'general-checkstyle' \ No newline at end of file diff --git a/vendor/github.com/phayes/checkstyle/LICENSE b/vendor/github.com/phayes/checkstyle/LICENSE new file mode 100644 index 000000000..6dc912f39 --- /dev/null +++ b/vendor/github.com/phayes/checkstyle/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2017, Patrick D Hayes +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/phayes/checkstyle/README.md b/vendor/github.com/phayes/checkstyle/README.md new file mode 100644 index 000000000..358cf6752 --- /dev/null +++ b/vendor/github.com/phayes/checkstyle/README.md @@ -0,0 +1,44 @@ +# checkstyle +[![GoDoc](https://godoc.org/github.com/phayes/checkstyle?status.svg)](https://godoc.org/github.com/phayes/checkstyle) +[![Go Report Card](https://goreportcard.com/badge/github.com/phayes/checkstyle)](https://goreportcard.com/report/github.com/phayes/checkstyle) +[![Build Status](https://scrutinizer-ci.com/g/phayes/checkstyle/badges/build.png?b=master)](https://scrutinizer-ci.com/g/phayes/checkstyle/build-status/master) + +Read and write checksyle_report.xml files with golang + +Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. + +Example usage: + +```go + +import "github.com/phayes/checkstyle" + +// Print XML into human readable format +checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") +if err != nil { + log.Fatal(err) +} +for _, file := range checkStyle.File { + fmt.Println(File.Name) + for _, codingError := range file.Error { + fmt.Println("\t", codingError.Line, codingError.Message) + } +} + +// Create a new XML file from scratch +check := checkstyle.New() + +// Ensure that a file has been added +file := check.EnsureFile("/path/to/file") + +// Create an error on line 10 +codingError := checkstyle.NewError(10, "format", "line must end with a full stop") + +// Add the error to the file +file.AddError(codingError) + +// Output XML +fmt.Print(check) +``` + +For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html diff --git a/vendor/github.com/phayes/checkstyle/checkstyle.go b/vendor/github.com/phayes/checkstyle/checkstyle.go new file mode 100644 index 000000000..cabbd4b40 --- /dev/null +++ b/vendor/github.com/phayes/checkstyle/checkstyle.go @@ -0,0 +1,112 @@ +package checkstyle + +import "encoding/xml" +import "io/ioutil" + +// DefaultCheckStyleVersion defines the default "version" attribute on "" lememnt +var DefaultCheckStyleVersion = "1.0.0" + +// Severity defines a checkstyle severity code +type Severity string + +var ( + SeverityError Severity = "error" + SeverityInfo Severity = "info" + SeverityWarning Severity = "warning" + SeverityIgnore Severity = "ignore" + SeverityNone Severity +) + +// CheckStyle represents a xml element found in a checkstyle_report.xml file. +type CheckStyle struct { + XMLName xml.Name `xml:"checkstyle"` + Version string `xml:"version,attr"` + File []*File `xml:"file"` +} + +// AddFile adds a checkstyle.File with the given filename. +func (cs *CheckStyle) AddFile(csf *File) { + cs.File = append(cs.File, csf) +} + +// GetFile gets a CheckStyleFile with the given filename. 
+func (cs *CheckStyle) GetFile(filename string) (csf *File, ok bool) { + for _, file := range cs.File { + if file.Name == filename { + csf = file + ok = true + return + } + } + return +} + +// EnsureFile ensures that a CheckStyleFile with the given name exists +// Returns either an exiting CheckStyleFile (if a file with that name exists) +// or a new CheckStyleFile (if a file with that name does not exists) +func (cs *CheckStyle) EnsureFile(filename string) (csf *File) { + csf, ok := cs.GetFile(filename) + if !ok { + csf = NewFile(filename) + cs.AddFile(csf) + } + return csf +} + +// String implements Stringer. Returns as xml. +func (cs *CheckStyle) String() string { + checkStyleXML, err := xml.Marshal(cs) + if err != nil { + panic(err) + } + return string(checkStyleXML) +} + +// New returns a new CheckStyle +func New() *CheckStyle { + return &CheckStyle{Version: DefaultCheckStyleVersion, File: []*File{}} +} + +// File represents a xml element. +type File struct { + XMLName xml.Name `xml:"file"` + Name string `xml:"name,attr"` + Error []*Error `xml:"error"` +} + +// AddError adds a checkstyle.Error to the file. +func (csf *File) AddError(cse *Error) { + csf.Error = append(csf.Error, cse) +} + +// NewFile creates a new checkstyle.File +func NewFile(filename string) *File { + return &File{Name: filename, Error: []*Error{}} +} + +// Error represents a xml element +type Error struct { + XMLName xml.Name `xml:"error"` + Line int `xml:"line,attr"` + Column int `xml:"column,attr,omitempty"` + Severity Severity `xml:"severity,attr,omitempty"` + Message string `xml:"message,attr"` + Source string `xml:"source,attr"` +} + +// NewError creates a new checkstyle.Error +// Note that line starts at 0, and column starts at 1 +func NewError(line int, column int, severity Severity, message string, source string) *Error { + return &Error{Line: line, Column: column, Severity: severity, Message: message, Source: source} +} + +// ReadFile reads a checkfile.xml file and returns a CheckStyle object. +func ReadFile(filename string) (*CheckStyle, error) { + checkStyleXML, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + checkStyle := New() + err = xml.Unmarshal(checkStyleXML, checkStyle) + return checkStyle, err +} diff --git a/vendor/github.com/phayes/checkstyle/godoc.go b/vendor/github.com/phayes/checkstyle/godoc.go new file mode 100644 index 000000000..c9662fe9e --- /dev/null +++ b/vendor/github.com/phayes/checkstyle/godoc.go @@ -0,0 +1,36 @@ +/* +Package checkstyle allows the parsing of generation of checkstyle XML files. + +Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. 
+ +Example usage: + // Print XML into human readable format + checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") + if err != nil { + log.Fatal(err) + } + for _, file := range checkStyle.File { + fmt.Println(File.Name) + for _, codingError := range file.Error { + fmt.Println("\t", codingError.Line, codingError.Message) + } + } + + // Create a new XML file from scratch + check := checkstyle.New() + + // Ensure that a file has been added + file := check.EnsureFile("/path/to/file") + + // Create an error on line 10, column 5 + codingError := checkstyle.NewError(10, 5, checkstyle.SeverityWarning, "format", "line must end with a full stop") + + // Add the error to the file + file.AddError(codingError) + + // Output XML + fmt.Print(check) + +For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html +*/ +package checkstyle diff --git a/vendor/github.com/polyfloyd/go-errorlint/LICENSE b/vendor/github.com/polyfloyd/go-errorlint/LICENSE new file mode 100644 index 000000000..b7f88cf1c --- /dev/null +++ b/vendor/github.com/polyfloyd/go-errorlint/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 polyfloyd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go new file mode 100644 index 000000000..263efa3ba --- /dev/null +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go @@ -0,0 +1,137 @@ +package errorlint + +import ( + "fmt" + "go/ast" + "go/types" +) + +var allowedErrors = []struct { + err string + fun string +}{ + // pkg/archive/tar + {err: "io.EOF", fun: "(*tar.Reader).Next"}, + {err: "io.EOF", fun: "(*tar.Reader).Read"}, + // pkg/bufio + {err: "io.EOF", fun: "(*bufio.Reader).Read"}, + {err: "io.EOF", fun: "(*bufio.Reader).ReadByte"}, + {err: "io.EOF", fun: "(*bufio.Reader).ReadBytes"}, + {err: "io.EOF", fun: "(*bufio.Reader).ReadSlice"}, + {err: "io.EOF", fun: "(*bufio.Reader).ReadString"}, + {err: "io.EOF", fun: "(*bufio.Scanner).Scan"}, + // pkg/bytes + {err: "io.EOF", fun: "(*bytes.Buffer).Read"}, + {err: "io.EOF", fun: "(*bytes.Buffer).ReadByte"}, + {err: "io.EOF", fun: "(*bytes.Buffer).ReadBytes"}, + {err: "io.EOF", fun: "(*bytes.Buffer).ReadRune"}, + {err: "io.EOF", fun: "(*bytes.Buffer).ReadString"}, + // pkg/database/sql + {err: "sql.ErrNoRows", fun: "(*database/sql.Row).Scan"}, + // pkg/io + {err: "io.EOF", fun: "(io.Reader).Read"}, + {err: "io.ErrClosedPipe", fun: "(*io.PipeWriter).Write"}, + {err: "io.ErrShortBuffer", fun: "io.ReadAtLeast"}, + {err: "io.ErrUnexpectedEOF", fun: "io.ReadAtLeast"}, + {err: "io.ErrUnexpectedEOF", fun: "io.ReadFull"}, + // pkg/os + {err: "io.EOF", fun: "(*os.File).Read"}, + {err: "io.EOF", fun: "(*os.File).ReadAt"}, + {err: "io.EOF", fun: "(*os.File).ReadDir"}, + {err: "io.EOF", fun: "(*os.File).Readdir"}, + {err: "io.EOF", fun: "(*os.File).Readdirnames"}, + // pkg/strings + {err: "io.EOF", fun: "(*strings.Reader).Read"}, + {err: "io.EOF", fun: "(*strings.Reader).ReadAt"}, + {err: "io.EOF", fun: "(*strings.Reader).ReadByte"}, + {err: "io.EOF", fun: "(*strings.Reader).ReadRune"}, +} + +func isAllowedErrorComparison(info types.Info, binExpr *ast.BinaryExpr) bool { + var errName string // `.`, e.g. `io.EOF` + var callExpr *ast.CallExpr + + // Figure out which half of the expression is the returned error and which + // half is the presumed error declaration. + for _, expr := range []ast.Expr{binExpr.X, binExpr.Y} { + switch t := expr.(type) { + case *ast.SelectorExpr: + // A selector which we assume refers to a staticaly declared error + // in a package. + errName = selectorToString(t) + case *ast.Ident: + // Identifier, most likely to be the `err` variable or whatever + // produces it. + callExpr = assigningCallExpr(info, t) + case *ast.CallExpr: + callExpr = t + } + } + + // Unimplemented or not sure, disallow the expression. + if errName == "" || callExpr == nil { + return false + } + + // Find the expression that last assigned the subject identifier. + functionSelector, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + // If the function is not a selector it is not an Std function that is + // allowed. + return false + } + var functionName string + if sel, ok := info.Selections[functionSelector]; ok { + functionName = fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) + } else { + // If there is no selection, assume it is a package. 
+ functionName = selectorToString(callExpr.Fun.(*ast.SelectorExpr)) + } + + for _, w := range allowedErrors { + if w.fun == functionName && w.err == errName { + return true + } + } + return false +} + +func assigningCallExpr(info types.Info, subject *ast.Ident) *ast.CallExpr { + if subject.Obj == nil { + return nil + } + switch declT := subject.Obj.Decl.(type) { + case *ast.AssignStmt: + // The identifier is LHS of an assignment. + assignment := declT + + assigningExpr := assignment.Rhs[0] + // If the assignment is comprised of multiple expressions, find out + // which LHS expression we should use by finding its index in the LHS. + if len(assignment.Rhs) > 1 { + for i, lhs := range assignment.Lhs { + if subject.Name == lhs.(*ast.Ident).Name { + assigningExpr = assignment.Rhs[i] + break + } + } + } + + switch assignT := assigningExpr.(type) { + case *ast.CallExpr: + // Found the function call. + return assignT + case *ast.Ident: + // The subject was the result of assigning from another identifier. + return assigningCallExpr(info, assignT) + } + } + return nil +} + +func selectorToString(selExpr *ast.SelectorExpr) string { + if ident, ok := selExpr.X.(*ast.Ident); ok { + return ident.Name + "." + selExpr.Sel.Name + } + return "" +} diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go new file mode 100644 index 000000000..e2449f8f9 --- /dev/null +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go @@ -0,0 +1,52 @@ +package errorlint + +import ( + "flag" + "sort" + + "golang.org/x/tools/go/analysis" +) + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "errorlint", + Doc: "Source code linter for Go software that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.", + Run: run, + Flags: flagSet, + } +} + +var ( + flagSet flag.FlagSet + checkComparison bool + checkAsserts bool + checkErrorf bool +) + +func init() { + flagSet.BoolVar(&checkComparison, "comparison", true, "Check for plain error comparisons") + flagSet.BoolVar(&checkAsserts, "asserts", true, "Check for plain type assertions and type switches") + flagSet.BoolVar(&checkErrorf, "errorf", false, "Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats") +} + +func run(pass *analysis.Pass) (interface{}, error) { + lints := []Lint{} + if checkComparison { + l := LintErrorComparisons(pass.Fset, *pass.TypesInfo) + lints = append(lints, l...) + } + if checkAsserts { + l := LintErrorTypeAssertions(pass.Fset, *pass.TypesInfo) + lints = append(lints, l...) + } + if checkErrorf { + l := LintFmtErrorfCalls(pass.Fset, *pass.TypesInfo) + lints = append(lints, l...) 
+ } + sort.Sort(ByPosition(lints)) + + for _, l := range lints { + pass.Report(analysis.Diagnostic{Pos: l.Pos, Message: l.Message}) + } + return nil, nil +} diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go new file mode 100644 index 000000000..3d11946a0 --- /dev/null +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go @@ -0,0 +1,249 @@ +package errorlint + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "regexp" +) + +type Lint struct { + Message string + Pos token.Pos +} + +type ByPosition []Lint + +func (l ByPosition) Len() int { return len(l) } +func (l ByPosition) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +func (l ByPosition) Less(i, j int) bool { + return l[i].Pos < l[j].Pos +} + +func LintFmtErrorfCalls(fset *token.FileSet, info types.Info) []Lint { + lints := []Lint{} + for expr, t := range info.Types { + // Search for error expressions that are the result of fmt.Errorf + // invocations. + if t.Type.String() != "error" { + continue + } + call, ok := isFmtErrorfCallExpr(info, expr) + if !ok { + continue + } + + // Find all % fields in the format string. + formatVerbs, ok := printfFormatStringVerbs(info, call) + if !ok { + continue + } + + // For any arguments that are errors, check whether the wrapping verb + // is used. Only one %w verb may be used in a single format string at a + // time, so we stop after finding a correct %w. + var lintArg ast.Expr + args := call.Args[1:] + for i := 0; i < len(args) && i < len(formatVerbs); i++ { + if info.Types[args[i]].Type.String() != "error" && !isErrorStringCall(info, args[i]) { + continue + } + + if formatVerbs[i] == "%w" { + lintArg = nil + break + } + + if lintArg == nil { + lintArg = args[i] + } + } + if lintArg != nil { + lints = append(lints, Lint{ + Message: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors", + Pos: lintArg.Pos(), + }) + } + } + return lints +} + +// isErrorStringCall tests whether the expression is a string expression that +// is the result of an `(error).Error()` method call. +func isErrorStringCall(info types.Info, expr ast.Expr) bool { + if info.Types[expr].Type.String() == "string" { + if call, ok := expr.(*ast.CallExpr); ok { + if callSel, ok := call.Fun.(*ast.SelectorExpr); ok { + fun := info.Uses[callSel.Sel].(*types.Func) + return fun.Type().String() == "func() string" && fun.Name() == "Error" + } + } + } + return false +} + +func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, bool) { + if len(call.Args) <= 1 { + return nil, false + } + strLit, ok := call.Args[0].(*ast.BasicLit) + if !ok { + // Ignore format strings that are not literals. + return nil, false + } + formatString := constant.StringVal(info.Types[strLit].Value) + + // Naive format string argument verb. This does not take modifiers such as + // padding into account... + re := regexp.MustCompile(`%[^%]`) + return re.FindAllString(formatString, -1), true +} + +func isFmtErrorfCallExpr(info types.Info, expr ast.Expr) (*ast.CallExpr, bool) { + call, ok := expr.(*ast.CallExpr) + if !ok { + return nil, false + } + fn, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + // TODO: Support fmt.Errorf variable aliases? 
+ return nil, false + } + obj := info.Uses[fn.Sel] + + pkg := obj.Pkg() + if pkg != nil && pkg.Name() == "fmt" && obj.Name() == "Errorf" { + return call, true + } + return nil, false +} + +func LintErrorComparisons(fset *token.FileSet, info types.Info) []Lint { + lints := []Lint{} + + for expr := range info.Types { + // Find == and != operations. + binExpr, ok := expr.(*ast.BinaryExpr) + if !ok { + continue + } + if binExpr.Op != token.EQL && binExpr.Op != token.NEQ { + continue + } + // Comparing errors with nil is okay. + if isNilComparison(binExpr) { + continue + } + // Find comparisons of which one side is a of type error. + if !isErrorComparison(info, binExpr) { + continue + } + + if isAllowedErrorComparison(info, binExpr) { + continue + } + + lints = append(lints, Lint{ + Message: fmt.Sprintf("comparing with %s will fail on wrapped errors. Use errors.Is to check for a specific error", binExpr.Op), + Pos: binExpr.Pos(), + }) + } + + for scope := range info.Scopes { + // Find value switch blocks. + switchStmt, ok := scope.(*ast.SwitchStmt) + if !ok { + continue + } + // Check whether the switch operates on an error type. + if switchStmt.Tag == nil { + continue + } + tagType := info.Types[switchStmt.Tag] + if tagType.Type.String() != "error" { + continue + } + + lints = append(lints, Lint{ + Message: "switch on an error will fail on wrapped errors. Use errors.Is to check for specific errors", + Pos: switchStmt.Pos(), + }) + } + + return lints +} + +func isNilComparison(binExpr *ast.BinaryExpr) bool { + if ident, ok := binExpr.X.(*ast.Ident); ok && ident.Name == "nil" { + return true + } + if ident, ok := binExpr.Y.(*ast.Ident); ok && ident.Name == "nil" { + return true + } + return false +} + +func isErrorComparison(info types.Info, binExpr *ast.BinaryExpr) bool { + tx := info.Types[binExpr.X] + ty := info.Types[binExpr.Y] + return tx.Type.String() == "error" || ty.Type.String() == "error" +} + +func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { + lints := []Lint{} + + for expr := range info.Types { + // Find type assertions. + typeAssert, ok := expr.(*ast.TypeAssertExpr) + if !ok { + continue + } + + // Find type assertions that operate on values of type error. + if !isErrorTypeAssertion(info, typeAssert) { + continue + } + + lints = append(lints, Lint{ + Message: "type assertion on error will fail on wrapped errors. Use errors.As to check for specific errors", + Pos: typeAssert.Pos(), + }) + } + + for scope := range info.Scopes { + // Find type switches. + typeSwitch, ok := scope.(*ast.TypeSwitchStmt) + if !ok { + continue + } + + // Find the type assertion in the type switch. + var typeAssert *ast.TypeAssertExpr + switch t := typeSwitch.Assign.(type) { + case *ast.ExprStmt: + typeAssert = t.X.(*ast.TypeAssertExpr) + case *ast.AssignStmt: + typeAssert = t.Rhs[0].(*ast.TypeAssertExpr) + } + + // Check whether the type switch is on a value of type error. + if !isErrorTypeAssertion(info, typeAssert) { + continue + } + + lints = append(lints, Lint{ + Message: "type switch on error will fail on wrapped errors. 
Use errors.As to check for specific errors", + Pos: typeAssert.Pos(), + }) + } + + return lints +} + +func isErrorTypeAssertion(info types.Info, typeAssert *ast.TypeAssertExpr) bool { + t := info.Types[typeAssert.X] + return t.Type.String() == "error" +} diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md deleted file mode 100644 index c5275d5ab..000000000 --- a/vendor/github.com/prometheus/client_golang/AUTHORS.md +++ /dev/null @@ -1,18 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Björn Rabenstein - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Bernerd Schaefer -* Björn Rabenstein -* Daniel Bornkessel -* Jeff Younker -* Julius Volz -* Matt T. Proud -* Tobias Schmidt - diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 000000000..288f0e854 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 000000000..6609e2877 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
+func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 623d3d83f..1e839650d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -29,27 +29,72 @@ type Collector interface { // collected by this Collector to the provided channel and returns once // the last descriptor has been sent. The sent descriptors fulfill the // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. Describe(chan<- *Desc) // Collect is called by the Prometheus registry when collecting // metrics. The implementation sends each collected metric via the // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. Collect(chan<- Metric) } +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. 
+// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + // selfCollector implements Collector for a single Metric so that the Metric // collects itself. Add it as an anonymous field to a struct that implements // Metric, and call init with the Metric itself as an argument. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index ee37949ad..0e1b48c03 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -15,6 +15,11 @@ package prometheus import ( "errors" + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" ) // Counter is a Metric that represents a single numerical value that only ever @@ -30,26 +35,42 @@ type Counter interface { Metric Collector - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - // - // Deprecated: Use NewConstMetric to create a counter for an external - // value. A Counter should never be set. - Set(float64) - // Inc increments the counter by 1. + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. Add(float64) } +// ExemplarAdder is implemented by Counters that offer the option of adding a +// value to the Counter together with an exemplar. Its AddWithExemplar method +// works like the Add method of the Counter interface but also replaces the +// currently saved exemplar (if any) with a new one, created from the provided +// value, the current time as timestamp, and the provided labels. Empty Labels +// will lead to a valid (label-less) exemplar. But if Labels is nil, the current +// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any +// of the provided labels are invalid, or if the provided labels contain more +// than 64 runes in total. +type ExemplarAdder interface { + AddWithExemplar(value float64, exemplar Labels) +} + // CounterOpts is an alias for Opts. See there for doc comments. 
type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation also implements ExemplarAdder. It is safe to +// perform the corresponding type assertion. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. func NewCounter(opts CounterOpts) Counter { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -57,20 +78,83 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} result.init(result) // Init self-collection. return result } type counter struct { - value + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair + exemplar atomic.Value // Containing nil or a *dto.Exemplar. + + now func() time.Time // To mock out time.Now() for testing. +} + +func (c *counter) Desc() *Desc { + return c.desc } func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } - c.value.Add(v) + + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) AddWithExemplar(v float64, e Labels) { + c.Add(v) + c.updateExemplar(v, e) +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + var exemplar *dto.Exemplar + if e := c.exemplar.Load(); e != nil { + exemplar = e.(*dto.Exemplar) + } + + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) +} + +func (c *counter) updateExemplar(v float64, l Labels) { + if l == nil { + return + } + e, err := newExemplar(v, c.now(), l) + if err != nil { + panic(err) + } + c.exemplar.Store(e) } // CounterVec is a Collector that bundles a set of Counters that all share the @@ -78,16 +162,12 @@ func (c *counter) Add(v float64) { // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. type CounterVec struct { - *MetricVec + *metricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. 
At least one label name must be -// provided. +// partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -96,34 +176,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. 
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -131,18 +239,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // CounterFunc is a Counter whose value is determined at collect time by calling a @@ -162,6 +309,8 @@ type CounterFunc interface { // provided function must be concurrency-safe. The function should also honor // the contract for a Counter (values only go up, not down), but compliance will // not be checked. +// +// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. 
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 77f4b30e8..2f19f5e1e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,33 +16,17 @@ package prometheus import ( "errors" "fmt" - "regexp" "sort" "strings" + "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -78,32 +62,27 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occured during construction. It is reported on + // err is an error that occurred during construction. It is reported on // registration time. err error } // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. +// be nil if no such labels should be set. fqName must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. +// specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, variableLabels: variableLabels, } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !metricNameRE.MatchString(fqName) { + if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -116,7 +95,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them... 
for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) @@ -127,12 +106,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) @@ -142,24 +127,25 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } - vh := hashNew() + + xxh := xxhash.New() for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) + xxh.WriteString(val) + xxh.Write(separatorByteSlice) } - d.id = vh + d.id = xxh.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) + xxh.Reset() + xxh.WriteString(help) + xxh.Write(separatorByteSlice) for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) + xxh.WriteString(labelName) + xxh.Write(separatorByteSlice) } - d.dimHash = lh + d.dimHash = xxh.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { @@ -168,7 +154,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(LabelPairSorter(d.constLabelPairs)) + sort.Sort(labelPairSorter(d.constLabelPairs)) return d } @@ -198,8 +184,3 @@ func (d *Desc) String() string { d.variableLabels, ) } - -func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index b15a2d3b9..98450125d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -11,13 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides metrics primitives to instrument code for -// monitoring. It also offers a registry for metrics. Sub-packages allow to -// expose the registered metrics via HTTP (package promhttp) or push them to a -// Pushgateway (package push). +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. 
Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. // // A Basic Example // @@ -26,6 +28,7 @@ // package main // // import ( +// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" @@ -59,7 +62,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":8080", nil) +// log.Fatal(http.ListenAndServe(":8080", nil)) // } // // @@ -69,34 +72,33 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their -// vector versions for basic usage. +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. // // Above, you have already touched the Counter and the Gauge. There are two more // advanced metric types: the Summary and Histogram. A more thorough description // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. +// and HistogramVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. A // Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and +// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, +// and HistogramVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. 
// // Custom Collectors and constant Metrics // @@ -112,10 +114,23 @@ // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// NewConstSummary (and their respective Must… versions). NewConstMetric is used +// for all metric types with just a float64 as their value: Counter, Gauge, and +// a special “type” called Untyped. Use the latter if you are not sure if the +// mirrored metric is a Counter or a Gauge. Creation of the Metric instance +// happens in the Collect method. The Describe method has to return separate +// Desc instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. Alternatively, +// you could return no Desc at all, which will mark the Collector “unchecked”. +// No checks are performed at registration time, but metric consistency will +// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,34 +144,34 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. 
The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can be found in the global DefaultRegisterer variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. -// -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. // -// Also note that the DefaultRegistry comes registered with a Collector for Go +// Also note that the DefaultRegisterer comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. @@ -166,16 +181,19 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. 
package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go index e3b67df8a..3d383a735 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus // Inline and byte-free variant of hash/fnv's fnv64a. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 8b70e5141..d67573f76 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -13,6 +13,14 @@ package prometheus +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // @@ -27,29 +35,95 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. Inc() - // Dec decrements the Gauge by 1. + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( + desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, - ), GaugeValue, 0) + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *MetricVec + *metricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). 
+// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. 
+// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // GaugeFunc is a Gauge whose value is determined at collect time by calling a @@ -127,9 +273,12 @@ type GaugeFunc interface { // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The // value reported is determined by calling the given function from within the // Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. +// concurrently. Therefore, it must be safe to call the provided function +// concurrently. +// +// NewGaugeFunc is a good way to create an “info” style metric with a constant +// value of 1. Example: +// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index abc9d4ec4..ea05cf429 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -1,34 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus import ( - "fmt" "runtime" "runtime/debug" + "sync" "time" ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc - // metrics to describe and collect - metrics memStatsMetrics + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. } -// NewGoCollector returns a collector which exports metrics about the current -// go process. 
+// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the GC invocation durations.", + "A summary of the pause duration of garbage collection cycles.", nil, nil), - metrics: memStatsMetrics{ + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), @@ -48,7 +103,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. 
Sum of all system allocations.", + "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -111,12 +166,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, + valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -213,29 +268,53 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, }, }, } } func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) + return "go_memstats_" + s } // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - - for _, i := range c.metrics { + ch <- c.goInfoDesc + for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -246,11 +325,35 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. 
+ c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } @@ -261,3 +364,33 @@ type memStatsMetrics []struct { eval func(*runtime.MemStats) float64 valType ValueType } + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 9719e8fac..d4ea301a3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -16,9 +16,13 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" + "sync" "sync/atomic" + "time" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -108,8 +112,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { } // HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type HistogramOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Histogram (created by joining these components with @@ -120,29 +125,22 @@ type HistogramOpts struct { Subsystem string Name string - // Help provides information about this Histogram. Mandatory! + // Help provides information about this Histogram. // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. 
+ // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each @@ -155,6 +153,10 @@ type HistogramOpts struct { // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. +// +// The returned implementation also implements ExemplarObserver. It is safe to +// perform the corresponding type assertion. Exemplars are tracked separately +// for each bucket. func NewHistogram(opts HistogramOpts) Histogram { return newHistogram( NewDesc( @@ -169,7 +171,7 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -191,6 +193,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr desc: desc, upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -207,30 +211,60 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts as well as exemplars: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. return h } -type histogram struct { +type histogramCounts struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. 
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 selfCollector - // Note that there is no mutex required. + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. - desc *Desc + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts upperBounds []float64 - counts []uint64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - labelPairs []*dto.LabelPair + now func() time.Time // To mock out time.Now() for testing. } func (h *histogram) Desc() *Desc { @@ -238,6 +272,89 @@ func (h *histogram) Desc() *Desc { } func (h *histogram) Observe(v float64) { + h.observe(v, h.findBucket(v)) +} + +func (h *histogram) ObserveWithExemplar(v float64, e Labels) { + i := h.findBucket(v) + h.observe(v, i) + h.updateExemplar(v, i, e) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. 
+ } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + if e := h.exemplars[i].Load(); e != nil { + his.Bucket[i].Exemplar = e.(*dto.Exemplar) + } + } + // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. + if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e.(*dto.Exemplar), + } + his.Bucket = append(his.Bucket, b) + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// findBucket returns the index of the bucket for the provided value, or +// len(h.upperBounds) for the +Inf bucket. +func (h *histogram) findBucket(v float64) int { // TODO(beorn7): For small numbers of buckets (<30), a linear search is // slightly faster than the binary search. If we really care, we could // switch from one search strategy to the other depending on the number @@ -247,38 +364,43 @@ func (h *histogram) Observe(v float64) { // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) + return sort.SearchFloat64s(h.upperBounds, v) +} + +// observe is the implementation for Observe without the findBucket part. +func (h *histogram) observe(v float64, bucket int) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if bucket < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[bucket], 1) } - atomic.AddUint64(&h.count, 1) for { - oldBits := atomic.LoadUint64(&h.sumBits) + oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { break } } + // Increment count last as we take it as a signal that the observation + // is complete. 
+ atomic.AddUint64(&hotCounts.count, 1) } -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } +// updateExemplar replaces the exemplar for the provided bucket. With empty +// labels, it's a no-op. It panics if any of the labels is invalid. +func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { + if l == nil { + return } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil + e, err := newExemplar(v, h.now(), l) + if err != nil { + panic(err) + } + h.exemplars[bucket].Store(e) } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -287,12 +409,11 @@ func (h *histogram) Write(out *dto.Metric) error { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *MetricVec + *metricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -301,47 +422,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. 
For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h } -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. 
+// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constHistogram struct { @@ -393,7 +583,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // bucket. // // NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. func NewConstHistogram( desc *Desc, count uint64, @@ -401,8 +591,11 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constHistogram{ desc: desc, @@ -414,7 +607,7 @@ func NewConstHistogram( } // MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. +// NewConstHistogram would have returned an error. func MustNewConstHistogram( desc *Desc, count uint64, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 67ee5ac79..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "bytes" - "compress/gzip" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. 
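As a usage sketch for the HistogramVec API above (the metric name, help text, and label values are hypothetical, and the exemplar call assumes the ExemplarObserver support added in this version of the package):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Hypothetical request-duration histogram, partitioned by status code and method.
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDur)

	// Observe with explicit label values; the order matches the label names above.
	reqDur.WithLabelValues("200", "GET").Observe(0.042)

	// Curry one label and keep observing with the remaining one.
	getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("404").Observe(0.012)

	// Record an exemplar alongside an observation (panics on invalid labels).
	reqDur.WithLabelValues("200", "GET").(prometheus.ExemplarObserver).
		ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
}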
- -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is non instrumented). -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.Handler instead. See there for further documentation. -func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) - }) -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). 
Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Please prefer direct -// instrumentation in the meantime. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). -// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. 
-// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 000000000..351c26e1a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 000000000..2744443ac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index d4063d98f..35bd8bde3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -15,11 +15,16 @@ package prometheus import ( "strings" + "time" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -const separatorByte byte = 255 +var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. // A Metric models a single sample value with its meta data being exported to // Prometheus. Implementations of Metric in this package are Gauge, Counter, @@ -43,9 +48,8 @@ type Metric interface { // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. 
Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. The @@ -57,8 +61,9 @@ type Metric interface { // implementation XXX has its own XXXOpts type, but in most cases, it is just be // an alias of this type (which might change when the requirement arises.) // -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with @@ -69,7 +74,7 @@ type Opts struct { Subsystem string Name string - // Help provides information about this metric. Mandatory! + // Help provides information about this metric. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -79,20 +84,12 @@ type Opts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels } @@ -118,37 +115,22 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. 
+type labelPairSorter []*dto.LabelPair -func (s LabelPairSorter) Len() int { +func (s labelPairSorter) Len() int { return len(s) } -func (s LabelPairSorter) Swap(i, j int) { +func (s labelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s LabelPairSorter) Less(i, j int) bool { +func (s labelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - type invalidMetric struct { desc *Desc err error @@ -164,3 +146,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..44128016f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. 
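To ground the ObserverFunc and Timer discussion above, a minimal sketch of the common Gauge-as-Observer pattern, plus a metric wrapped with an explicit timestamp; the metric names and the fixed timestamp are illustrative only:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var lastRunDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "job_last_run_duration_seconds",
	Help: "Duration of the most recent job run.",
})

func runJob() {
	// A Gauge used as the Observer of a Timer: ObserveDuration sets the
	// gauge to the elapsed time when runJob returns.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRunDuration.Set))
	defer timer.ObserveDuration()
	// ... work ...
}

func main() {
	prometheus.MustRegister(lastRunDuration)
	runJob()

	// Mirroring a sample with an explicit timestamp; such a Metric is
	// usually emitted from a custom Collector's Collect method.
	desc := prometheus.NewDesc("mirrored_sample", "A sample mirrored from another source.", nil, nil)
	_ = prometheus.NewMetricWithTimestamp(
		time.Unix(1600000000, 0),
		prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42),
	)
}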
+// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} + +// ExemplarObserver is implemented by Observers that offer the option of +// observing a value together with an exemplar. Its ObserveWithExemplar method +// works like the Observe method of an Observer but also replaces the currently +// saved exemplar (if any) with a new one, created from the provided value, the +// current time as timestamp, and the provided Labels. Empty Labels will lead to +// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is +// left in place. ObserveWithExemplar panics if any of the provided labels are +// invalid or if the provided labels contain more than 64 runes in total. +type ExemplarObserver interface { + ObserveWithExemplar(value float64, exemplar Labels) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index e31e62e78..9b8097942 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -13,89 +13,126 @@ package prometheus -import "github.com/prometheus/procfs" +import ( + "errors" + "os" +) type processCollector struct { - pid int collectFn func(chan<- Metric) pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc } -// NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) Collector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) 
While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool } -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) Collector { - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn } // Set up process metric collection if supported by the runtime. 
- if _, err := procfs.NewStat(); err == nil { + if canCollectProcess() { c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } } - return &c + return c } // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime } // Collect returns the current state of all metrics of the collector. @@ -103,40 +140,12 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { return } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - - if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs - } - - if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs + if desc == nil { + desc = NewInvalidDesc(err) } + ch <- NewInvalidMetric(desc, err) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 000000000..3117461cd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
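A brief sketch of wiring up the reworked process collector described above; the zero value of ProcessCollectorOpts targets the current process, and the Namespace and ReportErrors settings here are purely illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Mix process metrics (CPU, memory, FDs, start time) into a dedicated
	// registry. Collection errors are exported as invalid metrics because
	// ReportErrors is set; the names get a "myapp_" prefix via Namespace.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "myapp",
		ReportErrors: true,
	}))
}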
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 000000000..f973398df --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // System interface description + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex + + // Refer to the Golang internal implementation + // https://golang.org/src/internal/syscall/windows/psapi_windows.go + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivateUsage uintptr +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 000000000..5070e72e2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
+ return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + 
http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index b6dd5a266..5e1c4546c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -11,31 +11,34 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. +// Package promhttp provides tooling around HTTP servers and clients. // -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package promhttp contains functions to create http.Handler instances to -// expose Prometheus metrics via HTTP. In later versions of this package, it -// will also contain tooling to instrument instances of http.Handler and -// http.RoundTripper. +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. // -// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor, -// you can create a handler for a custom registry or anything that implements -// the Gatherer interface. It also allows to create handlers that act -// differently on errors or allow to log errors. +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. 
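As a sketch of the promhttp usage patterns summarized in the package comment above (handler paths, metric names, and the listen address are hypothetical; the duration middleware is one of the InstrumentHandlerX wrappers mentioned there):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "hello_request_duration_seconds",
		Help:    "Duration of hello requests.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(dur)

	// Middleware-style instrumentation of an http.Handler.
	http.Handle("/hello", promhttp.InstrumentHandlerDuration(dur, hello))

	// The instrumented default exposition handler for prometheus.DefaultGatherer.
	http.Handle("/metrics", promhttp.Handler())

	// Alternatively, a handler for a custom registry with explicit HandlerOpts.
	reg := prometheus.NewRegistry()
	http.Handle("/custom-metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling:       promhttp.ContinueOnError,
		MaxRequestsInFlight: 3,
		Registry:            reg, // also export promhttp_metric_handler_errors_total
	}))

	_ = http.ListenAndServe(":8080", nil)
}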
package promhttp import ( - "bytes" "compress/gzip" "fmt" "io" "net/http" "strings" "sync" + "time" "github.com/prometheus/common/expfmt" @@ -44,99 +47,221 @@ import ( const ( contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, } -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) -} - -// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The -// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP -// error, no error logging, and compression if requested by the client. +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. // -// If you want to create a Handler for the DefaultGatherer with different -// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and -// your desired HandlerOpts. +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. func Handler() http.Handler { - return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) } -// HandlerFor returns an http.Handler for the provided Gatherer. The behavior -// of the Handler is defined by the provided HandlerOpts. +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. 
+ errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) } + errCnt.WithLabelValues("gathering").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) case ContinueOnError: if len(mfs) == 0 { - http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + // Still report the error if no metrics have been gathered. + httpError(rsp, err) return } case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } - contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf, opts.DisableCompression) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding metric family:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - // Handled later. - case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) - return - } + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true } + // Do nothing in all other cases, including ContinueOnError. 
+ return false } - if closer, ok := writer.(io.Closer); ok { - closer.Close() + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) - return + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. + if handleError(closer.Close()) { + return + } } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) } - w.Write(buf.Bytes()) - // TODO(beorn7): Consider streaming serving of metrics. + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) } // HandlerErrorHandling defines how a Handler serving metrics will handle @@ -147,14 +272,22 @@ type HandlerErrorHandling int // errors are encountered. const ( // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. + // encountered. Report the error message in the body. 
Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. HTTPErrorOnError HandlerErrorHandling = iota // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. ContinueOnError // Panic upon the first error encountered (useful for "crash only" apps). PanicOnError @@ -177,25 +310,70 @@ type HandlerOpts struct { // logged regardless of the configured ErrorHandling provided ErrorLog // is not nil. ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration + // If true, the experimental OpenMetrics encoding is added to the + // possible options during content negotiation. Note that Prometheus + // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is + // the only way to transmit exemplars. 
However, the move to OpenMetrics + // is not completely transparent. Most notably, the values of "quantile" + // labels of Summaries and "le" labels of Histograms are formatted with + // a trailing ".0" if they would otherwise look like integer numbers + // (which changes the identity of the resulting series on the Prometheus + // server). + EnableOpenMetrics bool } -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { - if compressionDisabled { - return writer, "" - } - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") for _, part := range parts { - part := strings.TrimSpace(part) + part = strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" + return true } } - return writer, "" + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents is +// supposed to be uncompressed plain text. Same as with a plain http.Error, this +// must not be called if the header or any payload has already been sent. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 000000000..83c49b66a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. 
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. 
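Reviewer note, not part of the vendored file: a minimal sketch of chaining the client-side round-tripper middlewares documented above around an http.Client transport; metric names are illustrative only.

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func newInstrumentedClient() *http.Client {
    inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "client_in_flight_requests",
        Help: "In-flight requests of the instrumented client.",
    })
    counter := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "client_api_requests_total",
            Help: "Requests of the instrumented client, by code and method.",
        },
        []string{"code", "method"},
    )
    duration := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "client_request_duration_seconds",
            Help:    "Request latency of the instrumented client.",
            Buckets: prometheus.DefBuckets,
        },
        []string{"method"},
    )
    prometheus.MustRegister(inFlight, counter, duration)

    // The middlewares compose like ordinary http.RoundTripper decorators;
    // RoundTripperFunc satisfies http.RoundTripper, so the chain can be
    // assigned directly to Transport.
    return &http.Client{
        Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
            promhttp.InstrumentRoundTripperCounter(counter,
                promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
            ),
        ),
    }
}

func main() {
    client := newInstrumentedClient()
    _, _ = client.Get("https://example.com") // response handling omitted in this sketch
}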
+type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 000000000..9db243805 --- /dev/null +++ 
b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. 
Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
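Reviewer note, not part of the vendored file: a minimal sketch of composing the server-side handler middlewares documented above; the wrapped handler, metric names, and paths are illustrative only.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "http_in_flight_requests",
        Help: "Requests currently being served.",
    })
    duration := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "http_request_duration_seconds",
            Help:    "Request duration, by code and method.",
            Buckets: prometheus.DefBuckets,
        },
        []string{"code", "method"},
    )
    counter := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Requests served, by code and method.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(inFlight, duration, counter)

    api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        _, _ = w.Write([]byte("ok"))
    })

    // Each middleware wraps an http.Handler and returns an http.Handler
    // (or http.HandlerFunc), so they nest in any order.
    http.Handle("/api", promhttp.InstrumentHandlerInFlight(inFlight,
        promhttp.InstrumentHandlerDuration(duration,
            promhttp.InstrumentHandlerCounter(counter, api),
        ),
    ))
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":8080", nil))
}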
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+ if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. 
+func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 32a3986b0..ba94405af 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,15 +15,24 @@ package prometheus import ( "bytes" - "errors" "fmt" + "io/ioutil" "os" + "path/filepath" + "runtime" "sort" + "strings" "sync" + "unicode/utf8" + "github.com/cespare/xxhash/v2" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) const ( @@ -35,13 +44,14 @@ const ( // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which -// has a process collector (see NewProcessCollector) and a Go collector (see -// NewGoCollector) already registered. This approach to keep default instances -// as global state mirrors the approach of other packages in the Go standard -// library. Note that there are caveats. Change the variables with caution and -// only if you understand the consequences. Users who want to avoid global state -// altogether should not use the convenience function and act on custom -// instances instead. +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. 
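Reviewer note, not part of the vendored file: a minimal sketch of avoiding the global default instances described above, including the idempotent-registration pattern via AlreadyRegisteredError; the metric name is illustrative only.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Custom Registry instead of the package-level DefaultRegisterer/DefaultGatherer.
    reg := prometheus.NewRegistry()

    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "myapp_requests_total",
        Help: "Handled requests.",
    })
    if err := reg.Register(requests); err != nil {
        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
            // Reuse the previously registered collector.
            requests = are.ExistingCollector.(prometheus.Counter)
        } else {
            log.Fatal(err)
        }
    }
    requests.Inc()

    mfs, err := reg.Gather()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(mfs), "metric families gathered")
}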
var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -49,7 +59,7 @@ var ( ) func init() { - MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) MustRegister(NewGoCollector()) } @@ -65,7 +75,8 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -80,7 +91,7 @@ func NewPedanticRegistry() *Registry { // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather then the Registry type +// Registerer as type for registration purposes (rather than the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). type Registerer interface { @@ -95,8 +106,13 @@ type Registerer interface { // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // - // It is in general not safe to register the same Collector multiple - // times concurrently. + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -105,7 +121,9 @@ type Registerer interface { // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered @@ -123,15 +141,23 @@ type Registerer interface { type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. 
In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } @@ -152,38 +178,6 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } -// RegisterOrGet registers the provided Collector with the DefaultRegisterer and -// returns the Collector, unless an equal Collector was registered before, in -// which case that Collector is returned. -// -// Deprecated: RegisterOrGet is merely a convenience function for the -// implementation as described in the documentation for -// AlreadyRegisteredError. As the use case is relatively rare, this function -// will be removed in a future version of this package to clean up the -// namespace. -func RegisterOrGet(c Collector) (Collector, error) { - if err := Register(c); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - return are.ExistingCollector, nil - } - return nil, err - } - return c, nil -} - -// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning -// an error. -// -// Deprecated: This is deprecated for the same reason RegisterOrGet is. See -// there for details. -func MustRegisterOrGet(c Collector) Collector { - c, err := RegisterOrGet(c) - if err != nil { - panic(err) - } - return c -} - // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -201,25 +195,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } -// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that -// gathers from the previous DefaultGatherers but then merges the MetricFamily -// protobufs returned from the provided hook function with the MetricFamily -// protobufs returned from the original DefaultGatherer. -// -// Deprecated: This function manipulates the DefaultGatherer variable. Consider -// the implications, i.e. don't do this concurrently with any uses of the -// DefaultGatherer. In the rare cases where you need to inject MetricFamily -// protobufs directly, it is recommended to use a custom Registry and combine it -// with a custom Gatherer using the Gatherers type (see -// there). SetMetricFamilyInjectionHook only exists for compatibility reasons -// with previous versions of this package. 
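Reviewer note, not part of the vendored file: the replacement recommended by the removed doc comment above, sketched briefly, is to combine a custom Registry with other Gatherers via the Gatherers type; the path and address are illustrative only.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()

    // Gatherers gathers from each element in order and merges the results;
    // duplicates are skipped, with the first occurrence in slice order winning.
    combined := prometheus.Gatherers{
        prometheus.DefaultGatherer,
        reg,
    }
    http.Handle("/metrics", promhttp.HandlerFor(combined, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":8080", nil))
}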
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - DefaultGatherer = Gatherers{ - DefaultGatherer, - GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), - } -} - // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -252,6 +227,13 @@ func (errs MultiError) Error() string { return buf.String() } +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -276,6 +258,7 @@ type Registry struct { collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 + uncheckedCollectors []Collector pedanticChecksEnabled bool } @@ -285,7 +268,7 @@ func (r *Registry) Register(c Collector) error { descChan = make(chan *Desc, capDescChan) newDescIDs = map[uint64]struct{}{} newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. + collectorID uint64 // All desc IDs XOR'd together. duplicateDescErr error ) go func() { @@ -293,8 +276,13 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer r.mtx.Unlock() - // Coduct various tests... + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? @@ -307,12 +295,12 @@ func (r *Registry) Register(c Collector) error { if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } - // If it is not a duplicate desc in this collector, add it to + // If it is not a duplicate desc in this collector, XOR it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} - collectorID += desc.id + collectorID ^= desc.id } // Are all the label names and the help string consistent with @@ -333,14 +321,23 @@ func (r *Registry) Register(c Collector) error { } } } - // Did anything happen at all? + // A Collector yielding no Desc at all is considered unchecked. if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil } if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } } } // If the collectorID is new, but at least one of the descs existed @@ -365,7 +362,7 @@ func (r *Registry) Unregister(c Collector) bool { var ( descChan = make(chan *Desc, capDescChan) descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. 
+ collectorID uint64 // All desc IDs XOR'd together. ) go func() { c.Describe(descChan) @@ -373,7 +370,7 @@ func (r *Registry) Unregister(c Collector) bool { }() for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id + collectorID ^= desc.id descIDs[desc.id] = struct{}{} } } @@ -409,31 +406,25 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector } - // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { @@ -442,133 +433,264 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() - // Drain metricChan in case of premature return. + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. defer func() { - for _ = range metricChan { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } } }() - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. 
+ cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, )) - continue - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue + case metric, ok := <-umc: + if !ok { + umc = nil + break } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, )) - continue } - default: - panic("encountered MetricFamily with invalid type") + break } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + // Once both checkedMetricChan and uncheckdMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. 
+ if cmc == nil && umc == nil { + break + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - if r.pedanticChecksEnabled { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. 
+ metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil } // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // @@ -588,7 +710,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} errs MultiError // The collected errors to return in the end. ) @@ -625,10 +746,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { errs = append(errs, err) continue } @@ -636,88 +761,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { } } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. 
So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } -// normalizeMetricFamilies returns a MetricFamily slice whith empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. +func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) } } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } } - return result 
+ return nil } // checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, ) error { + name := metricFamily.GetName() + // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -725,42 +842,67 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), ) } - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - dh := hashNew() + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := xxhash.New() + h.WriteString(name) + h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - sort.Sort(LabelPairSorter(dtoMetric.Label)) + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. 
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) + h.WriteString(lp.GetName()) + h.Write(separatorByteSlice) + h.WriteString(lp.GetValue()) + h.Write(separatorByteSlice) } - if _, exists := metricHashes[h]; exists { + hSum := h.Sum64() + if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, ) } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { - return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, - ) - } - } else { - dimHashes[metricFamily.GetName()] = dh - } - metricHashes[h] = struct{}{} + metricHashes[hSum] = struct{}{} return nil } @@ -778,8 +920,8 @@ func checkDescConsistency( } // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), @@ -791,7 +933,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(LabelPairSorter(lpsFromDesc)) + sort.Sort(labelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index bce05bf9a..f3c1440d1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,11 +16,14 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -36,7 +39,10 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -53,13 +59,8 @@ type Summary interface { Observe(float64) } -// DefObjectives are the default Summary quantile values. 
-var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. @@ -75,8 +76,10 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -87,35 +90,34 @@ type SummaryOpts struct { Subsystem string Name string - // Help provides information about this Summary. Mandatory! + // Help provides information about this Summary. // // Metrics with the same fully-qualified name must have the same Help // string. Help string - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. - // The default value is DefObjectives. + // absolute error. 
If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -139,7 +141,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding @@ -169,7 +171,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -183,8 +185,8 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if len(opts.Objectives) == 0 { - opts.Objectives = DefObjectives + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { @@ -202,6 +204,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{{}, {}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, @@ -370,6 +383,116 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. 
It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { @@ -390,13 +513,21 @@ func (s quantSort) Less(i, j int) bool { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { - *MetricVec + *metricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. 
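Tying the Objectives change above to user code: a short, hedged sketch of a SummaryVec with explicitly configured quantile objectives, as the surrounding doc comments now recommend. It assumes the usual prometheus import; metric and label names are invented for illustration.

    latency := prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name: "demo_request_duration_seconds",
            Help: "Request latency.",
            // Set Objectives explicitly; with this version an unset map
            // yields a summary without quantiles at all.
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        []string{"method"}, // using "quantile" here would panic
    )
    prometheus.MustRegister(latency)
    latency.WithLabelValues("GET").Observe(0.042)
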
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, @@ -404,47 +535,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. 
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constSummary struct { @@ -497,7 +697,7 @@ func (s *constSummary) Write(out *dto.Metric) error { // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. 
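As a usage note for NewConstSummary: a hedged sketch of the body of a custom Collector's Collect(ch chan<- prometheus.Metric) method; the descriptor, counts, and label value are made up for the example.

    summaryDesc := prometheus.NewDesc(
        "demo_task_duration_seconds",
        "Pre-aggregated demo task durations.",
        []string{"worker"}, nil,
    )
    s, err := prometheus.NewConstSummary(
        summaryDesc,
        42,    // observation count
        13.37, // sum of observations
        map[float64]float64{0.5: 0.23, 0.99: 0.56},
        "worker-1",
    )
    if err != nil {
        return // an invalid Desc or label-count mismatch is reported as an error
    }
    ch <- s // ch is the channel handed to Collect
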
func NewConstSummary( desc *Desc, count uint64, @@ -505,8 +705,11 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constSummary{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go new file mode 100644 index 000000000..ec8061706 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go @@ -0,0 +1,386 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promlint provides a linter for Prometheus metrics. +package promlint + +import ( + "fmt" + "io" + "regexp" + "sort" + "strings" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +// A Linter is a Prometheus metrics linter. It identifies issues with metric +// names, types, and metadata, and reports them to the caller. +type Linter struct { + // The linter will read metrics in the Prometheus text format from r and + // then lint it, _and_ it will lint the metrics provided directly as + // MetricFamily proto messages in mfs. Note, however, that the current + // constructor functions New and NewWithMetricFamilies only ever set one + // of them. + r io.Reader + mfs []*dto.MetricFamily +} + +// A Problem is an issue detected by a Linter. +type Problem struct { + // The name of the metric indicated by this Problem. + Metric string + + // A description of the issue for this Problem. + Text string +} + +// newProblem is helper function to create a Problem. +func newProblem(mf *dto.MetricFamily, text string) Problem { + return Problem{ + Metric: mf.GetName(), + Text: text, + } +} + +// New creates a new Linter that reads an input stream of Prometheus metrics in +// the Prometheus text exposition format. +func New(r io.Reader) *Linter { + return &Linter{ + r: r, + } +} + +// NewWithMetricFamilies creates a new Linter that reads from a slice of +// MetricFamily protobuf messages. +func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { + return &Linter{ + mfs: mfs, + } +} + +// Lint performs a linting pass, returning a slice of Problems indicating any +// issues found in the metrics stream. The slice is sorted by metric name +// and issue description. +func (l *Linter) Lint() ([]Problem, error) { + var problems []Problem + + if l.r != nil { + d := expfmt.NewDecoder(l.r, expfmt.FmtText) + + mf := &dto.MetricFamily{} + for { + if err := d.Decode(mf); err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + problems = append(problems, lint(mf)...) + } + } + for _, mf := range l.mfs { + problems = append(problems, lint(mf)...) 
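A hedged example of driving the new promlint package, for instance from a test. The metrics text is invented; the snippet assumes the usual imports (fmt, log, strings) plus the promlint package added in this file.

    metricsText := "# HELP demo_request_size_kilobytes Size of requests.\n" +
        "# TYPE demo_request_size_kilobytes gauge\n" +
        "demo_request_size_kilobytes 42\n"

    l := promlint.New(strings.NewReader(metricsText))
    problems, err := l.Lint()
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range problems {
        // Expected to flag the non-base unit, suggesting "bytes".
        fmt.Printf("%s: %s\n", p.Metric, p.Text)
    }
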
+ } + + // Ensure deterministic output. + sort.SliceStable(problems, func(i, j int) bool { + if problems[i].Metric == problems[j].Metric { + return problems[i].Text < problems[j].Text + } + return problems[i].Metric < problems[j].Metric + }) + + return problems, nil +} + +// lint is the entry point for linting a single metric. +func lint(mf *dto.MetricFamily) []Problem { + fns := []func(mf *dto.MetricFamily) []Problem{ + lintHelp, + lintMetricUnits, + lintCounter, + lintHistogramSummaryReserved, + lintMetricTypeInName, + lintReservedChars, + lintCamelCase, + lintUnitAbbreviations, + } + + var problems []Problem + for _, fn := range fns { + problems = append(problems, fn(mf)...) + } + + // TODO(mdlayher): lint rules for specific metrics types. + return problems +} + +// lintHelp detects issues related to the help text for a metric. +func lintHelp(mf *dto.MetricFamily) []Problem { + var problems []Problem + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, newProblem(mf, "no help text")) + } + + return problems +} + +// lintMetricUnits detects issues with metric unit names. +func lintMetricUnits(mf *dto.MetricFamily) []Problem { + var problems []Problem + + unit, base, ok := metricUnits(*mf.Name) + if !ok { + // No known units detected. + return nil + } + + // Unit is already a base unit. + if unit == base { + return nil + } + + problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) + + return problems +} + +// lintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. +func lintCounter(mf *dto.MetricFamily) []Problem { + var problems []Problem + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) + } + + return problems +} + +// lintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { + // These rules do not apply to untyped metrics. 
+ t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []Problem + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} + +// lintMetricTypeInName detects when metric types are included in the metric name. +func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + + for i, t := range dto.MetricType_name { + if i == int32(dto.MetricType_UNTYPED) { + continue + } + + typename := strings.ToLower(t) + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) + } + } + return problems +} + +// lintReservedChars detects colons in metric names. +func lintReservedChars(mf *dto.MetricFamily) []Problem { + var problems []Problem + if strings.Contains(mf.GetName(), ":") { + problems = append(problems, newProblem(mf, "metric names should not contain ':'")) + } + return problems +} + +var camelCase = regexp.MustCompile(`[a-z][A-Z]`) + +// lintCamelCase detects metric names and label names written in camelCase. +func lintCamelCase(mf *dto.MetricFamily) []Problem { + var problems []Problem + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// lintUnitAbbreviations detects abbreviated units in the metric name. +func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { + var problems []Problem + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) + } + } + return problems +} + +// metricUnits attempts to detect known unit types used as part of a metric name, +// e.g. "foo_bytes_total" or "bar_baz_milligrams". +func metricUnits(m string) (unit string, base string, ok bool) { + ss := strings.Split(m, "_") + + for unit, base := range units { + // Also check for "no prefix". 
+ for _, p := range append(unitPrefixes, "") { + for _, s := range ss { + // Attempt to explicitly match a known unit with a known prefix, + // as some words may look like "units" when matching suffix. + // + // As an example, "thermometers" should not match "meters", but + // "kilometers" should. + if s == p+unit { + return p + unit, base, true + } + } + } + } + + return "", "", false +} + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. + "calories": "joules", + // Mass. + "pounds": "grams", + "ounces": "grams", + } + + unitPrefixes = []string{ + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "kibi", + "mega", + "mibi", + "giga", + "gibi", + "tera", + "tebi", + "peta", + "pebi", + } + + // Common abbreviations that we'd like to discourage. + unitAbbreviations = []string{ + "s", + "ms", + "us", + "ns", + "sec", + "b", + "kb", + "mb", + "gb", + "tb", + "pb", + "m", + "h", + "d", + } +) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 000000000..8d5f10523 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. 
It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 5faf7e6e3..0f9ce63f4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -13,108 +13,12 @@ package prometheus -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. - Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) - Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - *MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. 
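To connect the new Timer helper above with the vector types, a hedged sketch of timing a handler into a HistogramVec; names are invented, and the snippet assumes the usual prometheus import with the vector registered exactly once.

    var requestDuration = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name: "demo_handler_duration_seconds",
            Help: "Handler latency.",
        },
        []string{"handler"},
    )

    func init() {
        prometheus.MustRegister(requestDuration)
    }

    func handleIndex() {
        // WithLabelValues returns an Observer, the type NewTimer expects.
        timer := prometheus.NewTimer(requestDuration.WithLabelValues("index"))
        defer timer.ObserveDuration()
        // ... actual handler work ...
    }
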
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. // // To create UntypedFunc instances, use NewUntypedFunc. type UntypedFunc interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index a944c3775..6206928cc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,21 +14,23 @@ package prometheus import ( - "errors" "fmt" - "math" "sort" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" + "time" + "unicode/utf8" + //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + + dto "github.com/prometheus/client_model/go" ) // ValueType is an enumeration of metric types that represent a simple value. type ValueType int -// Possible values for the ValueType enum. +// Possible values for the ValueType enum. Use UntypedValue to mark a metric +// with an unknown type. const ( _ ValueType = iota CounterValue @@ -36,77 +38,6 @@ const ( UntypedValue ) -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits containst the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. 
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -143,7 +74,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -151,10 +82,14 @@ func (v *valueFunc) Write(out *dto.Metric) error { // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constMetric{ desc: desc, @@ -186,19 +121,20 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) + return populateMetric(m.valType, m.val, m.labelPairs, nil, out) } func populateMetric( t ValueType, v float64, labelPairs []*dto.LabelPair, + e *dto.Exemplar, m *dto.Metric, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -226,9 +162,44 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { Value: proto.String(labelValues[i]), }) } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) + labelPairs = append(labelPairs, desc.constLabelPairs...) 
+ sort.Sort(labelPairSorter(labelPairs)) return labelPairs } + +// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. +const ExemplarMaxRunes = 64 + +// newExemplar creates a new dto.Exemplar from the provided values. An error is +// returned if any of the label names or values are invalid or if the total +// number of runes in the label names and values exceeds ExemplarMaxRunes. +func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { + e := &dto.Exemplar{} + e.Value = proto.Float64(value) + tsProto, err := ptypes.TimestampProto(ts) + if err != nil { + return nil, err + } + e.Timestamp = tsProto + labelPairs := make([]*dto.LabelPair, 0, len(l)) + var runes int + for name, value := range l { + if !checkLabelName(name) { + return nil, fmt.Errorf("exemplar label name %q is invalid", name) + } + runes += utf8.RuneCountInString(name) + if !utf8.ValidString(value) { + return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) + } + runes += utf8.RuneCountInString(value) + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(name), + Value: proto.String(value), + }) + } + if runes > ExemplarMaxRunes { + return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) + } + e.Label = labelPairs + return e, nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7f3eef9a4..d53848dc4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,200 +20,265 @@ import ( "github.com/prometheus/common/model" ) -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues - desc *Desc - - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 hashAddByte func(h uint64, b byte) uint64 } -// newMetricVec returns an initialized MetricVec. The concrete value is -// returned for embedding into another struct. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, +// newMetricVec returns an initialized metricVec. 
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, hashAdd: hashAdd, hashAddByte: hashAddByte, } } -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) } -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { - ch <- m.desc +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// Without explicit forwarding of Describe, Collect, Reset, those methods won't +// show up in GoDoc. + +// Describe implements Collector. +func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } + // Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() +func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - for _, metrics := range m.children { - for _, metric := range metrics { - ch <- metric.metric +// Reset deletes all metrics in this vector. +func (m *metricVec) Reset() { m.metricMap.Reset() } + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) } } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil } -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { h, err := m.hashLabelValues(lvs) if err != nil { return nil, err } - return m.getOrCreateMetricWithLabelValues(h, lvs), nil + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { h, err := m.hashLabels(labels) if err != nil { return nil, err } - return m.getOrCreateMetricWithLabels(h, labels), nil + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil } -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) 
- if err != nil { - panic(err) +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) } - return metric + return h, nil } -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err } - return metric + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil } -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - m.mtx.Lock() - defer m.mtx.Unlock() +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} - h, err := m.hashLabelValues(lvs) - if err != nil { - return false +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. 
+func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } } - return m.deleteByHashWithLabelValues(h, lvs) } -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { m.mtx.Lock() defer m.mtx.Unlock() - h, err := m.hashLabels(labels) - if err != nil { - return false + for h := range m.metrics { + delete(m.metrics, h) } - - return m.deleteByHashWithLabels(h, labels) } // deleteByHashWithLabelValues removes the metric from the hash bucket h. If // there are multiple matches in the bucket, use lvs to select a metric and // remove only that metric. -func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabelValues(metrics, lvs) + i := findMetricWithLabelValues(metrics, lvs, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } @@ -221,69 +286,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { // deleteByHashWithLabels removes the metric from the hash bucket h. If there // are multiple matches in the bucket, use lvs to select a metric and remove // only that metric. -func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabels(metrics, labels) + i := findMetricWithLabels(m.desc, metrics, labels, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } -// Reset deletes all metrics in this vector. 
-func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabelValues(hash, lvs) + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) m.mtx.RUnlock() if ok { return metric @@ -291,13 +325,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabelValues(hash, lvs) + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) } return metric } @@ -306,9 +338,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabels(hash, labels) + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) m.mtx.RUnlock() if ok { return metric @@ -316,33 +350,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabels(hash, labels) + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) if !ok { - lvs := m.extractLabelValues(labels) + lvs := extractLabelValues(m.desc, labels, curry) metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) } return metric } -// getMetricWithLabelValues gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. 
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { return metrics[i].metric, true } } return nil, false } -// getMetricWithLabels gets a metric while handling possible collisions in +// getMetricWithHashAndLabels gets a metric while handling possible collisions in // the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { return metrics[i].metric, true } } @@ -351,9 +389,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) // findMetricWithLabelValues returns the index of the matching metric or // len(metrics) if not found. -func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { + if matchLabelValues(metric.values, lvs, curry) { return i } } @@ -362,32 +402,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l // findMetricWithLabels returns the index of the matching metric or len(metrics) // if not found. 
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { + if matchLabels(desc, metric.values, labels, curry) { return i } } return len(metrics) } -func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { return false } + var iLVs, iCurry int for i, v := range values { - if v != lvs[i] { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { return false } + iLVs++ } return true } -func (m *MetricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { return false } - for i, k := range m.desc.variableLabels { + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } if values[i] != labels[k] { return false } @@ -395,10 +454,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool { return true } -func (m *MetricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } labelValues[i] = labels[k] } return labelValues } + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 000000000..438aa5e92 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + //lint:ignore SA1019 Need to keep deprecated package for compatibility. 
+ "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. Wrapping a nil value is valid, resulting +// in a no-op Registerer. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// Wrapping a nil value is valid, resulting in a no-op Registerer. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + if r.wrappedRegisterer == nil { + return nil + } + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + if r.wrappedRegisterer == nil { + return + } + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + if r.wrappedRegisterer == nil { + return false + } + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. 
+ if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 9805432c2..2f4930d9d 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,11 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -package io_prometheus_client // import "github.com/prometheus/client_model/go" +package io_prometheus_client -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +19,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 @@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{ 3: "UNTYPED", 4: "HISTOGRAM", } + var MetricType_value = map[string]int32{ "COUNTER": 0, "GAUGE": 1, @@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType { *p = x return p } + func (x MetricType) String() string { return proto.EnumName(MetricType_name, int32(x)) } + func (x *MetricType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") if err != nil { @@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } + func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } type LabelPair struct { @@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} + return fileDescriptor_6039342a2ba47b72, []int{0} } + func (m *LabelPair) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LabelPair.Unmarshal(m, b) } func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) } -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) } func (m *LabelPair) XXX_Size() int { return xxx_messageInfo_LabelPair.Size(m) @@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} + return fileDescriptor_6039342a2ba47b72, []int{1} } + func (m *Gauge) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Gauge.Unmarshal(m, b) } func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) } -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, 
src) +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) } func (m *Gauge) XXX_Size() int { return xxx_messageInfo_Gauge.Size(m) @@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} + return fileDescriptor_6039342a2ba47b72, []int{2} } + func (m *Counter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Counter.Unmarshal(m, b) } func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Counter.Marshal(b, m, deterministic) } -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) } func (m *Counter) XXX_Size() int { return xxx_messageInfo_Counter.Size(m) @@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 { return 0 } +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + type Quantile struct { Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} + return fileDescriptor_6039342a2ba47b72, []int{3} } + func (m *Quantile) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantile.Unmarshal(m, b) } func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) } -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) } func (m *Quantile) XXX_Size() int { return xxx_messageInfo_Quantile.Size(m) @@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} + return fileDescriptor_6039342a2ba47b72, []int{4} } + func (m *Summary) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Summary.Unmarshal(m, b) } func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Summary.Marshal(b, m, deterministic) } -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) } func (m *Summary) XXX_Size() int { return 
xxx_messageInfo_Summary.Size(m) @@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} + return fileDescriptor_6039342a2ba47b72, []int{5} } + func (m *Untyped) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Untyped.Unmarshal(m, b) } func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) } -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) } func (m *Untyped) XXX_Size() int { return xxx_messageInfo_Untyped.Size(m) @@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} + return fileDescriptor_6039342a2ba47b72, []int{6} } + func (m *Histogram) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Histogram.Unmarshal(m, b) } func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) } -func (dst *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(dst, src) +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) } func (m *Histogram) XXX_Size() int { return xxx_messageInfo_Histogram.Size(m) @@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} + return fileDescriptor_6039342a2ba47b72, []int{7} } + func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) } func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) } -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) } func (m *Bucket) XXX_Size() int { return xxx_messageInfo_Bucket.Size(m) @@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 { return 0 } +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return 
m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + type Metric struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` @@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} + return fileDescriptor_6039342a2ba47b72, []int{9} } + func (m *Metric) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metric.Unmarshal(m, b) } func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metric.Marshal(b, m, deterministic) } -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) } func (m *Metric) XXX_Size() int { return xxx_messageInfo_Metric.Size(m) @@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} + return fileDescriptor_6039342a2ba47b72, []int{10} } + func (m *MetricFamily) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetricFamily.Unmarshal(m, b) } func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) } -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) } func (m *MetricFamily) XXX_Size() int { return xxx_messageInfo_MetricFamily.Size(m) @@ -572,6 +660,7 @@ func (m *MetricFamily) 
GetMetric() []*Metric { } func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") @@ -580,50 +669,55 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } - -var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 
0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 
0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 11839ed65..bd4e34745 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -30,17 +30,38 @@ type Encoder interface { Encode(*dto.MetricFamily) error } -type encoder func(*dto.MetricFamily) error +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) } -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. 
func Negotiate(h http.Header) Format { for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer + ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": @@ -51,38 +72,91 @@ func Negotiate(h http.Header) Format { return FmtProtoCompact } } - // Check for text format. + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } } return FmtText } -// NewEncoder returns a new encoder based on content type negotiation. +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. 
func NewEncoder(w io.Writer, format Format) Encoder { switch format { case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } } - panic("expfmt.NewEncoder: unknown format") + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) } diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c71bcb981..0f176fa64 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -19,10 +19,12 @@ type Format string // Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" // The Content-Type values for the different wire protocols. FmtUnknown Format = `` @@ -30,6 +32,7 @@ const ( FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` FmtProtoText Format = ProtoFmt + ` encoding=text` FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` ) const ( diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 000000000..8a9313a3b --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), 
+ 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. +func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. +func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
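For the lower-level path, a minimal sketch (the `dumpOpenMetrics` wrapper and the in-memory buffer are assumptions, not patch content) of using `MetricFamilyToOpenMetrics` together with `FinalizeOpenMetrics`, which supplies the `# EOF` line the converter deliberately leaves to its caller:

```go
package example

import (
	"bytes"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// dumpOpenMetrics renders a single metric family as OpenMetrics text and
// terminates the exposition with the mandatory `# EOF` line.
func dumpOpenMetrics(mf *dto.MetricFamily) (string, error) {
	var buf bytes.Buffer
	if _, err := expfmt.MetricFamilyToOpenMetrics(&buf, mf); err != nil {
		return "", err
	}
	// The converter leaves the trailing `# EOF` to the caller.
	if _, err := expfmt.FinalizeOpenMetrics(&buf); err != nil {
		return "", err
	}
	return buf.String(), nil
}
```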
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f11321cd0..5ba503b06 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,13 +14,45 @@ package expfmt import ( + "bufio" "fmt" "io" + "io/ioutil" "math" + "strconv" "strings" + "sync" - dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } ) // MetricFamilyToText converts a MetricFamily proto message into text format and @@ -32,37 +64,90 @@ import ( // will result in invalid text format output. // // This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) } name := in.GetName() if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() } + var n int + // Comments, first HELP, then TYPE. 
if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) + n, err = w.WriteString("# HELP ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return } metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } written += n if err != nil { - return written, err + return } // Finally the samples, one line for each. @@ -75,9 +160,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Counter.GetValue(), - out, ) case dto.MetricType_GAUGE: if metric.Gauge == nil { @@ -86,9 +170,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Gauge.GetValue(), - out, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { @@ -97,9 +180,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Untyped.GetValue(), - out, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { @@ -109,29 +191,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } for _, q := range metric.Summary.Quantile { n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), q.GetValue(), - out, ) written += n if err != nil { - return written, err + return } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), - out, ) + written += n if err != nil { - return written, err + return } - written += n n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), - out, ) case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { @@ -140,46 +219,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } infSeen := false - for _, q := range metric.Histogram.Bucket { + for _, b := range metric.Histogram.Bucket { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), ) written += n if err != nil { - return written, err + return } - if math.IsInf(q.GetUpperBound(), +1) { + if 
math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), float64(metric.Histogram.GetSampleCount()), - out, ) + written += n if err != nil { - return written, err + return } - written += n } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), - out, ) + written += n if err != nil { - return written, err + return } - written += n n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Histogram.GetSampleCount()), - out, ) default: return written, fmt.Errorf( @@ -188,116 +263,203 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } written += n if err != nil { - return written, err + return } } - return written, nil + return } -// writeSample writes a single sample in text format to out, given the metric +// writeSample writes a single sample in text format to w, given the metric // name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. func writeSample( - name string, + w enhancedWriter, + name, suffix string, metric *dto.Metric, - additionalLabelName, additionalLabelValue string, + additionalLabelName string, additionalLabelValue float64, value float64, - out io.Writer, ) (int, error) { var written int - n, err := fmt.Fprint(out, name) + n, err := w.WriteString(name) written += n if err != nil { return written, err } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { return written, err } - n, err = fmt.Fprintf(out, " %v", value) + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) written += n if err != nil { return written, err } if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) written += n if err != nil { return written, err } } - n, err = out.Write([]byte{'\n'}) - written += n + err = w.WriteByte('\n') + written++ if err != nil { return written, err } return written, nil } -// labelPairsToText converts a slice of LabelPair proto messages plus the +// writeLabelPairs converts a slice of LabelPair proto messages plus the // explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( +// text format and writes it to 'w'. 
An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, + additionalLabelName string, additionalLabelValue float64, ) (int, error) { if len(in) == 0 && additionalLabelName == "" { return 0, nil } - var written int - separator := '{' + var ( + written int + separator byte = '{' + ) for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) written += n if err != nil { return written, err } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } separator = ',' } if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) written += n if err != nil { return written, err } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } } - n, err := out.Write([]byte{'}'}) - written += n + err := w.WriteByte('}') + written++ if err != nil { return written, err } return written, nil } +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) ) -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) + return quotedEscaper.WriteString(w, v) + } + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. 
+func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err } +} - return escape.Replace(v) +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ec3d86ba7..342e5940d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. 
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +755,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb6..26e92288c 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index f7250909b..00804b7fe 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -21,7 +21,6 @@ import ( ) var ( - separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 74ed5a9f7..490a0240c 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -43,7 +43,7 @@ const ( // (1970-01-01 00:00 UTC) excluding leap seconds. type Time int64 -// Interval describes and interval between two timestamps. +// Interval describes an interval between two timestamps. type Interval struct { Start, End Time } @@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - *t = Time(v + va) + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } default: return fmt.Errorf("invalid time %q", string(b)) @@ -180,6 +186,10 @@ var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { + // Allow 0 without a unit. 
+ if durationStr == "0" { + return 0, nil + } matches := durationRE.FindStringSubmatch(durationStr) if len(matches) != 3 { return 0, fmt.Errorf("not a valid duration string: %q", durationStr) diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml new file mode 100644 index 000000000..0aa09edac --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -0,0 +1,4 @@ +--- +linters: + enable: + - golint diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index 66a0b7cf7..000000000 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: -- 1.9.x -- 1.10.x - -go_import_path: github.com/prometheus/procfs - -script: -- make style check_license vet test staticcheck diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9a1aff412 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +## Prometheus Community Code of Conduct + +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 40503edbf..943de7615 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -2,17 +2,120 @@ Prometheus uses GitHub to manage reviews of pull requests. +* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) + * If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see + addressing (with `@...`) a suitable maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal - of inspiration. + of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). + +* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) + +## Steps to Contribute + +Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. + +Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. 
For a quicker response, contact us over [IRC](https://prometheus.io/community). + +For quickly compiling and testing your changes do: +``` +make test # Make sure all the tests pass before you commit and push :) +``` + +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. + +## Pull Request Checklist + +* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. + +* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). + +* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). + +* Add tests relevant to the fixed bug or new feature. + +## Dependency management + +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. + +All dependencies are vendored in the `vendor/` directory. + +To add or update a new dependency, use the `go get` command: + +```bash +# Pick the latest tagged release. +go get example.com/some/module/pkg + +# Pick a specific version. +go get example.com/some/module/pkg@vX.Y.Z +``` + +Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: + + +```bash +# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. +GO111MODULE=on go mod tidy + +GO111MODULE=on go mod vendor +``` + +You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. + + +## API Implementation Guidelines + +### Naming and Documentation + +Public functions and structs should normally be named according to the file(s) being read and parsed. For example, +the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function +should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). + +### Reading vs. Parsing + +Most functionality in this library consists of reading files and then parsing the text into structured data. In most +cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and +a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested +directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types +such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files +in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. 
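A minimal sketch of that read/parse split, assuming a hypothetical `/proc/thing` file; `Thing`, `ReadThings`, and `parseThings` are placeholder names and not part of procfs:

```go
package thing

import (
	"bufio"
	"bytes"
	"io"
	"io/ioutil"
	"strings"
)

// Thing is a placeholder for one parsed record of the hypothetical /proc/thing.
type Thing struct {
	Fields []string
}

// ReadThings performs the I/O: it reads the whole file once and hands the
// bytes to the parser.
func ReadThings(path string) ([]Thing, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return parseThings(bytes.NewReader(data))
}

// parseThings does the pure parsing against an io.Reader, so tests can feed it
// a bytes.Reader or strings.Reader without touching the filesystem.
func parseThings(r io.Reader) ([]Thing, error) {
	var out []Thing
	s := bufio.NewScanner(r)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}
		out = append(out, Thing{Fields: strings.Fields(line)})
	}
	return out, s.Err()
}
```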
+ +### /proc and /sys filesystem I/O + +The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. +Many of the files are changing continuously and the data being read can in some cases change between subsequent +reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls +to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the +full file in a single operation using an internal utility function called `util.ReadFileNoStat`. +This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +the file. + +Note that parsing the file's contents can still be performed one line at a time. This is done by first reading +the full file, and then using a scanner on the `[]byte` or `string` containing the data. + +``` + data, err := util.ReadFileNoStat("/proc/cpuinfo") + if err != nil { + return err + } + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) +``` + +The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files +can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +not bother to check the size of the file before reading. +``` + data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") +``` + diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 35993c41c..56ba67d3e 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1 +1,2 @@ -* Tobias Schmidt +* Johannes 'fish' Ziemke @discordianfish +* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 4d1098394..616a0d25e 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -11,67 +11,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) - -ifdef DEBUG - bindata_flags = -debug -endif - -STATICCHECK_IGNORE = - -all: format staticcheck build test - -style: - @echo ">> checking code style" - @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' - -check_license: - @echo ">> checking license header" - @./scripts/check_license.sh - -test: fixtures/.unpacked sysfs/fixtures/.unpacked - @echo ">> running all tests" - @$(GO) test -race $(shell $(GO) list ./... 
| grep -v /vendor/ | grep -v examples) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +include Makefile.common %/.unpacked: %.ttar + @echo ">> extracting fixtures" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -update_fixtures: fixtures.ttar sysfs/fixtures.ttar - -%fixtures.ttar: %/fixtures - rm -v $(dir $*)fixtures/.unpacked - ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ - -$(FIRST_GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck +update_fixtures: + rm -vf fixtures/.unpacked + ./ttar -c -f fixtures.ttar fixtures/ -.PHONY: all style check_license format test vet staticcheck +.PHONY: build +build: -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. -# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/staticcheck +.PHONY: test +test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 000000000..3ac29c636 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,302 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +GOVENDOR := +GO111MODULE := +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). 
+ GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif +endif +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... + +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell which gotestsum),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.7.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.18.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... 
+endif + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get $$m; \ + done + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifneq (,$(wildcard vendor)) + GO111MODULE=$(GO111MODULE) $(GO) mod vendor +endif + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +ifdef GO111MODULE +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +else + $(GOLANGCI_LINT) run $(pkgs) +endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy +ifeq (,$(wildcard vendor)) + @git diff --exit-code -- go.sum go.mod +else + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest 
+common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 209549471..55d1e3261 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -1,7 +1,7 @@ # procfs -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. +This package provides functions to retrieve system, kernel, and process +metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. @@ -9,3 +9,53 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) + +## Usage + +The procfs library is organized by packages based on whether the gathered data is coming from +/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, +/sys, or both. For example, cpu statistics are gathered from +`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount +point is initialized, and then the stat information is read. + +```go +fs, err := procfs.NewFS("/proc") +stats, err := fs.Stat() +``` + +Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. + +```go + fs, err := blockdevice.NewFS("/proc", "/sys") + stats, err := fs.ProcDiskstats() +``` + +## Package Organization + +The packages in this project are organized according to (1) whether the data comes from the `/proc` or +`/sys` filesystem and (2) the type of information being retrieved. 
For example, most process information +can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives +is available in the `blockdevices` sub-package. + +## Building and Testing + +The procfs library is intended to be built as part of another application, so there are no distributable binaries. +However, most of the API includes unit tests which can be run with `make test`. + +### Updating Test Fixtures + +The procfs library includes a set of test fixtures which include many example files from +the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file +which is extracted automatically during testing. To add/update the test fixtures, first +ensure the `fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make fixtures/.unpacked` or just `make test`. + +```bash +rm -rf fixtures +make test +``` + +Next, make the required changes to the extracted files in the `fixtures` directory. When +the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file +based on the updated `fixtures` directory. And finally, verify the changes using +`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md new file mode 100644 index 000000000..67741f015 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -0,0 +1,6 @@ +# Reporting a security issue + +The Prometheus security policy, including how to report vulnerabilities, can be +found here: + +https://prometheus.io/docs/operating/security/ diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 000000000..4e47e6172 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. 
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index d3a826807..f5b7939b2 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -31,19 +31,9 @@ type BuddyInfo struct { Sizes []float64 } -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - -// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. -func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. +func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) if err != nil { return nil, err } @@ -84,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 000000000..5623b24a1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,481 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
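The buddyinfo.go hunk above removes the package-level NewBuddyInfo constructor and renames the method from fs.NewBuddyInfo to fs.BuddyInfo on an explicitly constructed FS. A minimal sketch of an updated call site, assuming a hypothetical logBuddyInfo helper (the FS itself is constructed as in the earlier sketch):

```go
// Sketch only: a call site updated for the rename from fs.NewBuddyInfo()
// to fs.BuddyInfo(); logBuddyInfo is a hypothetical helper, not library code.
package example

import (
	"log"

	"github.com/prometheus/procfs"
)

func logBuddyInfo(fs procfs.FS) error {
	nodes, err := fs.BuddyInfo() // formerly fs.NewBuddyInfo()
	if err != nil {
		return err
	}
	for _, b := range nodes {
		// Sizes holds the free-page counts per allocation order parsed
		// from /proc/buddyinfo.
		log.Printf("node %s, zone %s: %v", b.Node, b.Zone, b.Sizes)
	}
	return nil
}
```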
+ +// +build linux + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +var ( + cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) + cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) +) + +// CPUInfo returns information about current system CPUs. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor", "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = 
strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + if !match || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + featuresLine := "" + commonCPUInfo := CPUInfo{} + i := 0 + if strings.TrimSpace(field[0]) == "Processor" { + commonCPUInfo = CPUInfo{ModelName: field[1]} + i = -1 + } else { + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo = []CPUInfo{firstcpu} + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "BogoMIPS": + if i == -1 { + cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor + i++ + cpuinfo[i].Processor = 0 + } + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "Features": + featuresLine = line + case "model name": + cpuinfo[i].ModelName = field[1] + } + } + fields := strings.SplitN(featuresLine, ": ", 2) + for i := range cpuinfo { + cpuinfo[i].Flags = strings.Fields(fields[1]) + } + return cpuinfo, nil + +} + +func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + commonCPUInfo := CPUInfo{VendorID: field[1]} + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "bogomips per cpu": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + commonCPUInfo.BogoMips = v + case "features": + commonCPUInfo.Flags = strings.Fields(field[1]) + } + if strings.HasPrefix(line, "processor") { + match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) + if len(match) < 2 { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + cpu := commonCPUInfo + v, err := strconv.ParseUint(match[1], 0, 32) + if err != nil { + return nil, err + } + cpu.Processor = uint(v) + cpuinfo = append(cpuinfo, cpu) + } + if strings.HasPrefix(line, "cpu number") { + break + } + } + + i := 0 + for 
scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "cpu number": + i++ + case "cpu MHz dynamic": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + } + } + + return cpuinfo, nil +} + +func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "cpu model": + cpuinfo[i].ModelName = field[1] + case "BogoMIPS": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "cpu": + cpuinfo[i].VendorID = field[1] + case "clock": + clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) + v, err := strconv.ParseFloat(clock, 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + } + } + return cpuinfo, nil +} + +func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + 
return nil, err + } + firstcpu := CPUInfo{Processor: uint(v)} + cpuinfo := []CPUInfo{firstcpu} + i := 0 + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + case "hart": + cpuinfo[i].CoreID = field[1] + case "isa": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + +func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode + return nil, errors.New("not implemented") +} + +// firstNonEmptyLine advances the scanner to the first non-empty line +// and returns the contents of that line +func firstNonEmptyLine(scanner *bufio.Scanner) string { + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) != "" { + return line + } + } + return "" +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go new file mode 100644 index 000000000..44b590ed3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build arm arm64 + +package procfs + +var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go new file mode 100644 index 000000000..91e272573 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build mips mipsle mips64 mips64le + +package procfs + +var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go new file mode 100644 index 000000000..95b5b4ec4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x + +package procfs + +var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go new file mode 100644 index 000000000..6068bd571 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build ppc64 ppc64le + +package procfs + +var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go new file mode 100644 index 000000000..e83c2e207 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build riscv riscv64 + +package procfs + +var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go new file mode 100644 index 000000000..26814eeba --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go new file mode 100644 index 000000000..d5bedf97f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux +// +build 386 amd64 + +package procfs + +var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 000000000..5048ad1f2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. +type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. 
+ out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. + c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 3ee8291e8..1e76173da 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -1,42 +1,95 @@ # Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures -Mode: 755 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc +Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231 +Directory: fixtures/proc/26231 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cmdline +Path: fixtures/proc/26231/cmdline Lines: 1 vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/comm +Path: fixtures/proc/26231/comm Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/exe +Path: fixtures/proc/26231/cwd +SymlinkTo: /usr/bin +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/environ +Lines: 1 +PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/fd +Directory: fixtures/proc/26231/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/0 +Path: fixtures/proc/26231/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - -Path: fixtures/26231/fd/1 +Path: fixtures/proc/26231/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/10 +Path: fixtures/proc/26231/fd/10 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/2 +Path: fixtures/proc/26231/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/3 +Path: fixtures/proc/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/io +Directory: fixtures/proc/26231/fdinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/0 +Lines: 6 +pos: 0 +flags: 02004000 +mnt_id: 13 +inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 +inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a +inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/1 +Lines: 4 +pos: 0 +flags: 02004002 +mnt_id: 13 +eventfd-count: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/10 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/2 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/fdinfo/3 +Lines: 3 +pos: 0 +flags: 02004002 +mnt_id: 9 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/io Lines: 7 rchar: 750339 wchar: 818609 @@ -47,7 +100,7 @@ write_bytes: 2048 cancelled_write_bytes: -1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/limits +Path: fixtures/proc/26231/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds @@ -58,7 +111,7 @@ Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 62898 62898 processes Max open files 2048 4096 files -Max locked memory 65536 65536 bytes +Max locked memory 18446744073708503040 18446744073708503040 bytes Max address space 8589934592 unlimited bytes Max file locks unlimited unlimited locks Max pending signals 62898 62898 signals @@ -68,14 +121,14 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/mountstats -Lines: 19 +Path: fixtures/proc/26231/mountstats +Lines: 20 device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: 
rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured @@ -88,13 +141,14 @@ device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers= NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 + ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/net +Directory: fixtures/proc/26231/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/net/dev +Path: fixtures/proc/26231/net/dev Lines: 4 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -102,133 +156,1784 @@ Inter-| Receive | Transmit eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/ns +Directory: fixtures/proc/26231/ns Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/mnt +Path: fixtures/proc/26231/ns/mnt SymlinkTo: mnt:[4026531840] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/net +Path: fixtures/proc/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/stat +Path: fixtures/proc/26231/root +SymlinkTo: / +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/schedstat +Lines: 1 +411605849 93680043 79 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps +Lines: 252 +00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager +Size: 8900 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2952 kB +Pss: 2952 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 2952 kB +Private_Dirty: 0 kB +Referenced: 2864 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex mr mw me dw sd +00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager +Size: 10236 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 6152 kB +Pss: 6152 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 6152 kB +Private_Dirty: 0 kB +Referenced: 5308 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr mw me dw sd +016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager +Size: 424 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 176 kB +Pss: 176 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 84 kB +Private_Dirty: 92 kB +Referenced: 176 kB +Anonymous: 92 kB +LazyFree: 0 kB +AnonHugePages: 0 kB 
+ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 12 kB +SwapPss: 12 kB +Locked: 0 kB +VmFlags: rd wr mr mw me dw ac sd +0171a000-0173f000 rw-p 00000000 00:00 0 +Size: 148 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 76 kB +Pss: 76 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 76 kB +Referenced: 76 kB +Anonymous: 76 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000000000-c000400000 rw-p 00000000 00:00 0 +Size: 4096 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 2564 kB +Pss: 2564 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 20 kB +Private_Dirty: 2544 kB +Referenced: 2544 kB +Anonymous: 2564 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1100 kB +SwapPss: 1100 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +c000400000-c001600000 rw-p 00000000 00:00 0 +Size: 18432 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 16024 kB +Pss: 16024 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 5864 kB +Private_Dirty: 10160 kB +Referenced: 11944 kB +Anonymous: 16024 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 440 kB +SwapPss: 440 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd nh +c001600000-c004000000 rw-p 00000000 00:00 0 +Size: 43008 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 +Size: 38596 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 1992 kB +Pss: 1992 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 476 kB +Private_Dirty: 1516 kB +Referenced: 1828 kB +Anonymous: 1992 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 384 kB +SwapPss: 384 kB +Locked: 0 kB +VmFlags: rd wr mr mw me ac sd +7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] +Size: 132 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 8 kB +Pss: 8 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 8 kB +Referenced: 8 kB +Anonymous: 8 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 4 kB +SwapPss: 4 kB +Locked: 0 kB +VmFlags: rd wr mr mw me gd ac +7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] +Size: 12 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd mr pf io de dd sd +7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] +Size: 8 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 4 kB +Pss: 0 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 4 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB 
+VmFlags: rd ex mr mw me de sd +ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] +Size: 4 kB +KernelPageSize: 4 kB +MMUPageSize: 4 kB +Rss: 0 kB +Pss: 0 kB +Shared_Clean: 0 kB +Shared_Dirty: 0 kB +Private_Clean: 0 kB +Private_Dirty: 0 kB +Referenced: 0 kB +Anonymous: 0 kB +LazyFree: 0 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 0 kB +SwapPss: 0 kB +Locked: 0 kB +VmFlags: rd ex +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/smaps_rollup +Lines: 17 +00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] +Rss: 29948 kB +Pss: 29944 kB +Shared_Clean: 4 kB +Shared_Dirty: 0 kB +Private_Clean: 15548 kB +Private_Dirty: 14396 kB +Referenced: 24752 kB +Anonymous: 20756 kB +LazyFree: 5848 kB +AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB +Shared_Hugetlb: 0 kB +Private_Hugetlb: 0 kB +Swap: 1940 kB +SwapPss: 1940 kB +Locked: 0 kB +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232 +Path: fixtures/proc/26231/status +Lines: 53 + +Name: prometheus +Umask: 0022 +State: S (sleeping) +Tgid: 26231 +Ngid: 0 +Pid: 26231 +PPid: 1 +TracerPid: 0 +Uid: 1000 1000 1000 0 +Gid: 1001 1001 1001 0 +FDSize: 128 +Groups: +NStgid: 1 +NSpid: 1 +NSpgid: 1 +NSsid: 1 +VmPeak: 58472 kB +VmSize: 58440 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 8028 kB +VmRSS: 6716 kB +RssAnon: 2092 kB +RssFile: 4624 kB +RssShmem: 0 kB +VmData: 2580 kB +VmStk: 136 kB +VmExe: 948 kB +VmLib: 6816 kB +VmPTE: 128 kB +VmPMD: 12 kB +VmSwap: 660 kB +HugetlbPages: 0 kB +Threads: 1 +SigQ: 8/63965 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 7be3c0fe28014a03 +SigIgn: 0000000000001000 +SigCgt: 00000001800004ec +CapInh: 0000000000000000 +CapPrm: 0000003fffffffff +CapEff: 0000003fffffffff +CapBnd: 0000003fffffffff +CapAmb: 0000000000000000 +Seccomp: 0 +Cpus_allowed: ff +Cpus_allowed_list: 0-7 +Mems_allowed: 00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 4742839 +nonvoluntary_ctxt_switches: 1727500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26231/wchan +Lines: 1 +poll_schedule_timeoutEOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cmdline +Path: fixtures/proc/26232/cmdline Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/comm +Path: fixtures/proc/26232/comm Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232/fd +Path: fixtures/proc/26232/cwd +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/26232/fd/0 +Path: fixtures/proc/26232/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/1 +Path: fixtures/proc/26232/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/2 +Path: fixtures/proc/26232/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/3 +Path: fixtures/proc/26232/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/4 +Path: fixtures/proc/26232/fd/4 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/limits +Path: fixtures/proc/26232/limits Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/stat +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/maps +Lines: 9 +55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat +55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat +55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] +7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive +7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so +7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] +7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] +7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/root +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26233 +Path: fixtures/proc/26232/wchan +Lines: 1 +0EOF +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26233/cmdline +Path: fixtures/proc/26233/cmdline Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/584 +Path: fixtures/proc/26233/schedstat +Lines: 8 + ____________________________________ +< this is a malformed schedstat file > + ------------------------------------ + \ ^__^ + \ (oo)\_______ + (__)\ )\/\ + ||----w | + || || +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/26234 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat +Path: fixtures/proc/26234/maps +Lines: 4 +08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh +08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh +0808c000-08146000 rwxp 00000000 00:00 0 +40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo +Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/short -Mode: 755 +Path: fixtures/proc/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/short/buddyinfo +Path: fixtures/proc/buddyinfo Lines: 3 -Node 0, zone -Node 0, zone -Node 0, zone +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/sizemismatch -Mode: 755 +Path: fixtures/proc/cpuinfo +Lines: 216 +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model 
name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.998 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.037 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.010 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm 
mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.028 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 799.989 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 0 +cpu cores : 4 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.083 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 1 +cpu cores : 4 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art 
arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.017 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 2 +cpu cores : 4 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 142 +model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz +stepping : 10 +microcode : 0xb4 +cpu MHz : 800.030 +cache size : 8192 KB +physical id : 0 +siblings : 8 +core id : 3 +cpu cores : 4 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs +bogomips : 4224.00 +clflush size : 64 +cache_alignment : 64 +address sizes : 39 bits physical, 48 bits virtual +power management: + +Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/buddyinfo/sizemismatch/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Path: fixtures/proc/crypto +Lines: 972 +name : ccm(aes) +driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) +module : ccm +priority : 300 +refcnt : 4 +selftest : passed +internal : no +type : aead +async : no +blocksize : 1 +ivsize : 16 +maxauthsize : 16 +geniv : + +name : cbcmac(aes) +driver : cbcmac(aes-aesni) +module : ccm +priority : 300 +refcnt : 7 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 16 + +name : ecdh +driver : ecdh-generic +module : ecdh_generic +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp +async : yes + +name : ecb(arc4) +driver : ecb(arc4)-generic +module : arc4 +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 1 +max keysize : 256 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : arc4 +driver : arc4-generic +module : arc4 +priority : 0 +refcnt : 3 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 1 +max keysize : 256 + +name : crct10dif +driver : crct10dif-pclmul +module : crct10dif_pclmul +priority : 200 +refcnt : 2 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32 +driver : crc32-pclmul +module : crc32_pclmul +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : __ghash +driver : cryptd(__ghash-pclmulqdqni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : ghash +driver : ghash-clmulni +module : ghash_clmulni_intel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : ahash +async : yes +blocksize : 16 +digestsize : 16 + +name : __ghash +driver : __ghash-pclmulqdqni +module : ghash_clmulni_intel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : shash +blocksize : 16 +digestsize : 16 + +name : crc32c +driver : crc32c-intel +module : crc32c_intel +priority : 200 +refcnt : 5 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : cbc(aes) +driver : cbc(aes-aesni) +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr(aes-aesni) +module : kernel +priority : 300 +refcnt : 5 +selftest : passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : pkcs1pad(rsa,sha256) +driver : pkcs1pad(rsa-generic,sha256) +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : __xts(aes) +driver : cryptd(__xts-aes-aesni) +module : kernel +priority : 451 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : xts(aes) +driver : xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : cryptd(__ctr-aes-aesni) +module : 
kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 1 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : ctr(aes) +driver : ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : cryptd(__cbc-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : cbc(aes) +driver : cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ecb(aes) +driver : cryptd(__ecb-aes-aesni) +module : kernel +priority : 450 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : ecb(aes) +driver : ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : skcipher +async : yes +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __generic-gcm-aes-aesni +driver : cryptd(__driver-generic-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : gcm(aes) +driver : generic-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __generic-gcm-aes-aesni +driver : __driver-generic-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 12 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : cryptd(__driver-gcm-aes-aesni) +module : kernel +priority : 50 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : rfc4106(gcm(aes)) +driver : rfc4106-gcm-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : no +type : aead +async : yes +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __gcm-aes-aesni +driver : __driver-gcm-aes-aesni +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : yes +type : aead +async : no +blocksize : 1 +ivsize : 8 +maxauthsize : 16 +geniv : + +name : __xts(aes) +driver : __xts-aes-aesni +module : kernel +priority : 401 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 32 +max keysize : 64 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __ctr(aes) +driver : __ctr-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 1 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : __cbc(aes) +driver : __cbc-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 16 +chunksize : 16 +walksize : 16 + +name : 
__ecb(aes) +driver : __ecb-aes-aesni +module : kernel +priority : 400 +refcnt : 1 +selftest : passed +internal : yes +type : skcipher +async : no +blocksize : 16 +min keysize : 16 +max keysize : 32 +ivsize : 0 +chunksize : 16 +walksize : 16 + +name : __aes +driver : __aes-aesni +module : kernel +priority : 300 +refcnt : 1 +selftest : passed +internal : yes +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : aes +driver : aes-aesni +module : kernel +priority : 300 +refcnt : 8 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : hmac(sha1) +driver : hmac(sha1-generic) +module : kernel +priority : 100 +refcnt : 9 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : ghash +driver : ghash-generic +module : kernel +priority : 100 +refcnt : 3 +selftest : passed +internal : no +type : shash +blocksize : 16 +digestsize : 16 + +name : jitterentropy_rng +driver : jitterentropy_rng +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha256 +module : kernel +priority : 221 +refcnt : 2 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha512 +module : kernel +priority : 220 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha384 +module : kernel +priority : 219 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_hmac_sha1 +module : kernel +priority : 218 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha256 +module : kernel +priority : 217 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha512 +module : kernel +priority : 216 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha384 +module : kernel +priority : 215 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_sha1 +module : kernel +priority : 214 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes256 +module : kernel +priority : 213 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes192 +module : kernel +priority : 212 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_nopr_ctr_aes128 +module : kernel +priority : 211 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : hmac(sha256) +driver : hmac(sha256-generic) +module : kernel +priority : 100 +refcnt : 10 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : stdrng +driver : drbg_pr_hmac_sha256 +module : kernel +priority : 210 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha512 +module : kernel +priority : 209 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha384 +module : kernel +priority : 208 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_hmac_sha1 +module : kernel +priority : 207 +refcnt : 1 +selftest : passed +internal : no +type : rng 
+seedsize : 0 + +name : stdrng +driver : drbg_pr_sha256 +module : kernel +priority : 206 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha512 +module : kernel +priority : 205 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha384 +module : kernel +priority : 204 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_sha1 +module : kernel +priority : 203 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes256 +module : kernel +priority : 202 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes192 +module : kernel +priority : 201 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : stdrng +driver : drbg_pr_ctr_aes128 +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : rng +seedsize : 0 + +name : 842 +driver : 842-scomp +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : 842 +driver : 842-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo-rle +driver : lzo-rle-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo-rle +driver : lzo-rle-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : lzo +driver : lzo-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : lzo +driver : lzo-generic +module : kernel +priority : 0 +refcnt : 9 +selftest : passed +internal : no +type : compression + +name : crct10dif +driver : crct10dif-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 2 + +name : crc32c +driver : crc32c-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 4 + +name : zlib-deflate +driver : zlib-deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-scomp +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : scomp + +name : deflate +driver : deflate-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : aes +driver : aes-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +name : sha224 +driver : sha224-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 28 + +name : sha256 +driver : sha256-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 32 + +name : sha1 +driver : sha1-generic +module : kernel +priority : 100 +refcnt : 11 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 20 + +name : md5 +driver : md5-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 64 +digestsize : 16 + +name : ecb(cipher_null) +driver : ecb-cipher_null +module : kernel +priority : 100 +refcnt : 1 +selftest 
: passed +internal : no +type : skcipher +async : no +blocksize : 1 +min keysize : 0 +max keysize : 0 +ivsize : 0 +chunksize : 1 +walksize : 1 + +name : digest_null +driver : digest_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : shash +blocksize : 1 +digestsize : 0 + +name : compress_null +driver : compress_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : compression + +name : cipher_null +driver : cipher_null-generic +module : kernel +priority : 0 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 1 +min keysize : 0 +max keysize : 0 + +name : rsa +driver : rsa-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : akcipher + +name : dh +driver : dh-generic +module : kernel +priority : 100 +refcnt : 1 +selftest : passed +internal : no +type : kpp + +name : aes +driver : aes-asm +module : kernel +priority : 200 +refcnt : 1 +selftest : passed +internal : no +type : cipher +blocksize : 16 +min keysize : 16 +max keysize : 32 + +Mode: 444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/valid -Mode: 755 +Path: fixtures/proc/diskstats +Lines: 52 + 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 + 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 + 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 + 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 + 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 + 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 + 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 + 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 + 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 + 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 + 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 + 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 + 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 + 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 + 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 + 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 + 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 + 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 + 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 + 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 + 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 + 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 + 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 + 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 + 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 + 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 + 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 + 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 + 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 + 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 + 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 + 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 + 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 + 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 + 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 + 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 + 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 + 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 + 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 + 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 + 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 + 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 + 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 + 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 + 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 + 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 + 8 0 sdb 326552 841 9657779 84 41822 
2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 + 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 + 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 + 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 + 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 + 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 +Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/valid/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 +Directory: fixtures/proc/fs +Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs +Directory: fixtures/proc/fs/fscache Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs/xfs +Path: fixtures/proc/fs/fscache/stats +Lines: 24 +FS-Cache statistics +Cookies: idx=3 dat=67877 spc=0 +Objects: alc=67473 nal=0 avl=67473 ded=388 +ChkAux : non=12 ok=33 upd=44 obs=55 +Pages : mrk=547164 unc=364577 +Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 +Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 +Invals : n=14 run=13 +Updates: n=7 nul=3 run=8 +Relinqs: n=394 nul=1 wcr=2 rtr=3 +AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 +Allocs : n=20 ok=19 wt=18 nbf=17 int=16 +Allocs : ops=15 owt=14 abt=13 +Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 +Retrvls: ops=151959 owt=42747 abt=44 +Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 +Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 +VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 +Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 +Ops : ini=377538 dfr=27 rel=377538 gc=37 +CacheOp: alo=1 luo=2 luc=3 gro=4 +CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 +CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 +CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/fs/xfs/stat +Path: fixtures/proc/fs/xfs/stat Lines: 23 extent_alloc 92447 97589 92448 93751 abt 0 0 0 0 @@ -255,40 +1960,131 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/mdstat -Lines: 26 +Path: fixtures/proc/loadavg +Lines: 1 +0.02 0.04 0.05 1/497 11947 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/mdstat +Lines: 60 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - + md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + +md0 : active raid1 sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] + +md4 : inactive raid1 sda3[0](F) sdb3[1](S) 4883648 blocks [2/2] [UU] -md6 : active raid1 sdb2[2] sda2[0] +md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] 195310144 
blocks [2/1] [U_] [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec -md8 : active raid1 sdb1[1] sda1[0] +md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] +md201 : active raid1 sda3[0] sdb3[1] + 1993728 blocks super 1.2 [2/2] [UU] + [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk +md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) + 523968 blocks super 1.2 [4/4] [UUUU] + resync=DELAYED + +md10 : active raid0 sda1[0] sdb1[1] + 314159265 blocks 64k chunks + +md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) + 4190208 blocks super 1.2 [2/2] [UU] + resync=PENDING + +md12 : active raid0 sdc2[0] sdd2[1] + 3886394368 blocks super 1.2 512k chunks + +md126 : active raid0 sdb[1] sdc[0] + 1855870976 blocks super external:/md127/0 128k chunks + +md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) + 7932 blocks super external:imsm + +md00 : active raid0 xvdb[0] + 4186624 blocks super 1.2 256k chunks + +md120 : active linear sda1[1] sdb1[0] + 2095104 blocks super 1.2 0k rounding + +md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] + 322560 blocks super 1.2 512k chunks + unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net +Path: fixtures/proc/meminfo +Lines: 42 +MemTotal: 15666184 kB +MemFree: 440324 kB +Buffers: 1020128 kB +Cached: 12007640 kB +SwapCached: 0 kB +Active: 6761276 kB +Inactive: 6532708 kB +Active(anon): 267256 kB +Inactive(anon): 268 kB +Active(file): 6494020 kB +Inactive(file): 6532440 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 0 kB +SwapFree: 0 kB +Dirty: 768 kB +Writeback: 0 kB +AnonPages: 266216 kB +Mapped: 44204 kB +Shmem: 1308 kB +Slab: 1807264 kB +SReclaimable: 1738124 kB +SUnreclaim: 69140 kB +KernelStack: 1616 kB +PageTables: 5288 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 7833092 kB +Committed_AS: 530844 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 36596 kB +VmallocChunk: 34359637840 kB +HardwareCorrupted: 0 kB +AnonHugePages: 12288 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +DirectMap4k: 91136 kB +DirectMap2M: 16039936 kB +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/dev +Path: fixtures/proc/net/arp +Lines: 2 +IP address HW type Flags HW address Mask Device +192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/dev Lines: 6 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed @@ -298,7 +2094,7 @@ docker0: 2568 38 0 0 0 0 0 0 438 eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs +Path: fixtures/proc/net/ip_vs Lines: 21 
IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags @@ -323,7 +2119,7 @@ FWM 10001000 wlc -> C0A83215:0CEA Route 0 0 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs_stats +Path: fixtures/proc/net/ip_vs_stats Lines: 6 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes @@ -333,10 +2129,28 @@ Lines: 6 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net/rpc +Path: fixtures/proc/net/protocols +Lines: 14 +protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em +PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n +TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y +UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n +UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n +PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n +RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n +UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n +TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y +NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfs +Path: fixtures/proc/net/rpc/nfs Lines: 5 net 18628 0 18628 6 rpc 4329785 0 4338291 @@ -345,7 +2159,7 @@ proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfsd +Path: fixtures/proc/net/rpc/nfsd Lines: 11 rc 0 6 18622 fh 0 0 0 0 0 @@ -360,7 +2174,93 @@ proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/xfrm_stat +Path: fixtures/proc/net/sockstat +Lines: 6 +sockets: used 1602 +TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 +UDP: inuse 12 mem 62 +UDPLITE: inuse 0 +RAW: inuse 0 +FRAG: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/sockstat6 +Lines: 5 +TCP6: inuse 17 +UDP6: inuse 9 +UDPLITE6: inuse 0 +RAW6: inuse 1 +FRAG6: inuse 0 memory 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat +Lines: 2 +00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/proc/net/softnet_stat.broken +Lines: 1 +00015c73 00020e76 F0000769 00000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/tcp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp +Lines: 4 + sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode + 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 + 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp6 +Lines: 3 + sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops + 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 + 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/udp_broken +Lines: 2 + sl local_address rem_address st + 1: 00000000:0016 00000000:0000 0A +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix +Lines: 6 +Num RefCount Protocol Flags Type St Inode Path +0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control +0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 5091797 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/unix_without_inode +Lines: 6 +Num RefCount Protocol Flags Type St Path +0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control +0000000000000000: 00000007 00000000 
00000000 0002 01 /dev/log +0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 +0000000000000000: 00000003 00000000 00000000 0001 03 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/net/xfrm_stat Lines: 28 XfrmInError 1 XfrmInBufferError 2 @@ -392,10 +2292,346 @@ XfrmOutStateInvalid 28765 XfrmAcquireError 24532 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/self +Directory: fixtures/proc/pressure +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/cpu +Lines: 1 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/io +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/pressure/memory +Lines: 2 +some avg10=0.10 avg60=2.00 avg300=3.85 total=15 +full avg10=0.20 avg60=3.00 avg300=4.95 total=25 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/schedstat +Lines: 6 +version 15 +timestamp 15819019232 +cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 +domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 +cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 +domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/stat +Path: fixtures/proc/slabinfo +Lines: 302 +slabinfo - version: 2.1 +# name : tunables : slabdata +pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 +pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 +nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 +kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 +pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 +x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 +iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 +bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 +bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 +fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : 
slabdata 0 0 0 +fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 +squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 +fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 +xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 +nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 +nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 +jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 +reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_trans_handle 0 0 
440 37 4 : tunables 0 0 0 : slabdata 0 0 0 +btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 +ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 +ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 +ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 +ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 +ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 +ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 +jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 +jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 +jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 +jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 +mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 +dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 +kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 +io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 +aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 +sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 +scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 +virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 +fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 +RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 +UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 +UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 +tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 +TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 +uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 +sgpool-16 2 68 960 34 8 : 
tunables 0 0 0 : slabdata 2 2 0 +sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 +bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 +mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 +isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 +io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 +aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 +bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 +posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 +iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 +iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 +UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 +ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 +ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 +inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 +xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 +ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 +ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 +PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 +RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 +UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 +tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 +request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 +TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 +hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 +dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 +eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 +inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 +scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 +request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 +blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 +bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 +biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 +biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 +biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 +bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : 
slabdata 1 1 0 +khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 +ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 +uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 +dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 +audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 +sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 +skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 +skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 +skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 +configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 +file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 +file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 +fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 +net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 +task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 +taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 +proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 +pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 +proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 +seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 +sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 +bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 +kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 +kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 +mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 +filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 +inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 +dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 +names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 +hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 +avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 +lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 +lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 +key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 +buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 +uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 +nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 +vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 +mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 +fs_cache 102 
756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 +files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 +signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 +sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 +task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 +cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 +anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 +anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 +pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 +Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 +Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 +Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 +Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 +Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 +trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 +ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 +pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 +radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 +task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 +vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 +dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 +kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 +kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 +kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 +kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 +kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 +kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 +kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 +kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 +kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 +kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 +kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 
47 47 0 +kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 +kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 +kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 +kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 +kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 +kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 +kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 @@ -415,32 +2651,3903 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/symlinktargets +Path: fixtures/proc/swaps +Lines: 2 +Filename Type Size Used Priority +/dev/dm-2 partition 131068 176 -2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/README +Path: fixtures/proc/symlinktargets/README Lines: 2 This directory contains some empty files that are the symlinks the files in the "fd" directory point to. They are otherwise ignored by the tests Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/abc +Path: fixtures/proc/symlinktargets/abc Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/def +Path: fixtures/proc/symlinktargets/def Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/ghi +Path: fixtures/proc/symlinktargets/ghi Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/uvw +Path: fixtures/proc/symlinktargets/uvw Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/xyz +Path: fixtures/proc/symlinktargets/xyz Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/kernel/random +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/entropy_avail +Lines: 1 +3943 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/poolsize +Lines: 1 +4096 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold +Lines: 1 +3072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/proc/sys/vm +Mode: 775 +# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/admin_reserve_kbytes +Lines: 1 +8192 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/block_dump +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/compact_unevictable_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_background_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_background_ratio +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_expire_centisecs +Lines: 1 +3000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_ratio +Lines: 1 +20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirty_writeback_centisecs +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/dirtytime_expire_seconds +Lines: 1 +43200 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/drop_caches +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/extfrag_threshold +Lines: 1 +500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/hugetlb_shm_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/laptop_mode +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/legacy_va_layout +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/lowmem_reserve_ratio +Lines: 1 +256 256 32 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/max_map_count +Lines: 1 +65530 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_early_kill +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/memory_failure_recovery +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_free_kbytes +Lines: 1 +67584 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_slab_ratio +Lines: 1 +5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/min_unmapped_ratio +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/mmap_min_addr +Lines: 1 +65536 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/nr_overcommit_hugepages +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/numa_stat +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/numa_zonelist_order +Lines: 1 +Node +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_dump_tasks +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/oom_kill_allocating_task +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_kbytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_memory +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/overcommit_ratio +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/page-cluster +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/panic_on_oom +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/percpu_pagelist_fraction +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/stat_interval +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/swappiness +Lines: 1 +60 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/user_reserve_kbytes +Lines: 1 +131072 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/vfs_cache_pressure +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_boost_factor +Lines: 1 +15000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/watermark_scale_factor +Lines: 1 +10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/sys/vm/zone_reclaim_mode +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/zoneinfo +Lines: 262 +Node 0, zone DMA + per-node stats + nr_inactive_anon 230981 + nr_active_anon 547580 + nr_inactive_file 316904 + nr_active_file 346282 + nr_unevictable 115467 + nr_slab_reclaimable 131220 + nr_slab_unreclaimable 47320 + nr_isolated_anon 0 + nr_isolated_file 0 + workingset_nodes 11627 + workingset_refault 466886 + workingset_activate 276925 + workingset_restore 84055 + workingset_nodereclaim 487 + nr_anon_pages 795576 + nr_mapped 215483 + nr_file_pages 761874 
+ nr_dirty 908 + nr_writeback 0 + nr_writeback_temp 0 + nr_shmem 224925 + nr_shmem_hugepages 0 + nr_shmem_pmdmapped 0 + nr_anon_transparent_hugepages 0 + nr_unstable 0 + nr_vmscan_write 12950 + nr_vmscan_immediate_reclaim 3033 + nr_dirtied 8007423 + nr_written 7752121 + nr_kernel_misc_reclaimable 0 + pages free 3952 + min 33 + low 41 + high 49 + spanned 4095 + present 3975 + managed 3956 + protection: (0, 2877, 7826, 7826, 7826) + nr_free_pages 3952 + nr_zone_inactive_anon 0 + nr_zone_active_anon 0 + nr_zone_inactive_file 0 + nr_zone_active_file 0 + nr_zone_unevictable 0 + nr_zone_write_pending 0 + nr_mlock 0 + nr_page_table_pages 0 + nr_kernel_stack 0 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 1 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 1 + numa_other 0 + pagesets + cpu: 0 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 1 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 2 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 3 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 4 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 5 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 6 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + cpu: 7 + count: 0 + high: 0 + batch: 1 + vm stats threshold: 8 + node_unreclaimable: 0 + start_pfn: 1 +Node 0, zone DMA32 + pages free 204252 + min 19510 + low 21059 + high 22608 + spanned 1044480 + present 759231 + managed 742806 + protection: (0, 0, 4949, 4949, 4949) + nr_free_pages 204252 + nr_zone_inactive_anon 118558 + nr_zone_active_anon 106598 + nr_zone_inactive_file 75475 + nr_zone_active_file 70293 + nr_zone_unevictable 66195 + nr_zone_write_pending 64 + nr_mlock 4 + nr_page_table_pages 1756 + nr_kernel_stack 2208 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 113952967 + numa_miss 0 + numa_foreign 0 + numa_interleave 0 + numa_local 113952967 + numa_other 0 + pagesets + cpu: 0 + count: 345 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 1 + count: 356 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 2 + count: 325 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 3 + count: 346 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 4 + count: 321 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 5 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 6 + count: 373 + high: 378 + batch: 63 + vm stats threshold: 48 + cpu: 7 + count: 339 + high: 378 + batch: 63 + vm stats threshold: 48 + node_unreclaimable: 0 + start_pfn: 4096 +Node 0, zone Normal + pages free 18553 + min 11176 + low 13842 + high 16508 + spanned 1308160 + present 1308160 + managed 1268711 + protection: (0, 0, 0, 0, 0) + nr_free_pages 18553 + nr_zone_inactive_anon 112423 + nr_zone_active_anon 440982 + nr_zone_inactive_file 241429 + nr_zone_active_file 275989 + nr_zone_unevictable 49272 + nr_zone_write_pending 844 + nr_mlock 154 + nr_page_table_pages 9750 + nr_kernel_stack 15136 + nr_bounce 0 + nr_zspages 0 + nr_free_cma 0 + numa_hit 162718019 + numa_miss 0 + numa_foreign 0 + numa_interleave 26812 + numa_local 162718019 + numa_other 0 + pagesets + cpu: 0 + count: 316 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 1 + count: 366 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 2 + count: 60 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 3 + count: 256 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 4 + count: 253 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 5 + count: 159 + high: 
378 + batch: 63 + vm stats threshold: 56 + cpu: 6 + count: 311 + high: 378 + batch: 63 + vm stats threshold: 56 + cpu: 7 + count: 264 + high: 378 + batch: 63 + vm stats threshold: 56 + node_unreclaimable: 0 + start_pfn: 1048576 +Node 0, zone Movable + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Node 0, zone Device + pages free 0 + min 0 + low 0 + high 0 + spanned 0 + present 0 + managed 0 + protection: (0, 0, 0, 0, 0) +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/dm-0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/dm-0/stat +Lines: 1 +6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/add_random +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/chunk_sectors +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/dax +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_granularity +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_bytes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/discard_zeroes_data +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/fua +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/hw_sector_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_poll_delay +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/io_timeout +Lines: 1 +30000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/block/sda/queue/iosched +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_max +Lines: 1 +16384 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async +Lines: 1 +250 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/low_latency +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/max_budget +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle +Lines: 1 +8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us +Lines: 1 +8000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iosched/timeout_sync +Lines: 1 +125 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/iostats +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/logical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_discard_segments +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb +Lines: 1 +32767 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_integrity_segments +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_sectors_kb +Lines: 1 +1280 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segment_size +Lines: 1 +65536 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/max_segments +Lines: 1 +168 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/minimum_io_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nomerges +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_requests +Lines: 1 +64 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/nr_zones +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/optimal_io_size +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/block/sda/queue/physical_block_size +Lines: 1 +512 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/read_ahead_kb +Lines: 1 +128 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rotational +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/rq_affinity +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/scheduler +Lines: 1 +mq-deadline kyber [bfq] none +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/wbt_lat_usec +Lines: 1 +75000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_cache +Lines: 1 +write back +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_same_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/queue/zoned +Lines: 1 +none +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/block/sda/stat +Lines: 1 +9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/fc_host/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo +Lines: 1 +30 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/fabric_name +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/node_name +Lines: 1 +0x2000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_id +Lines: 1 +0x000002 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_name +Lines: 1 +0x1000e0071bce95f2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_state +Lines: 1 +Online +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/port_type +Lines: 1 +Point-To-Point (direct nport connection) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/speed +Lines: 1 +16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/class/fc_host/host0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames +Lines: 1 +0xffffffffffffffff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/error_frames +Lines: 1 +0x0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts +Lines: 1 +0x13 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count +Lines: 1 +0x2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count +Lines: 1 +0x8 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count +Lines: 1 +0x9 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count +Lines: 1 +0x11 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count +Lines: 1 +0x10 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/nos_count +Lines: 1 +0x12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames +Lines: 1 +0x3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/rx_words +Lines: 1 +0x4 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset +Lines: 1 +0x7 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames +Lines: 1 +0x5 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/statistics/tx_words +Lines: 1 +0x6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_classes +Lines: 1 +Class 3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/supported_speeds +Lines: 1 +4 Gbit, 8 Gbit, 16 Gbit +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/fc_host/host0/symbolic_name +Lines: 1 +Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. 
OS:Linux +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/board_id +Lines: 1 +SM_1141000001000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Lines: 1 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data +Lines: 1 +2460436784 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets +Lines: 1 +89332064 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data +Lines: 1 +26540356890 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets +Lines: 1 +88622850 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait +Lines: 1 +3846 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/net/eth0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_assign_type +Lines: 1 +3 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/addr_len +Lines: 1 +6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/address +Lines: 1 +01:01:01:01:01:01 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/broadcast +Lines: 1 +ff:ff:ff:ff:ff:ff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_changes +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_down_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/carrier_up_count +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/net/eth0/dev_id +Lines: 1 +0x20 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/device +SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/dormant +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/duplex +Lines: 1 +full +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/flags +Lines: 1 +0x1303 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifalias +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/ifindex +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/iflink +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/link_mode +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/mtu +Lines: 1 +1500 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/name_assign_type +Lines: 1 +2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/netdev_group +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/operstate +Lines: 1 +up +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_port_name +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/phys_switch_id +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/speed +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/tx_queue_len +Lines: 1 +1000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/net/eth0/type +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/AC +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/power_supply/BAT0 +SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/name +Lines: 1 +package-0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 644 +# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj +Lines: 1 +118821284256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/name +Lines: 1 +core +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/powercap/intel-rapl:a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw +Lines: 1 +95000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name +Lines: 1 +long_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us +Lines: 1 +999424 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name +Lines: 1 +short_term +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw +Lines: 1 +4090000000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us +Lines: 1 +2440 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/enabled +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj +Lines: 1 +240422366267 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/name +Lines: 1 +package-10 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/powercap/intel-rapl:a/uevent +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/class/thermal +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/cur_state +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/max_state +Lines: 1 +50 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device0/type +Lines: 1 +Processor +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/cooling_device1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/cur_state +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/max_state +Lines: 1 +27 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/cooling_device1/type +Lines: 1 +intel_powerclamp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/temp +Lines: 1 +49925 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone0/type +Lines: 1 +bcm2835_thermal +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/thermal/thermal_zone1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/mode +Lines: 1 +enabled +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/passive +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/policy +Lines: 1 +step_wise +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/temp +Lines: 1 +-44000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/thermal/thermal_zone1/type +Lines: 1 +acpitz +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device +SymlinkTo: ../../../ACPI0003:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup +Lines: 1 +enabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms +Lines: 1 +10598 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type +Lines: 1 +Mains +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent +Lines: 2 +POWER_SUPPLY_NAME=AC +POWER_SUPPLY_ONLINE=0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 +Mode: 755 +# ttar - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm +Lines: 1 +2369000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity +Lines: 1 +98 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level +Lines: 1 +Normal +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold +Lines: 1 +95 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device +SymlinkTo: ../../../PNP0C0A:00 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full +Lines: 1 +50060000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design +Lines: 1 +47520000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now +Lines: 1 +49450000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer +Lines: 1 +LGC +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name +Lines: 1 +LNV-45N1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async +Lines: 1 +disabled +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control +Lines: 1 +auto +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled +Lines: 1 +disabled +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status +Lines: 1 +unsupported +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now +Lines: 1 +4830000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number +Lines: 1 +38109 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status +Lines: 1 +Discharging +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem +SymlinkTo: ../../../../../../../../../class/power_supply +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology +Lines: 1 +Li-ion +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type +Lines: 1 +Battery +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent +Lines: 16 +POWER_SUPPLY_NAME=BAT0 +POWER_SUPPLY_STATUS=Discharging +POWER_SUPPLY_PRESENT=1 +POWER_SUPPLY_TECHNOLOGY=Li-ion +POWER_SUPPLY_CYCLE_COUNT=0 +POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 +POWER_SUPPLY_VOLTAGE_NOW=11750000 +POWER_SUPPLY_POWER_NOW=5064000 +POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 +POWER_SUPPLY_ENERGY_FULL=47390000 +POWER_SUPPLY_ENERGY_NOW=40730000 +POWER_SUPPLY_CAPACITY=85 +POWER_SUPPLY_CAPACITY_LEVEL=Normal +POWER_SUPPLY_MODEL_NAME=LNV-45N1 +POWER_SUPPLY_MANUFACTURER=LGC +POWER_SUPPLY_SERIAL_NUMBER=38109 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design +Lines: 1 +10800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now +Lines: 1 +12229000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed +Lines: 1 +0 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class +Lines: 1 +0x020000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device +Lines: 1 +0x15d7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits +Lines: 1 +64 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq +Lines: 1 +140 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias +Lines: 1 +pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource +Lines: 13 +0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision +Lines: 1 +0x21 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device +Lines: 1 +0x225a +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor +Lines: 1 +0x17aa +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent +Lines: 6 +DRIVER=e1000e +PCI_CLASS=20000 +PCI_ID=8086:15D7 +PCI_SUBSYS_ID=17AA:225A +PCI_SLOT_NAME=0000:00:1f.6 +MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor +Lines: 1 +0x8086 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/name +Lines: 1 +demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/0/pool +Lines: 1 +iscsi-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/rbd/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/name +Lines: 1 +wrong +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/rbd/1/pool +Lines: 1 +wrong-images +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node1/vmstat +Lines: 6 +nr_free_pages 1 +nr_zone_inactive_anon 2 +nr_zone_active_anon 3 +nr_zone_inactive_file 4 +nr_zone_active_file 5 +nr_zone_unevictable 6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node2/vmstat +Lines: 6 +nr_free_pages 7 +nr_zone_inactive_anon 8 +nr_zone_active_anon 9 +nr_zone_inactive_file 10 +nr_zone_active_file 11 +nr_zone_unevictable 12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/clocksource/clocksource0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource +Lines: 1 +tsc hpet acpi_pm +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource +Lines: 
1 +tsc +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq +SymlinkTo: ../cpufreq/policy0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count +Lines: 1 +10084 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu0/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings +Lines: 1 +11 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list +Lines: 1 +0,4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq +Lines: 1 +1200195 +Mode: 400 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency +Lines: 1 +4294967295 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus +Lines: 1 +1 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors +Lines: 1 
+performance powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver +Lines: 1 +intel_pstate +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor +Lines: 1 +powersave +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq +Lines: 1 +3300000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq +Lines: 1 +1200000 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed +Lines: 1 + +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count +Lines: 1 +523 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count +Lines: 1 +34818 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpu1/topology +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings +Lines: 1 +ff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list +Lines: 1 +0-7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings +Lines: 1 +22 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list +Lines: 1 +1,5 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq +Lines: 1 +2400000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq +Lines: 1 +800000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors +Lines: 1 +performance powersave +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq +Lines: 1 +1219917 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver +Lines: 1 +intel_pstate +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor +Lines: 1 +powersave +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq +Lines: 1 +2400000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq +Lines: 1 +800000 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug +Lines: 7 +rate: 1.1M/sec +dirty: 20.4G +target: 20.4G +proportional: 427.5k +integral: 790.0k +change: 321.5k/sec +next io: 17ms +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 +Mode: 777 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written +Lines: 1 +512 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats +Lines: 5 +Unused: 99% +Metadata: 0% +Average: 10473 +Sectors per Q: 64 +Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us +Lines: 1 +1305 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio +Lines: 1 +100 
+Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits +Lines: 1 +289 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio +Lines: 1 +100 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits +Lines: 1 +546 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes +Lines: 1 +808189952 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned 
+Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly +Lines: 1 +131072 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used +Lines: 1 +933888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total +Lines: 1 +2147483648 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used +Lines: 1 +1867776 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes +Lines: 1 +933888 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes +Lines: 1 +1073741824 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used +Lines: 1 +32768 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes +Lines: 1 +8388608 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size +Lines: 1 +20971520 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label +Lines: 1 +fixture +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid +Lines: 1 +0abb23a9-579b-43e6-ad30-227ef47fcb9d +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes +Lines: 1 +644087808 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes +Lines: 1 +644087808 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly +Lines: 1 +262144 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes +Lines: 1 +114688 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes +Lines: 1 +429391872 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags +Lines: 1 +2 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes +Lines: 1 +16777216 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices +Mode: 755 +# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 +SymlinkTo: ../../../../devices/virtual/block/loop22 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 +SymlinkTo: ../../../../devices/virtual/block/loop23 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 +SymlinkTo: ../../../../devices/virtual/block/loop24 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 +SymlinkTo: ../../../../devices/virtual/block/loop25 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid +Lines: 1 +7f07c59f-6136-449c-ab87-e1cf2328731b +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize +Lines: 1 +16384 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize +Lines: 1 +4096 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sda1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sda1/stats/stats +Lines: 1 +extent_alloc 1 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/fs/xfs/sdb1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/fs/xfs/sdb1/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/fs/xfs/sdb1/stats/stats +Lines: 1 +extent_alloc 2 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path +Lines: 1 +/home/iscsi/file_back_1G +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path +Lines: 1 +/dev/rbd1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path +Lines: 1 +/dev/rbd/iscsi-images/demo +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d +SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +204950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +40325 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 +SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +104950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +20095 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +71235 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 +SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics 
+Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +301950 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +10195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +30195 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 +SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds +Lines: 1 +1234 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes +Lines: 1 +1504 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes +Lines: 1 +4733 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index b6c6b2ce1..0102ab0fd 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -14,69 +14,30 @@ package procfs import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/nfs" - "github.com/prometheus/procfs/xfs" + "github.com/prometheus/procfs/internal/fs" ) -// FS represents the pseudo-filesystem proc, which provides an interface to +// FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) +type FS struct { + proc fs.FS } -// XFSStats retrieves XFS filesystem runtime statistics. -func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} - -// NFSClientRPCStats retrieves NFS client RPC statistics. -func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint - return nfs.ParseClientRPCStats(f) +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. +func NewDefaultFS() (FS, error) { + return NewFS(DefaultMountPoint) } -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. -func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) +// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error +// if the mount point directory can't be read or is a file. +func NewFS(mountPoint string) (FS, error) { + fs, err := fs.NewFS(mountPoint) if err != nil { - return nil, err + return FS{}, err } - defer f.Close() - - return nfs.ParseServerRPCStats(f) + return FS{fs}, nil } diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go new file mode 100644 index 000000000..f8070e6e2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -0,0 +1,422 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Fscacheinfo represents fscache statistics. +type Fscacheinfo struct { + // Number of index cookies allocated + IndexCookiesAllocated uint64 + // data storage cookies allocated + DataStorageCookiesAllocated uint64 + // Number of special cookies allocated + SpecialCookiesAllocated uint64 + // Number of objects allocated + ObjectsAllocated uint64 + // Number of object allocation failures + ObjectAllocationsFailure uint64 + // Number of objects that reached the available state + ObjectsAvailable uint64 + // Number of objects that reached the dead state + ObjectsDead uint64 + // Number of objects that didn't have a coherency check + ObjectsWithoutCoherencyCheck uint64 + // Number of objects that passed a coherency check + ObjectsWithCoherencyCheck uint64 + // Number of objects that needed a coherency data update + ObjectsNeedCoherencyCheckUpdate uint64 + // Number of objects that were declared obsolete + ObjectsDeclaredObsolete uint64 + // Number of pages marked as being cached + PagesMarkedAsBeingCached uint64 + // Number of uncache page requests seen + UncachePagesRequestSeen uint64 + // Number of acquire cookie requests seen + AcquireCookiesRequestSeen uint64 + // Number of acq reqs given a NULL parent + AcquireRequestsWithNullParent uint64 + // Number of acq reqs rejected due to no cache available + AcquireRequestsRejectedNoCacheAvailable uint64 + // Number of acq reqs succeeded + AcquireRequestsSucceeded uint64 + // Number of acq reqs rejected due to error + AcquireRequestsRejectedDueToError uint64 + // Number of acq reqs failed on ENOMEM + AcquireRequestsFailedDueToEnomem uint64 + // Number of lookup calls made on cache backends + LookupsNumber uint64 + // Number of negative lookups made + LookupsNegative uint64 + // Number of positive lookups made + LookupsPositive uint64 + // Number of objects created by lookup + ObjectsCreatedByLookup uint64 + // Number of lookups timed out and requeued + LookupsTimedOutAndRequed uint64 + InvalidationsNumber uint64 + InvalidationsRunning uint64 + // Number of update cookie requests seen + UpdateCookieRequestSeen uint64 + // Number of upd reqs given a NULL parent + UpdateRequestsWithNullParent uint64 + // Number of upd reqs granted CPU time + UpdateRequestsRunning uint64 + // Number of relinquish cookie requests seen + RelinquishCookiesRequestSeen uint64 + // Number of rlq reqs given a NULL parent + RelinquishCookiesWithNullParent uint64 + // Number of rlq reqs waited on completion of creation + RelinquishRequestsWaitingCompleteCreation uint64 + // Relinqs rtr + RelinquishRetries uint64 + // Number of attribute changed requests seen + AttributeChangedRequestsSeen uint64 + // Number of attr changed requests queued + AttributeChangedRequestsQueued uint64 + // Number of attr changed rejected -ENOBUFS + AttributeChangedRejectDueToEnobufs uint64 + // Number of attr changed failed -ENOMEM + AttributeChangedFailedDueToEnomem uint64 + // Number of attr changed ops given CPU time + AttributeChangedOps uint64 + // Number of allocation requests seen + AllocationRequestsSeen uint64 + // Number of successful alloc reqs + AllocationOkRequests uint64 + // Number of alloc reqs that waited on lookup completion + AllocationWaitingOnLookup uint64 + // Number of alloc reqs rejected -ENOBUFS + AllocationsRejectedDueToEnobufs 
uint64 + // Number of alloc reqs aborted -ERESTARTSYS + AllocationsAbortedDueToErestartsys uint64 + // Number of alloc reqs submitted + AllocationOperationsSubmitted uint64 + // Number of alloc reqs waited for CPU time + AllocationsWaitedForCPU uint64 + // Number of alloc reqs aborted due to object death + AllocationsAbortedDueToObjectDeath uint64 + // Number of retrieval (read) requests seen + RetrievalsReadRequests uint64 + // Number of successful retr reqs + RetrievalsOk uint64 + // Number of retr reqs that waited on lookup completion + RetrievalsWaitingLookupCompletion uint64 + // Number of retr reqs returned -ENODATA + RetrievalsReturnedEnodata uint64 + // Number of retr reqs rejected -ENOBUFS + RetrievalsRejectedDueToEnobufs uint64 + // Number of retr reqs aborted -ERESTARTSYS + RetrievalsAbortedDueToErestartsys uint64 + // Number of retr reqs failed -ENOMEM + RetrievalsFailedDueToEnomem uint64 + // Number of retr reqs submitted + RetrievalsRequests uint64 + // Number of retr reqs waited for CPU time + RetrievalsWaitingCPU uint64 + // Number of retr reqs aborted due to object death + RetrievalsAbortedDueToObjectDeath uint64 + // Number of storage (write) requests seen + StoreWriteRequests uint64 + // Number of successful store reqs + StoreSuccessfulRequests uint64 + // Number of store reqs on a page already pending storage + StoreRequestsOnPendingStorage uint64 + // Number of store reqs rejected -ENOBUFS + StoreRequestsRejectedDueToEnobufs uint64 + // Number of store reqs failed -ENOMEM + StoreRequestsFailedDueToEnomem uint64 + // Number of store reqs submitted + StoreRequestsSubmitted uint64 + // Number of store reqs granted CPU time + StoreRequestsRunning uint64 + // Number of pages given store req processing time + StorePagesWithRequestsProcessing uint64 + // Number of store reqs deleted from tracking tree + StoreRequestsDeleted uint64 + // Number of store reqs over store limit + StoreRequestsOverStoreLimit uint64 + // Number of release reqs against pages with no pending store + ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 + // Number of release reqs against pages stored by time lock granted + ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 + // Number of release reqs ignored due to in-progress store + ReleaseRequestsIgnoredDueToInProgressStore uint64 + // Number of page stores cancelled due to release req + PageStoresCancelledByReleaseRequests uint64 + VmscanWaiting uint64 + // Number of times async ops added to pending queues + OpsPending uint64 + // Number of times async ops given CPU time + OpsRunning uint64 + // Number of times async ops queued for processing + OpsEnqueued uint64 + // Number of async ops cancelled + OpsCancelled uint64 + // Number of async ops rejected due to object lookup/create failure + OpsRejected uint64 + // Number of async ops initialised + OpsInitialised uint64 + // Number of async ops queued for deferred release + OpsDeferred uint64 + // Number of async ops released (should equal ini=N when idle) + OpsReleased uint64 + // Number of deferred-release async ops garbage collected + OpsGarbageCollected uint64 + // Number of in-progress alloc_object() cache ops + CacheopAllocationsinProgress uint64 + // Number of in-progress lookup_object() cache ops + CacheopLookupObjectInProgress uint64 + // Number of in-progress lookup_complete() cache ops + CacheopLookupCompleteInPorgress uint64 + // Number of in-progress grab_object() cache ops + CacheopGrabObjectInProgress uint64 + CacheopInvalidations uint64 + // Number of in-progress 
update_object() cache ops + CacheopUpdateObjectInProgress uint64 + // Number of in-progress drop_object() cache ops + CacheopDropObjectInProgress uint64 + // Number of in-progress put_object() cache ops + CacheopPutObjectInProgress uint64 + // Number of in-progress attr_changed() cache ops + CacheopAttributeChangeInProgress uint64 + // Number of in-progress sync_cache() cache ops + CacheopSyncCacheInProgress uint64 + // Number of in-progress read_or_alloc_page() cache ops + CacheopReadOrAllocPageInProgress uint64 + // Number of in-progress read_or_alloc_pages() cache ops + CacheopReadOrAllocPagesInProgress uint64 + // Number of in-progress allocate_page() cache ops + CacheopAllocatePageInProgress uint64 + // Number of in-progress allocate_pages() cache ops + CacheopAllocatePagesInProgress uint64 + // Number of in-progress write_page() cache ops + CacheopWritePagesInProgress uint64 + // Number of in-progress uncache_page() cache ops + CacheopUncachePagesInProgress uint64 + // Number of in-progress dissociate_pages() cache ops + CacheopDissociatePagesInProgress uint64 + // Number of object lookups/creations rejected due to lack of space + CacheevLookupsAndCreationsRejectedLackSpace uint64 + // Number of stale objects deleted + CacheevStaleObjectsDeleted uint64 + // Number of objects retired when relinquished + CacheevRetiredWhenReliquished uint64 + // Number of objects culled + CacheevObjectsCulled uint64 +} + +// Fscacheinfo returns information about current fscache statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt +func (fs FS) Fscacheinfo() (Fscacheinfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) + if err != nil { + return Fscacheinfo{}, err + } + + m, err := parseFscacheinfo(bytes.NewReader(b)) + if err != nil { + return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + } + + return *m, nil +} + +func setFSCacheFields(fields []string, setFields ...*uint64) error { + var err error + if len(fields) < len(setFields) { + return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + } + + for i := range setFields { + *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) + if err != nil { + return err + } + } + return nil +} + +func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { + var m Fscacheinfo + s := bufio.NewScanner(r) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + } + + switch fields[0] { + case "Cookies:": + err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, + &m.SpecialCookiesAllocated) + if err != nil { + return &m, err + } + case "Objects:": + err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, + &m.ObjectsAvailable, &m.ObjectsDead) + if err != nil { + return &m, err + } + case "ChkAux": + err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, + &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) + if err != nil { + return &m, err + } + case "Pages": + err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) + if err != nil { + return &m, err + } + case "Acquire:": + err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, + &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, 
&m.AcquireRequestsRejectedDueToError, + &m.AcquireRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + case "Lookups:": + err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, + &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) + if err != nil { + return &m, err + } + case "Invals": + err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) + if err != nil { + return &m, err + } + case "Updates:": + err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, + &m.UpdateRequestsRunning) + if err != nil { + return &m, err + } + case "Relinqs:": + err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, + &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) + if err != nil { + return &m, err + } + case "AttrChg:": + err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, + &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) + if err != nil { + return &m, err + } + case "Allocs": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, + &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, + &m.AllocationsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Retrvls:": + if strings.Split(fields[1], "=")[0] == "n" { + err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, + &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, + &m.RetrievalsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) + if err != nil { + return &m, err + } + } + case "Stores": + if strings.Split(fields[2], "=")[0] == "n" { + err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, + &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, + &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) + if err != nil { + return &m, err + } + } + case "VmScan": + err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, + &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, + &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) + if err != nil { + return &m, err + } + case "Ops": + if strings.Split(fields[2], "=")[0] == "pend" { + err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) + if err != nil { + return &m, err + } + } + case "CacheOp:": + if strings.Split(fields[1], "=")[0] == "alo" { + err := setFSCacheFields(fields[1:], 
&m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, + &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) + if err != nil { + return &m, err + } + } else if strings.Split(fields[1], "=")[0] == "inv" { + err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, + &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, + &m.CacheopSyncCacheInProgress) + if err != nil { + return &m, err + } + } else { + err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, + &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, + &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) + if err != nil { + return &m, err + } + } + case "CacheEv:": + err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, + &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) + if err != nil { + return &m, err + } + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 000000000..ba6681f52 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1,9 @@ +module github.com/prometheus/procfs + +go 1.13 + +require ( + github.com/google/go-cmp v0.5.4 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c +) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum new file mode 100644 index 000000000..7ceaf56b7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -0,0 +1,8 @@ +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go new file mode 100644 index 000000000..0040753b1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + // DefaultProcMountPoint is the common mount point of the proc filesystem. + DefaultProcMountPoint = "/proc" + + // DefaultSysMountPoint is the common mount point of the sys filesystem. + DefaultSysMountPoint = "/sys" + + // DefaultConfigfsMountPoint is the common mount point of the configfs + DefaultConfigfsMountPoint = "/sys/kernel/config" +) + +// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an +// interface to kernel data structures. +type FS string + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %q: %w", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %q is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 1ad21c91a..22cb07a6b 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -13,7 +13,11 @@ package util -import "strconv" +import ( + "io/ioutil" + "strconv" + "strings" +) // ParseUint32s parses a slice of strings into a slice of uint32s. func ParseUint32s(ss []string) ([]uint32, error) { @@ -44,3 +48,50 @@ func ParseUint64s(ss []string) ([]uint64, error) { return us, nil } + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ReadIntFromFile reads a file and attempts to parse a int64 from it. +func ReadIntFromFile(path string) (int64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 000000000..8051161b2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. +func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 000000000..c07de0b6c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 000000000..bd55b4537 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,appengine !linux + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. +func SysReadFile(file string) (string, error) { + return "", fmt.Errorf("not supported on this platform") +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go new file mode 100644 index 000000000..fe2355d3c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "strconv" +) + +// TODO(mdlayher): util packages are an anti-pattern and this should be moved +// somewhere else that is more focused in the future. + +// A ValueParser enables parsing a single string into a variety of data types +// in a concise and safe way. The Err method must be invoked after invoking +// any other methods to ensure a value was successfully parsed. +type ValueParser struct { + v string + err error +} + +// NewValueParser creates a ValueParser using the input string. +func NewValueParser(v string) *ValueParser { + return &ValueParser{v: v} +} + +// Int interprets the underlying value as an int and returns that value. +func (vp *ValueParser) Int() int { return int(vp.int64()) } + +// PInt64 interprets the underlying value as an int64 and returns a pointer to +// that value. +func (vp *ValueParser) PInt64() *int64 { + if vp.err != nil { + return nil + } + + v := vp.int64() + return &v +} + +// int64 interprets the underlying value as an int64 and returns that value. +// TODO: export if/when necessary. +func (vp *ValueParser) int64() int64 { + if vp.err != nil { + return 0 + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseInt(vp.v, base, 64) + if err != nil { + vp.err = err + return 0 + } + + return v +} + +// PUInt64 interprets the underlying value as an uint64 and returns a pointer to +// that value. +func (vp *ValueParser) PUInt64() *uint64 { + if vp.err != nil { + return nil + } + + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. + const base = 0 + v, err := strconv.ParseUint(vp.v, base, 64) + if err != nil { + vp.err = err + return nil + } + + return &v +} + +// Err returns the last error, if any, encountered by the ValueParser. 
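(Editorial aside, not part of the vendored file.) A minimal sketch of how the ValueParser above is meant to be consumed inside procfs: parse one or more tokens, then check Err once at the end. The `util` import alias for github.com/prometheus/procfs/internal/util is assumed.

	vp := util.NewValueParser("0x10")
	n := vp.Int()      // 16; base 0 lets ParseInt honour the "0x" prefix
	pu := vp.PUInt64() // pointer to 16
	if err := vp.Err(); err != nil {
		// the first parse failure, if any, surfaces here
	}
	_, _ = n, pu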
+func (vp *ValueParser) Err() error { + return vp.err +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e36d4a3bd..89e447746 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "bytes" "encoding/hex" "errors" "fmt" @@ -24,6 +25,8 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/util" ) // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. @@ -62,29 +65,18 @@ type IPVSBackendStatus struct { Weight uint64 } -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) + return parseIPVSStats(bytes.NewReader(data)) } // parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(file io.Reader) (IPVSStats, error) { +func parseIPVSStats(r io.Reader) (IPVSStats, error) { var ( statContent []byte statLines []string @@ -92,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(file) + statContent, err := ioutil.ReadAll(r) if err != nil { return IPVSStats{}, err } @@ -131,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) { return stats, nil } -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go new file mode 100644 index 000000000..da3a941d6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -0,0 +1,62 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "os" + + "github.com/prometheus/procfs/internal/util" +) + +// KernelRandom contains information about to the kernel's random number generator. +type KernelRandom struct { + // EntropyAvaliable gives the available entropy, in bits. + EntropyAvaliable *uint64 + // PoolSize gives the size of the entropy pool, in bits. + PoolSize *uint64 + // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. + URandomMinReseedSeconds *uint64 + // WriteWakeupThreshold the number of bits of entropy below which we wake up processes + // that do a select(2) or poll(2) for write access to /dev/random. + WriteWakeupThreshold *uint64 + // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep + // waiting for entropy from /dev/random. + ReadWakeupThreshold *uint64 +} + +// KernelRandom returns values from /proc/sys/kernel/random. +func (fs FS) KernelRandom() (KernelRandom, error) { + random := KernelRandom{} + + for file, p := range map[string]**uint64{ + "entropy_avail": &random.EntropyAvaliable, + "poolsize": &random.PoolSize, + "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, + "write_wakeup_threshold": &random.WriteWakeupThreshold, + "read_wakeup_threshold": &random.ReadWakeupThreshold, + } { + val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) + if os.IsNotExist(err) { + continue + } + if err != nil { + return random, err + } + *p = &val + } + + return random, nil +} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 000000000..0cce190ec --- /dev/null +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. 
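(Editorial aside.) A hedged usage sketch of the LoadAvg accessor introduced above; the same fs-method pattern applies to KernelRandom. procfs.NewFS and the LoadAvg field names are taken from this patch, the rest is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	// Print the 1, 5 and 15 minute load averages parsed from /proc/loadavg.
	fmt.Printf("load1=%.2f load5=%.2f load15=%.2f\n", la.Load1, la.Load5, la.Load15)
}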
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load %q: %w", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 9dc19583d..4c4493bfa 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -22,8 +22,9 @@ import ( ) var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) ) // MDStat holds info parsed from /proc/mdstat. @@ -34,118 +35,179 @@ type MDStat struct { ActivityState string // Number of active disks. DisksActive int64 - // Total number of disks the device consists of. + // Total number of disks the device requires. DisksTotal int64 + // Number of failed disks. + DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 // Number of blocks the device holds. BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Name of md component devices + Devices []string } -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, err } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. 
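(Editorial aside.) The reworked MDStat accessor above is typically consumed along these lines; the field names come from this hunk, while the surrounding imports (fmt, log, github.com/prometheus/procfs) are assumed.

	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	mdstats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		fmt.Printf("%s: %s, %d/%d disks (failed=%d spare=%d), devices=%v\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.DisksFailed, md.DisksSpare, md.Devices)
	}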
+func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { continue } - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) } - mdName := mainLine[0] - activityState := mainLine[2] + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) + return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) } - active, total, size, err := evalStatusline(lines[i+1]) + // Failed disks have the suffix (F) & Spare disks have the suffix (S). + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + return nil, fmt.Errorf("error parsing md device lines: %w", err) } - // j is the line number of the syncing-line. - j := i + 2 + syncLineIdx := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 + syncLineIdx++ } // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. 
+ if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } } } - mdStates = append(mdStates, MDStat{ + mdStats = append(mdStats, MDStat{ Name: mdName, - ActivityState: activityState, + ActivityState: state, DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, DisksTotal: total, BlocksTotal: size, BlocksSynced: syncedBlocks, + Devices: evalComponentDevices(deviceFields), }) } - return mdStates, nil + return mdStats, nil } -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { - size, err = strconv.ParseInt(matches[1], 10, 64) + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } return active, total, size, nil } -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) } return syncedBlocks, nil } + +func evalComponentDevices(deviceFields []string) []string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + return mdComponentDevices +} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 000000000..f65e174e5 --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Meminfo represents memory statistics. +type Meminfo struct { + // Total usable ram (i.e. physical ram minus a few reserved + // bits and the kernel binary code) + MemTotal *uint64 + // The sum of LowFree+HighFree + MemFree *uint64 + // An estimate of how much memory is available for starting + // new applications, without swapping. Calculated from + // MemFree, SReclaimable, the size of the file LRU lists, and + // the low watermarks in each zone. The estimate takes into + // account that the system needs some page cache to function + // well, and that not all reclaimable slab will be + // reclaimable, due to items being in use. The impact of those + // factors will vary from system to system. + MemAvailable *uint64 + // Relatively temporary storage for raw disk blocks shouldn't + // get tremendously large (20MB or so) + Buffers *uint64 + Cached *uint64 + // Memory that once was swapped out, is swapped back in but + // still also is in the swapfile (if memory is needed it + // doesn't need to be swapped out AGAIN because it is already + // in the swapfile. This saves I/O) + SwapCached *uint64 + // Memory that has been used more recently and usually not + // reclaimed unless absolutely necessary. + Active *uint64 + // Memory which has been less recently used. It is more + // eligible to be reclaimed for other purposes + Inactive *uint64 + ActiveAnon *uint64 + InactiveAnon *uint64 + ActiveFile *uint64 + InactiveFile *uint64 + Unevictable *uint64 + Mlocked *uint64 + // total amount of swap space available + SwapTotal *uint64 + // Memory which has been evicted from RAM, and is temporarily + // on the disk + SwapFree *uint64 + // Memory which is waiting to get written back to the disk + Dirty *uint64 + // Memory which is actively being written back to the disk + Writeback *uint64 + // Non-file backed pages mapped into userspace page tables + AnonPages *uint64 + // files which have been mapped, such as libraries + Mapped *uint64 + Shmem *uint64 + // in-kernel data structures cache + Slab *uint64 + // Part of Slab, that might be reclaimed, such as caches + SReclaimable *uint64 + // Part of Slab, that cannot be reclaimed on memory pressure + SUnreclaim *uint64 + KernelStack *uint64 + // amount of memory dedicated to the lowest level of page + // tables. + PageTables *uint64 + // NFS pages sent to the server, but not yet committed to + // stable storage + NFSUnstable *uint64 + // Memory used for block device "bounce buffers" + Bounce *uint64 + // Memory used by FUSE for temporary writeback buffers + WritebackTmp *uint64 + // Based on the overcommit ratio ('vm.overcommit_ratio'), + // this is the total amount of memory currently available to + // be allocated on the system. 
This limit is only adhered to + // if strict overcommit accounting is enabled (mode 2 in + // 'vm.overcommit_memory'). + // The CommitLimit is calculated with the following formula: + // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * + // overcommit_ratio / 100 + [total swap pages] + // For example, on a system with 1G of physical RAM and 7G + // of swap with a `vm.overcommit_ratio` of 30 it would + // yield a CommitLimit of 7.3G. + // For more details, see the memory overcommit documentation + // in vm/overcommit-accounting. + CommitLimit *uint64 + // The amount of memory presently allocated on the system. + // The committed memory is a sum of all of the memory which + // has been allocated by processes, even if it has not been + // "used" by them as of yet. A process which malloc()'s 1G + // of memory, but only touches 300M of it will show up as + // using 1G. This 1G is memory which has been "committed" to + // by the VM and can be used at any time by the allocating + // application. With strict overcommit enabled on the system + // (mode 2 in 'vm.overcommit_memory'),allocations which would + // exceed the CommitLimit (detailed above) will not be permitted. + // This is useful if one needs to guarantee that processes will + // not fail due to lack of memory once that memory has been + // successfully allocated. + CommittedAS *uint64 + // total size of vmalloc memory area + VmallocTotal *uint64 + // amount of vmalloc area which is used + VmallocUsed *uint64 + // largest contiguous block of vmalloc area which is free + VmallocChunk *uint64 + HardwareCorrupted *uint64 + AnonHugePages *uint64 + ShmemHugePages *uint64 + ShmemPmdMapped *uint64 + CmaTotal *uint64 + CmaFree *uint64 + HugePagesTotal *uint64 + HugePagesFree *uint64 + HugePagesRsvd *uint64 + HugePagesSurp *uint64 + Hugepagesize *uint64 + DirectMap4k *uint64 + DirectMap2M *uint64 + DirectMap1G *uint64 +} + +// Meminfo returns an information about current kernel/system memory statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Meminfo() (Meminfo, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) + if err != nil { + return Meminfo{}, err + } + + m, err := parseMemInfo(bytes.NewReader(b)) + if err != nil { + return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + } + + return *m, nil +} + +func parseMemInfo(r io.Reader) (*Meminfo, error) { + var m Meminfo + s := bufio.NewScanner(r) + for s.Scan() { + // Each line has at least a name and value; we ignore the unit. 
+ fields := strings.Fields(s.Text()) + if len(fields) < 2 { + return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) + } + + v, err := strconv.ParseUint(fields[1], 0, 64) + if err != nil { + return nil, err + } + + switch fields[0] { + case "MemTotal:": + m.MemTotal = &v + case "MemFree:": + m.MemFree = &v + case "MemAvailable:": + m.MemAvailable = &v + case "Buffers:": + m.Buffers = &v + case "Cached:": + m.Cached = &v + case "SwapCached:": + m.SwapCached = &v + case "Active:": + m.Active = &v + case "Inactive:": + m.Inactive = &v + case "Active(anon):": + m.ActiveAnon = &v + case "Inactive(anon):": + m.InactiveAnon = &v + case "Active(file):": + m.ActiveFile = &v + case "Inactive(file):": + m.InactiveFile = &v + case "Unevictable:": + m.Unevictable = &v + case "Mlocked:": + m.Mlocked = &v + case "SwapTotal:": + m.SwapTotal = &v + case "SwapFree:": + m.SwapFree = &v + case "Dirty:": + m.Dirty = &v + case "Writeback:": + m.Writeback = &v + case "AnonPages:": + m.AnonPages = &v + case "Mapped:": + m.Mapped = &v + case "Shmem:": + m.Shmem = &v + case "Slab:": + m.Slab = &v + case "SReclaimable:": + m.SReclaimable = &v + case "SUnreclaim:": + m.SUnreclaim = &v + case "KernelStack:": + m.KernelStack = &v + case "PageTables:": + m.PageTables = &v + case "NFS_Unstable:": + m.NFSUnstable = &v + case "Bounce:": + m.Bounce = &v + case "WritebackTmp:": + m.WritebackTmp = &v + case "CommitLimit:": + m.CommitLimit = &v + case "Committed_AS:": + m.CommittedAS = &v + case "VmallocTotal:": + m.VmallocTotal = &v + case "VmallocUsed:": + m.VmallocUsed = &v + case "VmallocChunk:": + m.VmallocChunk = &v + case "HardwareCorrupted:": + m.HardwareCorrupted = &v + case "AnonHugePages:": + m.AnonHugePages = &v + case "ShmemHugePages:": + m.ShmemHugePages = &v + case "ShmemPmdMapped:": + m.ShmemPmdMapped = &v + case "CmaTotal:": + m.CmaTotal = &v + case "CmaFree:": + m.CmaFree = &v + case "HugePages_Total:": + m.HugePagesTotal = &v + case "HugePages_Free:": + m.HugePagesFree = &v + case "HugePages_Rsvd:": + m.HugePagesRsvd = &v + case "HugePages_Surp:": + m.HugePagesSurp = &v + case "Hugepagesize:": + m.Hugepagesize = &v + case "DirectMap4k:": + m.DirectMap4k = &v + case "DirectMap2M:": + m.DirectMap2M = &v + case "DirectMap1G:": + m.DirectMap1G = &v + } + } + + return &m, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go new file mode 100644 index 000000000..59f4d5055 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -0,0 +1,180 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A MountInfo is a type that describes the details, options +// for each mount, parsed from /proc/self/mountinfo. +// The fields described in each entry of /proc/self/mountinfo +// is described in the following man page. 
+// http://man7.org/linux/man-pages/man5/proc.5.html +type MountInfo struct { + // Unique ID for the mount + MountID int + // The ID of the parent mount + ParentID int + // The value of `st_dev` for the files on this FS + MajorMinorVer string + // The pathname of the directory in the FS that forms + // the root for this mount + Root string + // The pathname of the mount point relative to the root + MountPoint string + // Mount options + Options map[string]string + // Zero or more optional fields + OptionalFields map[string]string + // The Filesystem type + FSType string + // FS specific information or "none" + Source string + // Superblock options + SuperOptions map[string]string +} + +// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. +func parseMountInfo(info []byte) ([]*MountInfo, error) { + mounts := []*MountInfo{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseMountInfoString(mountString) + if err != nil { + return nil, err + } + mounts = append(mounts, parsedMounts) + } + + err := scanner.Err() + return mounts, err +} + +// Parses a mountinfo file line, and converts it to a MountInfo struct. +// An important check here is to see if the hyphen separator, as if it does not exist, +// it means that the line is malformed. +func parseMountInfoString(mountString string) (*MountInfo, error) { + var err error + + mountInfo := strings.Split(mountString, " ") + mountInfoLength := len(mountInfo) + if mountInfoLength < 10 { + return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + } + + if mountInfo[mountInfoLength-4] != "-" { + return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + } + + mount := &MountInfo{ + MajorMinorVer: mountInfo[2], + Root: mountInfo[3], + MountPoint: mountInfo[4], + Options: mountOptionsParser(mountInfo[5]), + OptionalFields: nil, + FSType: mountInfo[mountInfoLength-3], + Source: mountInfo[mountInfoLength-2], + SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), + } + + mount.MountID, err = strconv.Atoi(mountInfo[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse mount ID") + } + mount.ParentID, err = strconv.Atoi(mountInfo[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse parent ID") + } + // Has optional fields, which is a space separated list of values. + // Example: shared:2 master:7 + if mountInfo[6] != "" { + mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) + if err != nil { + return nil, err + } + } + return mount, nil +} + +// mountOptionsIsValidField checks a string against a valid list of optional fields keys. +func mountOptionsIsValidField(s string) bool { + switch s { + case + "shared", + "master", + "propagate_from", + "unbindable": + return true + } + return false +} + +// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. +func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { + optionalFields := make(map[string]string) + for _, field := range o { + optionSplit := strings.SplitN(field, ":", 2) + value := "" + if len(optionSplit) == 2 { + value = optionSplit[1] + } + if mountOptionsIsValidField(optionSplit[0]) { + optionalFields[optionSplit[0]] = value + } + } + return optionalFields, nil +} + +// mountOptionsParser parses the mount options, superblock options. 
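// Editorial illustration (not from upstream): mountOptionsParser maps a
// comma-separated option string to key/value pairs, so for example
//
//	"rw,relatime,errors=remount-ro"
//
// parses to map[string]string{"rw": "", "relatime": "", "errors": "remount-ro"}.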
+func mountOptionsParser(mountOptions string) map[string]string { + opts := make(map[string]string) + options := strings.Split(mountOptions, ",") + for _, opt := range options { + splitOption := strings.Split(opt, "=") + if len(splitOption) < 2 { + key := splitOption[0] + opts[key] = "" + } else { + key, value := splitOption[0], splitOption[1] + opts[key] = value + } + } + return opts +} + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat("/proc/self/mountinfo") + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. +func GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 7a8a1e099..f7a828bb1 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -69,6 +69,8 @@ type MountStats interface { type MountStatsNFS struct { // The version of statistics provided. StatVersion string + // The mount options of the NFS mount. + Opts map[string]string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. @@ -179,11 +181,13 @@ type NFSOperationStats struct { // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration + CumulativeQueueMilliseconds uint64 // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration + CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration + CumulativeTotalRequestMilliseconds uint64 + // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. + Errors uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -202,7 +206,7 @@ type NFSTransportStats struct { // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration + IdleTimeSeconds uint64 // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. 
@@ -317,6 +321,7 @@ func parseMount(ss []string) (*Mount, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( + fieldOpts = "opts:" fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" @@ -333,12 +338,27 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if len(ss) == 0 { break } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } switch ss[0] { + case fieldOpts: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + if stats.Opts == nil { + stats.Opts = map[string]string{} + } + for _, opt := range strings.Split(ss[1], ",") { + split := strings.Split(opt, "=") + if len(split) == 2 { + stats.Opts[split[0]] = split[1] + } else { + stats.Opts[opt] = "" + } + } case fieldAge: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") if err != nil { @@ -347,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { return nil, err @@ -354,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { return nil, err @@ -479,8 +505,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { // line is reached. 
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 + // Minimum number of expected fields in each per-operation statistics set + minFields = 9 ) var ops []NFSOperationStats @@ -493,12 +519,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { break } - if len(ss) != numFields { + if len(ss) < minFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) + ns := make([]uint64, 0, minFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { @@ -508,17 +534,23 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, - }) + opStats := NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueMilliseconds: ns[5], + CumulativeTotalResponseMilliseconds: ns[6], + CumulativeTotalRequestMilliseconds: ns[7], + } + + if len(ns) > 8 { + opStats.Errors = ns[8] + } + + ops = append(ops, opStats) } return ops, s.Err() @@ -593,7 +625,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, + IdleTimeSeconds: ns[4], Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go new file mode 100644 index 000000000..9964a3600 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -0,0 +1,153 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
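(Editorial aside.) Note that the mountstats.go hunk above replaces the time.Duration fields with raw millisecond and second counters. A caller migrating across this change would convert explicitly, along these illustrative lines, where op is an NFSOperationStats and ts an NFSTransportStats value as defined above (the time import is assumed):

	queueTime := time.Duration(op.CumulativeQueueMilliseconds) * time.Millisecond
	idle := time.Duration(ts.IdleTimeSeconds) * time.Second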
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A ConntrackStatEntry represents one line from net/stat/nf_conntrack +// and contains netfilter conntrack statistics at one CPU core +type ConntrackStatEntry struct { + Entries uint64 + Found uint64 + Invalid uint64 + Ignore uint64 + Insert uint64 + InsertFailed uint64 + Drop uint64 + EarlyDrop uint64 + SearchRestart uint64 +} + +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { + return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) +} + +// Parses a slice of ConntrackStatEntries from the given filepath +func readConntrackStat(path string) ([]ConntrackStatEntry, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(path) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseConntrackStat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + } + + return stat, nil +} + +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { + var entries []ConntrackStatEntry + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + conntrackEntry, err := parseConntrackStatEntry(fields) + if err != nil { + return nil, err + } + entries = append(entries, *conntrackEntry) + } + + return entries, nil +} + +// Parses a ConntrackStatEntry from given array of fields +func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { + if len(fields) != 17 { + return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") + } + entry := &ConntrackStatEntry{} + + entries, err := parseConntrackStatField(fields[0]) + if err != nil { + return nil, err + } + entry.Entries = entries + + found, err := parseConntrackStatField(fields[2]) + if err != nil { + return nil, err + } + entry.Found = found + + invalid, err := parseConntrackStatField(fields[4]) + if err != nil { + return nil, err + } + entry.Invalid = invalid + + ignore, err := parseConntrackStatField(fields[5]) + if err != nil { + return nil, err + } + entry.Ignore = ignore + + insert, err := parseConntrackStatField(fields[8]) + if err != nil { + return nil, err + } + entry.Insert = insert + + insertFailed, err := parseConntrackStatField(fields[9]) + if err != nil { + return nil, err + } + entry.InsertFailed = insertFailed + + drop, err := parseConntrackStatField(fields[10]) + if err != nil { + return nil, err + } + entry.Drop = drop + + earlyDrop, err := parseConntrackStatField(fields[11]) + if err != nil { + return nil, err + } + entry.EarlyDrop = earlyDrop + + searchRestart, err := parseConntrackStatField(fields[16]) + if err != nil { + return nil, err + } + entry.SearchRestart = searchRestart + + return entry, nil +} + +// Parses a uint64 from given hex in string +func parseConntrackStatField(field string) (uint64, error) { + val, err := strconv.ParseUint(field, 16, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) + } + return val, err +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 3f2523371..47a710bef 
100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -47,23 +47,13 @@ type NetDevLine struct { // are interface names. type NetDev map[string]NetDevLine -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) } -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) -} - -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } @@ -75,7 +65,7 @@ func newNetDev(file string) (NetDev, error) { } defer f.Close() - nd := NetDev{} + netDev := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. @@ -83,20 +73,20 @@ func newNetDev(file string) (NetDev, error) { continue } - line, err := nd.parseLine(s.Text()) + line, err := netDev.parseLine(s.Text()) if err != nil { - return nd, err + return netDev, err } - nd[line.Name] = *line + netDev[line.Name] = *line } - return nd, s.Err() + return netDev, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. -func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") @@ -185,15 +175,14 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { +func (netDev NetDev) Total() NetDevLine { total := NetDevLine{} - names := make([]string, 0, len(nd)) - for _, ifc := range nd { + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets total.RxErrors += ifc.RxErrors total.RxDropped += ifc.RxDropped total.RxFIFO += ifc.RxFIFO diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go new file mode 100644 index 000000000..ac01dd847 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -0,0 +1,220 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
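(Editorial aside.) Before the net_ip_socket.go file below, a brief sketch of the renamed NetDev accessor from the net_dev.go hunk above; procfs.NewFS and the NetDev/Total signatures come from this patch, the surrounding imports (fmt, log, github.com/prometheus/procfs) are assumed.

	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nd, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	total := nd.Total()
	fmt.Printf("interfaces=%s rxBytes=%d\n", total.Name, total.RxBytes)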
+ +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. + readLimit = 4294967296 // Byte -> 4 GiB +) + +// this contains generic data structures for both udp and tcp sockets +type ( + // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. + NetIPSocket []*netIPSocketLine + + // NetIPSocketSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetIPSocket it does not collect + // the parsed lines into a slice. + NetIPSocketSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netIPSocketLine represents the fields parsed from a single line + // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. + netIPSocketLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +func newNetIPSocket(file string) (NetIPSocket, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocket NetIPSocket + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocket = append(netIPSocket, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netIPSocket, nil +} + +// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. +func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var netIPSocketSummary NetIPSocketSummary + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetIPSocketLine(fields) + if err != nil { + return nil, err + } + netIPSocketSummary.TxQueueLength += line.TxQueue + netIPSocketSummary.RxQueueLength += line.RxQueue + netIPSocketSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return &netIPSocketSummary, nil +} + +// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. 
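// Editorial illustration (not in the upstream file): an IPv4 local_address
// entry such as "0100007F:0016" therefore decodes like this --
//
//	b, _ := hex.DecodeString("0100007F")         // [0x01 0x00 0x00 0x7F]
//	ip := net.IP{b[3], b[2], b[1], b[0]}         // 127.0.0.1
//	port, _ := strconv.ParseUint("0016", 16, 64) // 22
//
// which matches what parseIP and parseNetIPSocketLine below compute.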
+ +func parseIP(hexIP string) (net.IP, error) { + var byteIP []byte + byteIP, err := hex.DecodeString(hexIP) + if err != nil { + return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + } + switch len(byteIP) { + case 4: + return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil + case 16: + i := net.IP{ + byteIP[3], byteIP[2], byteIP[1], byteIP[0], + byteIP[7], byteIP[6], byteIP[5], byteIP[4], + byteIP[11], byteIP[10], byteIP[9], byteIP[8], + byteIP[15], byteIP[14], byteIP[13], byteIP[12], + } + return i, nil + default: + return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + } +} + +// parseNetIPSocketLine parses a single line, represented by a list of fields. +func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { + line := &netIPSocketLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net socket line as it has less then 8 columns %q", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + } + if line.LocalAddr, err = parseIP(l[0]); err != nil { + return nil, err + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + } + if line.RemAddr, err = parseIP(r[0]); err != nil { + return nil, err + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in socket line as it has a missing colon %q", + fields[4], + ) + } + if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + } + + return line, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go new file mode 100644 index 000000000..8c6de3791 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -0,0 +1,180 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// NetProtocolStats stores the contents from /proc/net/protocols +type NetProtocolStats map[string]NetProtocolStatLine + +// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We +// only care about the first six columns as the rest are not likely to change +// and only serve to provide a set of capabilities for each protocol. +type NetProtocolStatLine struct { + Name string // 0 The name of the protocol + Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) + Sockets int64 // 2 Number of sockets in use by this protocol + Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol + Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. + MaxHeader uint64 // 5 Protocol specific max header size + Slab bool // 6 Indicates whether or not memory is allocated from the SLAB + ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module + Capabilities NetProtocolCapabilities +} + +// NetProtocolCapabilities contains a list of capabilities for each protocol +type NetProtocolCapabilities struct { + Close bool // 8 + Connect bool // 9 + Disconnect bool // 10 + Accept bool // 11 + IoCtl bool // 12 + Init bool // 13 + Destroy bool // 14 + Shutdown bool // 15 + SetSockOpt bool // 16 + GetSockOpt bool // 17 + SendMsg bool // 18 + RecvMsg bool // 19 + SendPage bool // 20 + Bind bool // 21 + BacklogRcv bool // 22 + Hash bool // 23 + UnHash bool // 24 + GetPort bool // 25 + EnterMemoryPressure bool // 26 +} + +// NetProtocols reads stats from /proc/net/protocols and returns a map of +// PortocolStatLine entries. 
As of this writing no official Linux Documentation +// exists, however the source is fairly self-explanatory and the format seems +// stable since its introduction in 2.6.12-rc2 +// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 +// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 +func (fs FS) NetProtocols() (NetProtocolStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) + if err != nil { + return NetProtocolStats{}, err + } + return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) +} + +func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { + nps := NetProtocolStats{} + + // Skip the header line + s.Scan() + + for s.Scan() { + line, err := nps.parseLine(s.Text()) + if err != nil { + return NetProtocolStats{}, err + } + + nps[line.Name] = *line + } + return nps, nil +} + +func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { + line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} + var err error + const enabled = "yes" + const disabled = "no" + + fields := strings.Fields(rawLine) + line.Name = fields[0] + line.Size, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.Memory, err = strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return nil, err + } + if fields[4] == enabled { + line.Pressure = 1 + } else if fields[4] == disabled { + line.Pressure = 0 + } else { + line.Pressure = -1 + } + line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + if fields[6] == enabled { + line.Slab = true + } else if fields[6] == disabled { + line.Slab = false + } else { + return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + } + line.ModuleName = fields[7] + + err = line.Capabilities.parseCapabilities(fields[8:]) + if err != nil { + return nil, err + } + + return line, nil +} + +func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { + // The capabilities are all bools so we can loop over to map them + capabilityFields := [...]*bool{ + &pc.Close, + &pc.Connect, + &pc.Disconnect, + &pc.Accept, + &pc.IoCtl, + &pc.Init, + &pc.Destroy, + &pc.Shutdown, + &pc.SetSockOpt, + &pc.GetSockOpt, + &pc.SendMsg, + &pc.RecvMsg, + &pc.SendPage, + &pc.Bind, + &pc.BacklogRcv, + &pc.Hash, + &pc.UnHash, + &pc.GetPort, + &pc.EnterMemoryPressure, + } + + for i := 0; i < len(capabilities); i++ { + if capabilities[i] == "y" { + *capabilityFields[i] = true + } else if capabilities[i] == "n" { + *capabilityFields[i] = false + } else { + return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 000000000..e36f4872d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. + return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. 
+ out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. +func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 000000000..46f12c61d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,102 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// See: +// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 +// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. + +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { + // Number of processed packets + Processed uint32 + // Number of dropped packets + Dropped uint32 + // Number of times processing packets ran out of quota + TimeSqueezed uint32 +} + +var softNetProcFile = "net/softnet_stat" + +// NetSoftnetStat reads data from /proc/net/softnet_stat. +func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const minColumns = 9 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width < minColumns { + return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + } + + // We only parse the first three columns at the moment. 
+ us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go new file mode 100644 index 000000000..527762955 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. + NetTCP []*netIPSocketLine + + // NetTCPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetTCP it does not collect + // the parsed lines into a slice. + NetTCPSummary NetIPSocketSummary +) + +// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp. +func (fs FS) NetTCP() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp")) +} + +// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams +// read from /proc/net/tcp6. +func (fs FS) NetTCP6() (NetTCP, error) { + return newNetTCP(fs.proc.Path("net/tcp6")) +} + +// NetTCPSummary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp. +func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp")) +} + +// NetTCP6Summary returns already computed statistics like the total queue lengths +// for TCP datagrams read from /proc/net/tcp6. +func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { + return newNetTCPSummary(fs.proc.Path("net/tcp6")) +} + +// newNetTCP creates a new NetTCP{,6} from the contents of the given file. +func newNetTCP(file string) (NetTCP, error) { + n, err := newNetIPSocket(file) + n1 := NetTCP(n) + return n1, err +} + +func newNetTCPSummary(file string) (*NetTCPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetTCPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 000000000..9ac3daf2d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,64 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netIPSocketLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary NetIPSocketSummary +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. +func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + n, err := newNetIPSocket(file) + n1 := NetUDP(n) + return n1, err +} + +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + n, err := newNetIPSocketSummary(file) + if n == nil { + return nil, err + } + n1 := NetUDPSummary(*n) + return &n1, err +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 000000000..98aa8e1c3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? 
+const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. + s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. + hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // 
Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. + pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf6819..000000000 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. -// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. -type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. 
-type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. -type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. -type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. -type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. -type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? - Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. 
-type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. -type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc5b..000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - 
SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. - if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - 
RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5ad9..000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a358..000000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case "proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 7cf5b8acf..28f696803 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -20,6 +20,9 @@ import ( "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Proc provides information about a running process. @@ -27,7 +30,7 @@ type Proc struct { // The process ID. PID int - fs FS + fs fs.FS } // Procs represents a list of Proc structs. @@ -52,7 +55,7 @@ func NewProc(pid int) (Proc, error) { if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // AllProcs returns a list of all currently available processes under /proc. @@ -66,28 +69,35 @@ func AllProcs() (Procs, error) { // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) + p, err := os.Readlink(fs.proc.Path("self")) if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) if err != nil { return Proc{}, err } - return fs.NewProc(pid) + return fs.Proc(pid) } // NewProc returns a process for the given pid. +// +// Deprecated: use fs.Proc() instead func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. 
+func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs}, nil + return Proc{PID: pid, fs: fs.proc}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) + d, err := os.Open(fs.proc.Path()) if err != nil { return Procs{}, err } @@ -95,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) } p := Procs{} @@ -104,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs}) + p = append(p, Proc{PID: int(pid), fs: fs.proc}) } return p, nil @@ -112,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) { // CmdLine returns the command line of a process. func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("cmdline")) if err != nil { return nil, err } @@ -130,9 +134,9 @@ func (p Proc) CmdLine() ([]string, error) { return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) +// Wchan returns the wchan (wait channel) of a process. +func (p Proc) Wchan() (string, error) { + f, err := os.Open(p.path("wchan")) if err != nil { return "", err } @@ -143,6 +147,21 @@ func (p Proc) Comm() (string, error) { return "", err } + wchan := string(data) + if wchan == "" || wchan == "0" { + return "", nil + } + + return wchan, nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + return strings.TrimSpace(string(data)), nil } @@ -156,6 +175,26 @@ func (p Proc) Executable() (string, error) { return exe, err } +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + // FileDescriptors returns the currently open file descriptors of a process. func (p Proc) FileDescriptors() ([]uintptr, error) { names, err := p.fileDescriptors() @@ -167,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + return nil, fmt.Errorf("could not parse fd %q: %w", n, err) } fds[i] = uintptr(fd) } @@ -218,6 +257,18 @@ func (p Proc) MountStats() ([]*Mount, error) { return parseMountStats(f) } +// MountInfo retrieves mount information for mount points in a +// process's namespace. +// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. 
+func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { @@ -227,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) } return names, nil @@ -236,3 +287,33 @@ func (p Proc) fileDescriptors() ([]string, error) { func (p Proc) path(pa ...string) string { return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go new file mode 100644 index 000000000..0094a13c0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -0,0 +1,98 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies +// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in +// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of +// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID +// in this hierarchy +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type Cgroup struct { + // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one + // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number + HierarchyID int + // Controllers using this hierarchy of processes. Controllers are also known as subsystems. 
For + // Cgroups V2 this may be empty, as all active controllers use the same hierarchy + Controllers []string + // Path of this control group, relative to the mount point of the cgroupfs representing this specific + // hierarchy + Path string +} + +// parseCgroupString parses each line of the /proc/[pid]/cgroup file +// Line format is hierarchyID:[controller1,controller2]:path +func parseCgroupString(cgroupStr string) (*Cgroup, error) { + var err error + + fields := strings.SplitN(cgroupStr, ":", 3) + if len(fields) < 3 { + return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + } + + cgroup := &Cgroup{ + Path: fields[2], + Controllers: nil, + } + cgroup.HierarchyID, err = strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + if fields[1] != "" { + ssNames := strings.Split(fields[1], ",") + cgroup.Controllers = append(cgroup.Controllers, ssNames...) + } + return cgroup, nil +} + +// parseCgroups reads each line of the /proc/[pid]/cgroup file +func parseCgroups(data []byte) ([]Cgroup, error) { + var cgroups []Cgroup + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + mountString := scanner.Text() + parsedMounts, err := parseCgroupString(mountString) + if err != nil { + return nil, err + } + cgroups = append(cgroups, *parsedMounts) + } + + err := scanner.Err() + return cgroups, err +} + +// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process +// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, +// so the len of the returned struct is equal to the number of active hierarchies on this system +func (p Proc) Cgroups() ([]Cgroup, error) { + data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) + if err != nil { + return nil, err + } + return parseCgroups(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 000000000..6134b3580 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from /proc//environ +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 000000000..cf63227f0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) + rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. +func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. 
+func parseInotifyInfo(line string) (*InotifyInfo, error) { + m := rInotifyParts.FindStringSubmatch(line) + if len(m) >= 4 { + var mask string + if len(m) == 5 { + mask = m[4] + } + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: mask, + } + return i, nil + } + return nil, fmt.Errorf("invalid inode entry: %q", line) +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 0251c83bf..776f34971 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -15,8 +15,8 @@ package procfs import ( "fmt" - "io/ioutil" - "os" + + "github.com/prometheus/procfs/internal/util" ) // ProcIO models the content of /proc//io. @@ -39,17 +39,11 @@ type ProcIO struct { CancelledWriteBytes int64 } -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { +// IO creates a new ProcIO instance from a given Proc instance. +func (p Proc) IO() (ProcIO, error) { pio := ProcIO{} - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) + data, err := util.ReadFileNoStat(p.path("io")) if err != nil { return pio, err } diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index f04ba6fda..dd20f198a 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -26,59 +26,66 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int64 + CPUTime uint64 // Maximum size of files that the process may create. - FileSize int64 + FileSize uint64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). - DataSize int64 + DataSize uint64 // Maximum size of the process stack in bytes. - StackSize int64 + StackSize uint64 // Maximum size of a core file. - CoreFileSize int64 + CoreFileSize uint64 // Limit of the process's resident set in pages. - ResidentSet int64 + ResidentSet uint64 // Maximum number of processes that can be created for the real user ID of // the calling process. - Processes int64 + Processes uint64 // Value one greater than the maximum file descriptor number that can be // opened by this process. - OpenFiles int64 + OpenFiles uint64 // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 + LockedMemory uint64 // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 + AddressSpace uint64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int64 + FileLocks uint64 // Limit of signals that may be queued for the real user ID of the calling // process. 
- PendingSignals int64 + PendingSignals uint64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int64 + MsqqueueSize uint64 // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int64 + NicePriority uint64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int64 + RealtimePriority uint64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int64 + RealtimeTimeout uint64 } const ( - limitsFields = 3 + limitsFields = 4 limitsUnlimited = "unlimited" ) var ( - limitsDelimiter = regexp.MustCompile(" +") + limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. +func (p Proc) Limits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err @@ -89,46 +96,49 @@ func (p Proc) NewLimits() (ProcLimits, error) { l = ProcLimits{} s = bufio.NewScanner(f) ) + + s.Scan() // Skip limits header + for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) + //fields := limitsMatch.Split(s.Text(), limitsFields) + fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) } - switch fields[0] { + switch fields[1] { case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) + l.CPUTime, err = parseUint(fields[2]) case "Max file size": - l.FileSize, err = parseInt(fields[1]) + l.FileSize, err = parseUint(fields[2]) case "Max data size": - l.DataSize, err = parseInt(fields[1]) + l.DataSize, err = parseUint(fields[2]) case "Max stack size": - l.StackSize, err = parseInt(fields[1]) + l.StackSize, err = parseUint(fields[2]) case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) + l.CoreFileSize, err = parseUint(fields[2]) case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) + l.ResidentSet, err = parseUint(fields[2]) case "Max processes": - l.Processes, err = parseInt(fields[1]) + l.Processes, err = parseUint(fields[2]) case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) + l.OpenFiles, err = parseUint(fields[2]) case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) + l.LockedMemory, err = parseUint(fields[2]) case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) + l.AddressSpace, err = parseUint(fields[2]) case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) + l.FileLocks, err = parseUint(fields[2]) case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) + l.PendingSignals, err = parseUint(fields[2]) case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) + l.MsqqueueSize, err = parseUint(fields[2]) case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) + l.NicePriority, err = parseUint(fields[2]) case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) + l.RealtimePriority, err = parseUint(fields[2]) case "Max realtime timeout": - 
l.RealtimeTimeout, err = parseInt(fields[1]) + l.RealtimeTimeout, err = parseUint(fields[2]) } if err != nil { return ProcLimits{}, err @@ -138,13 +148,13 @@ func (p Proc) NewLimits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int64, error) { +func parseUint(s string) (uint64, error) { if s == limitsUnlimited { - return -1, nil + return 18446744073709551615, nil } - i, err := strconv.ParseInt(s, 10, 64) + i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go new file mode 100644 index 000000000..1d7772d51 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +type ProcMapPermissions struct { + // mapping has the [R]ead flag set + Read bool + // mapping has the [W]rite flag set + Write bool + // mapping has the [X]ecutable flag set + Execute bool + // mapping has the [S]hared flag set + Shared bool + // mapping is marked as [P]rivate (copy on write) + Private bool +} + +// ProcMap contains the process memory-mappings of the process, +// read from /proc/[pid]/maps +type ProcMap struct { + // The start address of current mapping. + StartAddr uintptr + // The end address of the current mapping + EndAddr uintptr + // The permissions for this mapping + Perms *ProcMapPermissions + // The current offset into the file/fd (e.g., shared libs) + Offset int64 + // Device owner of this mapping (major:minor) in Mkdev format. + Dev uint64 + // The inode of the device above + Inode uint64 + // The file or psuedofile (or empty==anonymous) + Pathname string +} + +// parseDevice parses the device token of a line and converts it to a dev_t +// (mkdev) like structure. 
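+// The device token comes from the dev column of /proc/[pid]/maps and has the
+// form "major:minor" with both values in hexadecimal (e.g. "08:01").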
+func parseDevice(s string) (uint64, error) { + toks := strings.Split(s, ":") + if len(toks) < 2 { + return 0, fmt.Errorf("unexpected number of fields") + } + + major, err := strconv.ParseUint(toks[0], 16, 0) + if err != nil { + return 0, err + } + + minor, err := strconv.ParseUint(toks[1], 16, 0) + if err != nil { + return 0, err + } + + return unix.Mkdev(uint32(major), uint32(minor)), nil +} + +// parseAddress just converts a hex-string to a uintptr +func parseAddress(s string) (uintptr, error) { + a, err := strconv.ParseUint(s, 16, 0) + if err != nil { + return 0, err + } + + return uintptr(a), nil +} + +// parseAddresses parses the start-end address +func parseAddresses(s string) (uintptr, uintptr, error) { + toks := strings.Split(s, "-") + if len(toks) < 2 { + return 0, 0, fmt.Errorf("invalid address") + } + + saddr, err := parseAddress(toks[0]) + if err != nil { + return 0, 0, err + } + + eaddr, err := parseAddress(toks[1]) + if err != nil { + return 0, 0, err + } + + return saddr, eaddr, nil +} + +// parsePermissions parses a token and returns any that are set. +func parsePermissions(s string) (*ProcMapPermissions, error) { + if len(s) < 4 { + return nil, fmt.Errorf("invalid permissions token") + } + + perms := ProcMapPermissions{} + for _, ch := range s { + switch ch { + case 'r': + perms.Read = true + case 'w': + perms.Write = true + case 'x': + perms.Execute = true + case 'p': + perms.Private = true + case 's': + perms.Shared = true + } + } + + return &perms, nil +} + +// parseProcMap will attempt to parse a single line within a proc/[pid]/maps +// buffer. +func parseProcMap(text string) (*ProcMap, error) { + fields := strings.Fields(text) + if len(fields) < 5 { + return nil, fmt.Errorf("truncated procmap entry") + } + + saddr, eaddr, err := parseAddresses(fields[0]) + if err != nil { + return nil, err + } + + perms, err := parsePermissions(fields[1]) + if err != nil { + return nil, err + } + + offset, err := strconv.ParseInt(fields[2], 16, 0) + if err != nil { + return nil, err + } + + device, err := parseDevice(fields[3]) + if err != nil { + return nil, err + } + + inode, err := strconv.ParseUint(fields[4], 10, 0) + if err != nil { + return nil, err + } + + pathname := "" + + if len(fields) >= 5 { + pathname = strings.Join(fields[5:], " ") + } + + return &ProcMap{ + StartAddr: saddr, + EndAddr: eaddr, + Perms: perms, + Offset: offset, + Dev: device, + Inode: inode, + Pathname: pathname, + }, nil +} + +// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the +// process. +func (p Proc) ProcMaps() ([]*ProcMap, error) { + file, err := os.Open(p.path("maps")) + if err != nil { + return nil, err + } + defer file.Close() + + maps := []*ProcMap{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + m, err := parseProcMap(scan.Text()) + if err != nil { + return nil, err + } + + maps = append(maps, m) + } + + return maps, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index d06c26eba..391b4cbd1 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -29,9 +29,9 @@ type Namespace struct { // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// Namespaces reads from /proc//ns/* to get the namespaces of which the // process is a member. 
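+//
+// A minimal usage sketch (illustrative only, not part of the upstream docs;
+// error handling elided):
+//
+//	p, _ := Self()
+//	ns, _ := p.Namespaces()
+//	for name, n := range ns {
+//		fmt.Printf("%s: type=%s inode=%d\n", name, n.Type, n.Inode)
+//	}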
-func (p Proc) NewNamespaces() (Namespaces, error) { +func (p Proc) Namespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err @@ -40,7 +40,7 @@ func (p Proc) NewNamespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) NewNamespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 000000000..dc6c14f0a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. +// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
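+//
+// A minimal usage sketch (illustrative only, assuming a default /proc mount;
+// error handling elided):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	psi, _ := fs.PSIStatsForResource("io")
+//	if psi.Some != nil {
+//		fmt.Println(psi.Some.Avg10, psi.Some.Total)
+//	}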
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go new file mode 100644 index 000000000..a576a720a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -0,0 +1,165 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bufio" + "errors" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + // match the header line before each mapped zone in /proc/pid/smaps + procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) +) + +type ProcSMapsRollup struct { + // Amount of the mapping that is currently resident in RAM + Rss uint64 + // Process's proportional share of this mapping + Pss uint64 + // Size in bytes of clean shared pages + SharedClean uint64 + // Size in bytes of dirty shared pages + SharedDirty uint64 + // Size in bytes of clean private pages + PrivateClean uint64 + // Size in bytes of dirty private pages + PrivateDirty uint64 + // Amount of memory currently marked as referenced or accessed + Referenced uint64 + // Amount of memory that does not belong to any file + Anonymous uint64 + // Amount would-be-anonymous memory currently on swap + Swap uint64 + // Process's proportional memory on swap + SwapPss uint64 +} + +// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the +// process. +// +// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will +// we read and summed. 
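+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	p, _ := Self()
+//	rollup, _ := p.ProcSMapsRollup()
+//	fmt.Println(rollup.Rss, rollup.Pss)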
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { + data, err := util.ReadFileNoStat(p.path("smaps_rollup")) + if err != nil && os.IsNotExist(err) { + return p.procSMapsRollupManual() + } + if err != nil { + return ProcSMapsRollup{}, err + } + + lines := strings.Split(string(data), "\n") + smaps := ProcSMapsRollup{} + + // skip first line which don't contains information we need + lines = lines[1:] + for _, line := range lines { + if line == "" { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +// Read /proc/pid/smaps and do the roll-up in Go code. +func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { + file, err := os.Open(p.path("smaps")) + if err != nil { + return ProcSMapsRollup{}, err + } + defer file.Close() + + smaps := ProcSMapsRollup{} + scan := bufio.NewScanner(file) + + for scan.Scan() { + line := scan.Text() + + if procSMapsHeaderLine.MatchString(line) { + continue + } + + if err := smaps.parseLine(line); err != nil { + return ProcSMapsRollup{}, err + } + } + + return smaps, nil +} + +func (s *ProcSMapsRollup) parseLine(line string) error { + kv := strings.SplitN(line, ":", 2) + if len(kv) != 2 { + fmt.Println(line) + return errors.New("invalid net/dev line, missing colon") + } + + k := kv[0] + if k == "VmFlags" { + return nil + } + + v := strings.TrimSpace(kv[1]) + v = strings.TrimRight(v, " kB") + + vKBytes, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + vBytes := vKBytes * 1024 + + s.addValue(k, v, vKBytes, vBytes) + + return nil +} + +func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Rss": + s.Rss += vUintBytes + case "Pss": + s.Pss += vUintBytes + case "Shared_Clean": + s.SharedClean += vUintBytes + case "Shared_Dirty": + s.SharedDirty += vUintBytes + case "Private_Clean": + s.PrivateClean += vUintBytes + case "Private_Dirty": + s.PrivateDirty += vUintBytes + case "Referenced": + s.Referenced += vUintBytes + case "Anonymous": + s.Anonymous += vUintBytes + case "Swap": + s.Swap += vUintBytes + case "SwapPss": + s.SwapPss += vUintBytes + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3cf2a9f18..67ca0e9fb 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -16,8 +16,10 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call @@ -95,22 +97,23 @@ type ProcStat struct { // in clock ticks. Starttime uint64 // Virtual memory size in bytes. - VSize int + VSize uint // Resident set size in pages. RSS int - fs FS + proc fs.FS } // NewStat returns the current status information of the process. +// +// Deprecated: use p.Stat() instead func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() + return p.Stat() +} - data, err := ioutil.ReadAll(f) +// Stat returns the current status information of the process. 
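+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	p, _ := Self()
+//	stat, _ := p.Stat()
+//	fmt.Println(stat.Comm, stat.VirtualMemory(), stat.ResidentMemory())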
+func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) if err != nil { return ProcStat{}, err } @@ -118,16 +121,13 @@ func (p Proc) NewStat() (ProcStat, error) { var ( ignore int - s = ProcStat{PID: p.PID, fs: p.fs} + s = ProcStat{PID: p.PID, proc: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) + return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) } s.Comm = string(data[l+1 : r]) @@ -164,7 +164,7 @@ func (p Proc) NewStat() (ProcStat, error) { } // VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { +func (s ProcStat) VirtualMemory() uint { return s.VSize } @@ -175,7 +175,8 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() + fs := FS{proc: s.proc} + stat, err := fs.Stat() if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 000000000..6edd8333b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 // nolint:golint + // Virtual memory size. + VmSize uint64 // nolint:golint + // Locked memory size. + VmLck uint64 // nolint:golint + // Pinned memory size. + VmPin uint64 // nolint:golint + // Peak resident set size. + VmHWM uint64 // nolint:golint + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 // nolint:golint + // Size of resident anonymous memory. + RssAnon uint64 // nolint:golint + // Size of resident file mappings. + RssFile uint64 // nolint:golint + // Size of resident shared memory. + RssShmem uint64 // nolint:golint + // Size of data segments. + VmData uint64 // nolint:golint + // Size of stack segments. + VmStk uint64 // nolint:golint + // Size of text segments. + VmExe uint64 // nolint:golint + // Shared library code size. + VmLib uint64 // nolint:golint + // Page table entries size. + VmPTE uint64 // nolint:golint + // Size of second-level page tables. + VmPMD uint64 // nolint:golint + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 // nolint:golint + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. 
+ VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs) + UIDs [4]string + // GIDs of the process (Real, effective, saved set, and filesystem GIDs) + GIDs [4]string +} + +// NewStatus returns the current status information of the process. +func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "Gid": + copy(s.GIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. +func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 000000000..28228164e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,121 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (ProcSchedstat, error) { + var ( + stats ProcSchedstat + err error + ) + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return stats, err + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return stats, err + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return stats, err + } + + return stats, errors.New("could not parse schedstat") +} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go new file mode 100644 index 000000000..7896fd724 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -0,0 +1,151 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +var ( + slabSpace = regexp.MustCompile(`\s+`) + slabVer = regexp.MustCompile(`slabinfo -`) + slabHeader = regexp.MustCompile(`# name`) +) + +// Slab represents a slab pool in the kernel. +type Slab struct { + Name string + ObjActive int64 + ObjNum int64 + ObjSize int64 + ObjPerSlab int64 + PagesPerSlab int64 + // tunables + Limit int64 + Batch int64 + SharedFactor int64 + SlabActive int64 + SlabNum int64 + SharedAvail int64 +} + +// SlabInfo represents info for all slabs. +type SlabInfo struct { + Slabs []*Slab +} + +func shouldParseSlab(line string) bool { + if slabVer.MatchString(line) { + return false + } + if slabHeader.MatchString(line) { + return false + } + return true +} + +// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. +func parseV21SlabEntry(line string) (*Slab, error) { + // First cleanup whitespace. + l := slabSpace.ReplaceAllString(line, " ") + s := strings.Split(l, " ") + if len(s) != 16 { + return nil, fmt.Errorf("unable to parse: %q", line) + } + var err error + i := &Slab{Name: s[0]} + i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) + if err != nil { + return nil, err + } + i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) + if err != nil { + return nil, err + } + i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) + if err != nil { + return nil, err + } + i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) + if err != nil { + return nil, err + } + i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) + if err != nil { + return nil, err + } + i.Limit, err = strconv.ParseInt(s[8], 10, 64) + if err != nil { + return nil, err + } + i.Batch, err = strconv.ParseInt(s[9], 10, 64) + if err != nil { + return nil, err + } + i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) + if err != nil { + return nil, err + } + i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) + if err != nil { + return nil, err + } + i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) + if err != nil { + return nil, err + } + i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) + if err != nil { + return nil, err + } + return i, nil +} + +// parseSlabInfo21 is used to parse a slabinfo 2.1 file. +func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { + scanner := bufio.NewScanner(r) + s := SlabInfo{Slabs: []*Slab{}} + for scanner.Scan() { + line := scanner.Text() + if !shouldParseSlab(line) { + continue + } + slab, err := parseV21SlabEntry(line) + if err != nil { + return s, err + } + s.Slabs = append(s.Slabs, slab) + } + return s, nil +} + +// SlabInfo reads data from /proc/slabinfo +func (fs FS) SlabInfo() (SlabInfo, error) { + // TODO: Consider passing options to allow for parsing different + // slabinfo versions. However, slabinfo 2.1 has been stable since + // kernel 2.6.10 and later. + data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) + if err != nil { + return SlabInfo{}, err + } + + return parseSlabInfo21(bytes.NewReader(data)) +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 61eb6b0e3..6d8727541 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -15,11 +15,14 @@ package procfs import ( "bufio" + "bytes" "fmt" "io" - "os" "strconv" "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" ) // CPUStat shows how much time the cpu spend in various stages. 
@@ -78,16 +81,6 @@ type Stat struct { SoftIRQ SoftIRQStat } -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - // Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} @@ -100,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) } cpuStat.User /= userHZ @@ -123,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) } return cpuStat, cpuID, nil @@ -143,25 +136,44 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) } return softIRQStat, total, nil } -// NewStat returns an information about current kernel/system statistics. +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + return fs.Stat() +} - f, err := os.Open(fs.Path("stat")) +// Stat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) if err != nil { return Stat{}, err } - defer f.Close() stat := Stat{} - scanner := bufio.NewScanner(f) + scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -172,34 +184,34 @@ func (fs FS) NewStat() (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -225,7 +237,7 @@ func (fs FS) NewStat() (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 000000000..15edc2212 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. +func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar index b0171a12b..19ef02b8d 100644 --- a/vendor/github.com/prometheus/procfs/ttar +++ b/vendor/github.com/prometheus/procfs/ttar @@ -86,8 +86,10 @@ Usage: $bname [-C ] -c -f (create archive) $bname [-C ] -x -f (extract archive) Options: - -C (change directory) - -v (verbose) + -C (change directory) + -v (verbose) + --recursive-unlink (recursively delete existing directory if path + collides with file or directory to extract) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ @@ -111,8 +113,9 @@ function set_cmd { } unset VERBOSE +unset RECURSIVE_UNLINK -while getopts :cf:htxvC: opt; do +while getopts :cf:-:htxvC: opt; do case $opt in c) set_cmd "create" @@ -136,6 +139,18 @@ while getopts :cf:htxvC: opt; do C) CDIR=$OPTARG ;; + -) + case $OPTARG in + recursive-unlink) + RECURSIVE_UNLINK="yes" + ;; + *) + echo -e "Error: invalid option -$OPTARG" + echo + usage 1 + ;; + esac + ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo @@ -212,16 +227,16 @@ function extract { local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line + # An EOF not preceded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash + # Replace NULLBYTE with null byte unless preceded by backslash # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash + # Remove EOF unless preceded by 
backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" @@ -245,7 +260,16 @@ function extract { fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then + if [ -L "$path" ]; then + rm "$path" + elif [ -d "$path" ]; then + if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then + rm -r "$path" + else + # Safe because symlinks to directories are dealt with above + rmdir "$path" + fi + elif [ -e "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then @@ -338,8 +362,8 @@ function _create { else < "$file" \ sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 000000000..cb1389141 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// The VM interface is described at +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// Each setting is exposed as a single file. 
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array +// and numa_zonelist_order (deprecated) which is a string +type VM struct { + AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes + BlockDump *int64 // /proc/sys/vm/block_dump + CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed + DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes + DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio + DirtyBytes *int64 // /proc/sys/vm/dirty_bytes + DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs + DirtyRatio *int64 // /proc/sys/vm/dirty_ratio + DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds + DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs + DropCaches *int64 // /proc/sys/vm/drop_caches + ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold + HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group + LaptopMode *int64 // /proc/sys/vm/laptop_mode + LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout + LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio + MaxMapCount *int64 // /proc/sys/vm/max_map_count + MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill + MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery + MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes + MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio + MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio + MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr + NrHugepages *int64 // /proc/sys/vm/nr_hugepages + NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy + NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages + NumaStat *int64 // /proc/sys/vm/numa_stat + NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order + OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks + OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task + OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes + OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory + OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio + PageCluster *int64 // /proc/sys/vm/page-cluster + PanicOnOom *int64 // /proc/sys/vm/panic_on_oom + PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction + StatInterval *int64 // /proc/sys/vm/stat_interval + Swappiness *int64 // /proc/sys/vm/swappiness + UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes + VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure + WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor + WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor + ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode +} + +// VM reads the VM statistics from the specified `proc` filesystem. 
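+//
+// A minimal usage sketch (illustrative only; fields are pointers and remain
+// nil when the corresponding file could not be read; error handling elided):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	vm, _ := fs.VM()
+//	if vm.Swappiness != nil {
+//		fmt.Println(*vm.Swappiness)
+//	}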
+func (fs FS) VM() (*VM, error) { + path := fs.proc.Path("sys/vm") + file, err := os.Stat(path) + if err != nil { + return nil, err + } + if !file.Mode().IsDir() { + return nil, fmt.Errorf("%s is not a directory", path) + } + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var vm VM + for _, f := range files { + if f.IsDir() { + continue + } + + name := filepath.Join(path, f.Name()) + // ignore errors on read, as there are some write only + // in /proc/sys/vm + value, err := util.SysReadFile(name) + if err != nil { + continue + } + vp := util.NewValueParser(value) + + switch f.Name() { + case "admin_reserve_kbytes": + vm.AdminReserveKbytes = vp.PInt64() + case "block_dump": + vm.BlockDump = vp.PInt64() + case "compact_unevictable_allowed": + vm.CompactUnevictableAllowed = vp.PInt64() + case "dirty_background_bytes": + vm.DirtyBackgroundBytes = vp.PInt64() + case "dirty_background_ratio": + vm.DirtyBackgroundRatio = vp.PInt64() + case "dirty_bytes": + vm.DirtyBytes = vp.PInt64() + case "dirty_expire_centisecs": + vm.DirtyExpireCentisecs = vp.PInt64() + case "dirty_ratio": + vm.DirtyRatio = vp.PInt64() + case "dirtytime_expire_seconds": + vm.DirtytimeExpireSeconds = vp.PInt64() + case "dirty_writeback_centisecs": + vm.DirtyWritebackCentisecs = vp.PInt64() + case "drop_caches": + vm.DropCaches = vp.PInt64() + case "extfrag_threshold": + vm.ExtfragThreshold = vp.PInt64() + case "hugetlb_shm_group": + vm.HugetlbShmGroup = vp.PInt64() + case "laptop_mode": + vm.LaptopMode = vp.PInt64() + case "legacy_va_layout": + vm.LegacyVaLayout = vp.PInt64() + case "lowmem_reserve_ratio": + stringSlice := strings.Fields(value) + pint64Slice := make([]*int64, 0, len(stringSlice)) + for _, value := range stringSlice { + vp := util.NewValueParser(value) + pint64Slice = append(pint64Slice, vp.PInt64()) + } + vm.LowmemReserveRatio = pint64Slice + case "max_map_count": + vm.MaxMapCount = vp.PInt64() + case "memory_failure_early_kill": + vm.MemoryFailureEarlyKill = vp.PInt64() + case "memory_failure_recovery": + vm.MemoryFailureRecovery = vp.PInt64() + case "min_free_kbytes": + vm.MinFreeKbytes = vp.PInt64() + case "min_slab_ratio": + vm.MinSlabRatio = vp.PInt64() + case "min_unmapped_ratio": + vm.MinUnmappedRatio = vp.PInt64() + case "mmap_min_addr": + vm.MmapMinAddr = vp.PInt64() + case "nr_hugepages": + vm.NrHugepages = vp.PInt64() + case "nr_hugepages_mempolicy": + vm.NrHugepagesMempolicy = vp.PInt64() + case "nr_overcommit_hugepages": + vm.NrOvercommitHugepages = vp.PInt64() + case "numa_stat": + vm.NumaStat = vp.PInt64() + case "numa_zonelist_order": + vm.NumaZonelistOrder = value + case "oom_dump_tasks": + vm.OomDumpTasks = vp.PInt64() + case "oom_kill_allocating_task": + vm.OomKillAllocatingTask = vp.PInt64() + case "overcommit_kbytes": + vm.OvercommitKbytes = vp.PInt64() + case "overcommit_memory": + vm.OvercommitMemory = vp.PInt64() + case "overcommit_ratio": + vm.OvercommitRatio = vp.PInt64() + case "page-cluster": + vm.PageCluster = vp.PInt64() + case "panic_on_oom": + vm.PanicOnOom = vp.PInt64() + case "percpu_pagelist_fraction": + vm.PercpuPagelistFraction = vp.PInt64() + case "stat_interval": + vm.StatInterval = vp.PInt64() + case "swappiness": + vm.Swappiness = vp.PInt64() + case "user_reserve_kbytes": + vm.UserReserveKbytes = vp.PInt64() + case "vfs_cache_pressure": + vm.VfsCachePressure = vp.PInt64() + case "watermark_boost_factor": + vm.WatermarkBoostFactor = vp.PInt64() + case "watermark_scale_factor": + vm.WatermarkScaleFactor = vp.PInt64() + case 
"zone_reclaim_mode": + vm.ZoneReclaimMode = vp.PInt64() + } + if err := vp.Err(); err != nil { + return nil, err + } + } + + return &vm, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go index 8f1508f0f..eed07c7d7 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) { // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) + file, err := os.Open(fs.proc.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } @@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf( - "couldn't parse %s line %s", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) } name := fields[0] diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index 2bc0ef342..000000000 --- a/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. - fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := util.ParseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. 
- us, err := util.ParseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. -func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. -func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. 
-func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. -func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go deleted file mode 100644 index d86794b7c..000000000 --- a/vendor/github.com/prometheus/procfs/xfs/xfs.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package xfs provides access to statistics exposed by the XFS filesystem. -package xfs - -// Stats contains XFS filesystem runtime statistics, parsed from -// /proc/fs/xfs/stat. -// -// The names and meanings of each statistic were taken from -// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux -// kernel source. Most counters are uint32s (same data types used in -// xfs_stats.h), but some of the "extended precision stats" are uint64s. -type Stats struct { - // The name of the filesystem used to source these statistics. - // If empty, this indicates aggregated statistics for all XFS - // filesystems on the host. - Name string - - ExtentAllocation ExtentAllocationStats - AllocationBTree BTreeStats - BlockMapping BlockMappingStats - BlockMapBTree BTreeStats - DirectoryOperation DirectoryOperationStats - Transaction TransactionStats - InodeOperation InodeOperationStats - LogOperation LogOperationStats - ReadWrite ReadWriteStats - AttributeOperation AttributeOperationStats - InodeClustering InodeClusteringStats - Vnode VnodeStats - Buffer BufferStats - ExtendedPrecision ExtendedPrecisionStats -} - -// ExtentAllocationStats contains statistics regarding XFS extent allocations. -type ExtentAllocationStats struct { - ExtentsAllocated uint32 - BlocksAllocated uint32 - ExtentsFreed uint32 - BlocksFreed uint32 -} - -// BTreeStats contains statistics regarding an XFS internal B-tree. -type BTreeStats struct { - Lookups uint32 - Compares uint32 - RecordsInserted uint32 - RecordsDeleted uint32 -} - -// BlockMappingStats contains statistics regarding XFS block maps. -type BlockMappingStats struct { - Reads uint32 - Writes uint32 - Unmaps uint32 - ExtentListInsertions uint32 - ExtentListDeletions uint32 - ExtentListLookups uint32 - ExtentListCompares uint32 -} - -// DirectoryOperationStats contains statistics regarding XFS directory entries. -type DirectoryOperationStats struct { - Lookups uint32 - Creates uint32 - Removes uint32 - Getdents uint32 -} - -// TransactionStats contains statistics regarding XFS metadata transactions. -type TransactionStats struct { - Sync uint32 - Async uint32 - Empty uint32 -} - -// InodeOperationStats contains statistics regarding XFS inode operations. 
-type InodeOperationStats struct { - Attempts uint32 - Found uint32 - Recycle uint32 - Missed uint32 - Duplicate uint32 - Reclaims uint32 - AttributeChange uint32 -} - -// LogOperationStats contains statistics regarding the XFS log buffer. -type LogOperationStats struct { - Writes uint32 - Blocks uint32 - NoInternalBuffers uint32 - Force uint32 - ForceSleep uint32 -} - -// ReadWriteStats contains statistics regarding the number of read and write -// system calls for XFS filesystems. -type ReadWriteStats struct { - Read uint32 - Write uint32 -} - -// AttributeOperationStats contains statistics regarding manipulation of -// XFS extended file attributes. -type AttributeOperationStats struct { - Get uint32 - Set uint32 - Remove uint32 - List uint32 -} - -// InodeClusteringStats contains statistics regarding XFS inode clustering -// operations. -type InodeClusteringStats struct { - Iflush uint32 - Flush uint32 - FlushInode uint32 -} - -// VnodeStats contains statistics regarding XFS vnode operations. -type VnodeStats struct { - Active uint32 - Allocate uint32 - Get uint32 - Hold uint32 - Release uint32 - Reclaim uint32 - Remove uint32 - Free uint32 -} - -// BufferStats contains statistics regarding XFS read/write I/O buffers. -type BufferStats struct { - Get uint32 - Create uint32 - GetLocked uint32 - GetLockedWaited uint32 - BusyLocked uint32 - MissLocked uint32 - PageRetries uint32 - PageFound uint32 - GetRead uint32 -} - -// ExtendedPrecisionStats contains high precision counters used to track the -// total number of bytes read, written, or flushed, during XFS operations. -type ExtendedPrecisionStats struct { - FlushBytes uint64 - WriteBytes uint64 - ReadBytes uint64 -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go new file mode 100644 index 000000000..0b9bb6796 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Zoneinfo holds info parsed from /proc/zoneinfo. 
+type Zoneinfo struct { + Node string + Zone string + NrFreePages *int64 + Min *int64 + Low *int64 + High *int64 + Scanned *int64 + Spanned *int64 + Present *int64 + Managed *int64 + NrActiveAnon *int64 + NrInactiveAnon *int64 + NrIsolatedAnon *int64 + NrAnonPages *int64 + NrAnonTransparentHugepages *int64 + NrActiveFile *int64 + NrInactiveFile *int64 + NrIsolatedFile *int64 + NrFilePages *int64 + NrSlabReclaimable *int64 + NrSlabUnreclaimable *int64 + NrMlockStack *int64 + NrKernelStack *int64 + NrMapped *int64 + NrDirty *int64 + NrWriteback *int64 + NrUnevictable *int64 + NrShmem *int64 + NrDirtied *int64 + NrWritten *int64 + NumaHit *int64 + NumaMiss *int64 + NumaForeign *int64 + NumaInterleave *int64 + NumaLocal *int64 + NumaOther *int64 + Protection []*int64 +} + +var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) + +// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of +// structs containing the relevant info. More information available here: +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +func (fs FS) Zoneinfo() ([]Zoneinfo, error) { + data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + if err != nil { + return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + zoneinfo, err := parseZoneinfo(data) + if err != nil { + return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + } + return zoneinfo, nil +} + +func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { + + zoneinfo := []Zoneinfo{} + + zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) + for _, block := range zoneinfoBlocks { + var zoneinfoElement Zoneinfo + lines := strings.Split(string(block), "\n") + for _, line := range lines { + + if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { + zoneinfoElement.Node = nodeZone[1] + zoneinfoElement.Zone = nodeZone[2] + continue + } + if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { + zoneinfoElement.Zone = "" + continue + } + parts := strings.Fields(strings.TrimSpace(line)) + if len(parts) < 2 { + continue + } + vp := util.NewValueParser(parts[1]) + switch parts[0] { + case "nr_free_pages": + zoneinfoElement.NrFreePages = vp.PInt64() + case "min": + zoneinfoElement.Min = vp.PInt64() + case "low": + zoneinfoElement.Low = vp.PInt64() + case "high": + zoneinfoElement.High = vp.PInt64() + case "scanned": + zoneinfoElement.Scanned = vp.PInt64() + case "spanned": + zoneinfoElement.Spanned = vp.PInt64() + case "present": + zoneinfoElement.Present = vp.PInt64() + case "managed": + zoneinfoElement.Managed = vp.PInt64() + case "nr_active_anon": + zoneinfoElement.NrActiveAnon = vp.PInt64() + case "nr_inactive_anon": + zoneinfoElement.NrInactiveAnon = vp.PInt64() + case "nr_isolated_anon": + zoneinfoElement.NrIsolatedAnon = vp.PInt64() + case "nr_anon_pages": + zoneinfoElement.NrAnonPages = vp.PInt64() + case "nr_anon_transparent_hugepages": + zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() + case "nr_active_file": + zoneinfoElement.NrActiveFile = vp.PInt64() + case "nr_inactive_file": + zoneinfoElement.NrInactiveFile = vp.PInt64() + case "nr_isolated_file": + zoneinfoElement.NrIsolatedFile = vp.PInt64() + case "nr_file_pages": + zoneinfoElement.NrFilePages = vp.PInt64() + case "nr_slab_reclaimable": + zoneinfoElement.NrSlabReclaimable = vp.PInt64() + case "nr_slab_unreclaimable": + zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() + case "nr_mlock_stack": + zoneinfoElement.NrMlockStack = vp.PInt64() + case "nr_kernel_stack": 
+ zoneinfoElement.NrKernelStack = vp.PInt64() + case "nr_mapped": + zoneinfoElement.NrMapped = vp.PInt64() + case "nr_dirty": + zoneinfoElement.NrDirty = vp.PInt64() + case "nr_writeback": + zoneinfoElement.NrWriteback = vp.PInt64() + case "nr_unevictable": + zoneinfoElement.NrUnevictable = vp.PInt64() + case "nr_shmem": + zoneinfoElement.NrShmem = vp.PInt64() + case "nr_dirtied": + zoneinfoElement.NrDirtied = vp.PInt64() + case "nr_written": + zoneinfoElement.NrWritten = vp.PInt64() + case "numa_hit": + zoneinfoElement.NumaHit = vp.PInt64() + case "numa_miss": + zoneinfoElement.NumaMiss = vp.PInt64() + case "numa_foreign": + zoneinfoElement.NumaForeign = vp.PInt64() + case "numa_interleave": + zoneinfoElement.NumaInterleave = vp.PInt64() + case "numa_local": + zoneinfoElement.NumaLocal = vp.PInt64() + case "numa_other": + zoneinfoElement.NumaOther = vp.PInt64() + case "protection:": + protectionParts := strings.Split(line, ":") + protectionValues := strings.Replace(protectionParts[1], "(", "", 1) + protectionValues = strings.Replace(protectionValues, ")", "", 1) + protectionValues = strings.TrimSpace(protectionValues) + protectionStringMap := strings.Split(protectionValues, ", ") + val, err := util.ParsePInt64s(protectionStringMap) + if err == nil { + zoneinfoElement.Protection = val + } + } + + } + + zoneinfo = append(zoneinfo, zoneinfoElement) + } + return zoneinfo, nil +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/LICENSE new file mode 100644 index 000000000..f0381fb49 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Iskander (Alex) Sharipov / quasilyte +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
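
For reference, a minimal usage sketch of the zoneinfo parser vendored above (not part of the patch itself): it shows how the FS.Zoneinfo method added in this hunk might be called. The procfs.NewFS constructor and the "/proc" mount path are assumptions taken from the wider github.com/prometheus/procfs package rather than from this diff, and the counter fields are *int64 pointers that may be nil when a line is absent from /proc/zoneinfo.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Assumes the default procfs mount point; NewFS is part of the
	// same package but not of this hunk.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	zones, err := fs.Zoneinfo()
	if err != nil {
		log.Fatal(err)
	}

	for _, z := range zones {
		// Counters are pointers; nil means the field was not present.
		if z.NrFreePages != nil {
			fmt.Printf("node %s, zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}
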
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/compile.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/compile.go new file mode 100644 index 000000000..d6e1b1e65 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/compile.go @@ -0,0 +1,976 @@ +package gogrep + +import ( + "fmt" + "go/ast" + "go/token" +) + +type compileError string + +func (e compileError) Error() string { return string(e) } + +type compiler struct { + prog *program + stringIndexes map[string]uint8 + ifaceIndexes map[interface{}]uint8 + strict bool + fset *token.FileSet +} + +func (c *compiler) Compile(fset *token.FileSet, root ast.Node, strict bool) (p *program, err error) { + defer func() { + if err != nil { + return + } + rv := recover() + if rv == nil { + return + } + if parseErr, ok := rv.(compileError); ok { + err = parseErr + return + } + panic(rv) // Not our panic + }() + + c.fset = fset + c.strict = strict + c.prog = &program{ + insts: make([]instruction, 0, 8), + } + c.stringIndexes = make(map[string]uint8) + c.ifaceIndexes = make(map[interface{}]uint8) + + c.compileNode(root) + + if len(c.prog.insts) == 0 { + return nil, c.errorf(root, "0 instructions generated") + } + + return c.prog, nil +} + +func (c *compiler) errorf(n ast.Node, format string, args ...interface{}) compileError { + loc := c.fset.Position(n.Pos()) + message := fmt.Sprintf("%s:%d: %s", loc.Filename, loc.Line, fmt.Sprintf(format, args...)) + return compileError(message) +} + +func (c *compiler) toUint8(n ast.Node, v int) uint8 { + if !fitsUint8(v) { + panic(c.errorf(n, "implementation error: %v can't be converted to uint8", v)) + } + return uint8(v) +} + +func (c *compiler) internString(n ast.Node, s string) uint8 { + if index, ok := c.stringIndexes[s]; ok { + return index + } + index := len(c.prog.strings) + if !fitsUint8(index) { + panic(c.errorf(n, "implementation limitation: too many string values")) + } + c.stringIndexes[s] = uint8(index) + c.prog.strings = append(c.prog.strings, s) + return uint8(index) +} + +func (c *compiler) internIface(n ast.Node, v interface{}) uint8 { + if index, ok := c.ifaceIndexes[v]; ok { + return index + } + index := len(c.prog.ifaces) + if !fitsUint8(index) { + panic(c.errorf(n, "implementation limitation: too many values")) + } + c.ifaceIndexes[v] = uint8(index) + c.prog.ifaces = append(c.prog.ifaces, v) + return uint8(index) +} + +func (c *compiler) emitInst(inst instruction) { + c.prog.insts = append(c.prog.insts, inst) +} + +func (c *compiler) emitInstOp(op operation) { + c.emitInst(instruction{op: op}) +} + +func (c *compiler) compileNode(n ast.Node) { + switch n := n.(type) { + case *ast.File: + c.compileFile(n) + case ast.Decl: + c.compileDecl(n) + case ast.Expr: + c.compileExpr(n) + case ast.Stmt: + c.compileStmt(n) + case *ast.ValueSpec: + c.compileValueSpec(n) + case stmtSlice: + c.compileStmtSlice(n) + case exprSlice: + c.compileExprSlice(n) + default: + panic(c.errorf(n, "compileNode: unexpected %T", n)) + } +} + +func (c *compiler) compileOptStmt(n ast.Stmt) { + if exprStmt, ok := n.(*ast.ExprStmt); ok { + if ident, ok := exprStmt.X.(*ast.Ident); ok && isWildName(ident.Name) { + c.compileWildIdent(ident, true) + return + } + } + c.compileStmt(n) +} + +func (c *compiler) compileOptExpr(n ast.Expr) { + if ident, ok := n.(*ast.Ident); ok && isWildName(ident.Name) { + c.compileWildIdent(ident, true) + return + } + c.compileExpr(n) +} + +func (c *compiler) compileFieldList(n *ast.FieldList) { + c.emitInstOp(opFieldList) + for _, x := range 
n.List { + c.compileField(x) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileField(n *ast.Field) { + switch { + case len(n.Names) == 0: + c.emitInstOp(opUnnamedField) + case len(n.Names) == 1: + name := n.Names[0] + if isWildName(name.Name) { + c.emitInstOp(opField) + c.compileWildIdent(name, false) + } else { + c.emitInst(instruction{ + op: opSimpleField, + valueIndex: c.internString(name, name.Name), + }) + } + default: + c.emitInstOp(opMultiField) + for _, name := range n.Names { + c.compileIdent(name) + } + c.emitInstOp(opEnd) + } + c.compileExpr(n.Type) +} + +func (c *compiler) compileValueSpec(spec *ast.ValueSpec) { + switch { + case spec.Type == nil: + c.emitInstOp(opValueInitSpec) + case len(spec.Values) == 0: + c.emitInstOp(opTypedValueSpec) + default: + c.emitInstOp(opTypedValueInitSpec) + } + for _, name := range spec.Names { + c.compileIdent(name) + } + c.emitInstOp(opEnd) + if spec.Type != nil { + c.compileExpr(spec.Type) + } + if len(spec.Values) != 0 { + for _, v := range spec.Values { + c.compileExpr(v) + } + c.emitInstOp(opEnd) + } +} + +func (c *compiler) compileTypeSpec(spec *ast.TypeSpec) { + c.emitInstOp(pickOp(spec.Assign.IsValid(), opTypeAliasSpec, opTypeSpec)) + c.compileIdent(spec.Name) + c.compileExpr(spec.Type) +} + +func (c *compiler) compileFile(n *ast.File) { + if len(n.Imports) == 0 && len(n.Decls) == 0 { + c.emitInstOp(opEmptyPackage) + c.compileIdent(n.Name) + return + } + + panic(c.errorf(n, "compileFile: unsupported file pattern")) +} + +func (c *compiler) compileDecl(n ast.Decl) { + switch n := n.(type) { + case *ast.FuncDecl: + c.compileFuncDecl(n) + case *ast.GenDecl: + c.compileGenDecl(n) + + default: + panic(c.errorf(n, "compileDecl: unexpected %T", n)) + } +} + +func (c *compiler) compileFuncDecl(n *ast.FuncDecl) { + if n.Recv == nil { + c.emitInstOp(pickOp(n.Body == nil, opFuncProtoDecl, opFuncDecl)) + } else { + c.emitInstOp(pickOp(n.Body == nil, opMethodProtoDecl, opMethodDecl)) + } + + if n.Recv != nil { + c.compileFieldList(n.Recv) + } + c.compileIdent(n.Name) + c.compileFuncType(n.Type) + if n.Body != nil { + c.compileBlockStmt(n.Body) + } +} + +func (c *compiler) compileGenDecl(n *ast.GenDecl) { + switch n.Tok { + case token.CONST, token.VAR: + c.emitInstOp(pickOp(n.Tok == token.CONST, opConstDecl, opVarDecl)) + for _, spec := range n.Specs { + c.compileValueSpec(spec.(*ast.ValueSpec)) + } + c.emitInstOp(opEnd) + case token.TYPE: + c.emitInstOp(opTypeDecl) + for _, spec := range n.Specs { + c.compileTypeSpec(spec.(*ast.TypeSpec)) + } + c.emitInstOp(opEnd) + + default: + panic(c.errorf(n, "unexpected gen decl")) + } +} + +func (c *compiler) compileExpr(n ast.Expr) { + switch n := n.(type) { + case *ast.BasicLit: + c.compileBasicLit(n) + case *ast.BinaryExpr: + c.compileBinaryExpr(n) + case *ast.IndexExpr: + c.compileIndexExpr(n) + case *ast.Ident: + c.compileIdent(n) + case *ast.CallExpr: + c.compileCallExpr(n) + case *ast.UnaryExpr: + c.compileUnaryExpr(n) + case *ast.StarExpr: + c.compileStarExpr(n) + case *ast.ParenExpr: + c.compileParenExpr(n) + case *ast.SliceExpr: + c.compileSliceExpr(n) + case *ast.FuncType: + c.compileFuncType(n) + case *ast.ArrayType: + c.compileArrayType(n) + case *ast.MapType: + c.compileMapType(n) + case *ast.ChanType: + c.compileChanType(n) + case *ast.CompositeLit: + c.compileCompositeLit(n) + case *ast.FuncLit: + c.compileFuncLit(n) + case *ast.Ellipsis: + c.compileEllipsis(n) + case *ast.KeyValueExpr: + c.compileKeyValueExpr(n) + case *ast.SelectorExpr: + c.compileSelectorExpr(n) + case 
*ast.TypeAssertExpr: + c.compileTypeAssertExpr(n) + + default: + panic(c.errorf(n, "compileExpr: unexpected %T", n)) + } +} + +func (c *compiler) compileBasicLit(n *ast.BasicLit) { + if !c.strict { + v := literalValue(n) + if v == nil { + panic(c.errorf(n, "can't convert %s (%s) value", n.Value, n.Kind)) + } + c.prog.insts = append(c.prog.insts, instruction{ + op: opBasicLit, + valueIndex: c.internIface(n, v), + }) + return + } + + var inst instruction + switch n.Kind { + case token.INT: + inst.op = opStrictIntLit + case token.FLOAT: + inst.op = opStrictFloatLit + case token.STRING: + inst.op = opStrictStringLit + case token.CHAR: + inst.op = opStrictCharLit + default: + inst.op = opStrictComplexLit + } + inst.valueIndex = c.internString(n, n.Value) + c.prog.insts = append(c.prog.insts, inst) +} + +func (c *compiler) compileBinaryExpr(n *ast.BinaryExpr) { + c.prog.insts = append(c.prog.insts, instruction{ + op: opBinaryExpr, + value: c.toUint8(n, int(n.Op)), + }) + c.compileExpr(n.X) + c.compileExpr(n.Y) +} + +func (c *compiler) compileIndexExpr(n *ast.IndexExpr) { + c.emitInstOp(opIndexExpr) + c.compileExpr(n.X) + c.compileExpr(n.Index) +} + +func (c *compiler) compileWildIdent(n *ast.Ident, optional bool) { + info := decodeWildName(n.Name) + var inst instruction + switch { + case info.Name == "_" && !info.Seq: + inst.op = opNode + case info.Name == "_" && info.Seq: + inst.op = pickOp(optional, opOptNode, opNodeSeq) + case info.Name != "_" && !info.Seq: + inst.op = opNamedNode + inst.valueIndex = c.internString(n, info.Name) + default: + inst.op = pickOp(optional, opNamedOptNode, opNamedNodeSeq) + inst.valueIndex = c.internString(n, info.Name) + } + c.prog.insts = append(c.prog.insts, inst) +} + +func (c *compiler) compileIdent(n *ast.Ident) { + if isWildName(n.Name) { + c.compileWildIdent(n, false) + return + } + + c.prog.insts = append(c.prog.insts, instruction{ + op: opIdent, + valueIndex: c.internString(n, n.Name), + }) +} + +func (c *compiler) compileCallExpr(n *ast.CallExpr) { + op := opCallExpr + if n.Ellipsis.IsValid() { + op = opVariadicCallExpr + } + c.emitInstOp(op) + c.compileExpr(n.Fun) + for _, arg := range n.Args { + c.compileExpr(arg) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileUnaryExpr(n *ast.UnaryExpr) { + c.prog.insts = append(c.prog.insts, instruction{ + op: opUnaryExpr, + value: c.toUint8(n, int(n.Op)), + }) + c.compileExpr(n.X) +} + +func (c *compiler) compileStarExpr(n *ast.StarExpr) { + c.emitInstOp(opStarExpr) + c.compileExpr(n.X) +} + +func (c *compiler) compileParenExpr(n *ast.ParenExpr) { + c.emitInstOp(opParenExpr) + c.compileExpr(n.X) +} + +func (c *compiler) compileSliceExpr(n *ast.SliceExpr) { + switch { + case n.Low == nil && n.High == nil && !n.Slice3: + c.emitInstOp(opSliceExpr) + c.compileExpr(n.X) + case n.Low != nil && n.High == nil && !n.Slice3: + c.emitInstOp(opSliceFromExpr) + c.compileExpr(n.X) + c.compileExpr(n.Low) + case n.Low == nil && n.High != nil && !n.Slice3: + c.emitInstOp(opSliceToExpr) + c.compileExpr(n.X) + c.compileExpr(n.High) + case n.Low != nil && n.High != nil && !n.Slice3: + c.emitInstOp(opSliceFromToExpr) + c.compileExpr(n.X) + c.compileExpr(n.Low) + c.compileExpr(n.High) + case n.Low == nil && n.Slice3: + c.emitInstOp(opSliceToCapExpr) + c.compileExpr(n.X) + c.compileExpr(n.High) + c.compileExpr(n.Max) + case n.Low != nil && n.Slice3: + c.emitInstOp(opSliceFromToCapExpr) + c.compileExpr(n.X) + c.compileExpr(n.Low) + c.compileExpr(n.High) + c.compileExpr(n.Max) + default: + panic(c.errorf(n, "unexpected slice 
expr")) + } +} + +func (c *compiler) compileFuncType(n *ast.FuncType) { + void := n.Results == nil || len(n.Results.List) == 0 + if void { + c.emitInstOp(opVoidFuncType) + } else { + c.emitInstOp(opFuncType) + } + c.compileFieldList(n.Params) + if !void { + c.compileFieldList(n.Results) + } +} + +func (c *compiler) compileArrayType(n *ast.ArrayType) { + if n.Len == nil { + c.emitInstOp(opSliceType) + c.compileExpr(n.Elt) + } else { + c.emitInstOp(opArrayType) + c.compileExpr(n.Len) + c.compileExpr(n.Elt) + } +} + +func (c *compiler) compileMapType(n *ast.MapType) { + c.emitInstOp(opMapType) + c.compileExpr(n.Key) + c.compileExpr(n.Value) +} + +func (c *compiler) compileChanType(n *ast.ChanType) { + c.emitInst(instruction{ + op: opChanType, + value: c.toUint8(n, int(n.Dir)), + }) + c.compileExpr(n.Value) +} + +func (c *compiler) compileCompositeLit(n *ast.CompositeLit) { + if n.Type == nil { + c.emitInstOp(opCompositeLit) + } else { + c.emitInstOp(opTypedCompositeLit) + c.compileExpr(n.Type) + } + for _, elt := range n.Elts { + c.compileExpr(elt) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileFuncLit(n *ast.FuncLit) { + c.emitInstOp(opFuncLit) + c.compileFuncType(n.Type) + c.compileBlockStmt(n.Body) +} + +func (c *compiler) compileEllipsis(n *ast.Ellipsis) { + if n.Elt == nil { + c.emitInstOp(opEllipsis) + } else { + c.emitInstOp(opTypedEllipsis) + c.compileExpr(n.Elt) + } +} + +func (c *compiler) compileKeyValueExpr(n *ast.KeyValueExpr) { + c.emitInstOp(opKeyValueExpr) + c.compileExpr(n.Key) + c.compileExpr(n.Value) +} + +func (c *compiler) compileSelectorExpr(n *ast.SelectorExpr) { + if isWildName(n.Sel.Name) { + c.emitInstOp(opSelectorExpr) + c.compileWildIdent(n.Sel, false) + c.compileExpr(n.X) + return + } + + c.prog.insts = append(c.prog.insts, instruction{ + op: opSimpleSelectorExpr, + valueIndex: c.internString(n.Sel, n.Sel.String()), + }) + c.compileExpr(n.X) +} + +func (c *compiler) compileTypeAssertExpr(n *ast.TypeAssertExpr) { + if n.Type != nil { + c.emitInstOp(opTypeAssertExpr) + c.compileExpr(n.X) + c.compileExpr(n.Type) + } else { + c.emitInstOp(opTypeSwitchAssertExpr) + c.compileExpr(n.X) + } +} + +func (c *compiler) compileStmt(n ast.Stmt) { + switch n := n.(type) { + case *ast.AssignStmt: + c.compileAssignStmt(n) + case *ast.BlockStmt: + c.compileBlockStmt(n) + case *ast.ExprStmt: + c.compileExprStmt(n) + case *ast.IfStmt: + c.compileIfStmt(n) + case *ast.CaseClause: + c.compileCaseClause(n) + case *ast.SwitchStmt: + c.compileSwitchStmt(n) + case *ast.TypeSwitchStmt: + c.compileTypeSwitchStmt(n) + case *ast.SelectStmt: + c.compileSelectStmt(n) + case *ast.ForStmt: + c.compileForStmt(n) + case *ast.RangeStmt: + c.compileRangeStmt(n) + case *ast.IncDecStmt: + c.compileIncDecStmt(n) + case *ast.EmptyStmt: + c.compileEmptyStmt(n) + case *ast.ReturnStmt: + c.compileReturnStmt(n) + case *ast.BranchStmt: + c.compileBranchStmt(n) + case *ast.LabeledStmt: + c.compileLabeledStmt(n) + case *ast.GoStmt: + c.compileGoStmt(n) + case *ast.DeferStmt: + c.compileDeferStmt(n) + case *ast.SendStmt: + c.compileSendStmt(n) + case *ast.DeclStmt: + c.compileDecl(n.Decl) + + default: + panic(c.errorf(n, "compileStmt: unexpected %T", n)) + } +} + +func (c *compiler) compileAssignStmt(n *ast.AssignStmt) { + if len(n.Lhs) == 1 && len(n.Rhs) == 1 { + lhsInfo := decodeWildNode(n.Lhs[0]) + rhsInfo := decodeWildNode(n.Rhs[0]) + if !lhsInfo.Seq && !rhsInfo.Seq { + c.emitInst(instruction{ + op: opAssignStmt, + value: uint8(n.Tok), + }) + c.compileExpr(n.Lhs[0]) + c.compileExpr(n.Rhs[0]) + 
return + } + } + + c.emitInst(instruction{ + op: opMultiAssignStmt, + value: uint8(n.Tok), + }) + for _, x := range n.Lhs { + c.compileExpr(x) + } + c.emitInstOp(opEnd) + for _, x := range n.Rhs { + c.compileExpr(x) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileBlockStmt(n *ast.BlockStmt) { + c.emitInstOp(opBlockStmt) + for _, elt := range n.List { + c.compileStmt(elt) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileExprStmt(n *ast.ExprStmt) { + if ident, ok := n.X.(*ast.Ident); ok && isWildName(ident.Name) { + c.compileIdent(ident) + } else { + c.emitInstOp(opExprStmt) + c.compileExpr(n.X) + } +} + +func (c *compiler) compileIfStmt(n *ast.IfStmt) { + // Check for the special case: `if $*_ ...` should match all if statements. + if ident, ok := n.Cond.(*ast.Ident); ok && n.Init == nil && isWildName(ident.Name) { + info := decodeWildName(ident.Name) + if info.Seq && info.Name == "_" { + // Set Init to Cond, change cond from $*_ to $_. + n.Init = &ast.ExprStmt{X: n.Cond} + cond := &ast.Ident{Name: encodeWildName(info.Name, false)} + n.Cond = cond + c.compileIfStmt(n) + return + } + // Named $* is harder and slower. + c.prog.insts = append(c.prog.insts, instruction{ + op: pickOp(n.Else == nil, opIfNamedOptStmt, opIfNamedOptElseStmt), + valueIndex: c.internString(ident, info.Name), + }) + c.compileStmt(n.Body) + if n.Else != nil { + c.compileStmt(n.Else) + } + return + } + + switch { + case n.Init == nil && n.Else == nil: + c.emitInstOp(opIfStmt) + c.compileExpr(n.Cond) + c.compileStmt(n.Body) + case n.Init != nil && n.Else == nil: + c.emitInstOp(opIfInitStmt) + c.compileOptStmt(n.Init) + c.compileExpr(n.Cond) + c.compileStmt(n.Body) + case n.Init == nil && n.Else != nil: + c.emitInstOp(opIfElseStmt) + c.compileExpr(n.Cond) + c.compileStmt(n.Body) + c.compileStmt(n.Else) + case n.Init != nil && n.Else != nil: + c.emitInstOp(opIfInitElseStmt) + c.compileOptStmt(n.Init) + c.compileExpr(n.Cond) + c.compileStmt(n.Body) + c.compileStmt(n.Else) + + default: + panic(c.errorf(n, "unexpected if stmt")) + } +} + +func (c *compiler) compileCommClause(n *ast.CommClause) { + c.emitInstOp(pickOp(n.Comm == nil, opDefaultCommClause, opCommClause)) + if n.Comm != nil { + c.compileStmt(n.Comm) + } + for _, x := range n.Body { + c.compileStmt(x) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileCaseClause(n *ast.CaseClause) { + c.emitInstOp(pickOp(n.List == nil, opDefaultCaseClause, opCaseClause)) + if n.List != nil { + for _, x := range n.List { + c.compileExpr(x) + } + c.emitInstOp(opEnd) + } + for _, x := range n.Body { + c.compileStmt(x) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileSwitchBody(n *ast.BlockStmt) { + wildcardCase := func(cc *ast.CaseClause) *ast.Ident { + if len(cc.List) != 1 || len(cc.Body) != 1 { + return nil + } + v, ok := cc.List[0].(*ast.Ident) + if !ok || !isWildName(v.Name) { + return nil + } + bodyStmt, ok := cc.Body[0].(*ast.ExprStmt) + if !ok { + return nil + } + bodyIdent, ok := bodyStmt.X.(*ast.Ident) + if !ok || bodyIdent.Name != "gogrep_body" { + return nil + } + return v + } + for _, cc := range n.List { + cc := cc.(*ast.CaseClause) + wildcard := wildcardCase(cc) + if wildcard == nil { + c.compileCaseClause(cc) + continue + } + c.compileWildIdent(wildcard, false) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileSwitchStmt(n *ast.SwitchStmt) { + var op operation + switch { + case n.Init == nil && n.Tag == nil: + op = opSwitchStmt + case n.Init == nil && n.Tag != nil: + op = opSwitchTagStmt + case n.Init != nil && n.Tag == nil: + 
op = opSwitchInitStmt + default: + op = opSwitchInitTagStmt + } + + c.emitInstOp(op) + if n.Init != nil { + c.compileOptStmt(n.Init) + } + if n.Tag != nil { + c.compileOptExpr(n.Tag) + } + c.compileSwitchBody(n.Body) +} + +func (c *compiler) compileTypeSwitchStmt(n *ast.TypeSwitchStmt) { + c.emitInstOp(pickOp(n.Init == nil, opTypeSwitchStmt, opTypeSwitchInitStmt)) + if n.Init != nil { + c.compileOptStmt(n.Init) + } + c.compileStmt(n.Assign) + c.compileSwitchBody(n.Body) +} + +func (c *compiler) compileSelectStmt(n *ast.SelectStmt) { + c.emitInstOp(opSelectStmt) + + wildcardCase := func(cc *ast.CommClause) *ast.Ident { + if cc.Comm == nil { + return nil + } + vStmt, ok := cc.Comm.(*ast.ExprStmt) + if !ok { + return nil + } + v, ok := vStmt.X.(*ast.Ident) + if !ok || !isWildName(v.Name) { + return nil + } + bodyStmt, ok := cc.Body[0].(*ast.ExprStmt) + if !ok { + return nil + } + bodyIdent, ok := bodyStmt.X.(*ast.Ident) + if !ok || bodyIdent.Name != "gogrep_body" { + return nil + } + return v + } + for _, cc := range n.Body.List { + cc := cc.(*ast.CommClause) + wildcard := wildcardCase(cc) + if wildcard == nil { + c.compileCommClause(cc) + continue + } + c.compileWildIdent(wildcard, false) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileForStmt(n *ast.ForStmt) { + var op operation + switch { + case n.Init == nil && n.Cond == nil && n.Post == nil: + op = opForStmt + case n.Init == nil && n.Cond == nil && n.Post != nil: + op = opForPostStmt + case n.Init == nil && n.Cond != nil && n.Post == nil: + op = opForCondStmt + case n.Init == nil && n.Cond != nil && n.Post != nil: + op = opForCondPostStmt + case n.Init != nil && n.Cond == nil && n.Post == nil: + op = opForInitStmt + case n.Init != nil && n.Cond == nil && n.Post != nil: + op = opForInitPostStmt + case n.Init != nil && n.Cond != nil && n.Post == nil: + op = opForInitCondStmt + default: + op = opForInitCondPostStmt + } + + c.emitInstOp(op) + if n.Init != nil { + c.compileOptStmt(n.Init) + } + if n.Cond != nil { + c.compileOptExpr(n.Cond) + } + if n.Post != nil { + c.compileOptStmt(n.Post) + } + c.compileBlockStmt(n.Body) +} + +func (c *compiler) compileRangeStmt(n *ast.RangeStmt) { + switch { + case n.Key == nil && n.Value == nil: + c.emitInstOp(opRangeStmt) + c.compileExpr(n.X) + c.compileStmt(n.Body) + case n.Key != nil && n.Value == nil: + c.emitInst(instruction{ + op: opRangeKeyStmt, + value: c.toUint8(n, int(n.Tok)), + }) + c.compileExpr(n.Key) + c.compileExpr(n.X) + c.compileStmt(n.Body) + case n.Key != nil && n.Value != nil: + c.emitInst(instruction{ + op: opRangeKeyValueStmt, + value: c.toUint8(n, int(n.Tok)), + }) + c.compileExpr(n.Key) + c.compileExpr(n.Value) + c.compileExpr(n.X) + c.compileStmt(n.Body) + default: + panic(c.errorf(n, "unexpected range stmt")) + } +} + +func (c *compiler) compileIncDecStmt(n *ast.IncDecStmt) { + c.prog.insts = append(c.prog.insts, instruction{ + op: opIncDecStmt, + value: c.toUint8(n, int(n.Tok)), + }) + c.compileExpr(n.X) +} + +func (c *compiler) compileEmptyStmt(n *ast.EmptyStmt) { + _ = n // unused + c.emitInstOp(opEmptyStmt) +} + +func (c *compiler) compileReturnStmt(n *ast.ReturnStmt) { + c.emitInstOp(opReturnStmt) + for _, x := range n.Results { + c.compileExpr(x) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileBranchStmt(n *ast.BranchStmt) { + if n.Label != nil { + if isWildName(n.Label.Name) { + c.prog.insts = append(c.prog.insts, instruction{ + op: opLabeledBranchStmt, + value: c.toUint8(n, int(n.Tok)), + }) + c.compileWildIdent(n.Label, false) + } else { + 
c.prog.insts = append(c.prog.insts, instruction{ + op: opSimpleLabeledBranchStmt, + value: c.toUint8(n, int(n.Tok)), + valueIndex: c.internString(n.Label, n.Label.Name), + }) + } + return + } + c.prog.insts = append(c.prog.insts, instruction{ + op: opBranchStmt, + value: c.toUint8(n, int(n.Tok)), + }) +} + +func (c *compiler) compileLabeledStmt(n *ast.LabeledStmt) { + if isWildName(n.Label.Name) { + c.emitInstOp(opLabeledStmt) + c.compileWildIdent(n.Label, false) + c.compileStmt(n.Stmt) + return + } + + c.prog.insts = append(c.prog.insts, instruction{ + op: opSimpleLabeledStmt, + valueIndex: c.internString(n.Label, n.Label.Name), + }) + c.compileStmt(n.Stmt) +} + +func (c *compiler) compileGoStmt(n *ast.GoStmt) { + c.emitInstOp(opGoStmt) + c.compileExpr(n.Call) +} + +func (c *compiler) compileDeferStmt(n *ast.DeferStmt) { + c.emitInstOp(opDeferStmt) + c.compileExpr(n.Call) +} + +func (c *compiler) compileSendStmt(n *ast.SendStmt) { + c.emitInstOp(opSendStmt) + c.compileExpr(n.Chan) + c.compileExpr(n.Value) +} + +func (c *compiler) compileStmtSlice(stmts stmtSlice) { + c.emitInstOp(opMultiStmt) + for _, n := range stmts { + c.compileStmt(n) + } + c.emitInstOp(opEnd) +} + +func (c *compiler) compileExprSlice(exprs exprSlice) { + c.emitInstOp(opMultiExpr) + for _, n := range exprs { + c.compileExpr(n) + } + c.emitInstOp(opEnd) +} + +func pickOp(cond bool, ifTrue, ifFalse operation) operation { + if cond { + return ifTrue + } + return ifFalse +} + +func fitsUint8(v int) bool { + return v >= 0 && v <= 0xff +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gen_operations.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gen_operations.go new file mode 100644 index 000000000..dbf2ae9a7 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gen_operations.go @@ -0,0 +1,311 @@ +// +build main + +package main + +import ( + "bytes" + "fmt" + "go/format" + "io/ioutil" + "log" + "strings" + "text/template" +) + +var opPrototypes = []operationProto{ + {name: "Node", tag: "Node"}, + {name: "NamedNode", tag: "Node", valueIndex: "strings | wildcard name"}, + {name: "NodeSeq"}, + {name: "NamedNodeSeq", valueIndex: "strings | wildcard name"}, + {name: "OptNode"}, + {name: "NamedOptNode", valueIndex: "strings | wildcard name"}, + + {name: "MultiStmt", tag: "StmtList", args: "stmts...", example: "f(); g()"}, + {name: "MultiExpr", tag: "ExprList", args: "exprs...", example: "f(), g()"}, + + {name: "End"}, + + {name: "BasicLit", tag: "BasicLit", valueIndex: "ifaces | parsed literal value"}, + {name: "StrictIntLit", tag: "BasicLit", valueIndex: "strings | raw literal value"}, + {name: "StrictFloatLit", tag: "BasicLit", valueIndex: "strings | raw literal value"}, + {name: "StrictCharLit", tag: "BasicLit", valueIndex: "strings | raw literal value"}, + {name: "StrictStringLit", tag: "BasicLit", valueIndex: "strings | raw literal value"}, + {name: "StrictComplexLit", tag: "BasicLit", valueIndex: "strings | raw literal value"}, + + {name: "Ident", tag: "Ident", valueIndex: "strings | ident name"}, + + {name: "IndexExpr", tag: "IndexExpr", args: "x expr"}, + + {name: "SliceExpr", tag: "SliceExpr", args: "x"}, + {name: "SliceFromExpr", tag: "SliceExpr", args: "x from", example: "x[from:]"}, + {name: "SliceToExpr", tag: "SliceExpr", args: "x to", example: "x[:to]"}, + {name: "SliceFromToExpr", tag: "SliceExpr", args: "x from to", example: "x[from:to]"}, + {name: "SliceToCapExpr", tag: "SliceExpr", args: "x from cap", example: "x[:from:cap]"}, + {name: 
"SliceFromToCapExpr", tag: "SliceExpr", args: "x from to cap", example: "x[from:to:cap]"}, + + {name: "FuncLit", tag: "FuncLit", args: "type block"}, + + {name: "CompositeLit", tag: "CompositeLit", args: "elts...", example: "{elts...}"}, + {name: "TypedCompositeLit", tag: "CompositeLit", args: "typ elts...", example: "typ{elts...}"}, + + {name: "SimpleSelectorExpr", tag: "SelectorExpr", args: "x", valueIndex: "strings | selector name"}, + {name: "SelectorExpr", tag: "SelectorExpr", args: "x sel"}, + {name: "TypeAssertExpr", tag: "TypeAssertExpr", args: "x typ"}, + {name: "TypeSwitchAssertExpr", tag: "TypeAssertExpr", args: "x"}, + + {name: "VoidFuncType", tag: "FuncType", args: "params"}, + {name: "FuncType", tag: "FuncType", args: "params results"}, + {name: "ArrayType", tag: "ArrayType", args: "length elem"}, + {name: "SliceType", tag: "ArrayType", args: "elem"}, + {name: "MapType", tag: "MapType", args: "key value"}, + {name: "ChanType", tag: "ChanType", args: "value", value: "ast.ChanDir | channel direction"}, + {name: "KeyValueExpr", tag: "KeyValueExpr", args: "key value"}, + + {name: "Ellipsis", tag: "Ellipsis"}, + {name: "TypedEllipsis", tag: "Ellipsis", args: "type"}, + + {name: "StarExpr", tag: "StarExpr", args: "x"}, + {name: "UnaryExpr", tag: "UnaryExpr", args: "x", value: "token.Token | unary operator"}, + {name: "BinaryExpr", tag: "BinaryExpr", args: "x y", value: "token.Token | binary operator"}, + {name: "ParenExpr", tag: "ParenExpr", args: "x"}, + + {name: "VariadicCallExpr", tag: "CallExpr", args: "fn args...", example: "f(1, xs...)"}, + {name: "CallExpr", tag: "CallExpr", args: "fn args...", example: "f(1, xs)"}, + + {name: "AssignStmt", tag: "AssignStmt", args: "lhs rhs", value: "token.Token | ':=' or '='", example: "lhs := rhs()"}, + {name: "MultiAssignStmt", tag: "AssignStmt", args: "lhs... 
rhs...", value: "token.Token | ':=' or '='", example: "lhs1, lhs2 := rhs()"}, + + {name: "BranchStmt", tag: "BranchStmt", args: "x", value: "token.Token | branch kind"}, + {name: "SimpleLabeledBranchStmt", tag: "BranchStmt", args: "x", valueIndex: "strings | label name", value: "token.Token | branch kind"}, + {name: "LabeledBranchStmt", tag: "BranchStmt", args: "label x", value: "token.Token | branch kind"}, + {name: "SimpleLabeledStmt", tag: "LabeledStmt", args: "x", valueIndex: "strings | label name"}, + {name: "LabeledStmt", tag: "LabeledStmt", args: "label x"}, + + {name: "BlockStmt", tag: "BlockStmt", args: "body..."}, + {name: "ExprStmt", tag: "ExprStmt", args: "x"}, + + {name: "GoStmt", tag: "GoStmt", args: "x"}, + {name: "DeferStmt", tag: "DeferStmt", args: "x"}, + + {name: "SendStmt", tag: "SendStmt", args: "ch value"}, + + {name: "EmptyStmt", tag: "EmptyStmt"}, + {name: "IncDecStmt", tag: "IncDecStmt", args: "x", value: "token.Token | '++' or '--'"}, + {name: "ReturnStmt", tag: "ReturnStmt", args: "results..."}, + + {name: "IfStmt", tag: "IfStmt", args: "cond block", example: "if cond {}"}, + {name: "IfInitStmt", tag: "IfStmt", args: "init cond block", example: "if init; cond {}"}, + {name: "IfElseStmt", tag: "IfStmt", args: "cond block else", example: "if cond {} else ..."}, + {name: "IfInitElseStmt", tag: "IfStmt", args: "init cond block else", example: "if init; cond {} else ..."}, + {name: "IfNamedOptStmt", tag: "IfStmt", args: "block", valueIndex: "strings | wildcard name", example: "if $*x {}"}, + {name: "IfNamedOptElseStmt", tag: "IfStmt", args: "block else", valueIndex: "strings | wildcard name", example: "if $*x {} else ..."}, + + {name: "SwitchStmt", tag: "SwitchStmt", args: "body...", example: "switch {}"}, + {name: "SwitchTagStmt", tag: "SwitchStmt", args: "tag body...", example: "switch tag {}"}, + {name: "SwitchInitStmt", tag: "SwitchStmt", args: "init body...", example: "switch init; {}"}, + {name: "SwitchInitTagStmt", tag: "SwitchStmt", args: "init tag body...", example: "switch init; tag {}"}, + + {name: "SelectStmt", tag: "SelectStmt", args: "body..."}, + + {name: "TypeSwitchStmt", tag: "TypeSwitchStmt", args: "x block", example: "switch x.(type) {}"}, + {name: "TypeSwitchInitStmt", tag: "TypeSwitchStmt", args: "init x block", example: "switch init; x.(type) {}"}, + + {name: "CaseClause", tag: "CaseClause", args: "values... 
body..."}, + {name: "DefaultCaseClause", tag: "CaseClause", args: "body..."}, + + {name: "CommClause", tag: "CommClause", args: "comm body..."}, + {name: "DefaultCommClause", tag: "CommClause", args: "body..."}, + + {name: "ForStmt", tag: "ForStmt", args: "blocl", example: "for {}"}, + {name: "ForPostStmt", tag: "ForStmt", args: "post block", example: "for ; ; post {}"}, + {name: "ForCondStmt", tag: "ForStmt", args: "cond block", example: "for ; cond; {}"}, + {name: "ForCondPostStmt", tag: "ForStmt", args: "cond post block", example: "for ; cond; post {}"}, + {name: "ForInitStmt", tag: "ForStmt", args: "init block", example: "for init; ; {}"}, + {name: "ForInitPostStmt", tag: "ForStmt", args: "init post block", example: "for init; ; post {}"}, + {name: "ForInitCondStmt", tag: "ForStmt", args: "init cond block", example: "for init; cond; {}"}, + {name: "ForInitCondPostStmt", tag: "ForStmt", args: "init cond post block", example: "for init; cond; post {}"}, + + {name: "RangeStmt", tag: "RangeStmt", args: "x block", example: "for range x {}"}, + {name: "RangeKeyStmt", tag: "RangeStmt", args: "key x block", value: "token.Token | ':=' or '='", example: "for key := range x {}"}, + {name: "RangeKeyValueStmt", tag: "RangeStmt", args: "key value x block", value: "token.Token | ':=' or '='", example: "for key, value := range x {}"}, + + {name: "FieldList", args: "fields..."}, + {name: "UnnamedField", args: "typ", example: "type"}, + {name: "SimpleField", args: "typ", valueIndex: "strings | field name", example: "name type"}, + {name: "Field", args: "name typ", example: "$name type"}, + {name: "MultiField", args: "names... typ", example: "name1, name2 type"}, + + {name: "ValueInitSpec", tag: "ValueSpec", args: "lhs... rhs...", example: "lhs = rhs"}, + {name: "TypedValueInitSpec", tag: "ValueSpec", args: "lhs... type rhs...", example: "lhs typ = rhs"}, + {name: "TypedValueSpec", tag: "ValueSpec", args: "lhs... type", example: "lhs typ"}, + + {name: "TypeSpec", tag: "TypeSpec", args: "name type", example: "name type"}, + {name: "TypeAliasSpec", tag: "TypeSpec", args: "name type", example: "name = type"}, + + {name: "FuncDecl", tag: "FuncDecl", args: "name type block"}, + {name: "MethodDecl", tag: "FuncDecl", args: "recv name type block"}, + {name: "FuncProtoDecl", tag: "FuncDecl", args: "name type"}, + {name: "MethodProtoDecl", tag: "FuncDecl", args: "recv name type"}, + + {name: "ConstDecl", tag: "GenDecl", args: "valuespecs..."}, + {name: "VarDecl", tag: "GenDecl", args: "valuespecs..."}, + {name: "TypeDecl", tag: "GenDecl", args: "typespecs..."}, + + {name: "EmptyPackage", tag: "File", args: "name"}, +} + +type operationProto struct { + name string + value string + valueIndex string + tag string + example string + args string +} + +type operationInfo struct { + Example string + Args string + Enum uint8 + TagName string + Name string + ValueDoc string + ValueIndexDoc string + ExtraValueKindName string + ValueKindName string + VariadicMap uint64 + NumArgs int +} + +const stackUnchanged = "" + +var fileTemplate = template.Must(template.New("operations.go").Parse(`// Code generated "gen_operations.go"; DO NOT EDIT. 
+ +package gogrep + +import ( + "github.com/quasilyte/go-ruleguard/nodetag" +) + +//go:generate stringer -type=operation -trimprefix=op +type operation uint8 + +const ( + opInvalid operation = 0 +{{ range .Operations }} + // Tag: {{.TagName}} + {{- if .Args}}{{print "\n"}}// Args: {{.Args}}{{end}} + {{- if .Example}}{{print "\n"}}// Example: {{.Example}}{{end}} + {{- if .ValueDoc}}{{print "\n"}}// Value: {{.ValueDoc}}{{end}} + {{- if .ValueIndexDoc}}{{print "\n"}}// ValueIndex: {{.ValueIndexDoc}}{{end}} + op{{ .Name }} operation = {{.Enum}} +{{ end -}} +) + +type operationInfo struct { + Tag nodetag.Value + NumArgs int + ValueKind valueKind + ExtraValueKind valueKind + VariadicMap bitmap64 +} + +var operationInfoTable = [256]operationInfo{ + opInvalid: {}, + +{{ range .Operations -}} + op{{.Name}}: { + Tag: nodetag.{{.TagName}}, + NumArgs: {{.NumArgs}}, + ValueKind: {{.ValueKindName}}, + ExtraValueKind: {{.ExtraValueKindName}}, + VariadicMap: {{.VariadicMap}}, // {{printf "%b" .VariadicMap}} + }, +{{ end }} +} +`)) + +func main() { + operations := make([]operationInfo, len(opPrototypes)) + for i, proto := range opPrototypes { + enum := uint8(i + 1) + + tagName := proto.tag + if tagName == "" { + tagName = "Unknown" + } + + variadicMap := uint64(0) + numArgs := 0 + if proto.args != "" { + args := strings.Split(proto.args, " ") + numArgs = len(args) + for i, arg := range args { + isVariadic := strings.HasSuffix(arg, "...") + if isVariadic { + variadicMap |= 1 << i + } + } + } + + extraValueKindName := "emptyValue" + if proto.valueIndex != "" { + parts := strings.Split(proto.valueIndex, " | ") + typ := parts[0] + switch typ { + case "strings": + extraValueKindName = "stringValue" + case "ifaces": + extraValueKindName = "ifaceValue" + default: + panic(fmt.Sprintf("%s: unexpected %s type", proto.name, typ)) + } + } + valueKindName := "emptyValue" + if proto.value != "" { + parts := strings.Split(proto.value, " | ") + typ := parts[0] + switch typ { + case "token.Token": + valueKindName = "tokenValue" + case "ast.ChanDir": + valueKindName = "chandirValue" + default: + panic(fmt.Sprintf("%s: unexpected %s type", proto.name, typ)) + } + } + + operations[i] = operationInfo{ + Example: proto.example, + Args: proto.args, + Enum: enum, + TagName: tagName, + Name: proto.name, + ValueDoc: proto.value, + ValueIndexDoc: proto.valueIndex, + NumArgs: numArgs, + VariadicMap: variadicMap, + ExtraValueKindName: extraValueKindName, + ValueKindName: valueKindName, + } + } + + var buf bytes.Buffer + err := fileTemplate.Execute(&buf, map[string]interface{}{ + "Operations": operations, + }) + if err != nil { + log.Panicf("execute template: %v", err) + } + writeFile("operations.gen.go", buf.Bytes()) +} + +func writeFile(filename string, data []byte) { + pretty, err := format.Source(data) + if err != nil { + log.Panicf("gofmt: %v", err) + } + if err := ioutil.WriteFile(filename, pretty, 0666); err != nil { + log.Panicf("write %s: %v", filename, err) + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gogrep.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gogrep.go new file mode 100644 index 000000000..e0d3d0696 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/gogrep.go @@ -0,0 +1,66 @@ +package gogrep + +import ( + "go/ast" + "go/token" + + "github.com/quasilyte/go-ruleguard/nodetag" +) + +func IsEmptyNodeSlice(n ast.Node) bool { + if list, ok := n.(nodeSlice); ok { + return list.len() == 0 + } + return false +} + +// MatchData describes a successful 
pattern match. +type MatchData struct { + Node ast.Node + Capture []CapturedNode +} + +type CapturedNode struct { + Name string + Node ast.Node +} + +func (data MatchData) CapturedByName(name string) (ast.Node, bool) { + return findNamed(data.Capture, name) +} + +type Pattern struct { + m *matcher +} + +func (p *Pattern) NodeTag() nodetag.Value { + return operationInfoTable[p.m.prog.insts[0].op].Tag +} + +// MatchNode calls cb if n matches a pattern. +func (p *Pattern) MatchNode(n ast.Node, cb func(MatchData)) { + p.m.MatchNode(n, cb) +} + +// Clone creates a pattern copy. +func (p *Pattern) Clone() *Pattern { + clone := *p + clone.m = &matcher{} + *clone.m = *p.m + clone.m.capture = make([]CapturedNode, 0, 8) + return &clone +} + +func Compile(fset *token.FileSet, src string, strict bool) (*Pattern, error) { + n, err := parseExpr(fset, src) + if err != nil { + return nil, err + } + var c compiler + prog, err := c.Compile(fset, n, strict) + if err != nil { + return nil, err + } + m := newMatcher(prog) + return &Pattern{m: m}, nil +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/instructions.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/instructions.go new file mode 100644 index 000000000..5d286eaec --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/instructions.go @@ -0,0 +1,107 @@ +package gogrep + +import ( + "fmt" + "go/ast" + "go/token" + "strings" +) + +type bitmap64 uint64 + +func (m bitmap64) IsSet(pos int) bool { + return m&(1<= sliceLen { + break + } + } +} + +func (m *matcher) matchNamed(name string, n ast.Node) bool { + prev, ok := findNamed(m.capture, name) + if !ok { + // First occurrence, record value. + m.capture = append(m.capture, CapturedNode{Name: name, Node: n}) + return true + } + return equalNodes(prev, n) +} + +func (m *matcher) matchNodeWithInst(inst instruction, n ast.Node) bool { + switch inst.op { + case opNode: + return n != nil + case opOptNode: + return true + + case opNamedNode: + return n != nil && m.matchNamed(m.stringValue(inst), n) + case opNamedOptNode: + return m.matchNamed(m.stringValue(inst), n) + + case opBasicLit: + n, ok := n.(*ast.BasicLit) + return ok && m.ifaceValue(inst) == literalValue(n) + + case opStrictIntLit: + n, ok := n.(*ast.BasicLit) + return ok && n.Kind == token.INT && m.stringValue(inst) == n.Value + case opStrictFloatLit: + n, ok := n.(*ast.BasicLit) + return ok && n.Kind == token.FLOAT && m.stringValue(inst) == n.Value + case opStrictCharLit: + n, ok := n.(*ast.BasicLit) + return ok && n.Kind == token.CHAR && m.stringValue(inst) == n.Value + case opStrictStringLit: + n, ok := n.(*ast.BasicLit) + return ok && n.Kind == token.STRING && m.stringValue(inst) == n.Value + case opStrictComplexLit: + n, ok := n.(*ast.BasicLit) + return ok && n.Kind == token.IMAG && m.stringValue(inst) == n.Value + + case opIdent: + n, ok := n.(*ast.Ident) + return ok && m.stringValue(inst) == n.Name + + case opBinaryExpr: + n, ok := n.(*ast.BinaryExpr) + return ok && n.Op == token.Token(inst.value) && + m.matchNode(n.X) && m.matchNode(n.Y) + + case opUnaryExpr: + n, ok := n.(*ast.UnaryExpr) + return ok && n.Op == token.Token(inst.value) && m.matchNode(n.X) + + case opStarExpr: + n, ok := n.(*ast.StarExpr) + return ok && m.matchNode(n.X) + + case opVariadicCallExpr: + n, ok := n.(*ast.CallExpr) + return ok && n.Ellipsis.IsValid() && m.matchNode(n.Fun) && m.matchExprSlice(n.Args) + case opCallExpr: + n, ok := n.(*ast.CallExpr) + return ok && !n.Ellipsis.IsValid() && m.matchNode(n.Fun) && 
m.matchExprSlice(n.Args) + + case opSimpleSelectorExpr: + n, ok := n.(*ast.SelectorExpr) + return ok && m.stringValue(inst) == n.Sel.Name && m.matchNode(n.X) + case opSelectorExpr: + n, ok := n.(*ast.SelectorExpr) + return ok && m.matchNode(n.Sel) && m.matchNode(n.X) + + case opTypeAssertExpr: + n, ok := n.(*ast.TypeAssertExpr) + return ok && m.matchNode(n.X) && m.matchNode(n.Type) + case opTypeSwitchAssertExpr: + n, ok := n.(*ast.TypeAssertExpr) + return ok && n.Type == nil && m.matchNode(n.X) + + case opSliceExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low == nil && n.High == nil && m.matchNode(n.X) + case opSliceFromExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low != nil && n.High == nil && !n.Slice3 && + m.matchNode(n.X) && m.matchNode(n.Low) + case opSliceToExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low == nil && n.High != nil && !n.Slice3 && + m.matchNode(n.X) && m.matchNode(n.High) + case opSliceFromToExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low != nil && n.High != nil && !n.Slice3 && + m.matchNode(n.X) && m.matchNode(n.Low) && m.matchNode(n.High) + case opSliceToCapExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low == nil && n.High != nil && n.Max != nil && + m.matchNode(n.X) && m.matchNode(n.High) && m.matchNode(n.Max) + case opSliceFromToCapExpr: + n, ok := n.(*ast.SliceExpr) + return ok && n.Low != nil && n.High != nil && n.Max != nil && + m.matchNode(n.X) && m.matchNode(n.Low) && m.matchNode(n.High) && m.matchNode(n.Max) + + case opIndexExpr: + n, ok := n.(*ast.IndexExpr) + return ok && m.matchNode(n.X) && m.matchNode(n.Index) + + case opKeyValueExpr: + n, ok := n.(*ast.KeyValueExpr) + return ok && m.matchNode(n.Key) && m.matchNode(n.Value) + + case opParenExpr: + n, ok := n.(*ast.ParenExpr) + return ok && m.matchNode(n.X) + + case opEllipsis: + n, ok := n.(*ast.Ellipsis) + return ok && n.Elt == nil + case opTypedEllipsis: + n, ok := n.(*ast.Ellipsis) + return ok && n.Elt != nil && m.matchNode(n.Elt) + + case opSliceType: + n, ok := n.(*ast.ArrayType) + return ok && n.Len == nil && m.matchNode(n.Elt) + case opArrayType: + n, ok := n.(*ast.ArrayType) + return ok && n.Len != nil && m.matchNode(n.Len) && m.matchNode(n.Elt) + case opMapType: + n, ok := n.(*ast.MapType) + return ok && m.matchNode(n.Key) && m.matchNode(n.Value) + case opChanType: + n, ok := n.(*ast.ChanType) + return ok && ast.ChanDir(inst.value) == n.Dir && m.matchNode(n.Value) + case opVoidFuncType: + n, ok := n.(*ast.FuncType) + return ok && n.Results == nil && m.matchNode(n.Params) + case opFuncType: + n, ok := n.(*ast.FuncType) + return ok && n.Results != nil && m.matchNode(n.Params) && m.matchNode(n.Results) + + case opCompositeLit: + n, ok := n.(*ast.CompositeLit) + return ok && n.Type == nil && m.matchExprSlice(n.Elts) + case opTypedCompositeLit: + n, ok := n.(*ast.CompositeLit) + return ok && n.Type != nil && m.matchNode(n.Type) && m.matchExprSlice(n.Elts) + + case opUnnamedField: + n, ok := n.(*ast.Field) + return ok && len(n.Names) == 0 && m.matchNode(n.Type) + case opSimpleField: + n, ok := n.(*ast.Field) + return ok && len(n.Names) == 1 && m.stringValue(inst) == n.Names[0].Name && m.matchNode(n.Type) + case opField: + n, ok := n.(*ast.Field) + return ok && len(n.Names) == 1 && m.matchNode(n.Names[0]) && m.matchNode(n.Type) + case opMultiField: + n, ok := n.(*ast.Field) + return ok && len(n.Names) >= 2 && m.matchIdentSlice(n.Names) && m.matchNode(n.Type) + case opFieldList: + n, ok := n.(*ast.FieldList) + return ok && m.matchFieldSlice(n.List) + + case 
opFuncLit: + n, ok := n.(*ast.FuncLit) + return ok && m.matchNode(n.Type) && m.matchNode(n.Body) + + case opAssignStmt: + n, ok := n.(*ast.AssignStmt) + return ok && token.Token(inst.value) == n.Tok && + len(n.Lhs) == 1 && m.matchNode(n.Lhs[0]) && + len(n.Rhs) == 1 && m.matchNode(n.Rhs[0]) + case opMultiAssignStmt: + n, ok := n.(*ast.AssignStmt) + return ok && token.Token(inst.value) == n.Tok && + m.matchExprSlice(n.Lhs) && m.matchExprSlice(n.Rhs) + + case opExprStmt: + n, ok := n.(*ast.ExprStmt) + return ok && m.matchNode(n.X) + + case opGoStmt: + n, ok := n.(*ast.GoStmt) + return ok && m.matchNode(n.Call) + case opDeferStmt: + n, ok := n.(*ast.DeferStmt) + return ok && m.matchNode(n.Call) + case opSendStmt: + n, ok := n.(*ast.SendStmt) + return ok && m.matchNode(n.Chan) && m.matchNode(n.Value) + + case opBlockStmt: + n, ok := n.(*ast.BlockStmt) + return ok && m.matchStmtSlice(n.List) + + case opIfStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Init == nil && n.Else == nil && + m.matchNode(n.Cond) && m.matchNode(n.Body) + case opIfElseStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Init == nil && n.Else != nil && + m.matchNode(n.Cond) && m.matchNode(n.Body) && m.matchNode(n.Else) + case opIfInitStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Else == nil && + m.matchNode(n.Init) && m.matchNode(n.Cond) && m.matchNode(n.Body) + case opIfInitElseStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Else != nil && + m.matchNode(n.Init) && m.matchNode(n.Cond) && m.matchNode(n.Body) && m.matchNode(n.Else) + + case opIfNamedOptStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Else == nil && m.matchNode(n.Body) && + m.matchNamed(m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + case opIfNamedOptElseStmt: + n, ok := n.(*ast.IfStmt) + return ok && n.Else != nil && m.matchNode(n.Body) && m.matchNode(n.Else) && + m.matchNamed(m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + + case opCaseClause: + n, ok := n.(*ast.CaseClause) + return ok && n.List != nil && m.matchExprSlice(n.List) && m.matchStmtSlice(n.Body) + case opDefaultCaseClause: + n, ok := n.(*ast.CaseClause) + return ok && n.List == nil && m.matchStmtSlice(n.Body) + + case opSwitchStmt: + n, ok := n.(*ast.SwitchStmt) + return ok && n.Init == nil && n.Tag == nil && m.matchStmtSlice(n.Body.List) + case opSwitchTagStmt: + n, ok := n.(*ast.SwitchStmt) + return ok && n.Init == nil && m.matchNode(n.Tag) && m.matchStmtSlice(n.Body.List) + case opSwitchInitStmt: + n, ok := n.(*ast.SwitchStmt) + return ok && n.Tag == nil && m.matchNode(n.Init) && m.matchStmtSlice(n.Body.List) + case opSwitchInitTagStmt: + n, ok := n.(*ast.SwitchStmt) + return ok && m.matchNode(n.Init) && m.matchNode(n.Tag) && m.matchStmtSlice(n.Body.List) + + case opTypeSwitchStmt: + n, ok := n.(*ast.TypeSwitchStmt) + return ok && n.Init == nil && m.matchNode(n.Assign) && m.matchStmtSlice(n.Body.List) + case opTypeSwitchInitStmt: + n, ok := n.(*ast.TypeSwitchStmt) + return ok && m.matchNode(n.Init) && + m.matchNode(n.Assign) && m.matchStmtSlice(n.Body.List) + + case opCommClause: + n, ok := n.(*ast.CommClause) + return ok && n.Comm != nil && m.matchNode(n.Comm) && m.matchStmtSlice(n.Body) + case opDefaultCommClause: + n, ok := n.(*ast.CommClause) + return ok && n.Comm == nil && m.matchStmtSlice(n.Body) + + case opSelectStmt: + n, ok := n.(*ast.SelectStmt) + return ok && m.matchStmtSlice(n.Body.List) + + case opRangeStmt: + n, ok := n.(*ast.RangeStmt) + return ok && n.Key == nil && n.Value == nil && m.matchNode(n.X) && m.matchNode(n.Body) + case opRangeKeyStmt: + n, ok := 
n.(*ast.RangeStmt) + return ok && n.Key != nil && n.Value == nil && token.Token(inst.value) == n.Tok && + m.matchNode(n.Key) && m.matchNode(n.X) && m.matchNode(n.Body) + case opRangeKeyValueStmt: + n, ok := n.(*ast.RangeStmt) + return ok && n.Key != nil && n.Value != nil && token.Token(inst.value) == n.Tok && + m.matchNode(n.Key) && m.matchNode(n.Value) && m.matchNode(n.X) && m.matchNode(n.Body) + + case opForStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init == nil && n.Cond == nil && n.Post == nil && + m.matchNode(n.Body) + case opForPostStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init == nil && n.Cond == nil && n.Post != nil && + m.matchNode(n.Post) && m.matchNode(n.Body) + case opForCondStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init == nil && n.Cond != nil && n.Post == nil && + m.matchNode(n.Cond) && m.matchNode(n.Body) + case opForCondPostStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init == nil && n.Cond != nil && n.Post != nil && + m.matchNode(n.Cond) && m.matchNode(n.Post) && m.matchNode(n.Body) + case opForInitStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init != nil && n.Cond == nil && n.Post == nil && + m.matchNode(n.Init) && m.matchNode(n.Body) + case opForInitPostStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init != nil && n.Cond == nil && n.Post != nil && + m.matchNode(n.Init) && m.matchNode(n.Post) && m.matchNode(n.Body) + case opForInitCondStmt: + n, ok := n.(*ast.ForStmt) + return ok && n.Init != nil && n.Cond != nil && n.Post == nil && + m.matchNode(n.Init) && m.matchNode(n.Cond) && m.matchNode(n.Body) + case opForInitCondPostStmt: + n, ok := n.(*ast.ForStmt) + return ok && m.matchNode(n.Init) && m.matchNode(n.Cond) && m.matchNode(n.Post) && m.matchNode(n.Body) + + case opIncDecStmt: + n, ok := n.(*ast.IncDecStmt) + return ok && token.Token(inst.value) == n.Tok && m.matchNode(n.X) + + case opReturnStmt: + n, ok := n.(*ast.ReturnStmt) + return ok && m.matchExprSlice(n.Results) + + case opLabeledStmt: + n, ok := n.(*ast.LabeledStmt) + return ok && m.matchNode(n.Label) && m.matchNode(n.Stmt) + case opSimpleLabeledStmt: + n, ok := n.(*ast.LabeledStmt) + return ok && m.stringValue(inst) == n.Label.Name && m.matchNode(n.Stmt) + + case opLabeledBranchStmt: + n, ok := n.(*ast.BranchStmt) + return ok && n.Label != nil && token.Token(inst.value) == n.Tok && m.matchNode(n.Label) + case opSimpleLabeledBranchStmt: + n, ok := n.(*ast.BranchStmt) + return ok && n.Label != nil && m.stringValue(inst) == n.Label.Name && token.Token(inst.value) == n.Tok + case opBranchStmt: + n, ok := n.(*ast.BranchStmt) + return ok && n.Label == nil && token.Token(inst.value) == n.Tok + + case opEmptyStmt: + _, ok := n.(*ast.EmptyStmt) + return ok + + case opFuncDecl: + n, ok := n.(*ast.FuncDecl) + return ok && n.Recv == nil && n.Body != nil && + m.matchNode(n.Name) && m.matchNode(n.Type) && m.matchNode(n.Body) + case opFuncProtoDecl: + n, ok := n.(*ast.FuncDecl) + return ok && n.Recv == nil && n.Body == nil && + m.matchNode(n.Name) && m.matchNode(n.Type) + case opMethodDecl: + n, ok := n.(*ast.FuncDecl) + return ok && n.Recv != nil && n.Body != nil && + m.matchNode(n.Recv) && m.matchNode(n.Name) && m.matchNode(n.Type) && m.matchNode(n.Body) + case opMethodProtoDecl: + n, ok := n.(*ast.FuncDecl) + return ok && n.Recv != nil && n.Body == nil && + m.matchNode(n.Recv) && m.matchNode(n.Name) && m.matchNode(n.Type) + + case opValueInitSpec: + n, ok := n.(*ast.ValueSpec) + return ok && len(n.Values) != 0 && n.Type == nil && + m.matchIdentSlice(n.Names) && 
m.matchExprSlice(n.Values) + case opTypedValueSpec: + n, ok := n.(*ast.ValueSpec) + return ok && len(n.Values) == 0 && n.Type != nil && + m.matchIdentSlice(n.Names) && m.matchNode(n.Type) + case opTypedValueInitSpec: + n, ok := n.(*ast.ValueSpec) + return ok && len(n.Values) != 0 && n.Type != nil && + m.matchIdentSlice(n.Names) && m.matchNode(n.Type) && m.matchExprSlice(n.Values) + + case opTypeSpec: + n, ok := n.(*ast.TypeSpec) + return ok && !n.Assign.IsValid() && m.matchNode(n.Name) && m.matchNode(n.Type) + case opTypeAliasSpec: + n, ok := n.(*ast.TypeSpec) + return ok && n.Assign.IsValid() && m.matchNode(n.Name) && m.matchNode(n.Type) + + case opConstDecl: + n, ok := n.(*ast.GenDecl) + return ok && n.Tok == token.CONST && m.matchSpecSlice(n.Specs) + case opVarDecl: + n, ok := n.(*ast.GenDecl) + return ok && n.Tok == token.VAR && m.matchSpecSlice(n.Specs) + case opTypeDecl: + n, ok := n.(*ast.GenDecl) + return ok && n.Tok == token.TYPE && m.matchSpecSlice(n.Specs) + + case opEmptyPackage: + n, ok := n.(*ast.File) + return ok && len(n.Imports) == 0 && len(n.Decls) == 0 && m.matchNode(n.Name) + + default: + panic(fmt.Sprintf("unexpected op %s", inst.op)) + } +} + +func (m *matcher) matchNode(n ast.Node) bool { + return m.matchNodeWithInst(m.nextInst(), n) +} + +func (m *matcher) matchStmtSlice(stmts []ast.Stmt) bool { + matched, _ := m.matchNodeList(stmtSlice(stmts), false) + return matched != nil +} + +func (m *matcher) matchExprSlice(exprs []ast.Expr) bool { + matched, _ := m.matchNodeList(exprSlice(exprs), false) + return matched != nil +} + +func (m *matcher) matchFieldSlice(fields []*ast.Field) bool { + matched, _ := m.matchNodeList(fieldSlice(fields), false) + return matched != nil +} + +func (m *matcher) matchIdentSlice(idents []*ast.Ident) bool { + matched, _ := m.matchNodeList(identSlice(idents), false) + return matched != nil +} + +func (m *matcher) matchSpecSlice(specs []ast.Spec) bool { + matched, _ := m.matchNodeList(specSlice(specs), false) + return matched != nil +} + +// matchNodeList matches two lists of nodes. It uses a common algorithm to match +// wildcard patterns with any number of nodes without recursion. +func (m *matcher) matchNodeList(nodes nodeSlice, partial bool) (ast.Node, int) { + sliceLen := nodes.len() + inst := m.nextInst() + if inst.op == opEnd { + if sliceLen == 0 { + return nodes, 0 + } + return nil, -1 + } + pcBase := m.pc + pcNext := 0 + j := 0 + jNext := 0 + partialStart, partialEnd := 0, sliceLen + + type restart struct { + matches []CapturedNode + pc int + j int + wildStart int + wildName string + } + // We need to stack these because otherwise some edge cases + // would not match properly. Since we have various kinds of + // wildcards (nodes containing them, $_, and $*_), in some cases + // we may have to go back and do multiple restarts to get to the + // right starting position. 
+ var stack []restart + wildName := "" + wildStart := 0 + push := func(next int) { + if next > sliceLen { + return // would be discarded anyway + } + pcNext = m.pc - 1 + jNext = next + stack = append(stack, restart{m.capture, pcNext, next, wildStart, wildName}) + } + pop := func() { + j = jNext + m.pc = pcNext + m.capture = stack[len(stack)-1].matches + wildName = stack[len(stack)-1].wildName + wildStart = stack[len(stack)-1].wildStart + stack = stack[:len(stack)-1] + pcNext = 0 + jNext = 0 + if len(stack) > 0 { + pcNext = stack[len(stack)-1].pc + jNext = stack[len(stack)-1].j + } + } + + // wouldMatch returns whether the current wildcard - if any - + // matches the nodes we are currently trying it on. + wouldMatch := func() bool { + switch wildName { + case "", "_": + return true + } + return m.matchNamed(wildName, nodes.slice(wildStart, j)) + } + for ; inst.op != opEnd || j < sliceLen; inst = m.nextInst() { + if inst.op != opEnd { + if inst.op == opNodeSeq || inst.op == opNamedNodeSeq { + // keep track of where this wildcard + // started (if name == wildName, + // we're trying the same wildcard + // matching one more node) + name := "_" + if inst.op == opNamedNodeSeq { + name = m.stringValue(inst) + } + if name != wildName { + wildStart = j + wildName = name + } + // try to match zero or more at j, + // restarting at j+1 if it fails + push(j + 1) + continue + } + if partial && m.pc == pcBase { + // let "b; c" match "a; b; c" + // (simulates a $*_ at the beginning) + partialStart = j + push(j + 1) + } + if j < sliceLen && wouldMatch() && m.matchNodeWithInst(inst, nodes.at(j)) { + // ordinary match + wildName = "" + j++ + continue + } + } + if partial && inst.op == opEnd && wildName == "" { + partialEnd = j + break // let "b; c" match "b; c; d" + } + // mismatch, try to restart + if 0 < jNext && jNext <= sliceLen && (m.pc != pcNext || j != jNext) { + pop() + continue + } + return nil, -1 + } + if !wouldMatch() { + return nil, -1 + } + return nodes.slice(partialStart, partialEnd), partialEnd + 1 +} + +func findNamed(capture []CapturedNode, name string) (ast.Node, bool) { + for _, c := range capture { + if c.Name == name { + return c.Node, true + } + } + return nil, false +} + +func literalValue(lit *ast.BasicLit) interface{} { + switch lit.Kind { + case token.INT: + v, err := strconv.ParseInt(lit.Value, 0, 64) + if err == nil { + return v + } + case token.CHAR: + s, err := strconv.Unquote(lit.Value) + if err != nil { + return nil + } + // Return the first rune. 
+ for _, c := range s { + return c + } + case token.STRING: + s, err := strconv.Unquote(lit.Value) + if err == nil { + return s + } + case token.FLOAT: + v, err := strconv.ParseFloat(lit.Value, 64) + if err == nil { + return v + } + case token.IMAG: + v, err := strconv.ParseComplex(lit.Value, 128) + if err == nil { + return v + } + } + return nil +} + +func equalNodes(x, y ast.Node) bool { + if x == nil || y == nil { + return x == y + } + switch x := x.(type) { + case stmtSlice: + y, ok := y.(stmtSlice) + if !ok || len(x) != len(y) { + return false + } + for i := range x { + if !astequal.Stmt(x[i], y[i]) { + return false + } + } + return true + case exprSlice: + y, ok := y.(exprSlice) + if !ok || len(x) != len(y) { + return false + } + for i := range x { + if !astequal.Expr(x[i], y[i]) { + return false + } + } + return true + default: + return astequal.Node(x, y) + } +} + +func toStmtSlice(nodes ...ast.Node) stmtSlice { + var stmts []ast.Stmt + for _, node := range nodes { + switch x := node.(type) { + case nil: + case ast.Stmt: + stmts = append(stmts, x) + case ast.Expr: + stmts = append(stmts, &ast.ExprStmt{X: x}) + default: + panic(fmt.Sprintf("unexpected node type: %T", x)) + } + } + return stmtSlice(stmts) +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operation_string.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operation_string.go new file mode 100644 index 000000000..9f50e2795 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operation_string.go @@ -0,0 +1,129 @@ +// Code generated by "stringer -type=operation -trimprefix=op"; DO NOT EDIT. + +package gogrep + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[opInvalid-0] + _ = x[opNode-1] + _ = x[opNamedNode-2] + _ = x[opNodeSeq-3] + _ = x[opNamedNodeSeq-4] + _ = x[opOptNode-5] + _ = x[opNamedOptNode-6] + _ = x[opMultiStmt-7] + _ = x[opMultiExpr-8] + _ = x[opEnd-9] + _ = x[opBasicLit-10] + _ = x[opStrictIntLit-11] + _ = x[opStrictFloatLit-12] + _ = x[opStrictCharLit-13] + _ = x[opStrictStringLit-14] + _ = x[opStrictComplexLit-15] + _ = x[opIdent-16] + _ = x[opIndexExpr-17] + _ = x[opSliceExpr-18] + _ = x[opSliceFromExpr-19] + _ = x[opSliceToExpr-20] + _ = x[opSliceFromToExpr-21] + _ = x[opSliceToCapExpr-22] + _ = x[opSliceFromToCapExpr-23] + _ = x[opFuncLit-24] + _ = x[opCompositeLit-25] + _ = x[opTypedCompositeLit-26] + _ = x[opSimpleSelectorExpr-27] + _ = x[opSelectorExpr-28] + _ = x[opTypeAssertExpr-29] + _ = x[opTypeSwitchAssertExpr-30] + _ = x[opVoidFuncType-31] + _ = x[opFuncType-32] + _ = x[opArrayType-33] + _ = x[opSliceType-34] + _ = x[opMapType-35] + _ = x[opChanType-36] + _ = x[opKeyValueExpr-37] + _ = x[opEllipsis-38] + _ = x[opTypedEllipsis-39] + _ = x[opStarExpr-40] + _ = x[opUnaryExpr-41] + _ = x[opBinaryExpr-42] + _ = x[opParenExpr-43] + _ = x[opVariadicCallExpr-44] + _ = x[opCallExpr-45] + _ = x[opAssignStmt-46] + _ = x[opMultiAssignStmt-47] + _ = x[opBranchStmt-48] + _ = x[opSimpleLabeledBranchStmt-49] + _ = x[opLabeledBranchStmt-50] + _ = x[opSimpleLabeledStmt-51] + _ = x[opLabeledStmt-52] + _ = x[opBlockStmt-53] + _ = x[opExprStmt-54] + _ = x[opGoStmt-55] + _ = x[opDeferStmt-56] + _ = x[opSendStmt-57] + _ = x[opEmptyStmt-58] + _ = x[opIncDecStmt-59] + _ = x[opReturnStmt-60] + _ = x[opIfStmt-61] + _ = x[opIfInitStmt-62] + _ = x[opIfElseStmt-63] + _ = x[opIfInitElseStmt-64] + _ = x[opIfNamedOptStmt-65] + _ = x[opIfNamedOptElseStmt-66] + _ = x[opSwitchStmt-67] + _ = x[opSwitchTagStmt-68] + _ = x[opSwitchInitStmt-69] + _ = x[opSwitchInitTagStmt-70] + _ = x[opSelectStmt-71] + _ = x[opTypeSwitchStmt-72] + _ = x[opTypeSwitchInitStmt-73] + _ = x[opCaseClause-74] + _ = x[opDefaultCaseClause-75] + _ = x[opCommClause-76] + _ = x[opDefaultCommClause-77] + _ = x[opForStmt-78] + _ = x[opForPostStmt-79] + _ = x[opForCondStmt-80] + _ = x[opForCondPostStmt-81] + _ = x[opForInitStmt-82] + _ = x[opForInitPostStmt-83] + _ = x[opForInitCondStmt-84] + _ = x[opForInitCondPostStmt-85] + _ = x[opRangeStmt-86] + _ = x[opRangeKeyStmt-87] + _ = x[opRangeKeyValueStmt-88] + _ = x[opFieldList-89] + _ = x[opUnnamedField-90] + _ = x[opSimpleField-91] + _ = x[opField-92] + _ = x[opMultiField-93] + _ = x[opValueInitSpec-94] + _ = x[opTypedValueInitSpec-95] + _ = x[opTypedValueSpec-96] + _ = x[opTypeSpec-97] + _ = x[opTypeAliasSpec-98] + _ = x[opFuncDecl-99] + _ = x[opMethodDecl-100] + _ = x[opFuncProtoDecl-101] + _ = x[opMethodProtoDecl-102] + _ = x[opConstDecl-103] + _ = x[opVarDecl-104] + _ = x[opTypeDecl-105] + _ = x[opEmptyPackage-106] +} + +const _operation_name = 
"InvalidNodeNamedNodeNodeSeqNamedNodeSeqOptNodeNamedOptNodeMultiStmtMultiExprEndBasicLitStrictIntLitStrictFloatLitStrictCharLitStrictStringLitStrictComplexLitIdentIndexExprSliceExprSliceFromExprSliceToExprSliceFromToExprSliceToCapExprSliceFromToCapExprFuncLitCompositeLitTypedCompositeLitSimpleSelectorExprSelectorExprTypeAssertExprTypeSwitchAssertExprVoidFuncTypeFuncTypeArrayTypeSliceTypeMapTypeChanTypeKeyValueExprEllipsisTypedEllipsisStarExprUnaryExprBinaryExprParenExprVariadicCallExprCallExprAssignStmtMultiAssignStmtBranchStmtSimpleLabeledBranchStmtLabeledBranchStmtSimpleLabeledStmtLabeledStmtBlockStmtExprStmtGoStmtDeferStmtSendStmtEmptyStmtIncDecStmtReturnStmtIfStmtIfInitStmtIfElseStmtIfInitElseStmtIfNamedOptStmtIfNamedOptElseStmtSwitchStmtSwitchTagStmtSwitchInitStmtSwitchInitTagStmtSelectStmtTypeSwitchStmtTypeSwitchInitStmtCaseClauseDefaultCaseClauseCommClauseDefaultCommClauseForStmtForPostStmtForCondStmtForCondPostStmtForInitStmtForInitPostStmtForInitCondStmtForInitCondPostStmtRangeStmtRangeKeyStmtRangeKeyValueStmtFieldListUnnamedFieldSimpleFieldFieldMultiFieldValueInitSpecTypedValueInitSpecTypedValueSpecTypeSpecTypeAliasSpecFuncDeclMethodDeclFuncProtoDeclMethodProtoDeclConstDeclVarDeclTypeDeclEmptyPackage" + +var _operation_index = [...]uint16{0, 7, 11, 20, 27, 39, 46, 58, 67, 76, 79, 87, 99, 113, 126, 141, 157, 162, 171, 180, 193, 204, 219, 233, 251, 258, 270, 287, 305, 317, 331, 351, 363, 371, 380, 389, 396, 404, 416, 424, 437, 445, 454, 464, 473, 489, 497, 507, 522, 532, 555, 572, 589, 600, 609, 617, 623, 632, 640, 649, 659, 669, 675, 685, 695, 709, 723, 741, 751, 764, 778, 795, 805, 819, 837, 847, 864, 874, 891, 898, 909, 920, 935, 946, 961, 976, 995, 1004, 1016, 1033, 1042, 1054, 1065, 1070, 1080, 1093, 1111, 1125, 1133, 1146, 1154, 1164, 1177, 1192, 1201, 1208, 1216, 1228} + +func (i operation) String() string { + if i >= operation(len(_operation_index)-1) { + return "operation(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _operation_name[_operation_index[i]:_operation_index[i+1]] +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operations.gen.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operations.gen.go new file mode 100644 index 000000000..f4d7cff82 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/operations.gen.go @@ -0,0 +1,1249 @@ +// Code generated "gen_operations.go"; DO NOT EDIT. + +package gogrep + +import ( + "github.com/quasilyte/go-ruleguard/nodetag" +) + +//go:generate stringer -type=operation -trimprefix=op +type operation uint8 + +const ( + opInvalid operation = 0 + + // Tag: Node + opNode operation = 1 + + // Tag: Node + // ValueIndex: strings | wildcard name + opNamedNode operation = 2 + + // Tag: Unknown + opNodeSeq operation = 3 + + // Tag: Unknown + // ValueIndex: strings | wildcard name + opNamedNodeSeq operation = 4 + + // Tag: Unknown + opOptNode operation = 5 + + // Tag: Unknown + // ValueIndex: strings | wildcard name + opNamedOptNode operation = 6 + + // Tag: StmtList + // Args: stmts... + // Example: f(); g() + opMultiStmt operation = 7 + + // Tag: ExprList + // Args: exprs... 
+ // Example: f(), g() + opMultiExpr operation = 8 + + // Tag: Unknown + opEnd operation = 9 + + // Tag: BasicLit + // ValueIndex: ifaces | parsed literal value + opBasicLit operation = 10 + + // Tag: BasicLit + // ValueIndex: strings | raw literal value + opStrictIntLit operation = 11 + + // Tag: BasicLit + // ValueIndex: strings | raw literal value + opStrictFloatLit operation = 12 + + // Tag: BasicLit + // ValueIndex: strings | raw literal value + opStrictCharLit operation = 13 + + // Tag: BasicLit + // ValueIndex: strings | raw literal value + opStrictStringLit operation = 14 + + // Tag: BasicLit + // ValueIndex: strings | raw literal value + opStrictComplexLit operation = 15 + + // Tag: Ident + // ValueIndex: strings | ident name + opIdent operation = 16 + + // Tag: IndexExpr + // Args: x expr + opIndexExpr operation = 17 + + // Tag: SliceExpr + // Args: x + opSliceExpr operation = 18 + + // Tag: SliceExpr + // Args: x from + // Example: x[from:] + opSliceFromExpr operation = 19 + + // Tag: SliceExpr + // Args: x to + // Example: x[:to] + opSliceToExpr operation = 20 + + // Tag: SliceExpr + // Args: x from to + // Example: x[from:to] + opSliceFromToExpr operation = 21 + + // Tag: SliceExpr + // Args: x from cap + // Example: x[:from:cap] + opSliceToCapExpr operation = 22 + + // Tag: SliceExpr + // Args: x from to cap + // Example: x[from:to:cap] + opSliceFromToCapExpr operation = 23 + + // Tag: FuncLit + // Args: type block + opFuncLit operation = 24 + + // Tag: CompositeLit + // Args: elts... + // Example: {elts...} + opCompositeLit operation = 25 + + // Tag: CompositeLit + // Args: typ elts... + // Example: typ{elts...} + opTypedCompositeLit operation = 26 + + // Tag: SelectorExpr + // Args: x + // ValueIndex: strings | selector name + opSimpleSelectorExpr operation = 27 + + // Tag: SelectorExpr + // Args: x sel + opSelectorExpr operation = 28 + + // Tag: TypeAssertExpr + // Args: x typ + opTypeAssertExpr operation = 29 + + // Tag: TypeAssertExpr + // Args: x + opTypeSwitchAssertExpr operation = 30 + + // Tag: FuncType + // Args: params + opVoidFuncType operation = 31 + + // Tag: FuncType + // Args: params results + opFuncType operation = 32 + + // Tag: ArrayType + // Args: length elem + opArrayType operation = 33 + + // Tag: ArrayType + // Args: elem + opSliceType operation = 34 + + // Tag: MapType + // Args: key value + opMapType operation = 35 + + // Tag: ChanType + // Args: value + // Value: ast.ChanDir | channel direction + opChanType operation = 36 + + // Tag: KeyValueExpr + // Args: key value + opKeyValueExpr operation = 37 + + // Tag: Ellipsis + opEllipsis operation = 38 + + // Tag: Ellipsis + // Args: type + opTypedEllipsis operation = 39 + + // Tag: StarExpr + // Args: x + opStarExpr operation = 40 + + // Tag: UnaryExpr + // Args: x + // Value: token.Token | unary operator + opUnaryExpr operation = 41 + + // Tag: BinaryExpr + // Args: x y + // Value: token.Token | binary operator + opBinaryExpr operation = 42 + + // Tag: ParenExpr + // Args: x + opParenExpr operation = 43 + + // Tag: CallExpr + // Args: fn args... + // Example: f(1, xs...) + opVariadicCallExpr operation = 44 + + // Tag: CallExpr + // Args: fn args... + // Example: f(1, xs) + opCallExpr operation = 45 + + // Tag: AssignStmt + // Args: lhs rhs + // Example: lhs := rhs() + // Value: token.Token | ':=' or '=' + opAssignStmt operation = 46 + + // Tag: AssignStmt + // Args: lhs... rhs... 
+ // Example: lhs1, lhs2 := rhs() + // Value: token.Token | ':=' or '=' + opMultiAssignStmt operation = 47 + + // Tag: BranchStmt + // Args: x + // Value: token.Token | branch kind + opBranchStmt operation = 48 + + // Tag: BranchStmt + // Args: x + // Value: token.Token | branch kind + // ValueIndex: strings | label name + opSimpleLabeledBranchStmt operation = 49 + + // Tag: BranchStmt + // Args: label x + // Value: token.Token | branch kind + opLabeledBranchStmt operation = 50 + + // Tag: LabeledStmt + // Args: x + // ValueIndex: strings | label name + opSimpleLabeledStmt operation = 51 + + // Tag: LabeledStmt + // Args: label x + opLabeledStmt operation = 52 + + // Tag: BlockStmt + // Args: body... + opBlockStmt operation = 53 + + // Tag: ExprStmt + // Args: x + opExprStmt operation = 54 + + // Tag: GoStmt + // Args: x + opGoStmt operation = 55 + + // Tag: DeferStmt + // Args: x + opDeferStmt operation = 56 + + // Tag: SendStmt + // Args: ch value + opSendStmt operation = 57 + + // Tag: EmptyStmt + opEmptyStmt operation = 58 + + // Tag: IncDecStmt + // Args: x + // Value: token.Token | '++' or '--' + opIncDecStmt operation = 59 + + // Tag: ReturnStmt + // Args: results... + opReturnStmt operation = 60 + + // Tag: IfStmt + // Args: cond block + // Example: if cond {} + opIfStmt operation = 61 + + // Tag: IfStmt + // Args: init cond block + // Example: if init; cond {} + opIfInitStmt operation = 62 + + // Tag: IfStmt + // Args: cond block else + // Example: if cond {} else ... + opIfElseStmt operation = 63 + + // Tag: IfStmt + // Args: init cond block else + // Example: if init; cond {} else ... + opIfInitElseStmt operation = 64 + + // Tag: IfStmt + // Args: block + // Example: if $*x {} + // ValueIndex: strings | wildcard name + opIfNamedOptStmt operation = 65 + + // Tag: IfStmt + // Args: block else + // Example: if $*x {} else ... + // ValueIndex: strings | wildcard name + opIfNamedOptElseStmt operation = 66 + + // Tag: SwitchStmt + // Args: body... + // Example: switch {} + opSwitchStmt operation = 67 + + // Tag: SwitchStmt + // Args: tag body... + // Example: switch tag {} + opSwitchTagStmt operation = 68 + + // Tag: SwitchStmt + // Args: init body... + // Example: switch init; {} + opSwitchInitStmt operation = 69 + + // Tag: SwitchStmt + // Args: init tag body... + // Example: switch init; tag {} + opSwitchInitTagStmt operation = 70 + + // Tag: SelectStmt + // Args: body... + opSelectStmt operation = 71 + + // Tag: TypeSwitchStmt + // Args: x block + // Example: switch x.(type) {} + opTypeSwitchStmt operation = 72 + + // Tag: TypeSwitchStmt + // Args: init x block + // Example: switch init; x.(type) {} + opTypeSwitchInitStmt operation = 73 + + // Tag: CaseClause + // Args: values... body... + opCaseClause operation = 74 + + // Tag: CaseClause + // Args: body... + opDefaultCaseClause operation = 75 + + // Tag: CommClause + // Args: comm body... + opCommClause operation = 76 + + // Tag: CommClause + // Args: body... 
+ opDefaultCommClause operation = 77 + + // Tag: ForStmt + // Args: blocl + // Example: for {} + opForStmt operation = 78 + + // Tag: ForStmt + // Args: post block + // Example: for ; ; post {} + opForPostStmt operation = 79 + + // Tag: ForStmt + // Args: cond block + // Example: for ; cond; {} + opForCondStmt operation = 80 + + // Tag: ForStmt + // Args: cond post block + // Example: for ; cond; post {} + opForCondPostStmt operation = 81 + + // Tag: ForStmt + // Args: init block + // Example: for init; ; {} + opForInitStmt operation = 82 + + // Tag: ForStmt + // Args: init post block + // Example: for init; ; post {} + opForInitPostStmt operation = 83 + + // Tag: ForStmt + // Args: init cond block + // Example: for init; cond; {} + opForInitCondStmt operation = 84 + + // Tag: ForStmt + // Args: init cond post block + // Example: for init; cond; post {} + opForInitCondPostStmt operation = 85 + + // Tag: RangeStmt + // Args: x block + // Example: for range x {} + opRangeStmt operation = 86 + + // Tag: RangeStmt + // Args: key x block + // Example: for key := range x {} + // Value: token.Token | ':=' or '=' + opRangeKeyStmt operation = 87 + + // Tag: RangeStmt + // Args: key value x block + // Example: for key, value := range x {} + // Value: token.Token | ':=' or '=' + opRangeKeyValueStmt operation = 88 + + // Tag: Unknown + // Args: fields... + opFieldList operation = 89 + + // Tag: Unknown + // Args: typ + // Example: type + opUnnamedField operation = 90 + + // Tag: Unknown + // Args: typ + // Example: name type + // ValueIndex: strings | field name + opSimpleField operation = 91 + + // Tag: Unknown + // Args: name typ + // Example: $name type + opField operation = 92 + + // Tag: Unknown + // Args: names... typ + // Example: name1, name2 type + opMultiField operation = 93 + + // Tag: ValueSpec + // Args: lhs... rhs... + // Example: lhs = rhs + opValueInitSpec operation = 94 + + // Tag: ValueSpec + // Args: lhs... type rhs... + // Example: lhs typ = rhs + opTypedValueInitSpec operation = 95 + + // Tag: ValueSpec + // Args: lhs... type + // Example: lhs typ + opTypedValueSpec operation = 96 + + // Tag: TypeSpec + // Args: name type + // Example: name type + opTypeSpec operation = 97 + + // Tag: TypeSpec + // Args: name type + // Example: name = type + opTypeAliasSpec operation = 98 + + // Tag: FuncDecl + // Args: name type block + opFuncDecl operation = 99 + + // Tag: FuncDecl + // Args: recv name type block + opMethodDecl operation = 100 + + // Tag: FuncDecl + // Args: name type + opFuncProtoDecl operation = 101 + + // Tag: FuncDecl + // Args: recv name type + opMethodProtoDecl operation = 102 + + // Tag: GenDecl + // Args: valuespecs... + opConstDecl operation = 103 + + // Tag: GenDecl + // Args: valuespecs... + opVarDecl operation = 104 + + // Tag: GenDecl + // Args: typespecs... 
+ opTypeDecl operation = 105 + + // Tag: File + // Args: name + opEmptyPackage operation = 106 +) + +type operationInfo struct { + Tag nodetag.Value + NumArgs int + ValueKind valueKind + ExtraValueKind valueKind + VariadicMap bitmap64 +} + +var operationInfoTable = [256]operationInfo{ + opInvalid: {}, + + opNode: { + Tag: nodetag.Node, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opNamedNode: { + Tag: nodetag.Node, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opNodeSeq: { + Tag: nodetag.Unknown, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opNamedNodeSeq: { + Tag: nodetag.Unknown, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opOptNode: { + Tag: nodetag.Unknown, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opNamedOptNode: { + Tag: nodetag.Unknown, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opMultiStmt: { + Tag: nodetag.StmtList, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opMultiExpr: { + Tag: nodetag.ExprList, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opEnd: { + Tag: nodetag.Unknown, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opBasicLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: ifaceValue, + VariadicMap: 0, // 0 + }, + opStrictIntLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opStrictFloatLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opStrictCharLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opStrictStringLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opStrictComplexLit: { + Tag: nodetag.BasicLit, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opIdent: { + Tag: nodetag.Ident, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opIndexExpr: { + Tag: nodetag.IndexExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceFromExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceToExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceFromToExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceToCapExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceFromToCapExpr: { + Tag: nodetag.SliceExpr, + NumArgs: 4, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opFuncLit: { 
+ Tag: nodetag.FuncLit, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opCompositeLit: { + Tag: nodetag.CompositeLit, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opTypedCompositeLit: { + Tag: nodetag.CompositeLit, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opSimpleSelectorExpr: { + Tag: nodetag.SelectorExpr, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opSelectorExpr: { + Tag: nodetag.SelectorExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opTypeAssertExpr: { + Tag: nodetag.TypeAssertExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opTypeSwitchAssertExpr: { + Tag: nodetag.TypeAssertExpr, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opVoidFuncType: { + Tag: nodetag.FuncType, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opFuncType: { + Tag: nodetag.FuncType, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opArrayType: { + Tag: nodetag.ArrayType, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSliceType: { + Tag: nodetag.ArrayType, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opMapType: { + Tag: nodetag.MapType, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opChanType: { + Tag: nodetag.ChanType, + NumArgs: 1, + ValueKind: chandirValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opKeyValueExpr: { + Tag: nodetag.KeyValueExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opEllipsis: { + Tag: nodetag.Ellipsis, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opTypedEllipsis: { + Tag: nodetag.Ellipsis, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opStarExpr: { + Tag: nodetag.StarExpr, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opUnaryExpr: { + Tag: nodetag.UnaryExpr, + NumArgs: 1, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opBinaryExpr: { + Tag: nodetag.BinaryExpr, + NumArgs: 2, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opParenExpr: { + Tag: nodetag.ParenExpr, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opVariadicCallExpr: { + Tag: nodetag.CallExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opCallExpr: { + Tag: nodetag.CallExpr, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opAssignStmt: { + Tag: nodetag.AssignStmt, + NumArgs: 2, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opMultiAssignStmt: { + Tag: nodetag.AssignStmt, + NumArgs: 2, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 3, // 11 + }, + opBranchStmt: { + Tag: nodetag.BranchStmt, + NumArgs: 1, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, 
// 0 + }, + opSimpleLabeledBranchStmt: { + Tag: nodetag.BranchStmt, + NumArgs: 1, + ValueKind: tokenValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opLabeledBranchStmt: { + Tag: nodetag.BranchStmt, + NumArgs: 2, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSimpleLabeledStmt: { + Tag: nodetag.LabeledStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opLabeledStmt: { + Tag: nodetag.LabeledStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opBlockStmt: { + Tag: nodetag.BlockStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opExprStmt: { + Tag: nodetag.ExprStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opGoStmt: { + Tag: nodetag.GoStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opDeferStmt: { + Tag: nodetag.DeferStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSendStmt: { + Tag: nodetag.SendStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opEmptyStmt: { + Tag: nodetag.EmptyStmt, + NumArgs: 0, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opIncDecStmt: { + Tag: nodetag.IncDecStmt, + NumArgs: 1, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opReturnStmt: { + Tag: nodetag.ReturnStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opIfStmt: { + Tag: nodetag.IfStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opIfInitStmt: { + Tag: nodetag.IfStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opIfElseStmt: { + Tag: nodetag.IfStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opIfInitElseStmt: { + Tag: nodetag.IfStmt, + NumArgs: 4, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opIfNamedOptStmt: { + Tag: nodetag.IfStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opIfNamedOptElseStmt: { + Tag: nodetag.IfStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opSwitchStmt: { + Tag: nodetag.SwitchStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opSwitchTagStmt: { + Tag: nodetag.SwitchStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opSwitchInitStmt: { + Tag: nodetag.SwitchStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opSwitchInitTagStmt: { + Tag: nodetag.SwitchStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 4, // 100 + }, + opSelectStmt: { + Tag: nodetag.SelectStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opTypeSwitchStmt: { + Tag: nodetag.TypeSwitchStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opTypeSwitchInitStmt: { + Tag: nodetag.TypeSwitchStmt, + NumArgs: 3, + ValueKind: emptyValue, + 
ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opCaseClause: { + Tag: nodetag.CaseClause, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 3, // 11 + }, + opDefaultCaseClause: { + Tag: nodetag.CaseClause, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opCommClause: { + Tag: nodetag.CommClause, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 2, // 10 + }, + opDefaultCommClause: { + Tag: nodetag.CommClause, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opForStmt: { + Tag: nodetag.ForStmt, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForPostStmt: { + Tag: nodetag.ForStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForCondStmt: { + Tag: nodetag.ForStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForCondPostStmt: { + Tag: nodetag.ForStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForInitStmt: { + Tag: nodetag.ForStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForInitPostStmt: { + Tag: nodetag.ForStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForInitCondStmt: { + Tag: nodetag.ForStmt, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opForInitCondPostStmt: { + Tag: nodetag.ForStmt, + NumArgs: 4, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opRangeStmt: { + Tag: nodetag.RangeStmt, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opRangeKeyStmt: { + Tag: nodetag.RangeStmt, + NumArgs: 3, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opRangeKeyValueStmt: { + Tag: nodetag.RangeStmt, + NumArgs: 4, + ValueKind: tokenValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opFieldList: { + Tag: nodetag.Unknown, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opUnnamedField: { + Tag: nodetag.Unknown, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opSimpleField: { + Tag: nodetag.Unknown, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: stringValue, + VariadicMap: 0, // 0 + }, + opField: { + Tag: nodetag.Unknown, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opMultiField: { + Tag: nodetag.Unknown, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opValueInitSpec: { + Tag: nodetag.ValueSpec, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 3, // 11 + }, + opTypedValueInitSpec: { + Tag: nodetag.ValueSpec, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 5, // 101 + }, + opTypedValueSpec: { + Tag: nodetag.ValueSpec, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opTypeSpec: { + Tag: nodetag.TypeSpec, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opTypeAliasSpec: { + Tag: nodetag.TypeSpec, + NumArgs: 2, + ValueKind: 
emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opFuncDecl: { + Tag: nodetag.FuncDecl, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opMethodDecl: { + Tag: nodetag.FuncDecl, + NumArgs: 4, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opFuncProtoDecl: { + Tag: nodetag.FuncDecl, + NumArgs: 2, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opMethodProtoDecl: { + Tag: nodetag.FuncDecl, + NumArgs: 3, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, + opConstDecl: { + Tag: nodetag.GenDecl, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opVarDecl: { + Tag: nodetag.GenDecl, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opTypeDecl: { + Tag: nodetag.GenDecl, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 1, // 1 + }, + opEmptyPackage: { + Tag: nodetag.File, + NumArgs: 1, + ValueKind: emptyValue, + ExtraValueKind: emptyValue, + VariadicMap: 0, // 0 + }, +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/parse.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/parse.go new file mode 100644 index 000000000..e26a07212 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/parse.go @@ -0,0 +1,360 @@ +// Copyright (c) 2017, Daniel Martí +// See LICENSE for licensing information + +package gogrep + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "strings" + "text/template" +) + +func transformSource(expr string) (string, []posOffset, error) { + toks, err := tokenize([]byte(expr)) + if err != nil { + return "", nil, fmt.Errorf("cannot tokenize expr: %v", err) + } + var offs []posOffset + lbuf := lineColBuffer{line: 1, col: 1} + lastLit := false + for _, t := range toks { + if lbuf.offs >= t.pos.Offset && lastLit && t.lit != "" { + _, _ = lbuf.WriteString(" ") + } + for lbuf.offs < t.pos.Offset { + _, _ = lbuf.WriteString(" ") + } + if t.lit == "" { + _, _ = lbuf.WriteString(t.tok.String()) + lastLit = false + continue + } + _, _ = lbuf.WriteString(t.lit) + lastLit = strings.TrimSpace(t.lit) != "" + } + // trailing newlines can cause issues with commas + return strings.TrimSpace(lbuf.String()), offs, nil +} + +func parseExpr(fset *token.FileSet, expr string) (ast.Node, error) { + exprStr, offs, err := transformSource(expr) + if err != nil { + return nil, err + } + node, _, err := parseDetectingNode(fset, exprStr) + if err != nil { + err = subPosOffsets(err, offs...) + return nil, fmt.Errorf("cannot parse expr: %v", err) + } + return node, nil +} + +type lineColBuffer struct { + bytes.Buffer + line, col, offs int +} + +func (l *lineColBuffer) WriteString(s string) (n int, err error) { + for _, r := range s { + if r == '\n' { + l.line++ + l.col = 1 + } else { + l.col++ + } + l.offs++ + } + return l.Buffer.WriteString(s) +} + +var tmplDecl = template.Must(template.New("").Parse(`` + + `package p; {{ . }}`)) + +var tmplBlock = template.Must(template.New("").Parse(`` + + `package p; func _() { if true {{ . }} else {} }`)) + +var tmplExprs = template.Must(template.New("").Parse(`` + + `package p; var _ = []interface{}{ {{ . }}, }`)) + +var tmplStmts = template.Must(template.New("").Parse(`` + + `package p; func _() { {{ . 
}} }`)) + +var tmplType = template.Must(template.New("").Parse(`` + + `package p; var _ {{ . }}`)) + +var tmplValSpec = template.Must(template.New("").Parse(`` + + `package p; var {{ . }}`)) + +func execTmpl(tmpl *template.Template, src string) string { + var buf bytes.Buffer + if err := tmpl.Execute(&buf, src); err != nil { + panic(err) + } + return buf.String() +} + +func noBadNodes(node ast.Node) bool { + any := false + ast.Inspect(node, func(n ast.Node) bool { + if any { + return false + } + switch n.(type) { + case *ast.BadExpr, *ast.BadDecl: + any = true + } + return true + }) + return !any +} + +func parseType(fset *token.FileSet, src string) (ast.Expr, *ast.File, error) { + asType := execTmpl(tmplType, src) + f, err := parser.ParseFile(fset, "", asType, 0) + if err != nil { + err = subPosOffsets(err, posOffset{1, 1, 17}) + return nil, nil, err + } + vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec) + return vs.Type, f, nil +} + +// parseDetectingNode tries its best to parse the ast.Node contained in src, as +// one of: *ast.File, ast.Decl, ast.Expr, ast.Stmt, *ast.ValueSpec. +// It also returns the *ast.File used for the parsing, so that the returned node +// can be easily type-checked. +func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, *ast.File, error) { + file := fset.AddFile("", fset.Base(), len(src)) + scan := scanner.Scanner{} + scan.Init(file, []byte(src), nil, 0) + if _, tok, _ := scan.Scan(); tok == token.EOF { + return nil, nil, fmt.Errorf("empty source code") + } + var mainErr error + + // first try as a whole file + if f, err := parser.ParseFile(fset, "", src, 0); err == nil && noBadNodes(f) { + return f, f, nil + } + + // then as a single declaration, or many + asDecl := execTmpl(tmplDecl, src) + if f, err := parser.ParseFile(fset, "", asDecl, 0); err == nil && noBadNodes(f) { + if len(f.Decls) == 1 { + return f.Decls[0], f, nil + } + return f, f, nil + } + + // then as a block; otherwise blocks might be mistaken for composite + // literals further below + asBlock := execTmpl(tmplBlock, src) + if f, err := parser.ParseFile(fset, "", asBlock, 0); err == nil && noBadNodes(f) { + bl := f.Decls[0].(*ast.FuncDecl).Body + if len(bl.List) == 1 { + ifs := bl.List[0].(*ast.IfStmt) + return ifs.Body, f, nil + } + } + + // then as value expressions + asExprs := execTmpl(tmplExprs, src) + if f, err := parser.ParseFile(fset, "", asExprs, 0); err == nil && noBadNodes(f) { + vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec) + cl := vs.Values[0].(*ast.CompositeLit) + if len(cl.Elts) == 1 { + return cl.Elts[0], f, nil + } + return exprSlice(cl.Elts), f, nil + } + + // then try as statements + asStmts := execTmpl(tmplStmts, src) + f, err := parser.ParseFile(fset, "", asStmts, 0) + if err == nil && noBadNodes(f) { + bl := f.Decls[0].(*ast.FuncDecl).Body + if len(bl.List) == 1 { + return bl.List[0], f, nil + } + return stmtSlice(bl.List), f, nil + } + // Statements is what covers most cases, so it will give + // the best overall error message. Show positions + // relative to where the user's code is put in the + // template. + mainErr = subPosOffsets(err, posOffset{1, 1, 22}) + + // type expressions not yet picked up, for e.g. 
chans and interfaces + if typ, f, err := parseType(fset, src); err == nil && noBadNodes(f) { + return typ, f, nil + } + + // value specs + asValSpec := execTmpl(tmplValSpec, src) + if f, err := parser.ParseFile(fset, "", asValSpec, 0); err == nil && noBadNodes(f) { + vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec) + return vs, f, nil + } + return nil, nil, mainErr +} + +type posOffset struct { + atLine, atCol int + offset int +} + +func subPosOffsets(err error, offs ...posOffset) error { + list, ok := err.(scanner.ErrorList) + if !ok { + return err + } + for i, err := range list { + for _, off := range offs { + if err.Pos.Line != off.atLine { + continue + } + if err.Pos.Column < off.atCol { + continue + } + err.Pos.Column -= off.offset + } + list[i] = err + } + return list +} + +type fullToken struct { + pos token.Position + tok token.Token + lit string +} + +type caseStatus uint + +const ( + caseNone caseStatus = iota + caseNeedBlock + caseHere +) + +func tokenize(src []byte) ([]fullToken, error) { + var s scanner.Scanner + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + + var err error + onError := func(pos token.Position, msg string) { + switch msg { // allow certain extra chars + case `illegal character U+0024 '$'`: + case `illegal character U+007E '~'`: + default: + err = fmt.Errorf("%v: %s", pos, msg) + } + } + + // we will modify the input source under the scanner's nose to + // enable some features such as regexes. + s.Init(file, src, onError, scanner.ScanComments) + + next := func() fullToken { + pos, tok, lit := s.Scan() + return fullToken{fset.Position(pos), tok, lit} + } + + caseStat := caseNone + + var toks []fullToken + for t := next(); t.tok != token.EOF; t = next() { + switch t.lit { + case "$": // continues below + case "switch", "select", "case": + if t.lit == "case" { + caseStat = caseNone + } else { + caseStat = caseNeedBlock + } + fallthrough + default: // regular Go code + if t.tok == token.LBRACE && caseStat == caseNeedBlock { + caseStat = caseHere + } + toks = append(toks, t) + continue + } + wt, err := tokenizeWildcard(t.pos, next) + if err != nil { + return nil, err + } + if caseStat == caseHere { + toks = append(toks, fullToken{wt.pos, token.IDENT, "case"}) + } + toks = append(toks, wt) + if caseStat == caseHere { + toks = append(toks, fullToken{wt.pos, token.COLON, ""}) + toks = append(toks, fullToken{wt.pos, token.IDENT, "gogrep_body"}) + } + } + return toks, err +} + +type varInfo struct { + Name string + Seq bool +} + +func tokenizeWildcard(pos token.Position, next func() fullToken) (fullToken, error) { + t := next() + any := false + if t.tok == token.MUL { + t = next() + any = true + } + wildName := encodeWildName(t.lit, any) + wt := fullToken{pos, token.IDENT, wildName} + if t.tok != token.IDENT { + return wt, fmt.Errorf("%v: $ must be followed by ident, got %v", + t.pos, t.tok) + } + return wt, nil +} + +const wildSeparator = "ᐸᐳ" + +func isWildName(s string) bool { + return strings.HasPrefix(s, wildSeparator) +} + +func encodeWildName(name string, any bool) string { + suffix := "v" + if any { + suffix = "a" + } + return wildSeparator + name + wildSeparator + suffix +} + +func decodeWildName(s string) varInfo { + s = s[len(wildSeparator):] + nameEnd := strings.Index(s, wildSeparator) + name := s[:nameEnd] + s = s[nameEnd:] + s = s[len(wildSeparator):] + kind := s + return varInfo{Name: name, Seq: kind == "a"} +} + +func decodeWildNode(n ast.Node) varInfo { + switch n := n.(type) { + case *ast.ExprStmt: + return 
decodeWildNode(n.X) + case *ast.Ident: + if isWildName(n.Name) { + return decodeWildName(n.Name) + } + } + return varInfo{} +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/slices.go b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/slices.go new file mode 100644 index 000000000..a9f6c0ae0 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/gogrep/slices.go @@ -0,0 +1,51 @@ +package gogrep + +import ( + "go/ast" + "go/token" +) + +type nodeSlice interface { + at(i int) ast.Node + len() int + slice(from, to int) nodeSlice + ast.Node +} + +type ( + exprSlice []ast.Expr + stmtSlice []ast.Stmt + fieldSlice []*ast.Field + identSlice []*ast.Ident + specSlice []ast.Spec +) + +func (l exprSlice) len() int { return len(l) } +func (l exprSlice) at(i int) ast.Node { return l[i] } +func (l exprSlice) slice(i, j int) nodeSlice { return l[i:j] } +func (l exprSlice) Pos() token.Pos { return l[0].Pos() } +func (l exprSlice) End() token.Pos { return l[len(l)-1].End() } + +func (l stmtSlice) len() int { return len(l) } +func (l stmtSlice) at(i int) ast.Node { return l[i] } +func (l stmtSlice) slice(i, j int) nodeSlice { return l[i:j] } +func (l stmtSlice) Pos() token.Pos { return l[0].Pos() } +func (l stmtSlice) End() token.Pos { return l[len(l)-1].End() } + +func (l fieldSlice) len() int { return len(l) } +func (l fieldSlice) at(i int) ast.Node { return l[i] } +func (l fieldSlice) slice(i, j int) nodeSlice { return l[i:j] } +func (l fieldSlice) Pos() token.Pos { return l[0].Pos() } +func (l fieldSlice) End() token.Pos { return l[len(l)-1].End() } + +func (l identSlice) len() int { return len(l) } +func (l identSlice) at(i int) ast.Node { return l[i] } +func (l identSlice) slice(i, j int) nodeSlice { return l[i:j] } +func (l identSlice) Pos() token.Pos { return l[0].Pos() } +func (l identSlice) End() token.Pos { return l[len(l)-1].End() } + +func (l specSlice) len() int { return len(l) } +func (l specSlice) at(i int) ast.Node { return l[i] } +func (l specSlice) slice(i, j int) nodeSlice { return l[i:j] } +func (l specSlice) Pos() token.Pos { return l[0].Pos() } +func (l specSlice) End() token.Pos { return l[len(l)-1].End() } diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/golist/golist.go b/vendor/github.com/quasilyte/go-ruleguard/internal/golist/golist.go new file mode 100644 index 000000000..50f9cca0b --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/golist/golist.go @@ -0,0 +1,30 @@ +package golist + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os/exec" +) + +// Package is `go list --json` output structure. +type Package struct { + Dir string // directory containing package sources + ImportPath string // import path of package in dir + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) +} + +// JSON runs `go list --json` for the specified pkgName and returns the parsed JSON. 
+func JSON(pkgPath string) (*Package, error) { + out, err := exec.Command("go", "list", "--json", pkgPath).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("go list error (%v): %s", err, out) + } + + var pkg Package + if err := json.NewDecoder(bytes.NewReader(out)).Decode(&pkg); err != io.EOF && err != nil { + return nil, err + } + return &pkg, nil +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/xtypes/xtypes.go b/vendor/github.com/quasilyte/go-ruleguard/internal/xtypes/xtypes.go new file mode 100644 index 000000000..028a5f141 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/xtypes/xtypes.go @@ -0,0 +1,256 @@ +package xtypes + +import ( + "go/types" +) + +// Implements reports whether type v implements iface. +// +// Unlike types.Implements(), it permits X and Y named types +// to be considered identical even if their addresses are different. +func Implements(v types.Type, iface *types.Interface) bool { + if iface.Empty() { + return true + } + + if v, _ := v.Underlying().(*types.Interface); v != nil { + for i := 0; i < iface.NumMethods(); i++ { + m := iface.Method(i) + obj, _, _ := types.LookupFieldOrMethod(v, false, m.Pkg(), m.Name()) + switch { + case obj == nil: + return false + case !Identical(obj.Type(), m.Type()): + return false + } + } + return true + } + + // A concrete type v implements iface if it implements all methods of iface. + for i := 0; i < iface.NumMethods(); i++ { + m := iface.Method(i) + + obj, _, _ := types.LookupFieldOrMethod(v, false, m.Pkg(), m.Name()) + if obj == nil { + return false + } + + f, ok := obj.(*types.Func) + if !ok { + return false + } + + if !Identical(f.Type(), m.Type()) { + return false + } + } + + return true +} + +// Identical reports whether x and y are identical types. +// +// Unlike types.Identical(), it permits X and Y named types +// to be considered identical even if their addresses are different. +func Identical(x, y types.Type) bool { + return typeIdentical(x, y, nil) +} + +func typeIdentical(x, y types.Type, p *ifacePair) bool { + if x == y { + return true + } + + switch x := x.(type) { + case nil: + return false + + case *types.Basic: + // Basic types are singletons except for the rune and byte + // aliases, thus we cannot solely rely on the x == y check + // above. See also comment in TypeName.IsAlias. + if y, ok := y.(*types.Basic); ok { + return x.Kind() == y.Kind() + } + + case *types.Array: + // Two array types are identical if they have identical element types + // and the same array length. + if y, ok := y.(*types.Array); ok { + // If one or both array lengths are unknown (< 0) due to some error, + // assume they are the same to avoid spurious follow-on errors. + return (x.Len() < 0 || y.Len() < 0 || x.Len() == y.Len()) && typeIdentical(x.Elem(), y.Elem(), p) + } + + case *types.Slice: + // Two slice types are identical if they have identical element types. + if y, ok := y.(*types.Slice); ok { + return typeIdentical(x.Elem(), y.Elem(), p) + } + + case *types.Struct: + // Two struct types are identical if they have the same sequence of fields, + // and if corresponding fields have the same names, and identical types, + // and identical tags. Two embedded fields are considered to have the same + // name. Lower-case field names from different packages are always different. 
+ if y, ok := y.(*types.Struct); ok { + if x.NumFields() == y.NumFields() { + for i := 0; i < x.NumFields(); i++ { + f := x.Field(i) + g := y.Field(i) + if f.Embedded() != g.Embedded() || !sameID(f, g.Pkg(), g.Name()) || !typeIdentical(f.Type(), g.Type(), p) { + return false + } + } + return true + } + } + + case *types.Pointer: + // Two pointer types are identical if they have identical base types. + if y, ok := y.(*types.Pointer); ok { + return typeIdentical(x.Elem(), y.Elem(), p) + } + + case *types.Tuple: + // Two tuples types are identical if they have the same number of elements + // and corresponding elements have identical types. + if y, ok := y.(*types.Tuple); ok { + if x.Len() == y.Len() { + if x != nil { + for i := 0; i < x.Len(); i++ { + v := x.At(i) + w := y.At(i) + if !typeIdentical(v.Type(), w.Type(), p) { + return false + } + } + } + return true + } + } + + case *types.Signature: + // Two function types are identical if they have the same number of parameters + // and result values, corresponding parameter and result types are identical, + // and either both functions are variadic or neither is. Parameter and result + // names are not required to match. + if y, ok := y.(*types.Signature); ok { + return x.Variadic() == y.Variadic() && + typeIdentical(x.Params(), y.Params(), p) && + typeIdentical(x.Results(), y.Results(), p) + } + + case *types.Interface: + // Two interface types are identical if they have the same set of methods with + // the same names and identical function types. Lower-case method names from + // different packages are always different. The order of the methods is irrelevant. + if y, ok := y.(*types.Interface); ok { + if x.NumMethods() != y.NumMethods() { + return false + } + // Interface types are the only types where cycles can occur + // that are not "terminated" via named types; and such cycles + // can only be created via method parameter types that are + // anonymous interfaces (directly or indirectly) embedding + // the current interface. Example: + // + // type T interface { + // m() interface{T} + // } + // + // If two such (differently named) interfaces are compared, + // endless recursion occurs if the cycle is not detected. + // + // If x and y were compared before, they must be equal + // (if they were not, the recursion would have stopped); + // search the ifacePair stack for the same pair. + // + // This is a quadratic algorithm, but in practice these stacks + // are extremely short (bounded by the nesting depth of interface + // type declarations that recur via parameter types, an extremely + // rare occurrence). An alternative implementation might use a + // "visited" map, but that is probably less efficient overall. + q := &ifacePair{x, y, p} + for p != nil { + if p.identical(q) { + return true // same pair was compared before + } + p = p.prev + } + for i := 0; i < x.NumMethods(); i++ { + f := x.Method(i) + g := y.Method(i) + if f.Id() != g.Id() || !typeIdentical(f.Type(), g.Type(), q) { + return false + } + } + return true + } + + case *types.Map: + // Two map types are identical if they have identical key and value types. + if y, ok := y.(*types.Map); ok { + return typeIdentical(x.Key(), y.Key(), p) && typeIdentical(x.Elem(), y.Elem(), p) + } + + case *types.Chan: + // Two channel types are identical if they have identical value types + // and the same direction. 
+ if y, ok := y.(*types.Chan); ok { + return x.Dir() == y.Dir() && typeIdentical(x.Elem(), y.Elem(), p) + } + + case *types.Named: + // Two named types are identical if their type names originate + // in the same type declaration. + y, ok := y.(*types.Named) + if !ok { + return false + } + if x.Obj() == y.Obj() { + return true + } + return sameID(x.Obj(), y.Obj().Pkg(), y.Obj().Name()) + + default: + panic("unreachable") + } + + return false +} + +// An ifacePair is a node in a stack of interface type pairs compared for identity. +type ifacePair struct { + x *types.Interface + y *types.Interface + prev *ifacePair +} + +func (p *ifacePair) identical(q *ifacePair) bool { + return (p.x == q.x && p.y == q.y) || + (p.x == q.y && p.y == q.x) +} + +func sameID(obj types.Object, pkg *types.Package, name string) bool { + // spec: + // "Two identifiers are different if they are spelled differently, + // or if they appear in different packages and are not exported. + // Otherwise, they are the same." + if name != obj.Name() { + return false + } + // obj.Name == name + if obj.Exported() { + return true + } + // not exported, so packages must be the same (pkg == nil for + // fields in Universe scope; this can only happen for types + // introduced via Eval) + if pkg == nil || obj.Pkg() == nil { + return pkg == obj.Pkg() + } + // pkg != nil && obj.pkg != nil + return pkg.Path() == obj.Pkg().Path() +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/nodetag/nodetag.go b/vendor/github.com/quasilyte/go-ruleguard/nodetag/nodetag.go new file mode 100644 index 000000000..a9098c29f --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/nodetag/nodetag.go @@ -0,0 +1,277 @@ +package nodetag + +import "go/ast" + +type Value int + +const ( + Unknown Value = iota + + ArrayType + AssignStmt + BasicLit + BinaryExpr + BlockStmt + BranchStmt + CallExpr + CaseClause + ChanType + CommClause + CompositeLit + DeclStmt + DeferStmt + Ellipsis + EmptyStmt + ExprStmt + File + ForStmt + FuncDecl + FuncLit + FuncType + GenDecl + GoStmt + Ident + IfStmt + ImportSpec + IncDecStmt + IndexExpr + InterfaceType + KeyValueExpr + LabeledStmt + MapType + ParenExpr + RangeStmt + ReturnStmt + SelectStmt + SelectorExpr + SendStmt + SliceExpr + StarExpr + StructType + SwitchStmt + TypeAssertExpr + TypeSpec + TypeSwitchStmt + UnaryExpr + ValueSpec + + NumBuckets + + StmtList // gogrep stmt list + ExprList // gogrep expr list + + Node // ast.Node + Expr // ast.Expr + Stmt // ast.Stmt +) + +func FromNode(n ast.Node) Value { + switch n.(type) { + case *ast.ArrayType: + return ArrayType + case *ast.AssignStmt: + return AssignStmt + case *ast.BasicLit: + return BasicLit + case *ast.BinaryExpr: + return BinaryExpr + case *ast.BlockStmt: + return BlockStmt + case *ast.BranchStmt: + return BranchStmt + case *ast.CallExpr: + return CallExpr + case *ast.CaseClause: + return CaseClause + case *ast.ChanType: + return ChanType + case *ast.CommClause: + return CommClause + case *ast.CompositeLit: + return CompositeLit + case *ast.DeclStmt: + return DeclStmt + case *ast.DeferStmt: + return DeferStmt + case *ast.Ellipsis: + return Ellipsis + case *ast.EmptyStmt: + return EmptyStmt + case *ast.ExprStmt: + return ExprStmt + case *ast.File: + return File + case *ast.ForStmt: + return ForStmt + case *ast.FuncDecl: + return FuncDecl + case *ast.FuncLit: + return FuncLit + case *ast.FuncType: + return FuncType + case *ast.GenDecl: + return GenDecl + case *ast.GoStmt: + return GoStmt + case *ast.Ident: + return Ident + case *ast.IfStmt: + return IfStmt + 
case *ast.ImportSpec: + return ImportSpec + case *ast.IncDecStmt: + return IncDecStmt + case *ast.IndexExpr: + return IndexExpr + case *ast.InterfaceType: + return InterfaceType + case *ast.KeyValueExpr: + return KeyValueExpr + case *ast.LabeledStmt: + return LabeledStmt + case *ast.MapType: + return MapType + case *ast.ParenExpr: + return ParenExpr + case *ast.RangeStmt: + return RangeStmt + case *ast.ReturnStmt: + return ReturnStmt + case *ast.SelectStmt: + return SelectStmt + case *ast.SelectorExpr: + return SelectorExpr + case *ast.SendStmt: + return SendStmt + case *ast.SliceExpr: + return SliceExpr + case *ast.StarExpr: + return StarExpr + case *ast.StructType: + return StructType + case *ast.SwitchStmt: + return SwitchStmt + case *ast.TypeAssertExpr: + return TypeAssertExpr + case *ast.TypeSpec: + return TypeSpec + case *ast.TypeSwitchStmt: + return TypeSwitchStmt + case *ast.UnaryExpr: + return UnaryExpr + case *ast.ValueSpec: + return ValueSpec + default: + return Unknown + } +} + +func FromString(s string) Value { + switch s { + case "Expr": + return Expr + case "Stmt": + return Stmt + case "Node": + return Node + } + + switch s { + case "ArrayType": + return ArrayType + case "AssignStmt": + return AssignStmt + case "BasicLit": + return BasicLit + case "BinaryExpr": + return BinaryExpr + case "BlockStmt": + return BlockStmt + case "BranchStmt": + return BranchStmt + case "CallExpr": + return CallExpr + case "CaseClause": + return CaseClause + case "ChanType": + return ChanType + case "CommClause": + return CommClause + case "CompositeLit": + return CompositeLit + case "DeclStmt": + return DeclStmt + case "DeferStmt": + return DeferStmt + case "Ellipsis": + return Ellipsis + case "EmptyStmt": + return EmptyStmt + case "ExprStmt": + return ExprStmt + case "File": + return File + case "ForStmt": + return ForStmt + case "FuncDecl": + return FuncDecl + case "FuncLit": + return FuncLit + case "FuncType": + return FuncType + case "GenDecl": + return GenDecl + case "GoStmt": + return GoStmt + case "Ident": + return Ident + case "IfStmt": + return IfStmt + case "ImportSpec": + return ImportSpec + case "IncDecStmt": + return IncDecStmt + case "IndexExpr": + return IndexExpr + case "InterfaceType": + return InterfaceType + case "KeyValueExpr": + return KeyValueExpr + case "LabeledStmt": + return LabeledStmt + case "MapType": + return MapType + case "ParenExpr": + return ParenExpr + case "RangeStmt": + return RangeStmt + case "ReturnStmt": + return ReturnStmt + case "SelectStmt": + return SelectStmt + case "SelectorExpr": + return SelectorExpr + case "SendStmt": + return SendStmt + case "SliceExpr": + return SliceExpr + case "StarExpr": + return StarExpr + case "StructType": + return StructType + case "SwitchStmt": + return SwitchStmt + case "TypeAssertExpr": + return TypeAssertExpr + case "TypeSpec": + return TypeSpec + case "TypeSwitchStmt": + return TypeSwitchStmt + case "UnaryExpr": + return UnaryExpr + case "ValueSpec": + return ValueSpec + default: + return Unknown + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bundle.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bundle.go new file mode 100644 index 000000000..950e3c410 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bundle.go @@ -0,0 +1,19 @@ +package ruleguard + +import ( + "path/filepath" + + "github.com/quasilyte/go-ruleguard/internal/golist" +) + +func findBundleFiles(pkgPath string) ([]string, error) { + pkg, err := golist.JSON(pkgPath) + if err != nil { + return nil, err + 
} + files := make([]string, 0, len(pkg.GoFiles)) + for _, f := range pkg.GoFiles { + files = append(files, filepath.Join(pkg.Dir, f)) + } + return files, nil +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go new file mode 100644 index 000000000..66a4fd58a --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go @@ -0,0 +1,171 @@ +package ruleguard + +import ( + "errors" + "fmt" + "go/ast" + "go/types" + "io" + "strings" + "sync" + + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" + "github.com/quasilyte/go-ruleguard/ruleguard/typematch" +) + +type engine struct { + state *engineState + + ruleSet *goRuleSet +} + +func newEngine() *engine { + return &engine{ + state: newEngineState(), + } +} + +func (e *engine) Load(ctx *ParseContext, filename string, r io.Reader) error { + config := rulesParserConfig{ + state: e.state, + ctx: ctx, + importer: newGoImporter(e.state, goImporterConfig{ + fset: ctx.Fset, + debugImports: ctx.DebugImports, + debugPrint: ctx.DebugPrint, + }), + itab: typematch.NewImportsTab(stdlibPackages), + } + p := newRulesParser(config) + rset, err := p.ParseFile(filename, r) + if err != nil { + return err + } + + if e.ruleSet == nil { + e.ruleSet = rset + } else { + combinedRuleSet, err := mergeRuleSets([]*goRuleSet{e.ruleSet, rset}) + if err != nil { + return err + } + e.ruleSet = combinedRuleSet + } + + return nil +} + +func (e *engine) Run(ctx *RunContext, f *ast.File) error { + if e.ruleSet == nil { + return errors.New("used Run() with an empty rule set; forgot to call Load() first?") + } + rset := cloneRuleSet(e.ruleSet) + return newRulesRunner(ctx, e.state, rset).run(f) +} + +// engineState is a shared state inside the engine. +type engineState struct { + env *quasigo.Env + + typeByFQNMu sync.RWMutex + typeByFQN map[string]types.Type + + pkgCacheMu sync.RWMutex + // pkgCache contains all imported packages, from any importer. + pkgCache map[string]*types.Package +} + +func newEngineState() *engineState { + env := quasigo.NewEnv() + state := &engineState{ + env: env, + pkgCache: make(map[string]*types.Package), + typeByFQN: map[string]types.Type{}, + } + for key, typ := range typeByName { + state.typeByFQN[key] = typ + } + initEnv(state, env) + return state +} + +func (state *engineState) GetCachedPackage(pkgPath string) *types.Package { + state.pkgCacheMu.RLock() + pkg := state.pkgCache[pkgPath] + state.pkgCacheMu.RUnlock() + return pkg +} + +func (state *engineState) AddCachedPackage(pkgPath string, pkg *types.Package) { + state.pkgCacheMu.Lock() + state.addCachedPackage(pkgPath, pkg) + state.pkgCacheMu.Unlock() +} + +func (state *engineState) addCachedPackage(pkgPath string, pkg *types.Package) { + state.pkgCache[pkgPath] = pkg + + // Also add all complete packages that are dependencies of the pkg. + // This way we cache more and avoid duplicated package loading + // which can lead to typechecking issues. + // + // Note that it does not increase our memory consumption + // as these packages are reachable via pkg, so they'll + // not be freed by GC anyway. + for _, imported := range pkg.Imports() { + if imported.Complete() { + state.addCachedPackage(imported.Path(), imported) + } + } +} + +func (state *engineState) FindType(importer *goImporter, currentPkg *types.Package, fqn string) (types.Type, error) { + // TODO(quasilyte): we can pre-populate the cache during the Load() phase. 
+ // If we inspect the AST of a user function, all constant FQN can be preloaded. + // It could be a good thing as Load() is not expected to be executed in + // concurrent environment, so write-locking is not a big deal there. + + state.typeByFQNMu.RLock() + cachedType, ok := state.typeByFQN[fqn] + state.typeByFQNMu.RUnlock() + if ok { + return cachedType, nil + } + + // Code below is under a write critical section. + state.typeByFQNMu.Lock() + defer state.typeByFQNMu.Unlock() + + typ, err := state.findTypeNoCache(importer, currentPkg, fqn) + if err != nil { + return nil, err + } + state.typeByFQN[fqn] = typ + return typ, nil +} + +func (state *engineState) findTypeNoCache(importer *goImporter, currentPkg *types.Package, fqn string) (types.Type, error) { + pos := strings.LastIndexByte(fqn, '.') + if pos == -1 { + return nil, fmt.Errorf("%s is not a valid FQN", fqn) + } + pkgPath := fqn[:pos] + objectName := fqn[pos+1:] + var pkg *types.Package + if directDep := findDependency(currentPkg, pkgPath); directDep != nil { + pkg = directDep + } else { + loadedPkg, err := importer.Import(pkgPath) + if err != nil { + return nil, err + } + pkg = loadedPkg + } + obj := pkg.Scope().Lookup(objectName) + if obj == nil { + return nil, fmt.Errorf("%s is not found in %s", objectName, pkgPath) + } + typ := obj.Type() + state.typeByFQN[fqn] = typ + return typ, nil +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go new file mode 100644 index 000000000..4918cbb3c --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go @@ -0,0 +1,267 @@ +package ruleguard + +import ( + "go/ast" + "go/constant" + "go/token" + "go/types" + "path/filepath" + "regexp" + + "github.com/quasilyte/go-ruleguard/internal/xtypes" + "github.com/quasilyte/go-ruleguard/nodetag" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" + "github.com/quasilyte/go-ruleguard/ruleguard/typematch" +) + +const filterSuccess = matchFilterResult("") + +func filterFailure(reason string) matchFilterResult { + return matchFilterResult(reason) +} + +func makeNotFilter(src string, x matchFilter) filterFunc { + return func(params *filterParams) matchFilterResult { + if x.fn(params).Matched() { + return matchFilterResult(src) + } + return "" + } +} + +func makeAndFilter(lhs, rhs matchFilter) filterFunc { + return func(params *filterParams) matchFilterResult { + if lhsResult := lhs.fn(params); !lhsResult.Matched() { + return lhsResult + } + return rhs.fn(params) + } +} + +func makeOrFilter(lhs, rhs matchFilter) filterFunc { + return func(params *filterParams) matchFilterResult { + if lhsResult := lhs.fn(params); lhsResult.Matched() { + return filterSuccess + } + return rhs.fn(params) + } +} + +func makeFileImportsFilter(src, pkgPath string) filterFunc { + return func(params *filterParams) matchFilterResult { + _, imported := params.imports[pkgPath] + if imported { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeFilePkgPathMatchesFilter(src string, re *regexp.Regexp) filterFunc { + return func(params *filterParams) matchFilterResult { + pkgPath := params.ctx.Pkg.Path() + if re.MatchString(pkgPath) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeFileNameMatchesFilter(src string, re *regexp.Regexp) filterFunc { + return func(params *filterParams) matchFilterResult { + if re.MatchString(filepath.Base(params.filename)) { + return filterSuccess + } + return filterFailure(src) + } +} + +func 
makePureFilter(src, varname string) filterFunc { + return func(params *filterParams) matchFilterResult { + n := params.subExpr(varname) + if isPure(params.ctx.Types, n) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeConstFilter(src, varname string) filterFunc { + return func(params *filterParams) matchFilterResult { + n := params.subExpr(varname) + if isConstant(params.ctx.Types, n) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeAddressableFilter(src, varname string) filterFunc { + return func(params *filterParams) matchFilterResult { + n := params.subExpr(varname) + if isAddressable(params.ctx.Types, n) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeCustomVarFilter(src, varname string, fn *quasigo.Func) filterFunc { + return func(params *filterParams) matchFilterResult { + // TODO(quasilyte): what if bytecode function panics due to the programming error? + // We should probably catch the panic here, print trace and return "false" + // from the filter (or even propagate that panic to let it crash). + params.varname = varname + result := quasigo.Call(params.env, fn, params) + if result.Value().(bool) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTypeImplementsFilter(src, varname string, iface *types.Interface) filterFunc { + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)) + if xtypes.Implements(typ, iface) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Pattern) filterFunc { + if underlying { + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)).Underlying() + if pat.MatchIdentical(typ) { + return filterSuccess + } + return filterFailure(src) + } + } + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)) + if pat.MatchIdentical(typ) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTypeConvertibleToFilter(src, varname string, dstType types.Type) filterFunc { + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)) + if types.ConvertibleTo(typ, dstType) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTypeAssignableToFilter(src, varname string, dstType types.Type) filterFunc { + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)) + if types.AssignableTo(typ, dstType) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTypeSizeConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { + return func(params *filterParams) matchFilterResult { + typ := params.typeofNode(params.subExpr(varname)) + lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(typ)) + if constant.Compare(lhsValue, op, rhsValue) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeValueIntConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { + return func(params *filterParams) matchFilterResult { + lhsValue := intValueOf(params.ctx.Types, params.subExpr(varname)) + if lhsValue == nil { + return filterFailure(src) // The value is unknown + } + if constant.Compare(lhsValue, op, rhsValue) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeValueIntFilter(src, varname 
string, op token.Token, rhsVarname string) filterFunc { + return func(params *filterParams) matchFilterResult { + lhsValue := intValueOf(params.ctx.Types, params.subExpr(varname)) + if lhsValue == nil { + return filterFailure(src) + } + rhsValue := intValueOf(params.ctx.Types, params.subExpr(rhsVarname)) + if rhsValue == nil { + return filterFailure(src) + } + if constant.Compare(lhsValue, op, rhsValue) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTextConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { + return func(params *filterParams) matchFilterResult { + s := params.nodeText(params.subNode(varname)) + lhsValue := constant.MakeString(string(s)) + if constant.Compare(lhsValue, op, rhsValue) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTextFilter(src, varname string, op token.Token, rhsVarname string) filterFunc { + return func(params *filterParams) matchFilterResult { + s1 := params.nodeText(params.subNode(varname)) + lhsValue := constant.MakeString(string(s1)) + n, _ := params.match.CapturedByName(rhsVarname) + s2 := params.nodeText(n) + rhsValue := constant.MakeString(string(s2)) + if constant.Compare(lhsValue, op, rhsValue) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeTextMatchesFilter(src, varname string, re *regexp.Regexp) filterFunc { + return func(params *filterParams) matchFilterResult { + if re.Match(params.nodeText(params.subNode(varname))) { + return filterSuccess + } + return filterFailure(src) + } +} + +func makeNodeIsFilter(src, varname string, tag nodetag.Value) filterFunc { + // TODO: add comment nodes support? + return func(params *filterParams) matchFilterResult { + n := params.subExpr(varname) + var matched bool + switch tag { + case nodetag.Expr: + _, matched = n.(ast.Expr) + case nodetag.Stmt: + _, matched = n.(ast.Stmt) + case nodetag.Node: + _, matched = n.(ast.Node) + default: + matched = (tag == nodetag.FromNode(n)) + } + if matched { + return filterSuccess + } + return filterFailure(src) + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go new file mode 100644 index 000000000..08aee9132 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go @@ -0,0 +1,146 @@ +package ruleguard + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + + "github.com/quasilyte/go-ruleguard/internal/gogrep" + "github.com/quasilyte/go-ruleguard/nodetag" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" +) + +type goRuleSet struct { + universal *scopedGoRuleSet + + groups map[string]token.Position // To handle redefinitions +} + +type scopedGoRuleSet struct { + categorizedNum int + rulesByTag [nodetag.NumBuckets][]goRule + commentRules []goCommentRule +} + +type goCommentRule struct { + base goRule + pat *regexp.Regexp + captureGroups bool +} + +type goRule struct { + group string + filename string + line int + pat *gogrep.Pattern + msg string + location string + suggestion string + filter matchFilter +} + +type matchFilterResult string + +func (s matchFilterResult) Matched() bool { return s == "" } + +func (s matchFilterResult) RejectReason() string { return string(s) } + +type filterFunc func(*filterParams) matchFilterResult + +type matchFilter struct { + src string + fn func(*filterParams) matchFilterResult +} + +type filterParams struct { + ctx *RunContext + filename string + imports map[string]struct{} + env 
*quasigo.EvalEnv + + importer *goImporter + + match matchData + + nodeText func(n ast.Node) []byte + + // varname is set only for custom filters before bytecode function is called. + varname string +} + +func (params *filterParams) subNode(name string) ast.Node { + n, _ := params.match.CapturedByName(name) + return n +} + +func (params *filterParams) subExpr(name string) ast.Expr { + n, _ := params.match.CapturedByName(name) + switch n := n.(type) { + case ast.Expr: + return n + case *ast.ExprStmt: + return n.X + default: + return nil + } +} + +func (params *filterParams) typeofNode(n ast.Node) types.Type { + if e, ok := n.(ast.Expr); ok { + if typ := params.ctx.Types.TypeOf(e); typ != nil { + return typ + } + } + + return types.Typ[types.Invalid] +} + +func cloneRuleSet(rset *goRuleSet) *goRuleSet { + out, err := mergeRuleSets([]*goRuleSet{rset}) + if err != nil { + panic(err) // Should never happen + } + return out +} + +func mergeRuleSets(toMerge []*goRuleSet) (*goRuleSet, error) { + out := &goRuleSet{ + universal: &scopedGoRuleSet{}, + groups: make(map[string]token.Position), + } + + for _, x := range toMerge { + out.universal = appendScopedRuleSet(out.universal, x.universal) + for group, pos := range x.groups { + if prevPos, ok := out.groups[group]; ok { + newRef := fmt.Sprintf("%s:%d", pos.Filename, pos.Line) + oldRef := fmt.Sprintf("%s:%d", prevPos.Filename, prevPos.Line) + return nil, fmt.Errorf("%s: redefinition of %s(), previously defined at %s", newRef, group, oldRef) + } + out.groups[group] = pos + } + } + + return out, nil +} + +func appendScopedRuleSet(dst, src *scopedGoRuleSet) *scopedGoRuleSet { + for tag, rules := range src.rulesByTag { + dst.rulesByTag[tag] = append(dst.rulesByTag[tag], cloneRuleSlice(rules)...) + dst.categorizedNum += len(rules) + } + dst.commentRules = append(dst.commentRules, src.commentRules...) + return dst +} + +func cloneRuleSlice(slice []goRule) []goRule { + out := make([]goRule, len(slice)) + for i, rule := range slice { + clone := rule + clone.pat = rule.pat.Clone() + out[i] = clone + } + return out +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/goutil.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/goutil.go new file mode 100644 index 000000000..6cc4d9056 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/goutil.go @@ -0,0 +1,21 @@ +package goutil + +import ( + "go/ast" + "go/printer" + "go/token" + "strings" +) + +// SprintNode returns the textual representation of n. +// If fset is nil, freshly created file set will be used. 
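+//
+// Illustrative usage (editor's note; assumes an expression parsed with go/parser):
+//
+//	expr, _ := parser.ParseExpr("a + b")
+//	_ = SprintNode(nil, expr) // yields "a + b"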
+func SprintNode(fset *token.FileSet, n ast.Node) string { + if fset == nil { + fset = token.NewFileSet() + } + var buf strings.Builder + if err := printer.Fprint(&buf, fset, n); err != nil { + return "" + } + return buf.String() +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/resolve.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/resolve.go new file mode 100644 index 000000000..8705707ac --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/goutil/resolve.go @@ -0,0 +1,33 @@ +package goutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" +) + +func ResolveFunc(info *types.Info, callable ast.Expr) (ast.Expr, *types.Func) { + switch callable := astutil.Unparen(callable).(type) { + case *ast.Ident: + sig, ok := info.ObjectOf(callable).(*types.Func) + if !ok { + return nil, nil + } + return nil, sig + + case *ast.SelectorExpr: + sig, ok := info.ObjectOf(callable.Sel).(*types.Func) + if !ok { + return nil, nil + } + isMethod := sig.Type().(*types.Signature).Recv() != nil + if _, ok := callable.X.(*ast.Ident); ok && !isMethod { + return nil, sig + } + return callable.X, sig + + default: + return nil, nil + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/importer.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/importer.go new file mode 100644 index 000000000..06a0bbf9f --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/importer.go @@ -0,0 +1,116 @@ +package ruleguard + +import ( + "fmt" + "go/ast" + "go/importer" + "go/parser" + "go/token" + "go/types" + "path/filepath" + "runtime" + + "github.com/quasilyte/go-ruleguard/internal/golist" +) + +// goImporter is a `types.Importer` that tries to load a package no matter what. +// It iterates through multiple import strategies and accepts whatever succeeds first. +type goImporter struct { + // TODO(quasilyte): share importers with gogrep? + + state *engineState + + defaultImporter types.Importer + srcImporter types.Importer + + fset *token.FileSet + + debugImports bool + debugPrint func(string) +} + +type goImporterConfig struct { + fset *token.FileSet + debugImports bool + debugPrint func(string) +} + +func newGoImporter(state *engineState, config goImporterConfig) *goImporter { + return &goImporter{ + state: state, + fset: config.fset, + debugImports: config.debugImports, + debugPrint: config.debugPrint, + defaultImporter: importer.Default(), + srcImporter: importer.ForCompiler(config.fset, "source", nil), + } +} + +func (imp *goImporter) Import(path string) (*types.Package, error) { + if pkg := imp.state.GetCachedPackage(path); pkg != nil { + if imp.debugImports { + imp.debugPrint(fmt.Sprintf(`imported "%s" from importer cache`, path)) + } + return pkg, nil + } + + pkg, err1 := imp.srcImporter.Import(path) + if err1 == nil { + imp.state.AddCachedPackage(path, pkg) + if imp.debugImports { + imp.debugPrint(fmt.Sprintf(`imported "%s" from source importer`, path)) + } + return pkg, nil + } + + pkg, err2 := imp.defaultImporter.Import(path) + if err2 == nil { + imp.state.AddCachedPackage(path, pkg) + if imp.debugImports { + imp.debugPrint(fmt.Sprintf(`imported "%s" from %s importer`, path, runtime.Compiler)) + } + return pkg, nil + } + + // Fallback to `go list` as a last resort. 
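+	// (Editor's note, illustrative:) golistImport below effectively shells out to
+	//
+	//	go list --json <path>
+	//
+	// via golist.JSON, then parses the reported GoFiles from pkg.Dir and
+	// type-checks them; see golistImport for the exact behavior.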
+ pkg, err3 := imp.golistImport(path) + if err3 == nil { + imp.state.AddCachedPackage(path, pkg) + if imp.debugImports { + imp.debugPrint(fmt.Sprintf(`imported "%s" from golist importer`, path)) + } + return pkg, nil + } + + if imp.debugImports { + imp.debugPrint(fmt.Sprintf(`failed to import "%s":`, path)) + imp.debugPrint(fmt.Sprintf(" source importer: %v", err1)) + imp.debugPrint(fmt.Sprintf(" %s importer: %v", runtime.Compiler, err2)) + imp.debugPrint(fmt.Sprintf(" golist importer: %v", err3)) + } + + return nil, err2 +} + +func (imp *goImporter) golistImport(path string) (*types.Package, error) { + golistPkg, err := golist.JSON(path) + if err != nil { + return nil, err + } + + files := make([]*ast.File, 0, len(golistPkg.GoFiles)) + for _, filename := range golistPkg.GoFiles { + fullname := filepath.Join(golistPkg.Dir, filename) + f, err := parser.ParseFile(imp.fset, fullname, nil, 0) + if err != nil { + return nil, err + } + files = append(files, f) + } + + // TODO: do we want to assign imp as importer for this nested typecherker? + // Otherwise it won't be able to resolve imports. + var typecheker types.Config + var info types.Info + return typecheker.Check(path, imp.fset, files, &info) +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/libdsl.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/libdsl.go new file mode 100644 index 000000000..6202b7b8a --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/libdsl.go @@ -0,0 +1,276 @@ +package ruleguard + +import ( + "go/types" + + "github.com/quasilyte/go-ruleguard/internal/xtypes" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" +) + +// This file implements `dsl/*` packages as native functions in quasigo. +// +// Every function and method defined in any `dsl/*` package should have +// associated Go function that implements it. +// +// In quasigo, it's impossible to have a pointer to an interface and +// non-pointer struct type. All interface type methods have FQN without `*` prefix +// while all struct type methods always begin with `*`. +// +// Fields are readonly. +// Field access is compiled into a method call that have a name identical to the field. +// For example, `foo.Bar` field access will be compiled as `foo.Bar()`. +// This may change in the future; benchmarks are needed to figure out +// what is more efficient: reflect-based field access or a function call. +// +// To keep this code organized, every type and package functions are represented +// as structs with methods. Then we bind a method value to quasigo symbol. +// The naming scheme is `dsl{$name}Package` for packages and `dsl{$pkg}{$name}` for types. 
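+//
+// Editor's note: the sketch below is illustrative only (there is no dsl/types.Map
+// wrapper in this file); it shows the shape a new native binding would take,
+// mirroring dslTypesSlice and friends defined further down:
+//
+//	type dslTypesMap struct{}
+//
+//	func (native dslTypesMap) funcs() map[string]func(*quasigo.ValueStack) {
+//		return map[string]func(*quasigo.ValueStack){
+//			"Key":  native.Key,
+//			"Elem": native.Elem,
+//		}
+//	}
+//
+//	func (dslTypesMap) Key(stack *quasigo.ValueStack)  { stack.Push(stack.Pop().(*types.Map).Key()) }
+//	func (dslTypesMap) Elem(stack *quasigo.ValueStack) { stack.Push(stack.Pop().(*types.Map).Elem()) }
+//
+// A binding like this would also need an entry in the nativeTypes map inside
+// initEnv below (for a struct wrapper, under a `*`-prefixed qualifier such as
+// `*github.com/quasilyte/go-ruleguard/dsl/types.Map`).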
+ +func initEnv(state *engineState, env *quasigo.Env) { + nativeTypes := map[string]quasigoNative{ + `*github.com/quasilyte/go-ruleguard/dsl.VarFilterContext`: dslVarFilterContext{state: state}, + `github.com/quasilyte/go-ruleguard/dsl/types.Type`: dslTypesType{}, + `*github.com/quasilyte/go-ruleguard/dsl/types.Interface`: dslTypesInterface{}, + `*github.com/quasilyte/go-ruleguard/dsl/types.Pointer`: dslTypesPointer{}, + `*github.com/quasilyte/go-ruleguard/dsl/types.Array`: dslTypesArray{}, + `*github.com/quasilyte/go-ruleguard/dsl/types.Slice`: dslTypesSlice{}, + } + + for qualifier, typ := range nativeTypes { + for methodName, fn := range typ.funcs() { + env.AddNativeMethod(qualifier, methodName, fn) + } + } + + nativePackages := map[string]quasigoNative{ + `github.com/quasilyte/go-ruleguard/dsl/types`: dslTypesPackage{}, + } + + for qualifier, pkg := range nativePackages { + for funcName, fn := range pkg.funcs() { + env.AddNativeMethod(qualifier, funcName, fn) + } + } +} + +type quasigoNative interface { + funcs() map[string]func(*quasigo.ValueStack) +} + +type dslTypesType struct{} + +func (native dslTypesType) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Underlying": native.Underlying, + "String": native.String, + } +} + +func (dslTypesType) Underlying(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(types.Type).Underlying()) +} + +func (dslTypesType) String(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(types.Type).String()) +} + +type dslTypesInterface struct{} + +func (native dslTypesInterface) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Underlying": native.Underlying, + "String": native.String, + } +} + +func (dslTypesInterface) Underlying(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Interface).Underlying()) +} + +func (dslTypesInterface) String(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Interface).String()) +} + +type dslTypesSlice struct{} + +func (native dslTypesSlice) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Underlying": native.Underlying, + "String": native.String, + "Elem": native.Elem, + } +} + +func (dslTypesSlice) Underlying(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Slice).Underlying()) +} + +func (dslTypesSlice) String(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Slice).String()) +} + +func (dslTypesSlice) Elem(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Slice).Elem()) +} + +type dslTypesArray struct{} + +func (native dslTypesArray) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Underlying": native.Underlying, + "String": native.String, + "Elem": native.Elem, + "Len": native.Len, + } +} + +func (dslTypesArray) Underlying(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Array).Underlying()) +} + +func (dslTypesArray) String(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Array).String()) +} + +func (dslTypesArray) Elem(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Array).Elem()) +} + +func (dslTypesArray) Len(stack *quasigo.ValueStack) { + stack.PushInt(int(stack.Pop().(*types.Array).Len())) +} + +type dslTypesPointer struct{} + +func (native dslTypesPointer) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Underlying": native.Underlying, + "String": native.String, + 
"Elem": native.Elem, + } +} + +func (dslTypesPointer) Underlying(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Pointer).Underlying()) +} + +func (dslTypesPointer) String(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Pointer).String()) +} + +func (dslTypesPointer) Elem(stack *quasigo.ValueStack) { + stack.Push(stack.Pop().(*types.Pointer).Elem()) +} + +type dslTypesPackage struct{} + +func (native dslTypesPackage) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Implements": native.Implements, + "Identical": native.Identical, + "NewArray": native.NewArray, + "NewSlice": native.NewSlice, + "NewPointer": native.NewPointer, + "AsArray": native.AsArray, + "AsSlice": native.AsSlice, + "AsPointer": native.AsPointer, + "AsInterface": native.AsInterface, + } +} + +func (dslTypesPackage) Implements(stack *quasigo.ValueStack) { + iface := stack.Pop().(*types.Interface) + typ := stack.Pop().(types.Type) + stack.Push(xtypes.Implements(typ, iface)) +} + +func (dslTypesPackage) Identical(stack *quasigo.ValueStack) { + y := stack.Pop().(types.Type) + x := stack.Pop().(types.Type) + stack.Push(xtypes.Identical(x, y)) +} + +func (dslTypesPackage) NewArray(stack *quasigo.ValueStack) { + length := stack.PopInt() + typ := stack.Pop().(types.Type) + stack.Push(types.NewArray(typ, int64(length))) +} + +func (dslTypesPackage) NewSlice(stack *quasigo.ValueStack) { + typ := stack.Pop().(types.Type) + stack.Push(types.NewSlice(typ)) +} + +func (dslTypesPackage) NewPointer(stack *quasigo.ValueStack) { + typ := stack.Pop().(types.Type) + stack.Push(types.NewPointer(typ)) +} + +func (dslTypesPackage) AsArray(stack *quasigo.ValueStack) { + typ, _ := stack.Pop().(types.Type).(*types.Array) + stack.Push(typ) +} + +func (dslTypesPackage) AsSlice(stack *quasigo.ValueStack) { + typ, _ := stack.Pop().(types.Type).(*types.Slice) + stack.Push(typ) +} + +func (dslTypesPackage) AsPointer(stack *quasigo.ValueStack) { + typ, _ := stack.Pop().(types.Type).(*types.Pointer) + stack.Push(typ) +} + +func (dslTypesPackage) AsInterface(stack *quasigo.ValueStack) { + typ, _ := stack.Pop().(types.Type).(*types.Interface) + stack.Push(typ) +} + +type dslVarFilterContext struct { + state *engineState +} + +func (native dslVarFilterContext) funcs() map[string]func(*quasigo.ValueStack) { + return map[string]func(*quasigo.ValueStack){ + "Type": native.Type, + "SizeOf": native.SizeOf, + "GetType": native.GetType, + "GetInterface": native.GetInterface, + } +} + +func (dslVarFilterContext) Type(stack *quasigo.ValueStack) { + params := stack.Pop().(*filterParams) + typ := params.typeofNode(params.subExpr(params.varname)) + stack.Push(typ) +} + +func (native dslVarFilterContext) SizeOf(stack *quasigo.ValueStack) { + typ := stack.Pop().(types.Type) + params := stack.Pop().(*filterParams) + stack.PushInt(int(params.ctx.Sizes.Sizeof(typ))) +} + +func (native dslVarFilterContext) GetType(stack *quasigo.ValueStack) { + fqn := stack.Pop().(string) + params := stack.Pop().(*filterParams) + typ, err := native.state.FindType(params.importer, params.ctx.Pkg, fqn) + if err != nil { + panic(err) + } + stack.Push(typ) +} + +func (native dslVarFilterContext) GetInterface(stack *quasigo.ValueStack) { + fqn := stack.Pop().(string) + params := stack.Pop().(*filterParams) + typ, err := native.state.FindType(params.importer, params.ctx.Pkg, fqn) + if err != nil { + panic(err) + } + if ifaceType, ok := typ.Underlying().(*types.Interface); ok { + stack.Push(ifaceType) + return + } + 
stack.Push((*types.Interface)(nil)) // Not found or not an interface +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go new file mode 100644 index 000000000..c9d64aff7 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go @@ -0,0 +1,46 @@ +package ruleguard + +import ( + "go/ast" + + "github.com/quasilyte/go-ruleguard/internal/gogrep" +) + +// matchData is used to handle both regexp and AST match sets in the same way. +type matchData interface { + // TODO: don't use gogrep.CapturedNode type here. + + Node() ast.Node + CaptureList() []gogrep.CapturedNode + CapturedByName(name string) (ast.Node, bool) +} + +type commentMatchData struct { + node ast.Node + capture []gogrep.CapturedNode +} + +func (m commentMatchData) Node() ast.Node { return m.node } + +func (m commentMatchData) CaptureList() []gogrep.CapturedNode { return m.capture } + +func (m commentMatchData) CapturedByName(name string) (ast.Node, bool) { + for _, c := range m.capture { + if c.Name == name { + return c.Node, true + } + } + return nil, false +} + +type astMatchData struct { + match gogrep.MatchData +} + +func (m astMatchData) Node() ast.Node { return m.match.Node } + +func (m astMatchData) CaptureList() []gogrep.CapturedNode { return m.match.Capture } + +func (m astMatchData) CapturedByName(name string) (ast.Node, bool) { + return m.match.CapturedByName(name) +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go new file mode 100644 index 000000000..94826d497 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go @@ -0,0 +1,988 @@ +package ruleguard + +import ( + "bytes" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "io" + "io/ioutil" + "path" + "regexp" + "strconv" + + "github.com/quasilyte/go-ruleguard/internal/gogrep" + "github.com/quasilyte/go-ruleguard/nodetag" + "github.com/quasilyte/go-ruleguard/ruleguard/goutil" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" + "github.com/quasilyte/go-ruleguard/ruleguard/typematch" +) + +// TODO(quasilyte): use source code byte slicing instead of SprintNode? + +type parseError struct{ error } + +// ImportError is returned when a ruleguard file references a package that cannot be imported. 
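+//
+// Illustrative usage (editor's note): since the type implements Unwrap, callers
+// can detect it with the standard errors helpers:
+//
+//	var impErr *ImportError
+//	if errors.As(err, &impErr) {
+//		// the rules file imports a package that could not be loaded;
+//		// impErr.Unwrap() carries the underlying cause
+//	}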
+type ImportError struct { + msg string + err error +} + +func (e *ImportError) Error() string { return e.msg } +func (e *ImportError) Unwrap() error { return e.err } + +type rulesParser struct { + state *engineState + ctx *ParseContext + + prefix string // For imported packages, a prefix that is added to a rule group name + importedPkg string // Package path; only for imported packages + + filename string + group string + res *goRuleSet + pkg *types.Package + types *types.Info + + importer *goImporter + + itab *typematch.ImportsTab + + imported []*goRuleSet + + dslPkgname string // The local name of the "ruleguard/dsl" package (usually its just "dsl") +} + +type rulesParserConfig struct { + state *engineState + + ctx *ParseContext + + importer *goImporter + + prefix string + importedPkg string + + itab *typematch.ImportsTab +} + +func newRulesParser(config rulesParserConfig) *rulesParser { + return &rulesParser{ + state: config.state, + ctx: config.ctx, + importer: config.importer, + prefix: config.prefix, + importedPkg: config.importedPkg, + itab: config.itab, + } +} + +func (p *rulesParser) ParseFile(filename string, r io.Reader) (*goRuleSet, error) { + p.dslPkgname = "dsl" + p.filename = filename + p.res = &goRuleSet{ + universal: &scopedGoRuleSet{}, + groups: make(map[string]token.Position), + } + + parserFlags := parser.Mode(0) + f, err := parser.ParseFile(p.ctx.Fset, filename, r, parserFlags) + if err != nil { + return nil, fmt.Errorf("parse file error: %w", err) + } + + for _, imp := range f.Imports { + importPath, err := strconv.Unquote(imp.Path.Value) + if err != nil { + return nil, p.errorf(imp, fmt.Errorf("unquote %s import path: %w", imp.Path.Value, err)) + } + if importPath == "github.com/quasilyte/go-ruleguard/dsl" { + if imp.Name != nil { + p.dslPkgname = imp.Name.Name + } + } + } + + if f.Name.Name != "gorules" { + return nil, fmt.Errorf("expected a gorules package name, found %s", f.Name.Name) + } + + typechecker := types.Config{Importer: p.importer} + p.types = &types.Info{ + Types: map[ast.Expr]types.TypeAndValue{}, + Uses: map[*ast.Ident]types.Object{}, + Defs: map[*ast.Ident]types.Object{}, + } + pkg, err := typechecker.Check("gorules", p.ctx.Fset, []*ast.File{f}, p.types) + if err != nil { + return nil, fmt.Errorf("typechecker error: %w", err) + } + p.pkg = pkg + + var matcherFuncs []*ast.FuncDecl + var userFuncs []*ast.FuncDecl + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if decl.Name.String() == "init" { + if err := p.parseInitFunc(decl); err != nil { + return nil, err + } + continue + } + + if p.isMatcherFunc(decl) { + matcherFuncs = append(matcherFuncs, decl) + } else { + userFuncs = append(userFuncs, decl) + } + } + + for _, decl := range userFuncs { + if err := p.parseUserFunc(decl); err != nil { + return nil, err + } + } + for _, decl := range matcherFuncs { + if err := p.parseRuleGroup(decl); err != nil { + return nil, err + } + } + + if len(p.imported) != 0 { + toMerge := []*goRuleSet{p.res} + toMerge = append(toMerge, p.imported...) 
+ merged, err := mergeRuleSets(toMerge) + if err != nil { + return nil, err + } + p.res = merged + } + + return p.res, nil +} + +func (p *rulesParser) parseUserFunc(f *ast.FuncDecl) error { + ctx := &quasigo.CompileContext{ + Env: p.state.env, + Types: p.types, + Fset: p.ctx.Fset, + } + compiled, err := quasigo.Compile(ctx, f) + if err != nil { + return err + } + if p.ctx.DebugFilter == f.Name.String() { + p.ctx.DebugPrint(quasigo.Disasm(p.state.env, compiled)) + } + ctx.Env.AddFunc(p.pkg.Path(), f.Name.String(), compiled) + return nil +} + +func (p *rulesParser) parseInitFunc(f *ast.FuncDecl) error { + type bundleImport struct { + node ast.Node + prefix string + pkgPath string + } + + var imported []bundleImport + + for _, stmt := range f.Body.List { + exprStmt, ok := stmt.(*ast.ExprStmt) + if !ok { + return p.errorf(stmt, errors.New("unsupported statement")) + } + call, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + return p.errorf(stmt, errors.New("unsupported expr")) + } + fn, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return p.errorf(stmt, errors.New("unsupported call")) + } + pkg, ok := fn.X.(*ast.Ident) + if !ok || pkg.Name != p.dslPkgname { + return p.errorf(stmt, errors.New("unsupported call")) + } + + switch fn.Sel.Name { + case "ImportRules": + if p.importedPkg != "" { + return p.errorf(call, errors.New("imports from imported packages are not supported yet")) + } + prefix := p.parseStringArg(call.Args[0]) + bundleSelector, ok := call.Args[1].(*ast.SelectorExpr) + if !ok { + return p.errorf(call.Args[1], errors.New("expected a `pkgname.Bundle` argument")) + } + bundleObj := p.types.ObjectOf(bundleSelector.Sel) + imported = append(imported, bundleImport{ + node: stmt, + prefix: prefix, + pkgPath: bundleObj.Pkg().Path(), + }) + + default: + return p.errorf(stmt, fmt.Errorf("unsupported %s call", fn.Sel.Name)) + } + } + + for _, imp := range imported { + files, err := findBundleFiles(imp.pkgPath) + if err != nil { + return p.errorf(imp.node, fmt.Errorf("import lookup error: %w", err)) + } + for _, filename := range files { + rset, err := p.importRules(imp.prefix, imp.pkgPath, filename) + if err != nil { + return p.errorf(imp.node, fmt.Errorf("import parsing error: %w", err)) + } + p.imported = append(p.imported, rset) + } + } + + return nil +} + +func (p *rulesParser) importRules(prefix, pkgPath, filename string) (*goRuleSet, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config := rulesParserConfig{ + state: p.state, + ctx: p.ctx, + importer: p.importer, + prefix: prefix, + importedPkg: pkgPath, + itab: p.itab, + } + rset, err := newRulesParser(config).ParseFile(filename, bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("%s: %w", p.importedPkg, err) + } + return rset, nil +} + +func (p *rulesParser) isMatcherFunc(f *ast.FuncDecl) bool { + typ := p.types.ObjectOf(f.Name).Type().(*types.Signature) + return typ.Results().Len() == 0 && + typ.Params().Len() == 1 && + typ.Params().At(0).Type().String() == "github.com/quasilyte/go-ruleguard/dsl.Matcher" +} + +func (p *rulesParser) parseRuleGroup(f *ast.FuncDecl) (err error) { + defer func() { + if err != nil { + return + } + rv := recover() + if rv == nil { + return + } + if parseErr, ok := rv.(parseError); ok { + err = parseErr.error + return + } + panic(rv) // not our panic + }() + + if f.Name.String() == "_" { + return p.errorf(f.Name, errors.New("`_` is not a valid rule group function name")) + } + if f.Body == nil { + return p.errorf(f, errors.New("unexpected empty 
function body")) + } + params := f.Type.Params.List + matcher := params[0].Names[0].Name + + p.group = f.Name.Name + if p.prefix != "" { + p.group = p.prefix + "/" + f.Name.Name + } + + if p.ctx.GroupFilter != nil && !p.ctx.GroupFilter(p.group) { + return nil // Skip this group + } + if _, ok := p.res.groups[p.group]; ok { + panic(fmt.Sprintf("duplicated function %s after the typecheck", p.group)) // Should never happen + } + p.res.groups[p.group] = token.Position{ + Filename: p.filename, + Line: p.ctx.Fset.Position(f.Name.Pos()).Line, + } + + p.itab.EnterScope() + defer p.itab.LeaveScope() + + for _, stmt := range f.Body.List { + if _, ok := stmt.(*ast.DeclStmt); ok { + continue + } + stmtExpr, ok := stmt.(*ast.ExprStmt) + if !ok { + return p.errorf(stmt, fmt.Errorf("expected a %s method call, found %s", matcher, goutil.SprintNode(p.ctx.Fset, stmt))) + } + call, ok := stmtExpr.X.(*ast.CallExpr) + if !ok { + return p.errorf(stmt, fmt.Errorf("expected a %s method call, found %s", matcher, goutil.SprintNode(p.ctx.Fset, stmt))) + } + if err := p.parseCall(matcher, call); err != nil { + return err + } + + } + + return nil +} + +func (p *rulesParser) parseCall(matcher string, call *ast.CallExpr) error { + f := call.Fun.(*ast.SelectorExpr) + x, ok := f.X.(*ast.Ident) + if ok && x.Name == matcher { + return p.parseStmt(f.Sel, call.Args) + } + + return p.parseRule(matcher, call) +} + +func (p *rulesParser) parseStmt(fn *ast.Ident, args []ast.Expr) error { + switch fn.Name { + case "Import": + pkgPath, ok := p.toStringValue(args[0]) + if !ok { + return p.errorf(args[0], errors.New("expected a string literal argument")) + } + pkgName := path.Base(pkgPath) + p.itab.Load(pkgName, pkgPath) + return nil + default: + return p.errorf(fn, fmt.Errorf("unexpected %s method", fn.Name)) + } +} + +func (p *rulesParser) parseRule(matcher string, call *ast.CallExpr) error { + origCall := call + var ( + matchArgs *[]ast.Expr + matchCommentArgs *[]ast.Expr + whereArgs *[]ast.Expr + suggestArgs *[]ast.Expr + reportArgs *[]ast.Expr + atArgs *[]ast.Expr + ) + for { + chain, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + break + } + switch chain.Sel.Name { + case "Match": + if matchArgs != nil { + return p.errorf(chain.Sel, errors.New("Match() can't be repeated")) + } + if matchCommentArgs != nil { + return p.errorf(chain.Sel, errors.New("Match() and MatchComment() can't be combined")) + } + matchArgs = &call.Args + case "MatchComment": + if matchCommentArgs != nil { + return p.errorf(chain.Sel, errors.New("MatchComment() can't be repeated")) + } + if matchArgs != nil { + return p.errorf(chain.Sel, errors.New("Match() and MatchComment() can't be combined")) + } + matchCommentArgs = &call.Args + case "Where": + if whereArgs != nil { + return p.errorf(chain.Sel, errors.New("Where() can't be repeated")) + } + whereArgs = &call.Args + case "Suggest": + if suggestArgs != nil { + return p.errorf(chain.Sel, errors.New("Suggest() can't be repeated")) + } + suggestArgs = &call.Args + case "Report": + if reportArgs != nil { + return p.errorf(chain.Sel, errors.New("Report() can't be repeated")) + } + reportArgs = &call.Args + case "At": + if atArgs != nil { + return p.errorf(chain.Sel, errors.New("At() can't be repeated")) + } + atArgs = &call.Args + default: + return p.errorf(chain.Sel, fmt.Errorf("unexpected %s method", chain.Sel.Name)) + } + call, ok = chain.X.(*ast.CallExpr) + if !ok { + break + } + } + + proto := goRule{ + filename: p.filename, + line: p.ctx.Fset.Position(origCall.Pos()).Line, + group: p.group, + } + + // 
AST patterns for Match() or regexp patterns for MatchComment(). + var alternatives []string + + if matchArgs == nil && matchCommentArgs == nil { + return p.errorf(origCall, errors.New("missing Match() or MatchComment() call")) + } + + if matchArgs != nil { + for _, arg := range *matchArgs { + alternatives = append(alternatives, p.parseStringArg(arg)) + } + } else { + for _, arg := range *matchCommentArgs { + alternatives = append(alternatives, p.parseStringArg(arg)) + } + } + + if whereArgs != nil { + proto.filter = p.parseFilter((*whereArgs)[0]) + } + + if suggestArgs != nil { + proto.suggestion = p.parseStringArg((*suggestArgs)[0]) + } + + if reportArgs == nil { + if suggestArgs == nil { + return p.errorf(origCall, errors.New("missing Report() or Suggest() call")) + } + proto.msg = "suggestion: " + proto.suggestion + } else { + proto.msg = p.parseStringArg((*reportArgs)[0]) + } + + if atArgs != nil { + index, ok := (*atArgs)[0].(*ast.IndexExpr) + if !ok { + return p.errorf((*atArgs)[0], fmt.Errorf("expected %s[`varname`] expression", matcher)) + } + arg, ok := p.toStringValue(index.Index) + if !ok { + return p.errorf(index.Index, errors.New("expected a string literal index")) + } + proto.location = arg + } + + if matchArgs != nil { + return p.loadGogrepRules(proto, *matchArgs, alternatives) + } + return p.loadCommentRules(proto, *matchCommentArgs, alternatives) +} + +func (p *rulesParser) loadCommentRules(proto goRule, matchArgs []ast.Expr, alternatives []string) error { + dst := p.res.universal + for i, alt := range alternatives { + pat, err := regexp.Compile(alt) + if err != nil { + return p.errorf(matchArgs[i], fmt.Errorf("parse match comment pattern: %w", err)) + } + rule := goCommentRule{ + base: proto, + pat: pat, + captureGroups: regexpHasCaptureGroups(alt), + } + dst.commentRules = append(dst.commentRules, rule) + } + + return nil +} + +func (p *rulesParser) loadGogrepRules(proto goRule, matchArgs []ast.Expr, alternatives []string) error { + dst := p.res.universal + for i, alt := range alternatives { + rule := proto + pat, err := gogrep.Compile(p.ctx.Fset, alt, false) + if err != nil { + return p.errorf(matchArgs[i], fmt.Errorf("parse match pattern: %w", err)) + } + rule.pat = pat + var dstTags []nodetag.Value + switch tag := pat.NodeTag(); tag { + case nodetag.Unknown: + return p.errorf(matchArgs[i], fmt.Errorf("can't infer a tag of %s", alt)) + case nodetag.Node: + // TODO: add to every bucket? 
+ return p.errorf(matchArgs[i], fmt.Errorf("%s is too general", alt)) + case nodetag.StmtList: + dstTags = []nodetag.Value{ + nodetag.BlockStmt, + nodetag.CaseClause, + nodetag.CommClause, + } + case nodetag.ExprList: + dstTags = []nodetag.Value{ + nodetag.CallExpr, + nodetag.CompositeLit, + nodetag.ReturnStmt, + } + default: + dstTags = []nodetag.Value{tag} + } + for _, tag := range dstTags { + dst.rulesByTag[tag] = append(dst.rulesByTag[tag], rule) + } + dst.categorizedNum++ + } + + return nil +} + +func (p *rulesParser) parseFilter(root ast.Expr) matchFilter { + return p.parseFilterExpr(root) +} + +func (p *rulesParser) errorf(n ast.Node, err error) parseError { + loc := p.ctx.Fset.Position(n.Pos()) + return parseError{fmt.Errorf("%s:%d: %w", loc.Filename, loc.Line, err)} +} + +func (p *rulesParser) parseStringArg(e ast.Expr) string { + s, ok := p.toStringValue(e) + if !ok { + panic(p.errorf(e, errors.New("expected a string literal argument"))) + } + return s +} + +func (p *rulesParser) parseRegexpArg(e ast.Expr) *regexp.Regexp { + patternString, ok := p.toStringValue(e) + if !ok { + panic(p.errorf(e, errors.New("expected a regexp pattern argument"))) + } + re, err := regexp.Compile(patternString) + if err != nil { + panic(p.errorf(e, err)) + } + return re +} + +func (p *rulesParser) parseTypeStringArg(e ast.Expr) types.Type { + typeString, ok := p.toStringValue(e) + if !ok { + panic(p.errorf(e, errors.New("expected a type string argument"))) + } + typ, err := typeFromString(typeString) + if err != nil { + panic(p.errorf(e, fmt.Errorf("parse type expr: %w", err))) + } + if typ == nil { + panic(p.errorf(e, fmt.Errorf("can't convert %s into a type constraint yet", typeString))) + } + return typ +} + +func (p *rulesParser) parseFilterExpr(e ast.Expr) matchFilter { + result := matchFilter{src: goutil.SprintNode(p.ctx.Fset, e)} + + switch e := e.(type) { + case *ast.ParenExpr: + return p.parseFilterExpr(e.X) + + case *ast.UnaryExpr: + x := p.parseFilterExpr(e.X) + if e.Op == token.NOT { + result.fn = makeNotFilter(result.src, x) + return result + } + panic(p.errorf(e, fmt.Errorf("unsupported unary op: %s", result.src))) + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND: + result.fn = makeAndFilter(p.parseFilterExpr(e.X), p.parseFilterExpr(e.Y)) + return result + case token.LOR: + result.fn = makeOrFilter(p.parseFilterExpr(e.X), p.parseFilterExpr(e.Y)) + return result + case token.GEQ, token.LEQ, token.LSS, token.GTR, token.EQL, token.NEQ: + operand := p.toFilterOperand(e.X) + rhs := p.toFilterOperand(e.Y) + rhsValue := p.types.Types[e.Y].Value + if operand.path == "Type.Size" && rhsValue != nil { + result.fn = makeTypeSizeConstFilter(result.src, operand.varName, e.Op, rhsValue) + return result + } + if operand.path == "Value.Int" && rhsValue != nil { + result.fn = makeValueIntConstFilter(result.src, operand.varName, e.Op, rhsValue) + return result + } + if operand.path == "Value.Int" && rhs.path == "Value.Int" && rhs.varName != "" { + result.fn = makeValueIntFilter(result.src, operand.varName, e.Op, rhs.varName) + return result + } + if operand.path == "Text" && rhsValue != nil { + result.fn = makeTextConstFilter(result.src, operand.varName, e.Op, rhsValue) + return result + } + if operand.path == "Text" && rhs.path == "Text" && rhs.varName != "" { + result.fn = makeTextFilter(result.src, operand.varName, e.Op, rhs.varName) + return result + } + } + panic(p.errorf(e, fmt.Errorf("unsupported binary op: %s", result.src))) + } + + operand := p.toFilterOperand(e) + args := operand.args 
+ switch operand.path { + default: + panic(p.errorf(e, fmt.Errorf("unsupported expr: %s", result.src))) + + case "File.Imports": + pkgPath := p.parseStringArg(args[0]) + result.fn = makeFileImportsFilter(result.src, pkgPath) + + case "File.PkgPath.Matches": + re := p.parseRegexpArg(args[0]) + result.fn = makeFilePkgPathMatchesFilter(result.src, re) + + case "File.Name.Matches": + re := p.parseRegexpArg(args[0]) + result.fn = makeFileNameMatchesFilter(result.src, re) + + case "Pure": + result.fn = makePureFilter(result.src, operand.varName) + + case "Const": + result.fn = makeConstFilter(result.src, operand.varName) + + case "Addressable": + result.fn = makeAddressableFilter(result.src, operand.varName) + + case "Filter": + expr, fn := goutil.ResolveFunc(p.types, args[0]) + if expr != nil { + panic(p.errorf(expr, errors.New("expected a simple function name, found expression"))) + } + sig := fn.Type().(*types.Signature) + userFn := p.state.env.GetFunc(fn.Pkg().Path(), fn.Name()) + if userFn == nil { + panic(p.errorf(args[0], fmt.Errorf("can't find a compiled version of %s", sig.String()))) + } + result.fn = makeCustomVarFilter(result.src, operand.varName, userFn) + + case "Type.Is", "Type.Underlying.Is": + // TODO(quasilyte): add FQN support? + typeString, ok := p.toStringValue(args[0]) + if !ok { + panic(p.errorf(args[0], errors.New("expected a string literal argument"))) + } + ctx := typematch.Context{Itab: p.itab} + pat, err := typematch.Parse(&ctx, typeString) + if err != nil { + panic(p.errorf(args[0], fmt.Errorf("parse type expr: %w", err))) + } + underlying := operand.path == "Type.Underlying.Is" + result.fn = makeTypeIsFilter(result.src, operand.varName, underlying, pat) + + case "Type.ConvertibleTo": + dstType := p.parseTypeStringArg(args[0]) + result.fn = makeTypeConvertibleToFilter(result.src, operand.varName, dstType) + + case "Type.AssignableTo": + dstType := p.parseTypeStringArg(args[0]) + result.fn = makeTypeAssignableToFilter(result.src, operand.varName, dstType) + + case "Type.Implements": + iface := p.toInterfaceValue(args[0]) + result.fn = makeTypeImplementsFilter(result.src, operand.varName, iface) + + case "Text.Matches": + re := p.parseRegexpArg(args[0]) + result.fn = makeTextMatchesFilter(result.src, operand.varName, re) + + case "Node.Is": + typeString, ok := p.toStringValue(args[0]) + if !ok { + panic(p.errorf(args[0], errors.New("expected a string literal argument"))) + } + tag := nodetag.FromString(typeString) + if tag == nodetag.Unknown { + panic(p.errorf(args[0], fmt.Errorf("%s is not a valid go/ast type name", typeString))) + } + result.fn = makeNodeIsFilter(result.src, operand.varName, tag) + } + + if result.fn == nil { + panic("bug: nil func for the filter") // Should never happen + } + return result +} + +func (p *rulesParser) toInterfaceValue(x ast.Node) *types.Interface { + typeString, ok := p.toStringValue(x) + if !ok { + panic(p.errorf(x, errors.New("expected a string literal argument"))) + } + + typ, err := p.state.FindType(p.importer, p.pkg, typeString) + if err == nil { + iface, ok := typ.Underlying().(*types.Interface) + if !ok { + panic(p.errorf(x, fmt.Errorf("%s is not an interface type", typeString))) + } + return iface + } + + n, err := parser.ParseExpr(typeString) + if err != nil { + panic(p.errorf(x, fmt.Errorf("parse type expr: %w", err))) + } + qn, ok := n.(*ast.SelectorExpr) + if !ok { + panic(p.errorf(x, fmt.Errorf("can't resolve %s type; try a fully-qualified name", typeString))) + } + pkgName, ok := qn.X.(*ast.Ident) + if !ok { + 
panic(p.errorf(qn.X, errors.New("invalid package name"))) + } + pkgPath, ok := p.itab.Lookup(pkgName.Name) + if !ok { + panic(p.errorf(qn.X, fmt.Errorf("package %s is not imported", pkgName.Name))) + } + pkg, err := p.importer.Import(pkgPath) + if err != nil { + panic(p.errorf(n, &ImportError{msg: fmt.Sprintf("can't load %s", pkgPath), err: err})) + } + obj := pkg.Scope().Lookup(qn.Sel.Name) + if obj == nil { + panic(p.errorf(n, fmt.Errorf("%s is not found in %s", qn.Sel.Name, pkgPath))) + } + iface, ok := obj.Type().Underlying().(*types.Interface) + if !ok { + panic(p.errorf(n, fmt.Errorf("%s is not an interface type", qn.Sel.Name))) + } + return iface +} + +func (p *rulesParser) toStringValue(x ast.Node) (string, bool) { + switch x := x.(type) { + case *ast.BasicLit: + if x.Kind != token.STRING { + return "", false + } + s, err := strconv.Unquote(x.Value) + if err != nil { + return "", false + } + return s, true + case ast.Expr: + typ, ok := p.types.Types[x] + if !ok || typ.Type.String() != "string" { + return "", false + } + str := typ.Value.ExactString() + str = str[1 : len(str)-1] // remove quotes + return str, true + } + return "", false +} + +func (p *rulesParser) toFilterOperand(e ast.Expr) filterOperand { + var o filterOperand + + if call, ok := e.(*ast.CallExpr); ok { + o.args = call.Args + e = call.Fun + } + var path string + for { + if call, ok := e.(*ast.CallExpr); ok { + e = call.Fun + continue + } + selector, ok := e.(*ast.SelectorExpr) + if !ok { + break + } + if path == "" { + path = selector.Sel.Name + } else { + path = selector.Sel.Name + "." + path + } + e = selector.X + } + + o.path = path + + indexing, ok := e.(*ast.IndexExpr) + if !ok { + return o + } + mapIdent, ok := indexing.X.(*ast.Ident) + if !ok { + return o + } + o.mapName = mapIdent.Name + indexString, _ := p.toStringValue(indexing.Index) + o.varName = indexString + + return o +} + +type filterOperand struct { + mapName string + varName string + path string + args []ast.Expr +} + +var stdlibPackages = map[string]string{ + "adler32": "hash/adler32", + "aes": "crypto/aes", + "ascii85": "encoding/ascii85", + "asn1": "encoding/asn1", + "ast": "go/ast", + "atomic": "sync/atomic", + "base32": "encoding/base32", + "base64": "encoding/base64", + "big": "math/big", + "binary": "encoding/binary", + "bits": "math/bits", + "bufio": "bufio", + "build": "go/build", + "bytes": "bytes", + "bzip2": "compress/bzip2", + "cgi": "net/http/cgi", + "cgo": "runtime/cgo", + "cipher": "crypto/cipher", + "cmplx": "math/cmplx", + "color": "image/color", + "constant": "go/constant", + "context": "context", + "cookiejar": "net/http/cookiejar", + "crc32": "hash/crc32", + "crc64": "hash/crc64", + "crypto": "crypto", + "csv": "encoding/csv", + "debug": "runtime/debug", + "des": "crypto/des", + "doc": "go/doc", + "draw": "image/draw", + "driver": "database/sql/driver", + "dsa": "crypto/dsa", + "dwarf": "debug/dwarf", + "ecdsa": "crypto/ecdsa", + "ed25519": "crypto/ed25519", + "elf": "debug/elf", + "elliptic": "crypto/elliptic", + "encoding": "encoding", + "errors": "errors", + "exec": "os/exec", + "expvar": "expvar", + "fcgi": "net/http/fcgi", + "filepath": "path/filepath", + "flag": "flag", + "flate": "compress/flate", + "fmt": "fmt", + "fnv": "hash/fnv", + "format": "go/format", + "gif": "image/gif", + "gob": "encoding/gob", + "gosym": "debug/gosym", + "gzip": "compress/gzip", + "hash": "hash", + "heap": "container/heap", + "hex": "encoding/hex", + "hmac": "crypto/hmac", + "html": "html", + "http": "net/http", + "httptest": 
"net/http/httptest", + "httptrace": "net/http/httptrace", + "httputil": "net/http/httputil", + "image": "image", + "importer": "go/importer", + "io": "io", + "iotest": "testing/iotest", + "ioutil": "io/ioutil", + "jpeg": "image/jpeg", + "json": "encoding/json", + "jsonrpc": "net/rpc/jsonrpc", + "list": "container/list", + "log": "log", + "lzw": "compress/lzw", + "macho": "debug/macho", + "mail": "net/mail", + "math": "math", + "md5": "crypto/md5", + "mime": "mime", + "multipart": "mime/multipart", + "net": "net", + "os": "os", + "palette": "image/color/palette", + "parse": "text/template/parse", + "parser": "go/parser", + "path": "path", + "pe": "debug/pe", + "pem": "encoding/pem", + "pkix": "crypto/x509/pkix", + "plan9obj": "debug/plan9obj", + "plugin": "plugin", + "png": "image/png", + "pprof": "runtime/pprof", + "printer": "go/printer", + "quick": "testing/quick", + "quotedprintable": "mime/quotedprintable", + "race": "runtime/race", + "rand": "math/rand", + "rc4": "crypto/rc4", + "reflect": "reflect", + "regexp": "regexp", + "ring": "container/ring", + "rpc": "net/rpc", + "rsa": "crypto/rsa", + "runtime": "runtime", + "scanner": "text/scanner", + "sha1": "crypto/sha1", + "sha256": "crypto/sha256", + "sha512": "crypto/sha512", + "signal": "os/signal", + "smtp": "net/smtp", + "sort": "sort", + "sql": "database/sql", + "strconv": "strconv", + "strings": "strings", + "subtle": "crypto/subtle", + "suffixarray": "index/suffixarray", + "sync": "sync", + "syntax": "regexp/syntax", + "syscall": "syscall", + "syslog": "log/syslog", + "tabwriter": "text/tabwriter", + "tar": "archive/tar", + "template": "text/template", + "testing": "testing", + "textproto": "net/textproto", + "time": "time", + "tls": "crypto/tls", + "token": "go/token", + "trace": "runtime/trace", + "types": "go/types", + "unicode": "unicode", + "unsafe": "unsafe", + "url": "net/url", + "user": "os/user", + "utf16": "unicode/utf16", + "utf8": "unicode/utf8", + "x509": "crypto/x509", + "xml": "encoding/xml", + "zip": "archive/zip", + "zlib": "compress/zlib", +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/compile.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/compile.go new file mode 100644 index 000000000..db61b40ee --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/compile.go @@ -0,0 +1,707 @@ +package quasigo + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + + "github.com/quasilyte/go-ruleguard/ruleguard/goutil" + "golang.org/x/tools/go/ast/astutil" +) + +func compile(ctx *CompileContext, fn *ast.FuncDecl) (compiled *Func, err error) { + defer func() { + if err != nil { + return + } + rv := recover() + if rv == nil { + return + } + if compileErr, ok := rv.(compileError); ok { + err = compileErr + return + } + panic(rv) // not our panic + }() + + return compileFunc(ctx, fn), nil +} + +func compileFunc(ctx *CompileContext, fn *ast.FuncDecl) *Func { + cl := compiler{ + ctx: ctx, + fnType: ctx.Types.ObjectOf(fn.Name).Type().(*types.Signature), + constantsPool: make(map[interface{}]int), + intConstantsPool: make(map[int]int), + locals: make(map[string]int), + } + return cl.compileFunc(fn) +} + +type compiler struct { + ctx *CompileContext + + fnType *types.Signature + retType types.Type + + lastOp opcode + + locals map[string]int + constantsPool map[interface{}]int + intConstantsPool map[int]int + params map[string]int + + code []byte + constants []interface{} + intConstants []int + + breakTarget *label + continueTarget *label + + labels 
[]*label +} + +type label struct { + targetPos int + sources []int +} + +type compileError string + +func (e compileError) Error() string { return string(e) } + +func (cl *compiler) compileFunc(fn *ast.FuncDecl) *Func { + if cl.fnType.Results().Len() != 1 { + panic(cl.errorf(fn.Name, "only functions with a single non-void results are supported")) + } + cl.retType = cl.fnType.Results().At(0).Type() + + if !cl.isSupportedType(cl.retType) { + panic(cl.errorUnsupportedType(fn.Name, cl.retType, "function result")) + } + + dbg := funcDebugInfo{ + paramNames: make([]string, cl.fnType.Params().Len()), + } + + cl.params = make(map[string]int, cl.fnType.Params().Len()) + for i := 0; i < cl.fnType.Params().Len(); i++ { + p := cl.fnType.Params().At(i) + paramName := p.Name() + paramType := p.Type() + cl.params[paramName] = i + dbg.paramNames[i] = paramName + if !cl.isSupportedType(paramType) { + panic(cl.errorUnsupportedType(fn.Name, paramType, paramName+" param")) + } + } + + cl.compileStmt(fn.Body) + compiled := &Func{ + code: cl.code, + constants: cl.constants, + intConstants: cl.intConstants, + } + if len(cl.locals) != 0 { + dbg.localNames = make([]string, len(cl.locals)) + for localName, localIndex := range cl.locals { + dbg.localNames[localIndex] = localName + } + } + cl.ctx.Env.debug.funcs[compiled] = dbg + cl.linkJumps() + return compiled +} + +func (cl *compiler) compileStmt(stmt ast.Stmt) { + switch stmt := stmt.(type) { + case *ast.ReturnStmt: + cl.compileReturnStmt(stmt) + + case *ast.AssignStmt: + cl.compileAssignStmt(stmt) + + case *ast.IncDecStmt: + cl.compileIncDecStmt(stmt) + + case *ast.IfStmt: + cl.compileIfStmt(stmt) + + case *ast.ForStmt: + cl.compileForStmt(stmt) + + case *ast.BranchStmt: + cl.compileBranchStmt(stmt) + + case *ast.BlockStmt: + for i := range stmt.List { + cl.compileStmt(stmt.List[i]) + } + + default: + panic(cl.errorf(stmt, "can't compile %T yet", stmt)) + } +} + +func (cl *compiler) compileIncDecStmt(stmt *ast.IncDecStmt) { + varname, ok := stmt.X.(*ast.Ident) + if !ok { + panic(cl.errorf(stmt.X, "can assign only to simple variables")) + } + id := cl.getLocal(varname, varname.String()) + if stmt.Tok == token.INC { + cl.emit8(opIncLocal, id) + } else { + cl.emit8(opDecLocal, id) + } +} + +func (cl *compiler) compileBranchStmt(branch *ast.BranchStmt) { + if branch.Label != nil { + panic(cl.errorf(branch.Label, "can't compile %s with a label", branch.Tok)) + } + + switch branch.Tok { + case token.BREAK: + cl.emitJump(opJump, cl.breakTarget) + default: + panic(cl.errorf(branch, "can't compile %s yet", branch.Tok)) + } +} + +func (cl *compiler) compileForStmt(stmt *ast.ForStmt) { + labelBreak := cl.newLabel() + labelContinue := cl.newLabel() + prevBreakTarget := cl.breakTarget + prevContinueTarget := cl.continueTarget + cl.breakTarget = labelBreak + cl.continueTarget = labelContinue + + switch { + case stmt.Cond != nil && stmt.Init != nil && stmt.Post != nil: + // Will be implemented later; probably when the max number of locals will be lifted. + panic(cl.errorf(stmt, "can't compile C-style for loops yet")) + + case stmt.Cond != nil && stmt.Init == nil && stmt.Post == nil: + // `for { ... }` + labelBody := cl.newLabel() + cl.emitJump(opJump, labelContinue) + cl.bindLabel(labelBody) + cl.compileStmt(stmt.Body) + cl.bindLabel(labelContinue) + cl.compileExpr(stmt.Cond) + cl.emitJump(opJumpTrue, labelBody) + cl.bindLabel(labelBreak) + + default: + // `for { ... 
}` + cl.bindLabel(labelContinue) + cl.compileStmt(stmt.Body) + cl.emitJump(opJump, labelContinue) + cl.bindLabel(labelBreak) + } + + cl.breakTarget = prevBreakTarget + cl.continueTarget = prevContinueTarget +} + +func (cl *compiler) compileIfStmt(stmt *ast.IfStmt) { + if stmt.Else == nil { + labelEnd := cl.newLabel() + cl.compileExpr(stmt.Cond) + cl.emitJump(opJumpFalse, labelEnd) + cl.compileStmt(stmt.Body) + cl.bindLabel(labelEnd) + return + } + + labelEnd := cl.newLabel() + labelElse := cl.newLabel() + cl.compileExpr(stmt.Cond) + cl.emitJump(opJumpFalse, labelElse) + cl.compileStmt(stmt.Body) + if !cl.isUncondJump(cl.lastOp) { + cl.emitJump(opJump, labelEnd) + } + cl.bindLabel(labelElse) + cl.compileStmt(stmt.Else) + cl.bindLabel(labelEnd) +} + +func (cl *compiler) compileAssignStmt(assign *ast.AssignStmt) { + if len(assign.Lhs) != 1 { + panic(cl.errorf(assign, "only single left operand is allowed in assignments")) + } + if len(assign.Rhs) != 1 { + panic(cl.errorf(assign, "only single right operand is allowed in assignments")) + } + lhs := assign.Lhs[0] + rhs := assign.Rhs[0] + varname, ok := lhs.(*ast.Ident) + if !ok { + panic(cl.errorf(lhs, "can assign only to simple variables")) + } + + cl.compileExpr(rhs) + + typ := cl.ctx.Types.TypeOf(varname) + if assign.Tok == token.DEFINE { + if _, ok := cl.locals[varname.String()]; ok { + panic(cl.errorf(lhs, "%s variable shadowing is not allowed", varname)) + } + if !cl.isSupportedType(typ) { + panic(cl.errorUnsupportedType(varname, typ, varname.String()+" local variable")) + } + if len(cl.locals) == maxFuncLocals { + panic(cl.errorf(lhs, "can't define %s: too many locals", varname)) + } + id := len(cl.locals) + cl.locals[varname.String()] = id + cl.emit8(pickOp(typeIsInt(typ), opSetIntLocal, opSetLocal), id) + } else { + id := cl.getLocal(varname, varname.String()) + cl.emit8(pickOp(typeIsInt(typ), opSetIntLocal, opSetLocal), id) + } +} + +func (cl *compiler) getLocal(v ast.Expr, varname string) int { + id, ok := cl.locals[varname] + if !ok { + if _, ok := cl.params[varname]; ok { + panic(cl.errorf(v, "can't assign to %s, params are readonly", varname)) + } + panic(cl.errorf(v, "%s is not a writeable local variable", varname)) + } + return id +} + +func (cl *compiler) compileReturnStmt(ret *ast.ReturnStmt) { + if ret.Results == nil { + panic(cl.errorf(ret, "'naked' return statements are not allowed")) + } + + switch { + case identName(ret.Results[0]) == "true": + cl.emit(opReturnTrue) + case identName(ret.Results[0]) == "false": + cl.emit(opReturnFalse) + default: + cl.compileExpr(ret.Results[0]) + typ := cl.ctx.Types.TypeOf(ret.Results[0]) + cl.emit(pickOp(typeIsInt(typ), opReturnIntTop, opReturnTop)) + } +} + +func (cl *compiler) compileExpr(e ast.Expr) { + cv := cl.ctx.Types.Types[e].Value + if cv != nil { + cl.compileConstantValue(e, cv) + return + } + + switch e := e.(type) { + case *ast.ParenExpr: + cl.compileExpr(e.X) + + case *ast.Ident: + cl.compileIdent(e) + + case *ast.SelectorExpr: + cl.compileSelectorExpr(e) + + case *ast.UnaryExpr: + switch e.Op { + case token.NOT: + cl.compileUnaryOp(opNot, e) + default: + panic(cl.errorf(e, "can't compile unary %s yet", e.Op)) + } + + case *ast.SliceExpr: + cl.compileSliceExpr(e) + + case *ast.BinaryExpr: + cl.compileBinaryExpr(e) + + case *ast.CallExpr: + cl.compileCallExpr(e) + + default: + panic(cl.errorf(e, "can't compile %T yet", e)) + } +} + +func (cl *compiler) compileSelectorExpr(e *ast.SelectorExpr) { + typ := cl.ctx.Types.TypeOf(e.X) + key := funcKey{ + name: e.Sel.String(), + 
qualifier: typ.String(), + } + + if funcID, ok := cl.ctx.Env.nameToNativeFuncID[key]; ok { + cl.compileExpr(e.X) + cl.emit16(opCallNative, int(funcID)) + return + } + + panic(cl.errorf(e, "can't compile %s field access", e.Sel)) +} + +func (cl *compiler) compileBinaryExpr(e *ast.BinaryExpr) { + typ := cl.ctx.Types.TypeOf(e.X) + + switch e.Op { + case token.LOR: + cl.compileOr(e) + case token.LAND: + cl.compileAnd(e) + + case token.NEQ: + switch { + case identName(e.X) == "nil": + cl.compileExpr(e.Y) + cl.emit(opIsNotNil) + case identName(e.Y) == "nil": + cl.compileExpr(e.X) + cl.emit(opIsNotNil) + case typeIsString(typ): + cl.compileBinaryOp(opNotEqString, e) + case typeIsInt(typ): + cl.compileBinaryOp(opNotEqInt, e) + default: + panic(cl.errorf(e, "!= is not implemented for %s operands", typ)) + } + case token.EQL: + switch { + case identName(e.X) == "nil": + cl.compileExpr(e.Y) + cl.emit(opIsNil) + case identName(e.Y) == "nil": + cl.compileExpr(e.X) + cl.emit(opIsNil) + case typeIsString(cl.ctx.Types.TypeOf(e.X)): + cl.compileBinaryOp(opEqString, e) + case typeIsInt(cl.ctx.Types.TypeOf(e.X)): + cl.compileBinaryOp(opEqInt, e) + default: + panic(cl.errorf(e, "== is not implemented for %s operands", typ)) + } + + case token.GTR: + cl.compileIntBinaryOp(e, opGtInt, typ) + case token.GEQ: + cl.compileIntBinaryOp(e, opGtEqInt, typ) + case token.LSS: + cl.compileIntBinaryOp(e, opLtInt, typ) + case token.LEQ: + cl.compileIntBinaryOp(e, opLtEqInt, typ) + + case token.ADD: + switch { + case typeIsString(typ): + cl.compileBinaryOp(opConcat, e) + case typeIsInt(typ): + cl.compileBinaryOp(opAdd, e) + default: + panic(cl.errorf(e, "+ is not implemented for %s operands", typ)) + } + + case token.SUB: + cl.compileIntBinaryOp(e, opSub, typ) + + default: + panic(cl.errorf(e, "can't compile binary %s yet", e.Op)) + } +} + +func (cl *compiler) compileIntBinaryOp(e *ast.BinaryExpr, op opcode, typ types.Type) { + switch { + case typeIsInt(typ): + cl.compileBinaryOp(op, e) + default: + panic(cl.errorf(e, "%s is not implemented for %s operands", e.Op, typ)) + } +} + +func (cl *compiler) compileSliceExpr(slice *ast.SliceExpr) { + if slice.Slice3 { + panic(cl.errorf(slice, "can't compile 3-index slicing")) + } + + // No need to do slicing, its no-op `s[:]`. 
+ if slice.Low == nil && slice.High == nil { + cl.compileExpr(slice.X) + return + } + + sliceOp := opStringSlice + sliceFromOp := opStringSliceFrom + sliceToOp := opStringSliceTo + + if !typeIsString(cl.ctx.Types.TypeOf(slice.X)) { + panic(cl.errorf(slice.X, "can't compile slicing of something that is not a string")) + } + + switch { + case slice.Low == nil && slice.High != nil: + cl.compileExpr(slice.X) + cl.compileExpr(slice.High) + cl.emit(sliceToOp) + case slice.Low != nil && slice.High == nil: + cl.compileExpr(slice.X) + cl.compileExpr(slice.Low) + cl.emit(sliceFromOp) + default: + cl.compileExpr(slice.X) + cl.compileExpr(slice.Low) + cl.compileExpr(slice.High) + cl.emit(sliceOp) + } +} + +func (cl *compiler) compileBuiltinCall(fn *ast.Ident, call *ast.CallExpr) { + switch fn.Name { + case `len`: + s := call.Args[0] + cl.compileExpr(s) + if !typeIsString(cl.ctx.Types.TypeOf(s)) { + panic(cl.errorf(s, "can't compile len() with non-string argument yet")) + } + cl.emit(opStringLen) + default: + panic(cl.errorf(fn, "can't compile %s() builtin function call yet", fn)) + } +} + +func (cl *compiler) compileCallExpr(call *ast.CallExpr) { + if id, ok := astutil.Unparen(call.Fun).(*ast.Ident); ok { + _, isBuiltin := cl.ctx.Types.ObjectOf(id).(*types.Builtin) + if isBuiltin { + cl.compileBuiltinCall(id, call) + return + } + } + + expr, fn := goutil.ResolveFunc(cl.ctx.Types, call.Fun) + if fn == nil { + panic(cl.errorf(call.Fun, "can't resolve the called function")) + } + + // TODO: just use Func.FullName as a key? + key := funcKey{name: fn.Name()} + sig := fn.Type().(*types.Signature) + if sig.Recv() != nil { + key.qualifier = sig.Recv().Type().String() + } else { + key.qualifier = fn.Pkg().Path() + } + + if funcID, ok := cl.ctx.Env.nameToNativeFuncID[key]; ok { + if expr != nil { + cl.compileExpr(expr) + } + for _, arg := range call.Args { + cl.compileExpr(arg) + } + cl.emit16(opCallNative, int(funcID)) + return + } + + panic(cl.errorf(call.Fun, "can't compile a call to %s func", key)) +} + +func (cl *compiler) compileUnaryOp(op opcode, e *ast.UnaryExpr) { + cl.compileExpr(e.X) + cl.emit(op) +} + +func (cl *compiler) compileBinaryOp(op opcode, e *ast.BinaryExpr) { + cl.compileExpr(e.X) + cl.compileExpr(e.Y) + cl.emit(op) +} + +func (cl *compiler) compileOr(e *ast.BinaryExpr) { + labelEnd := cl.newLabel() + cl.compileExpr(e.X) + cl.emit(opDup) + cl.emitJump(opJumpTrue, labelEnd) + cl.compileExpr(e.Y) + cl.bindLabel(labelEnd) +} + +func (cl *compiler) compileAnd(e *ast.BinaryExpr) { + labelEnd := cl.newLabel() + cl.compileExpr(e.X) + cl.emit(opDup) + cl.emitJump(opJumpFalse, labelEnd) + cl.compileExpr(e.Y) + cl.bindLabel(labelEnd) +} + +func (cl *compiler) compileIdent(ident *ast.Ident) { + tv := cl.ctx.Types.Types[ident] + cv := tv.Value + if cv != nil { + cl.compileConstantValue(ident, cv) + return + } + if paramIndex, ok := cl.params[ident.String()]; ok { + cl.emit8(pickOp(typeIsInt(tv.Type), opPushIntParam, opPushParam), paramIndex) + return + } + if localIndex, ok := cl.locals[ident.String()]; ok { + cl.emit8(pickOp(typeIsInt(tv.Type), opPushIntLocal, opPushLocal), localIndex) + return + } + + panic(cl.errorf(ident, "can't compile a %s (type %s) variable read", ident.String(), tv.Type)) +} + +func (cl *compiler) compileConstantValue(source ast.Expr, cv constant.Value) { + switch cv.Kind() { + case constant.Bool: + v := constant.BoolVal(cv) + if v { + cl.emit(opPushTrue) + } else { + cl.emit(opPushFalse) + } + + case constant.String: + v := constant.StringVal(cv) + id := cl.internConstant(v) 
+ cl.emit8(opPushConst, id) + + case constant.Int: + v, exact := constant.Int64Val(cv) + if !exact { + panic(cl.errorf(source, "non-exact int value")) + } + id := cl.internIntConstant(int(v)) + cl.emit8(opPushIntConst, id) + + case constant.Complex: + panic(cl.errorf(source, "can't compile complex number constants yet")) + + case constant.Float: + panic(cl.errorf(source, "can't compile float constants yet")) + + default: + panic(cl.errorf(source, "unexpected constant %v", cv)) + } +} + +func (cl *compiler) internIntConstant(v int) int { + if id, ok := cl.intConstantsPool[v]; ok { + return id + } + id := len(cl.intConstants) + cl.intConstants = append(cl.intConstants, v) + cl.intConstantsPool[v] = id + return id +} + +func (cl *compiler) internConstant(v interface{}) int { + if _, ok := v.(int); ok { + panic("compiler error: int constant interned as interface{}") + } + if id, ok := cl.constantsPool[v]; ok { + return id + } + id := len(cl.constants) + cl.constants = append(cl.constants, v) + cl.constantsPool[v] = id + return id +} + +func (cl *compiler) linkJumps() { + for _, l := range cl.labels { + for _, jumpPos := range l.sources { + offset := l.targetPos - jumpPos + patchPos := jumpPos + 1 + put16(cl.code, patchPos, offset) + } + } +} + +func (cl *compiler) newLabel() *label { + l := &label{} + cl.labels = append(cl.labels, l) + return l +} + +func (cl *compiler) bindLabel(l *label) { + l.targetPos = len(cl.code) +} + +func (cl *compiler) emit(op opcode) { + cl.lastOp = op + cl.code = append(cl.code, byte(op)) +} + +func (cl *compiler) emitJump(op opcode, l *label) { + l.sources = append(l.sources, len(cl.code)) + cl.emit(op) + cl.code = append(cl.code, 0, 0) +} + +func (cl *compiler) emit8(op opcode, arg8 int) { + cl.emit(op) + cl.code = append(cl.code, byte(arg8)) +} + +func (cl *compiler) emit16(op opcode, arg16 int) { + cl.emit(op) + buf := make([]byte, 2) + put16(buf, 0, arg16) + cl.code = append(cl.code, buf...) +} + +func (cl *compiler) errorUnsupportedType(e ast.Node, typ types.Type, where string) compileError { + return cl.errorf(e, "%s type: %s is not supported, try something simpler", where, typ) +} + +func (cl *compiler) errorf(n ast.Node, format string, args ...interface{}) compileError { + loc := cl.ctx.Fset.Position(n.Pos()) + message := fmt.Sprintf("%s:%d: %s", loc.Filename, loc.Line, fmt.Sprintf(format, args...)) + return compileError(message) +} + +func (cl *compiler) isUncondJump(op opcode) bool { + switch op { + case opJump, opReturnFalse, opReturnTrue, opReturnTop, opReturnIntTop: + return true + default: + return false + } +} + +func (cl *compiler) isSupportedType(typ types.Type) bool { + switch typ := typ.Underlying().(type) { + case *types.Pointer: + // 1. Pointers to structs are supported. + _, isStruct := typ.Elem().Underlying().(*types.Struct) + return isStruct + + case *types.Basic: + // 2. Some of the basic types are supported. + // TODO: support byte/uint8 and maybe float64. + switch typ.Kind() { + case types.Bool, types.Int, types.String: + return true + default: + return false + } + + case *types.Interface: + // 3. Interfaces are supported. 
+ return true + + default: + return false + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/debug_info.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/debug_info.go new file mode 100644 index 000000000..e42bbb76a --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/debug_info.go @@ -0,0 +1,16 @@ +package quasigo + +type debugInfo struct { + funcs map[*Func]funcDebugInfo +} + +type funcDebugInfo struct { + paramNames []string + localNames []string +} + +func newDebugInfo() *debugInfo { + return &debugInfo{ + funcs: make(map[*Func]funcDebugInfo), + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/disasm.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/disasm.go new file mode 100644 index 000000000..192cf0710 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/disasm.go @@ -0,0 +1,74 @@ +package quasigo + +import ( + "fmt" + "strings" +) + +// TODO(quasilyte): generate extra opcode info so we can simplify disasm function? + +func disasm(env *Env, fn *Func) string { + var out strings.Builder + + dbg, ok := env.debug.funcs[fn] + if !ok { + return "\n" + } + + code := fn.code + labels := map[int]string{} + walkBytecode(code, func(pc int, op opcode) { + switch op { + case opJumpTrue, opJumpFalse, opJump: + offset := decode16(code, pc+1) + targetPC := pc + offset + if _, ok := labels[targetPC]; !ok { + labels[targetPC] = fmt.Sprintf("L%d", len(labels)) + } + } + }) + + walkBytecode(code, func(pc int, op opcode) { + if l := labels[pc]; l != "" { + fmt.Fprintf(&out, "%s:\n", l) + } + var arg interface{} + var comment string + switch op { + case opCallNative: + id := decode16(code, pc+1) + arg = id + comment = env.nativeFuncs[id].name + case opPushParam, opPushIntParam: + index := int(code[pc+1]) + arg = index + comment = dbg.paramNames[index] + case opSetLocal, opSetIntLocal, opPushLocal, opPushIntLocal, opIncLocal, opDecLocal: + index := int(code[pc+1]) + arg = index + comment = dbg.localNames[index] + case opPushConst: + arg = int(code[pc+1]) + comment = fmt.Sprintf("value=%#v", fn.constants[code[pc+1]]) + case opPushIntConst: + arg = int(code[pc+1]) + comment = fmt.Sprintf("value=%#v", fn.intConstants[code[pc+1]]) + case opJumpTrue, opJumpFalse, opJump: + offset := decode16(code, pc+1) + targetPC := pc + offset + arg = offset + comment = labels[targetPC] + } + + if comment != "" { + comment = " # " + comment + } + if arg == nil { + fmt.Fprintf(&out, " %s%s\n", op, comment) + } else { + fmt.Fprintf(&out, " %s %#v%s\n", op, arg, comment) + } + }) + + return out.String() +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/env.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/env.go new file mode 100644 index 000000000..0e2a450b1 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/env.go @@ -0,0 +1,42 @@ +package quasigo + +type funcKey struct { + qualifier string + name string +} + +func (k funcKey) String() string { + if k.qualifier != "" { + return k.qualifier + "." 
+ k.name + } + return k.name +} + +type nativeFunc struct { + mappedFunc func(*ValueStack) + name string // Needed for the readable disasm +} + +func newEnv() *Env { + return &Env{ + nameToNativeFuncID: make(map[funcKey]uint16), + nameToFuncID: make(map[funcKey]uint16), + + debug: newDebugInfo(), + } +} + +func (env *Env) addNativeFunc(key funcKey, f func(*ValueStack)) { + id := len(env.nativeFuncs) + env.nativeFuncs = append(env.nativeFuncs, nativeFunc{ + mappedFunc: f, + name: key.String(), + }) + env.nameToNativeFuncID[key] = uint16(id) +} + +func (env *Env) addFunc(key funcKey, f *Func) { + id := len(env.userFuncs) + env.userFuncs = append(env.userFuncs, f) + env.nameToFuncID[key] = uint16(id) +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/eval.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/eval.go new file mode 100644 index 000000000..afc000ea3 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/eval.go @@ -0,0 +1,239 @@ +package quasigo + +import ( + "fmt" + "reflect" +) + +const maxFuncLocals = 8 + +// pop2 removes the two top stack elements and returns them. +// +// Note that it returns the popped elements in the reverse order +// to make it easier to map the order in which they were pushed. +func (s *ValueStack) pop2() (second, top interface{}) { + x := s.objects[len(s.objects)-2] + y := s.objects[len(s.objects)-1] + s.objects = s.objects[:len(s.objects)-2] + return x, y +} + +func (s *ValueStack) popInt2() (second, top int) { + x := s.ints[len(s.ints)-2] + y := s.ints[len(s.ints)-1] + s.ints = s.ints[:len(s.ints)-2] + return x, y +} + +// top returns top of the stack without popping it. +func (s *ValueStack) top() interface{} { return s.objects[len(s.objects)-1] } + +func (s *ValueStack) topInt() int { return s.ints[len(s.ints)-1] } + +// dup copies the top stack element. +// Identical to s.Push(s.Top()), but more concise. +func (s *ValueStack) dup() { s.objects = append(s.objects, s.objects[len(s.objects)-1]) } + +// discard drops the top stack element. +// Identical to s.Pop() without using the result. 
+func (s *ValueStack) discard() { s.objects = s.objects[:len(s.objects)-1] } + +func eval(env *EvalEnv, fn *Func, args []interface{}) CallResult { + pc := 0 + code := fn.code + stack := env.stack + var locals [maxFuncLocals]interface{} + var intLocals [maxFuncLocals]int + + for { + switch op := opcode(code[pc]); op { + case opPushParam: + index := code[pc+1] + stack.Push(args[index]) + pc += 2 + case opPushIntParam: + index := code[pc+1] + stack.PushInt(args[index].(int)) + pc += 2 + + case opPushLocal: + index := code[pc+1] + stack.Push(locals[index]) + pc += 2 + case opPushIntLocal: + index := code[pc+1] + stack.PushInt(intLocals[index]) + pc += 2 + + case opSetLocal: + index := code[pc+1] + locals[index] = stack.Pop() + pc += 2 + case opSetIntLocal: + index := code[pc+1] + intLocals[index] = stack.PopInt() + pc += 2 + + case opIncLocal: + index := code[pc+1] + intLocals[index]++ + pc += 2 + case opDecLocal: + index := code[pc+1] + intLocals[index]-- + pc += 2 + + case opPop: + stack.discard() + pc++ + case opDup: + stack.dup() + pc++ + + case opPushConst: + id := code[pc+1] + stack.Push(fn.constants[id]) + pc += 2 + case opPushIntConst: + id := code[pc+1] + stack.PushInt(fn.intConstants[id]) + pc += 2 + + case opPushTrue: + stack.Push(true) + pc++ + case opPushFalse: + stack.Push(false) + pc++ + + case opReturnTrue: + return CallResult{value: true} + case opReturnFalse: + return CallResult{value: false} + case opReturnTop: + return CallResult{value: stack.top()} + case opReturnIntTop: + return CallResult{scalarValue: uint64(stack.topInt())} + + case opCallNative: + id := decode16(code, pc+1) + fn := env.nativeFuncs[id].mappedFunc + fn(stack) + pc += 3 + + case opJump: + offset := decode16(code, pc+1) + pc += offset + + case opJumpFalse: + if !stack.Pop().(bool) { + offset := decode16(code, pc+1) + pc += offset + } else { + pc += 3 + } + case opJumpTrue: + if stack.Pop().(bool) { + offset := decode16(code, pc+1) + pc += offset + } else { + pc += 3 + } + + case opNot: + stack.Push(!stack.Pop().(bool)) + pc++ + + case opConcat: + x, y := stack.pop2() + stack.Push(x.(string) + y.(string)) + pc++ + + case opAdd: + x, y := stack.popInt2() + stack.PushInt(x + y) + pc++ + + case opSub: + x, y := stack.popInt2() + stack.PushInt(x - y) + pc++ + + case opEqInt: + x, y := stack.popInt2() + stack.Push(x == y) + pc++ + + case opNotEqInt: + x, y := stack.popInt2() + stack.Push(x != y) + pc++ + + case opGtInt: + x, y := stack.popInt2() + stack.Push(x > y) + pc++ + + case opGtEqInt: + x, y := stack.popInt2() + stack.Push(x >= y) + pc++ + + case opLtInt: + x, y := stack.popInt2() + stack.Push(x < y) + pc++ + + case opLtEqInt: + x, y := stack.popInt2() + stack.Push(x <= y) + pc++ + + case opEqString: + x, y := stack.pop2() + stack.Push(x.(string) == y.(string)) + pc++ + + case opNotEqString: + x, y := stack.pop2() + stack.Push(x.(string) != y.(string)) + pc++ + + case opIsNil: + x := stack.Pop() + stack.Push(x == nil || reflect.ValueOf(x).IsNil()) + pc++ + + case opIsNotNil: + x := stack.Pop() + stack.Push(x != nil && !reflect.ValueOf(x).IsNil()) + pc++ + + case opStringSlice: + to := stack.PopInt() + from := stack.PopInt() + s := stack.Pop().(string) + stack.Push(s[from:to]) + pc++ + + case opStringSliceFrom: + from := stack.PopInt() + s := stack.Pop().(string) + stack.Push(s[from:]) + pc++ + + case opStringSliceTo: + to := stack.PopInt() + s := stack.Pop().(string) + stack.Push(s[:to]) + pc++ + + case opStringLen: + stack.PushInt(len(stack.Pop().(string))) + pc++ + + default: + 
panic(fmt.Sprintf("malformed bytecode: unexpected %s found", op)) + } + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go new file mode 100644 index 000000000..fde48b7cd --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go @@ -0,0 +1,184 @@ +// +build main + +package main + +import ( + "bytes" + "fmt" + "go/format" + "io/ioutil" + "log" + "strings" + "text/template" +) + +var opcodePrototypes = []opcodeProto{ + {"Pop", "op", "(value) -> ()"}, + {"Dup", "op", "(x) -> (x x)"}, + + {"PushParam", "op index:u8", "() -> (value)"}, + {"PushIntParam", "op index:u8", "() -> (value:int)"}, + {"PushLocal", "op index:u8", "() -> (value)"}, + {"PushIntLocal", "op index:u8", "() -> (value:int)"}, + {"PushFalse", "op", "() -> (false)"}, + {"PushTrue", "op", "() -> (true)"}, + {"PushConst", "op constid:u8", "() -> (const)"}, + {"PushIntConst", "op constid:u8", "() -> (const:int)"}, + + {"SetLocal", "op index:u8", "(value) -> ()"}, + {"SetIntLocal", "op index:u8", "(value:int) -> ()"}, + {"IncLocal", "op index:u8", stackUnchanged}, + {"DecLocal", "op index:u8", stackUnchanged}, + + {"ReturnTop", "op", "(value) -> (value)"}, + {"ReturnIntTop", "op", "(value) -> (value)"}, + {"ReturnFalse", "op", stackUnchanged}, + {"ReturnTrue", "op", stackUnchanged}, + + {"Jump", "op offset:i16", stackUnchanged}, + {"JumpFalse", "op offset:i16", "(cond:bool) -> ()"}, + {"JumpTrue", "op offset:i16", "(cond:bool) -> ()"}, + + {"CallNative", "op funcid:u16", "(args...) -> (results...)"}, + + {"IsNil", "op", "(value) -> (result:bool)"}, + {"IsNotNil", "op", "(value) -> (result:bool)"}, + + {"Not", "op", "(value:bool) -> (result:bool)"}, + + {"EqInt", "op", "(x:int y:int) -> (result:bool)"}, + {"NotEqInt", "op", "(x:int y:int) -> (result:bool)"}, + {"GtInt", "op", "(x:int y:int) -> (result:bool)"}, + {"GtEqInt", "op", "(x:int y:int) -> (result:bool)"}, + {"LtInt", "op", "(x:int y:int) -> (result:bool)"}, + {"LtEqInt", "op", "(x:int y:int) -> (result:bool)"}, + + {"EqString", "op", "(x:string y:string) -> (result:bool)"}, + {"NotEqString", "op", "(x:string y:string) -> (result:bool)"}, + + {"Concat", "op", "(x:string y:string) -> (result:string)"}, + {"Add", "op", "(x:int y:int) -> (result:int)"}, + {"Sub", "op", "(x:int y:int) -> (result:int)"}, + + {"StringSlice", "op", "(s:string from:int to:int) -> (result:string)"}, + {"StringSliceFrom", "op", "(s:string from:int) -> (result:string)"}, + {"StringSliceTo", "op", "(s:string to:int) -> (result:string)"}, + {"StringLen", "op", "(s:string) -> (result:int)"}, +} + +type opcodeProto struct { + name string + enc string + stack string +} + +type encodingInfo struct { + width int + parts int +} + +type opcodeInfo struct { + Opcode byte + Name string + Enc string + EncString string + Stack string + Width int +} + +const stackUnchanged = "" + +var fileTemplate = template.Must(template.New("opcodes.go").Parse(`// Code generated "gen_opcodes.go"; DO NOT EDIT. 
+ +package quasigo + +//go:generate stringer -type=opcode -trimprefix=op +type opcode byte + +const ( + opInvalid opcode = 0 +{{ range .Opcodes }} + // Encoding: {{.EncString}} + // Stack effect: {{ if .Stack}}{{.Stack}}{{else}}unchanged{{end}} + op{{ .Name }} opcode = {{.Opcode}} +{{ end -}} +) + +type opcodeInfo struct { + width int +} + +var opcodeInfoTable = [256]opcodeInfo{ + opInvalid: {width: 1}, + +{{ range .Opcodes -}} + op{{.Name}}: {width: {{.Width}}}, +{{ end }} +} +`)) + +func main() { + opcodes := make([]opcodeInfo, len(opcodePrototypes)) + for i, proto := range opcodePrototypes { + opcode := byte(i + 1) + encInfo := decodeEnc(proto.enc) + var encString string + if encInfo.parts == 1 { + encString = fmt.Sprintf("0x%02x (width=%d)", opcode, encInfo.width) + } else { + encString = fmt.Sprintf("0x%02x %s (width=%d)", + opcode, strings.TrimPrefix(proto.enc, "op "), encInfo.width) + } + + opcodes[i] = opcodeInfo{ + Opcode: opcode, + Name: proto.name, + Enc: proto.enc, + EncString: encString, + Stack: proto.stack, + Width: encInfo.width, + } + } + + var buf bytes.Buffer + err := fileTemplate.Execute(&buf, map[string]interface{}{ + "Opcodes": opcodes, + }) + if err != nil { + log.Panicf("execute template: %v", err) + } + writeFile("opcodes.gen.go", buf.Bytes()) +} + +func decodeEnc(enc string) encodingInfo { + fields := strings.Fields(enc) + width := 0 + for _, f := range fields { + parts := strings.Split(f, ":") + var typ string + if len(parts) == 2 { + typ = parts[1] + } else { + typ = "u8" + } + switch typ { + case "i8", "u8": + width++ + case "i16", "u16": + width += 2 + default: + panic(fmt.Sprintf("unknown op argument type: %s", typ)) + } + } + return encodingInfo{width: width, parts: len(fields)} +} + +func writeFile(filename string, data []byte) { + pretty, err := format.Source(data) + if err != nil { + log.Panicf("gofmt: %v", err) + } + if err := ioutil.WriteFile(filename, pretty, 0666); err != nil { + log.Panicf("write %s: %v", filename, err) + } +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcode_string.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcode_string.go new file mode 100644 index 000000000..27dfc1f67 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcode_string.go @@ -0,0 +1,63 @@ +// Code generated by "stringer -type=opcode -trimprefix=op"; DO NOT EDIT. + +package quasigo + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[opInvalid-0] + _ = x[opPop-1] + _ = x[opDup-2] + _ = x[opPushParam-3] + _ = x[opPushIntParam-4] + _ = x[opPushLocal-5] + _ = x[opPushIntLocal-6] + _ = x[opPushFalse-7] + _ = x[opPushTrue-8] + _ = x[opPushConst-9] + _ = x[opPushIntConst-10] + _ = x[opSetLocal-11] + _ = x[opSetIntLocal-12] + _ = x[opIncLocal-13] + _ = x[opDecLocal-14] + _ = x[opReturnTop-15] + _ = x[opReturnIntTop-16] + _ = x[opReturnFalse-17] + _ = x[opReturnTrue-18] + _ = x[opJump-19] + _ = x[opJumpFalse-20] + _ = x[opJumpTrue-21] + _ = x[opCallNative-22] + _ = x[opIsNil-23] + _ = x[opIsNotNil-24] + _ = x[opNot-25] + _ = x[opEqInt-26] + _ = x[opNotEqInt-27] + _ = x[opGtInt-28] + _ = x[opGtEqInt-29] + _ = x[opLtInt-30] + _ = x[opLtEqInt-31] + _ = x[opEqString-32] + _ = x[opNotEqString-33] + _ = x[opConcat-34] + _ = x[opAdd-35] + _ = x[opSub-36] + _ = x[opStringSlice-37] + _ = x[opStringSliceFrom-38] + _ = x[opStringSliceTo-39] + _ = x[opStringLen-40] +} + +const _opcode_name = "InvalidPopDupPushParamPushIntParamPushLocalPushIntLocalPushFalsePushTruePushConstPushIntConstSetLocalSetIntLocalIncLocalDecLocalReturnTopReturnIntTopReturnFalseReturnTrueJumpJumpFalseJumpTrueCallNativeIsNilIsNotNilNotEqIntNotEqIntGtIntGtEqIntLtIntLtEqIntEqStringNotEqStringConcatAddSubStringSliceStringSliceFromStringSliceToStringLen" + +var _opcode_index = [...]uint16{0, 7, 10, 13, 22, 34, 43, 55, 64, 72, 81, 93, 101, 112, 120, 128, 137, 149, 160, 170, 174, 183, 191, 201, 206, 214, 217, 222, 230, 235, 242, 247, 254, 262, 273, 279, 282, 285, 296, 311, 324, 333} + +func (i opcode) String() string { + if i >= opcode(len(_opcode_index)-1) { + return "opcode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _opcode_name[_opcode_index[i]:_opcode_index[i+1]] +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcodes.gen.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcodes.gen.go new file mode 100644 index 000000000..268b42a1e --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/opcodes.gen.go @@ -0,0 +1,219 @@ +// Code generated "gen_opcodes.go"; DO NOT EDIT. 
+ +package quasigo + +//go:generate stringer -type=opcode -trimprefix=op +type opcode byte + +const ( + opInvalid opcode = 0 + + // Encoding: 0x01 (width=1) + // Stack effect: (value) -> () + opPop opcode = 1 + + // Encoding: 0x02 (width=1) + // Stack effect: (x) -> (x x) + opDup opcode = 2 + + // Encoding: 0x03 index:u8 (width=2) + // Stack effect: () -> (value) + opPushParam opcode = 3 + + // Encoding: 0x04 index:u8 (width=2) + // Stack effect: () -> (value:int) + opPushIntParam opcode = 4 + + // Encoding: 0x05 index:u8 (width=2) + // Stack effect: () -> (value) + opPushLocal opcode = 5 + + // Encoding: 0x06 index:u8 (width=2) + // Stack effect: () -> (value:int) + opPushIntLocal opcode = 6 + + // Encoding: 0x07 (width=1) + // Stack effect: () -> (false) + opPushFalse opcode = 7 + + // Encoding: 0x08 (width=1) + // Stack effect: () -> (true) + opPushTrue opcode = 8 + + // Encoding: 0x09 constid:u8 (width=2) + // Stack effect: () -> (const) + opPushConst opcode = 9 + + // Encoding: 0x0a constid:u8 (width=2) + // Stack effect: () -> (const:int) + opPushIntConst opcode = 10 + + // Encoding: 0x0b index:u8 (width=2) + // Stack effect: (value) -> () + opSetLocal opcode = 11 + + // Encoding: 0x0c index:u8 (width=2) + // Stack effect: (value:int) -> () + opSetIntLocal opcode = 12 + + // Encoding: 0x0d index:u8 (width=2) + // Stack effect: unchanged + opIncLocal opcode = 13 + + // Encoding: 0x0e index:u8 (width=2) + // Stack effect: unchanged + opDecLocal opcode = 14 + + // Encoding: 0x0f (width=1) + // Stack effect: (value) -> (value) + opReturnTop opcode = 15 + + // Encoding: 0x10 (width=1) + // Stack effect: (value) -> (value) + opReturnIntTop opcode = 16 + + // Encoding: 0x11 (width=1) + // Stack effect: unchanged + opReturnFalse opcode = 17 + + // Encoding: 0x12 (width=1) + // Stack effect: unchanged + opReturnTrue opcode = 18 + + // Encoding: 0x13 offset:i16 (width=3) + // Stack effect: unchanged + opJump opcode = 19 + + // Encoding: 0x14 offset:i16 (width=3) + // Stack effect: (cond:bool) -> () + opJumpFalse opcode = 20 + + // Encoding: 0x15 offset:i16 (width=3) + // Stack effect: (cond:bool) -> () + opJumpTrue opcode = 21 + + // Encoding: 0x16 funcid:u16 (width=3) + // Stack effect: (args...) -> (results...) 
+ opCallNative opcode = 22 + + // Encoding: 0x17 (width=1) + // Stack effect: (value) -> (result:bool) + opIsNil opcode = 23 + + // Encoding: 0x18 (width=1) + // Stack effect: (value) -> (result:bool) + opIsNotNil opcode = 24 + + // Encoding: 0x19 (width=1) + // Stack effect: (value:bool) -> (result:bool) + opNot opcode = 25 + + // Encoding: 0x1a (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opEqInt opcode = 26 + + // Encoding: 0x1b (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opNotEqInt opcode = 27 + + // Encoding: 0x1c (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opGtInt opcode = 28 + + // Encoding: 0x1d (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opGtEqInt opcode = 29 + + // Encoding: 0x1e (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opLtInt opcode = 30 + + // Encoding: 0x1f (width=1) + // Stack effect: (x:int y:int) -> (result:bool) + opLtEqInt opcode = 31 + + // Encoding: 0x20 (width=1) + // Stack effect: (x:string y:string) -> (result:bool) + opEqString opcode = 32 + + // Encoding: 0x21 (width=1) + // Stack effect: (x:string y:string) -> (result:bool) + opNotEqString opcode = 33 + + // Encoding: 0x22 (width=1) + // Stack effect: (x:string y:string) -> (result:string) + opConcat opcode = 34 + + // Encoding: 0x23 (width=1) + // Stack effect: (x:int y:int) -> (result:int) + opAdd opcode = 35 + + // Encoding: 0x24 (width=1) + // Stack effect: (x:int y:int) -> (result:int) + opSub opcode = 36 + + // Encoding: 0x25 (width=1) + // Stack effect: (s:string from:int to:int) -> (result:string) + opStringSlice opcode = 37 + + // Encoding: 0x26 (width=1) + // Stack effect: (s:string from:int) -> (result:string) + opStringSliceFrom opcode = 38 + + // Encoding: 0x27 (width=1) + // Stack effect: (s:string to:int) -> (result:string) + opStringSliceTo opcode = 39 + + // Encoding: 0x28 (width=1) + // Stack effect: (s:string) -> (result:int) + opStringLen opcode = 40 +) + +type opcodeInfo struct { + width int +} + +var opcodeInfoTable = [256]opcodeInfo{ + opInvalid: {width: 1}, + + opPop: {width: 1}, + opDup: {width: 1}, + opPushParam: {width: 2}, + opPushIntParam: {width: 2}, + opPushLocal: {width: 2}, + opPushIntLocal: {width: 2}, + opPushFalse: {width: 1}, + opPushTrue: {width: 1}, + opPushConst: {width: 2}, + opPushIntConst: {width: 2}, + opSetLocal: {width: 2}, + opSetIntLocal: {width: 2}, + opIncLocal: {width: 2}, + opDecLocal: {width: 2}, + opReturnTop: {width: 1}, + opReturnIntTop: {width: 1}, + opReturnFalse: {width: 1}, + opReturnTrue: {width: 1}, + opJump: {width: 3}, + opJumpFalse: {width: 3}, + opJumpTrue: {width: 3}, + opCallNative: {width: 3}, + opIsNil: {width: 1}, + opIsNotNil: {width: 1}, + opNot: {width: 1}, + opEqInt: {width: 1}, + opNotEqInt: {width: 1}, + opGtInt: {width: 1}, + opGtEqInt: {width: 1}, + opLtInt: {width: 1}, + opLtEqInt: {width: 1}, + opEqString: {width: 1}, + opNotEqString: {width: 1}, + opConcat: {width: 1}, + opAdd: {width: 1}, + opSub: {width: 1}, + opStringSlice: {width: 1}, + opStringSliceFrom: {width: 1}, + opStringSliceTo: {width: 1}, + opStringLen: {width: 1}, +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/quasigo.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/quasigo.go new file mode 100644 index 000000000..7d457538d --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/quasigo.go @@ -0,0 +1,165 @@ +// Package quasigo implements a Go subset compiler and interpreter. 
+// +// The implementation details are not part of the contract of this package. +package quasigo + +import ( + "go/ast" + "go/token" + "go/types" +) + +// TODO(quasilyte): document what is thread-safe and what not. +// TODO(quasilyte): add a readme. + +// Env is used to hold both compilation and evaluation data. +type Env struct { + // TODO(quasilyte): store both native and user func ids in one map? + + nativeFuncs []nativeFunc + nameToNativeFuncID map[funcKey]uint16 + + userFuncs []*Func + nameToFuncID map[funcKey]uint16 + + // debug contains all information that is only needed + // for better debugging and compiled code introspection. + // Right now it's always enabled, but we may allow stripping it later. + debug *debugInfo +} + +// EvalEnv is a goroutine-local handle for Env. +// To get one, use Env.GetEvalEnv() method. +type EvalEnv struct { + nativeFuncs []nativeFunc + userFuncs []*Func + + stack *ValueStack +} + +// NewEnv creates a new empty environment. +func NewEnv() *Env { + return newEnv() +} + +// GetEvalEnv creates a new goroutine-local handle of env. +func (env *Env) GetEvalEnv() *EvalEnv { + return &EvalEnv{ + nativeFuncs: env.nativeFuncs, + userFuncs: env.userFuncs, + stack: &ValueStack{ + objects: make([]interface{}, 0, 32), + ints: make([]int, 0, 16), + }, + } +} + +// AddNativeMethod binds `$typeName.$methodName` symbol with f. +// A typeName should be fully qualified, like `github.com/user/pkgname.TypeName`. +// It method is defined only on pointer type, the typeName should start with `*`. +func (env *Env) AddNativeMethod(typeName, methodName string, f func(*ValueStack)) { + env.addNativeFunc(funcKey{qualifier: typeName, name: methodName}, f) +} + +// AddNativeFunc binds `$pkgPath.$funcName` symbol with f. +// A pkgPath should be a full package path in which funcName is defined. +func (env *Env) AddNativeFunc(pkgPath, funcName string, f func(*ValueStack)) { + env.addNativeFunc(funcKey{qualifier: pkgPath, name: funcName}, f) +} + +// AddFunc binds `$pkgPath.$funcName` symbol with f. +func (env *Env) AddFunc(pkgPath, funcName string, f *Func) { + env.addFunc(funcKey{qualifier: pkgPath, name: funcName}, f) +} + +// GetFunc finds previously bound function searching for the `$pkgPath.$funcName` symbol. +func (env *Env) GetFunc(pkgPath, funcName string) *Func { + id := env.nameToFuncID[funcKey{qualifier: pkgPath, name: funcName}] + return env.userFuncs[id] +} + +// CompileContext is used to provide necessary data to the compiler. +type CompileContext struct { + // Env is shared environment that should be used for all functions + // being compiled; then it should be used to execute these functions. + Env *Env + + Types *types.Info + Fset *token.FileSet +} + +// Compile prepares an executable version of fn. +func Compile(ctx *CompileContext, fn *ast.FuncDecl) (compiled *Func, err error) { + return compile(ctx, fn) +} + +// Call invokes a given function with provided arguments. +func Call(env *EvalEnv, fn *Func, args ...interface{}) CallResult { + env.stack.objects = env.stack.objects[:0] + env.stack.ints = env.stack.ints[:0] + return eval(env, fn, args) +} + +// CallResult is a return value of Call function. +// For most functions, Value() should be called to get the actual result. +// For int-typed functions, IntValue() should be used instead. +type CallResult struct { + value interface{} + scalarValue uint64 +} + +// Value unboxes an actual call return value. +// For int results, use IntValue(). 
+func (res CallResult) Value() interface{} { return res.value } + +// IntValue unboxes an actual call return value. +func (res CallResult) IntValue() int { return int(res.scalarValue) } + +// Disasm returns the compiled function disassembly text. +// This output is not guaranteed to be stable between versions +// and should be used only for debugging purposes. +func Disasm(env *Env, fn *Func) string { + return disasm(env, fn) +} + +// Func is a compiled function that is ready to be executed. +type Func struct { + code []byte + + constants []interface{} + intConstants []int +} + +// ValueStack is used to manipulate runtime values during the evaluation. +// Function arguments are pushed to the stack. +// Function results are returned via stack as well. +// +// For the sake of efficiency, it stores different types separately. +// If int was pushed with PushInt(), it should be retrieved by PopInt(). +// It's a bad idea to do a Push() and then PopInt() and vice-versa. +type ValueStack struct { + objects []interface{} + ints []int +} + +// Pop removes the top stack element and returns it. +// Important: for int-typed values, use PopInt. +func (s *ValueStack) Pop() interface{} { + x := s.objects[len(s.objects)-1] + s.objects = s.objects[:len(s.objects)-1] + return x +} + +// PopInt removes the top stack element and returns it. +func (s *ValueStack) PopInt() int { + x := s.ints[len(s.ints)-1] + s.ints = s.ints[:len(s.ints)-1] + return x +} + +// Push adds x to the stack. +// Important: for int-typed values, use PushInt. +func (s *ValueStack) Push(x interface{}) { s.objects = append(s.objects, x) } + +// PushInt adds x to the stack. +func (s *ValueStack) PushInt(x int) { s.ints = append(s.ints, x) } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/utils.go new file mode 100644 index 000000000..a5c3676a4 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/utils.go @@ -0,0 +1,60 @@ +package quasigo + +import ( + "encoding/binary" + "go/ast" + "go/types" +) + +func pickOp(cond bool, ifTrue, otherwise opcode) opcode { + if cond { + return ifTrue + } + return otherwise +} + +func put16(code []byte, pos, value int) { + binary.LittleEndian.PutUint16(code[pos:], uint16(value)) +} + +func decode16(code []byte, pos int) int { + return int(int16(binary.LittleEndian.Uint16(code[pos:]))) +} + +func typeIsInt(typ types.Type) bool { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + return false + } + switch basic.Kind() { + case types.Int, types.UntypedInt: + return true + default: + return false + } +} + +func typeIsString(typ types.Type) bool { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + return false + } + return basic.Info()&types.IsString != 0 +} + +func walkBytecode(code []byte, fn func(pc int, op opcode)) { + pc := 0 + for pc < len(code) { + op := opcode(code[pc]) + fn(pc, op) + pc += opcodeInfoTable[op].width + } +} + +func identName(n ast.Expr) string { + id, ok := n.(*ast.Ident) + if ok { + return id.Name + } + return "" +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go new file mode 100644 index 000000000..ba23861a2 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go @@ -0,0 +1,87 @@ +package ruleguard + +import ( + "go/ast" + "go/token" + "go/types" + "io" +) + +// Engine is the main ruleguard package API object. 
+// +// First, load some ruleguard files with Load() to build a rule set. +// Then use Run() to execute the rules. +// +// It's advised to have only 1 engine per application as it does a lot of caching. +// The Run() method is synchronized, so it can be used concurrently. +// +// An Engine must be created with NewEngine() function. +type Engine struct { + impl *engine +} + +// NewEngine creates an engine with empty rule set. +func NewEngine() *Engine { + return &Engine{impl: newEngine()} +} + +// Load reads a ruleguard file from r and adds it to the engine rule set. +// +// Load() is not thread-safe, especially if used concurrently with Run() method. +// It's advised to Load() all ruleguard files under a critical section (like sync.Once) +// and then use Run() to execute all of them. +func (e *Engine) Load(ctx *ParseContext, filename string, r io.Reader) error { + return e.impl.Load(ctx, filename, r) +} + +// Run executes all loaded rules on a given file. +// Matched rules invoke `RunContext.Report()` method. +// +// Run() is thread-safe, unless used in parallel with Load(), +// which modifies the engine state. +func (e *Engine) Run(ctx *RunContext, f *ast.File) error { + return e.impl.Run(ctx, f) +} + +type ParseContext struct { + DebugFilter string + DebugImports bool + DebugPrint func(string) + + // GroupFilter is called for every rule group being parsed. + // If function returns false, that group will not be included + // in the resulting rules set. + // Nil filter accepts all rule groups. + GroupFilter func(string) bool + + Fset *token.FileSet +} + +type RunContext struct { + Debug string + DebugImports bool + DebugPrint func(string) + + Types *types.Info + Sizes types.Sizes + Fset *token.FileSet + Report func(rule GoRuleInfo, n ast.Node, msg string, s *Suggestion) + Pkg *types.Package +} + +type Suggestion struct { + From token.Pos + To token.Pos + Replacement []byte +} + +type GoRuleInfo struct { + // Filename is a file that defined this rule. + Filename string + + // Line is a line inside a file that defined this rule. + Line int + + // Group is a function name that contained this rule. 
+ Group string +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go new file mode 100644 index 000000000..a5d254411 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go @@ -0,0 +1,349 @@ +package ruleguard + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "io/ioutil" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/quasilyte/go-ruleguard/internal/gogrep" + "github.com/quasilyte/go-ruleguard/nodetag" + "github.com/quasilyte/go-ruleguard/ruleguard/goutil" +) + +type rulesRunner struct { + state *engineState + + ctx *RunContext + rules *goRuleSet + + importer *goImporter + + filename string + src []byte + + filterParams filterParams +} + +func newRulesRunner(ctx *RunContext, state *engineState, rules *goRuleSet) *rulesRunner { + importer := newGoImporter(state, goImporterConfig{ + fset: ctx.Fset, + debugImports: ctx.DebugImports, + debugPrint: ctx.DebugPrint, + }) + rr := &rulesRunner{ + ctx: ctx, + importer: importer, + rules: rules, + filterParams: filterParams{ + env: state.env.GetEvalEnv(), + importer: importer, + ctx: ctx, + }, + } + rr.filterParams.nodeText = rr.nodeText + return rr +} + +func (rr *rulesRunner) nodeText(n ast.Node) []byte { + if gogrep.IsEmptyNodeSlice(n) { + return nil + } + + from := rr.ctx.Fset.Position(n.Pos()).Offset + to := rr.ctx.Fset.Position(n.End()).Offset + src := rr.fileBytes() + if (from >= 0 && from < len(src)) && (to >= 0 && to < len(src)) { + return src[from:to] + } + + // Go printer would panic on comments. + if n, ok := n.(*ast.Comment); ok { + return []byte(n.Text) + } + + // Fallback to the printer. + var buf bytes.Buffer + if err := printer.Fprint(&buf, rr.ctx.Fset, n); err != nil { + panic(err) + } + return buf.Bytes() +} + +func (rr *rulesRunner) fileBytes() []byte { + if rr.src != nil { + return rr.src + } + + // TODO(quasilyte): re-use src slice? + src, err := ioutil.ReadFile(rr.filename) + if err != nil || src == nil { + // Assign a zero-length slice so rr.src + // is never nil during the second fileBytes call. + rr.src = make([]byte, 0) + } else { + rr.src = src + } + return rr.src +} + +func (rr *rulesRunner) run(f *ast.File) error { + // TODO(quasilyte): run local rules as well. + + rr.filename = rr.ctx.Fset.Position(f.Pos()).Filename + rr.filterParams.filename = rr.filename + rr.collectImports(f) + + if rr.rules.universal.categorizedNum != 0 { + ast.Inspect(f, func(n ast.Node) bool { + if n == nil { + return false + } + rr.runRules(n) + return true + }) + } + + if len(rr.rules.universal.commentRules) != 0 { + for _, commentGroup := range f.Comments { + for _, comment := range commentGroup.List { + rr.runCommentRules(comment) + } + } + } + + return nil +} + +func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { + // We'll need that file to create a token.Pos from the artificial offset. + file := rr.ctx.Fset.File(comment.Pos()) + + for _, rule := range rr.rules.universal.commentRules { + var m commentMatchData + if rule.captureGroups { + result := rule.pat.FindStringSubmatchIndex(comment.Text) + if result == nil { + continue + } + for i, name := range rule.pat.SubexpNames() { + if i == 0 || name == "" { + continue + } + resultIndex := i * 2 + beginPos := result[resultIndex+0] + endPos := result[resultIndex+1] + // Negative index a special case when named group captured nothing. + // Consider this pattern: `(?Pfoo)|(bar)`. + // If we have `bar` input string, will remain empty. 
+ if beginPos < 0 || endPos < 0 { + m.capture = append(m.capture, gogrep.CapturedNode{ + Name: name, + Node: &ast.Comment{Slash: comment.Pos()}, + }) + continue + } + m.capture = append(m.capture, gogrep.CapturedNode{ + Name: name, + Node: &ast.Comment{ + Slash: file.Pos(beginPos + file.Offset(comment.Pos())), + Text: comment.Text[beginPos:endPos], + }, + }) + } + m.node = &ast.Comment{ + Slash: file.Pos(result[0] + file.Offset(comment.Pos())), + Text: comment.Text[result[0]:result[1]], + } + } else { + // Fast path: no need to save any submatches. + result := rule.pat.FindStringIndex(comment.Text) + if result == nil { + continue + } + m.node = &ast.Comment{ + Slash: file.Pos(result[0] + file.Offset(comment.Pos())), + Text: comment.Text[result[0]:result[1]], + } + } + + accept := rr.handleCommentMatch(rule, m) + if accept { + break + } + } +} + +func (rr *rulesRunner) runRules(n ast.Node) { + tag := nodetag.FromNode(n) + for _, rule := range rr.rules.universal.rulesByTag[tag] { + matched := false + rule.pat.MatchNode(n, func(m gogrep.MatchData) { + matched = rr.handleMatch(rule, m) + }) + if matched { + break + } + } +} + +func (rr *rulesRunner) reject(rule goRule, reason string, m matchData) { + if rule.group != rr.ctx.Debug { + return // This rule is not being debugged + } + + pos := rr.ctx.Fset.Position(m.Node().Pos()) + rr.ctx.DebugPrint(fmt.Sprintf("%s:%d: [%s:%d] rejected by %s", + pos.Filename, pos.Line, filepath.Base(rule.filename), rule.line, reason)) + + values := make([]gogrep.CapturedNode, len(m.CaptureList())) + copy(values, m.CaptureList()) + sort.Slice(values, func(i, j int) bool { + return values[i].Name < values[j].Name + }) + + for _, v := range values { + name := v.Name + node := v.Node + + if comment, ok := node.(*ast.Comment); ok { + s := strings.ReplaceAll(comment.Text, "\n", `\n`) + rr.ctx.DebugPrint(fmt.Sprintf(" $%s: %s", name, s)) + continue + } + + var expr ast.Expr + switch node := node.(type) { + case ast.Expr: + expr = node + case *ast.ExprStmt: + expr = node.X + default: + continue + } + + typ := rr.ctx.Types.TypeOf(expr) + typeString := "" + if typ != nil { + typeString = typ.String() + } + s := strings.ReplaceAll(goutil.SprintNode(rr.ctx.Fset, expr), "\n", `\n`) + rr.ctx.DebugPrint(fmt.Sprintf(" $%s %s: %s", name, typeString, s)) + } +} + +func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m commentMatchData) bool { + if rule.base.filter.fn != nil { + rr.filterParams.match = m + filterResult := rule.base.filter.fn(&rr.filterParams) + if !filterResult.Matched() { + rr.reject(rule.base, filterResult.RejectReason(), m) + return false + } + } + + message := rr.renderMessage(rule.base.msg, m, true) + node := m.Node() + if rule.base.location != "" { + node, _ = m.CapturedByName(rule.base.location) + } + var suggestion *Suggestion + if rule.base.suggestion != "" { + suggestion = &Suggestion{ + Replacement: []byte(rr.renderMessage(rule.base.suggestion, m, false)), + From: node.Pos(), + To: node.End(), + } + } + info := GoRuleInfo{ + Group: rule.base.group, + Filename: rule.base.filename, + Line: rule.base.line, + } + rr.ctx.Report(info, node, message, suggestion) + return true +} + +func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool { + if rule.filter.fn != nil { + rr.filterParams.match = astMatchData{match: m} + filterResult := rule.filter.fn(&rr.filterParams) + if !filterResult.Matched() { + rr.reject(rule, filterResult.RejectReason(), astMatchData{match: m}) + return false + } + } + + message := rr.renderMessage(rule.msg, 
astMatchData{match: m}, true) + node := m.Node + if rule.location != "" { + node, _ = m.CapturedByName(rule.location) + } + var suggestion *Suggestion + if rule.suggestion != "" { + suggestion = &Suggestion{ + Replacement: []byte(rr.renderMessage(rule.suggestion, astMatchData{match: m}, false)), + From: node.Pos(), + To: node.End(), + } + } + info := GoRuleInfo{ + Group: rule.group, + Filename: rule.filename, + Line: rule.line, + } + rr.ctx.Report(info, node, message, suggestion) + return true +} + +func (rr *rulesRunner) collectImports(f *ast.File) { + rr.filterParams.imports = make(map[string]struct{}, len(f.Imports)) + for _, spec := range f.Imports { + s, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue + } + rr.filterParams.imports[s] = struct{}{} + } +} + +func (rr *rulesRunner) renderMessage(msg string, m matchData, truncate bool) string { + var buf strings.Builder + if strings.Contains(msg, "$$") { + buf.Write(rr.nodeText(m.Node())) + msg = strings.ReplaceAll(msg, "$$", buf.String()) + } + if len(m.CaptureList()) == 0 { + return msg + } + + capture := make([]gogrep.CapturedNode, len(m.CaptureList())) + copy(capture, m.CaptureList()) + sort.Slice(capture, func(i, j int) bool { + return len(capture[i].Name) > len(capture[j].Name) + }) + + for _, c := range capture { + n := c.Node + key := "$" + c.Name + if !strings.Contains(msg, key) { + continue + } + buf.Reset() + buf.Write(rr.nodeText(n)) + // Don't interpolate strings that are too long. + var replacement string + if truncate && buf.Len() > 60 { + replacement = key + } else { + replacement = buf.String() + } + msg = strings.ReplaceAll(msg, key, replacement) + } + return msg +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/patternop_string.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/patternop_string.go new file mode 100644 index 000000000..1d739819d --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/patternop_string.go @@ -0,0 +1,34 @@ +// Code generated by "stringer -type=patternOp"; DO NOT EDIT. + +package typematch + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[opBuiltinType-0] + _ = x[opPointer-1] + _ = x[opVar-2] + _ = x[opVarSeq-3] + _ = x[opSlice-4] + _ = x[opArray-5] + _ = x[opMap-6] + _ = x[opChan-7] + _ = x[opFunc-8] + _ = x[opStructNoSeq-9] + _ = x[opStruct-10] + _ = x[opNamed-11] +} + +const _patternOp_name = "opBuiltinTypeopPointeropVaropVarSeqopSliceopArrayopMapopChanopFuncopStructNoSeqopStructopNamed" + +var _patternOp_index = [...]uint8{0, 13, 22, 27, 35, 42, 49, 54, 60, 66, 79, 87, 94} + +func (i patternOp) String() string { + if i < 0 || i >= patternOp(len(_patternOp_index)-1) { + return "patternOp(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _patternOp_name[_patternOp_index[i]:_patternOp_index[i+1]] +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go new file mode 100644 index 000000000..19391ecd4 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go @@ -0,0 +1,536 @@ +package typematch + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "strconv" + "strings" + + "github.com/quasilyte/go-ruleguard/internal/xtypes" +) + +//go:generate stringer -type=patternOp +type patternOp int + +const ( + opBuiltinType patternOp = iota + opPointer + opVar + opVarSeq + opSlice + opArray + opMap + opChan + opFunc + opStructNoSeq + opStruct + opNamed +) + +type Pattern struct { + typeMatches map[string]types.Type + int64Matches map[string]int64 + + root *pattern +} + +type pattern struct { + value interface{} + op patternOp + subs []*pattern +} + +func (pat pattern) String() string { + if len(pat.subs) == 0 { + return fmt.Sprintf("<%s %#v>", pat.op, pat.value) + } + parts := make([]string, len(pat.subs)) + for i, sub := range pat.subs { + parts[i] = sub.String() + } + return fmt.Sprintf("<%s %#v (%s)>", pat.op, pat.value, strings.Join(parts, ", ")) +} + +type ImportsTab struct { + imports []map[string]string +} + +func NewImportsTab(initial map[string]string) *ImportsTab { + return &ImportsTab{imports: []map[string]string{initial}} +} + +func (itab *ImportsTab) Lookup(pkgName string) (string, bool) { + for i := len(itab.imports) - 1; i >= 0; i-- { + pkgPath, ok := itab.imports[i][pkgName] + if ok { + return pkgPath, true + } + } + return "", false +} + +func (itab *ImportsTab) Load(pkgName, pkgPath string) { + itab.imports[len(itab.imports)-1][pkgName] = pkgPath +} + +func (itab *ImportsTab) EnterScope() { + itab.imports = append(itab.imports, map[string]string{}) +} + +func (itab *ImportsTab) LeaveScope() { + itab.imports = itab.imports[:len(itab.imports)-1] +} + +type Context struct { + Itab *ImportsTab +} + +const ( + varPrefix = `ᐸvarᐳ` + varSeqPrefix = `ᐸvar_seqᐳ` +) + +func Parse(ctx *Context, s string) (*Pattern, error) { + noDollars := strings.ReplaceAll(s, "$*", varSeqPrefix) + noDollars = strings.ReplaceAll(noDollars, "$", varPrefix) + n, err := parser.ParseExpr(noDollars) + if err != nil { + return nil, err + } + root := parseExpr(ctx, n) + if root == nil { + return nil, fmt.Errorf("can't convert %s type expression", s) + } + p := &Pattern{ + typeMatches: map[string]types.Type{}, + int64Matches: map[string]int64{}, + root: root, + } + return p, nil +} + +var ( + builtinTypeByName = map[string]types.Type{ + "bool": types.Typ[types.Bool], + "int": types.Typ[types.Int], + "int8": types.Typ[types.Int8], + "int16": types.Typ[types.Int16], + "int32": types.Typ[types.Int32], + "int64": types.Typ[types.Int64], + "uint": 
types.Typ[types.Uint], + "uint8": types.Typ[types.Uint8], + "uint16": types.Typ[types.Uint16], + "uint32": types.Typ[types.Uint32], + "uint64": types.Typ[types.Uint64], + "uintptr": types.Typ[types.Uintptr], + "float32": types.Typ[types.Float32], + "float64": types.Typ[types.Float64], + "complex64": types.Typ[types.Complex64], + "complex128": types.Typ[types.Complex128], + "string": types.Typ[types.String], + + "error": types.Universe.Lookup("error").Type(), + + // Aliases. + "byte": types.Typ[types.Uint8], + "rune": types.Typ[types.Int32], + } + + efaceType = types.NewInterfaceType(nil, nil) +) + +func parseExpr(ctx *Context, e ast.Expr) *pattern { + switch e := e.(type) { + case *ast.Ident: + basic, ok := builtinTypeByName[e.Name] + if ok { + return &pattern{op: opBuiltinType, value: basic} + } + if strings.HasPrefix(e.Name, varPrefix) { + name := strings.TrimPrefix(e.Name, varPrefix) + return &pattern{op: opVar, value: name} + } + if strings.HasPrefix(e.Name, varSeqPrefix) { + name := strings.TrimPrefix(e.Name, varSeqPrefix) + // Only unnamed seq are supported right now. + if name == "_" { + return &pattern{op: opVarSeq, value: name} + } + } + + case *ast.SelectorExpr: + pkg, ok := e.X.(*ast.Ident) + if !ok { + return nil + } + pkgPath, ok := ctx.Itab.Lookup(pkg.Name) + if !ok { + return nil + } + return &pattern{op: opNamed, value: [2]string{pkgPath, e.Sel.Name}} + + case *ast.StarExpr: + elem := parseExpr(ctx, e.X) + if elem == nil { + return nil + } + return &pattern{op: opPointer, subs: []*pattern{elem}} + + case *ast.ArrayType: + elem := parseExpr(ctx, e.Elt) + if elem == nil { + return nil + } + if e.Len == nil { + return &pattern{ + op: opSlice, + subs: []*pattern{elem}, + } + } + if id, ok := e.Len.(*ast.Ident); ok && strings.HasPrefix(id.Name, varPrefix) { + name := strings.TrimPrefix(id.Name, varPrefix) + return &pattern{ + op: opArray, + value: name, + subs: []*pattern{elem}, + } + } + lit, ok := e.Len.(*ast.BasicLit) + if !ok || lit.Kind != token.INT { + return nil + } + length, err := strconv.ParseInt(lit.Value, 10, 64) + if err != nil { + return nil + } + return &pattern{ + op: opArray, + value: length, + subs: []*pattern{elem}, + } + + case *ast.MapType: + keyType := parseExpr(ctx, e.Key) + if keyType == nil { + return nil + } + valType := parseExpr(ctx, e.Value) + if valType == nil { + return nil + } + return &pattern{ + op: opMap, + subs: []*pattern{keyType, valType}, + } + + case *ast.ChanType: + valType := parseExpr(ctx, e.Value) + if valType == nil { + return nil + } + var dir types.ChanDir + switch { + case e.Dir&ast.SEND != 0 && e.Dir&ast.RECV != 0: + dir = types.SendRecv + case e.Dir&ast.SEND != 0: + dir = types.SendOnly + case e.Dir&ast.RECV != 0: + dir = types.RecvOnly + default: + return nil + } + return &pattern{ + op: opChan, + value: dir, + subs: []*pattern{valType}, + } + + case *ast.ParenExpr: + return parseExpr(ctx, e.X) + + case *ast.FuncType: + var params []*pattern + var results []*pattern + if e.Params != nil { + for _, field := range e.Params.List { + p := parseExpr(ctx, field.Type) + if p == nil { + return nil + } + if len(field.Names) != 0 { + return nil + } + params = append(params, p) + } + } + if e.Results != nil { + for _, field := range e.Results.List { + p := parseExpr(ctx, field.Type) + if p == nil { + return nil + } + if len(field.Names) != 0 { + return nil + } + results = append(results, p) + } + } + return &pattern{ + op: opFunc, + value: len(params), + subs: append(params, results...), + } + + case *ast.StructType: + hasSeq := false + 
members := make([]*pattern, 0, len(e.Fields.List)) + for _, field := range e.Fields.List { + p := parseExpr(ctx, field.Type) + if p == nil { + return nil + } + if len(field.Names) != 0 { + return nil + } + if p.op == opVarSeq { + hasSeq = true + } + members = append(members, p) + } + op := opStructNoSeq + if hasSeq { + op = opStruct + } + return &pattern{ + op: op, + subs: members, + } + + case *ast.InterfaceType: + if len(e.Methods.List) == 0 { + return &pattern{op: opBuiltinType, value: efaceType} + } + } + + return nil +} + +// MatchIdentical returns true if the go typ matches pattern p. +func (p *Pattern) MatchIdentical(typ types.Type) bool { + p.reset() + return p.matchIdentical(p.root, typ) +} + +func (p *Pattern) reset() { + if len(p.int64Matches) != 0 { + p.int64Matches = map[string]int64{} + } + if len(p.typeMatches) != 0 { + p.typeMatches = map[string]types.Type{} + } +} + +func (p *Pattern) matchIdenticalFielder(subs []*pattern, f fielder) bool { + // TODO: do backtracking. + + numFields := f.NumFields() + fieldsMatched := 0 + + if len(subs) == 0 && numFields != 0 { + return false + } + + matchAny := false + + i := 0 + for i < len(subs) { + pat := subs[i] + + if pat.op == opVarSeq { + matchAny = true + } + + fieldsLeft := numFields - fieldsMatched + if matchAny { + switch { + // "Nothing left to match" stop condition. + case fieldsLeft == 0: + matchAny = false + i++ + // Lookahead for non-greedy matching. + case i+1 < len(subs) && p.matchIdentical(subs[i+1], f.Field(fieldsMatched).Type()): + matchAny = false + i += 2 + fieldsMatched++ + default: + fieldsMatched++ + } + continue + } + + if fieldsLeft == 0 || !p.matchIdentical(pat, f.Field(fieldsMatched).Type()) { + return false + } + i++ + fieldsMatched++ + } + + return numFields == fieldsMatched +} + +func (p *Pattern) matchIdentical(sub *pattern, typ types.Type) bool { + switch sub.op { + case opVar: + name := sub.value.(string) + if name == "_" { + return true + } + y, ok := p.typeMatches[name] + if !ok { + p.typeMatches[name] = typ + return true + } + if y == nil { + return typ == nil + } + return xtypes.Identical(typ, y) + + case opBuiltinType: + return xtypes.Identical(typ, sub.value.(types.Type)) + + case opPointer: + typ, ok := typ.(*types.Pointer) + if !ok { + return false + } + return p.matchIdentical(sub.subs[0], typ.Elem()) + + case opSlice: + typ, ok := typ.(*types.Slice) + if !ok { + return false + } + return p.matchIdentical(sub.subs[0], typ.Elem()) + + case opArray: + typ, ok := typ.(*types.Array) + if !ok { + return false + } + var wantLen int64 + switch v := sub.value.(type) { + case string: + if v == "_" { + wantLen = typ.Len() + break + } + length, ok := p.int64Matches[v] + if ok { + wantLen = length + } else { + p.int64Matches[v] = typ.Len() + wantLen = typ.Len() + } + case int64: + wantLen = v + } + return wantLen == typ.Len() && p.matchIdentical(sub.subs[0], typ.Elem()) + + case opMap: + typ, ok := typ.(*types.Map) + if !ok { + return false + } + return p.matchIdentical(sub.subs[0], typ.Key()) && + p.matchIdentical(sub.subs[1], typ.Elem()) + + case opChan: + typ, ok := typ.(*types.Chan) + if !ok { + return false + } + dir := sub.value.(types.ChanDir) + return dir == typ.Dir() && p.matchIdentical(sub.subs[0], typ.Elem()) + + case opNamed: + typ, ok := typ.(*types.Named) + if !ok { + return false + } + obj := typ.Obj() + pkg := obj.Pkg() + // pkg can be nil for builtin named types. + // There is no point in checking anything else as we never + // generate the opNamed for such types. 
+ if pkg == nil { + return false + } + pkgPath := sub.value.([2]string)[0] + typeName := sub.value.([2]string)[1] + // obj.Pkg().Path() may be in a vendor directory. + path := strings.SplitAfter(obj.Pkg().Path(), "/vendor/") + return path[len(path)-1] == pkgPath && typeName == obj.Name() + + case opFunc: + typ, ok := typ.(*types.Signature) + if !ok { + return false + } + numParams := sub.value.(int) + params := sub.subs[:numParams] + results := sub.subs[numParams:] + if typ.Params().Len() != len(params) { + return false + } + if typ.Results().Len() != len(results) { + return false + } + for i := 0; i < typ.Params().Len(); i++ { + if !p.matchIdentical(params[i], typ.Params().At(i).Type()) { + return false + } + } + for i := 0; i < typ.Results().Len(); i++ { + if !p.matchIdentical(results[i], typ.Results().At(i).Type()) { + return false + } + } + return true + + case opStructNoSeq: + typ, ok := typ.(*types.Struct) + if !ok { + return false + } + if typ.NumFields() != len(sub.subs) { + return false + } + for i, member := range sub.subs { + if !p.matchIdentical(member, typ.Field(i).Type()) { + return false + } + } + return true + + case opStruct: + typ, ok := typ.(*types.Struct) + if !ok { + return false + } + if !p.matchIdenticalFielder(sub.subs, typ) { + return false + } + return true + + default: + return false + } +} + +type fielder interface { + Field(i int) *types.Var + NumFields() int +} diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go new file mode 100644 index 000000000..de3bb04c3 --- /dev/null +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go @@ -0,0 +1,251 @@ +package ruleguard + +import ( + "go/ast" + "go/constant" + "go/parser" + "go/token" + "go/types" + "regexp/syntax" + "strconv" + "strings" +) + +func regexpHasCaptureGroups(pattern string) bool { + // regexp.Compile() uses syntax.Perl flags, so + // we use the same flags here. + re, err := syntax.Parse(pattern, syntax.Perl) + if err != nil { + return true // true is more conservative than false + } + + found := false + + var walkRegexp func(*syntax.Regexp) + walkRegexp = func(re *syntax.Regexp) { + if found { + return + } + // OpCapture handles both named and unnamed capture groups. + if re.Op == syntax.OpCapture { + found = true + return + } + for _, sub := range re.Sub { + walkRegexp(sub) + } + } + walkRegexp(re) + + return found +} + +func findDependency(pkg *types.Package, path string) *types.Package { + if pkg.Path() == path { + return pkg + } + // It looks like indirect dependencies are always incomplete? + // If it's true, then we don't have to recurse here. + for _, imported := range pkg.Imports() { + if dep := findDependency(imported, path); dep != nil && dep.Complete() { + return dep + } + } + return nil +} + +var typeByName = map[string]types.Type{ + // Predeclared types. 
+ `error`: types.Universe.Lookup("error").Type(), + `bool`: types.Typ[types.Bool], + `int`: types.Typ[types.Int], + `int8`: types.Typ[types.Int8], + `int16`: types.Typ[types.Int16], + `int32`: types.Typ[types.Int32], + `int64`: types.Typ[types.Int64], + `uint`: types.Typ[types.Uint], + `uint8`: types.Typ[types.Uint8], + `uint16`: types.Typ[types.Uint16], + `uint32`: types.Typ[types.Uint32], + `uint64`: types.Typ[types.Uint64], + `uintptr`: types.Typ[types.Uintptr], + `string`: types.Typ[types.String], + `float32`: types.Typ[types.Float32], + `float64`: types.Typ[types.Float64], + `complex64`: types.Typ[types.Complex64], + `complex128`: types.Typ[types.Complex128], + + // Predeclared aliases (provided for convenience). + `byte`: types.Typ[types.Uint8], + `rune`: types.Typ[types.Int32], +} + +func typeFromString(s string) (types.Type, error) { + s = strings.ReplaceAll(s, "?", "__any") + + n, err := parser.ParseExpr(s) + if err != nil { + return nil, err + } + return typeFromNode(n), nil +} + +func typeFromNode(e ast.Expr) types.Type { + switch e := e.(type) { + case *ast.Ident: + typ, ok := typeByName[e.Name] + if ok { + return typ + } + + case *ast.ArrayType: + elem := typeFromNode(e.Elt) + if elem == nil { + return nil + } + if e.Len == nil { + return types.NewSlice(elem) + } + lit, ok := e.Len.(*ast.BasicLit) + if !ok || lit.Kind != token.INT { + return nil + } + length, err := strconv.Atoi(lit.Value) + if err != nil { + return nil + } + return types.NewArray(elem, int64(length)) + + case *ast.MapType: + keyType := typeFromNode(e.Key) + if keyType == nil { + return nil + } + valType := typeFromNode(e.Value) + if valType == nil { + return nil + } + return types.NewMap(keyType, valType) + + case *ast.StarExpr: + typ := typeFromNode(e.X) + if typ != nil { + return types.NewPointer(typ) + } + + case *ast.ParenExpr: + return typeFromNode(e.X) + + case *ast.InterfaceType: + if len(e.Methods.List) == 0 { + return types.NewInterfaceType(nil, nil) + } + } + + return nil +} + +func intValueOf(info *types.Info, expr ast.Expr) constant.Value { + tv := info.Types[expr] + if tv.Value == nil { + return nil + } + if tv.Value.Kind() != constant.Int { + return nil + } + return tv.Value +} + +// isPure reports whether expr is a softly safe expression and contains +// no significant side-effects. As opposed to strictly safe expressions, +// soft safe expressions permit some forms of side-effects, like +// panic possibility during indexing or nil pointer dereference. +// +// Uses types info to determine type conversion expressions that +// are the only permitted kinds of call expressions. +// Note that is does not check whether called function really +// has any side effects. The analysis is very conservative. +func isPure(info *types.Info, expr ast.Expr) bool { + // This list switch is not comprehensive and uses + // whitelist to be on the conservative side. + // Can be extended as needed. 
+ + switch expr := expr.(type) { + case *ast.StarExpr: + return isPure(info, expr.X) + case *ast.BinaryExpr: + return isPure(info, expr.X) && + isPure(info, expr.Y) + case *ast.UnaryExpr: + return expr.Op != token.ARROW && + isPure(info, expr.X) + case *ast.BasicLit, *ast.Ident: + return true + case *ast.IndexExpr: + return isPure(info, expr.X) && + isPure(info, expr.Index) + case *ast.SelectorExpr: + return isPure(info, expr.X) + case *ast.ParenExpr: + return isPure(info, expr.X) + case *ast.CompositeLit: + return isPureList(info, expr.Elts) + case *ast.CallExpr: + return isTypeExpr(info, expr.Fun) && isPureList(info, expr.Args) + + default: + return false + } +} + +// isPureList reports whether every expr in list is safe. +// +// See isPure. +func isPureList(info *types.Info, list []ast.Expr) bool { + for _, expr := range list { + if !isPure(info, expr) { + return false + } + } + return true +} + +func isAddressable(info *types.Info, expr ast.Expr) bool { + tv, ok := info.Types[expr] + return ok && tv.Addressable() +} + +func isConstant(info *types.Info, expr ast.Expr) bool { + tv, ok := info.Types[expr] + return ok && tv.Value != nil +} + +// isTypeExpr reports whether x represents a type expression. +// +// Type expression does not evaluate to any run time value, +// but rather describes a type that is used inside Go expression. +// +// For example, (*T)(v) is a CallExpr that "calls" (*T). +// (*T) is a type expression that tells Go compiler type v should be converted to. +func isTypeExpr(info *types.Info, x ast.Expr) bool { + switch x := x.(type) { + case *ast.StarExpr: + return isTypeExpr(info, x.X) + case *ast.ParenExpr: + return isTypeExpr(info, x.X) + case *ast.SelectorExpr: + return isTypeExpr(info, x.Sel) + + case *ast.Ident: + // Identifier may be a type expression if object + // it reffers to is a type name. + _, ok := info.ObjectOf(x).(*types.TypeName) + return ok + + case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType: + return true + + default: + return false + } +} diff --git a/vendor/github.com/quasilyte/regex/syntax/LICENSE b/vendor/github.com/quasilyte/regex/syntax/LICENSE new file mode 100644 index 000000000..f0c81282b --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Iskander (Alex) Sharipov / quasilyte + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
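The ruleguard package vendored in this patch is driven entirely through the Engine type shown earlier: NewEngine() builds an empty rule set, Load() reads ruleguard rule files under a ParseContext, and Run() walks a type-checked *ast.File, reporting matches through RunContext.Report. A rough, hypothetical sketch of how a caller could wire that together follows; it is not part of this change, and the file names rules.go and target.go are illustrative assumptions.

// Hypothetical driver for the vendored ruleguard Engine API (illustration only).
// Assumes "rules.go" (ruleguard rules) and "target.go" (code to analyze) exist.
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"os"

	"github.com/quasilyte/go-ruleguard/ruleguard"
)

func main() {
	fset := token.NewFileSet()

	// Build the rule set once; Load must not race with Run.
	engine := ruleguard.NewEngine()
	rules, err := os.Open("rules.go")
	if err != nil {
		panic(err)
	}
	defer rules.Close()
	if err := engine.Load(&ruleguard.ParseContext{Fset: fset}, "rules.go", rules); err != nil {
		panic(err)
	}

	// Parse and type-check the file to be analyzed.
	f, err := parser.ParseFile(fset, "target.go", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Types: map[ast.Expr]types.TypeAndValue{},
		Defs:  map[*ast.Ident]types.Object{},
		Uses:  map[*ast.Ident]types.Object{},
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("target", fset, []*ast.File{f}, info)
	if err != nil {
		panic(err)
	}

	// Run is thread-safe; matches arrive through RunContext.Report.
	ctx := &ruleguard.RunContext{
		Fset:  fset,
		Types: info,
		Pkg:   pkg,
		Report: func(rule ruleguard.GoRuleInfo, n ast.Node, msg string, _ *ruleguard.Suggestion) {
			fmt.Printf("%s: %s (%s:%d)\n", fset.Position(n.Pos()), msg, rule.Filename, rule.Line)
		},
	}
	if err := engine.Run(ctx, f); err != nil {
		panic(err)
	}
}

As the doc comments above state, Load() is not safe to call concurrently with Run(), so a real driver would load all rule files up front (or under sync.Once) before analyzing files, as done here.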
diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md new file mode 100644 index 000000000..13064ec39 --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/README.md @@ -0,0 +1,26 @@ +# Package `regex/syntax` + +Package `syntax` provides regular expressions parser as well as AST definitions. + +## Rationale + +There are several problems with the stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/) package: + +1. It does several transformations during the parsing that make it + hard to do any kind of syntax analysis afterward. + +2. The AST used there is optimized for the compilation and + execution inside the [regexp](https://golang.org/pkg/regexp) package. + It's somewhat complicated, especially in a way character ranges are encoded. + +3. It only supports [re2](https://github.com/google/re2/wiki/Syntax) syntax. + This parser recognizes most PCRE operations. + +4. It's easier to extend this package than something from the standard library. + +This package does almost no assumptions about how generated AST is going to be used +so it preserves as much syntax information as possible. + +It's easy to write another intermediate representation on top of it. The main +function of this package is to convert a textual regexp pattern into a more +structured form that can be processed more easily. diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go new file mode 100644 index 000000000..44b7b61bb --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/ast.go @@ -0,0 +1,147 @@ +package syntax + +import ( + "fmt" + "strings" +) + +type Regexp struct { + Pattern string + Expr Expr +} + +type RegexpPCRE struct { + Pattern string + Expr Expr + + Source string + Modifiers string + Delim [2]byte +} + +func (re *RegexpPCRE) HasModifier(mod byte) bool { + return strings.IndexByte(re.Modifiers, mod) >= 0 +} + +type Expr struct { + // The operations that this expression performs. See `operation.go`. + Op Operation + + Form Form + + _ [2]byte // Reserved + + // Pos describes a source location inside regexp pattern. + Pos Position + + // Args is a list of sub-expressions of this expression. + // + // See Operation constants documentation to learn how to + // interpret the particular expression args. + Args []Expr + + // Value holds expression textual value. + // + // Usually, that value is identical to src[Begin():End()], + // but this is not true for programmatically generated objects. + Value string +} + +// Begin returns expression leftmost offset. +func (e Expr) Begin() uint16 { return e.Pos.Begin } + +// End returns expression rightmost offset. +func (e Expr) End() uint16 { return e.Pos.End } + +// LastArg returns expression last argument. +// +// Should not be called on expressions that may have 0 arguments. 
+func (e Expr) LastArg() Expr { + return e.Args[len(e.Args)-1] +} + +type Operation byte + +type Form byte + +func FormatSyntax(re *Regexp) string { + return formatExprSyntax(re, re.Expr) +} + +func formatExprSyntax(re *Regexp, e Expr) string { + switch e.Op { + case OpChar, OpLiteral: + switch e.Value { + case "{": + return "'{'" + case "}": + return "'}'" + default: + return e.Value + } + case OpString, OpEscapeChar, OpEscapeMeta, OpEscapeOctal, OpEscapeUni, OpEscapeHex, OpPosixClass: + return e.Value + case OpRepeat: + return fmt.Sprintf("(repeat %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) + case OpCaret: + return "^" + case OpDollar: + return "$" + case OpDot: + return "." + case OpQuote: + return fmt.Sprintf("(q %s)", e.Value) + case OpCharRange: + return fmt.Sprintf("%s-%s", formatExprSyntax(re, e.Args[0]), formatExprSyntax(re, e.Args[1])) + case OpCharClass: + return fmt.Sprintf("[%s]", formatArgsSyntax(re, e.Args)) + case OpNegCharClass: + return fmt.Sprintf("[^%s]", formatArgsSyntax(re, e.Args)) + case OpConcat: + return fmt.Sprintf("{%s}", formatArgsSyntax(re, e.Args)) + case OpAlt: + return fmt.Sprintf("(or %s)", formatArgsSyntax(re, e.Args)) + case OpCapture: + return fmt.Sprintf("(capture %s)", formatExprSyntax(re, e.Args[0])) + case OpNamedCapture: + return fmt.Sprintf("(capture %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) + case OpGroup: + return fmt.Sprintf("(group %s)", formatExprSyntax(re, e.Args[0])) + case OpAtomicGroup: + return fmt.Sprintf("(atomic %s)", formatExprSyntax(re, e.Args[0])) + case OpGroupWithFlags: + return fmt.Sprintf("(group %s ?%s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) + case OpFlagOnlyGroup: + return fmt.Sprintf("(flags ?%s)", formatExprSyntax(re, e.Args[0])) + case OpPositiveLookahead: + return fmt.Sprintf("(?= %s)", formatExprSyntax(re, e.Args[0])) + case OpNegativeLookahead: + return fmt.Sprintf("(?! %s)", formatExprSyntax(re, e.Args[0])) + case OpPositiveLookbehind: + return fmt.Sprintf("(?<= %s)", formatExprSyntax(re, e.Args[0])) + case OpNegativeLookbehind: + return fmt.Sprintf("(?", e.Op) + } +} + +func formatArgsSyntax(re *Regexp, args []Expr) string { + parts := make([]string, len(args)) + for i, e := range args { + parts[i] = formatExprSyntax(re, e) + } + return strings.Join(parts, " ") +} diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go new file mode 100644 index 000000000..cfafc1d0e --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/errors.go @@ -0,0 +1,27 @@ +package syntax + +import ( + "fmt" +) + +type ParseError struct { + Pos Position + Message string +} + +func (e ParseError) Error() string { return e.Message } + +func throwfPos(pos Position, format string, args ...interface{}) { + panic(ParseError{ + Pos: pos, + Message: fmt.Sprintf(format, args...), + }) +} + +func throwErrorf(posBegin, posEnd int, format string, args ...interface{}) { + pos := Position{ + Begin: uint16(posBegin), + End: uint16(posEnd), + } + throwfPos(pos, format, args...) 
+} diff --git a/vendor/github.com/quasilyte/regex/syntax/go.mod b/vendor/github.com/quasilyte/regex/syntax/go.mod new file mode 100644 index 000000000..2a4e1f33b --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/go.mod @@ -0,0 +1,3 @@ +module github.com/quasilyte/regex/syntax + +go 1.14 diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go new file mode 100644 index 000000000..e92b038c2 --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go @@ -0,0 +1,455 @@ +package syntax + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +type token struct { + kind tokenKind + pos Position +} + +func (tok token) String() string { + return tok.kind.String() +} + +type tokenKind byte + +//go:generate stringer -type=tokenKind -trimprefix=tok -linecomment=true +const ( + tokNone tokenKind = iota + + tokChar + tokGroupFlags + tokPosixClass + tokConcat + tokRepeat + tokEscapeChar + tokEscapeMeta + tokEscapeOctal + tokEscapeUni + tokEscapeUniFull + tokEscapeHex + tokEscapeHexFull + tokComment + + tokQ // \Q + tokMinus // - + tokLbracket // [ + tokLbracketCaret // [^ + tokRbracket // ] + tokDollar // $ + tokCaret // ^ + tokQuestion // ? + tokDot // . + tokPlus // + + tokStar // * + tokPipe // | + tokLparen // ( + tokLparenName // (?P + tokLparenNameAngle // (? + tokLparenNameQuote // (?'name' + tokLparenFlags // (?flags + tokLparenAtomic // (?> + tokLparenPositiveLookahead // (?= + tokLparenPositiveLookbehind // (?<= + tokLparenNegativeLookahead // (?! + tokLparenNegativeLookbehind // (? unicode.MaxASCII { + _, size := utf8.DecodeRuneInString(l.input[l.pos:]) + l.pushTok(tokChar, size) + l.maybeInsertConcat() + continue + } + switch ch { + case '\\': + l.scanEscape(false) + case '.': + l.pushTok(tokDot, 1) + case '+': + l.pushTok(tokPlus, 1) + case '*': + l.pushTok(tokStar, 1) + case '^': + l.pushTok(tokCaret, 1) + case '$': + l.pushTok(tokDollar, 1) + case '?': + l.pushTok(tokQuestion, 1) + case ')': + l.pushTok(tokRparen, 1) + case '|': + l.pushTok(tokPipe, 1) + case '[': + if l.byteAt(l.pos+1) == '^' { + l.pushTok(tokLbracketCaret, 2) + } else { + l.pushTok(tokLbracket, 1) + } + l.scanCharClass() + case '(': + if l.byteAt(l.pos+1) == '?' { + switch { + case l.byteAt(l.pos+2) == '>': + l.pushTok(tokLparenAtomic, len("(?>")) + case l.byteAt(l.pos+2) == '=': + l.pushTok(tokLparenPositiveLookahead, len("(?=")) + case l.byteAt(l.pos+2) == '!': + l.pushTok(tokLparenNegativeLookahead, len("(?!")) + case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '=': + l.pushTok(tokLparenPositiveLookbehind, len("(?<=")) + case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '!': + l.pushTok(tokLparenNegativeLookbehind, len("(?= 0 { + l.pushTok(tokRepeat, len("{")+j) + } else { + l.pushTok(tokChar, 1) + } + default: + l.pushTok(tokChar, 1) + } + l.maybeInsertConcat() + } +} + +func (l *lexer) scanCharClass() { + l.maybeInsertConcat() + + // We need to handle first `]` in a special way. See #3. 
+ if l.byteAt(l.pos) == ']' { + l.pushTok(tokChar, 1) + } + + for l.pos < len(l.input) { + ch := l.input[l.pos] + if ch > unicode.MaxASCII { + _, size := utf8.DecodeRuneInString(l.input[l.pos:]) + l.pushTok(tokChar, size) + continue + } + switch ch { + case '\\': + l.scanEscape(true) + case '[': + isPosixClass := false + if l.byteAt(l.pos+1) == ':' { + j := l.stringIndex(l.pos+2, ":]") + if j >= 0 { + isPosixClass = true + l.pushTok(tokPosixClass, j+len("[::]")) + } + } + if !isPosixClass { + l.pushTok(tokChar, 1) + } + case '-': + l.pushTok(tokMinus, 1) + case ']': + l.pushTok(tokRbracket, 1) + return // Stop scanning in the char context + default: + l.pushTok(tokChar, 1) + } + } +} + +func (l *lexer) scanEscape(insideCharClass bool) { + s := l.input + if l.pos+1 >= len(s) { + throwErrorf(l.pos, l.pos+1, `unexpected end of pattern: trailing '\'`) + } + switch { + case s[l.pos+1] == 'p' || s[l.pos+1] == 'P': + if l.pos+2 >= len(s) { + throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected uni-class-short or '{'") + } + if s[l.pos+2] == '{' { + j := strings.IndexByte(s[l.pos+2:], '}') + if j < 0 { + throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + } + l.pushTok(tokEscapeUniFull, len(`\p{`)+j) + } else { + l.pushTok(tokEscapeUni, len(`\pL`)) + } + case s[l.pos+1] == 'x': + if l.pos+2 >= len(s) { + throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected hex-digit or '{'") + } + if s[l.pos+2] == '{' { + j := strings.IndexByte(s[l.pos+2:], '}') + if j < 0 { + throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + } + l.pushTok(tokEscapeHexFull, len(`\x{`)+j) + } else { + if isHexDigit(l.byteAt(l.pos + 3)) { + l.pushTok(tokEscapeHex, len(`\xFF`)) + } else { + l.pushTok(tokEscapeHex, len(`\xF`)) + } + } + case isOctalDigit(s[l.pos+1]): + digits := 1 + if isOctalDigit(l.byteAt(l.pos + 2)) { + if isOctalDigit(l.byteAt(l.pos + 3)) { + digits = 3 + } else { + digits = 2 + } + } + l.pushTok(tokEscapeOctal, len(`\`)+digits) + case s[l.pos+1] == 'Q': + size := len(s) - l.pos // Until the pattern ends + j := l.stringIndex(l.pos+2, `\E`) + if j >= 0 { + size = j + len(`\Q\E`) + } + l.pushTok(tokQ, size) + + default: + ch := l.byteAt(l.pos + 1) + if ch > unicode.MaxASCII { + _, size := utf8.DecodeRuneInString(l.input[l.pos+1:]) + l.pushTok(tokEscapeChar, len(`\`)+size) + return + } + kind := tokEscapeChar + if insideCharClass { + if charClassMetachar[ch] { + kind = tokEscapeMeta + } + } else { + if reMetachar[ch] { + kind = tokEscapeMeta + } + } + l.pushTok(kind, 2) + } +} + +func (l *lexer) maybeInsertConcat() { + if l.isConcatPos() { + last := len(l.tokens) - 1 + tok := l.tokens[last] + l.tokens[last].kind = tokConcat + l.tokens = append(l.tokens, tok) + } +} + +func (l *lexer) Init(s string) { + l.pos = 0 + l.tokens = l.tokens[:0] + l.input = s + + l.scan() + + l.pos = 0 +} + +func (l *lexer) tryScanGroupName(pos int) bool { + tok := tokLparenName + endCh := byte('>') + offset := 1 + switch l.byteAt(pos) { + case '\'': + endCh = '\'' + tok = tokLparenNameQuote + case '<': + tok = tokLparenNameAngle + case 'P': + offset = 2 + default: + return false + } + if pos+offset >= len(l.input) { + return false + } + end := strings.IndexByte(l.input[pos+offset:], endCh) + if end < 0 { + return false + } + l.pushTok(tok, len("(?")+offset+end+1) + return true +} + +func (l *lexer) tryScanGroupFlags(pos int) bool { + colonPos := strings.IndexByte(l.input[pos:], ':') + parenPos := strings.IndexByte(l.input[pos:], ')') + if parenPos < 0 { + return false + } + end := parenPos + if colonPos >= 
0 && colonPos < parenPos { + end = colonPos + len(":") + } + l.pushTok(tokLparenFlags, len("(?")+end) + return true +} + +func (l *lexer) tryScanComment(pos int) bool { + if l.byteAt(pos) != '#' { + return false + } + parenPos := strings.IndexByte(l.input[pos:], ')') + if parenPos < 0 { + return false + } + l.pushTok(tokComment, len("(?")+parenPos+len(")")) + return true +} + +func (l *lexer) repeatWidth(pos int) int { + j := pos + for isDigit(l.byteAt(j)) { + j++ + } + if j == pos { + return -1 + } + if l.byteAt(j) == '}' { + return (j + len("}")) - pos // {min} + } + if l.byteAt(j) != ',' { + return -1 + } + j += len(",") + for isDigit(l.byteAt(j)) { + j++ + } + if l.byteAt(j) == '}' { + return (j + len("}")) - pos // {min,} or {min,max} + } + return -1 +} + +func (l *lexer) stringIndex(offset int, s string) int { + if offset < len(l.input) { + return strings.Index(l.input[offset:], s) + } + return -1 +} + +func (l *lexer) byteAt(pos int) byte { + if pos >= 0 && pos < len(l.input) { + return l.input[pos] + } + return 0 +} + +func (l *lexer) pushTok(kind tokenKind, size int) { + l.tokens = append(l.tokens, token{ + kind: kind, + pos: Position{Begin: uint16(l.pos), End: uint16(l.pos + size)}, + }) + l.pos += size +} + +func (l *lexer) isConcatPos() bool { + if len(l.tokens) < 2 { + return false + } + x := l.tokens[len(l.tokens)-2].kind + if concatTable[x]&concatX != 0 { + return false + } + y := l.tokens[len(l.tokens)-1].kind + return concatTable[y]&concatY == 0 +} + +const ( + concatX byte = 1 << iota + concatY +) + +var concatTable = [256]byte{ + tokPipe: concatX | concatY, + + tokLparen: concatX, + tokLparenFlags: concatX, + tokLparenName: concatX, + tokLparenNameAngle: concatX, + tokLparenNameQuote: concatX, + tokLparenAtomic: concatX, + tokLbracket: concatX, + tokLbracketCaret: concatX, + tokLparenPositiveLookahead: concatX, + tokLparenPositiveLookbehind: concatX, + tokLparenNegativeLookahead: concatX, + tokLparenNegativeLookbehind: concatX, + + tokRparen: concatY, + tokRbracket: concatY, + tokPlus: concatY, + tokStar: concatY, + tokQuestion: concatY, + tokRepeat: concatY, +} diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go new file mode 100644 index 000000000..284e5dc5b --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/operation.go @@ -0,0 +1,189 @@ +package syntax + +//go:generate stringer -type=Operation -trimprefix=Op +const ( + OpNone Operation = iota + + // OpConcat is a concatenation of ops. + // Examples: `xy` `abc\d` `` + // Args - concatenated ops + // + // As a special case, OpConcat with 0 Args is used for "empty" + // set of operations. + OpConcat + + // OpDot is a '.' wildcard. + OpDot + + // OpAlt is x|y alternation of ops. + // Examples: `a|bc` `x(.*?)|y(.*?)` + // Args - union-connected regexp branches + OpAlt + + // OpStar is a shorthand for {0,} repetition. + // Examples: `x*` + // Args[0] - repeated expression + OpStar + + // OpPlus is a shorthand for {1,} repetition. + // Examples: `x+` + // Args[0] - repeated expression + OpPlus + + // OpQuestion is a shorthand for {0,1} repetition. + // Examples: `x?` + // Args[0] - repeated expression + OpQuestion + + // OpNonGreedy makes its operand quantifier non-greedy. + // Examples: `x??` `x*?` `x+?` + // Args[0] - quantified expression + OpNonGreedy + + // OpPossessive makes its operand quantifier possessive. + // Examples: `x?+` `x*+` `x++` + // Args[0] - quantified expression + OpPossessive + + // OpCaret is ^ anchor. 
+ OpCaret + + // OpDollar is $ anchor. + OpDollar + + // OpLiteral is a collection of consecutive chars. + // Examples: `ab` `10x` + // Args - enclosed characters (OpChar) + OpLiteral + + // OpChar is a single literal pattern character. + // Examples: `a` `6` `ф` + OpChar + + // OpString is an artificial element that is used in other expressions. + OpString + + // OpQuote is a \Q...\E enclosed literal. + // Examples: `\Q.?\E` `\Q?q[]=1` + // + // Note that closing \E is not mandatory. + OpQuote + + // OpEscapeChar is a single char escape. + // Examples: `\d` `\a` `\n` + OpEscapeChar + + // OpEscapeMeta is an escaped meta char. + // Examples: `\(` `\[` `\+` + OpEscapeMeta + + // OpEscapeOctal is an octal char code escape (up to 3 digits). + // Examples: `\123` `\12` + OpEscapeOctal + + // OpEscapeHex is a hex char code escape. + // Examples: `\x7F` `\xF7` + // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`. + OpEscapeHex + + // OpEscapeUni is a Unicode char class escape. + // Examples: `\pS` `\pL` `\PL` + // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}` + OpEscapeUni + + // OpCharClass is a char class enclosed in []. + // Examples: `[abc]` `[a-z0-9\]]` + // Args - char class elements (can include OpCharRange and OpPosixClass). + OpCharClass + + // OpNegCharClass is a negated char class enclosed in []. + // Examples: `[^abc]` `[^a-z0-9\]]` + // Args - char class elements (can include OpCharRange and OpPosixClass). + OpNegCharClass + + // OpCharRange is an inclusive char range inside a char class. + // Examples: `0-9` `A-Z` + // Args[0] - range lower bound (OpChar or OpEscape). + // Args[1] - range upper bound (OpChar or OpEscape). + OpCharRange + + // OpPosixClass is a named ASCII char set inside a char class. + // Examples: `[:alpha:]` `[:blank:]` + OpPosixClass + + // OpRepeat is a {min,max} repetition quantifier. + // Examples: `x{5}` `x{min,max}` `x{min,}` + // Args[0] - repeated expression + // Args[1] - repeat count (OpString) + OpRepeat + + // OpCapture is `(re)` capturing group. + // Examples: `(abc)` `(x|y)` + // Args[0] - enclosed expression + OpCapture + + // OpNamedCapture is `(?Pre)` capturing group. + // Examples: `(?Pabc)` `(?Px|y)` + // FormNamedCaptureAngle examples: `(?abc)` `(?x|y)` + // FormNamedCaptureQuote examples: `(?'foo'abc)` `(?'name'x|y)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + // Args[1] - group name (OpString) + OpNamedCapture + + // OpGroup is `(?:re)` non-capturing group. + // Examples: `(?:abc)` `(?:x|y)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + OpGroup + + // OpGroupWithFlags is `(?flags:re)` non-capturing group. + // Examples: `(?i:abc)` `(?i:x|y)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + // Args[1] - flags (OpString) + OpGroupWithFlags + + // OpAtomicGroup is `(?>re)` non-capturing group without backtracking. + // Examples: `(?>foo)` `(?>)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + OpAtomicGroup + + // OpPositiveLookahead is `(?=re)` asserts that following text matches re. + // Examples: `(?=foo)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + OpPositiveLookahead + + // OpNegativeLookahead is `(?!re)` asserts that following text doesn't match re. + // Examples: `(?!foo)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + OpNegativeLookahead + + // OpPositiveLookbehind is `(?<=re)` asserts that preceding text matches re. 
+ // Examples: `(?<=foo)` + // Args[0] - enclosed expression (OpConcat with 0 args for empty group) + OpPositiveLookbehind + + // OpNegativeLookbehind is `(?=re)` asserts that preceding text doesn't match re. + // Examples: `(?= Operation(len(_Operation_index)-1) { + return "Operation(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Operation_name[_Operation_index[i]:_Operation_index[i+1]] +} diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go new file mode 100644 index 000000000..faf0f8b21 --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/parser.go @@ -0,0 +1,471 @@ +package syntax + +import ( + "errors" + "fmt" + "strings" +) + +type ParserOptions struct { + // NoLiterals disables OpChar merging into OpLiteral. + NoLiterals bool +} + +func NewParser(opts *ParserOptions) *Parser { + return newParser(opts) +} + +type Parser struct { + out Regexp + lexer lexer + exprPool []Expr + + prefixParselets [256]prefixParselet + infixParselets [256]infixParselet + + charClass []Expr + allocated uint + + opts ParserOptions +} + +// ParsePCRE parses PHP-style pattern with delimiters. +// An example of such pattern is `/foo/i`. +func (p *Parser) ParsePCRE(pattern string) (*RegexpPCRE, error) { + pcre, err := p.newPCRE(pattern) + if err != nil { + return nil, err + } + if pcre.HasModifier('x') { + return nil, errors.New("'x' modifier is not supported") + } + re, err := p.Parse(pcre.Pattern) + if re != nil { + pcre.Expr = re.Expr + } + return pcre, err +} + +func (p *Parser) Parse(pattern string) (result *Regexp, err error) { + defer func() { + r := recover() + if r == nil { + return + } + if err2, ok := r.(ParseError); ok { + err = err2 + return + } + panic(r) + }() + + p.lexer.Init(pattern) + p.allocated = 0 + p.out.Pattern = pattern + if pattern == "" { + p.out.Expr = *p.newExpr(OpConcat, Position{}) + } else { + p.out.Expr = *p.parseExpr(0) + } + + if !p.opts.NoLiterals { + p.mergeChars(&p.out.Expr) + } + p.setValues(&p.out.Expr) + + return &p.out, nil +} + +type prefixParselet func(token) *Expr + +type infixParselet func(*Expr, token) *Expr + +func newParser(opts *ParserOptions) *Parser { + var p Parser + + if opts != nil { + p.opts = *opts + } + p.exprPool = make([]Expr, 256) + + for tok, op := range tok2op { + if op != 0 { + p.prefixParselets[tokenKind(tok)] = p.parsePrefixElementary + } + } + + p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr { + return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos) + } + p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr { + return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos) + } + + p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) } + p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) } + p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) } + p.prefixParselets[tokLparenNegativeLookahead] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookahead, tok) } + p.prefixParselets[tokLparenPositiveLookbehind] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookbehind, tok) } + p.prefixParselets[tokLparenNegativeLookbehind] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookbehind, tok) } + + p.prefixParselets[tokLparenName] = func(tok token) *Expr { + return p.parseNamedCapture(FormDefault, tok) + } + p.prefixParselets[tokLparenNameAngle] = func(tok token) *Expr { + 
return p.parseNamedCapture(FormNamedCaptureAngle, tok) + } + p.prefixParselets[tokLparenNameQuote] = func(tok token) *Expr { + return p.parseNamedCapture(FormNamedCaptureQuote, tok) + } + + p.prefixParselets[tokLparenFlags] = p.parseGroupWithFlags + + p.prefixParselets[tokPipe] = func(tok token) *Expr { + // We need prefix pipe parselet to handle `(|x)` syntax. + right := p.parseExpr(1) + return p.newExpr(OpAlt, tok.pos, p.newEmpty(tok.pos), right) + } + p.prefixParselets[tokLbracket] = func(tok token) *Expr { + return p.parseCharClass(OpCharClass, tok) + } + p.prefixParselets[tokLbracketCaret] = func(tok token) *Expr { + return p.parseCharClass(OpNegCharClass, tok) + } + + p.infixParselets[tokRepeat] = func(left *Expr, tok token) *Expr { + repeatLit := p.newExpr(OpString, tok.pos) + return p.newExpr(OpRepeat, combinePos(left.Pos, tok.pos), left, repeatLit) + } + p.infixParselets[tokStar] = func(left *Expr, tok token) *Expr { + return p.newExpr(OpStar, combinePos(left.Pos, tok.pos), left) + } + p.infixParselets[tokConcat] = func(left *Expr, tok token) *Expr { + right := p.parseExpr(2) + if left.Op == OpConcat { + left.Args = append(left.Args, *right) + left.Pos.End = right.End() + return left + } + return p.newExpr(OpConcat, combinePos(left.Pos, right.Pos), left, right) + } + p.infixParselets[tokPipe] = p.parseAlt + p.infixParselets[tokMinus] = p.parseMinus + p.infixParselets[tokPlus] = p.parsePlus + p.infixParselets[tokQuestion] = p.parseQuestion + + return &p +} + +func (p *Parser) setValues(e *Expr) { + for i := range e.Args { + p.setValues(&e.Args[i]) + } + e.Value = p.exprValue(e) +} + +func (p *Parser) exprValue(e *Expr) string { + return p.out.Pattern[e.Begin():e.End()] +} + +func (p *Parser) mergeChars(e *Expr) { + for i := range e.Args { + p.mergeChars(&e.Args[i]) + } + if e.Op != OpConcat || len(e.Args) < 2 { + return + } + + args := e.Args[:0] + i := 0 + for i < len(e.Args) { + first := i + chars := 0 + for j := i; j < len(e.Args) && e.Args[j].Op == OpChar; j++ { + chars++ + } + if chars > 1 { + c1 := e.Args[first] + c2 := e.Args[first+chars-1] + lit := p.newExpr(OpLiteral, combinePos(c1.Pos, c2.Pos)) + for j := 0; j < chars; j++ { + lit.Args = append(lit.Args, e.Args[first+j]) + } + args = append(args, *lit) + i += chars + } else { + args = append(args, e.Args[i]) + i++ + } + } + if len(args) == 1 { + *e = args[0] // Turn OpConcat into OpLiteral + } else { + e.Args = args + } +} + +func (p *Parser) newEmpty(pos Position) *Expr { + return p.newExpr(OpConcat, pos) +} + +func (p *Parser) newExprForm(op Operation, form Form, pos Position, args ...*Expr) *Expr { + e := p.newExpr(op, pos, args...) 
+ e.Form = form + return e +} + +func (p *Parser) newExpr(op Operation, pos Position, args ...*Expr) *Expr { + e := p.allocExpr() + *e = Expr{ + Op: op, + Pos: pos, + Args: e.Args[:0], + } + for _, arg := range args { + e.Args = append(e.Args, *arg) + } + return e +} + +func (p *Parser) allocExpr() *Expr { + i := p.allocated + if i < uint(len(p.exprPool)) { + p.allocated++ + return &p.exprPool[i] + } + return &Expr{} +} + +func (p *Parser) expect(kind tokenKind) Position { + tok := p.lexer.NextToken() + if tok.kind != kind { + throwErrorf(int(tok.pos.Begin), int(tok.pos.End), "expected '%s', found '%s'", kind, tok.kind) + } + return tok.pos +} + +func (p *Parser) parseExpr(precedence int) *Expr { + tok := p.lexer.NextToken() + prefix := p.prefixParselets[tok.kind] + if prefix == nil { + throwfPos(tok.pos, "unexpected token: %v", tok) + } + left := prefix(tok) + + for precedence < p.precedenceOf(p.lexer.Peek()) { + tok := p.lexer.NextToken() + infix := p.infixParselets[tok.kind] + left = infix(left, tok) + } + + return left +} + +func (p *Parser) parsePrefixElementary(tok token) *Expr { + return p.newExpr(tok2op[tok.kind], tok.pos) +} + +func (p *Parser) parseCharClass(op Operation, tok token) *Expr { + var endPos Position + p.charClass = p.charClass[:0] + for { + p.charClass = append(p.charClass, *p.parseExpr(0)) + next := p.lexer.Peek() + if next.kind == tokRbracket { + endPos = next.pos + p.lexer.NextToken() + break + } + if next.kind == tokNone { + throwfPos(tok.pos, "unterminated '['") + } + } + + result := p.newExpr(op, combinePos(tok.pos, endPos)) + result.Args = append(result.Args, p.charClass...) + return result +} + +func (p *Parser) parseMinus(left *Expr, tok token) *Expr { + if p.isValidCharRangeOperand(left) { + if p.lexer.Peek().kind != tokRbracket { + right := p.parseExpr(2) + return p.newExpr(OpCharRange, combinePos(left.Pos, right.Pos), left, right) + } + } + p.charClass = append(p.charClass, *left) + return p.newExpr(OpChar, tok.pos) +} + +func (p *Parser) isValidCharRangeOperand(e *Expr) bool { + switch e.Op { + case OpEscapeHex, OpEscapeOctal, OpEscapeMeta, OpChar: + return true + case OpEscapeChar: + switch p.exprValue(e) { + case `\\`, `\|`, `\*`, `\+`, `\?`, `\.`, `\[`, `\^`, `\$`, `\(`, `\)`: + return true + } + } + return false +} + +func (p *Parser) parsePlus(left *Expr, tok token) *Expr { + op := OpPlus + switch left.Op { + case OpPlus, OpStar, OpQuestion, OpRepeat: + op = OpPossessive + } + return p.newExpr(op, combinePos(left.Pos, tok.pos), left) +} + +func (p *Parser) parseQuestion(left *Expr, tok token) *Expr { + op := OpQuestion + switch left.Op { + case OpPlus, OpStar, OpQuestion, OpRepeat: + op = OpNonGreedy + } + return p.newExpr(op, combinePos(left.Pos, tok.pos), left) +} + +func (p *Parser) parseAlt(left *Expr, tok token) *Expr { + var right *Expr + switch p.lexer.Peek().kind { + case tokRparen, tokNone: + // This is needed to handle `(x|)` syntax. 
+ right = p.newEmpty(tok.pos) + default: + right = p.parseExpr(1) + } + if left.Op == OpAlt { + left.Args = append(left.Args, *right) + left.Pos.End = right.End() + return left + } + return p.newExpr(OpAlt, combinePos(left.Pos, right.Pos), left, right) +} + +func (p *Parser) parseGroupItem(tok token) *Expr { + if p.lexer.Peek().kind == tokRparen { + // This is needed to handle `() syntax.` + return p.newEmpty(tok.pos) + } + return p.parseExpr(0) +} + +func (p *Parser) parseGroup(op Operation, tok token) *Expr { + x := p.parseGroupItem(tok) + result := p.newExpr(op, tok.pos, x) + result.Pos.End = p.expect(tokRparen).End + return result +} + +func (p *Parser) parseNamedCapture(form Form, tok token) *Expr { + prefixLen := len("(?<") + if form == FormDefault { + prefixLen = len("(?P<") + } + name := p.newExpr(OpString, Position{ + Begin: tok.pos.Begin + uint16(prefixLen), + End: tok.pos.End - uint16(len(">")), + }) + x := p.parseGroupItem(tok) + result := p.newExprForm(OpNamedCapture, form, tok.pos, x, name) + result.Pos.End = p.expect(tokRparen).End + return result +} + +func (p *Parser) parseGroupWithFlags(tok token) *Expr { + var result *Expr + val := p.out.Pattern[tok.pos.Begin+1 : tok.pos.End] + switch { + case !strings.HasSuffix(val, ":"): + flags := p.newExpr(OpString, Position{ + Begin: tok.pos.Begin + uint16(len("(?")), + End: tok.pos.End, + }) + result = p.newExpr(OpFlagOnlyGroup, tok.pos, flags) + case val == "?:": + x := p.parseGroupItem(tok) + result = p.newExpr(OpGroup, tok.pos, x) + default: + flags := p.newExpr(OpString, Position{ + Begin: tok.pos.Begin + uint16(len("(?")), + End: tok.pos.End - uint16(len(":")), + }) + x := p.parseGroupItem(tok) + result = p.newExpr(OpGroupWithFlags, tok.pos, x, flags) + } + result.Pos.End = p.expect(tokRparen).End + return result +} + +func (p *Parser) precedenceOf(tok token) int { + switch tok.kind { + case tokPipe: + return 1 + case tokConcat, tokMinus: + return 2 + case tokPlus, tokStar, tokQuestion, tokRepeat: + return 3 + default: + return 0 + } +} + +func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) { + if source == "" { + return nil, errors.New("empty pattern: can't find delimiters") + } + + delim := source[0] + endDelim := delim + switch delim { + case '(': + endDelim = ')' + case '{': + endDelim = '}' + case '[': + endDelim = ']' + case '<': + endDelim = '>' + case '\\': + return nil, errors.New("'\\' is not a valid delimiter") + default: + if isSpace(delim) { + return nil, errors.New("whitespace is not a valid delimiter") + } + if isAlphanumeric(delim) { + return nil, fmt.Errorf("'%c' is not a valid delimiter", delim) + } + } + + j := strings.LastIndexByte(source, endDelim) + if j == -1 { + return nil, fmt.Errorf("can't find '%c' ending delimiter", endDelim) + } + + pcre := &RegexpPCRE{ + Pattern: source[1:j], + Source: source, + Delim: [2]byte{delim, endDelim}, + Modifiers: source[j+1:], + } + return pcre, nil +} + +var tok2op = [256]Operation{ + tokDollar: OpDollar, + tokCaret: OpCaret, + tokDot: OpDot, + tokChar: OpChar, + tokMinus: OpChar, + tokEscapeChar: OpEscapeChar, + tokEscapeMeta: OpEscapeMeta, + tokEscapeHex: OpEscapeHex, + tokEscapeOctal: OpEscapeOctal, + tokEscapeUni: OpEscapeUni, + tokPosixClass: OpPosixClass, + tokQ: OpQuote, + tokComment: OpComment, +} diff --git a/vendor/github.com/quasilyte/regex/syntax/pos.go b/vendor/github.com/quasilyte/regex/syntax/pos.go new file mode 100644 index 000000000..51bdbf87a --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/pos.go @@ -0,0 +1,10 @@ +package syntax 
+ +type Position struct { + Begin uint16 + End uint16 +} + +func combinePos(begin, end Position) Position { + return Position{Begin: begin.Begin, End: end.End} +} diff --git a/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go new file mode 100644 index 000000000..8800436bc --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type=tokenKind -trimprefix=tok -linecomment=true"; DO NOT EDIT. + +package syntax + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tokNone-0] + _ = x[tokChar-1] + _ = x[tokGroupFlags-2] + _ = x[tokPosixClass-3] + _ = x[tokConcat-4] + _ = x[tokRepeat-5] + _ = x[tokEscapeChar-6] + _ = x[tokEscapeMeta-7] + _ = x[tokEscapeOctal-8] + _ = x[tokEscapeUni-9] + _ = x[tokEscapeUniFull-10] + _ = x[tokEscapeHex-11] + _ = x[tokEscapeHexFull-12] + _ = x[tokComment-13] + _ = x[tokQ-14] + _ = x[tokMinus-15] + _ = x[tokLbracket-16] + _ = x[tokLbracketCaret-17] + _ = x[tokRbracket-18] + _ = x[tokDollar-19] + _ = x[tokCaret-20] + _ = x[tokQuestion-21] + _ = x[tokDot-22] + _ = x[tokPlus-23] + _ = x[tokStar-24] + _ = x[tokPipe-25] + _ = x[tokLparen-26] + _ = x[tokLparenName-27] + _ = x[tokLparenNameAngle-28] + _ = x[tokLparenNameQuote-29] + _ = x[tokLparenFlags-30] + _ = x[tokLparenAtomic-31] + _ = x[tokLparenPositiveLookahead-32] + _ = x[tokLparenPositiveLookbehind-33] + _ = x[tokLparenNegativeLookahead-34] + _ = x[tokLparenNegativeLookbehind-35] + _ = x[tokRparen-36] +} + +const _tokenKind_name = "NoneCharGroupFlagsPosixClassConcatRepeatEscapeCharEscapeMetaEscapeOctalEscapeUniEscapeUniFullEscapeHexEscapeHexFullComment\\Q-[[^]$^?.+*|((?P(?(?'name'(?flags(?>(?=(?<=(?!(?= tokenKind(len(_tokenKind_index)-1) { + return "tokenKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tokenKind_name[_tokenKind_index[i]:_tokenKind_index[i+1]] +} diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go new file mode 100644 index 000000000..934680c8b --- /dev/null +++ b/vendor/github.com/quasilyte/regex/syntax/utils.go @@ -0,0 +1,30 @@ +package syntax + +func isSpace(ch byte) bool { + switch ch { + case '\r', '\n', '\t', '\f', '\v': + return true + default: + return false + } +} + +func isAlphanumeric(ch byte) bool { + return (ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') +} + +func isDigit(ch byte) bool { + return ch >= '0' && ch <= '9' +} + +func isOctalDigit(ch byte) bool { + return ch >= '0' && ch <= '7' +} + +func isHexDigit(ch byte) bool { + return (ch >= '0' && ch <= '9') || + (ch >= 'a' && ch <= 'f') || + (ch >= 'A' && ch <= 'F') +} diff --git a/vendor/github.com/ryancurrah/gomodguard/.dockerignore b/vendor/github.com/ryancurrah/gomodguard/.dockerignore new file mode 100644 index 000000000..77738287f --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/.dockerignore @@ -0,0 +1 @@ +dist/ \ No newline at end of file diff --git a/vendor/github.com/ryancurrah/gomodguard/.gitignore b/vendor/github.com/ryancurrah/gomodguard/.gitignore new file mode 100644 index 000000000..4ebc79c5d --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/.gitignore @@ -0,0 +1,25 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` 
+*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +/gomodguard + +*.xml + +dist/ + +coverage.* + +.idea/ diff --git a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml new file mode 100644 index 000000000..fc05c0e3d --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml @@ -0,0 +1,132 @@ +# See https://golangci-lint.run/usage/configuration/ + +linters-settings: + revive: + # see https://github.com/mgechev/revive#available-rules for details. + ignore-generated-header: true + severity: warning + rules: + - name: atomic + # - name: bare-return + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + # - name: flag-parameter + - name: get-return + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: struct-tag + - name: superfluous-else + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + # - name: unhandled-error + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + # - name: unused-receiver + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + # - cyclop + - deadcode + - dogsled + - dupl + - durationcheck + - errcheck + - errorlint + - exhaustive + # - exhaustivestruct + - exportloopref + # - forbidigo + - forcetypeassert + - funlen + # - gci + # - gochecknoglobals + - gochecknoinits + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - goimports + - gomnd + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + - govet + # - ifshort + - importas + - ineffassign + - lll + - makezero + - misspell + - nakedret + - nestif + - nilerr + # - nlreturn + - noctx + - nolintlint + # - paralleltest + - prealloc + - predeclared + # - promlinter + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + # - tagliatelle + - testpackage + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + # - wastedassign + - whitespace + # - wrapcheck + - wsl diff --git a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml new file mode 100644 index 000000000..3daecfd79 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml @@ -0,0 +1,29 @@ +builds: +- main: ./cmd/gomodguard/main.go + env: + - CGO_ENABLED=0 +archives: +- replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 +checksum: + name_template: 'checksums.txt' +dockers: +- goos: linux + 
goarch: amd64 + image_templates: + - "ryancurrah/gomodguard:latest" + - "ryancurrah/gomodguard:{{.Tag}}" + skip_push: false + dockerfile: Dockerfile.goreleaser + build_flag_templates: + - "--pull" + - "--build-arg=gomodguard_VERSION={{.Version}}" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.name={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.source={{.GitURL}}" diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile b/vendor/github.com/ryancurrah/gomodguard/Dockerfile new file mode 100644 index 000000000..719a0ebdb --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile @@ -0,0 +1,17 @@ +ARG GO_VERSION=1.14.2 +ARG ALPINE_VERSION=3.11 +ARG gomodguard_VERSION= + +# ---- Build container +FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS builder +WORKDIR /gomodguard +COPY . . +RUN apk add --no-cache git +RUN go build -o gomodguard cmd/gomodguard/main.go + +# ---- App container +FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +WORKDIR / +RUN apk --no-cache add ca-certificates +COPY --from=builder gomodguard/gomodguard / +ENTRYPOINT ./gomodguard diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser new file mode 100644 index 000000000..57a042a67 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser @@ -0,0 +1,10 @@ +ARG GO_VERSION=1.14.2 +ARG ALPINE_VERSION=3.11 +ARG gomodguard_VERSION= + +# ---- App container +FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +WORKDIR / +RUN apk --no-cache add ca-certificates +COPY gomodguard /gomodguard +ENTRYPOINT ./gomodguard diff --git a/vendor/github.com/ryancurrah/gomodguard/LICENSE b/vendor/github.com/ryancurrah/gomodguard/LICENSE new file mode 100644 index 000000000..acd8a81e1 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Ryan Currah + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryancurrah/gomodguard/Makefile b/vendor/github.com/ryancurrah/gomodguard/Makefile new file mode 100644 index 000000000..a1962834c --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/Makefile @@ -0,0 +1,42 @@ +current_dir = $(shell pwd) + +.PHONY: lint +lint: + golangci-lint run ./... 
+ +.PHONY: build +build: + go build -o gomodguard cmd/gomodguard/main.go + +.PHONY: run +run: build + ./gomodguard + +.PHONY: test +test: + go test -v -coverprofile coverage.out + +.PHONY: cover +cover: + gocover-cobertura < coverage.out > coverage.xml + +.PHONY: dockerrun +dockerrun: dockerbuild + docker run -v "${current_dir}/.gomodguard.yaml:/.gomodguard.yaml" ryancurrah/gomodguard:latest + +.PHONY: release +release: + goreleaser --rm-dist + +.PHONY: clean +clean: + rm -rf dist/ + rm -f gomodguard coverage.xml coverage.out + +.PHONY: install-tools-mac +install-tools-mac: + brew install goreleaser/tap/goreleaser + +.PHONY: install-go-tools +install-go-tools: + go get github.com/t-yuki/gocover-cobertura diff --git a/vendor/github.com/ryancurrah/gomodguard/README.md b/vendor/github.com/ryancurrah/gomodguard/README.md new file mode 100644 index 000000000..8e2e41688 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/README.md @@ -0,0 +1,131 @@ +# gomodguard +[![License](https://img.shields.io/github/license/ryancurrah/gomodguard?style=flat-square)](/LICENSE) +[![Codecov](https://img.shields.io/codecov/c/gh/ryancurrah/gomodguard?style=flat-square)](https://codecov.io/gh/ryancurrah/gomodguard) +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ryancurrah/gomodguard/Go?logo=Go&style=flat-square)](https://github.com/ryancurrah/gomodguard/actions?query=workflow%3AGo) +[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/ryancurrah/gomodguard?style=flat-square)](https://github.com/ryancurrah/gomodguard/releases/latest) +[![Docker](https://img.shields.io/docker/pulls/ryancurrah/gomodguard?style=flat-square)](https://hub.docker.com/r/ryancurrah/gomodguard) +[![Github Releases Stats of golangci-lint](https://img.shields.io/github/downloads/ryancurrah/gomodguard/total.svg?logo=github&style=flat-square)](https://somsubhra.com/github-release-stats/?username=ryancurrah&repository=gomodguard) + + + +Allow and block list linter for direct Go module dependencies. This is useful for organizations where they want to standardize on the modules used and be able to recommend alternative modules. + +## Description + +Allowed and blocked modules are defined in a `./.gomodguard.yaml` or `~/.gomodguard.yaml` file. + +Modules can be allowed by module or domain name. When allowed modules are specified any modules not in the allowed configuration are blocked. + +If no allowed modules or domains are specified then all modules are allowed except for blocked ones. + +The linter looks for blocked modules in `go.mod` and searches for imported packages where the imported packages module is blocked. Indirect modules are not considered. + +Alternative modules can be optionally recommended in the blocked modules list. + +If the linted module imports a blocked module but the linted module is in the recommended modules list the blocked module is ignored. Usually, this means the linted module wraps that blocked module for use by other modules, therefore the import of the blocked module should not be blocked. + +Version constraints can be specified for modules as well which lets you block new or old versions of modules or specific versions. + +Results are printed to `stdout`. + +Logging statements are printed to `stderr`. + +Results can be exported to different report formats. Which can be imported into CI tools. See the help section for more information. 
+ +## Configuration + +```yaml +allowed: + modules: # List of allowed modules + - gopkg.in/yaml.v2 + - github.com/go-xmlfmt/xmlfmt + - github.com/phayes/checkstyle + - github.com/mitchellh/go-homedir + domains: # List of allowed module domains + - golang.org + +blocked: + modules: # List of blocked modules + - github.com/uudashr/go-module: # Blocked module + recommendations: # Recommended modules that should be used instead (Optional) + - golang.org/x/mod + reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional) + versions: # List of blocked module version constraints. + - github.com/mitchellh/go-homedir: # Blocked module with version constraint. + version: "<= 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons. + reason: "testing if blocked version constraint works." # Reason why the version constraint exists. +``` + +## Usage + +``` +╰─ ./gomodguard -h +Usage: gomodguard [files...] +Also supports package syntax but will use it in relative path, i.e. ./pkg/... +Flags: + -f string + Report results to the specified file. A report type must also be specified + -file string + + -h Show this help text + -help + + -i int + Exit code when issues were found (default 2) + -issues-exit-code int + (default 2) + + -n Don't lint test files + -no-test + + -r string + Report results to one of the following formats: checkstyle. A report file destination must also be specified + -report string +``` + +## Example + +``` +╰─ ./gomodguard -r checkstyle -f gomodguard-checkstyle.xml ./... + +info: allowed modules, [gopkg.in/yaml.v2 github.com/go-xmlfmt/xmlfmt github.com/phayes/checkstyle github.com/mitchellh/go-homedir] +info: allowed module domains, [golang.org] +info: blocked modules, [github.com/uudashr/go-module] +info: found `2` blocked modules in the go.mod file, [github.com/gofrs/uuid github.com/uudashr/go-module] +blocked_example.go:6: import of package `github.com/gofrs/uuid` is blocked because the module is not in the allowed modules list. +blocked_example.go:7: import of package `github.com/uudashr/go-module` is blocked because the module is in the blocked modules list. `golang.org/x/mod` is a recommended module. `mod` is the official go.mod parser library. +``` + +Resulting checkstyle file + +``` +╰─ cat gomodguard-checkstyle.xml + + + + + + + + + + +``` + +## Install + +``` +go get -u github.com/ryancurrah/gomodguard/cmd/gomodguard +``` + +## Develop + +``` +git clone https://github.com/ryancurrah/gomodguard.git && cd gomodguard + +go build -o gomodguard cmd/gomodguard/main.go +``` + +## License + +**MIT** diff --git a/vendor/github.com/ryancurrah/gomodguard/cmd.go b/vendor/github.com/ryancurrah/gomodguard/cmd.go new file mode 100644 index 000000000..a26fac890 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/cmd.go @@ -0,0 +1,247 @@ +package gomodguard + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "github.com/go-xmlfmt/xmlfmt" + "github.com/mitchellh/go-homedir" + "github.com/phayes/checkstyle" + "gopkg.in/yaml.v2" +) + +const ( + errFindingHomedir = "unable to find home directory, %w" + errReadingConfigFile = "could not read config file: %w" + errParsingConfigFile = "could not parse config file: %w" +) + +var ( + configFile = ".gomodguard.yaml" + logger = log.New(os.Stderr, "", 0) + errFindingConfigFile = fmt.Errorf("could not find config file") +) + +// Run the gomodguard linter. Returns the exit code to use. 
+//nolint:funlen +func Run() int { + var ( + args []string + help bool + noTest bool + report string + reportFile string + issuesExitCode int + cwd, _ = os.Getwd() + ) + + flag.BoolVar(&help, "h", false, "Show this help text") + flag.BoolVar(&help, "help", false, "") + flag.BoolVar(&noTest, "n", false, "Don't lint test files") + flag.BoolVar(&noTest, "no-test", false, "") + flag.StringVar(&report, "r", "", "Report results to one of the following formats: checkstyle. "+ + "A report file destination must also be specified") + flag.StringVar(&report, "report", "", "") + flag.StringVar(&reportFile, "f", "", "Report results to the specified file. A report type must also be specified") + flag.StringVar(&reportFile, "file", "", "") + flag.IntVar(&issuesExitCode, "i", 2, "Exit code when issues were found") + flag.IntVar(&issuesExitCode, "issues-exit-code", 2, "") + flag.Parse() + + report = strings.TrimSpace(strings.ToLower(report)) + + if help { + showHelp() + return 0 + } + + if report != "" && report != "checkstyle" { + logger.Fatalf("error: invalid report type '%s'", report) + } + + if report != "" && reportFile == "" { + logger.Fatalf("error: a report file must be specified when a report is enabled") + } + + if report == "" && reportFile != "" { + logger.Fatalf("error: a report type must be specified when a report file is enabled") + } + + args = flag.Args() + if len(args) == 0 { + args = []string{"./..."} + } + + config, err := GetConfig(configFile) + if err != nil { + logger.Fatalf("error: %s", err) + } + + filteredFiles := GetFilteredFiles(cwd, noTest, args) + + processor, err := NewProcessor(config) + if err != nil { + logger.Fatalf("error: %s", err) + } + + logger.Printf("info: allowed modules, %+v", config.Allowed.Modules) + logger.Printf("info: allowed module domains, %+v", config.Allowed.Domains) + logger.Printf("info: blocked modules, %+v", config.Blocked.Modules.Get()) + logger.Printf("info: blocked modules with version constraints, %+v", config.Blocked.Versions.Get()) + + results := processor.ProcessFiles(filteredFiles) + + if report == "checkstyle" { + err := WriteCheckstyle(reportFile, results) + if err != nil { + logger.Fatalf("error: %s", err) + } + } + + for _, r := range results { + fmt.Println(r.String()) + } + + if len(results) > 0 { + return issuesExitCode + } + + return 0 +} + +// GetConfig from YAML file. +func GetConfig(configFile string) (*Configuration, error) { + config := Configuration{} + + home, err := homedir.Dir() + if err != nil { + return nil, fmt.Errorf(errFindingHomedir, err) + } + + cfgFile := "" + homeDirCfgFile := filepath.Join(home, configFile) + + switch { + case fileExists(configFile): + cfgFile = configFile + case fileExists(homeDirCfgFile): + cfgFile = homeDirCfgFile + default: + return nil, fmt.Errorf("%w: %s %s", errFindingConfigFile, configFile, homeDirCfgFile) + } + + data, err := ioutil.ReadFile(cfgFile) + if err != nil { + return nil, fmt.Errorf(errReadingConfigFile, err) + } + + err = yaml.Unmarshal(data, &config) + if err != nil { + return nil, fmt.Errorf(errParsingConfigFile, err) + } + + return &config, nil +} + +// GetFilteredFiles returns files based on search string arguments and filters. +func GetFilteredFiles(cwd string, skipTests bool, args []string) []string { + var ( + foundFiles = []string{} + filteredFiles = []string{} + ) + + for _, f := range args { + if strings.HasSuffix(f, "/...") { + dir, _ := filepath.Split(f) + + foundFiles = append(foundFiles, expandGoWildcard(dir)...) 
+ + continue + } + + if _, err := os.Stat(f); err == nil { + foundFiles = append(foundFiles, f) + } + } + + // Use relative path to print shorter names, sort out test foundFiles if chosen. + for _, f := range foundFiles { + if skipTests { + if strings.HasSuffix(f, "_test.go") { + continue + } + } + + if relativePath, err := filepath.Rel(cwd, f); err == nil { + filteredFiles = append(filteredFiles, relativePath) + + continue + } + + filteredFiles = append(filteredFiles, f) + } + + return filteredFiles +} + +// showHelp text for command line. +func showHelp() { + helpText := `Usage: gomodguard [files...] +Also supports package syntax but will use it in relative path, i.e. ./pkg/... +Flags:` + fmt.Println(helpText) + flag.PrintDefaults() +} + +// WriteCheckstyle takes the results and writes them to a checkstyle formated file. +func WriteCheckstyle(checkstyleFilePath string, results []Issue) error { + check := checkstyle.New() + + for i := range results { + file := check.EnsureFile(results[i].FileName) + file.AddError(checkstyle.NewError(results[i].LineNumber, 1, checkstyle.SeverityError, results[i].Reason, + "gomodguard")) + } + + checkstyleXML := fmt.Sprintf("\n%s", check.String()) + + err := ioutil.WriteFile(checkstyleFilePath, []byte(xmlfmt.FormatXML(checkstyleXML, "", " ")), 0644) // nolint:gosec + if err != nil { + return err + } + + return nil +} + +// fileExists returns true if the file path provided exists. +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + + return !info.IsDir() +} + +// expandGoWildcard path provided. +func expandGoWildcard(root string) []string { + foundFiles := []string{} + + _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + // Only append go foundFiles. 
+ if !strings.HasSuffix(info.Name(), ".go") { + return nil + } + + foundFiles = append(foundFiles, path) + + return nil + }) + + return foundFiles +} diff --git a/vendor/github.com/ryancurrah/gomodguard/go.mod b/vendor/github.com/ryancurrah/gomodguard/go.mod new file mode 100644 index 000000000..3a19311ba --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/go.mod @@ -0,0 +1,12 @@ +module github.com/ryancurrah/gomodguard + +go 1.16 + +require ( + github.com/Masterminds/semver v1.5.0 + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b + github.com/mitchellh/go-homedir v1.1.0 + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d + golang.org/x/mod v0.4.2 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/ryancurrah/gomodguard/go.sum b/vendor/github.com/ryancurrah/gomodguard/go.sum new file mode 100644 index 000000000..30447d902 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/go.sum @@ -0,0 +1,26 @@ +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go new file mode 100644 index 000000000..7ef462363 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go @@ -0,0 +1,486 @@ +package gomodguard + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "strings" + + "github.com/Masterminds/semver" + + "golang.org/x/mod/modfile" +) + +const ( + goModFilename = "go.mod" + errReadingGoModFile = "unable to read module file %s: %w" + errParsingGoModFile = "unable to parse module file %s: %w" +) + +var ( + blockReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the " + + "allowed modules list." + blockReasonInBlockedList = "import of package `%s` is blocked because the module is in the " + + "blocked modules list." + blockReasonHasLocalReplaceDirective = "import of package `%s` is blocked because the module has a " + + "local replace directive." +) + +// BlockedVersion has a version constraint a reason why the the module version is blocked. +type BlockedVersion struct { + Version string `yaml:"version"` + Reason string `yaml:"reason"` +} + +// IsLintedModuleVersionBlocked returns true if a version constraint is specified and the +// linted module version matches the constraint. +func (r *BlockedVersion) IsLintedModuleVersionBlocked(lintedModuleVersion string) bool { + if r.Version == "" { + return false + } + + constraint, err := semver.NewConstraint(r.Version) + if err != nil { + return false + } + + version, err := semver.NewVersion(lintedModuleVersion) + if err != nil { + return false + } + + meet := constraint.Check(version) + + return meet +} + +// Message returns the reason why the module version is blocked. +func (r *BlockedVersion) Message(lintedModuleVersion string) string { + var sb strings.Builder + + // Add version contraint to message. + _, _ = fmt.Fprintf(&sb, "version `%s` is blocked because it does not meet the version constraint `%s`.", + lintedModuleVersion, r.Version) + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message. + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + + return sb.String() +} + +// BlockedModule has alternative modules to use and a reason why the module is blocked. +type BlockedModule struct { + Recommendations []string `yaml:"recommendations"` + Reason string `yaml:"reason"` +} + +// IsCurrentModuleARecommendation returns true if the current module is in the Recommendations list. +// +// If the current go.mod file being linted is a recommended module of a +// blocked module and it imports that blocked module, do not set as blocked. +// This could mean that the linted module is a wrapper for that blocked module. +func (r *BlockedModule) IsCurrentModuleARecommendation(currentModuleName string) bool { + if r == nil { + return false + } + + for n := range r.Recommendations { + if strings.TrimSpace(currentModuleName) == strings.TrimSpace(r.Recommendations[n]) { + return true + } + } + + return false +} + +// Message returns the reason why the module is blocked and a list of recommended modules if provided. 
+func (r *BlockedModule) Message() string { + var sb strings.Builder + + // Add recommendations to message + for i := range r.Recommendations { + switch { + case len(r.Recommendations) == 1: + _, _ = fmt.Fprintf(&sb, "`%s` is a recommended module.", r.Recommendations[i]) + case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1): + _, _ = fmt.Fprintf(&sb, "`%s` ", r.Recommendations[i]) + case (i + 1) != len(r.Recommendations): + _, _ = fmt.Fprintf(&sb, "`%s`, ", r.Recommendations[i]) + default: + _, _ = fmt.Fprintf(&sb, "and `%s` are recommended modules.", r.Recommendations[i]) + } + } + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message + if sb.Len() == 0 { + _, _ = fmt.Fprintf(&sb, "%s.", strings.TrimRight(r.Reason, ".")) + } else { + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + } + + return sb.String() +} + +// HasRecommendations returns true if the blocked package has +// recommended modules. +func (r *BlockedModule) HasRecommendations() bool { + if r == nil { + return false + } + + return len(r.Recommendations) > 0 +} + +// BlockedVersions a list of blocked modules by a version constraint. +type BlockedVersions []map[string]BlockedVersion + +// Get returns the module names that are blocked. +func (b BlockedVersions) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block version if one is set for the provided linted module name. +func (b BlockedVersions) GetBlockReason(lintedModuleName string) *BlockedVersion { + for _, blockedModule := range b { + for blockedModuleName, blockedVersion := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedVersion + } + } + } + + return nil +} + +// BlockedModules a list of blocked modules. +type BlockedModules []map[string]BlockedModule + +// Get returns the module names that are blocked. +func (b BlockedModules) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block module if one is set for the provided linted module name. +func (b BlockedModules) GetBlockReason(lintedModuleName string) *BlockedModule { + for _, blockedModule := range b { + for blockedModuleName, blockedModule := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedModule + } + } + } + + return nil +} + +// Allowed is a list of modules and module +// domains that are allowed to be used. +type Allowed struct { + Modules []string `yaml:"modules"` + Domains []string `yaml:"domains"` +} + +// IsAllowedModule returns true if the given module +// name is in the allowed modules list. +func (a *Allowed) IsAllowedModule(moduleName string) bool { + allowedModules := a.Modules + + for i := range allowedModules { + if strings.TrimSpace(moduleName) == strings.TrimSpace(allowedModules[i]) { + return true + } + } + + return false +} + +// IsAllowedModuleDomain returns true if the given modules domain is +// in the allowed module domains list. 
+func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool { + allowedDomains := a.Domains + + for i := range allowedDomains { + if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), + strings.TrimSpace(strings.ToLower(allowedDomains[i]))) { + return true + } + } + + return false +} + +// Blocked is a list of modules that are +// blocked and not to be used. +type Blocked struct { + Modules BlockedModules `yaml:"modules"` + Versions BlockedVersions `yaml:"versions"` + LocalReplaceDirectives bool `yaml:"local_replace_directives"` +} + +// Configuration of gomodguard allow and block lists. +type Configuration struct { + Allowed Allowed `yaml:"allowed"` + Blocked Blocked `yaml:"blocked"` +} + +// Issue represents the result of one error. +type Issue struct { + FileName string + LineNumber int + Position token.Position + Reason string +} + +// String returns the filename, line +// number and reason of a Issue. +func (r *Issue) String() string { + return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason) +} + +// Processor processes Go files. +type Processor struct { + Config *Configuration + Modfile *modfile.File + blockedModulesFromModFile map[string][]string +} + +// NewProcessor will create a Processor to lint blocked packages. +func NewProcessor(config *Configuration) (*Processor, error) { + goModFileBytes, err := loadGoModFile() + if err != nil { + return nil, fmt.Errorf(errReadingGoModFile, goModFilename, err) + } + + modFile, err := modfile.Parse(goModFilename, goModFileBytes, nil) + if err != nil { + return nil, fmt.Errorf(errParsingGoModFile, goModFilename, err) + } + + p := &Processor{ + Config: config, + Modfile: modFile, + } + + p.SetBlockedModules() + + return p, nil +} + +// ProcessFiles takes a string slice with file names (full paths) +// and lints them. +func (p *Processor) ProcessFiles(filenames []string) (issues []Issue) { + for _, filename := range filenames { + data, err := ioutil.ReadFile(filename) + if err != nil { + issues = append(issues, Issue{ + FileName: filename, + LineNumber: 0, + Reason: fmt.Sprintf("unable to read file, file cannot be linted (%s)", err.Error()), + }) + + continue + } + + issues = append(issues, p.process(filename, data)...) + } + + return issues +} + +// process file imports and add lint error if blocked package is imported. +func (p *Processor) process(filename string, data []byte) (issues []Issue) { + fileSet := token.NewFileSet() + + file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) + if err != nil { + issues = append(issues, Issue{ + FileName: filename, + LineNumber: 0, + Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), + }) + + return + } + + imports := file.Imports + for n := range imports { + importedPkg := strings.TrimSpace(strings.Trim(imports[n].Path.Value, "\"")) + + blockReasons := p.isBlockedPackageFromModFile(importedPkg) + if blockReasons == nil { + continue + } + + for _, blockReason := range blockReasons { + issues = append(issues, p.addError(fileSet, imports[n].Pos(), blockReason)) + } + } + + return issues +} + +// addError adds an error for the file and line number for the current token.Pos +// with the given reason. 
+func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason string) Issue { + position := fileset.Position(pos) + + return Issue{ + FileName: position.Filename, + LineNumber: position.Line, + Position: position, + Reason: reason, + } +} + +// SetBlockedModules determines and sets which modules are blocked by reading +// the go.mod file of the module that is being linted. +// +// It works by iterating over the dependant modules specified in the require +// directive, checking if the module domain or full name is in the allowed list. +func (p *Processor) SetBlockedModules() { //nolint:gocognit,funlen + blockedModules := make(map[string][]string, len(p.Modfile.Require)) + currentModuleName := p.Modfile.Module.Mod.Path + lintedModules := p.Modfile.Require + replacedModules := p.Modfile.Replace + + for i := range lintedModules { + if lintedModules[i].Indirect { + continue // Do not lint indirect modules. + } + + lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path) + lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version) + + var isAllowed bool + + switch { + case len(p.Config.Allowed.Modules) == 0 && len(p.Config.Allowed.Domains) == 0: + isAllowed = true + case p.Config.Allowed.IsAllowedModuleDomain(lintedModuleName): + isAllowed = true + case p.Config.Allowed.IsAllowedModule(lintedModuleName): + isAllowed = true + default: + isAllowed = false + } + + blockModuleReason := p.Config.Blocked.Modules.GetBlockReason(lintedModuleName) + blockVersionReason := p.Config.Blocked.Versions.GetBlockReason(lintedModuleName) + + if !isAllowed && blockModuleReason == nil && blockVersionReason == nil { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], blockReasonNotInAllowedList) + continue + } + + if blockModuleReason != nil && !blockModuleReason.IsCurrentModuleARecommendation(currentModuleName) { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], + fmt.Sprintf("%s %s", blockReasonInBlockedList, blockModuleReason.Message())) + } + + if blockVersionReason != nil && blockVersionReason.IsLintedModuleVersionBlocked(lintedModuleVersion) { + blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], + fmt.Sprintf("%s %s", blockReasonInBlockedList, blockVersionReason.Message(lintedModuleVersion))) + } + } + + // Replace directives with local paths are blocked. + // Filesystem paths found in "replace" directives are represented by a path with an empty version. + // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 + if p.Config.Blocked.LocalReplaceDirectives { + for i := range replacedModules { + replacedModuleOldName := strings.TrimSpace(replacedModules[i].Old.Path) + replacedModuleNewName := strings.TrimSpace(replacedModules[i].New.Path) + replacedModuleNewVersion := strings.TrimSpace(replacedModules[i].New.Version) + + if replacedModuleNewName != "" && replacedModuleNewVersion == "" { + blockedModules[replacedModuleOldName] = append(blockedModules[replacedModuleOldName], + blockReasonHasLocalReplaceDirective) + } + } + } + + p.blockedModulesFromModFile = blockedModules +} + +// isBlockedPackageFromModFile returns the block reason if the package is blocked. 
+func (p *Processor) isBlockedPackageFromModFile(packageName string) []string { + for blockedModuleName, blockReasons := range p.blockedModulesFromModFile { + if strings.HasPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) { + formattedReasons := make([]string, 0, len(blockReasons)) + + for _, blockReason := range blockReasons { + formattedReasons = append(formattedReasons, fmt.Sprintf(blockReason, packageName)) + } + + return formattedReasons + } + } + + return nil +} + +func loadGoModFile() ([]byte, error) { + cmd := exec.Command("go", "env", "-json") + stdout, _ := cmd.StdoutPipe() + _ = cmd.Start() + + if stdout == nil { + return ioutil.ReadFile(goModFilename) + } + + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(stdout) + + goEnv := make(map[string]string) + + err := json.Unmarshal(buf.Bytes(), &goEnv) + if err != nil { + return ioutil.ReadFile(goModFilename) + } + + if _, ok := goEnv["GOMOD"]; !ok { + return ioutil.ReadFile(goModFilename) + } + + if _, err = os.Stat(goEnv["GOMOD"]); os.IsNotExist(err) { + return ioutil.ReadFile(goModFilename) + } + + if goEnv["GOMOD"] == "/dev/null" { + return nil, errors.New("current working directory must have a go.mod file") + } + + return ioutil.ReadFile(goEnv["GOMOD"]) +} diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE new file mode 100644 index 000000000..77b261d7a --- /dev/null +++ b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020 Ryan R. Olds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go new file mode 100644 index 000000000..bc42dfb3a --- /dev/null +++ b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go @@ -0,0 +1,311 @@ +package analyzer + +import ( + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +const ( + rowsName = "Rows" + stmtName = "Stmt" + closeMethod = "Close" +) + +var ( + sqlPackages = []string{ + "database/sql", + "github.com/jmoiron/sqlx", + } +) + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "sqlclosecheck", + Doc: "Checks that sql.Rows and sql.Stmt are closed.", + Run: run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + + // Build list of types we are looking for + targetTypes := getTargetTypes(pssa, sqlPackages) + + // If non of the types are found, skip + if len(targetTypes) == 0 { + return nil, nil + } + + funcs := pssa.SrcFuncs + for _, f := range funcs { + for _, b := range f.Blocks { + for i := range b.Instrs { + // Check if instruction is call that returns a target type + targetValues := getTargetTypesValues(b, i, targetTypes) + if len(targetValues) == 0 { + continue + } + + // log.Printf("%s", f.Name()) + + // For each found target check if they are closed and deferred + for _, targetValue := range targetValues { + refs := (*targetValue.value).Referrers() + isClosed := checkClosed(refs, targetTypes) + if !isClosed { + pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt was not closed") + } + + checkDeferred(pass, refs, targetTypes, false) + } + } + } + } + + return nil, nil +} + +func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointer { + targets := []*types.Pointer{} + + for _, sqlPkg := range targetPackages { + pkg := pssa.Pkg.Prog.ImportedPackage(sqlPkg) + if pkg == nil { + // the SQL package being checked isn't imported + return targets + } + + rowsType := getTypePointerFromName(pkg, rowsName) + if rowsType != nil { + targets = append(targets, rowsType) + } + + stmtType := getTypePointerFromName(pkg, stmtName) + if stmtType != nil { + targets = append(targets, stmtType) + } + } + + return targets +} + +func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer { + pkgType := pkg.Type(name) + if pkgType == nil { + // this package does not use Rows/Stmt + return nil + } + + obj := pkgType.Object() + named, ok := obj.Type().(*types.Named) + if !ok { + return nil + } + + return types.NewPointer(named) +} + +type targetValue struct { + value *ssa.Value + instr ssa.Instruction +} + +func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer) []targetValue { + targetValues := []targetValue{} + + instr := b.Instrs[i] + call, ok := instr.(*ssa.Call) + if !ok { + return targetValues + } + + signature := call.Call.Signature() + results := signature.Results() + for i := 0; i < results.Len(); i++ { + v := results.At(i) + varType := v.Type() + + for _, targetType := range targetTypes { + if !types.Identical(varType, targetType) { + continue + } + + for _, cRef := range *call.Referrers() { + switch instr := cRef.(type) { + case *ssa.Call: + if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), targetType) { + targetValues = 
append(targetValues, targetValue{ + value: &instr.Call.Args[0], + instr: call, + }) + } + case ssa.Value: + if types.Identical(instr.Type(), targetType) { + targetValues = append(targetValues, targetValue{ + value: &instr, + instr: call, + }) + } + } + } + } + } + + return targetValues +} + +func checkClosed(refs *[]ssa.Instruction, targetTypes []*types.Pointer) bool { + numInstrs := len(*refs) + for idx, ref := range *refs { + // log.Printf("%T - %s", ref, ref) + + action := getAction(ref, targetTypes) + switch action { + case "closed": + return true + case "passed": + // Passed and not used after + if numInstrs == idx+1 { + return true + } + case "returned": + return true + case "handled": + return true + default: + // log.Printf(action) + } + } + + return false +} + +func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string { + switch instr := instr.(type) { + case *ssa.Defer: + if instr.Call.Value == nil { + return "unvalued defer" + } + + name := instr.Call.Value.Name() + if name == closeMethod { + return "closed" + } + case *ssa.Call: + if instr.Call.Value == nil { + return "unvalued call" + } + + isTarget := false + receiver := instr.Call.StaticCallee().Signature.Recv() + if receiver != nil { + isTarget = isTargetType(receiver.Type(), targetTypes) + } + + name := instr.Call.Value.Name() + if isTarget && name == closeMethod { + return "closed" + } + + if !isTarget { + return "passed" + } + case *ssa.Phi: + return "passed" + case *ssa.MakeInterface: + return "passed" + case *ssa.Store: + if len(*instr.Addr.Referrers()) == 0 { + return "noop" + } + + for _, aRef := range *instr.Addr.Referrers() { + if c, ok := aRef.(*ssa.MakeClosure); ok { + f := c.Fn.(*ssa.Function) + for _, b := range f.Blocks { + if checkClosed(&b.Instrs, targetTypes) { + return "handled" + } + } + } + } + case *ssa.UnOp: + instrType := instr.Type() + for _, targetType := range targetTypes { + if types.Identical(instrType, targetType) { + if checkClosed(instr.Referrers(), targetTypes) { + return "handled" + } + } + } + case *ssa.FieldAddr: + if checkClosed(instr.Referrers(), targetTypes) { + return "handled" + } + case *ssa.Return: + return "returned" + default: + // log.Printf("%s", instr) + } + + return "unhandled" +} + +func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []*types.Pointer, inDefer bool) { + for _, instr := range *instrs { + switch instr := instr.(type) { + case *ssa.Defer: + if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { + return + } + case *ssa.Call: + if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { + if !inDefer { + pass.Reportf(instr.Pos(), "Close should use defer") + } + + return + } + case *ssa.Store: + if len(*instr.Addr.Referrers()) == 0 { + return + } + + for _, aRef := range *instr.Addr.Referrers() { + if c, ok := aRef.(*ssa.MakeClosure); ok { + f := c.Fn.(*ssa.Function) + + for _, b := range f.Blocks { + checkDeferred(pass, &b.Instrs, targetTypes, true) + } + } + } + case *ssa.UnOp: + instrType := instr.Type() + for _, targetType := range targetTypes { + if types.Identical(instrType, targetType) { + checkDeferred(pass, instr.Referrers(), targetTypes, inDefer) + } + } + case *ssa.FieldAddr: + checkDeferred(pass, instr.Referrers(), targetTypes, inDefer) + } + } +} + +func isTargetType(t types.Type, targetTypes []*types.Pointer) bool { + for _, targetType := range targetTypes { + if types.Identical(t, targetType) { + return true + } + } + + return false +} diff --git 
a/vendor/github.com/sanposhiho/wastedassign/v2/LICENSE b/vendor/github.com/sanposhiho/wastedassign/v2/LICENSE new file mode 100644 index 000000000..4ed7724fe --- /dev/null +++ b/vendor/github.com/sanposhiho/wastedassign/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Kensei Nakada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/README.md b/vendor/github.com/sanposhiho/wastedassign/v2/README.md new file mode 100644 index 000000000..cd2deedad --- /dev/null +++ b/vendor/github.com/sanposhiho/wastedassign/v2/README.md @@ -0,0 +1,66 @@ +# wastedassign +`wastedassign` finds wasted assignment statements + +found the value ... + +- reassigned, but never used afterward +- reassigned, but reassigned without using the value + +## Example + +```go +package main + +import "fmt" + +func f() int { + a := 0 + b := 0 + fmt.Print(a) + fmt.Print(b) + a = 1 // This reassignment is wasted, because never used afterwards. Wastedassign find this + + b = 1 // This reassignment is wasted, because reassigned without use this value. Wastedassign find this + b = 2 + fmt.Print(b) + + return 1 + 2 +} +``` + + +```bash +$ go vet -vettool=`which wastedassign` sample.go +# command-line-arguments +./sample.go:10:2: assigned to a, but never used afterwards +./sample.go:12:2: assigned to b, but reassigned without using the value +``` + + +## Installation + +``` +go get -u github.com/sanposhiho/wastedassign/v2/cmd/wastedassign +``` + +## Usage + +``` +# in your project + +go vet -vettool=`which wastedassign` ./... +``` + +And, you can use wastedassign in [golangci-lint](https://github.com/golangci/golangci-lint). + +## Contribution + +I am waiting for your contribution :D + +Feel free to create an issue or a PR! 
+ +### Run test + +``` +go test +``` diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/go.mod b/vendor/github.com/sanposhiho/wastedassign/v2/go.mod new file mode 100644 index 000000000..32e5685f6 --- /dev/null +++ b/vendor/github.com/sanposhiho/wastedassign/v2/go.mod @@ -0,0 +1,5 @@ +module github.com/sanposhiho/wastedassign/v2 + +go 1.14 + +require golang.org/x/tools v0.1.0 diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/go.sum b/vendor/github.com/sanposhiho/wastedassign/v2/go.sum new file mode 100644 index 000000000..21d696a65 --- /dev/null +++ b/vendor/github.com/sanposhiho/wastedassign/v2/go.sum @@ -0,0 +1,26 @@ +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/wastedassign.go b/vendor/github.com/sanposhiho/wastedassign/v2/wastedassign.go new file mode 100644 index 000000000..e0c0da616 --- /dev/null +++ b/vendor/github.com/sanposhiho/wastedassign/v2/wastedassign.go @@ -0,0 +1,272 @@ 
+package wastedassign + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/ssa" +) + +const doc = "wastedassign finds wasted assignment statements." + +// Analyzer is the wastedassign analyzer. +var Analyzer = &analysis.Analyzer{ + Name: "wastedassign", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +type wastedAssignStruct struct { + pos token.Pos + reason string +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Plundered from buildssa.Run. + prog := ssa.NewProgram(pass.Fset, ssa.NaiveForm) + + // Create SSA packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pass.Pkg.Imports()) + + // Create and build the primary package. + ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) + ssapkg.Build() + + var srcFuncs []*ssa.Function + for _, f := range pass.Files { + for _, decl := range f.Decls { + if fdecl, ok := decl.(*ast.FuncDecl); ok { + + // SSA will not build a Function + // for a FuncDecl named blank. + // That's arguably too strict but + // relaxing it would break uniqueness of + // names of package members. + if fdecl.Name.Name == "_" { + continue + } + + // (init functions have distinct Func + // objects named "init" and distinct + // ssa.Functions named "init#1", ...) + + fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) + if fn == nil { + return nil, errors.New("failed to get func's typesinfo") + } + + f := ssapkg.Prog.FuncValue(fn) + if f == nil { + return nil, errors.New("failed to get func's SSA-form intermediate representation") + } + + var addAnons func(f *ssa.Function) + addAnons = func(f *ssa.Function) { + srcFuncs = append(srcFuncs, f) + for _, anon := range f.AnonFuncs { + addAnons(anon) + } + } + addAnons(f) + } + } + } + + typeSwitchPos := map[int]bool{} + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + inspect.Preorder([]ast.Node{new(ast.TypeSwitchStmt)}, func(n ast.Node) { + if _, ok := n.(*ast.TypeSwitchStmt); ok { + typeSwitchPos[pass.Fset.Position(n.Pos()).Line] = true + } + }) + + var wastedAssignMap []wastedAssignStruct + + for _, sf := range srcFuncs { + for _, bl := range sf.Blocks { + blCopy := *bl + for _, ist := range bl.Instrs { + blCopy.Instrs = rmInstrFromInstrs(blCopy.Instrs, ist) + if _, ok := ist.(*ssa.Store); !ok { + continue + } + + var buf [10]*ssa.Value + for _, op := range ist.Operands(buf[:0]) { + if (*op) == nil || !opInLocals(sf.Locals, op) { + continue + } + + reason := isNextOperationToOpIsStore([]*ssa.BasicBlock{&blCopy}, op, nil) + if reason == notWasted { + continue + } + + if ist.Pos() == 0 || typeSwitchPos[pass.Fset.Position(ist.Pos()).Line] { + continue + } + + v, ok := (*op).(*ssa.Alloc) + if !ok { + // This block should never have been executed. 
+ continue + } + wastedAssignMap = append(wastedAssignMap, wastedAssignStruct{ + pos: ist.Pos(), + reason: reason.String(v), + }) + } + } + } + } + + for _, was := range wastedAssignMap { + pass.Reportf(was.pos, was.reason) + } + + return nil, nil +} + +type wastedReason string + +const ( + noUseUntilReturn wastedReason = "assigned, but never used afterwards" + reassignedSoon wastedReason = "wasted assignment" + notWasted wastedReason = "" +) + +func (wr wastedReason) String(a *ssa.Alloc) string { + switch wr { + case noUseUntilReturn: + return fmt.Sprintf("assigned to %s, but never used afterwards", a.Comment) + case reassignedSoon: + return fmt.Sprintf("assigned to %s, but reassigned without using the value", a.Comment) + case notWasted: + return "" + default: + return "" + } +} + +func isNextOperationToOpIsStore(bls []*ssa.BasicBlock, currentOp *ssa.Value, haveCheckedMap map[int]int) wastedReason { + var wastedReasons []wastedReason + var wastedReasonsCurrentBls []wastedReason + + if haveCheckedMap == nil { + haveCheckedMap = map[int]int{} + } + + for _, bl := range bls { + if haveCheckedMap[bl.Index] == 2 { + continue + } + + haveCheckedMap[bl.Index]++ + breakFlag := false + for _, ist := range bl.Instrs { + if breakFlag { + break + } + + switch w := ist.(type) { + case *ssa.Store: + var buf [10]*ssa.Value + for _, op := range ist.Operands(buf[:0]) { + if *op == *currentOp { + if w.Addr.Name() == (*currentOp).Name() { + wastedReasonsCurrentBls = append(wastedReasonsCurrentBls, reassignedSoon) + breakFlag = true + break + } else { + return notWasted + } + } + } + default: + var buf [10]*ssa.Value + for _, op := range ist.Operands(buf[:0]) { + if *op == *currentOp { + // It wasn't a continuous store. + return notWasted + } + } + } + } + + if len(bl.Succs) != 0 && !breakFlag { + wastedReason := isNextOperationToOpIsStore(rmSameBlock(bl.Succs, bl), currentOp, haveCheckedMap) + if wastedReason == notWasted { + return notWasted + } + wastedReasons = append(wastedReasons, wastedReason) + } + } + + wastedReasons = append(wastedReasons, wastedReasonsCurrentBls...) 
+ + if len(wastedReasons) != 0 && containReassignedSoon(wastedReasons) { + return reassignedSoon + } + + return noUseUntilReturn +} + +func rmSameBlock(bls []*ssa.BasicBlock, currentBl *ssa.BasicBlock) []*ssa.BasicBlock { + var rto []*ssa.BasicBlock + + for _, bl := range bls { + if bl != currentBl { + rto = append(rto, bl) + } + } + return rto +} + +func containReassignedSoon(ws []wastedReason) bool { + for _, w := range ws { + if w == reassignedSoon { + return true + } + } + return false +} + +func rmInstrFromInstrs(instrs []ssa.Instruction, instrToRm ssa.Instruction) []ssa.Instruction { + var rto []ssa.Instruction + for _, i := range instrs { + if i != instrToRm { + rto = append(rto, i) + } + } + return rto +} + +func opInLocals(locals []*ssa.Alloc, op *ssa.Value) bool { + for _, l := range locals { + if *op == ssa.Value(l) { + return true + } + } + return false +} diff --git a/vendor/github.com/securego/gosec/v2/.gitignore b/vendor/github.com/securego/gosec/v2/.gitignore new file mode 100644 index 000000000..f282cda24 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/.gitignore @@ -0,0 +1,35 @@ +# transient files +/image + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.swp +/gosec + +# Folders +_obj +_test +vendor +dist + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.DS_Store + +.vscode diff --git a/vendor/github.com/securego/gosec/v2/.golangci.yml b/vendor/github.com/securego/gosec/v2/.golangci.yml new file mode 100644 index 000000000..dcda64666 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/.golangci.yml @@ -0,0 +1,23 @@ +linters: + enable: + - asciicheck + - bodyclose + - depguard + - dogsled + - durationcheck + - errcheck + - exportloopref + - gofmt + - gofumpt + - goimports + - gosec + - govet + - importas + - megacheck + - misspell + - nakedret + - nolintlint + - revive + - unconvert + - unparam + - wastedassign \ No newline at end of file diff --git a/vendor/github.com/securego/gosec/v2/.goreleaser.yml b/vendor/github.com/securego/gosec/v2/.goreleaser.yml new file mode 100644 index 000000000..263e522bc --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/.goreleaser.yml @@ -0,0 +1,21 @@ +--- +project_name: gosec + +release: + github: + owner: securego + name: gosec + +builds: + - main : ./cmd/gosec/ + binary: gosec + goos: + - darwin + - linux + - windows + goarch: + - amd64 + - arm64 + ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}} + env: + - CGO_ENABLED=0 diff --git a/vendor/github.com/securego/gosec/v2/Dockerfile b/vendor/github.com/securego/gosec/v2/Dockerfile new file mode 100644 index 000000000..c937d5255 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/Dockerfile @@ -0,0 +1,15 @@ +ARG GO_VERSION +FROM golang:${GO_VERSION}-alpine AS builder +RUN apk add --update --no-cache ca-certificates make git curl gcc libc-dev +RUN mkdir -p /build +WORKDIR /build +COPY . 
/build/ +RUN go mod download +RUN make build-linux + +FROM golang:${GO_VERSION}-alpine +RUN apk add --update --no-cache ca-certificates bash git gcc libc-dev +ENV GO111MODULE on +COPY --from=builder /build/gosec /bin/gosec +COPY entrypoint.sh /bin/entrypoint.sh +ENTRYPOINT ["/bin/entrypoint.sh"] diff --git a/vendor/github.com/securego/gosec/v2/LICENSE.txt b/vendor/github.com/securego/gosec/v2/LICENSE.txt new file mode 100644 index 000000000..1756c7821 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/LICENSE.txt @@ -0,0 +1,154 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, +each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) patent +license to make, have made, use, offer to sell, sell, import, and otherwise +transfer the Work, where such license applies only to those patent claims +licensable by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) with the Work +to which such Contribution(s) was submitted. If You institute patent litigation +against any entity (including a cross-claim or counterclaim in a lawsuit) +alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate as of the date +such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and You must cause any modified files to carry prominent notices +stating that You changed the files; and You must retain, in the Source form of +any Derivative Works that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, excluding those notices +that do not pertain to any part of the Derivative Works; and If the Work +includes a "NOTICE" text file as part of its distribution, then any Derivative +Works that You distribute must include a readable copy of the attribution +notices contained within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one of the following +places: within a NOTICE text file distributed as part of the Derivative Works; +within the Source form or documentation, if provided along with the Derivative +Works; or, within a display generated by the Derivative Works, if and wherever +such third-party notices normally appear. The contents of the NOTICE file are +for informational purposes only and do not modify the License. You may add Your +own attribution notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided that such +additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
5. Submission of Contributions. +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any character +arising as a result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, work stoppage, +computer failure or malfunction, or any and all other commercial damages or +losses), even if such Contributor has been advised of the possibility of such +damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree to +indemnify, defend, and hold each Contributor harmless for any liability incurred +by, or claims asserted against, such Contributor by reason of your accepting any +such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/securego/gosec/v2/Makefile b/vendor/github.com/securego/gosec/v2/Makefile new file mode 100644 index 000000000..5974e5c08 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/Makefile @@ -0,0 +1,71 @@ +GIT_TAG?= $(shell git describe --always --tags) +BIN = gosec +FMT_CMD = $(gofmt -s -l -w $(find . 
-type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr) +IMAGE_REPO = securego +BUILDFLAGS := '-w -s' +CGO_ENABLED = 0 +GO := GO111MODULE=on go +GO_NOMOD :=GO111MODULE=off go +GOPATH ?= $(shell $(GO) env GOPATH) +GOBIN ?= $(GOPATH)/bin +GOLINT ?= $(GOBIN)/golint +GOSEC ?= $(GOBIN)/gosec +GINKGO ?= $(GOBIN)/ginkgo +GO_VERSION = 1.15 + +default: + $(MAKE) build + +install-test-deps: + $(GO_NOMOD) get -u github.com/onsi/ginkgo/ginkgo + $(GO_NOMOD) get -u golang.org/x/crypto/ssh + $(GO_NOMOD) get -u github.com/lib/pq + +test: install-test-deps build fmt lint sec + $(GINKGO) -r -v + +fmt: + @echo "FORMATTING" + @FORMATTED=`$(GO) fmt ./...` + @([[ ! -z "$(FORMATTED)" ]] && printf "Fixed unformatted files:\n$(FORMATTED)") || true + +lint: + @echo "LINTING" + $(GO_NOMOD) get -u golang.org/x/lint/golint + $(GOLINT) -set_exit_status ./... + @echo "VETTING" + $(GO) vet ./... + +sec: + @echo "SECURITY SCANNING" + ./$(BIN) ./... + +test-coverage: install-test-deps + go test -race -v -count=1 -coverprofile=coverage.out ./... + +build: + go build -o $(BIN) ./cmd/gosec/ + +clean: + rm -rf build vendor dist coverage.txt + rm -f release image $(BIN) + +release: + @echo "Releasing the gosec binary..." + goreleaser release + +build-linux: + CGO_ENABLED=$(CGO_ENABLED) GOOS=linux GOARCH=amd64 go build -ldflags $(BUILDFLAGS) -o $(BIN) ./cmd/gosec/ + +image: + @echo "Building the Docker image..." + docker build -t $(IMAGE_REPO)/$(BIN):$(GIT_TAG) --build-arg GO_VERSION=$(GO_VERSION) . + docker tag $(IMAGE_REPO)/$(BIN):$(GIT_TAG) $(IMAGE_REPO)/$(BIN):latest + touch image + +image-push: image + @echo "Pushing the Docker image..." + docker push $(IMAGE_REPO)/$(BIN):$(GIT_TAG) + docker push $(IMAGE_REPO)/$(BIN):latest + +.PHONY: test build clean release image image-push diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md new file mode 100644 index 000000000..e69e41684 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/README.md @@ -0,0 +1,401 @@ + +# gosec - Golang Security Checker + +Inspects source code for security problems by scanning the Go AST. + + + +## License + +Licensed under the Apache License, Version 2.0 (the "License"). +You may not use this file except in compliance with the License. +You may obtain a copy of the License [here](http://www.apache.org/licenses/LICENSE-2.0). 
+ +## Project status + +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3218/badge)](https://bestpractices.coreinfrastructure.org/projects/3218) +[![Build Status](https://github.com/securego/gosec/workflows/CI/badge.svg)](https://github.com/securego/gosec/actions?query=workflows%3ACI) +[![Coverage Status](https://codecov.io/gh/securego/gosec/branch/master/graph/badge.svg)](https://codecov.io/gh/securego/gosec) +[![GoReport](https://goreportcard.com/badge/github.com/securego/gosec)](https://goreportcard.com/report/github.com/securego/gosec) +[![GoDoc](https://pkg.go.dev/badge/github.com/securego/gosec/v2)](https://pkg.go.dev/github.com/securego/gosec/v2) +[![Docs](https://readthedocs.org/projects/docs/badge/?version=latest)](https://securego.io/) +[![Downloads](https://img.shields.io/github/downloads/securego/gosec/total.svg)](https://github.com/securego/gosec/releases) +[![Docker Pulls](https://img.shields.io/docker/pulls/securego/gosec.svg)](https://hub.docker.com/r/securego/gosec/tags) +[![Slack](http://securego.herokuapp.com/badge.svg)](http://securego.herokuapp.com) + +## Install + +### CI Installation + +```bash +# binary will be $(go env GOPATH)/bin/gosec +curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $(go env GOPATH)/bin vX.Y.Z + +# or install it into ./bin/ +curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s vX.Y.Z + +# In alpine linux (as it does not come with curl by default) +wget -O - -q https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s vX.Y.Z + +# If you want to use the checksums provided on the "Releases" page +# then you will have to download a tar.gz file for your operating system instead of a binary file +wget https://github.com/securego/gosec/releases/download/vX.Y.Z/gosec_vX.Y.Z_OS.tar.gz + +# The file will be in the current folder where you run the command +# and you can check the checksum like this +echo " gosec_vX.Y.Z_OS.tar.gz" | sha256sum -c - + +gosec --help +``` + +### GitHub Action + +You can run `gosec` as a GitHub action as follows: + +```yaml +name: Run Gosec +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v2 + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + args: ./... +``` + +### Integrating with code scanning + +You can [integrate third-party code analysis tools](https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/integrating-with-code-scanning) with GitHub code scanning by uploading data as SARIF files. + +The workflow shows an example of running the `gosec` as a step in a GitHub action workflow which outputs the `results.sarif` file. The workflow then uploads the `results.sarif` file to GitHub using the `upload-sarif` action. + +```yaml +name: "Security Scan" + +# Run workflow each time code is pushed to your repository and on a schedule. +# The scheduled workflow runs every at 00:00 on Sunday UTC time. +on: + push: + schedule: + - cron: '0 0 * * 0' + +jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v2 + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + # we let the report trigger content trigger a failure using the GitHub Security features. + args: '-no-fail -fmt sarif -out results.sarif ./...' 
+ - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v1 + with: + # Path to SARIF file relative to the root of the repository + sarif_file: results.sarif +``` + +### Local Installation + +```bash +go get -u github.com/securego/gosec/v2/cmd/gosec +``` + +## Usage + +Gosec can be configured to only run a subset of rules, to exclude certain file +paths, and produce reports in different formats. By default all rules will be +run against the supplied input files. To recursively scan from the current +directory you can supply `./...` as the input argument. + +### Available rules + +- G101: Look for hard coded credentials +- G102: Bind to all interfaces +- G103: Audit the use of unsafe block +- G104: Audit errors not checked +- G106: Audit the use of ssh.InsecureIgnoreHostKey +- G107: Url provided to HTTP request as taint input +- G108: Profiling endpoint automatically exposed on /debug/pprof +- G109: Potential Integer overflow made by strconv.Atoi result conversion to int16/32 +- G110: Potential DoS vulnerability via decompression bomb +- G201: SQL query construction using format string +- G202: SQL query construction using string concatenation +- G203: Use of unescaped data in HTML templates +- G204: Audit use of command execution +- G301: Poor file permissions used when creating a directory +- G302: Poor file permissions used with chmod +- G303: Creating tempfile using a predictable path +- G304: File path provided as taint input +- G305: File traversal when extracting zip/tar archive +- G306: Poor file permissions used when writing to a new file +- G307: Deferring a method which returns an error +- G401: Detect the usage of DES, RC4, MD5 or SHA1 +- G402: Look for bad TLS connection settings +- G403: Ensure minimum RSA key length of 2048 bits +- G404: Insecure random number source (rand) +- G501: Import blocklist: crypto/md5 +- G502: Import blocklist: crypto/des +- G503: Import blocklist: crypto/rc4 +- G504: Import blocklist: net/http/cgi +- G505: Import blocklist: crypto/sha1 +- G601: Implicit memory aliasing of items from a range statement + +### Retired rules + +- G105: Audit the use of math/big.Int.Exp - [CVE is fixed](https://github.com/golang/go/issues/15184) + +### Selecting rules + +By default, gosec will run all rules against the supplied file paths. It is however possible to select a subset of rules to run via the `-include=` flag, +or to specify a set of rules to explicitly exclude using the `-exclude=` flag. + +```bash +# Run a specific set of rules +$ gosec -include=G101,G203,G401 ./... + +# Run everything except for rule G303 +$ gosec -exclude=G303 ./... +``` + +### CWE Mapping + +Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue.go#L50). + +### Configuration + +A number of global settings can be provided in a configuration file as follows: + +```JSON +{ + "global": { + "nosec": "enabled", + "audit": "enabled" + } +} +``` + +- `nosec`: this setting will overwrite all `#nosec` directives defined throughout the code base +- `audit`: runs in audit mode which enables addition checks that for normal code analysis might be too nosy + +```bash +# Run with a global configuration file +$ gosec -conf config.json . +``` + +Also some rules accept configuration. 
For instance on rule `G104`, it is possible to define packages along with a list +of functions which will be skipped when auditing the not checked errors: + +```JSON +{ + "G104": { + "io/ioutil": ["WriteFile"] + } +} +``` + +You can also configure the hard-coded credentials rule `G101` with additional patters, or adjust the entropy threshold: + +```JSON +{ + "G101": { + "pattern": "(?i)passwd|pass|password|pwd|secret|private_key|token", + "ignore_entropy": false, + "entropy_threshold": "80.0", + "per_char_threshold": "3.0", + "truncate": "32" + } +} +``` + +### Dependencies + +gosec will fetch automatically the dependencies of the code which is being analyzed when go module is turned on (e.g.`GO111MODULE=on`). If this is not the case, +the dependencies need to be explicitly downloaded by running the `go get -d` command before the scan. + +### Excluding test files and folders + +gosec will ignore test files across all packages and any dependencies in your vendor directory. + +The scanning of test files can be enabled with the following flag: + +```bash + +gosec -tests ./... +``` + +Also additional folders can be excluded as follows: + +```bash + gosec -exclude-dir=rules -exclude-dir=cmd ./... +``` + +### Annotating code + +As with all automated detection tools, there will be cases of false positives. In cases where gosec reports a failure that has been manually verified as being safe, +it is possible to annotate the code with a `#nosec` comment. + +The annotation causes gosec to stop processing any further nodes within the +AST so can apply to a whole block or more granularly to a single expression. + +```go + +import "md5" // #nosec + + +func main(){ + + /* #nosec */ + if x > y { + h := md5.New() // this will also be ignored + } + +} + +``` + +When a specific false positive has been identified and verified as safe, you may wish to suppress only that single rule (or a specific set of rules) +within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within +the `#nosec` annotation, e.g: `/* #nosec G401 */` or `// #nosec G201 G202 G203` + +In some cases you may also want to revisit places where `#nosec` annotations +have been used. To run the scanner and ignore any `#nosec` annotations you +can do the following: + +```bash +gosec -nosec=true ./... +``` + +### Build tags + +gosec is able to pass your [Go build tags](https://golang.org/pkg/go/build/) to the analyzer. +They can be provided as a comma separated list as follows: + +```bash +gosec -tag debug,ignore ./... +``` + +### Output formats + +gosec currently supports `text`, `json`, `yaml`, `csv`, `sonarqube`, `JUnit XML`, `html` and `golint` output formats. By default +results will be reported to stdout, but can also be written to an output +file. The output format is controlled by the `-fmt` flag, and the output file is controlled by the `-out` flag as follows: + +```bash +# Write output in json format to results.json +$ gosec -fmt=json -out=results.json *.go +``` + +Results will be reported to stdout as well as to the provided output file by `-stdout` flag. 
The `-verbose` flag overrides the +output format when stdout the results while saving them in the output file +```bash +# Write output in json format to results.json as well as stdout +$ gosec -fmt=json -out=results.json -stdout *.go + +# Overrides the output format to 'text' when stdout the results, while writing it to results.json +$ gosec -fmt=json -out=results.json -stdout -verbose=text *.go +``` + +**Note:** gosec generates the [generic issue import format](https://docs.sonarqube.org/latest/analysis/generic-issue/) for SonarQube, and a report has to be imported into SonarQube using `sonar.externalIssuesReportPaths=path/to/gosec-report.json`. + +## Development + +### Build + +You can build the binary with: + +```bash +make +``` + +### Note on Sarif Types Generation + +Install the tool with : + +```bash +go get -u github.com/a-h/generate/cmd/schema-generate +``` + +Then generate the types with : + +```bash +schema-generate -i sarif-schema-2.1.0.json -o mypath/types.go +``` + +Most of the MarshallJSON/UnmarshalJSON are removed except the one for PropertyBag which is handy to inline the additionnal properties. The rest can be removed. +The URI,ID, UUID, GUID were renamed so it fits the Golang convention defined [here](https://github.com/golang/lint/blob/master/lint.go#L700) + +### Tests + +You can run all unit tests using: + +```bash +make test +``` + +### Release + +You can create a release by tagging the version as follows: + +``` bash +git tag v1.0.0 -m "Release version v1.0.0" +git push origin v1.0.0 +``` + +The GitHub [release workflow](.github/workflows/release.yml) triggers immediately after the tag is pushed upstream. This flow will +release the binaries using the [goreleaser](https://goreleaser.com/actions/) action and then it will build and publish the docker image into Docker Hub. + +### Docker image + +You can also build locally the docker image by using the command: + +```bash +make image +``` + +You can run the `gosec` tool in a container against your local Go project. You only have to mount the project +into a volume as follows: + +```bash +docker run --rm -it -w // -v /:/ securego/gosec //... +``` + +**Note:** the current working directory needs to be set with `-w` option in order to get successfully resolved the dependencies from go module file + +### Generate TLS rule + +The configuration of TLS rule can be generated from [Mozilla's TLS ciphers recommendation](https://statics.tls.security.mozilla.org/server-side-tls-conf.json). + +First you need to install the generator tool: + +```bash +go get github.com/securego/gosec/v2/cmd/tlsconfig/... +``` + +You can invoke now the `go generate` in the root of the project: + +```bash +go generate ./... +``` + +This will generate the `rules/tls_config.go` file which will contain the current ciphers recommendation from Mozilla. + +## Who is using gosec? + +This is a [list](USERS.md) with some of the gosec's users. diff --git a/vendor/github.com/securego/gosec/v2/USERS.md b/vendor/github.com/securego/gosec/v2/USERS.md new file mode 100644 index 000000000..73369ceec --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/USERS.md @@ -0,0 +1,28 @@ +# Users + +This is a list of gosec's users. Please send a pull request with your organisation or project name if you are using gosec. + +## Companies + +1. [Gitlab](https://docs.gitlab.com/ee/user/application_security/sast/) +2. [CloudBees](https://cloudbees.com) +3. [VMware](https://www.vmware.com) +4. [Codacy](https://support.codacy.com/hc/en-us/articles/213632009-Engines) +5. 
[Coinbase](https://github.com/coinbase/watchdog/blob/master/Makefile#L12) +6. [RedHat/OpenShift](https://github.com/openshift/openshift-azure) +7. [Guardalis](https://www.guardrails.io/) +8. [1Password](https://github.com/1Password/srp) +9. [PingCAP/tidb](https://github.com/pingcap/tidb) +10. [Checkmarx](https://www.checkmarx.com/) + +## Projects + +1. [golangci-lint](https://github.com/golangci/golangci-lint) +2. [Kubernetes](https://github.com/kubernetes/kubernetes) (via golangci) +3. [caddy](https://github.com/caddyserver/caddy) (via golangci) +4. [Jenkins X](https://github.com/jenkins-x/jx/blob/bdc51840a41b75776159c1c7b7faa1cf477be473/hack/linter.sh#L25) +5. [HuskyCI](https://huskyci.opensource.globo.com/) +6. [GolangCI](https://golangci.com/) +7. [semgrep.live](https://semgrep.live/) +8. [gofiber](https://github.com/gofiber/fiber) +9. [KICS](https://github.com/Checkmarx/kics) diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml new file mode 100644 index 000000000..aab6c8039 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/action.yml @@ -0,0 +1,19 @@ +name: 'Gosec Security Checker' +description: 'Runs the gosec security checker' +author: '@ccojocar' + +inputs: + args: + description: 'Arguments for gosec' + required: true + default: '-h' + +runs: + using: 'docker' + image: 'docker://securego/gosec' + args: + - ${{ inputs.args }} + +branding: + icon: 'shield' + color: 'blue' diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go new file mode 100644 index 000000000..f669d5ae0 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/analyzer.go @@ -0,0 +1,375 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gosec holds the central scanning logic used by gosec security scanner +package gosec + +import ( + "fmt" + "go/ast" + "go/build" + "go/token" + "go/types" + "log" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/packages" +) + +// LoadMode controls the amount of details to return when loading the packages +const LoadMode = packages.NeedName | + packages.NeedFiles | + packages.NeedCompiledGoFiles | + packages.NeedImports | + packages.NeedTypes | + packages.NeedTypesSizes | + packages.NeedTypesInfo | + packages.NeedSyntax + +// The Context is populated with data parsed from the source code as it is scanned. +// It is passed through to all rule functions as they are called. Rules may use +// this data in conjunction withe the encountered AST node. +type Context struct { + FileSet *token.FileSet + Comments ast.CommentMap + Info *types.Info + Pkg *types.Package + PkgFiles []*ast.File + Root *ast.File + Config Config + Imports *ImportTracker + Ignores []map[string]bool + PassedValues map[string]interface{} +} + +// Metrics used when reporting information about a scanning run. 
+type Metrics struct { + NumFiles int `json:"files"` + NumLines int `json:"lines"` + NumNosec int `json:"nosec"` + NumFound int `json:"found"` +} + +// Analyzer object is the main object of gosec. It has methods traverse an AST +// and invoke the correct checking rules as on each node as required. +type Analyzer struct { + ignoreNosec bool + ruleset RuleSet + context *Context + config Config + logger *log.Logger + issues []*Issue + stats *Metrics + errors map[string][]Error // keys are file paths; values are the golang errors in those files + tests bool +} + +// NewAnalyzer builds a new analyzer. +func NewAnalyzer(conf Config, tests bool, logger *log.Logger) *Analyzer { + ignoreNoSec := false + if enabled, err := conf.IsGlobalEnabled(Nosec); err == nil { + ignoreNoSec = enabled + } + if logger == nil { + logger = log.New(os.Stderr, "[gosec]", log.LstdFlags) + } + return &Analyzer{ + ignoreNosec: ignoreNoSec, + ruleset: make(RuleSet), + context: &Context{}, + config: conf, + logger: logger, + issues: make([]*Issue, 0, 16), + stats: &Metrics{}, + errors: make(map[string][]Error), + tests: tests, + } +} + +// SetConfig upates the analyzer configuration +func (gosec *Analyzer) SetConfig(conf Config) { + gosec.config = conf +} + +// Config returns the current configuration +func (gosec *Analyzer) Config() Config { + return gosec.config +} + +// LoadRules instantiates all the rules to be used when analyzing source +// packages +func (gosec *Analyzer) LoadRules(ruleDefinitions map[string]RuleBuilder) { + for id, def := range ruleDefinitions { + r, nodes := def(id, gosec.config) + gosec.ruleset.Register(r, nodes...) + } +} + +// Process kicks off the analysis process for a given package +func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error { + config := &packages.Config{ + Mode: LoadMode, + BuildFlags: buildTags, + Tests: gosec.tests, + } + + for _, pkgPath := range packagePaths { + pkgs, err := gosec.load(pkgPath, config) + if err != nil { + gosec.AppendError(pkgPath, err) + } + for _, pkg := range pkgs { + if pkg.Name != "" { + err := gosec.ParseErrors(pkg) + if err != nil { + return fmt.Errorf("parsing errors in pkg %q: %v", pkg.Name, err) + } + gosec.Check(pkg) + } + } + } + sortErrors(gosec.errors) + return nil +} + +func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.Package, error) { + abspath, err := GetPkgAbsPath(pkgPath) + if err != nil { + gosec.logger.Printf("Skipping: %s. Path doesn't exist.", abspath) + return []*packages.Package{}, nil + } + + gosec.logger.Println("Import directory:", abspath) + // step 1/3 create build context. + buildD := build.Default + // step 2/3: add build tags to get env dependent files into basePackage. + buildD.BuildTags = conf.BuildFlags + basePackage, err := buildD.ImportDir(pkgPath, build.ImportComment) + if err != nil { + return []*packages.Package{}, fmt.Errorf("importing dir %q: %v", pkgPath, err) + } + + var packageFiles []string + for _, filename := range basePackage.GoFiles { + packageFiles = append(packageFiles, path.Join(pkgPath, filename)) + } + for _, filename := range basePackage.CgoFiles { + packageFiles = append(packageFiles, path.Join(pkgPath, filename)) + } + + if gosec.tests { + testsFiles := []string{} + testsFiles = append(testsFiles, basePackage.TestGoFiles...) + testsFiles = append(testsFiles, basePackage.XTestGoFiles...) 
+ for _, filename := range testsFiles { + packageFiles = append(packageFiles, path.Join(pkgPath, filename)) + } + } + + // step 3/3 remove build tags from conf to proceed build correctly. + conf.BuildFlags = nil + pkgs, err := packages.Load(conf, packageFiles...) + if err != nil { + return []*packages.Package{}, fmt.Errorf("loading files from package %q: %v", pkgPath, err) + } + return pkgs, nil +} + +// Check runs analysis on the given package +func (gosec *Analyzer) Check(pkg *packages.Package) { + gosec.logger.Println("Checking package:", pkg.Name) + for _, file := range pkg.Syntax { + checkedFile := pkg.Fset.File(file.Pos()).Name() + // Skip the no-Go file from analysis (e.g. a Cgo files is expanded in 3 different files + // stored in the cache which do not need to by analyzed) + if filepath.Ext(checkedFile) != ".go" { + continue + } + gosec.logger.Println("Checking file:", checkedFile) + gosec.context.FileSet = pkg.Fset + gosec.context.Config = gosec.config + gosec.context.Comments = ast.NewCommentMap(gosec.context.FileSet, file, file.Comments) + gosec.context.Root = file + gosec.context.Info = pkg.TypesInfo + gosec.context.Pkg = pkg.Types + gosec.context.PkgFiles = pkg.Syntax + gosec.context.Imports = NewImportTracker() + gosec.context.Imports.TrackFile(file) + gosec.context.PassedValues = make(map[string]interface{}) + ast.Walk(gosec, file) + gosec.stats.NumFiles++ + gosec.stats.NumLines += pkg.Fset.File(file.Pos()).LineCount() + } +} + +// ParseErrors parses the errors from given package +func (gosec *Analyzer) ParseErrors(pkg *packages.Package) error { + if len(pkg.Errors) == 0 { + return nil + } + for _, pkgErr := range pkg.Errors { + parts := strings.Split(pkgErr.Pos, ":") + file := parts[0] + var err error + var line int + if len(parts) > 1 { + if line, err = strconv.Atoi(parts[1]); err != nil { + return fmt.Errorf("parsing line: %v", err) + } + } + var column int + if len(parts) > 2 { + if column, err = strconv.Atoi(parts[2]); err != nil { + return fmt.Errorf("parsing column: %v", err) + } + } + msg := strings.TrimSpace(pkgErr.Msg) + newErr := NewError(line, column, msg) + if errSlice, ok := gosec.errors[file]; ok { + gosec.errors[file] = append(errSlice, *newErr) + } else { + errSlice = []Error{} + gosec.errors[file] = append(errSlice, *newErr) + } + } + return nil +} + +// AppendError appends an error to the file errors +func (gosec *Analyzer) AppendError(file string, err error) { + // Do not report the error for empty packages (e.g. files excluded from build with a tag) + r := regexp.MustCompile(`no buildable Go source files in`) + if r.MatchString(err.Error()) { + return + } + errors := []Error{} + if ferrs, ok := gosec.errors[file]; ok { + errors = ferrs + } + ferr := NewError(0, 0, err.Error()) + errors = append(errors, *ferr) + gosec.errors[file] = errors +} + +// ignore a node (and sub-tree) if it is tagged with a nosec tag comment +func (gosec *Analyzer) ignore(n ast.Node) ([]string, bool) { + if groups, ok := gosec.context.Comments[n]; ok && !gosec.ignoreNosec { + + // Checks if an alternative for #nosec is set and, if not, uses the default. 
+ noSecDefaultTag := "#nosec" + noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative) + if err != nil { + noSecAlternativeTag = noSecDefaultTag + } + + for _, group := range groups { + + foundDefaultTag := strings.Contains(group.Text(), noSecDefaultTag) + foundAlternativeTag := strings.Contains(group.Text(), noSecAlternativeTag) + + if foundDefaultTag || foundAlternativeTag { + gosec.stats.NumNosec++ + + // Pull out the specific rules that are listed to be ignored. + re := regexp.MustCompile(`(G\d{3})`) + matches := re.FindAllStringSubmatch(group.Text(), -1) + + // If no specific rules were given, ignore everything. + if len(matches) == 0 { + return nil, true + } + + // Find the rule IDs to ignore. + var ignores []string + for _, v := range matches { + ignores = append(ignores, v[1]) + } + return ignores, false + } + } + } + return nil, false +} + +// Visit runs the gosec visitor logic over an AST created by parsing go code. +// Rule methods added with AddRule will be invoked as necessary. +func (gosec *Analyzer) Visit(n ast.Node) ast.Visitor { + // If we've reached the end of this branch, pop off the ignores stack. + if n == nil { + if len(gosec.context.Ignores) > 0 { + gosec.context.Ignores = gosec.context.Ignores[1:] + } + return gosec + } + + // Get any new rule exclusions. + ignoredRules, ignoreAll := gosec.ignore(n) + if ignoreAll { + return nil + } + + // Now create the union of exclusions. + ignores := map[string]bool{} + if len(gosec.context.Ignores) > 0 { + for k, v := range gosec.context.Ignores[0] { + ignores[k] = v + } + } + + for _, v := range ignoredRules { + ignores[v] = true + } + + // Push the new set onto the stack. + gosec.context.Ignores = append([]map[string]bool{ignores}, gosec.context.Ignores...) + + // Track aliased and initialization imports + gosec.context.Imports.TrackImport(n) + + for _, rule := range gosec.ruleset.RegisteredFor(n) { + if _, ok := ignores[rule.ID()]; ok { + continue + } + issue, err := rule.Match(n, gosec.context) + if err != nil { + file, line := GetLocation(n, gosec.context) + file = path.Base(file) + gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) + } + if issue != nil { + gosec.issues = append(gosec.issues, issue) + gosec.stats.NumFound++ + } + } + return gosec +} + +// Report returns the current issues discovered and the metrics about the scan +func (gosec *Analyzer) Report() ([]*Issue, *Metrics, map[string][]Error) { + return gosec.issues, gosec.stats, gosec.errors +} + +// Reset clears state such as context, issues and metrics from the configured analyzer +func (gosec *Analyzer) Reset() { + gosec.context = &Context{} + gosec.issues = make([]*Issue, 0, 16) + gosec.stats = &Metrics{} + gosec.ruleset = NewRuleSet() +} diff --git a/vendor/github.com/securego/gosec/v2/call_list.go b/vendor/github.com/securego/gosec/v2/call_list.go new file mode 100644 index 000000000..4b3fcf057 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/call_list.go @@ -0,0 +1,109 @@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "go/ast" + "strings" +) + +const vendorPath = "vendor/" + +type set map[string]bool + +// CallList is used to check for usage of specific packages +// and functions. +type CallList map[string]set + +// NewCallList creates a new empty CallList +func NewCallList() CallList { + return make(CallList) +} + +// AddAll will add several calls to the call list at once +func (c CallList) AddAll(selector string, idents ...string) { + for _, ident := range idents { + c.Add(selector, ident) + } +} + +// Add a selector and call to the call list +func (c CallList) Add(selector, ident string) { + if _, ok := c[selector]; !ok { + c[selector] = make(set) + } + c[selector][ident] = true +} + +// Contains returns true if the package and function are +/// members of this call list. +func (c CallList) Contains(selector, ident string) bool { + if idents, ok := c[selector]; ok { + _, found := idents[ident] + return found + } + return false +} + +// ContainsPointer returns true if a pointer to the selector type or the type +// itself is a members of this call list. +func (c CallList) ContainsPointer(selector, indent string) bool { + if strings.HasPrefix(selector, "*") { + if c.Contains(selector, indent) { + return true + } + s := strings.TrimPrefix(selector, "*") + return c.Contains(s, indent) + } + return false +} + +// ContainsPkgCallExpr resolves the call expression name and type, and then further looks +// up the package path for that type. Finally, it determines if the call exists within the call list +func (c CallList) ContainsPkgCallExpr(n ast.Node, ctx *Context, stripVendor bool) *ast.CallExpr { + selector, ident, err := GetCallInfo(n, ctx) + if err != nil { + return nil + } + + // Use only explicit path (optionally strip vendor path prefix) to reduce conflicts + path, ok := GetImportPath(selector, ctx) + if !ok { + return nil + } + if stripVendor { + if vendorIdx := strings.Index(path, vendorPath); vendorIdx >= 0 { + path = path[vendorIdx+len(vendorPath):] + } + } + if !c.Contains(path, ident) { + return nil + } + + return n.(*ast.CallExpr) +} + +// ContainsCallExpr resolves the call expression name and type, and then determines +// if the call exists with the call list +func (c CallList) ContainsCallExpr(n ast.Node, ctx *Context) *ast.CallExpr { + selector, ident, err := GetCallInfo(n, ctx) + if err != nil { + return nil + } + if !c.Contains(selector, ident) && !c.ContainsPointer(selector, ident) { + return nil + } + + return n.(*ast.CallExpr) +} diff --git a/vendor/github.com/securego/gosec/v2/config.go b/vendor/github.com/securego/gosec/v2/config.go new file mode 100644 index 000000000..4af62b295 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/config.go @@ -0,0 +1,125 @@ +package gosec + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" +) + +const ( + // Globals are applicable to all rules and used for general + // configuration settings for gosec. + Globals = "global" +) + +// GlobalOption defines the name of the global options +type GlobalOption string + +const ( + // Nosec global option for #nosec directive + Nosec GlobalOption = "nosec" + // Audit global option which indicates that gosec runs in audit mode + Audit GlobalOption = "audit" + // NoSecAlternative global option alternative for #nosec directive + NoSecAlternative GlobalOption = "#nosec" +) + +// Config is used to provide configuration and customization to each of the rules. 
+type Config map[string]interface{} + +// NewConfig initializes a new configuration instance. The configuration data then +// needs to be loaded via c.ReadFrom(strings.NewReader("config data")) +// or from a *os.File. +func NewConfig() Config { + cfg := make(Config) + cfg[Globals] = make(map[GlobalOption]string) + return cfg +} + +func (c Config) keyToGlobalOptions(key string) GlobalOption { + return GlobalOption(key) +} + +func (c Config) convertGlobals() { + if globals, ok := c[Globals]; ok { + if settings, ok := globals.(map[string]interface{}); ok { + validGlobals := map[GlobalOption]string{} + for k, v := range settings { + validGlobals[c.keyToGlobalOptions(k)] = fmt.Sprintf("%v", v) + } + c[Globals] = validGlobals + } + } +} + +// ReadFrom implements the io.ReaderFrom interface. This +// should be used with io.Reader to load configuration from +// file or from string etc. +func (c Config) ReadFrom(r io.Reader) (int64, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return int64(len(data)), err + } + if err = json.Unmarshal(data, &c); err != nil { + return int64(len(data)), err + } + c.convertGlobals() + return int64(len(data)), nil +} + +// WriteTo implements the io.WriteTo interface. This should +// be used to save or print out the configuration information. +func (c Config) WriteTo(w io.Writer) (int64, error) { + data, err := json.Marshal(c) + if err != nil { + return int64(len(data)), err + } + return io.Copy(w, bytes.NewReader(data)) +} + +// Get returns the configuration section for the supplied key +func (c Config) Get(section string) (interface{}, error) { + settings, found := c[section] + if !found { + return nil, fmt.Errorf("Section %s not in configuration", section) + } + return settings, nil +} + +// Set section in the configuration to specified value +func (c Config) Set(section string, value interface{}) { + c[section] = value +} + +// GetGlobal returns value associated with global configuration option +func (c Config) GetGlobal(option GlobalOption) (string, error) { + if globals, ok := c[Globals]; ok { + if settings, ok := globals.(map[GlobalOption]string); ok { + if value, ok := settings[option]; ok { + return value, nil + } + return "", fmt.Errorf("global setting for %s not found", option) + } + } + return "", fmt.Errorf("no global config options found") +} + +// SetGlobal associates a value with a global configuration option +func (c Config) SetGlobal(option GlobalOption, value string) { + if globals, ok := c[Globals]; ok { + if settings, ok := globals.(map[GlobalOption]string); ok { + settings[option] = value + } + } +} + +// IsGlobalEnabled checks if a global option is enabled +func (c Config) IsGlobalEnabled(option GlobalOption) (bool, error) { + value, err := c.GetGlobal(option) + if err != nil { + return false, err + } + return (value == "true" || value == "enabled"), nil +} diff --git a/vendor/github.com/securego/gosec/v2/cwe/data.go b/vendor/github.com/securego/gosec/v2/cwe/data.go new file mode 100644 index 000000000..8789ddd63 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/cwe/data.go @@ -0,0 +1,143 @@ +package cwe + +import "fmt" + +const ( + // Acronym is the acronym of CWE + Acronym = "CWE" + // Version the CWE version + Version = "4.4" + // ReleaseDateUtc the release Date of CWE Version + ReleaseDateUtc = "2021-03-15" + // Organization MITRE + Organization = "MITRE" + // Description the description of CWE + Description = "The MITRE Common Weakness Enumeration" +) + +var ( + // InformationURI link to the published CWE PDF + 
InformationURI = fmt.Sprintf("https://cwe.mitre.org/data/published/cwe_v%s.pdf/", Version) + // DownloadURI link to the zipped XML of the CWE list + DownloadURI = fmt.Sprintf("https://cwe.mitre.org/data/xml/cwec_v%s.xml.zip", Version) + + data = map[string]*Weakness{} + + weaknesses = []*Weakness{ + { + ID: "118", + Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.", + Name: "Incorrect Access of Indexable Resource ('Range Error')", + }, + { + ID: "190", + Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.", + Name: "Integer Overflow or Wraparound", + }, + { + ID: "200", + Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.", + Name: "Exposure of Sensitive Information to an Unauthorized Actor", + }, + { + ID: "22", + Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.", + Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')", + }, + { + ID: "242", + Description: "The program calls a function that can never be guaranteed to work safely.", + Name: "Use of Inherently Dangerous Function", + }, + { + ID: "276", + Description: "During installation, installed file permissions are set to allow anyone to modify those files.", + Name: "Incorrect Default Permissions", + }, + { + ID: "295", + Description: "The software does not validate, or incorrectly validates, a certificate.", + Name: "Improper Certificate Validation", + }, + { + ID: "310", + Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. 
The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.", + Name: "Cryptographic Issues", + }, + { + ID: "322", + Description: "The software performs a key exchange with an actor without verifying the identity of that actor.", + Name: "Key Exchange without Entity Authentication", + }, + { + ID: "326", + Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.", + Name: "Inadequate Encryption Strength", + }, + { + ID: "327", + Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.", + Name: "Use of a Broken or Risky Cryptographic Algorithm", + }, + { + ID: "338", + Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.", + Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)", + }, + { + ID: "377", + Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.", + Name: "Insecure Temporary File", + }, + { + ID: "409", + Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.", + Name: "Improper Handling of Highly Compressed Data (Data Amplification)", + }, + { + ID: "703", + Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.", + Name: "Improper Check or Handling of Exceptional Conditions", + }, + { + ID: "78", + Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')", + }, + { + ID: "79", + Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.", + Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", + }, + { + ID: "798", + Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.", + Name: "Use of Hard-coded Credentials", + }, + { + ID: "88", + Description: "The software constructs a string for a command to executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.", + Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')", + }, + { + ID: "89", + Description: "The software constructs all or part of an SQL command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", + }, + } +) + +func init() { + for _, 
weakness := range weaknesses { + data[weakness.ID] = weakness + } +} + +// Get retrieves a CWE weakness by its ID +func Get(id string) *Weakness { + weakness, ok := data[id] + if ok && weakness != nil { + return weakness + } + return nil +} diff --git a/vendor/github.com/securego/gosec/v2/cwe/types.go b/vendor/github.com/securego/gosec/v2/cwe/types.go new file mode 100644 index 000000000..a14ccb53a --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/cwe/types.go @@ -0,0 +1,34 @@ +package cwe + +import ( + "encoding/json" + "fmt" +) + +// Weakness defines a CWE weakness based on http://cwe.mitre.org/data/xsd/cwe_schema_v6.4.xsd +type Weakness struct { + ID string + Name string + Description string +} + +// SprintURL formats the CWE URL +func (w *Weakness) SprintURL() string { + return fmt.Sprintf("https://cwe.mitre.org/data/definitions/%s.html", w.ID) +} + +// SprintID formats the CWE ID +func (w *Weakness) SprintID() string { + return fmt.Sprintf("%s-%s", Acronym, w.ID) +} + +// MarshalJSON prints only the ID and URL +func (w *Weakness) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + ID string `json:"id"` + URL string `json:"url"` + }{ + ID: w.ID, + URL: w.SprintURL(), + }) +} diff --git a/vendor/github.com/securego/gosec/v2/entrypoint.sh b/vendor/github.com/securego/gosec/v2/entrypoint.sh new file mode 100644 index 000000000..4dc046729 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/entrypoint.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Expand the arguments into an array of strings. This is required because the GitHub action +# provides all arguments concatenated as a single string. +ARGS=("$@") + +/bin/gosec ${ARGS[*]} diff --git a/vendor/github.com/securego/gosec/v2/errors.go b/vendor/github.com/securego/gosec/v2/errors.go new file mode 100644 index 000000000..a27aa5821 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/errors.go @@ -0,0 +1,33 @@ +package gosec + +import ( + "sort" +) + +// Error is used when there are golang errors while parsing the AST +type Error struct { + Line int `json:"line"` + Column int `json:"column"` + Err string `json:"error"` +} + +// NewError creates an Error object +func NewError(line, column int, err string) *Error { + return &Error{ + Line: line, + Column: column, + Err: err, + } +} + +// sortErrors sorts the golang errors by line +func sortErrors(allErrors map[string][]Error) { + for _, errors := range allErrors { + sort.Slice(errors, func(i, j int) bool { + if errors[i].Line == errors[j].Line { + return errors[i].Column <= errors[j].Column + } + return errors[i].Line < errors[j].Line + }) + } +} diff --git a/vendor/github.com/securego/gosec/v2/go.mod b/vendor/github.com/securego/gosec/v2/go.mod new file mode 100644 index 000000000..1d08b1c74 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/go.mod @@ -0,0 +1,18 @@ +module github.com/securego/gosec/v2 + +require ( + github.com/google/uuid v1.2.0 + github.com/gookit/color v1.4.2 + github.com/lib/pq v1.10.2 + github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5 + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 + github.com/onsi/ginkgo v1.16.4 + github.com/onsi/gomega v1.13.0 + golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/text v0.3.6 + golang.org/x/tools v0.1.3 + gopkg.in/yaml.v2 v2.4.0 +) + +go 1.16 diff --git a/vendor/github.com/securego/gosec/v2/go.sum b/vendor/github.com/securego/gosec/v2/go.sum new file mode 100644 index 000000000..bdf02fb39 --- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/go.sum @@ -0,0 +1,702 @@ +bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver 
v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk= +github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5 h1:0KqC6/sLy7fDpBdybhVkkv4Yz+PmB7c9Dz9z3dLW804= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 
h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= 
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git 
a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go new file mode 100644 index 000000000..50ce19859 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/helpers.go @@ -0,0 +1,451 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "os/user" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" +) + +// MatchCallByPackage ensures that the specified package is imported, +// adjusts the name for any aliases and ignores cases that are +// initialization only imports. +// +// Usage: +// node, matched := MatchCallByPackage(n, ctx, "math/rand", "Read") +// +func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*ast.CallExpr, bool) { + importedName, found := GetImportedName(pkg, c) + if !found { + return nil, false + } + + if callExpr, ok := n.(*ast.CallExpr); ok { + packageName, callName, err := GetCallInfo(callExpr, c) + if err != nil { + return nil, false + } + if packageName == importedName { + for _, name := range names { + if callName == name { + return callExpr, true + } + } + } + } + return nil, false +} + +// MatchCompLit will match an ast.CompositeLit based on the supplied type +func MatchCompLit(n ast.Node, ctx *Context, required string) *ast.CompositeLit { + if complit, ok := n.(*ast.CompositeLit); ok { + typeOf := ctx.Info.TypeOf(complit) + if typeOf.String() == required { + return complit + } + } + return nil +} + +// GetInt will read and return an integer value from an ast.BasicLit +func GetInt(n ast.Node) (int64, error) { + if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.INT { + return strconv.ParseInt(node.Value, 0, 64) + } + return 0, fmt.Errorf("Unexpected AST node type: %T", n) +} + +// GetFloat will read and return a float value from an ast.BasicLit +func GetFloat(n ast.Node) (float64, error) { + if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.FLOAT { + return strconv.ParseFloat(node.Value, 64) + } + return 0.0, fmt.Errorf("Unexpected AST node type: %T", n) +} + +// GetChar will read and return a char value from an ast.BasicLit +func GetChar(n ast.Node) (byte, error) { + if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.CHAR { + return node.Value[0], nil + } + return 0, fmt.Errorf("Unexpected AST node type: %T", n) +} + +// GetString will read and return a string value from an ast.BasicLit +func GetString(n ast.Node) (string, error) { + if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING { + return strconv.Unquote(node.Value) + } + return "", fmt.Errorf("Unexpected AST node type: %T", n) +} + +// GetCallObject returns the object and call expression and associated +// object for a given AST node. nil, nil will be returned if the +// object cannot be resolved. 
+func GetCallObject(n ast.Node, ctx *Context) (*ast.CallExpr, types.Object) { + switch node := n.(type) { + case *ast.CallExpr: + switch fn := node.Fun.(type) { + case *ast.Ident: + return node, ctx.Info.Uses[fn] + case *ast.SelectorExpr: + return node, ctx.Info.Uses[fn.Sel] + } + } + return nil, nil +} + +// GetCallInfo returns the package or type and name associated with a +// call expression. +func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) { + switch node := n.(type) { + case *ast.CallExpr: + switch fn := node.Fun.(type) { + case *ast.SelectorExpr: + switch expr := fn.X.(type) { + case *ast.Ident: + if expr.Obj != nil && expr.Obj.Kind == ast.Var { + t := ctx.Info.TypeOf(expr) + if t != nil { + return t.String(), fn.Sel.Name, nil + } + return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") + } + return expr.Name, fn.Sel.Name, nil + case *ast.SelectorExpr: + if expr.Sel != nil { + t := ctx.Info.TypeOf(expr.Sel) + if t != nil { + return t.String(), fn.Sel.Name, nil + } + return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") + } + case *ast.CallExpr: + switch call := expr.Fun.(type) { + case *ast.Ident: + if call.Name == "new" { + t := ctx.Info.TypeOf(expr.Args[0]) + if t != nil { + return t.String(), fn.Sel.Name, nil + } + return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") + } + if call.Obj != nil { + switch decl := call.Obj.Decl.(type) { + case *ast.FuncDecl: + ret := decl.Type.Results + if ret != nil && len(ret.List) > 0 { + ret1 := ret.List[0] + if ret1 != nil { + t := ctx.Info.TypeOf(ret1.Type) + if t != nil { + return t.String(), fn.Sel.Name, nil + } + return "undefined", fn.Sel.Name, fmt.Errorf("missing type info") + } + } + } + } + } + } + case *ast.Ident: + return ctx.Pkg.Name(), fn.Name, nil + } + } + + return "", "", fmt.Errorf("unable to determine call info") +} + +// GetCallStringArgsValues returns the values of strings arguments if they can be resolved +func GetCallStringArgsValues(n ast.Node, ctx *Context) []string { + values := []string{} + switch node := n.(type) { + case *ast.CallExpr: + for _, arg := range node.Args { + switch param := arg.(type) { + case *ast.BasicLit: + value, err := GetString(param) + if err == nil { + values = append(values, value) + } + case *ast.Ident: + values = append(values, GetIdentStringValues(param)...) + } + } + } + return values +} + +// GetIdentStringValues return the string values of an Ident if they can be resolved +func GetIdentStringValues(ident *ast.Ident) []string { + values := []string{} + obj := ident.Obj + if obj != nil { + switch decl := obj.Decl.(type) { + case *ast.ValueSpec: + for _, v := range decl.Values { + value, err := GetString(v) + if err == nil { + values = append(values, value) + } + } + case *ast.AssignStmt: + for _, v := range decl.Rhs { + value, err := GetString(v) + if err == nil { + values = append(values, value) + } + } + } + } + return values +} + +// GetBinaryExprOperands returns all operands of a binary expression by traversing +// the expression tree +func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node { + var traverse func(be *ast.BinaryExpr) + result := []ast.Node{} + traverse = func(be *ast.BinaryExpr) { + if lhs, ok := be.X.(*ast.BinaryExpr); ok { + traverse(lhs) + } else { + result = append(result, be.X) + } + if rhs, ok := be.Y.(*ast.BinaryExpr); ok { + traverse(rhs) + } else { + result = append(result, be.Y) + } + } + traverse(be) + return result +} + +// GetImportedName returns the name used for the package within the +// code. 
It will resolve aliases and ignores initialization only imports. +func GetImportedName(path string, ctx *Context) (string, bool) { + importName, imported := ctx.Imports.Imported[path] + if !imported { + return "", false + } + + if _, initonly := ctx.Imports.InitOnly[path]; initonly { + return "", false + } + + if alias, ok := ctx.Imports.Aliased[path]; ok { + importName = alias + } + return importName, true +} + +// GetImportPath resolves the full import path of an identifier based on +// the imports in the current context. +func GetImportPath(name string, ctx *Context) (string, bool) { + for path := range ctx.Imports.Imported { + if imported, ok := GetImportedName(path, ctx); ok && imported == name { + return path, true + } + } + return "", false +} + +// GetLocation returns the filename and line number of an ast.Node +func GetLocation(n ast.Node, ctx *Context) (string, int) { + fobj := ctx.FileSet.File(n.Pos()) + return fobj.Name(), fobj.Line(n.Pos()) +} + +// Gopath returns all GOPATHs +func Gopath() []string { + defaultGoPath := runtime.GOROOT() + if u, err := user.Current(); err == nil { + defaultGoPath = filepath.Join(u.HomeDir, "go") + } + path := Getenv("GOPATH", defaultGoPath) + paths := strings.Split(path, string(os.PathListSeparator)) + for idx, path := range paths { + if abs, err := filepath.Abs(path); err == nil { + paths[idx] = abs + } + } + return paths +} + +// Getenv returns the values of the environment variable, otherwise +// returns the default if variable is not set +func Getenv(key, userDefault string) string { + if val := os.Getenv(key); val != "" { + return val + } + return userDefault +} + +// GetPkgRelativePath returns the Go relative relative path derived +// form the given path +func GetPkgRelativePath(path string) (string, error) { + abspath, err := filepath.Abs(path) + if err != nil { + abspath = path + } + if strings.HasSuffix(abspath, ".go") { + abspath = filepath.Dir(abspath) + } + for _, base := range Gopath() { + projectRoot := filepath.FromSlash(fmt.Sprintf("%s/src/", base)) + if strings.HasPrefix(abspath, projectRoot) { + return strings.TrimPrefix(abspath, projectRoot), nil + } + } + return "", errors.New("no project relative path found") +} + +// GetPkgAbsPath returns the Go package absolute path derived from +// the given path +func GetPkgAbsPath(pkgPath string) (string, error) { + absPath, err := filepath.Abs(pkgPath) + if err != nil { + return "", err + } + if _, err := os.Stat(absPath); os.IsNotExist(err) { + return "", errors.New("no project absolute path found") + } + return absPath, nil +} + +// ConcatString recursively concatenates strings from a binary expression +func ConcatString(n *ast.BinaryExpr) (string, bool) { + var s string + // sub expressions are found in X object, Y object is always last BasicLit + if rightOperand, ok := n.Y.(*ast.BasicLit); ok { + if str, err := GetString(rightOperand); err == nil { + s = str + s + } + } else { + return "", false + } + if leftOperand, ok := n.X.(*ast.BinaryExpr); ok { + if recursion, ok := ConcatString(leftOperand); ok { + s = recursion + s + } + } else if leftOperand, ok := n.X.(*ast.BasicLit); ok { + if str, err := GetString(leftOperand); err == nil { + s = str + s + } + } else { + return "", false + } + return s, true +} + +// FindVarIdentities returns array of all variable identities in a given binary expression +func FindVarIdentities(n *ast.BinaryExpr, c *Context) ([]*ast.Ident, bool) { + identities := []*ast.Ident{} + // sub expressions are found in X object, Y object is always the last term 
+ if rightOperand, ok := n.Y.(*ast.Ident); ok { + obj := c.Info.ObjectOf(rightOperand) + if _, ok := obj.(*types.Var); ok && !TryResolve(rightOperand, c) { + identities = append(identities, rightOperand) + } + } + if leftOperand, ok := n.X.(*ast.BinaryExpr); ok { + if leftIdentities, ok := FindVarIdentities(leftOperand, c); ok { + identities = append(identities, leftIdentities...) + } + } else { + if leftOperand, ok := n.X.(*ast.Ident); ok { + obj := c.Info.ObjectOf(leftOperand) + if _, ok := obj.(*types.Var); ok && !TryResolve(leftOperand, c) { + identities = append(identities, leftOperand) + } + } + } + + if len(identities) > 0 { + return identities, true + } + // if nil or error, return false + return nil, false +} + +// PackagePaths returns a slice with all packages path at given root directory +func PackagePaths(root string, excludes []*regexp.Regexp) ([]string, error) { + if strings.HasSuffix(root, "...") { + root = root[0 : len(root)-3] + } else { + return []string{root}, nil + } + paths := map[string]bool{} + err := filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if filepath.Ext(path) == ".go" { + path = filepath.Dir(path) + if isExcluded(path, excludes) { + return nil + } + paths[path] = true + } + return nil + }) + if err != nil { + return []string{}, err + } + + result := []string{} + for path := range paths { + result = append(result, path) + } + return result, nil +} + +// isExcluded checks if a string matches any of the exclusion regexps +func isExcluded(str string, excludes []*regexp.Regexp) bool { + if excludes == nil { + return false + } + for _, exclude := range excludes { + if exclude != nil && exclude.MatchString(str) { + return true + } + } + return false +} + +// ExcludedDirsRegExp builds the regexps for a list of excluded dirs provided as strings +func ExcludedDirsRegExp(excludedDirs []string) []*regexp.Regexp { + var exps []*regexp.Regexp + for _, excludedDir := range excludedDirs { + str := fmt.Sprintf(`([\\/])?%s([\\/])?`, excludedDir) + r := regexp.MustCompile(str) + exps = append(exps, r) + } + return exps +} + +// RootPath returns the absolute root path of a scan +func RootPath(root string) (string, error) { + root = strings.TrimSuffix(root, "...") + return filepath.Abs(root) +} diff --git a/vendor/github.com/securego/gosec/v2/import_tracker.go b/vendor/github.com/securego/gosec/v2/import_tracker.go new file mode 100644 index 000000000..cbb8c5518 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/import_tracker.go @@ -0,0 +1,75 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "go/ast" + "go/types" + "strings" +) + +// ImportTracker is used to normalize the packages that have been imported +// by a source file. It is able to differentiate between plain imports, aliased +// imports and init only imports. 
+type ImportTracker struct { + Imported map[string]string + Aliased map[string]string + InitOnly map[string]bool +} + +// NewImportTracker creates an empty Import tracker instance +func NewImportTracker() *ImportTracker { + return &ImportTracker{ + make(map[string]string), + make(map[string]string), + make(map[string]bool), + } +} + +// TrackFile track all the imports used by the supplied file +func (t *ImportTracker) TrackFile(file *ast.File) { + for _, imp := range file.Imports { + path := strings.Trim(imp.Path.Value, `"`) + parts := strings.Split(path, "/") + if len(parts) > 0 { + name := parts[len(parts)-1] + t.Imported[path] = name + } + } +} + +// TrackPackages tracks all the imports used by the supplied packages +func (t *ImportTracker) TrackPackages(pkgs ...*types.Package) { + for _, pkg := range pkgs { + t.Imported[pkg.Path()] = pkg.Name() + } +} + +// TrackImport tracks imports and handles the 'unsafe' import +func (t *ImportTracker) TrackImport(n ast.Node) { + if imported, ok := n.(*ast.ImportSpec); ok { + path := strings.Trim(imported.Path.Value, `"`) + if imported.Name != nil { + if imported.Name.Name == "_" { + // Initialization only import + t.InitOnly[path] = true + } else { + // Aliased import + t.Aliased[path] = imported.Name.Name + } + } + if path == "unsafe" { + t.Imported[path] = path + } + } +} diff --git a/vendor/github.com/securego/gosec/v2/install.sh b/vendor/github.com/securego/gosec/v2/install.sh new file mode 100644 index 000000000..0da55d379 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/install.sh @@ -0,0 +1,375 @@ +#!/bin/sh +set -e +# Code generated by godownloader. DO NOT EDIT. +# + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -le "$_logp" ] +} +log_tag() { + case $1 in + 0) echo "emerg" ;; + 1) echo "alert" ;; + 2) echo "crit" ;; + 3) echo "err" ;; + 4) echo "warning" ;; + 5) echo "notice" ;; + 6) echo "info" ;; + 7) echo "debug" ;; + *) echo "$1" ;; + esac +} +log_debug() { + log_priority 7 || return 0 + echoerr "$(log_prefix)" "$(log_tag 7)" "$@" +} +log_info() { + log_priority 6 || return 0 + echoerr "$(log_prefix)" "$(log_tag 6)" "$@" +} +log_err() { + log_priority 3 || return 0 + echoerr "$(log_prefix)" "$(log_tag 3)" "$@" +} +log_crit() { + log_priority 2 || return 0 + echoerr "$(log_prefix)" "$(log_tag 2)" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + echo "$os" +} +uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo ${arch} +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. 
Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;; + *.tar) tar --no-same-owner -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +http_download_curl() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") + else + code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") + fi + if [ "$code" != "200" ]; then + log_debug "http_download_curl received HTTP status $code" + return 1 + fi + return 0 +} +http_download_wget() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + wget -q -O "$local_file" "$source_url" + else + wget -q --header "$header" -O "$local_file" "$source_url" + fi +} +http_download() { + log_debug "http_download $2" + if is_command curl; then + http_download_curl "$@" + return + elif is_command wget; then + http_download_wget "$@" + return + fi + log_crit "http_download unable to find wget or curl" + return 1 +} +http_copy() { + tmp=$(mktemp) + http_download "${tmp}" "$1" "$2" || return 1 + body=$(cat "$tmp") + rm -f "${tmp}" + echo "$body" +} +github_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_copy "$giturl" "Accept:application/json") + test -z "$json" && return 1 + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f a + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null < end { + break + } else if pos >= start && pos <= end { + code := fmt.Sprintf("%d: %s\n", pos, scanner.Text()) + 
buf.WriteString(code) + } + } + return buf.String(), nil +} + +func codeSnippetStartLine(node ast.Node, fobj *token.File) int64 { + s := (int64)(fobj.Line(node.Pos())) + if s-SnippetOffset > 0 { + return s - SnippetOffset + } + return s +} + +func codeSnippetEndLine(node ast.Node, fobj *token.File) int64 { + e := (int64)(fobj.Line(node.End())) + return e + SnippetOffset +} + +// NewIssue creates a new Issue +func NewIssue(ctx *Context, node ast.Node, ruleID, desc string, severity Score, confidence Score) *Issue { + fobj := ctx.FileSet.File(node.Pos()) + name := fobj.Name() + start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) + line := strconv.Itoa(start) + if start != end { + line = fmt.Sprintf("%d-%d", start, end) + } + col := strconv.Itoa(fobj.Position(node.Pos()).Column) + + var code string + if file, err := os.Open(fobj.Name()); err == nil { + defer file.Close() // #nosec + s := codeSnippetStartLine(node, fobj) + e := codeSnippetEndLine(node, fobj) + code, err = codeSnippet(file, s, e, node) + if err != nil { + code = err.Error() + } + } + + return &Issue{ + File: name, + Line: line, + Col: col, + RuleID: ruleID, + What: desc, + Confidence: confidence, + Severity: severity, + Code: code, + Cwe: GetCweByRule(ruleID), + } +} diff --git a/vendor/github.com/securego/gosec/v2/renovate.json b/vendor/github.com/securego/gosec/v2/renovate.json new file mode 100644 index 000000000..95f2b7c1c --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/renovate.json @@ -0,0 +1,24 @@ +{ + "dependencyDashboard": true, + "vulnerabilityAlerts": { + "enabled": true + }, + "extends": [ + ":preserveSemverRanges", + "group:all", + "schedule:weekly" + ], + "lockFileMaintenance": { + "commitMessageAction": "Update", + "enabled": true, + "extends": [ + "group:all", + "schedule:weekly" + ] + }, + "postUpdateOptions": [ + "gomodTidy", + "gomodUpdateImportPaths" + ], + "separateMajorMinor": false +} diff --git a/vendor/github.com/securego/gosec/v2/report.go b/vendor/github.com/securego/gosec/v2/report.go new file mode 100644 index 000000000..96b1466d5 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/report.go @@ -0,0 +1,24 @@ +package gosec + +// ReportInfo this is report information +type ReportInfo struct { + Errors map[string][]Error `json:"Golang errors"` + Issues []*Issue + Stats *Metrics + GosecVersion string +} + +// NewReportInfo instantiate a ReportInfo +func NewReportInfo(issues []*Issue, metrics *Metrics, errors map[string][]Error) *ReportInfo { + return &ReportInfo{ + Errors: errors, + Issues: issues, + Stats: metrics, + } +} + +// WithVersion defines the version of gosec used to generate the report +func (r *ReportInfo) WithVersion(version string) *ReportInfo { + r.GosecVersion = version + return r +} diff --git a/vendor/github.com/securego/gosec/v2/resolve.go b/vendor/github.com/securego/gosec/v2/resolve.go new file mode 100644 index 000000000..cdc287e8e --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/resolve.go @@ -0,0 +1,95 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import "go/ast" + +func resolveIdent(n *ast.Ident, c *Context) bool { + if n.Obj == nil || n.Obj.Kind != ast.Var { + return true + } + if node, ok := n.Obj.Decl.(ast.Node); ok { + return TryResolve(node, c) + } + return false +} + +func resolveValueSpec(n *ast.ValueSpec, c *Context) bool { + if len(n.Values) == 0 { + return false + } + for _, value := range n.Values { + if !TryResolve(value, c) { + return false + } + } + return true +} + +func resolveAssign(n *ast.AssignStmt, c *Context) bool { + if len(n.Rhs) == 0 { + return false + } + for _, arg := range n.Rhs { + if !TryResolve(arg, c) { + return false + } + } + return true +} + +func resolveCompLit(n *ast.CompositeLit, c *Context) bool { + if len(n.Elts) == 0 { + return false + } + for _, arg := range n.Elts { + if !TryResolve(arg, c) { + return false + } + } + return true +} + +func resolveBinExpr(n *ast.BinaryExpr, c *Context) bool { + return (TryResolve(n.X, c) && TryResolve(n.Y, c)) +} + +func resolveCallExpr(n *ast.CallExpr, c *Context) bool { + // TODO(tkelsey): next step, full function resolution + return false +} + +// TryResolve will attempt, given a subtree starting at some AST node, to resolve +// all values contained within to a known constant. It is used to check for any +// unknown values in compound expressions. +func TryResolve(n ast.Node, c *Context) bool { + switch node := n.(type) { + case *ast.BasicLit: + return true + case *ast.CompositeLit: + return resolveCompLit(node, c) + case *ast.Ident: + return resolveIdent(node, c) + case *ast.ValueSpec: + return resolveValueSpec(node, c) + case *ast.AssignStmt: + return resolveAssign(node, c) + case *ast.CallExpr: + return resolveCallExpr(node, c) + case *ast.BinaryExpr: + return resolveBinExpr(node, c) + } + return false +} diff --git a/vendor/github.com/securego/gosec/v2/rule.go b/vendor/github.com/securego/gosec/v2/rule.go new file mode 100644 index 000000000..fbba089bb --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rule.go @@ -0,0 +1,59 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "go/ast" + "reflect" +) + +// The Rule interface used by all rules supported by gosec. +type Rule interface { + ID() string + Match(ast.Node, *Context) (*Issue, error) +} + +// RuleBuilder is used to register a rule definition with the analyzer +type RuleBuilder func(id string, c Config) (Rule, []ast.Node) + +// A RuleSet maps lists of rules to the type of AST node they should be run on. +// The analyzer will only invoke rules contained in the list associated with the +// type of AST node it is currently visiting. +type RuleSet map[reflect.Type][]Rule + +// NewRuleSet constructs a new RuleSet +func NewRuleSet() RuleSet { + return make(RuleSet) +} + +// Register adds a trigger for the supplied rule for the the +// specified ast nodes. 
+func (r RuleSet) Register(rule Rule, nodes ...ast.Node) { + for _, n := range nodes { + t := reflect.TypeOf(n) + if rules, ok := r[t]; ok { + r[t] = append(rules, rule) + } else { + r[t] = []Rule{rule} + } + } +} + +// RegisteredFor will return all rules that are registered for a +// specified ast node. +func (r RuleSet) RegisteredFor(n ast.Node) []Rule { + if rules, found := r[reflect.TypeOf(n)]; found { + return rules + } + return []Rule{} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/archive.go b/vendor/github.com/securego/gosec/v2/rules/archive.go new file mode 100644 index 000000000..92c7e4481 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/archive.go @@ -0,0 +1,65 @@ +package rules + +import ( + "go/ast" + "go/types" + + "github.com/securego/gosec/v2" +) + +type archive struct { + gosec.MetaData + calls gosec.CallList + argTypes []string +} + +func (a *archive) ID() string { + return a.MetaData.ID +} + +// Match inspects AST nodes to determine if the filepath.Joins uses any argument derived from type zip.File or tar.Header +func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if node := a.calls.ContainsPkgCallExpr(n, c, false); node != nil { + for _, arg := range node.Args { + var argType types.Type + if selector, ok := arg.(*ast.SelectorExpr); ok { + argType = c.Info.TypeOf(selector.X) + } else if ident, ok := arg.(*ast.Ident); ok { + if ident.Obj != nil && ident.Obj.Kind == ast.Var { + decl := ident.Obj.Decl + if assign, ok := decl.(*ast.AssignStmt); ok { + if selector, ok := assign.Rhs[0].(*ast.SelectorExpr); ok { + argType = c.Info.TypeOf(selector.X) + } + } + } + } + + if argType != nil { + for _, t := range a.argTypes { + if argType.String() == t { + return gosec.NewIssue(c, n, a.ID(), a.What, a.Severity, a.Confidence), nil + } + } + } + } + } + return nil, nil +} + +// NewArchive creates a new rule which detects the file traversal when extracting zip/tar archives +func NewArchive(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("path/filepath", "Join") + calls.Add("path", "Join") + return &archive{ + calls: calls, + argTypes: []string{"*archive/zip.File", "*archive/tar.Header"}, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "File traversal when extracting zip/tar archive", + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go new file mode 100644 index 000000000..13b42070d --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go @@ -0,0 +1,68 @@ +package rules + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/securego/gosec/v2" +) + +type deferType struct { + typ string + methods []string +} + +type badDefer struct { + gosec.MetaData + types []deferType +} + +func (r *badDefer) ID() string { + return r.MetaData.ID +} + +func normalize(typ string) string { + return strings.TrimPrefix(typ, "*") +} + +func contains(methods []string, method string) bool { + for _, m := range methods { + if m == method { + return true + } + } + return false +} + +func (r *badDefer) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if deferStmt, ok := n.(*ast.DeferStmt); ok { + for _, deferTyp := range r.types { + if typ, method, err := gosec.GetCallInfo(deferStmt.Call, c); err == nil { + if normalize(typ) == deferTyp.typ && contains(deferTyp.methods, method) { + return gosec.NewIssue(c, n, 
r.ID(), fmt.Sprintf(r.What, method, typ), r.Severity, r.Confidence), nil + } + } + } + } + + return nil, nil +} + +// NewDeferredClosing detects unsafe defer of error returning methods +func NewDeferredClosing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &badDefer{ + types: []deferType{ + { + typ: "os.File", + methods: []string{"Close"}, + }, + }, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "Deferring unsafe method %q on type %q", + }, + }, []ast.Node{(*ast.DeferStmt)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/bind.go b/vendor/github.com/securego/gosec/v2/rules/bind.go new file mode 100644 index 000000000..8f6af067a --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/bind.go @@ -0,0 +1,83 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + "regexp" + + "github.com/securego/gosec/v2" +) + +// Looks for net.Listen("0.0.0.0") or net.Listen(":8080") +type bindsToAllNetworkInterfaces struct { + gosec.MetaData + calls gosec.CallList + pattern *regexp.Regexp +} + +func (r *bindsToAllNetworkInterfaces) ID() string { + return r.MetaData.ID +} + +func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + callExpr := r.calls.ContainsPkgCallExpr(n, c, false) + if callExpr == nil { + return nil, nil + } + if len(callExpr.Args) > 1 { + arg := callExpr.Args[1] + if bl, ok := arg.(*ast.BasicLit); ok { + if arg, err := gosec.GetString(bl); err == nil { + if r.pattern.MatchString(arg) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } else if ident, ok := arg.(*ast.Ident); ok { + values := gosec.GetIdentStringValues(ident) + for _, value := range values { + if r.pattern.MatchString(value) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } else if len(callExpr.Args) > 0 { + values := gosec.GetCallStringArgsValues(callExpr.Args[0], c) + for _, value := range values { + if r.pattern.MatchString(value) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + return nil, nil +} + +// NewBindsToAllNetworkInterfaces detects socket connections that are setup to +// listen on all network interfaces. 
+func NewBindsToAllNetworkInterfaces(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("net", "Listen") + calls.Add("crypto/tls", "Listen") + return &bindsToAllNetworkInterfaces{ + calls: calls, + pattern: regexp.MustCompile(`^(0.0.0.0|:).*$`), + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "Binds to all network interfaces", + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/blocklist.go b/vendor/github.com/securego/gosec/v2/rules/blocklist.go new file mode 100644 index 000000000..afd4ee56b --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/blocklist.go @@ -0,0 +1,94 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + "strings" + + "github.com/securego/gosec/v2" +) + +type blocklistedImport struct { + gosec.MetaData + Blocklisted map[string]string +} + +func unquote(original string) string { + copy := strings.TrimSpace(original) + copy = strings.TrimLeft(copy, `"`) + return strings.TrimRight(copy, `"`) +} + +func (r *blocklistedImport) ID() string { + return r.MetaData.ID +} + +func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if node, ok := n.(*ast.ImportSpec); ok { + if description, ok := r.Blocklisted[unquote(node.Path.Value)]; ok { + return gosec.NewIssue(c, node, r.ID(), description, r.Severity, r.Confidence), nil + } + } + return nil, nil +} + +// NewBlocklistedImports reports when a blocklisted import is being used. +// Typically when a deprecated technology is being used. 
+func NewBlocklistedImports(id string, conf gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) { + return &blocklistedImport{ + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + }, + Blocklisted: blocklist, + }, []ast.Node{(*ast.ImportSpec)(nil)} +} + +// NewBlocklistedImportMD5 fails if MD5 is imported +func NewBlocklistedImportMD5(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return NewBlocklistedImports(id, conf, map[string]string{ + "crypto/md5": "Blocklisted import crypto/md5: weak cryptographic primitive", + }) +} + +// NewBlocklistedImportDES fails if DES is imported +func NewBlocklistedImportDES(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return NewBlocklistedImports(id, conf, map[string]string{ + "crypto/des": "Blocklisted import crypto/des: weak cryptographic primitive", + }) +} + +// NewBlocklistedImportRC4 fails if DES is imported +func NewBlocklistedImportRC4(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return NewBlocklistedImports(id, conf, map[string]string{ + "crypto/rc4": "Blocklisted import crypto/rc4: weak cryptographic primitive", + }) +} + +// NewBlocklistedImportCGI fails if CGI is imported +func NewBlocklistedImportCGI(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return NewBlocklistedImports(id, conf, map[string]string{ + "net/http/cgi": "Blocklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)", + }) +} + +// NewBlocklistedImportSHA1 fails if SHA1 is imported +func NewBlocklistedImportSHA1(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return NewBlocklistedImports(id, conf, map[string]string{ + "crypto/sha1": "Blocklisted import crypto/sha1: weak cryptographic primitive", + }) +} diff --git a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go new file mode 100644 index 000000000..02256faa9 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go @@ -0,0 +1,110 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "fmt" + "go/ast" + + "github.com/securego/gosec/v2" +) + +type decompressionBombCheck struct { + gosec.MetaData + readerCalls gosec.CallList + copyCalls gosec.CallList +} + +func (d *decompressionBombCheck) ID() string { + return d.MetaData.ID +} + +func containsReaderCall(node ast.Node, ctx *gosec.Context, list gosec.CallList) bool { + if list.ContainsPkgCallExpr(node, ctx, false) != nil { + return true + } + // Resolve type info of ident (for *archive/zip.File.Open) + s, idt, _ := gosec.GetCallInfo(node, ctx) + return list.Contains(s, idt) +} + +func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + var readerVarObj map[*ast.Object]struct{} + + // To check multiple lines, ctx.PassedValues is used to store temporary data. 
+ if _, ok := ctx.PassedValues[d.ID()]; !ok { + readerVarObj = make(map[*ast.Object]struct{}) + ctx.PassedValues[d.ID()] = readerVarObj + } else if pv, ok := ctx.PassedValues[d.ID()].(map[*ast.Object]struct{}); ok { + readerVarObj = pv + } else { + return nil, fmt.Errorf("PassedValues[%s] of Context is not map[*ast.Object]struct{}, but %T", d.ID(), ctx.PassedValues[d.ID()]) + } + + // io.Copy is a common function. + // To reduce false positives, This rule detects code which is used for compressed data only. + switch n := node.(type) { + case *ast.AssignStmt: + for _, expr := range n.Rhs { + if callExpr, ok := expr.(*ast.CallExpr); ok && containsReaderCall(callExpr, ctx, d.readerCalls) { + if idt, ok := n.Lhs[0].(*ast.Ident); ok && idt.Name != "_" { + // Example: + // r, _ := zlib.NewReader(buf) + // Add r's Obj to readerVarObj map + readerVarObj[idt.Obj] = struct{}{} + } + } + } + case *ast.CallExpr: + if d.copyCalls.ContainsPkgCallExpr(n, ctx, false) != nil { + if idt, ok := n.Args[1].(*ast.Ident); ok { + if _, ok := readerVarObj[idt.Obj]; ok { + // Detect io.Copy(x, r) + return gosec.NewIssue(ctx, n, d.ID(), d.What, d.Severity, d.Confidence), nil + } + } + } + } + + return nil, nil +} + +// NewDecompressionBombCheck detects if there is potential DoS vulnerability via decompression bomb +func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + readerCalls := gosec.NewCallList() + readerCalls.Add("compress/gzip", "NewReader") + readerCalls.AddAll("compress/zlib", "NewReader", "NewReaderDict") + readerCalls.Add("compress/bzip2", "NewReader") + readerCalls.AddAll("compress/flate", "NewReader", "NewReaderDict") + readerCalls.Add("compress/lzw", "NewReader") + readerCalls.Add("archive/tar", "NewReader") + readerCalls.Add("archive/zip", "NewReader") + readerCalls.Add("*archive/zip.File", "Open") + + copyCalls := gosec.NewCallList() + copyCalls.Add("io", "Copy") + copyCalls.Add("io", "CopyBuffer") + + return &decompressionBombCheck{ + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.Medium, + What: "Potential DoS vulnerability via decompression bomb", + }, + readerCalls: readerCalls, + copyCalls: copyCalls, + }, []ast.Node{(*ast.FuncDecl)(nil), (*ast.AssignStmt)(nil), (*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/errors.go b/vendor/github.com/securego/gosec/v2/rules/errors.go new file mode 100644 index 000000000..7a34bc634 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/errors.go @@ -0,0 +1,120 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rules + +import ( + "go/ast" + "go/types" + + "github.com/securego/gosec/v2" +) + +type noErrorCheck struct { + gosec.MetaData + whitelist gosec.CallList +} + +func (r *noErrorCheck) ID() string { + return r.MetaData.ID +} + +func returnsError(callExpr *ast.CallExpr, ctx *gosec.Context) int { + if tv := ctx.Info.TypeOf(callExpr); tv != nil { + switch t := tv.(type) { + case *types.Tuple: + for pos := 0; pos < t.Len(); pos++ { + variable := t.At(pos) + if variable != nil && variable.Type().String() == "error" { + return pos + } + } + case *types.Named: + if t.String() == "error" { + return 0 + } + } + } + return -1 +} + +func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + switch stmt := n.(type) { + case *ast.AssignStmt: + cfg := ctx.Config + if enabled, err := cfg.IsGlobalEnabled(gosec.Audit); err == nil && enabled { + for _, expr := range stmt.Rhs { + if callExpr, ok := expr.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(expr, ctx) == nil { + pos := returnsError(callExpr, ctx) + if pos < 0 || pos >= len(stmt.Lhs) { + return nil, nil + } + if id, ok := stmt.Lhs[pos].(*ast.Ident); ok && id.Name == "_" { + return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + case *ast.ExprStmt: + if callExpr, ok := stmt.X.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(stmt.X, ctx) == nil { + pos := returnsError(callExpr, ctx) + if pos >= 0 { + return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + return nil, nil +} + +// NewNoErrorCheck detects if the returned error is unchecked +func NewNoErrorCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + // TODO(gm) Come up with sensible defaults here. Or flip it to use a + // black list instead. + whitelist := gosec.NewCallList() + whitelist.AddAll("bytes.Buffer", "Write", "WriteByte", "WriteRune", "WriteString") + whitelist.AddAll("fmt", "Print", "Printf", "Println", "Fprint", "Fprintf", "Fprintln") + whitelist.AddAll("strings.Builder", "Write", "WriteByte", "WriteRune", "WriteString") + whitelist.Add("io.PipeWriter", "CloseWithError") + whitelist.Add("hash.Hash", "Write") + + if configured, ok := conf["G104"]; ok { + if whitelisted, ok := configured.(map[string]interface{}); ok { + for pkg, funcs := range whitelisted { + if funcs, ok := funcs.([]interface{}); ok { + whitelist.AddAll(pkg, toStringSlice(funcs)...) + } + } + } + } + + return &noErrorCheck{ + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Low, + Confidence: gosec.High, + What: "Errors unhandled.", + }, + whitelist: whitelist, + }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)} +} + +func toStringSlice(values []interface{}) []string { + result := []string{} + for _, value := range values { + if value, ok := value.(string); ok { + result = append(result, value) + } + } + return result +} diff --git a/vendor/github.com/securego/gosec/v2/rules/fileperms.go b/vendor/github.com/securego/gosec/v2/rules/fileperms.go new file mode 100644 index 000000000..e6a80a5fb --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/fileperms.go @@ -0,0 +1,113 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "fmt" + "go/ast" + "strconv" + + "github.com/securego/gosec/v2" +) + +type filePermissions struct { + gosec.MetaData + mode int64 + pkgs []string + calls []string +} + +func (r *filePermissions) ID() string { + return r.MetaData.ID +} + +func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMode int64) int64 { + mode := defaultMode + if value, ok := conf[configKey]; ok { + switch value := value.(type) { + case int64: + mode = value + case string: + if m, e := strconv.ParseInt(value, 0, 64); e != nil { + mode = defaultMode + } else { + mode = m + } + } + } + return mode +} + +func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + for _, pkg := range r.pkgs { + if callexpr, matched := gosec.MatchCallByPackage(n, c, pkg, r.calls...); matched { + modeArg := callexpr.Args[len(callexpr.Args)-1] + if mode, err := gosec.GetInt(modeArg); err == nil && mode > r.mode { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + return nil, nil +} + +// NewWritePerms creates a rule to detect file Writes with bad permissions. +func NewWritePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + mode := getConfiguredMode(conf, "G306", 0600) + return &filePermissions{ + mode: mode, + pkgs: []string{"io/ioutil", "os"}, + calls: []string{"WriteFile"}, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: fmt.Sprintf("Expect WriteFile permissions to be %#o or less", mode), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} + +// NewFilePerms creates a rule to detect file creation with a more permissive than configured +// permission mask. +func NewFilePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + mode := getConfiguredMode(conf, "G302", 0600) + return &filePermissions{ + mode: mode, + pkgs: []string{"os"}, + calls: []string{"OpenFile", "Chmod"}, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: fmt.Sprintf("Expect file permissions to be %#o or less", mode), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} + +// NewMkdirPerms creates a rule to detect directory creation with more permissive than +// configured permission mask. 
+func NewMkdirPerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + mode := getConfiguredMode(conf, "G301", 0750) + return &filePermissions{ + mode: mode, + pkgs: []string{"os"}, + calls: []string{"Mkdir", "MkdirAll"}, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: fmt.Sprintf("Expect directory permissions to be %#o or less", mode), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go new file mode 100644 index 000000000..acdd583e4 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go @@ -0,0 +1,173 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + "go/token" + "regexp" + "strconv" + + zxcvbn "github.com/nbutton23/zxcvbn-go" + "github.com/securego/gosec/v2" +) + +type credentials struct { + gosec.MetaData + pattern *regexp.Regexp + entropyThreshold float64 + perCharThreshold float64 + truncate int + ignoreEntropy bool +} + +func (r *credentials) ID() string { + return r.MetaData.ID +} + +func truncate(s string, n int) string { + if n > len(s) { + return s + } + return s[:n] +} + +func (r *credentials) isHighEntropyString(str string) bool { + s := truncate(str, r.truncate) + info := zxcvbn.PasswordStrength(s, []string{}) + entropyPerChar := info.Entropy / float64(len(s)) + return (info.Entropy >= r.entropyThreshold || + (info.Entropy >= (r.entropyThreshold/2) && + entropyPerChar >= r.perCharThreshold)) +} + +func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + switch node := n.(type) { + case *ast.AssignStmt: + return r.matchAssign(node, ctx) + case *ast.ValueSpec: + return r.matchValueSpec(node, ctx) + case *ast.BinaryExpr: + return r.matchEqualityCheck(node, ctx) + } + return nil, nil +} + +func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*gosec.Issue, error) { + for _, i := range assign.Lhs { + if ident, ok := i.(*ast.Ident); ok { + if r.pattern.MatchString(ident.Name) { + for _, e := range assign.Rhs { + if val, err := gosec.GetString(e); err == nil { + if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { + return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + } + } + return nil, nil +} + +func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*gosec.Issue, error) { + for index, ident := range valueSpec.Names { + if r.pattern.MatchString(ident.Name) && valueSpec.Values != nil { + // const foo, bar = "same value" + if len(valueSpec.Values) <= index { + index = len(valueSpec.Values) - 1 + } + if val, err := gosec.GetString(valueSpec.Values[index]); err == nil { + if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { + return gosec.NewIssue(ctx, valueSpec, r.ID(), 
r.What, r.Severity, r.Confidence), nil + } + } + } + } + return nil, nil +} + +func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*gosec.Issue, error) { + if binaryExpr.Op == token.EQL || binaryExpr.Op == token.NEQ { + if ident, ok := binaryExpr.X.(*ast.Ident); ok { + if r.pattern.MatchString(ident.Name) { + if val, err := gosec.GetString(binaryExpr.Y); err == nil { + if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { + return gosec.NewIssue(ctx, binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + } + return nil, nil +} + +// NewHardcodedCredentials attempts to find high entropy string constants being +// assigned to variables that appear to be related to credentials. +func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + pattern := `(?i)passwd|pass|password|pwd|secret|token` + entropyThreshold := 80.0 + perCharThreshold := 3.0 + ignoreEntropy := false + truncateString := 16 + if val, ok := conf["G101"]; ok { + conf := val.(map[string]interface{}) + if configPattern, ok := conf["pattern"]; ok { + if cfgPattern, ok := configPattern.(string); ok { + pattern = cfgPattern + } + } + if configIgnoreEntropy, ok := conf["ignore_entropy"]; ok { + if cfgIgnoreEntropy, ok := configIgnoreEntropy.(bool); ok { + ignoreEntropy = cfgIgnoreEntropy + } + } + if configEntropyThreshold, ok := conf["entropy_threshold"]; ok { + if cfgEntropyThreshold, ok := configEntropyThreshold.(string); ok { + if parsedNum, err := strconv.ParseFloat(cfgEntropyThreshold, 64); err == nil { + entropyThreshold = parsedNum + } + } + } + if configCharThreshold, ok := conf["per_char_threshold"]; ok { + if cfgCharThreshold, ok := configCharThreshold.(string); ok { + if parsedNum, err := strconv.ParseFloat(cfgCharThreshold, 64); err == nil { + perCharThreshold = parsedNum + } + } + } + if configTruncate, ok := conf["truncate"]; ok { + if cfgTruncate, ok := configTruncate.(string); ok { + if parsedInt, err := strconv.Atoi(cfgTruncate); err == nil { + truncateString = parsedInt + } + } + } + } + + return &credentials{ + pattern: regexp.MustCompile(pattern), + entropyThreshold: entropyThreshold, + perCharThreshold: perCharThreshold, + ignoreEntropy: ignoreEntropy, + truncate: truncateString, + MetaData: gosec.MetaData{ + ID: id, + What: "Potential hardcoded credentials", + Confidence: gosec.Low, + Severity: gosec.High, + }, + }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ValueSpec)(nil), (*ast.BinaryExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go new file mode 100644 index 000000000..b2668dec8 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go @@ -0,0 +1,119 @@ +package rules + +import ( + "go/ast" + "go/token" + + "github.com/securego/gosec/v2" +) + +type implicitAliasing struct { + gosec.MetaData + aliases map[*ast.Object]struct{} + rightBrace token.Pos + acceptableAlias []*ast.UnaryExpr +} + +func (r *implicitAliasing) ID() string { + return r.MetaData.ID +} + +func containsUnary(exprs []*ast.UnaryExpr, expr *ast.UnaryExpr) bool { + for _, e := range exprs { + if e == expr { + return true + } + } + return false +} + +func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + switch node := n.(type) { + case *ast.RangeStmt: + // When presented with a range statement, get the underlying Object bound to + // by assignment and add it to our set 
(r.aliases) of objects to check for. + if key, ok := node.Value.(*ast.Ident); ok { + if key.Obj != nil { + if assignment, ok := key.Obj.Decl.(*ast.AssignStmt); ok { + if len(assignment.Lhs) < 2 { + return nil, nil + } + + if object, ok := assignment.Lhs[1].(*ast.Ident); ok { + r.aliases[object.Obj] = struct{}{} + + if r.rightBrace < node.Body.Rbrace { + r.rightBrace = node.Body.Rbrace + } + } + } + } + } + + case *ast.UnaryExpr: + // If this unary expression is outside of the last range statement we were looking at + // then clear the list of objects we're concerned about because they're no longer in + // scope + if node.Pos() > r.rightBrace { + r.aliases = make(map[*ast.Object]struct{}) + r.acceptableAlias = make([]*ast.UnaryExpr, 0) + } + + // Short circuit logic to skip checking aliases if we have nothing to check against. + if len(r.aliases) == 0 { + return nil, nil + } + + // If this unary is at the top level of a return statement then it is okay-- + // see *ast.ReturnStmt comment below. + if containsUnary(r.acceptableAlias, node) { + return nil, nil + } + + // If we find a unary op of & (reference) of an object within r.aliases, complain. + if ident, ok := node.X.(*ast.Ident); ok && node.Op.String() == "&" { + if _, contains := r.aliases[ident.Obj]; contains { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + case *ast.ReturnStmt: + // Returning a rangeStmt yielded value is acceptable since only one value will be returned + for _, item := range node.Results { + if unary, ok := item.(*ast.UnaryExpr); ok && unary.Op.String() == "&" { + r.acceptableAlias = append(r.acceptableAlias, unary) + } + } + } + + return nil, nil +} + +// NewImplicitAliasing detects implicit memory aliasing of type: for blah := SomeCall() {... SomeOtherCall(&blah) ...} +func NewImplicitAliasing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &implicitAliasing{ + aliases: make(map[*ast.Object]struct{}), + rightBrace: token.NoPos, + acceptableAlias: make([]*ast.UnaryExpr, 0), + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.Medium, + What: "Implicit memory aliasing in for loop.", + }, + }, []ast.Node{(*ast.RangeStmt)(nil), (*ast.UnaryExpr)(nil), (*ast.ReturnStmt)(nil)} +} + +/* +This rule is prone to flag false positives. + +Within GoSec, the rule is just an AST match-- there are a handful of other +implementation strategies which might lend more nuance to the rule at the +cost of allowing false negatives. + +From a tooling side, I'd rather have this rule flag false positives than +potentially have some false negatives-- especially if the sentiment of this +rule (as I understand it, and Go) is that referencing a rangeStmt-yielded +value is kinda strange and does not have a strongly justified use case. + +Which is to say-- a false positive _should_ just be changed. +*/ diff --git a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go new file mode 100644 index 000000000..dfcda94a8 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go @@ -0,0 +1,89 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "fmt" + "go/ast" + + "github.com/securego/gosec/v2" +) + +type integerOverflowCheck struct { + gosec.MetaData + calls gosec.CallList +} + +func (i *integerOverflowCheck) ID() string { + return i.MetaData.ID +} + +func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + var atoiVarObj map[*ast.Object]ast.Node + + // To check multiple lines, ctx.PassedValues is used to store temporary data. + if _, ok := ctx.PassedValues[i.ID()]; !ok { + atoiVarObj = make(map[*ast.Object]ast.Node) + ctx.PassedValues[i.ID()] = atoiVarObj + } else if pv, ok := ctx.PassedValues[i.ID()].(map[*ast.Object]ast.Node); ok { + atoiVarObj = pv + } else { + return nil, fmt.Errorf("PassedValues[%s] of Context is not map[*ast.Object]ast.Node, but %T", i.ID(), ctx.PassedValues[i.ID()]) + } + + // strconv.Atoi is a common function. + // To reduce false positives, This rule detects code which is converted to int32/int16 only. + switch n := node.(type) { + case *ast.AssignStmt: + for _, expr := range n.Rhs { + if callExpr, ok := expr.(*ast.CallExpr); ok && i.calls.ContainsPkgCallExpr(callExpr, ctx, false) != nil { + if idt, ok := n.Lhs[0].(*ast.Ident); ok && idt.Name != "_" { + // Example: + // v, _ := strconv.Atoi("1111") + // Add v's Obj to atoiVarObj map + atoiVarObj[idt.Obj] = n + } + } + } + case *ast.CallExpr: + if fun, ok := n.Fun.(*ast.Ident); ok { + if fun.Name == "int32" || fun.Name == "int16" { + if idt, ok := n.Args[0].(*ast.Ident); ok { + if n, ok := atoiVarObj[idt.Obj]; ok { + // Detect int32(v) and int16(v) + return gosec.NewIssue(ctx, n, i.ID(), i.What, i.Severity, i.Confidence), nil + } + } + } + } + } + + return nil, nil +} + +// NewIntegerOverflowCheck detects if there is potential Integer OverFlow +func NewIntegerOverflowCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("strconv", "Atoi") + return &integerOverflowCheck{ + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.High, + Confidence: gosec.Medium, + What: "Potential Integer overflow made by strconv.Atoi result conversion to int16/32", + }, + calls: calls, + }, []ast.Node{(*ast.FuncDecl)(nil), (*ast.AssignStmt)(nil), (*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/pprof.go b/vendor/github.com/securego/gosec/v2/rules/pprof.go new file mode 100644 index 000000000..4c99af752 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/pprof.go @@ -0,0 +1,42 @@ +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type pprofCheck struct { + gosec.MetaData + importPath string + importName string +} + +// ID returns the ID of the check +func (p *pprofCheck) ID() string { + return p.MetaData.ID +} + +// Match checks for pprof imports +func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if node, ok := n.(*ast.ImportSpec); ok { + if p.importPath == unquote(node.Path.Value) && node.Name != nil && p.importName == node.Name.Name { + return gosec.NewIssue(c, node, p.ID(), p.What, p.Severity, p.Confidence), nil + } + } + return 
nil, nil +} + +// NewPprofCheck detects when the profiling endpoint is automatically exposed +func NewPprofCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &pprofCheck{ + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.High, + Confidence: gosec.High, + What: "Profiling endpoint is automatically exposed on /debug/pprof", + }, + importPath: "net/http/pprof", + importName: "_", + }, []ast.Node{(*ast.ImportSpec)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/rand.go b/vendor/github.com/securego/gosec/v2/rules/rand.go new file mode 100644 index 000000000..055adce4d --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/rand.go @@ -0,0 +1,58 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type weakRand struct { + gosec.MetaData + funcNames []string + packagePath string +} + +func (w *weakRand) ID() string { + return w.MetaData.ID +} + +func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + for _, funcName := range w.funcNames { + if _, matched := gosec.MatchCallByPackage(n, c, w.packagePath, funcName); matched { + return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + } + } + + return nil, nil +} + +// NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure +func NewWeakRandCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &weakRand{ + funcNames: []string{ + "New", "Read", "Float32", "Float64", "Int", "Int31", + "Int31n", "Int63", "Int63n", "Intn", "NormalFloat64", "Uint32", "Uint64", + }, + packagePath: "math/rand", + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.High, + Confidence: gosec.Medium, + What: "Use of weak random number generator (math/rand instead of crypto/rand)", + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/readfile.go b/vendor/github.com/securego/gosec/v2/rules/readfile.go new file mode 100644 index 000000000..072b016e2 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/readfile.go @@ -0,0 +1,128 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rules + +import ( + "go/ast" + "go/types" + + "github.com/securego/gosec/v2" +) + +type readfile struct { + gosec.MetaData + gosec.CallList + pathJoin gosec.CallList + clean gosec.CallList +} + +// ID returns the identifier for this rule +func (r *readfile) ID() string { + return r.MetaData.ID +} + +// isJoinFunc checks if there is a filepath.Join or other join function +func (r *readfile) isJoinFunc(n ast.Node, c *gosec.Context) bool { + if call := r.pathJoin.ContainsPkgCallExpr(n, c, false); call != nil { + for _, arg := range call.Args { + // edge case: check if one of the args is a BinaryExpr + if binExp, ok := arg.(*ast.BinaryExpr); ok { + // iterate and resolve all found identities from the BinaryExpr + if _, ok := gosec.FindVarIdentities(binExp, c); ok { + return true + } + } + + // try and resolve identity + if ident, ok := arg.(*ast.Ident); ok { + obj := c.Info.ObjectOf(ident) + if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) { + return true + } + } + } + } + return false +} + +// isFilepathClean checks if there is a filepath.Clean before assigning to a variable +func (r *readfile) isFilepathClean(n *ast.Ident, c *gosec.Context) bool { + if n.Obj.Kind != ast.Var { + return false + } + if node, ok := n.Obj.Decl.(*ast.AssignStmt); ok { + if call, ok := node.Rhs[0].(*ast.CallExpr); ok { + if clean := r.clean.ContainsPkgCallExpr(call, c, false); clean != nil { + return true + } + } + } + return false +} + +// Match inspects AST nodes to determine if the match the methods `os.Open` or `ioutil.ReadFile` +func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if node := r.ContainsPkgCallExpr(n, c, false); node != nil { + for _, arg := range node.Args { + // handles path joining functions in Arg + // eg. os.Open(filepath.Join("/tmp/", file)) + if callExpr, ok := arg.(*ast.CallExpr); ok { + if r.isJoinFunc(callExpr, c) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + // handles binary string concatenation eg. 
ioutil.Readfile("/tmp/" + file + "/blob") + if binExp, ok := arg.(*ast.BinaryExpr); ok { + // resolve all found identities from the BinaryExpr + if _, ok := gosec.FindVarIdentities(binExp, c); ok { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + + if ident, ok := arg.(*ast.Ident); ok { + obj := c.Info.ObjectOf(ident) + if _, ok := obj.(*types.Var); ok && + !gosec.TryResolve(ident, c) && + !r.isFilepathClean(ident, c) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + return nil, nil +} + +// NewReadFile detects cases where we read files +func NewReadFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + rule := &readfile{ + pathJoin: gosec.NewCallList(), + clean: gosec.NewCallList(), + CallList: gosec.NewCallList(), + MetaData: gosec.MetaData{ + ID: id, + What: "Potential file inclusion via variable", + Severity: gosec.Medium, + Confidence: gosec.High, + }, + } + rule.pathJoin.Add("path/filepath", "Join") + rule.pathJoin.Add("path", "Join") + rule.clean.Add("path/filepath", "Clean") + rule.clean.Add("path/filepath", "Rel") + rule.Add("io/ioutil", "ReadFile") + rule.Add("os", "Open") + rule.Add("os", "OpenFile") + return rule, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/rsa.go b/vendor/github.com/securego/gosec/v2/rules/rsa.go new file mode 100644 index 000000000..f2ed5db53 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/rsa.go @@ -0,0 +1,58 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rules + +import ( + "fmt" + "go/ast" + + "github.com/securego/gosec/v2" +) + +type weakKeyStrength struct { + gosec.MetaData + calls gosec.CallList + bits int +} + +func (w *weakKeyStrength) ID() string { + return w.MetaData.ID +} + +func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if callExpr := w.calls.ContainsPkgCallExpr(n, c, false); callExpr != nil { + if bits, err := gosec.GetInt(callExpr.Args[1]); err == nil && bits < (int64)(w.bits) { + return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + } + } + return nil, nil +} + +// NewWeakKeyStrength builds a rule that detects RSA keys < 2048 bits +func NewWeakKeyStrength(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("crypto/rsa", "GenerateKey") + bits := 2048 + return &weakKeyStrength{ + calls: calls, + bits: bits, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: fmt.Sprintf("RSA keys should be at least %d bits", bits), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/rulelist.go b/vendor/github.com/securego/gosec/v2/rules/rulelist.go new file mode 100644 index 000000000..a3d9ca2f6 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/rulelist.go @@ -0,0 +1,116 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import "github.com/securego/gosec/v2" + +// RuleDefinition contains the description of a rule and a mechanism to +// create it. +type RuleDefinition struct { + ID string + Description string + Create gosec.RuleBuilder +} + +// RuleList is a mapping of rule ID's to rule definitions +type RuleList map[string]RuleDefinition + +// Builders returns all the create methods for a given rule list +func (rl RuleList) Builders() map[string]gosec.RuleBuilder { + builders := make(map[string]gosec.RuleBuilder) + for _, def := range rl { + builders[def.ID] = def.Create + } + return builders +} + +// RuleFilter can be used to include or exclude a rule depending on the return +// value of the function +type RuleFilter func(string) bool + +// NewRuleFilter is a closure that will include/exclude the rule ID's based on +// the supplied boolean value. 
+func NewRuleFilter(action bool, ruleIDs ...string) RuleFilter { + rulelist := make(map[string]bool) + for _, rule := range ruleIDs { + rulelist[rule] = true + } + return func(rule string) bool { + if _, found := rulelist[rule]; found { + return action + } + return !action + } +} + +// Generate the list of rules to use +func Generate(filters ...RuleFilter) RuleList { + rules := []RuleDefinition{ + // misc + {"G101", "Look for hardcoded credentials", NewHardcodedCredentials}, + {"G102", "Bind to all interfaces", NewBindsToAllNetworkInterfaces}, + {"G103", "Audit the use of unsafe block", NewUsingUnsafe}, + {"G104", "Audit errors not checked", NewNoErrorCheck}, + {"G106", "Audit the use of ssh.InsecureIgnoreHostKey function", NewSSHHostKey}, + {"G107", "Url provided to HTTP request as taint input", NewSSRFCheck}, + {"G108", "Profiling endpoint is automatically exposed", NewPprofCheck}, + {"G109", "Converting strconv.Atoi result to int32/int16", NewIntegerOverflowCheck}, + {"G110", "Detect io.Copy instead of io.CopyN when decompression", NewDecompressionBombCheck}, + + // injection + {"G201", "SQL query construction using format string", NewSQLStrFormat}, + {"G202", "SQL query construction using string concatenation", NewSQLStrConcat}, + {"G203", "Use of unescaped data in HTML templates", NewTemplateCheck}, + {"G204", "Audit use of command execution", NewSubproc}, + + // filesystem + {"G301", "Poor file permissions used when creating a directory", NewMkdirPerms}, + {"G302", "Poor file permissions used when creation file or using chmod", NewFilePerms}, + {"G303", "Creating tempfile using a predictable path", NewBadTempFile}, + {"G304", "File path provided as taint input", NewReadFile}, + {"G305", "File path traversal when extracting zip archive", NewArchive}, + {"G306", "Poor file permissions used when writing to a file", NewWritePerms}, + {"G307", "Unsafe defer call of a method returning an error", NewDeferredClosing}, + + // crypto + {"G401", "Detect the usage of DES, RC4, MD5 or SHA1", NewUsesWeakCryptography}, + {"G402", "Look for bad TLS connection settings", NewIntermediateTLSCheck}, + {"G403", "Ensure minimum RSA key length of 2048 bits", NewWeakKeyStrength}, + {"G404", "Insecure random number source (rand)", NewWeakRandCheck}, + + // blocklist + {"G501", "Import blocklist: crypto/md5", NewBlocklistedImportMD5}, + {"G502", "Import blocklist: crypto/des", NewBlocklistedImportDES}, + {"G503", "Import blocklist: crypto/rc4", NewBlocklistedImportRC4}, + {"G504", "Import blocklist: net/http/cgi", NewBlocklistedImportCGI}, + {"G505", "Import blocklist: crypto/sha1", NewBlocklistedImportSHA1}, + + // memory safety + {"G601", "Implicit memory aliasing in RangeStmt", NewImplicitAliasing}, + } + + ruleMap := make(map[string]RuleDefinition) + +RULES: + for _, rule := range rules { + for _, filter := range filters { + if filter(rule.ID) { + continue RULES + } + } + ruleMap[rule.ID] = rule + } + return ruleMap +} diff --git a/vendor/github.com/securego/gosec/v2/rules/sql.go b/vendor/github.com/securego/gosec/v2/rules/sql.go new file mode 100644 index 000000000..8a5b63861 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/sql.go @@ -0,0 +1,303 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + "regexp" + "strings" + + "github.com/securego/gosec/v2" +) + +type sqlStatement struct { + gosec.MetaData + gosec.CallList + + // Contains a list of patterns which must all match for the rule to match. + patterns []*regexp.Regexp +} + +func (s *sqlStatement) ID() string { + return s.MetaData.ID +} + +// See if the string matches the patterns for the statement. +func (s *sqlStatement) MatchPatterns(str string) bool { + for _, pattern := range s.patterns { + if !pattern.MatchString(str) { + return false + } + } + return true +} + +type sqlStrConcat struct { + sqlStatement +} + +func (s *sqlStrConcat) ID() string { + return s.MetaData.ID +} + +// see if we can figure out what it is +func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool { + if n.Obj != nil { + return n.Obj.Kind != ast.Var && n.Obj.Kind != ast.Fun + } + + // Try to resolve unresolved identifiers using other files in same package + for _, file := range c.PkgFiles { + if node, ok := file.Scope.Objects[n.String()]; ok { + return node.Kind != ast.Var && node.Kind != ast.Fun + } + } + return false +} + +// checkQuery verifies if the query parameters is a string concatenation +func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { + _, fnName, err := gosec.GetCallInfo(call, ctx) + if err != nil { + return nil, err + } + var query ast.Node + if strings.HasSuffix(fnName, "Context") { + query = call.Args[1] + } else { + query = call.Args[0] + } + + if be, ok := query.(*ast.BinaryExpr); ok { + operands := gosec.GetBinaryExprOperands(be) + if start, ok := operands[0].(*ast.BasicLit); ok { + if str, e := gosec.GetString(start); e == nil { + if !s.MatchPatterns(str) { + return nil, nil + } + } + for _, op := range operands[1:] { + if _, ok := op.(*ast.BasicLit); ok { + continue + } + if op, ok := op.(*ast.Ident); ok && s.checkObject(op, ctx) { + continue + } + return gosec.NewIssue(ctx, be, s.ID(), s.What, s.Severity, s.Confidence), nil + } + } + } + + return nil, nil +} + +// Checks SQL query concatenation issues such as "SELECT * FROM table WHERE " + " ' OR 1=1" +func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + switch stmt := n.(type) { + case *ast.AssignStmt: + for _, expr := range stmt.Rhs { + if sqlQueryCall, ok := expr.(*ast.CallExpr); ok && s.ContainsCallExpr(expr, ctx) != nil { + return s.checkQuery(sqlQueryCall, ctx) + } + } + case *ast.ExprStmt: + if sqlQueryCall, ok := stmt.X.(*ast.CallExpr); ok && s.ContainsCallExpr(stmt.X, ctx) != nil { + return s.checkQuery(sqlQueryCall, ctx) + } + } + return nil, nil +} + +// NewSQLStrConcat looks for cases where we are building SQL strings via concatenation +func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + rule := &sqlStrConcat{ + sqlStatement: sqlStatement{ + patterns: []*regexp.Regexp{ + regexp.MustCompile(`(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `), + }, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "SQL string concatenation", + }, + 
CallList: gosec.NewCallList(), + }, + } + + rule.AddAll("*database/sql.DB", "Query", "QueryContext", "QueryRow", "QueryRowContext") + rule.AddAll("*database/sql.Tx", "Query", "QueryContext", "QueryRow", "QueryRowContext") + return rule, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)} +} + +type sqlStrFormat struct { + gosec.CallList + sqlStatement + fmtCalls gosec.CallList + noIssue gosec.CallList + noIssueQuoted gosec.CallList +} + +// see if we can figure out what it is +func (s *sqlStrFormat) constObject(e ast.Expr, c *gosec.Context) bool { + n, ok := e.(*ast.Ident) + if !ok { + return false + } + + if n.Obj != nil { + return n.Obj.Kind == ast.Con + } + + // Try to resolve unresolved identifiers using other files in same package + for _, file := range c.PkgFiles { + if node, ok := file.Scope.Objects[n.String()]; ok { + return node.Kind == ast.Con + } + } + return false +} + +func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { + _, fnName, err := gosec.GetCallInfo(call, ctx) + if err != nil { + return nil, err + } + var query ast.Node + if strings.HasSuffix(fnName, "Context") { + query = call.Args[1] + } else { + query = call.Args[0] + } + + if ident, ok := query.(*ast.Ident); ok && ident.Obj != nil { + decl := ident.Obj.Decl + if assign, ok := decl.(*ast.AssignStmt); ok { + for _, expr := range assign.Rhs { + issue := s.checkFormatting(expr, ctx) + if issue != nil { + return issue, err + } + } + } + } + + return nil, nil +} + +func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *gosec.Issue { + // argIndex changes the function argument which gets matched to the regex + argIndex := 0 + if node := s.fmtCalls.ContainsPkgCallExpr(n, ctx, false); node != nil { + // if the function is fmt.Fprintf, search for SQL statement in Args[1] instead + if sel, ok := node.Fun.(*ast.SelectorExpr); ok { + if sel.Sel.Name == "Fprintf" { + // if os.Stderr or os.Stdout is in Arg[0], mark as no issue + if arg, ok := node.Args[0].(*ast.SelectorExpr); ok { + if ident, ok := arg.X.(*ast.Ident); ok { + if s.noIssue.Contains(ident.Name, arg.Sel.Name) { + return nil + } + } + } + // the function is Fprintf so set argIndex = 1 + argIndex = 1 + } + } + + // no formatter + if len(node.Args) == 0 { + return nil + } + + var formatter string + + // concats callexpr arg strings together if needed before regex evaluation + if argExpr, ok := node.Args[argIndex].(*ast.BinaryExpr); ok { + if fullStr, ok := gosec.ConcatString(argExpr); ok { + formatter = fullStr + } + } else if arg, e := gosec.GetString(node.Args[argIndex]); e == nil { + formatter = arg + } + if len(formatter) <= 0 { + return nil + } + + // If all formatter args are quoted or constant, then the SQL construction is safe + if argIndex+1 < len(node.Args) { + allSafe := true + for _, arg := range node.Args[argIndex+1:] { + if n := s.noIssueQuoted.ContainsPkgCallExpr(arg, ctx, true); n == nil && !s.constObject(arg, ctx) { + allSafe = false + break + } + } + if allSafe { + return nil + } + } + if s.MatchPatterns(formatter) { + return gosec.NewIssue(ctx, n, s.ID(), s.What, s.Severity, s.Confidence) + } + } + return nil +} + +// Check SQL query formatting issues such as "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)" +func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { + switch stmt := n.(type) { + case *ast.AssignStmt: + for _, expr := range stmt.Rhs { + if sqlQueryCall, ok := expr.(*ast.CallExpr); ok && s.ContainsCallExpr(expr, ctx) != nil { + return 
s.checkQuery(sqlQueryCall, ctx) + } + } + case *ast.ExprStmt: + if sqlQueryCall, ok := stmt.X.(*ast.CallExpr); ok && s.ContainsCallExpr(stmt.X, ctx) != nil { + return s.checkQuery(sqlQueryCall, ctx) + } + } + return nil, nil +} + +// NewSQLStrFormat looks for cases where we're building SQL query strings using format strings +func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + rule := &sqlStrFormat{ + CallList: gosec.NewCallList(), + fmtCalls: gosec.NewCallList(), + noIssue: gosec.NewCallList(), + noIssueQuoted: gosec.NewCallList(), + sqlStatement: sqlStatement{ + patterns: []*regexp.Regexp{ + regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "), + regexp.MustCompile("%[^bdoxXfFp]"), + }, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "SQL string formatting", + }, + }, + } + rule.AddAll("*database/sql.DB", "Query", "QueryContext", "QueryRow", "QueryRowContext") + rule.AddAll("*database/sql.Tx", "Query", "QueryContext", "QueryRow", "QueryRowContext") + rule.fmtCalls.AddAll("fmt", "Sprint", "Sprintf", "Sprintln", "Fprintf") + rule.noIssue.AddAll("os", "Stdout", "Stderr") + rule.noIssueQuoted.Add("github.com/lib/pq", "QuoteIdentifier") + + return rule, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/ssh.go b/vendor/github.com/securego/gosec/v2/rules/ssh.go new file mode 100644 index 000000000..01f37da51 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/ssh.go @@ -0,0 +1,38 @@ +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type sshHostKey struct { + gosec.MetaData + pkg string + calls []string +} + +func (r *sshHostKey) ID() string { + return r.MetaData.ID +} + +func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { + if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + return nil, nil +} + +// NewSSHHostKey rule detects the use of insecure ssh HostKeyCallback. 
+func NewSSHHostKey(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &sshHostKey{ + pkg: "golang.org/x/crypto/ssh", + calls: []string{"InsecureIgnoreHostKey"}, + MetaData: gosec.MetaData{ + ID: id, + What: "Use of ssh InsecureIgnoreHostKey should be audited", + Severity: gosec.Medium, + Confidence: gosec.High, + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/ssrf.go b/vendor/github.com/securego/gosec/v2/rules/ssrf.go new file mode 100644 index 000000000..86bb8278d --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/ssrf.go @@ -0,0 +1,66 @@ +package rules + +import ( + "go/ast" + "go/types" + + "github.com/securego/gosec/v2" +) + +type ssrf struct { + gosec.MetaData + gosec.CallList +} + +// ID returns the identifier for this rule +func (r *ssrf) ID() string { + return r.MetaData.ID +} + +// ResolveVar tries to resolve the first argument of a call expression +// The first argument is the url +func (r *ssrf) ResolveVar(n *ast.CallExpr, c *gosec.Context) bool { + if len(n.Args) > 0 { + arg := n.Args[0] + if ident, ok := arg.(*ast.Ident); ok { + obj := c.Info.ObjectOf(ident) + if _, ok := obj.(*types.Var); ok { + scope := c.Pkg.Scope() + if scope != nil && scope.Lookup(ident.Name) != nil { + // a URL defined in a variable at package scope can be changed at any time + return true + } + if !gosec.TryResolve(ident, c) { + return true + } + } + } + } + return false +} + +// Match inspects AST nodes to determine if certain net/http methods are called with variable input +func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + // Call expression is using http package directly + if node := r.ContainsPkgCallExpr(n, c, false); node != nil { + if r.ResolveVar(node, c) { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + return nil, nil +} + +// NewSSRFCheck detects cases where HTTP requests are sent +func NewSSRFCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + rule := &ssrf{ + CallList: gosec.NewCallList(), + MetaData: gosec.MetaData{ + ID: id, + What: "Potential HTTP request made with variable url", + Severity: gosec.Medium, + Confidence: gosec.Medium, + }, + } + rule.AddAll("net/http", "Do", "Get", "Head", "Post", "PostForm", "RoundTrip") + return rule, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/subproc.go b/vendor/github.com/securego/gosec/v2/rules/subproc.go new file mode 100644 index 000000000..48a07269f --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/subproc.go @@ -0,0 +1,85 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package rules
+
+import (
+	"go/ast"
+	"go/types"
+
+	"github.com/securego/gosec/v2"
+)
+
+type subprocess struct {
+	gosec.MetaData
+	gosec.CallList
+}
+
+func (r *subprocess) ID() string {
+	return r.MetaData.ID
+}
+
+// TODO(gm) The only real potential for command injection with a Go project
+// is something like this:
+//
+// syscall.Exec("/bin/sh", []string{"-c", tainted})
+//
+// E.g. Input is correctly escaped but the execution context being used
+// is unsafe. For example:
+//
+// syscall.Exec("echo", "foobar" + tainted)
+func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+	if node := r.ContainsPkgCallExpr(n, c, false); node != nil {
+		args := node.Args
+		if r.isContext(n, c) {
+			args = args[1:]
+		}
+		for _, arg := range args {
+			if ident, ok := arg.(*ast.Ident); ok {
+				obj := c.Info.ObjectOf(ident)
+				if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) {
+					return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil
+				}
+			} else if !gosec.TryResolve(arg, c) {
+				// the arg is not a constant or a variable but instead a function call or os.Args[i]
+				return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with function call as argument or cmd arguments", gosec.Medium, gosec.High), nil
+			}
+		}
+	}
+	return nil, nil
+}
+
+// isContext checks whether the node is a CommandContext call.
+// This is required in order to skip the first argument from the check.
+func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool {
+	selector, indent, err := gosec.GetCallInfo(n, ctx)
+	if err != nil {
+		return false
+	}
+	if selector == "exec" && indent == "CommandContext" {
+		return true
+	}
+	return false
+}
+
+// NewSubproc detects cases where we are forking out to an external process
+func NewSubproc(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+	rule := &subprocess{gosec.MetaData{ID: id}, gosec.NewCallList()}
+	rule.Add("os/exec", "Command")
+	rule.Add("os/exec", "CommandContext")
+	rule.Add("syscall", "Exec")
+	rule.Add("syscall", "ForkExec")
+	rule.Add("syscall", "StartProcess")
+	return rule, []ast.Node{(*ast.CallExpr)(nil)}
+}
diff --git a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go
new file mode 100644
index 000000000..36f0f979b
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go
@@ -0,0 +1,58 @@
+// (c) Copyright 2016 Hewlett Packard Enterprise Development LP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package rules + +import ( + "go/ast" + "regexp" + + "github.com/securego/gosec/v2" +) + +type badTempFile struct { + gosec.MetaData + calls gosec.CallList + args *regexp.Regexp +} + +func (t *badTempFile) ID() string { + return t.MetaData.ID +} + +func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { + if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { + if arg, e := gosec.GetString(node.Args[0]); t.args.MatchString(arg) && e == nil { + return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil + } + } + return nil, nil +} + +// NewBadTempFile detects direct writes to predictable path in temporary directory +func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("io/ioutil", "WriteFile") + calls.Add("os", "Create") + return &badTempFile{ + calls: calls, + args: regexp.MustCompile(`^/tmp/.*$|^/var/tmp/.*$`), + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "File creation in shared tmp directory without using ioutil.Tempfile", + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/templates.go b/vendor/github.com/securego/gosec/v2/rules/templates.go new file mode 100644 index 000000000..b9e7bb7bf --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/templates.go @@ -0,0 +1,60 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type templateCheck struct { + gosec.MetaData + calls gosec.CallList +} + +func (t *templateCheck) ID() string { + return t.MetaData.ID +} + +func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { + for _, arg := range node.Args { + if _, ok := arg.(*ast.BasicLit); !ok { // basic lits are safe + return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil + } + } + } + return nil, nil +} + +// NewTemplateCheck constructs the template check rule. This rule is used to +// find use of templates where HTML/JS escaping is not being used +func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := gosec.NewCallList() + calls.Add("html/template", "HTML") + calls.Add("html/template", "HTMLAttr") + calls.Add("html/template", "JS") + calls.Add("html/template", "URL") + return &templateCheck{ + calls: calls, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.Low, + What: "this method will not auto-escape HTML. 
Verify data is well formed.", + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/tls.go b/vendor/github.com/securego/gosec/v2/rules/tls.go new file mode 100644 index 000000000..219d8fcde --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/tls.go @@ -0,0 +1,171 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate tlsconfig + +package rules + +import ( + "crypto/tls" + "fmt" + "go/ast" + + "github.com/securego/gosec/v2" +) + +type insecureConfigTLS struct { + gosec.MetaData + MinVersion int64 + MaxVersion int64 + requiredType string + goodCiphers []string + actualMinVersion int64 + actualMaxVersion int64 +} + +func (t *insecureConfigTLS) ID() string { + return t.MetaData.ID +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *gosec.Issue { + if ciphers, ok := n.(*ast.CompositeLit); ok { + for _, cipher := range ciphers.Elts { + if ident, ok := cipher.(*ast.SelectorExpr); ok { + if !stringInSlice(ident.Sel.Name, t.goodCiphers) { + err := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name) + return gosec.NewIssue(c, ident, t.ID(), err, gosec.High, gosec.High) + } + } + } + } + return nil +} + +func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Context) *gosec.Issue { + if ident, ok := n.Key.(*ast.Ident); ok { + switch ident.Name { + case "InsecureSkipVerify": + if node, ok := n.Value.(*ast.Ident); ok { + if node.Name != "false" { + return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify set true.", gosec.High, gosec.High) + } + } else { + // TODO(tk): symbol tab look up to get the actual value + return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify may be true.", gosec.High, gosec.Low) + } + + case "PreferServerCipherSuites": + if node, ok := n.Value.(*ast.Ident); ok { + if node.Name == "false" { + return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites set false.", gosec.Medium, gosec.High) + } + } else { + // TODO(tk): symbol tab look up to get the actual value + return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites may be false.", gosec.Medium, gosec.Low) + } + + case "MinVersion": + if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + t.actualMinVersion = ival + } else { + if se, ok := n.Value.(*ast.SelectorExpr); ok { + if pkg, ok := se.X.(*ast.Ident); ok && pkg.Name == "tls" { + t.actualMinVersion = t.mapVersion(se.Sel.Name) + } + } + } + + case "MaxVersion": + if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + t.actualMaxVersion = ival + } else { + if se, ok := n.Value.(*ast.SelectorExpr); ok { + if pkg, ok := se.X.(*ast.Ident); ok && pkg.Name == "tls" { + t.actualMaxVersion = t.mapVersion(se.Sel.Name) + } + } + } + + case "CipherSuites": + if ret := t.processTLSCipherSuites(n.Value, c); ret != nil { + 
return ret + } + + } + } + return nil +} + +func (t *insecureConfigTLS) mapVersion(version string) int64 { + var v int64 + switch version { + case "VersionTLS13": + v = tls.VersionTLS13 + case "VersionTLS12": + v = tls.VersionTLS12 + case "VersionTLS11": + v = tls.VersionTLS11 + case "VersionTLS10": + v = tls.VersionTLS10 + } + return v +} + +func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *gosec.Issue { + if t.actualMaxVersion == 0 && t.actualMinVersion >= t.MinVersion { + // no warning is generated since the min version is greater than the secure min version + return nil + } + if t.actualMinVersion < t.MinVersion { + return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High) + } + if t.actualMaxVersion < t.MaxVersion { + return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High) + } + return nil +} + +func (t *insecureConfigTLS) resetVersion() { + t.actualMaxVersion = 0 + t.actualMinVersion = 0 +} + +func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil { + actualType := c.Info.TypeOf(complit.Type) + if actualType != nil && actualType.String() == t.requiredType { + for _, elt := range complit.Elts { + if kve, ok := elt.(*ast.KeyValueExpr); ok { + issue := t.processTLSConfVal(kve, c) + if issue != nil { + return issue, nil + } + } + } + issue := t.checkVersion(complit, c) + t.resetVersion() + return issue, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/securego/gosec/v2/rules/tls_config.go b/vendor/github.com/securego/gosec/v2/rules/tls_config.go new file mode 100644 index 000000000..5d68593d8 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/tls_config.go @@ -0,0 +1,92 @@ +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +// NewModernTLSCheck creates a check for Modern TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &insecureConfigTLS{ + MetaData: gosec.MetaData{ID: id}, + requiredType: "crypto/tls.Config", + MinVersion: 0x0304, + MaxVersion: 0x0304, + goodCiphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} + +// NewIntermediateTLSCheck creates a check for Intermediate TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &insecureConfigTLS{ + MetaData: gosec.MetaData{ID: id}, + requiredType: "crypto/tls.Config", + MinVersion: 0x0303, + MaxVersion: 0x0304, + goodCiphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} + +// NewOldTLSCheck creates a check for Old TLS ciphers +// DO NOT EDIT - generated by tlsconfig tool +func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return 
&insecureConfigTLS{ + MetaData: gosec.MetaData{ID: id}, + requiredType: "crypto/tls.Config", + MinVersion: 0x0301, + MaxVersion: 0x0304, + goodCiphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + }, + }, []ast.Node{(*ast.CompositeLit)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/unsafe.go b/vendor/github.com/securego/gosec/v2/rules/unsafe.go new file mode 100644 index 000000000..88a298fb5 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/unsafe.go @@ -0,0 +1,53 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type usingUnsafe struct { + gosec.MetaData + pkg string + calls []string +} + +func (r *usingUnsafe) ID() string { + return r.MetaData.ID +} + +func (r *usingUnsafe) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { + if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + return nil, nil +} + +// NewUsingUnsafe rule detects the use of the unsafe package. This is only +// really useful for auditing purposes. 
+func NewUsingUnsafe(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + return &usingUnsafe{ + pkg: "unsafe", + calls: []string{"Alignof", "Offsetof", "Sizeof", "Pointer"}, + MetaData: gosec.MetaData{ + ID: id, + What: "Use of unsafe calls should be audited", + Severity: gosec.Low, + Confidence: gosec.High, + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go new file mode 100644 index 000000000..eecb88f04 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go @@ -0,0 +1,58 @@ +// (c) Copyright 2016 Hewlett Packard Enterprise Development LP +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "go/ast" + + "github.com/securego/gosec/v2" +) + +type usesWeakCryptography struct { + gosec.MetaData + blocklist map[string][]string +} + +func (r *usesWeakCryptography) ID() string { + return r.MetaData.ID +} + +func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { + for pkg, funcs := range r.blocklist { + if _, matched := gosec.MatchCallByPackage(n, c, pkg, funcs...); matched { + return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + return nil, nil +} + +// NewUsesWeakCryptography detects uses of des.* md5.* or rc4.* +func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + calls := make(map[string][]string) + calls["crypto/des"] = []string{"NewCipher", "NewTripleDESCipher"} + calls["crypto/md5"] = []string{"New", "Sum"} + calls["crypto/sha1"] = []string{"New", "Sum"} + calls["crypto/rc4"] = []string{"NewCipher"} + rule := &usesWeakCryptography{ + blocklist: calls, + MetaData: gosec.MetaData{ + ID: id, + Severity: gosec.Medium, + Confidence: gosec.High, + What: "Use of weak cryptographic primitive", + }, + } + return rule, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/shazow/go-diff/LICENSE b/vendor/github.com/shazow/go-diff/LICENSE new file mode 100644 index 000000000..85e1e4b33 --- /dev/null +++ b/vendor/github.com/shazow/go-diff/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrey Petrov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/shazow/go-diff/difflib/differ.go b/vendor/github.com/shazow/go-diff/difflib/differ.go new file mode 100644 index 000000000..43dc84d9a --- /dev/null +++ b/vendor/github.com/shazow/go-diff/difflib/differ.go @@ -0,0 +1,39 @@ +// This package implements the diff.Differ interface using github.com/mb0/diff as a backend. +package difflib + +import ( + "io" + "io/ioutil" + + "github.com/pmezard/go-difflib/difflib" +) + +type differ struct{} + +// New returns an implementation of diff.Differ using mb0diff as the backend. +func New() *differ { + return &differ{} +} + +// Diff consumes the entire reader streams into memory before generating a diff +// which then gets filled into the buffer. This implementation stores and +// manipulates all three values in memory. +func (diff *differ) Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error { + var src, dst []byte + var err error + + if src, err = ioutil.ReadAll(a); err != nil { + return err + } + if dst, err = ioutil.ReadAll(b); err != nil { + return err + } + + d := difflib.UnifiedDiff{ + A: difflib.SplitLines(string(src)), + B: difflib.SplitLines(string(dst)), + Context: 3, + } + + return difflib.WriteUnifiedDiff(out, d) +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 000000000..1fb13abeb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000..65dc28503 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+ check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..c1dbd5a3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..7567f6128 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). 
+ +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* 
performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 000000000..5152b6aa4 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). 
+ +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. 
You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. 
Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. 
+ serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
+ +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8fd189e1c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 000000000..df9d65c3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 000000000..4545dec07 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,52 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get() +} + +func putBuffer(buf *bytes.Buffer) { + buf.Reset() + bufferPool.Put(buf) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 000000000..da67aba06 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 000000000..07a1e5fa7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,431 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. 
It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. 
+func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + + buffer = getBuffer() + defer func() { + newEntry.Buffer = nil + putBuffer(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) 
+} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 000000000..017c30ce6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) 
+} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 000000000..408883773 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
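Editorial note (not part of the patch): the `Formatter` interface above only requires a `Format(*Entry) ([]byte, error)` method whose result is written to `logger.Out`. A minimal custom formatter sketch, assuming the hypothetical name `KVFormatter`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/sirupsen/logrus"
)

// KVFormatter renders level, message and fields as one key=value line.
type KVFormatter struct{}

func (f *KVFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	var b bytes.Buffer
	fmt.Fprintf(&b, "level=%s msg=%q", entry.Level.String(), entry.Message)
	// Map iteration order is not deterministic; sort entry.Data keys if stable output matters.
	for k, v := range entry.Data {
		fmt.Fprintf(&b, " %s=%v", k, v)
	}
	b.WriteByte('\n')
	return b.Bytes(), nil
}

func main() {
	log := logrus.New()
	log.SetFormatter(&KVFormatter{})
	log.WithField("component", "grpc").Info("server started")
}
```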
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod new file mode 100644 index 000000000..b3919d5ea --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -0,0 +1,10 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 +) + +go 1.13 diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum new file mode 100644 index 000000000..694c18b84 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. 
Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..c96dc5636 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 000000000..337704457 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,404 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func() []interface{} + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. 
This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) 
+} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) + logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) 
+} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 000000000..2f16224cb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
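Editorial note (not part of the patch): `ParseLevel` pairs naturally with `SetLevel` and `IsLevelEnabled` shown earlier. A sketch of driving the level from configuration, assuming a `LOG_LEVEL` environment variable purely for illustration:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()

	// ParseLevel accepts the textual names listed above ("panic" ... "trace").
	if lvl, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
		log.SetLevel(lvl)
	} else {
		log.SetLevel(logrus.InfoLevel) // fall back when the value is empty or invalid
	}

	// Debug output is only emitted when the configured level is Debug or finer.
	if log.IsLevelEnabled(logrus.DebugLevel) {
		log.Debug("debug logging enabled")
	}
	log.Info("logger configured")
}
```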
+func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
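Editorial note (not part of the patch): a sketch of a library API written against `StdLogger`, so callers can pass either the stdlib `*log.Logger` or a logrus logger/entry; the function name `StartWorker` is an assumption:

```go
package main

import (
	"log"
	"os"

	"github.com/sirupsen/logrus"
)

// StartWorker only needs Print-style logging, so it accepts logrus.StdLogger.
func StartWorker(logger logrus.StdLogger) {
	logger.Printf("worker started (pid=%d)", os.Getpid())
}

func main() {
	StartWorker(log.New(os.Stderr, "", log.LstdFlags)) // stdlib logger
	StartWorker(logrus.New())                          // logrus logger
	StartWorker(logrus.WithField("worker", 1))         // logrus entry
}
```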
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 000000000..2403de981 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 000000000..499789984 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..ebdae3ec6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 000000000..97af92c68 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git 
a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 000000000..3293fb3ca --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 000000000..f6710b3bd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 000000000..04748b851 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 000000000..2879eb50e --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..be2c6efe5 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,339 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. 
useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 000000000..72e8e3a1b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,70 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/sonatard/noctx/.gitignore b/vendor/github.com/sonatard/noctx/.gitignore new file mode 100644 index 000000000..2d830686d --- /dev/null +++ b/vendor/github.com/sonatard/noctx/.gitignore @@ -0,0 +1 @@ +coverage.out diff --git a/vendor/github.com/sonatard/noctx/.golangci.yml b/vendor/github.com/sonatard/noctx/.golangci.yml new file mode 100644 index 000000000..1580acde2 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/.golangci.yml @@ -0,0 +1,20 @@ +run: + +linters-settings: + govet: + enable-all: true + +linters: + enable-all: true + disable: + - gochecknoglobals + - gomnd + - gocognit + - nestif + +issues: + exclude-rules: + - path: reqwithoutctx/ssa.go + text: "Consider preallocating `exts`" + linters: + - prealloc diff --git a/vendor/github.com/sonatard/noctx/LICENSE b/vendor/github.com/sonatard/noctx/LICENSE new file mode 100644 index 000000000..a00d5727f --- 
/dev/null +++ b/vendor/github.com/sonatard/noctx/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 sonatard + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sonatard/noctx/Makefile b/vendor/github.com/sonatard/noctx/Makefile new file mode 100644 index 000000000..1a27f6b59 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/Makefile @@ -0,0 +1,16 @@ +.PHONY: all imports test lint + +all: imports test lint + +imports: + goimports -w ./ + +test: + go test -race ./... + +test_coverage: + go test -race -coverprofile=coverage.out -covermode=atomic ./... + +lint: + golangci-lint run ./... + diff --git a/vendor/github.com/sonatard/noctx/README.md b/vendor/github.com/sonatard/noctx/README.md new file mode 100644 index 000000000..bfe9782c6 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/README.md @@ -0,0 +1,95 @@ +# noctx + +![](https://github.com/sonatard/noctx/workflows/.github/workflows/ci.yml/badge.svg) + +`noctx` finds sending http request without context.Context. + +You should use `noctx` if sending http request in your library. +Passing `context.Context` enables library user to cancel http request, getting trace information and so on. + +## Install + +```sh +$ go get -u github.com/sonatard/noctx/cmd/noctx +``` + +## Usage + +```sh +$ go vet -vettool=`which noctx` main.go +./main.go:6:11: net/http.Get must not be called +``` + +## Detection rules +- Executing following functions + - `net/http.Get` + - `net/http.Head` + - `net/http.Post` + - `net/http.PostForm` + - `(*net/http.Client).Get` + - `(*net/http.Client).Head` + - `(*net/http.Client).Post` + - `(*net/http.Client).PostForm` +- `http.Request` returned by `http.NewRequest` function and passes it to other function. + +## How to fix +- Send http request using `(*http.Client).Do(*http.Request)` method. +- In Go 1.13 and later, use `http.NewRequestWithContext` function instead of using `http.NewRequest` function. +- In Go 1.12 and earlier, call `(http.Request).WithContext(ctx)` after `http.NewRequest`. + +`(http.Request).WithContext(ctx)` has a disadvantage of performance because it returns a copy of `http.Request`. Use `http.NewRequestWithContext` function if you only support Go1.13 or later. 
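Editorial note (not part of the patch): complementing the project's sample below, which shows the calls noctx reports, here is a minimal sketch of the compliant pattern described in "How to fix"; the helper name `fetch` and the example URL are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

func fetch(ctx context.Context, client *http.Client, url string) (string, error) {
	// Go 1.13+: build the request with the context attached from the start.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	// On Go 1.12 and earlier you would instead call req = req.WithContext(ctx).

	resp, err := client.Do(req) // use (*http.Client).Do rather than http.Get and friends
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	body, err := fetch(ctx, http.DefaultClient, "http://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(len(body), "bytes")
}
```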
+ +## Sample Code + +```go +package main + +import ( + "context" + "net/http" +) + +func main() { + const url = "http://example.com" + http.Get(url) // want `net/http\.Get must not be called` + http.Head(url) // want `net/http\.Head must not be called` + http.Post(url, "", nil) // want `net/http\.Post must not be called` + http.PostForm(url, nil) // want `net/http\.PostForm must not be called` + + cli := &http.Client{} + cli.Get(url) // want `\(\*net/http\.Client\)\.Get must not be called` + cli.Head(url) // want `\(\*net/http\.Client\)\.Head must not be called` + cli.Post(url, "", nil) // want `\(\*net/http\.Client\)\.Post must not be called` + cli.PostForm(url, nil) // want `\(\*net/http\.Client\)\.PostForm must not be called` + + req, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext` + cli.Do(req) + + ctx := context.Background() + req2, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) // OK + cli.Do(req2) + + req3, _ := http.NewRequest(http.MethodPost, url, nil) // OK + req3 = req3.WithContext(ctx) + cli.Do(req3) + + f2 := func(req *http.Request, ctx context.Context) *http.Request { + return req + } + req4, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext` + req4 = f2(req4, ctx) + cli.Do(req4) + + req5, _ := func() (*http.Request, error) { + return http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext` + }() + cli.Do(req5) + +} +``` + +## Reference +- [net/http - NewRequest](https://golang.org/pkg/net/http/#NewRequest) +- [net/http - NewRequestWithContext](https://golang.org/pkg/net/http/#NewRequestWithContext) +- [net/http - Request.WithContext](https://golang.org/pkg/net/http/#Request.WithContext) + diff --git a/vendor/github.com/sonatard/noctx/go.mod b/vendor/github.com/sonatard/noctx/go.mod new file mode 100644 index 000000000..47b7901a0 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/go.mod @@ -0,0 +1,8 @@ +module github.com/sonatard/noctx + +go 1.13 + +require ( + github.com/gostaticanalysis/analysisutil v0.0.3 + golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9 +) diff --git a/vendor/github.com/sonatard/noctx/go.sum b/vendor/github.com/sonatard/noctx/go.sum new file mode 100644 index 000000000..f8e5b0759 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/go.sum @@ -0,0 +1,16 @@ +github.com/gostaticanalysis/analysisutil v0.0.3 h1:iwp+5/UAyzQSFgQ4uR2sni99sJ8Eo9DEacKWM5pekIg= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9 h1:KOkk4e2xd5OeCDJGwacvr75ICCbCsShrHiqPEdsA9hg= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/sonatard/noctx/ngfunc/main.go b/vendor/github.com/sonatard/noctx/ngfunc/main.go new file mode 100644 index 000000000..cfeb0f001 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/ngfunc/main.go @@ -0,0 +1,57 @@ +package ngfunc + +import ( + "go/types" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" +) + +func Run(pass *analysis.Pass) (interface{}, error) { + ngFuncNames := []string{ + "net/http.Get", + "net/http.Head", + "net/http.Post", + "net/http.PostForm", + "(*net/http.Client).Get", + "(*net/http.Client).Head", + "(*net/http.Client).Post", + "(*net/http.Client).PostForm", + } + + ngFuncs := typeFuncs(pass, ngFuncNames) + if len(ngFuncs) == 0 { + return nil, nil + } + + reportFuncs := ngCalledFuncs(pass, ngFuncs) + report(pass, reportFuncs) + + return nil, nil +} + +func ngCalledFuncs(pass *analysis.Pass, ngFuncs []*types.Func) []*Report { + var reports []*Report + + srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + for _, sf := range srcFuncs { + for _, b := range sf.Blocks { + for _, instr := range b.Instrs { + for _, ngFunc := range ngFuncs { + if analysisutil.Called(instr, nil, ngFunc) { + ngCalledFunc := &Report{ + Instruction: instr, + function: ngFunc, + } + reports = append(reports, ngCalledFunc) + + break + } + } + } + } + } + + return reports +} diff --git a/vendor/github.com/sonatard/noctx/ngfunc/report.go b/vendor/github.com/sonatard/noctx/ngfunc/report.go new file mode 100644 index 000000000..e50051798 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/ngfunc/report.go @@ -0,0 +1,29 @@ +package ngfunc + +import ( + "fmt" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ssa" +) + +type Report struct { + Instruction ssa.Instruction + function *types.Func +} + +func (n *Report) Pos() token.Pos { + return n.Instruction.Pos() +} + +func (n *Report) Message() string { + return fmt.Sprintf("%s must not be called", n.function.FullName()) +} + +func report(pass *analysis.Pass, reports []*Report) { + for _, report := range reports { + pass.Reportf(report.Pos(), report.Message()) + } +} diff --git a/vendor/github.com/sonatard/noctx/ngfunc/types.go b/vendor/github.com/sonatard/noctx/ngfunc/types.go new file mode 100644 index 000000000..f1877386c --- /dev/null +++ b/vendor/github.com/sonatard/noctx/ngfunc/types.go @@ -0,0 +1,65 @@ +package ngfunc + +import ( + "fmt" + "go/types" + "strings" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" +) + +var errNotFound = fmt.Errorf("function not found") + +func typeFuncs(pass *analysis.Pass, funcs []string) []*types.Func { + fs := make([]*types.Func, 0, len(funcs)) + + for _, fn := range funcs { + f, err := typeFunc(pass, fn) + if err != nil { + continue + } + + fs = 
append(fs, f) + } + + return fs +} + +func typeFunc(pass *analysis.Pass, funcName string) (*types.Func, error) { + ss := strings.Split(strings.TrimSpace(funcName), ".") + + switch len(ss) { + case 2: + // package function: pkgname.Func + f, ok := analysisutil.ObjectOf(pass, ss[0], ss[1]).(*types.Func) + if !ok || f == nil { + return nil, errNotFound + } + + return f, nil + case 3: + // method: (*pkgname.Type).Method + pkgname := strings.TrimLeft(ss[0], "(") + typename := strings.TrimRight(ss[1], ")") + + if pkgname != "" && pkgname[0] == '*' { + pkgname = pkgname[1:] + typename = "*" + typename + } + + typ := analysisutil.TypeOf(pass, pkgname, typename) + if typ == nil { + return nil, errNotFound + } + + m := analysisutil.MethodOf(typ, ss[2]) + if m == nil { + return nil, errNotFound + } + + return m, nil + } + + return nil, errNotFound +} diff --git a/vendor/github.com/sonatard/noctx/noctx.go b/vendor/github.com/sonatard/noctx/noctx.go new file mode 100644 index 000000000..478ad8855 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/noctx.go @@ -0,0 +1,31 @@ +package noctx + +import ( + "github.com/sonatard/noctx/ngfunc" + "github.com/sonatard/noctx/reqwithoutctx" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" +) + +var Analyzer = &analysis.Analyzer{ + Name: "noctx", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, +} + +const Doc = "noctx finds sending http request without context.Context" + +func run(pass *analysis.Pass) (interface{}, error) { + if _, err := ngfunc.Run(pass); err != nil { + return nil, err + } + + if _, err := reqwithoutctx.Run(pass); err != nil { + return nil, err + } + + return nil, nil +} diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go new file mode 100644 index 000000000..b09e1de1b --- /dev/null +++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go @@ -0,0 +1,14 @@ +package reqwithoutctx + +import ( + "golang.org/x/tools/go/analysis" +) + +func Run(pass *analysis.Pass) (interface{}, error) { + analyzer := NewAnalyzer(pass) + reports := analyzer.Exec() + + report(pass, reports) + + return nil, nil +} diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go new file mode 100644 index 000000000..1c94e3148 --- /dev/null +++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go @@ -0,0 +1,26 @@ +package reqwithoutctx + +import ( + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ssa" +) + +type Report struct { + Instruction ssa.Instruction +} + +func (n *Report) Pos() token.Pos { + return n.Instruction.Pos() +} + +func (n *Report) Message() string { + return "should rewrite http.NewRequestWithContext or add (*Request).WithContext" +} + +func report(pass *analysis.Pass, reports []*Report) { + for _, report := range reports { + pass.Reportf(report.Pos(), report.Message()) + } +} diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go new file mode 100644 index 000000000..35751269e --- /dev/null +++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go @@ -0,0 +1,180 @@ +package reqwithoutctx + +import ( + "go/types" + + "github.com/gostaticanalysis/analysisutil" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +type Analyzer struct { + Funcs []*ssa.Function + newRequestType 
types.Type + requestType types.Type +} + +func NewAnalyzer(pass *analysis.Pass) *Analyzer { + newRequestType := analysisutil.TypeOf(pass, "net/http", "NewRequest") + requestType := analysisutil.TypeOf(pass, "net/http", "*Request") + + srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + + return &Analyzer{ + Funcs: srcFuncs, + newRequestType: newRequestType, + requestType: requestType, + } +} + +func (a *Analyzer) Exec() []*Report { + if a.newRequestType == nil || a.requestType == nil { + return []*Report{} + } + + usedReqs := a.usedReqs() + newReqs := a.requestsByNewRequest() + + return a.report(usedReqs, newReqs) +} + +func (a *Analyzer) report(usedReqs map[string]*ssa.Extract, newReqs map[*ssa.Call]*ssa.Extract) []*Report { + var reports []*Report + + for _, fReq := range usedReqs { + for newRequest, req := range newReqs { + if fReq == req { + reports = append(reports, &Report{Instruction: newRequest}) + } + } + } + + return reports +} + +func (a *Analyzer) usedReqs() map[string]*ssa.Extract { + reqExts := make(map[string]*ssa.Extract) + + for _, f := range a.Funcs { + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch i := instr.(type) { + case *ssa.Call: + exts := a.usedReqByCall(i) + for _, ext := range exts { + key := i.String() + ext.String() + reqExts[key] = ext + } + case *ssa.UnOp: + ext := a.usedReqByUnOp(i) + if ext != nil { + key := i.String() + ext.String() + reqExts[key] = ext + } + case *ssa.Return: + exts := a.usedReqByReturn(i) + for _, ext := range exts { + key := i.String() + ext.String() + reqExts[key] = ext + } + } + } + } + } + + return reqExts +} + +func (a *Analyzer) usedReqByCall(call *ssa.Call) []*ssa.Extract { + var exts []*ssa.Extract + + // skip net/http.Request method call + if call.Common().Signature().Recv() != nil && types.Identical(call.Value().Type(), a.requestType) { + return exts + } + + args := call.Common().Args + if len(args) == 0 { + return exts + } + + for _, arg := range args { + ext, ok := arg.(*ssa.Extract) + if !ok { + continue + } + + if !types.Identical(ext.Type(), a.requestType) { + continue + } + + exts = append(exts, ext) + } + + return exts +} + +func (a *Analyzer) usedReqByUnOp(op *ssa.UnOp) *ssa.Extract { + if ext, ok := op.X.(*ssa.Extract); ok && types.Identical(ext.Type(), a.requestType) { + return ext + } + + return nil +} + +func (a *Analyzer) usedReqByReturn(ret *ssa.Return) []*ssa.Extract { + rets := ret.Results + exts := make([]*ssa.Extract, 0, len(rets)) + + for _, ret := range rets { + ext, ok := ret.(*ssa.Extract) + if !ok { + continue + } + + if types.Identical(ext.Type(), a.requestType) { + exts = append(exts, ext) + } + } + + return exts +} + +func (a *Analyzer) requestsByNewRequest() map[*ssa.Call]*ssa.Extract { + reqs := make(map[*ssa.Call]*ssa.Extract) + + for _, f := range a.Funcs { + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + ext, ok := instr.(*ssa.Extract) + if !ok { + continue + } + + if !types.Identical(ext.Type(), a.requestType) { + continue + } + + operands := ext.Operands([]*ssa.Value{}) + if len(operands) != 1 { + continue + } + + operand := *operands[0] + + f, ok := operand.(*ssa.Call) + if !ok { + continue + } + + if types.Identical(f.Call.Value.Type(), a.newRequestType) { + reqs[f] = ext + } + } + } + } + + return reqs +} diff --git a/vendor/github.com/sourcegraph/go-diff/LICENSE b/vendor/github.com/sourcegraph/go-diff/LICENSE new file mode 100644 index 000000000..0733b6e5f --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/LICENSE @@ 
-0,0 +1,35 @@ +Copyright (c) 2014 Sourcegraph, Inc. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +----------------------------------------------------------------- + +Portions adapted from python-unidiff: + +Copyright (c) 2012 Matias Bordese + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.go new file mode 100644 index 000000000..0f465b9e2 --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.go @@ -0,0 +1,132 @@ +package diff + +import ( + "bytes" + "time" +) + +// A FileDiff represents a unified diff for a single file. +// +// A file unified diff has a header that resembles the following: +// +// --- oldname 2009-10-11 15:12:20.000000000 -0700 +// +++ newname 2009-10-11 15:12:30.000000000 -0700 +type FileDiff struct { + // the original name of the file + OrigName string + // the original timestamp (nil if not present) + OrigTime *time.Time + // the new name of the file (often same as OrigName) + NewName string + // the new timestamp (nil if not present) + NewTime *time.Time + // extended header lines (e.g., git's "new mode ", "rename from ", etc.) + Extended []string + // hunks that were changed from orig to new + Hunks []*Hunk +} + +// A Hunk represents a series of changes (additions or deletions) in a file's +// unified diff. +type Hunk struct { + // starting line number in original file + OrigStartLine int32 + // number of lines the hunk applies to in the original file + OrigLines int32 + // if > 0, then the original file had a 'No newline at end of file' mark at this offset + OrigNoNewlineAt int32 + // starting line number in new file + NewStartLine int32 + // number of lines the hunk applies to in the new file + NewLines int32 + // optional section heading + Section string + // 0-indexed line offset in unified file diff (including section headers); this is + // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is + // called) This accounts for hunk headers, too, so the StartPosition of the first + // hunk will be 1. 
+ StartPosition int32 + // hunk body (lines prefixed with '-', '+', or ' ') + Body []byte +} + +// A Stat is a diff stat that represents the number of lines added/changed/deleted. +type Stat struct { + // number of lines added + Added int32 + // number of lines changed + Changed int32 + // number of lines deleted + Deleted int32 +} + +// Stat computes the number of lines added/changed/deleted in all +// hunks in this file's diff. +func (d *FileDiff) Stat() Stat { + total := Stat{} + for _, h := range d.Hunks { + total.add(h.Stat()) + } + return total +} + +// Stat computes the number of lines added/changed/deleted in this +// hunk. +func (h *Hunk) Stat() Stat { + lines := bytes.Split(h.Body, []byte{'\n'}) + var last byte + st := Stat{} + for _, line := range lines { + if len(line) == 0 { + last = 0 + continue + } + switch line[0] { + case '-': + if last == '+' { + st.Added-- + st.Changed++ + last = 0 // next line can't change this one since this is already a change + } else { + st.Deleted++ + last = line[0] + } + case '+': + if last == '-' { + st.Deleted-- + st.Changed++ + last = 0 // next line can't change this one since this is already a change + } else { + st.Added++ + last = line[0] + } + default: + last = 0 + } + } + return st +} + +var ( + hunkPrefix = []byte("@@ ") + onlyInMessagePrefix = []byte("Only in ") +) + +const hunkHeader = "@@ -%d,%d +%d,%d @@" +const onlyInMessage = "Only in %s: %s\n" + +// diffTimeParseLayout is the layout used to parse the time in unified diff file +// header timestamps. +// See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. +const diffTimeParseLayout = "2006-01-02 15:04:05 -0700" + +// diffTimeFormatLayout is the layout used to format (i.e., print) the time in unified diff file +// header timestamps. +// See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. +const diffTimeFormatLayout = "2006-01-02 15:04:05.000000000 -0700" + +func (s *Stat) add(o Stat) { + s.Added += o.Added + s.Changed += o.Changed + s.Deleted += o.Deleted +} diff --git a/vendor/github.com/sourcegraph/go-diff/diff/doc.go b/vendor/github.com/sourcegraph/go-diff/diff/doc.go new file mode 100644 index 000000000..12fe96a07 --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/diff/doc.go @@ -0,0 +1,2 @@ +// Package diff provides a parser for unified diffs. +package diff diff --git a/vendor/github.com/sourcegraph/go-diff/diff/parse.go b/vendor/github.com/sourcegraph/go-diff/diff/parse.go new file mode 100644 index 000000000..8d5cfc238 --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/diff/parse.go @@ -0,0 +1,725 @@ +package diff + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "path/filepath" + "strconv" + "strings" + "time" +) + +// ParseMultiFileDiff parses a multi-file unified diff. It returns an error if +// parsing failed as a whole, but does its best to parse as many files in the +// case of per-file errors. If it cannot detect when the diff of the next file +// begins, the hunks are added to the FileDiff of the previous file. +func ParseMultiFileDiff(diff []byte) ([]*FileDiff, error) { + return NewMultiFileDiffReader(bytes.NewReader(diff)).ReadAllFiles() +} + +// NewMultiFileDiffReader returns a new MultiFileDiffReader that reads +// a multi-file unified diff from r. +func NewMultiFileDiffReader(r io.Reader) *MultiFileDiffReader { + return &MultiFileDiffReader{reader: bufio.NewReader(r)} +} + +// MultiFileDiffReader reads a multi-file unified diff. 
+type MultiFileDiffReader struct { + line int + offset int64 + reader *bufio.Reader + + // TODO(sqs): line and offset tracking in multi-file diffs is broken; add tests and fix + + // nextFileFirstLine is a line that was read by a HunksReader that + // was how it determined the hunk was complete. But to determine + // that, it needed to read the first line of the next file. We + // store nextFileFirstLine so we can "give the first line back" to + // the next file. + nextFileFirstLine []byte +} + +// ReadFile reads the next file unified diff (including headers and +// all hunks) from r. If there are no more files in the diff, it +// returns error io.EOF. +func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { + fr := &FileDiffReader{ + line: r.line, + offset: r.offset, + reader: r.reader, + fileHeaderLine: r.nextFileFirstLine, + } + r.nextFileFirstLine = nil + + fd, err := fr.ReadAllHeaders() + if err != nil { + switch e := err.(type) { + case *ParseError: + if e.Err == ErrNoFileHeader || e.Err == ErrExtendedHeadersEOF { + return nil, io.EOF + } + return nil, err + + case OverflowError: + r.nextFileFirstLine = []byte(e) + return fd, nil + + default: + return nil, err + } + } + + // FileDiff is added/deleted file + // No further collection of hunks needed + if fd.NewName == "" { + return fd, nil + } + + // Before reading hunks, check to see if there are any. If there + // aren't any, and there's another file after this file in the + // diff, then the hunks reader will complain ErrNoHunkHeader. It's + // not easy for us to tell from that error alone if that was + // caused by the lack of any hunks, or a malformatted hunk, so we + // need to perform the check here. + hr := fr.HunksReader() + line, err := readLine(r.reader) + if err != nil && err != io.EOF { + return fd, err + } + line = bytes.TrimSuffix(line, []byte{'\n'}) + if bytes.HasPrefix(line, hunkPrefix) { + hr.nextHunkHeaderLine = line + fd.Hunks, err = hr.ReadAllHunks() + r.line = fr.line + r.offset = fr.offset + if err != nil { + if e0, ok := err.(*ParseError); ok { + if e, ok := e0.Err.(*ErrBadHunkLine); ok { + // This just means we finished reading the hunks for the + // current file. See the ErrBadHunkLine doc for more info. + r.nextFileFirstLine = e.Line + return fd, nil + } + } + return nil, err + } + } else { + // There weren't any hunks, so that line we peeked ahead at + // actually belongs to the next file. Put it back. + r.nextFileFirstLine = line + } + + return fd, nil +} + +// ReadAllFiles reads all file unified diffs (including headers and all +// hunks) remaining in r. +func (r *MultiFileDiffReader) ReadAllFiles() ([]*FileDiff, error) { + var ds []*FileDiff + for { + d, err := r.ReadFile() + if d != nil { + ds = append(ds, d) + } + if err == io.EOF { + return ds, nil + } + if err != nil { + return nil, err + } + } +} + +// ParseFileDiff parses a file unified diff. +func ParseFileDiff(diff []byte) (*FileDiff, error) { + return NewFileDiffReader(bytes.NewReader(diff)).Read() +} + +// NewFileDiffReader returns a new FileDiffReader that reads a file +// unified diff. +func NewFileDiffReader(r io.Reader) *FileDiffReader { + return &FileDiffReader{reader: bufio.NewReader(r)} +} + +// FileDiffReader reads a unified file diff. 
+type FileDiffReader struct { + line int + offset int64 + reader *bufio.Reader + + // fileHeaderLine is the first file header line, set by: + // + // (1) ReadExtendedHeaders if it encroaches on a file header line + // (which it must to detect when extended headers are done); or + // (2) (*MultiFileDiffReader).ReadFile() if it encroaches on a + // file header line while reading the previous file's hunks (in a + // multi-file diff). + fileHeaderLine []byte +} + +// Read reads a file unified diff, including headers and hunks, from r. +func (r *FileDiffReader) Read() (*FileDiff, error) { + fd, err := r.ReadAllHeaders() + if err != nil { + return nil, err + } + + fd.Hunks, err = r.HunksReader().ReadAllHunks() + if err != nil { + return nil, err + } + + return fd, nil +} + +// ReadAllHeaders reads the file headers and extended headers (if any) +// from a file unified diff. It does not read hunks, and the returned +// FileDiff's Hunks field is nil. To read the hunks, call the +// (*FileDiffReader).HunksReader() method to get a HunksReader and +// read hunks from that. +func (r *FileDiffReader) ReadAllHeaders() (*FileDiff, error) { + var err error + fd := &FileDiff{} + + fd.Extended, err = r.ReadExtendedHeaders() + if pe, ok := err.(*ParseError); ok && pe.Err == ErrExtendedHeadersEOF { + wasEmpty := handleEmpty(fd) + if wasEmpty { + return fd, nil + } + return fd, err + } else if _, ok := err.(OverflowError); ok { + handleEmpty(fd) + return fd, err + } else if err != nil { + return fd, err + } + + var origTime, newTime *time.Time + fd.OrigName, fd.NewName, origTime, newTime, err = r.ReadFileHeaders() + if err != nil { + return nil, err + } + if origTime != nil { + fd.OrigTime = origTime + } + if newTime != nil { + fd.NewTime = newTime + } + + return fd, nil +} + +// HunksReader returns a new HunksReader that reads hunks from r. The +// HunksReader's line and offset (used in error messages) is set to +// start where the file diff header ended (which means errors have the +// correct position information). +func (r *FileDiffReader) HunksReader() *HunksReader { + return &HunksReader{ + line: r.line, + offset: r.offset, + reader: r.reader, + } +} + +// ReadFileHeaders reads the unified file diff header (the lines that +// start with "---" and "+++" with the orig/new file names and +// timestamps). Or which starts with "Only in " with dir path and filename. +// "Only in" message is supported in POSIX locale: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/diff.html#tag_20_34_10 +func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimestamp, newTimestamp *time.Time, err error) { + if r.fileHeaderLine != nil { + if isOnlyMessage, source, filename := parseOnlyInMessage(r.fileHeaderLine); isOnlyMessage { + return filepath.Join(string(source), string(filename)), + "", nil, nil, nil + } + } + + origName, origTimestamp, err = r.readOneFileHeader([]byte("--- ")) + if err != nil { + return "", "", nil, nil, err + } + + newName, newTimestamp, err = r.readOneFileHeader([]byte("+++ ")) + if err != nil { + return "", "", nil, nil, err + } + + unquotedOrigName, err := strconv.Unquote(origName) + if err == nil { + origName = unquotedOrigName + } + unquotedNewName, err := strconv.Unquote(newName) + if err == nil { + newName = unquotedNewName + } + + return origName, newName, origTimestamp, newTimestamp, nil +} + +// readOneFileHeader reads one of the file headers (prefix should be +// either "+++ " or "--- "). 
+func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, timestamp *time.Time, err error) { + var line []byte + + if r.fileHeaderLine == nil { + var err error + line, err = readLine(r.reader) + if err == io.EOF { + return "", nil, &ParseError{r.line, r.offset, ErrNoFileHeader} + } else if err != nil { + return "", nil, err + } + } else { + line = r.fileHeaderLine + r.fileHeaderLine = nil + } + + if !bytes.HasPrefix(line, prefix) { + return "", nil, &ParseError{r.line, r.offset, ErrBadFileHeader} + } + + r.offset += int64(len(line)) + r.line++ + line = line[len(prefix):] + + trimmedLine := strings.TrimSpace(string(line)) // filenames that contain spaces may be terminated by a tab + parts := strings.SplitN(trimmedLine, "\t", 2) + filename = parts[0] + if len(parts) == 2 { + // Timestamp is optional, but this header has it. + ts, err := time.Parse(diffTimeParseLayout, parts[1]) + if err != nil { + return "", nil, err + } + timestamp = &ts + } + + return filename, timestamp, err +} + +// OverflowError is returned when we have overflowed into the start +// of the next file while reading extended headers. +type OverflowError string + +func (e OverflowError) Error() string { + return fmt.Sprintf("overflowed into next file: %s", string(e)) +} + +// ReadExtendedHeaders reads the extended header lines, if any, from a +// unified diff file (e.g., git's "diff --git a/foo.go b/foo.go", "new +// mode ", "rename from ", etc.). +func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) { + var xheaders []string + firstLine := true + for { + var line []byte + if r.fileHeaderLine == nil { + var err error + line, err = readLine(r.reader) + if err == io.EOF { + return xheaders, &ParseError{r.line, r.offset, ErrExtendedHeadersEOF} + } else if err != nil { + return xheaders, err + } + } else { + line = r.fileHeaderLine + r.fileHeaderLine = nil + } + + if bytes.HasPrefix(line, []byte("diff --git ")) { + if firstLine { + firstLine = false + } else { + return xheaders, OverflowError(line) + } + } + if bytes.HasPrefix(line, []byte("--- ")) { + // We've reached the file header. + r.fileHeaderLine = line // pass to readOneFileHeader (see fileHeaderLine field doc) + return xheaders, nil + } + + // Reached message that file is added/deleted + if isOnlyInMessage, _, _ := parseOnlyInMessage(line); isOnlyInMessage { + r.fileHeaderLine = line // pass to readOneFileHeader (see fileHeaderLine field doc) + return xheaders, nil + } + + r.line++ + r.offset += int64(len(line)) + xheaders = append(xheaders, string(line)) + } +} + +// handleEmpty detects when FileDiff was an empty diff and will not have any hunks +// that follow. It updates fd fields from the parsed extended headers. 
+func handleEmpty(fd *FileDiff) (wasEmpty bool) { + var err error + lineCount := len(fd.Extended) + if lineCount > 0 && !strings.HasPrefix(fd.Extended[0], "diff --git ") { + return false + } + switch { + case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && + strings.HasPrefix(fd.Extended[1], "new file mode "): + + names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + fd.OrigName = "/dev/null" + fd.NewName, err = strconv.Unquote(names[1]) + if err != nil { + fd.NewName = names[1] + } + return true + case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && + strings.HasPrefix(fd.Extended[1], "deleted file mode "): + + names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + fd.OrigName, err = strconv.Unquote(names[0]) + if err != nil { + fd.OrigName = names[0] + } + fd.NewName = "/dev/null" + return true + case lineCount == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): + names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + fd.OrigName, err = strconv.Unquote(names[0]) + if err != nil { + fd.OrigName = names[0] + } + fd.NewName, err = strconv.Unquote(names[1]) + if err != nil { + fd.NewName = names[1] + } + return true + case lineCount == 6 && strings.HasPrefix(fd.Extended[5], "Binary files ") && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): + names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + fd.OrigName = names[0] + fd.NewName = names[1] + return true + case lineCount == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") || lineCount > 3 && strings.HasPrefix(fd.Extended[2], "GIT binary patch"): + names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + fd.OrigName, err = strconv.Unquote(names[0]) + if err != nil { + fd.OrigName = names[0] + } + fd.NewName, err = strconv.Unquote(names[1]) + if err != nil { + fd.NewName = names[1] + } + return true + default: + return false + } +} + +var ( + // ErrNoFileHeader is when a file unified diff has no file header + // (i.e., the lines that begin with "---" and "+++"). + ErrNoFileHeader = errors.New("expected file header, got EOF") + + // ErrBadFileHeader is when a file unified diff has a malformed + // file header (i.e., the lines that begin with "---" and "+++"). + ErrBadFileHeader = errors.New("bad file header") + + // ErrExtendedHeadersEOF is when an EOF was encountered while reading extended file headers, which means that there were no ---/+++ headers encountered before hunks (if any) began. + ErrExtendedHeadersEOF = errors.New("expected file header while reading extended headers, got EOF") + + // ErrBadOnlyInMessage is when a file have a malformed `only in` message + // Should be in format `Only in {source}: {filename}` + ErrBadOnlyInMessage = errors.New("bad 'only in' message") +) + +// ParseHunks parses hunks from a unified diff. The diff must consist +// only of hunks and not include a file header; if it has a file +// header, use ParseFileDiff. 
+func ParseHunks(diff []byte) ([]*Hunk, error) { + r := NewHunksReader(bytes.NewReader(diff)) + hunks, err := r.ReadAllHunks() + if err != nil { + return nil, err + } + return hunks, nil +} + +// NewHunksReader returns a new HunksReader that reads unified diff hunks +// from r. +func NewHunksReader(r io.Reader) *HunksReader { + return &HunksReader{reader: bufio.NewReader(r)} +} + +// A HunksReader reads hunks from a unified diff. +type HunksReader struct { + line int + offset int64 + hunk *Hunk + reader *bufio.Reader + + nextHunkHeaderLine []byte +} + +// ReadHunk reads one hunk from r. If there are no more hunks, it +// returns error io.EOF. +func (r *HunksReader) ReadHunk() (*Hunk, error) { + r.hunk = nil + lastLineFromOrig := true + var line []byte + var err error + for { + if r.nextHunkHeaderLine != nil { + // Use stored hunk header line that was scanned in at the + // completion of the previous hunk's ReadHunk. + line = r.nextHunkHeaderLine + r.nextHunkHeaderLine = nil + } else { + line, err = readLine(r.reader) + if err != nil { + if err == io.EOF && r.hunk != nil { + return r.hunk, nil + } + return nil, err + } + } + + // Record position. + r.line++ + r.offset += int64(len(line)) + + if r.hunk == nil { + // Check for presence of hunk header. + if !bytes.HasPrefix(line, hunkPrefix) { + return nil, &ParseError{r.line, r.offset, ErrNoHunkHeader} + } + + // Parse hunk header. + r.hunk = &Hunk{} + items := []interface{}{ + &r.hunk.OrigStartLine, &r.hunk.OrigLines, + &r.hunk.NewStartLine, &r.hunk.NewLines, + } + header, section, err := normalizeHeader(string(line)) + if err != nil { + return nil, &ParseError{r.line, r.offset, err} + } + n, err := fmt.Sscanf(header, hunkHeader, items...) + if err != nil { + return nil, err + } + if n < len(items) { + return nil, &ParseError{r.line, r.offset, &ErrBadHunkHeader{header: string(line)}} + } + + r.hunk.Section = section + } else { + // Read hunk body line. + + // If the line starts with `---` and the next one with `+++` we're + // looking at a non-extended file header and need to abort. + if bytes.HasPrefix(line, []byte("---")) { + ok, err := peekPrefix(r.reader, "+++") + if err != nil { + return r.hunk, err + } + if ok { + return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + } + } + + // If the line starts with the hunk prefix, this hunk is complete. + if bytes.HasPrefix(line, hunkPrefix) { + // But we've already read in the next hunk's + // header, so we need to be sure that the next call to + // ReadHunk starts with that header. + r.nextHunkHeaderLine = line + + // Rewind position. + r.line-- + r.offset -= int64(len(line)) + + return r.hunk, nil + } + + if len(line) >= 1 && !linePrefix(line[0]) { + // Bad hunk header line. If we're reading a multi-file + // diff, this may be the end of the current + // file. Return a "rich" error that lets our caller + // handle that case. + return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + } + if bytes.Equal(line, []byte(noNewlineMessage)) { + if lastLineFromOrig { + // Retain the newline in the body (otherwise the + // diff line would be like "-a+b", where "+b" is + // the the next line of the new file, which is not + // validly formatted) but record that the orig had + // no newline. + r.hunk.OrigNoNewlineAt = int32(len(r.hunk.Body)) + } else { + // Remove previous line's newline. 
+ if len(r.hunk.Body) != 0 { + r.hunk.Body = r.hunk.Body[:len(r.hunk.Body)-1] + } + } + continue + } + + if len(line) > 0 { + lastLineFromOrig = line[0] == '-' + } + + r.hunk.Body = append(r.hunk.Body, line...) + r.hunk.Body = append(r.hunk.Body, '\n') + } + } +} + +const noNewlineMessage = `\ No newline at end of file` + +// linePrefixes is the set of all characters a valid line in a diff +// hunk can start with. '\' can appear in diffs when no newline is +// present at the end of a file. +// See: 'http://www.gnu.org/software/diffutils/manual/diffutils.html#Incomplete-Lines' +var linePrefixes = []byte{' ', '-', '+', '\\'} + +// linePrefix returns true if 'c' is in 'linePrefixes'. +func linePrefix(c byte) bool { + for _, p := range linePrefixes { + if p == c { + return true + } + } + return false +} + +// peekPrefix peeks into the given reader to check whether the next +// bytes match the given prefix. +func peekPrefix(reader *bufio.Reader, prefix string) (bool, error) { + next, err := reader.Peek(len(prefix)) + if err != nil { + if err == io.EOF { + return false, nil + } + return false, err + } + return bytes.HasPrefix(next, []byte(prefix)), nil +} + +// normalizeHeader takes a header of the form: +// "@@ -linestart[,chunksize] +linestart[,chunksize] @@ section" +// and returns two strings, with the first in the form: +// "@@ -linestart,chunksize +linestart,chunksize @@". +// where linestart and chunksize are both integers. The second is the +// optional section header. chunksize may be omitted from the header +// if its value is 1. normalizeHeader returns an error if the header +// is not in the correct format. +func normalizeHeader(header string) (string, string, error) { + // Split the header into five parts: the first '@@', the two + // ranges, the last '@@', and the optional section. + pieces := strings.SplitN(header, " ", 5) + if len(pieces) < 4 { + return "", "", &ErrBadHunkHeader{header: header} + } + + if pieces[0] != "@@" { + return "", "", &ErrBadHunkHeader{header: header} + } + for i := 1; i < 3; i++ { + if !strings.ContainsRune(pieces[i], ',') { + pieces[i] = pieces[i] + ",1" + } + } + if pieces[3] != "@@" { + return "", "", &ErrBadHunkHeader{header: header} + } + + var section string + if len(pieces) == 5 { + section = pieces[4] + } + return strings.Join(pieces, " "), strings.TrimSpace(section), nil +} + +// ReadAllHunks reads all remaining hunks from r. A successful call +// returns err == nil, not err == EOF. Because ReadAllHunks is defined +// to read until EOF, it does not treat end of file as an error to be +// reported. +func (r *HunksReader) ReadAllHunks() ([]*Hunk, error) { + var hunks []*Hunk + linesRead := int32(0) + for { + hunk, err := r.ReadHunk() + if err == io.EOF { + return hunks, nil + } + if hunk != nil { + linesRead++ // account for the hunk header line + hunk.StartPosition = linesRead + hunks = append(hunks, hunk) + linesRead += int32(bytes.Count(hunk.Body, []byte{'\n'})) + } + if err != nil { + return hunks, err + } + } +} + +// parseOnlyInMessage checks if line is a "Only in {source}: {filename}" and returns source and filename +func parseOnlyInMessage(line []byte) (bool, []byte, []byte) { + if !bytes.HasPrefix(line, onlyInMessagePrefix) { + return false, nil, nil + } + line = line[len(onlyInMessagePrefix):] + idx := bytes.Index(line, []byte(": ")) + if idx < 0 { + return false, nil, nil + } + return true, line[:idx], line[idx+2:] +} + +// A ParseError is a description of a unified diff syntax error. 
+type ParseError struct { + Line int // Line where the error occurred + Offset int64 // Offset where the error occurred + Err error // The actual error +} + +func (e *ParseError) Error() string { + return fmt.Sprintf("line %d, char %d: %s", e.Line, e.Offset, e.Err) +} + +// ErrNoHunkHeader indicates that a unified diff hunk header was +// expected but not found during parsing. +var ErrNoHunkHeader = errors.New("no hunk header") + +// ErrBadHunkHeader indicates that a malformed unified diff hunk +// header was encountered during parsing. +type ErrBadHunkHeader struct { + header string +} + +func (e *ErrBadHunkHeader) Error() string { + if e.header == "" { + return "bad hunk header" + } + return "bad hunk header: " + e.header +} + +// ErrBadHunkLine is when a line not beginning with ' ', '-', '+', or +// '\' is encountered while reading a hunk. In the context of reading +// a single hunk or file, it is an unexpected error. In a multi-file +// diff, however, it indicates that the current file's diff is +// complete (and remaining diff data will describe another file +// unified diff). +type ErrBadHunkLine struct { + Line []byte +} + +func (e *ErrBadHunkLine) Error() string { + m := "bad hunk line (does not start with ' ', '-', '+', or '\\')" + if len(e.Line) == 0 { + return m + } + return m + ": " + string(e.Line) +} diff --git a/vendor/github.com/sourcegraph/go-diff/diff/print.go b/vendor/github.com/sourcegraph/go-diff/diff/print.go new file mode 100644 index 000000000..012651a33 --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/diff/print.go @@ -0,0 +1,141 @@ +package diff + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "time" +) + +// PrintMultiFileDiff prints a multi-file diff in unified diff format. +func PrintMultiFileDiff(ds []*FileDiff) ([]byte, error) { + var buf bytes.Buffer + for _, d := range ds { + diff, err := PrintFileDiff(d) + if err != nil { + return nil, err + } + if _, err := buf.Write(diff); err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +// PrintFileDiff prints a FileDiff in unified diff format. +// +// TODO(sqs): handle escaping whitespace/etc. chars in filenames +func PrintFileDiff(d *FileDiff) ([]byte, error) { + var buf bytes.Buffer + + for _, xheader := range d.Extended { + if _, err := fmt.Fprintln(&buf, xheader); err != nil { + return nil, err + } + } + + // FileDiff is added/deleted file + // No further hunks printing needed + if d.NewName == "" { + _, err := fmt.Fprintf(&buf, onlyInMessage, filepath.Dir(d.OrigName), filepath.Base(d.OrigName)) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + if d.Hunks == nil { + return buf.Bytes(), nil + } + + if err := printFileHeader(&buf, "--- ", d.OrigName, d.OrigTime); err != nil { + return nil, err + } + if err := printFileHeader(&buf, "+++ ", d.NewName, d.NewTime); err != nil { + return nil, err + } + + ph, err := PrintHunks(d.Hunks) + if err != nil { + return nil, err + } + + if _, err := buf.Write(ph); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func printFileHeader(w io.Writer, prefix string, filename string, timestamp *time.Time) error { + if _, err := fmt.Fprint(w, prefix, filename); err != nil { + return err + } + if timestamp != nil { + if _, err := fmt.Fprint(w, "\t", timestamp.Format(diffTimeFormatLayout)); err != nil { + return err + } + } + if _, err := fmt.Fprintln(w); err != nil { + return err + } + return nil +} + +// PrintHunks prints diff hunks in unified diff format. 
+func PrintHunks(hunks []*Hunk) ([]byte, error) { + var buf bytes.Buffer + for _, hunk := range hunks { + _, err := fmt.Fprintf(&buf, + "@@ -%d,%d +%d,%d @@", hunk.OrigStartLine, hunk.OrigLines, hunk.NewStartLine, hunk.NewLines, + ) + if err != nil { + return nil, err + } + if hunk.Section != "" { + _, err := fmt.Fprint(&buf, " ", hunk.Section) + if err != nil { + return nil, err + } + } + if _, err := fmt.Fprintln(&buf); err != nil { + return nil, err + } + + if hunk.OrigNoNewlineAt == 0 { + if _, err := buf.Write(hunk.Body); err != nil { + return nil, err + } + } else { + if _, err := buf.Write(hunk.Body[:hunk.OrigNoNewlineAt]); err != nil { + return nil, err + } + if err := printNoNewlineMessage(&buf); err != nil { + return nil, err + } + if _, err := buf.Write(hunk.Body[hunk.OrigNoNewlineAt:]); err != nil { + return nil, err + } + } + + if !bytes.HasSuffix(hunk.Body, []byte{'\n'}) { + if _, err := fmt.Fprintln(&buf); err != nil { + return nil, err + } + if err := printNoNewlineMessage(&buf); err != nil { + return nil, err + } + } + } + return buf.Bytes(), nil +} + +func printNoNewlineMessage(w io.Writer) error { + if _, err := w.Write([]byte(noNewlineMessage)); err != nil { + return err + } + if _, err := fmt.Fprintln(w); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go new file mode 100644 index 000000000..395fb7baf --- /dev/null +++ b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go @@ -0,0 +1,37 @@ +package diff + +import ( + "bufio" + "io" +) + +// readLine is a helper that mimics the functionality of calling bufio.Scanner.Scan() and +// bufio.Scanner.Bytes(), but without the token size limitation. It will read and return +// the next line in the Reader with the trailing newline stripped. It will return an +// io.EOF error when there is nothing left to read (at the start of the function call). It +// will return any other errors it receives from the underlying call to ReadBytes. +func readLine(r *bufio.Reader) ([]byte, error) { + line_, err := r.ReadBytes('\n') + if err == io.EOF { + if len(line_) == 0 { + return nil, io.EOF + } + + // ReadBytes returned io.EOF, because it didn't find another newline, but there is + // still the remainder of the file to return as a line. + line := line_ + return line, nil + } else if err != nil { + return nil, err + } + line := line_[0 : len(line_)-1] + return dropCR(line), nil +} + +// dropCR drops a terminal \r from the data. +func dropCR(data []byte) []byte { + if len(data) > 0 && data[len(data)-1] == '\r' { + return data[0 : len(data)-1] + } + return data +} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore new file mode 100644 index 000000000..9c1d98611 --- /dev/null +++ b/vendor/github.com/spf13/afero/.gitignore @@ -0,0 +1,2 @@ +sftpfs/file1 +sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml new file mode 100644 index 000000000..e944f5947 --- /dev/null +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -0,0 +1,26 @@ +sudo: false +language: go +arch: + - amd64 + - ppc64e + +go: + - "1.14" + - "1.15" + - "1.16" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build -v ./... + - go test -count=1 -cover -race -v ./... + - go vet ./... + - FILES=$(gofmt -s -l . 
zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 000000000..298f0e266 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md new file mode 100644 index 000000000..fb8eaaf89 --- /dev/null +++ b/vendor/github.com/spf13/afero/README.md @@ -0,0 +1,430 @@ +![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) + +A FileSystem Abstraction System for Go + +[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +# Overview + +Afero is a filesystem framework providing a simple, uniform and universal API +interacting with any filesystem, as an abstraction layer providing interfaces, +types and methods. Afero has an exceptionally clean interface and simple design +without needless constructors or initialization methods. + +Afero is also a library providing a base set of interoperable backend +filesystems that make it easy to work with afero while retaining all the power +and benefit of the os and ioutil packages. + +Afero provides significant improvements over using the os package alone, most +notably the ability to create mock and testing filesystems without relying on the disk. + +It is suitable for use in any situation where you would consider using the OS +package as it provides an additional abstraction that makes it easy to use a +memory backed file system during testing. It also adds support for the http +filesystem for full interoperability. + + +## Afero Features + +* A single consistent API for accessing a variety of filesystems +* Interoperation between a variety of file system types +* A set of interfaces to encourage and enforce interoperability between backends +* An atomic cross platform memory backed file system +* Support for compositional (union) file systems by combining multiple file systems acting as one +* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) +* A set of utility functions ported from io, ioutil & hugo to be afero aware +* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` + +# Using Afero + +Afero is easy to use and easier to adopt. + +A few different ways you could use Afero: + +* Use the interfaces alone to define your own file system. +* Wrapper for the OS packages. +* Define different filesystems for different parts of your application. +* Use Afero for mock filesystems while testing + +## Step 1: Install Afero + +First use go get to install the latest version of the library. + + $ go get github.com/spf13/afero + +Next include Afero in your application. +```go +import "github.com/spf13/afero" +``` + +## Step 2: Declare a backend + +First define a package variable and set it to a pointer to a filesystem. +```go +var AppFs = afero.NewMemMapFs() + +or + +var AppFs = afero.NewOsFs() +``` +It is important to note that if you repeat the composite literal you +will be using a completely new and isolated filesystem. In the case of +OsFs it will still use the same underlying filesystem but will reduce +the ability to drop in other filesystems as desired. 
+ +## Step 3: Use it like you would the OS package + +Throughout your application use any function and method like you normally +would. + +So if my application before had: +```go +os.Open('/tmp/foo') +``` +We would replace it with: +```go +AppFs.Open('/tmp/foo') +``` + +`AppFs` being the variable we defined above. + + +## List of all available functions + +File System Methods Available: +```go +Chmod(name string, mode os.FileMode) : error +Chown(name string, uid, gid int) : error +Chtimes(name string, atime time.Time, mtime time.Time) : error +Create(name string) : File, error +Mkdir(name string, perm os.FileMode) : error +MkdirAll(path string, perm os.FileMode) : error +Name() : string +Open(name string) : File, error +OpenFile(name string, flag int, perm os.FileMode) : File, error +Remove(name string) : error +RemoveAll(path string) : error +Rename(oldname, newname string) : error +Stat(name string) : os.FileInfo, error +``` +File Interfaces and Methods Available: +```go +io.Closer +io.Reader +io.ReaderAt +io.Seeker +io.Writer +io.WriterAt + +Name() : string +Readdir(count int) : []os.FileInfo, error +Readdirnames(n int) : []string, error +Stat() : os.FileInfo, error +Sync() : error +Truncate(size int64) : error +WriteString(s string) : ret int, err error +``` +In some applications it may make sense to define a new package that +simply exports the file system variable for easy access from anywhere. + +## Using Afero's utility functions + +Afero provides a set of functions to make it easier to use the underlying file systems. +These functions have been primarily ported from io & ioutil with some developed for Hugo. + +The afero utilities support all afero compatible backends. + +The list of utilities includes: + +```go +DirExists(path string) (bool, error) +Exists(path string) (bool, error) +FileContainsBytes(filename string, subslice []byte) (bool, error) +GetTempDir(subPath string) string +IsDir(path string) (bool, error) +IsEmpty(path string) (bool, error) +ReadDir(dirname string) ([]os.FileInfo, error) +ReadFile(filename string) ([]byte, error) +SafeWriteReader(path string, r io.Reader) (err error) +TempDir(dir, prefix string) (name string, err error) +TempFile(dir, prefix string) (f File, err error) +Walk(root string, walkFn filepath.WalkFunc) error +WriteFile(filename string, data []byte, perm os.FileMode) error +WriteReader(path string, r io.Reader) (err error) +``` +For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) + +They are available under two different approaches to use. You can either call +them directly where the first parameter of each function will be the file +system, or you can declare a new `Afero`, a custom type used to bind these +functions as methods to a given filesystem. + +### Calling utilities directly + +```go +fs := new(afero.MemMapFs) +f, err := afero.TempFile(fs,"", "ioutil-test") + +``` + +### Calling via Afero + +```go +fs := afero.NewMemMapFs() +afs := &afero.Afero{Fs: fs} +f, err := afs.TempFile("", "ioutil-test") +``` + +## Using Afero for Testing + +There is a large benefit to using a mock filesystem for testing. It has a +completely blank state every time it is initialized and can be easily +reproducible regardless of OS. You could create files to your heart’s content +and the file access would be fast while also saving you from all the annoying +issues with deleting temporary files, Windows file locking, etc. The MemMapFs +backend is perfect for testing. 
+ +* Much faster than performing I/O operations on disk +* Avoid security issues and permissions +* Far more control. 'rm -rf /' with confidence +* Test setup is far more easier to do +* No test cleanup needed + +One way to accomplish this is to define a variable as mentioned above. +In your application this will be set to afero.NewOsFs() during testing you +can set it to afero.NewMemMapFs(). + +It wouldn't be uncommon to have each test initialize a blank slate memory +backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere +appropriate in my application code. This approach ensures that Tests are order +independent, with no test relying on the state left by an earlier test. + +Then in my tests I would initialize a new MemMapFs for each test: +```go +func TestExist(t *testing.T) { + appFS := afero.NewMemMapFs() + // create test files and directories + appFS.MkdirAll("src/a", 0755) + afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) + afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) + name := "src/c" + _, err := appFS.Stat(name) + if os.IsNotExist(err) { + t.Errorf("file \"%s\" does not exist.\n", name) + } +} +``` + +# Available Backends + +## Operating System Native + +### OsFs + +The first is simply a wrapper around the native OS calls. This makes it +very easy to use as all of the calls are the same as the existing OS +calls. It also makes it trivial to have your code use the OS during +operation and a mock filesystem during testing or as needed. + +```go +appfs := afero.NewOsFs() +appfs.MkdirAll("src/a", 0755) +``` + +## Memory Backed Storage + +### MemMapFs + +Afero also provides a fully atomic memory backed filesystem perfect for use in +mocking and to speed up unnecessary disk io when persistence isn’t +necessary. It is fully concurrent and will work within go routines +safely. + +```go +mm := afero.NewMemMapFs() +mm.MkdirAll("src/a", 0755) +``` + +#### InMemoryFile + +As part of MemMapFs, Afero also provides an atomic, fully concurrent memory +backed file implementation. This can be used in other memory backed file +systems with ease. Plans are to add a radix tree memory stored file +system using InMemoryFile. + +## Network Interfaces + +### SftpFs + +Afero has experimental support for secure file transfer protocol (sftp). Which can +be used to perform file operations over a encrypted channel. + +## Filtering Backends + +### BasePathFs + +The BasePathFs restricts all operations to a given path within an Fs. +The given file name to the operations on this Fs will be prepended with +the base path before calling the source Fs. + +```go +bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") +``` + +### ReadOnlyFs + +A thin wrapper around the source Fs providing a read only view. + +```go +fs := afero.NewReadOnlyFs(afero.NewOsFs()) +_, err := fs.Create("/file.txt") +// err = syscall.EPERM +``` + +# RegexpFs + +A filtered view on file names, any file NOT matching +the passed regexp will be treated as non-existing. +Files not matching the regexp provided will not be created. +Directories are not filtered. + +```go +fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) +_, err := fs.Create("/file.html") +// err = syscall.ENOENT +``` + +### HttpFs + +Afero provides an http compatible backend which can wrap any of the existing +backends. + +The Http package requires a slightly specific version of Open which +returns an http.File type. + +Afero provides an httpFs file system which satisfies this requirement. 
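Before the short snippet that follows, here is a slightly fuller, hypothetical wiring of `HttpFs` (the in-memory backend and file contents are made up for illustration): `NewHttpFs` wraps an existing `afero.Fs`, and `Dir` takes the base path to serve.

```go
package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	// Hypothetical content: any existing afero.Fs could be used instead.
	mmfs := afero.NewMemMapFs()
	if err := afero.WriteFile(mmfs, "index.html", []byte("<h1>hello</h1>"), 0644); err != nil {
		log.Fatal(err)
	}

	// NewHttpFs wraps the source Fs; Dir selects the base path to serve.
	httpFs := afero.NewHttpFs(mmfs)
	fileserver := http.FileServer(httpFs.Dir("/"))
	http.Handle("/", fileserver)

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Any other backend, such as `afero.NewOsFs()`, could be substituted for the `MemMapFs` here.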
+Any Afero FileSystem can be used as an httpFs. + +```go +httpFs := afero.NewHttpFs() +fileserver := http.FileServer(httpFs.Dir()) +http.Handle("/", fileserver) +``` + +## Composite Backends + +Afero provides the ability have two filesystems (or more) act as a single +file system. + +### CacheOnReadFs + +The CacheOnReadFs will lazily make copies of any accessed files from the base +layer into the overlay. Subsequent reads will be pulled from the overlay +directly permitting the request is within the cache duration of when it was +created in the overlay. + +If the base filesystem is writeable, any changes to files will be +done first to the base, then to the overlay layer. Write calls to open file +handles like `Write()` or `Truncate()` to the overlay first. + +To writing files to the overlay only, you can use the overlay Fs directly (not +via the union Fs). + +Cache files in the layer for the given time.Duration, a cache duration of 0 +means "forever" meaning the file will not be re-requested from the base ever. + +A read-only base will make the overlay also read-only but still copy files +from the base to the overlay when they're not present (or outdated) in the +caching layer. + +```go +base := afero.NewOsFs() +layer := afero.NewMemMapFs() +ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) +``` + +### CopyOnWriteFs() + +The CopyOnWriteFs is a read only base file system with a potentially +writeable layer on top. + +Read operations will first look in the overlay and if not found there, will +serve the file from the base. + +Changes to the file system will only be made in the overlay. + +Any attempt to modify a file found only in the base will copy the file to the +overlay layer before modification (including opening a file with a writable +handle). + +Removing and Renaming files present only in the base layer is not currently +permitted. If a file is present in the base layer and the overlay, only the +overlay will be removed/renamed. + +```go + base := afero.NewOsFs() + roBase := afero.NewReadOnlyFs(base) + ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) + + fh, _ = ufs.Create("/home/test/file2.txt") + fh.WriteString("This is a test") + fh.Close() +``` + +In this example all write operations will only occur in memory (MemMapFs) +leaving the base filesystem (OsFs) untouched. + + +## Desired/possible backends + +The following is a short list of possible backends we hope someone will +implement: + +* SSH +* S3 + +# About the project + +## What's in the name + +Afero comes from the latin roots Ad-Facere. + +**"Ad"** is a prefix meaning "to". + +**"Facere"** is a form of the root "faciō" making "make or do". + +The literal meaning of afero is "to make" or "to do" which seems very fitting +for a library that allows one to make files and directories and do things with them. + +The English word that shares the same roots as Afero is "affair". Affair shares +the same concept but as a noun it means "something that is made or done" or "an +object of a particular type". + +It's also nice that unlike some of my other libraries (hugo, cobra, viper) it +Googles very well. + +## Release Notes + +See the [Releases Page](https://github.com/spf13/afero/releases). + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. 
Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13) +* [jaqx0r](https://github.com/jaqx0r) +* [mbertschler](https://github.com/mbertschler) +* [xor-gate](https://github.com/xor-gate) + +## License + +Afero is released under the Apache 2.0 license. See +[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 000000000..469ff7d2d --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,111 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + // Chmod changes the mode of the named file to mode. 
+ Chmod(name string, mode os.FileMode) error + + // Chown changes the uid and gid of the named file. + Chown(name string, uid, gid int) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 000000000..5d2f34bf1 --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,15 @@ +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + + go env + + go get -v github.com/spf13/afero/... + + go build -v github.com/spf13/afero/... +test_script: +- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 000000000..4f9832829 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,211 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chown", Path: name, Err: err} + } + return b.source.Chown(name, uid, gid) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return 
lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { + oldname, err := b.RealPath(oldname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + newname, err = b.RealPath(newname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + if linker, ok := b.source.(Linker); ok { + return linker.SymlinkIfPossible(oldname, newname) + } + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { + name, err := b.RealPath(name) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + if reader, ok := b.source.(LinkReader); ok { + return reader.ReadlinkIfPossible(name) + } + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 000000000..71471aa25 --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,311 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. 
+type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chown(name, uid, gid) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chown(name, uid, gid) + } + if err != nil { + return err + } + return u.layer.Chown(name, uid, gid) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) 
Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 000000000..18b45824b --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 000000000..2b850e4dd --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,26 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd +// +build !aix + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 000000000..6ff8f3099 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,326 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes(), Chmod() and Chown()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chown(name, uid, gid) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { + if slayer, ok := u.layer.(Linker); ok { + return slayer.SymlinkIfPossible(oldname, newname) + } + + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { + if rlayer, ok := u.layer.(LinkReader); ok { + return rlayer.ReadlinkIfPossible(name) + } + + if rbase, ok := u.base.(LinkReader); ok { + return rbase.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. 
If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. +func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod new file mode 100644 index 000000000..abe4fe1cf --- /dev/null +++ b/vendor/github.com/spf13/afero/go.mod @@ -0,0 +1,9 @@ +module github.com/spf13/afero + +require ( + github.com/pkg/sftp v1.10.1 + golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 + golang.org/x/text v0.3.3 +) + +go 1.13 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum new file mode 100644 index 000000000..89d9bfbc4 --- /dev/null +++ b/vendor/github.com/spf13/afero/go.sum @@ -0,0 +1,29 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 
h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 000000000..2b86e30d1 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,114 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chown(name string, uid, gid int) error { + return h.source.Chown(name, uid, gid) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go new file mode 100644 index 000000000..c80345536 --- /dev/null +++ b/vendor/github.com/spf13/afero/iofs.go @@ -0,0 +1,288 @@ +// +build go1.16 + +package afero + +import ( + "io" + "io/fs" + "os" + "path" + "time" +) + +// IOFS adopts afero.Fs to stdlib io/fs.FS +type IOFS struct { + Fs +} + +func NewIOFS(fs Fs) IOFS { + return IOFS{Fs: fs} +} + +var ( + _ fs.FS = IOFS{} + _ fs.GlobFS = IOFS{} + _ fs.ReadDirFS = IOFS{} + _ fs.ReadFileFS = IOFS{} + _ fs.StatFS = IOFS{} + _ fs.SubFS = IOFS{} +) + +func (iofs IOFS) Open(name string) (fs.File, error) { + const op = "open" + + // by convention for fs.FS implementations we should perform this check + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + file, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + // file should implement fs.ReadDirFile + if _, ok := file.(fs.ReadDirFile); !ok { + file = readDirFile{file} + } + + return file, nil +} + +func (iofs IOFS) Glob(pattern string) ([]string, error) { + const op = "glob" + + // afero.Glob does not perform this check but it's required for implementations + if _, err := path.Match(pattern, ""); err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + items, err := Glob(iofs.Fs, pattern) + if err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + return items, nil +} + +func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { + items, err := ReadDir(iofs.Fs, name) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = dirEntry{items[i]} + } + + return ret, nil +} + +func (iofs IOFS) ReadFile(name 
string) ([]byte, error) { + const op = "readfile" + + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + bytes, err := ReadFile(iofs.Fs, name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + return bytes, nil +} + +func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } + +func (IOFS) wrapError(op, path string, err error) error { + if _, ok := err.(*fs.PathError); ok { + return err // don't need to wrap again + } + + return &fs.PathError{ + Op: op, + Path: path, + Err: err, + } +} + +// dirEntry provides adapter from os.FileInfo to fs.DirEntry +type dirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = dirEntry{} + +func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } + +// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open +type readDirFile struct { + File +} + +var _ fs.ReadDirFile = readDirFile{} + +func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + items, err := r.File.Readdir(n) + if err != nil { + return nil, err + } + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = dirEntry{items[i]} + } + + return ret, nil +} + +// FromIOFS adopts io/fs.FS to use it as afero.Fs +// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission +// To store modifications you may use afero.CopyOnWriteFs +type FromIOFS struct { + fs.FS +} + +var _ Fs = FromIOFS{} + +func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } + +func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } + +func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { + return notImplemented("mkdirall", path) +} + +func (f FromIOFS) Open(name string) (File, error) { + file, err := f.FS.Open(name) + if err != nil { + return nil, err + } + + return fromIOFSFile{File: file, name: name}, nil +} + +func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return f.Open(name) +} + +func (f FromIOFS) Remove(name string) error { + return notImplemented("remove", name) +} + +func (f FromIOFS) RemoveAll(path string) error { + return notImplemented("removeall", path) +} + +func (f FromIOFS) Rename(oldname, newname string) error { + return notImplemented("rename", oldname) +} + +func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } + +func (f FromIOFS) Name() string { return "fromiofs" } + +func (f FromIOFS) Chmod(name string, mode os.FileMode) error { + return notImplemented("chmod", name) +} + +func (f FromIOFS) Chown(name string, uid, gid int) error { + return notImplemented("chown", name) +} + +func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { + return notImplemented("chtimes", name) +} + +type fromIOFSFile struct { + fs.File + name string +} + +func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { + readerAt, ok := f.File.(io.ReaderAt) + if !ok { + return -1, notImplemented("readat", f.name) + } + + return readerAt.ReadAt(p, off) +} + +func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { + seeker, ok := f.File.(io.Seeker) + if !ok { + return -1, notImplemented("seek", f.name) + } + + return seeker.Seek(offset, whence) +} + +func (f fromIOFSFile) Write(p []byte) (n int, err error) { + return -1, 
notImplemented("write", f.name) +} + +func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { + return -1, notImplemented("writeat", f.name) +} + +func (f fromIOFSFile) Name() string { return f.name } + +func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(count) + if err != nil { + return nil, err + } + + ret := make([]os.FileInfo, len(entries)) + for i := range entries { + ret[i], err = entries[i].Info() + + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(n) + if err != nil { + return nil, err + } + + ret := make([]string, len(entries)) + for i := range entries { + ret[i] = entries[i].Name() + } + + return ret, nil +} + +func (f fromIOFSFile) Sync() error { return nil } + +func (f fromIOFSFile) Truncate(size int64) error { + return notImplemented("truncate", f.name) +} + +func (f fromIOFSFile) WriteString(s string) (ret int, err error) { + return -1, notImplemented("writestring", f.name) +} + +func notImplemented(op, path string) error { + return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 000000000..a403133e2 --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,240 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. 
+func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextRandom() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting *os.File. +// The filename is generated by taking pattern and adding a random +// string to the end. If pattern includes a "*", the random string +// replaces the last "*". 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, pattern string) (f File, err error) { + return TempFile(a.Fs, dir, pattern) +} + +func TempFile(fs Fs, dir, pattern string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextRandom()+suffix) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextRandom()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go new file mode 100644 index 000000000..89c1bfc0a --- /dev/null +++ b/vendor/github.com/spf13/afero/lstater.go @@ -0,0 +1,27 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" +) + +// Lstater is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// Else it will call Stat. +// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. 
+type Lstater interface { + LstatIfPossible(name string) (os.FileInfo, bool, error) +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 000000000..7db4b7de6 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // Lstat not supported by a ll filesystems. + if _, err = lstatIfPossible(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.ContainsAny(path, "*?[") +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 000000000..e104013f4 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 000000000..03a57ee5b --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 000000000..5a20730c2 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,338 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" +) + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time + uid int + gid int +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func SetUID(f *FileData, uid int) { + f.Lock() + f.uid = uid + f.Unlock() +} + +func SetGID(f *FileData, gid int) { + f.Lock() + f.gid = gid + f.Unlock() +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, 
f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + prev := atomic.LoadInt64(&f.at) + atomic.StoreInt64(&f.at, off) + n, err = f.Read(b) + atomic.StoreInt64(&f.at, prev) + return +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + f.fileData.Lock() + defer f.fileData.Unlock() + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case io.SeekStart: + atomic.StoreInt64(&f.at, offset) + case io.SeekCurrent: + atomic.AddInt64(&f.at, offset) + case io.SeekEnd: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.closed == true { + return 0, ErrFileClosed + } + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 000000000..5c265f92b --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,404 @@ +// Copyright © 2014 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + root := mem.CreateDir(FilePathSeparator) + mem.SetMode(root, os.ModeDir|0755) + m.data[FilePathSeparator] = root + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file, 0) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, perm) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) + m.getData()[name] = item + m.registerWithParent(item, perm) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + perm &= chmodBits + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) + m.getData()[name] = item + m.registerWithParent(item, perm) + m.mu.Unlock() + + return m.setFileMode(name, perm|os.ModeDir) +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + perm &= chmodBits + chmod := false + file, err := m.openWrite(name) + if err == nil && (flag&os.O_EXCL > 0) { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} + } + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + return file, m.setFileMode(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + 
m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData, 0) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fileInfo, err := m.Stat(name) + return fileInfo, false, err +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + mode &= chmodBits + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits + + mode = prevOtherBits | mode + return m.setFileMode(name, mode) +} + +func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chown(name string, uid, gid int) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} + } + + mem.SetUID(f, uid) + mem.SetGID(f, gid) + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 000000000..f1366321e --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,113 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. 
+// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). +type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chown(name string, uid, gid int) error { + return os.Chown(name, uid, gid) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} + +func (OsFs) SymlinkIfPossible(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (OsFs) ReadlinkIfPossible(name string) (string, error) { + return os.Readlink(name) +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 000000000..18f60a0f6 --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 000000000..bd8f9264d --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,96 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { + if srdr, ok := r.source.(LinkReader); ok { + return srdr.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 000000000..ac359c62a --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,224 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Chown(name string, uid, gid int) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chown(name, uid, gid) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + if err != nil { + return nil, err + } + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err 
error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go new file mode 100644 index 000000000..d1c6ea53d --- /dev/null +++ b/vendor/github.com/spf13/afero/symlink.go @@ -0,0 +1,55 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" +) + +// Symlinker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It indicates support for 3 symlink related interfaces that implement the +// behaviors of the os methods: +// - Lstat +// - Symlink, and +// - Readlink +type Symlinker interface { + Lstater + Linker + LinkReader +} + +// Linker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, +// or the filesystem otherwise supports Symlink's. +type Linker interface { + SymlinkIfPossible(oldname, newname string) error +} + +// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system +// does not support Symlink's either directly or through its delegated filesystem. +// As expressed by support for the Linker interface. +var ErrNoSymlink = errors.New("symlink not supported") + +// LinkReader is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +type LinkReader interface { + ReadlinkIfPossible(name string) (string, error) +} + +// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system +// does not support the readlink operation either directly or through its delegated filesystem. +// As expressed by support for the LinkReader interface. +var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 000000000..985363eea --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,317 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. 
+// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. 
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) + } + files := f.files[f.off:] + + if c <= 0 { + return files, nil + } + + if len(files) == 0 { + return nil, io.EOF + } + + if c > len(files) { + c = len(files) + } + + defer func() { f.off += c }() + return files[:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
+ if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 000000000..4f253f481 --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. +const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. 
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. +func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslice), nil +} + +func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { + return FileContainsAnyBytes(a.Fs, filename, subslices) +} + +// Check if a file contains any of the specified byte slices. +func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslices...), nil +} + +// readerContains reports whether any of the subslices is within r. 
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. 
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore new file mode 100644 index 000000000..53053a8ac --- /dev/null +++ b/vendor/github.com/spf13/cast/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.bench diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml new file mode 100644 index 000000000..833a48799 --- /dev/null +++ b/vendor/github.com/spf13/cast/.travis.yml @@ -0,0 +1,16 @@ +language: go +env: + - GO111MODULE=on +sudo: required +go: + - "1.11.x" + - "1.12.x" + - tip +os: + - linux +matrix: + allow_failures: + - go: tip + fast_finish: true +script: + - make check diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE new file mode 100644 index 000000000..4527efb9c --- /dev/null +++ b/vendor/github.com/spf13/cast/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile new file mode 100644 index 000000000..f01a5dbb6 --- /dev/null +++ b/vendor/github.com/spf13/cast/Makefile @@ -0,0 +1,40 @@ +GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) + +.PHONY: check fmt lint test test-race vet test-cover-html help +.DEFAULT_GOAL := help + +check: test-race fmt vet lint ## Run tests and linters + +test: ## Run tests + go test ./... + +test-race: ## Run tests with race detector + go test -race ./... 
+ +fmt: ## Run gofmt linter +ifeq "$(GOVERSION)" "12" + @for d in `go list` ; do \ + if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ + echo "^ improperly formatted go files" && echo && exit 1; \ + fi \ + done +endif + +lint: ## Run golint linter + @for d in `go list` ; do \ + if [ "`golint $$d | tee /dev/stderr`" ]; then \ + echo "^ golint errors!" && echo && exit 1; \ + fi \ + done + +vet: ## Run go vet linter + @if [ "`go vet | tee /dev/stderr`" ]; then \ + echo "^ go vet errors!" && echo && exit 1; \ + fi + +test-cover-html: ## Generate test coverage report + go test -coverprofile=coverage.out -covermode=count + go tool cover -func=coverage.out + +help: + @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md new file mode 100644 index 000000000..e6939397d --- /dev/null +++ b/vendor/github.com/spf13/cast/README.md @@ -0,0 +1,75 @@ +cast +==== +[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) +[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) + +Easy and safe casting from one type to another in Go + +Don’t Panic! ... Cast + +## What is Cast? + +Cast is a library to convert between different go types in a consistent and easy way. + +Cast provides simple functions to easily convert a number to a string, an +interface into a bool, etc. Cast does this intelligently when an obvious +conversion is possible. It doesn’t make any attempts to guess what you meant, +for example you can only convert a string to an int when it is a string +representation of an int such as “8”. Cast was developed for use in +[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON +for meta data. + +## Why use Cast? + +When working with dynamic data in Go you often need to cast or convert the data +from one type into another. Cast goes beyond just using type assertion (though +it uses that when possible) to provide a very straightforward and convenient +library. + +If you are working with interfaces to handle things like dynamic content +you’ll need an easy way to convert an interface into a given type. This +is the library for you. + +If you are taking in data from YAML, TOML or JSON or other formats which lack +full types, then Cast is the library for you. + +## Usage + +Cast provides a handful of To_____ methods. These methods will always return +the desired type. **If input is provided that will not convert to that type, the +0 or nil value for that type will be returned**. + +Cast also provides identical methods To_____E. These return the same result as +the To_____ methods, plus an additional error which tells you if it successfully +converted. Using these methods you can tell the difference between when the +input matched the zero value or when the conversion failed and the zero value +was returned. + +The following examples are merely a sample of what is available. Please review +the code for a complete set. 
+ +### Example ‘ToString’: + + cast.ToString("mayonegg") // "mayonegg" + cast.ToString(8) // "8" + cast.ToString(8.31) // "8.31" + cast.ToString([]byte("one time")) // "one time" + cast.ToString(nil) // "" + + var foo interface{} = "one more time" + cast.ToString(foo) // "one more time" + + +### Example ‘ToInt’: + + cast.ToInt(8) // 8 + cast.ToInt(8.31) // 8 + cast.ToInt("8") // 8 + cast.ToInt(true) // 1 + cast.ToInt(false) // 0 + + var eight interface{} = 8 + cast.ToInt(eight) // 8 + cast.ToInt(nil) // 0 + diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go new file mode 100644 index 000000000..9fba638d4 --- /dev/null +++ b/vendor/github.com/spf13/cast/cast.go @@ -0,0 +1,171 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Package cast provides easy and safe casting in Go. +package cast + +import "time" + +// ToBool casts an interface to a bool type. +func ToBool(i interface{}) bool { + v, _ := ToBoolE(i) + return v +} + +// ToTime casts an interface to a time.Time type. +func ToTime(i interface{}) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToDuration casts an interface to a time.Duration type. +func ToDuration(i interface{}) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToFloat64 casts an interface to a float64 type. +func ToFloat64(i interface{}) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToFloat32 casts an interface to a float32 type. +func ToFloat32(i interface{}) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToInt64 casts an interface to an int64 type. +func ToInt64(i interface{}) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToInt32 casts an interface to an int32 type. +func ToInt32(i interface{}) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt16 casts an interface to an int16 type. +func ToInt16(i interface{}) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt8 casts an interface to an int8 type. +func ToInt8(i interface{}) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt casts an interface to an int type. +func ToInt(i interface{}) int { + v, _ := ToIntE(i) + return v +} + +// ToUint casts an interface to a uint type. +func ToUint(i interface{}) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint64 casts an interface to a uint64 type. +func ToUint64(i interface{}) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToUint32 casts an interface to a uint32 type. +func ToUint32(i interface{}) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint16 casts an interface to a uint16 type. +func ToUint16(i interface{}) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint8 casts an interface to a uint8 type. +func ToUint8(i interface{}) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToString casts an interface to a string type. +func ToString(i interface{}) string { + v, _ := ToStringE(i) + return v +} + +// ToStringMapString casts an interface to a map[string]string type. +func ToStringMapString(i interface{}) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts an interface to a map[string][]string type. +func ToStringMapStringSlice(i interface{}) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts an interface to a map[string]bool type. 
+func ToStringMapBool(i interface{}) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMapInt casts an interface to a map[string]int type. +func ToStringMapInt(i interface{}) map[string]int { + v, _ := ToStringMapIntE(i) + return v +} + +// ToStringMapInt64 casts an interface to a map[string]int64 type. +func ToStringMapInt64(i interface{}) map[string]int64 { + v, _ := ToStringMapInt64E(i) + return v +} + +// ToStringMap casts an interface to a map[string]interface{} type. +func ToStringMap(i interface{}) map[string]interface{} { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts an interface to a []interface{} type. +func ToSlice(i interface{}) []interface{} { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts an interface to a []bool type. +func ToBoolSlice(i interface{}) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts an interface to a []string type. +func ToStringSlice(i interface{}) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts an interface to a []int type. +func ToIntSlice(i interface{}) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToDurationSlice casts an interface to a []time.Duration type. +func ToDurationSlice(i interface{}) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go new file mode 100644 index 000000000..70c7291be --- /dev/null +++ b/vendor/github.com/spf13/cast/caste.go @@ -0,0 +1,1249 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "html/template" + "reflect" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +// ToTimeE casts an interface to a time.Time type. +func ToTimeE(i interface{}) (tim time.Time, err error) { + i = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDate(v) + case int: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + default: + return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) + } +} + +// ToDurationE casts an interface to a time.Duration type. +func ToDurationE(i interface{}) (d time.Duration, err error) { + i = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: + d = time.Duration(ToInt64(s)) + return + case float32, float64: + d = time.Duration(ToFloat64(s)) + return + case string: + if strings.ContainsAny(s, "nsuµmh") { + d, err = time.ParseDuration(s) + } else { + d, err = time.ParseDuration(s + "ns") + } + return + default: + err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) + return + } +} + +// ToBoolE casts an interface to a bool type. 
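+// Accepted inputs are bool, nil (treated as false), int (non-zero is true) and
+// strings understood by strconv.ParseBool; any other type results in an error.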
+func ToBoolE(i interface{}) (bool, error) { + i = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + if i.(int) != 0 { + return true, nil + } + return false, nil + case string: + return strconv.ParseBool(i.(string)) + default: + return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) + } +} + +// ToFloat64E casts an interface to a float64 type. +func ToFloat64E(i interface{}) (float64, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return s, nil + case float32: + return float64(s), nil + case int: + return float64(s), nil + case int64: + return float64(s), nil + case int32: + return float64(s), nil + case int16: + return float64(s), nil + case int8: + return float64(s), nil + case uint: + return float64(s), nil + case uint64: + return float64(s), nil + case uint32: + return float64(s), nil + case uint16: + return float64(s), nil + case uint8: + return float64(s), nil + case string: + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + } +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i interface{}) (float32, error) { + i = indirect(i) + + switch s := i.(type) { + case float64: + return float32(s), nil + case float32: + return s, nil + case int: + return float32(s), nil + case int64: + return float32(s), nil + case int32: + return float32(s), nil + case int16: + return float32(s), nil + case int8: + return float32(s), nil + case uint: + return float32(s), nil + case uint64: + return float32(s), nil + case uint32: + return float32(s), nil + case uint16: + return float32(s), nil + case uint8: + return float32(s), nil + case string: + v, err := strconv.ParseFloat(s, 32) + if err == nil { + return float32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + } +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i interface{}) (int64, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int64(s), nil + case int64: + return s, nil + case int32: + return int64(s), nil + case int16: + return int64(s), nil + case int8: + return int64(s), nil + case uint: + return int64(s), nil + case uint64: + return int64(s), nil + case uint32: + return int64(s), nil + case uint16: + return int64(s), nil + case uint8: + return int64(s), nil + case float64: + return int64(s), nil + case float32: + return int64(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) + } +} + +// ToInt32E casts an interface to an int32 type. 
+func ToInt32E(i interface{}) (int32, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int32(s), nil + case int64: + return int32(s), nil + case int32: + return s, nil + case int16: + return int32(s), nil + case int8: + return int32(s), nil + case uint: + return int32(s), nil + case uint64: + return int32(s), nil + case uint32: + return int32(s), nil + case uint16: + return int32(s), nil + case uint8: + return int32(s), nil + case float64: + return int32(s), nil + case float32: + return int32(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) + } +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i interface{}) (int16, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int16(s), nil + case int64: + return int16(s), nil + case int32: + return int16(s), nil + case int16: + return s, nil + case int8: + return int16(s), nil + case uint: + return int16(s), nil + case uint64: + return int16(s), nil + case uint32: + return int16(s), nil + case uint16: + return int16(s), nil + case uint8: + return int16(s), nil + case float64: + return int16(s), nil + case float32: + return int16(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) + } +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i interface{}) (int8, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return int8(s), nil + case int64: + return int8(s), nil + case int32: + return int8(s), nil + case int16: + return int8(s), nil + case int8: + return s, nil + case uint: + return int8(s), nil + case uint64: + return int8(s), nil + case uint32: + return int8(s), nil + case uint16: + return int8(s), nil + case uint8: + return int8(s), nil + case float64: + return int8(s), nil + case float32: + return int8(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) + } +} + +// ToIntE casts an interface to an int type. 
+func ToIntE(i interface{}) (int, error) { + i = indirect(i) + + switch s := i.(type) { + case int: + return s, nil + case int64: + return int(s), nil + case int32: + return int(s), nil + case int16: + return int(s), nil + case int8: + return int(s), nil + case uint: + return int(s), nil + case uint64: + return int(s), nil + case uint32: + return int(s), nil + case uint16: + return int(s), nil + case uint8: + return int(s), nil + case float64: + return int(s), nil + case float32: + return int(s), nil + case string: + v, err := strconv.ParseInt(s, 0, 0) + if err == nil { + return int(v), nil + } + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) + } +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i interface{}) (uint, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 0) + if err == nil { + return uint(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case uint: + return s, nil + case uint64: + return uint(s), nil + case uint32: + return uint(s), nil + case uint16: + return uint(s), nil + case uint8: + return uint(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) + } +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i interface{}) (uint64, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + return v, nil + } + return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case uint: + return uint64(s), nil + case uint64: + return s, nil + case uint32: + return uint64(s), nil + case uint16: + return uint64(s), nil + case uint8: + return uint64(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint64(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) + } +} + +// ToUint32E casts an interface to a uint32 type. 
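+// Negative numeric inputs are rejected with errNegativeNotAllowed.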
+func ToUint32E(i interface{}) (uint32, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 32) + if err == nil { + return uint32(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case uint: + return uint32(s), nil + case uint64: + return uint32(s), nil + case uint32: + return s, nil + case uint16: + return uint32(s), nil + case uint8: + return uint32(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint32(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) + } +} + +// ToUint16E casts an interface to a uint16 type. +func ToUint16E(i interface{}) (uint16, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 16) + if err == nil { + return uint16(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case uint: + return uint16(s), nil + case uint64: + return uint16(s), nil + case uint32: + return uint16(s), nil + case uint16: + return s, nil + case uint8: + return uint16(s), nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint16(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) + } +} + +// ToUint8E casts an interface to a uint type. 
+func ToUint8E(i interface{}) (uint8, error) { + i = indirect(i) + + switch s := i.(type) { + case string: + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + return uint8(v), nil + } + return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) + case int: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int16: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case int8: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case uint: + return uint8(s), nil + case uint64: + return uint8(s), nil + case uint32: + return uint8(s), nil + case uint16: + return uint8(s), nil + case uint8: + return s, nil + case float64: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case float32: + if s < 0 { + return 0, errNegativeNotAllowed + } + return uint8(s), nil + case bool: + if s { + return 1, nil + } + return 0, nil + case nil: + return 0, nil + default: + return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) + } +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(a interface{}) interface{} { + if a == nil { + return nil + } + if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return a + } + v := reflect.ValueOf(a) + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirectToStringerOrError returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer +// or error, +func indirectToStringerOrError(a interface{}) interface{} { + if a == nil { + return nil + } + + var errorType = reflect.TypeOf((*error)(nil)).Elem() + var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + + v := reflect.ValueOf(a) + for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + return v.Interface() +} + +// ToStringE casts an interface to a string type. 
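+// In addition to numeric and bool values it handles []byte, the html/template
+// string types, fmt.Stringer and error implementations; nil yields "".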
+func ToStringE(i interface{}) (string, error) { + i = indirectToStringerOrError(i) + + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int64: + return strconv.FormatInt(s, 10), nil + case int32: + return strconv.Itoa(int(s)), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) + } +} + +// ToStringMapStringE casts an interface to a map[string]string type. +func ToStringMapStringE(i interface{}) (map[string]string, error) { + var m = map[string]string{} + + switch v := i.(type) { + case map[string]string: + return v, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToString(val) + } + return m, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) + } +} + +// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { + var m = map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]interface{}: + for k, val := range v { + switch vt := val.(type) { + case []interface{}: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[interface{}][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}][]interface{}: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[interface{}]interface{}: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) + } + return m, nil +} + +// ToStringMapBoolE casts an interface to a map[string]bool type. +func ToStringMapBoolE(i interface{}) (map[string]bool, error) { + var m = map[string]bool{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[ToString(k)] = ToBool(val) + } + return m, nil + case map[string]bool: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) + } +} + +// ToStringMapE casts an interface to a map[string]interface{} type. +func ToStringMapE(i interface{}) (map[string]interface{}, error) { + var m = map[string]interface{}{} + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = val + } + return m, nil + case map[string]interface{}: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) + } +} + +// ToStringMapIntE casts an interface to a map[string]int{} type. 
+func ToStringMapIntE(i interface{}) (map[string]int, error) { + var m = map[string]int{} + if i == nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToInt(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[k] = ToInt(val) + } + return m, nil + case map[string]int: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + for _, keyVal := range v.MapKeys() { + val, err := ToIntE(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) + } + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + return m, nil +} + +// ToStringMapInt64E casts an interface to a map[string]int64{} type. +func ToStringMapInt64E(i interface{}) (map[string]int64, error) { + var m = map[string]int64{} + if i == nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + + switch v := i.(type) { + case map[interface{}]interface{}: + for k, val := range v { + m[ToString(k)] = ToInt64(val) + } + return m, nil + case map[string]interface{}: + for k, val := range v { + m[k] = ToInt64(val) + } + return m, nil + case map[string]int64: + return v, nil + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + for _, keyVal := range v.MapKeys() { + val, err := ToInt64E(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) + } + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + return m, nil +} + +// ToSliceE casts an interface to a []interface{} type. +func ToSliceE(i interface{}) ([]interface{}, error) { + var s []interface{} + + switch v := i.(type) { + case []interface{}: + return append(s, v...), nil + case []map[string]interface{}: + for _, u := range v { + s = append(s, u) + } + return s, nil + default: + return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) + } +} + +// ToBoolSliceE casts an interface to a []bool type. +func ToBoolSliceE(i interface{}) ([]bool, error) { + if i == nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + + switch v := i.(type) { + case []bool: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]bool, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToBoolE(s.Index(j).Interface()) + if err != nil { + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } + a[j] = val + } + return a, nil + default: + return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) + } +} + +// ToStringSliceE casts an interface to a []string type. 
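+// A plain string is split on whitespace via strings.Fields; any other scalar
+// that ToStringE understands is wrapped in a one-element slice.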
+func ToStringSliceE(i interface{}) ([]string, error) { + var a []string + + switch v := i.(type) { + case []interface{}: + for _, u := range v { + a = append(a, ToString(u)) + } + return a, nil + case []string: + return v, nil + case string: + return strings.Fields(v), nil + case interface{}: + str, err := ToStringE(v) + if err != nil { + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } + return []string{str}, nil + default: + return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) + } +} + +// ToIntSliceE casts an interface to a []int type. +func ToIntSliceE(i interface{}) ([]int, error) { + if i == nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + + switch v := i.(type) { + case []int: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]int, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToIntE(s.Index(j).Interface()) + if err != nil { + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } + a[j] = val + } + return a, nil + default: + return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) + } +} + +// ToDurationSliceE casts an interface to a []time.Duration type. +func ToDurationSliceE(i interface{}) ([]time.Duration, error) { + if i == nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + + switch v := i.(type) { + case []time.Duration: + return v, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]time.Duration, s.Len()) + for j := 0; j < s.Len(); j++ { + val, err := ToDurationE(s.Index(j).Interface()) + if err != nil { + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } + a[j] = val + } + return a, nil + default: + return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) + } +} + +// StringToDate attempts to parse a string into a time.Time type using a +// predefined list of formats. If no suitable format is found, an error is +// returned. +func StringToDate(s string) (time.Time, error) { + return parseDateWith(s, []string{ + time.RFC3339, + "2006-01-02T15:04:05", // iso8601 without timezone + time.RFC1123Z, + time.RFC1123, + time.RFC822Z, + time.RFC822, + time.RFC850, + time.ANSIC, + time.UnixDate, + time.RubyDate, + "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() + "2006-01-02", + "02 Jan 2006", + "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon + "2006-01-02 15:04:05 -07:00", + "2006-01-02 15:04:05 -0700", + "2006-01-02 15:04:05Z07:00", // RFC3339 without T + "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon + "2006-01-02 15:04:05", + time.Kitchen, + time.Stamp, + time.StampMilli, + time.StampMicro, + time.StampNano, + }) +} + +func parseDateWith(s string, dates []string) (d time.Time, e error) { + for _, dateType := range dates { + if d, e = time.Parse(dateType, s); e == nil { + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. 
+func jsonStringToObject(s string, v interface{}) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/cast/go.mod b/vendor/github.com/spf13/cast/go.mod new file mode 100644 index 000000000..c1c0232dd --- /dev/null +++ b/vendor/github.com/spf13/cast/go.mod @@ -0,0 +1,7 @@ +module github.com/spf13/cast + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/spf13/cast/go.sum b/vendor/github.com/spf13/cast/go.sum new file mode 100644 index 000000000..e03ee77d9 --- /dev/null +++ b/vendor/github.com/spf13/cast/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore index 1b8c7c261..c7b459e4d 100644 --- a/vendor/github.com/spf13/cobra/.gitignore +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -32,5 +32,8 @@ Session.vim tags *.exe - cobra.test +bin + +.idea/ +*.iml diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 000000000..0d6e61793 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,48 @@ +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + - deadcode + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + #- rowserrcheck + #- scopelint + #- staticcheck + - structcheck + #- stylecheck + #- typecheck + - unconvert + #- unparam + #- unused + - varcheck + #- whitespace + fast: false diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 5afcb2096..000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -matrix: - include: - - go: 1.9.4 - - go: 1.10.0 - - go: tip - allow_failures: - - go: tip - -before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... - - go build - - diff -u <(echo -n) <(gofmt -d -s .) - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go tool vet . 
2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md new file mode 100644 index 000000000..8a23b4f85 --- /dev/null +++ b/vendor/github.com/spf13/cobra/CHANGELOG.md @@ -0,0 +1,51 @@ +# Cobra Changelog + +## v1.1.3 + +* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility + +## v1.1.2 + +### Notable Changes + +* Bump license year to 2021 in golden files (#1309) @Bowbaq +* Enhance PowerShell completion with custom comp (#1208) @Luap99 +* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670 +* Documentation readability improvements (#1228 etc.) @zaataylor etc. +* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor + +## v1.1.1 + +* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context. +* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context. + +## v1.1.0 + +### Notable Changes + +* Extend Go completions and revamp zsh comp (#1070) +* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb +* Add completion for help command (#1136) +* Complete subcommands when TraverseChildren is set (#1171) +* Fix stderr printing functions (#894) +* fix: fish output redirection (#1247) + +## v1.0.0 + +Announcing v1.0.0 of Cobra. 🎉 + +### Notable Changes +* Fish completion (including support for Go custom completion) @marckhouzam +* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam +* Remove/replace SetOutput on Command - deprecated @jpmcb +* add support for autolabel stale PR @xchapter7x +* Add Labeler Actions @xchapter7x +* Custom completions coded in Go (instead of Bash) @marckhouzam +* Partial Revert of #922 @jharshman +* Add Makefile to project @jharshman +* Correct documentation for InOrStdin @desponda +* Apply formatting to templates @jharshman +* Revert change so help is printed on stdout again @marckhouzam +* Update md2man to v2.0.0 @pdf +* update viper to v1.4.0 @umarcor +* Update cmd/root.go example in README.md @jharshman diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md new file mode 100644 index 000000000..9d16f88fd --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONDUCT.md @@ -0,0 +1,37 @@ +## Cobra User Contract + +### Versioning +Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. + +### Backward Compatibility +We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. 
+ +### Deprecation +Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. + +### CVE +Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. + +### Communication +Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. + +### Breaking Changes +Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. + +There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. + +Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. + +Examples of breaking changes include: +- Removing or renaming exported constant, variable, type, or function. +- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... + - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. + +There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. + +### CI Testing +Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang. + +### Disclaimer +Changes to this document and the contents therein are at the discretion of the maintainers. +None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md new file mode 100644 index 000000000..6f356e6a8 --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# Contributing to Cobra + +Thank you so much for contributing to Cobra. We appreciate your time and help. +Here are some guidelines to help you get started. + +## Code of Conduct + +Be kind and respectful to the members of the community. Take time to educate +others who are seeking help. Harassment of any kind will not be tolerated. + +## Questions + +If you have questions regarding Cobra, feel free to ask it in the community +[#cobra Slack channel][cobra-slack] + +## Filing a bug or feature + +1. Before filing an issue, please check the existing issues to see if a + similar one was already opened. If there is one already opened, feel free + to comment on it. +1. If you believe you've found a bug, please provide detailed steps of + reproduction, the version of Cobra and anything else you believe will be + useful to help troubleshoot it (e.g. OS environment, environment variables, + etc...). Also state the current behavior vs. the expected behavior. +1. 
If you'd like to see a feature or an enhancement please open an issue with + a clear title and description of what the feature is and why it would be + beneficial to the project and its users. + +## Submitting changes + +1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to + sign a CLA. Please sign the CLA :slightly_smiling_face: +1. Tests: If you are submitting code, please ensure you have adequate tests + for the feature. Tests can be run via `go test ./...` or `make test`. +1. Since this is golang project, ensure the new code is properly formatted to + ensure code consistency. Run `make all`. + +### Quick steps to contribute + +1. Fork the project. +1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Make changes and run tests (`make test`) +1. Add them to staging (`git add .`) +1. Commit your changes (`git commit -m 'Add some feature'`) +1. Push to the branch (`git push origin my-new-feature`) +1. Create new pull request + + +[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile new file mode 100644 index 000000000..472c73bf1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/Makefile @@ -0,0 +1,40 @@ +BIN="./bin" +SRC=$(shell find . -name "*.go") + +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + +ifeq (, $(shell which richgo)) +$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") +endif + +.PHONY: fmt lint test cobra_generator install_deps clean + +default: all + +all: fmt test cobra_generator + +fmt: + $(info ******************** checking formatting ********************) + @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) + +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps lint + $(info ******************** running tests ********************) + richgo test -v ./... + +cobra_generator: install_deps + $(info ******************** building generator ********************) + mkdir -p $(BIN) + make -C cobra all + +install_deps: + $(info ******************** downloading dependencies ********************) + go get -v ./... + +clean: + rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 851fcc087..074e3979f 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -2,29 +2,14 @@ Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
-Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Moby (former Docker)](https://github.com/moby/moby) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [rclone](http://rclone.org/) -* [nehm](https://github.com/bogem/nehm) -* [Pouch](https://github.com/alibaba/pouch) - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) +Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), +[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to +name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. + +[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) +[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) # Table of Contents @@ -33,19 +18,19 @@ Many of the most widely used Go projects are built using Cobra including: * [Commands](#commands) * [Flags](#flags) - [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) -- [Contributing](#contributing) +- [Usage](#usage) + * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator) + * [Using the Cobra Library](user_guide.md#using-the-cobra-library) + * [Working with Flags](user_guide.md#working-with-flags) + * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments) + * [Example](user_guide.md#example) + * [Help Command](user_guide.md#help-command) + * [Usage Message](user_guide.md#usage-message) + * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks) + * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens) + * [Generating documentation for your 
command](user_guide.md#generating-documentation-for-your-command) + * [Generating shell completions](user_guide.md#generating-shell-completions) +- [Contributing](CONTRIBUTING.md) - [License](#license) # Overview @@ -65,7 +50,7 @@ Cobra provides: * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags * Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application +* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) * Automatically generated man pages for your application * Command aliases so you can change things without breaking them * The flexibility to define your own help, usage, etc. @@ -77,8 +62,8 @@ Cobra is built on a structure of commands, arguments & flags. **Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. +The best applications read like sentences when used, and as a result, users +intuitively know how to interact with them. The pattern to follow is `APPNAME VERB NOUN --ADJECTIVE.` @@ -123,7 +108,7 @@ Using Cobra is easy. First, use `go get` to install the latest version of the library. This command will install the `cobra` generator executable along with the library and its dependencies: - go get -u github.com/spf13/cobra/cobra + go get -u github.com/spf13/cobra Next, include Cobra in your application: @@ -131,605 +116,9 @@ Next, include Cobra in your application: import "github.com/spf13/cobra" ``` -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. 
- -For example cmd/root.go: - -```go -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func init() { - cobra.OnInitialize(initConfig) - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} - -func initConfig() { - // Don't forget to read config either from cfgFile or from home directory! - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - if err := viper.ReadInConfig(); err != nil { - fmt.Println("Can't read config:", err) - os.Exit(1) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "fmt" - "os" - - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. 
- -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires at least one arg") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. 
- -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. -For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. 
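If you would rather report errors yourself and suppress the automatic usage output, the `Command` struct exposes the `SilenceUsage` and `SilenceErrors` fields. The following is only a minimal sketch; `rootCmd` and the error returned are placeholders.

```go
var rootCmd = &cobra.Command{
	Use: "app",
	// SilenceUsage stops Cobra from printing the usage block whenever
	// the command returns an error.
	SilenceUsage: true,
	// SilenceErrors stops Cobra from printing the error itself, leaving
	// reporting to whoever calls Execute().
	SilenceErrors: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return fmt.Errorf("not implemented")
	},
}
```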
- -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -# Contributing +# Usage -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request +See [User Guide](user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index a5d8a9273..70e9b2629 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -2,6 +2,7 @@ package cobra import ( "fmt" + "strings" ) type PositionalArgs func(cmd *Command, args []string) error @@ -34,8 +35,15 @@ func NoArgs(cmd *Command, args []string) error { // OnlyValidArgs returns an error if any args are not in the list of ValidArgs. func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { + // Remove any description that may be included in ValidArgs. + // A description is following a tab character. + var validArgs []string + for _, v := range cmd.ValidArgs { + validArgs = append(validArgs, strings.Split(v, "\t")[0]) + } + for _, v := range args { - if !stringInSlice(v, cmd.ValidArgs) { + if !stringInSlice(v, validArgs) { return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) } } @@ -78,6 +86,18 @@ func ExactArgs(n int) PositionalArgs { } } +// ExactValidArgs returns an error if +// there are not exactly N positional args OR +// there are any positional args that are not in the `ValidArgs` field of `Command` +func ExactValidArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if err := ExactArgs(n)(cmd, args); err != nil { + return err + } + return OnlyValidArgs(cmd, args) + } +} + // RangeArgs returns an error if the number of args is not within the expected range. 
func RangeArgs(min int, max int) PositionalArgs { return func(cmd *Command, args []string) error { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8fa8f486f..733f4d121 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -19,9 +19,9 @@ const ( BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" ) -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -58,9 +58,103 @@ __%[1]s_contains_word() return 1 } +__%[1]s_handle_go_custom_completion() +{ + __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" + + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + local out requestComp lastParam lastChar comp directive args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" + __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. + __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no space" + compopt -o nospace + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" + compopt +o default + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + # Do not use quotes around the $out variable or else newline + # characters will be kept. 
+ for filter in ${out[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subDir + # Use printf to strip any trailing newline + subdir=$(printf "%%s" "${out[0]}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + __%[1]s_handle_subdirs_in_dir_flag "$subdir" + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${out[*]}" -- "$cur") + fi +} + __%[1]s_handle_reply() { __%[1]s_debug "${FUNCNAME[0]}" + local comp case $cur in -*) if [[ $(type -t compopt) = "builtin" ]]; then @@ -72,7 +166,9 @@ __%[1]s_handle_reply() else allflags=("${flags[*]} ${two_word_flags[*]}") fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${allflags[*]}" -- "$cur") if [[ $(type -t compopt) = "builtin" ]]; then [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace fi @@ -117,19 +213,32 @@ __%[1]s_handle_reply() local completions completions=("${commands[@]}") if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") + completions+=("${must_have_one_noun[@]}") + elif [[ -n "${has_completion_function}" ]]; then + # if a go completion function is provided, defer to that function + __%[1]s_handle_go_custom_completion fi if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then completions+=("${must_have_one_flag[@]}") fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${noun_aliases[*]}" -- "$cur") fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi fi # available in bash-completion >= 2, not always present on macOS @@ -154,7 +263,7 @@ __%[1]s_handle_filename_extension_flag() __%[1]s_handle_subdirs_in_dir_flag() { local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return } __%[1]s_handle_flag() @@ -193,7 +302,8 @@ __%[1]s_handle_flag() fi # skip the argument to a two word flag - if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -265,14 +375,16 @@ __%[1]s_handle_word() __%[1]s_handle_word } -`, name)) +`, name, ShellCompNoDescRequestCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) } -func writePostscript(buf *bytes.Buffer, name 
string) { +func writePostscript(buf io.StringWriter, name string) { name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ - local cur prev words cword + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ + local cur prev words cword split declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then @@ -288,42 +400,45 @@ func writePostscript(buf *bytes.Buffer, name string) { local flags_with_completion=() local flags_completion=() local commands=("%[1]s") + local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() + local has_completion_function local last_command local nouns=() + local noun_aliases=() __%[1]s_handle_word } `, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_%s %s else complete -o default -o nospace -F __start_%s %s fi `, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") } -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { + if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) writeCmdAliases(buf, c) } - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { for key, value := range annotations { switch key { case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) > 0 { @@ -331,17 +446,18 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) } else { - buf.WriteString(" flags_completion+=(:)\n") + WriteStringAndCheck(buf, " flags_completion+=(:)\n") } case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) == 1 { @@ -349,45 +465,70 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir -d" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) 
} } } -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Shorthand format := " " if len(flag.NoOptDefVal) == 0 { format += "two_word_" } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) } -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Name format := " flags+=(\"--%s" if len(flag.NoOptDefVal) == 0 { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.NoOptDefVal) == 0 { + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + } writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn if len(flag.NoOptDefVal) == 0 { - format += "=" + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn + } + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) } -func writeFlags(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(` flags=() +// Setup annotations for go completions for registered flags +func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + for flag := range flagCompletionFunctions { + // Make sure the completion script calls the __*_go_custom_completion function for + // every registered flag. 
We need to do this here (and not when the flag was registered + // for completion) so that we can know the root command name for the prefix + // of ___go_custom_completion + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} + } +} + +func writeFlags(buf io.StringWriter, cmd *Command) { + prepareCustomAnnotationsForFlags(cmd) + WriteStringAndCheck(buf, ` flags=() two_word_flags=() local_nonpersistent_flags=() flags_with_completion=() @@ -403,7 +544,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { if len(flag.Shorthand) > 0 { writeShortFlag(buf, flag, cmd) } - if localNonPersistentFlags.Lookup(flag.Name) != nil { + // localNonPersistentFlags are used to stop the completion of subcommands when one is set + // if TraverseChildren is true we should allow to complete subcommands + if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { writeLocalNonPersistentFlag(buf, flag) } }) @@ -417,11 +560,11 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { } }) - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() flags.VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -434,51 +577,57 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { if flag.Value.Type() != "bool" { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } } }) } -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + sort.Strings(cmd.ValidArgs) for _, value := range cmd.ValidArgs { - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + // Remove any description that may be included following a tab character. + // Descriptions are not supported by bash completion. 
+ value = strings.Split(value, "\t")[0] + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } + if cmd.ValidArgsFunction != nil { + WriteStringAndCheck(buf, " has_completion_function=1\n") } } -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { +func writeCmdAliases(buf io.StringWriter, cmd *Command) { if len(cmd.Aliases) == 0 { return } - sort.Sort(sort.StringSlice(cmd.Aliases)) + sort.Strings(cmd.Aliases) - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) } - buf.WriteString(` fi`) - buf.WriteString("\n") + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") } -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) } } -func gen(buf *bytes.Buffer, cmd *Command) { +func gen(buf io.StringWriter, cmd *Command) { for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { + if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } gen(buf, c) @@ -488,22 +637,22 @@ func gen(buf *bytes.Buffer, cmd *Command) { commandName = strings.Replace(commandName, ":", "__", -1) if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) } - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") writeCommands(buf, cmd) writeFlags(buf, cmd) writeRequiredFlag(buf, cmd) writeRequiredNouns(buf, cmd) writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") + WriteStringAndCheck(buf, "}\n\n") } // GenBashCompletion generates bash completion file and writes to the passed writer. @@ -534,51 +683,3 @@ func (c *Command) GenBashCompletionFile(filename string) error { return c.GenBashCompletion(outFile) } - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, -// and causes your command to report an error if invoked without the flag. 
-func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index e79d4769d..52919b2fa 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -1,29 +1,16 @@ -# Generating Bash Completions For Your Own cobra.Command +# Generating Bash Completions For Your cobra.Command -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: +Please refer to [Shell Completions](shell_completions.md) for details. -```go -package main - -import ( - "io/ioutil" - "os" +## Bash legacy dynamic completions - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) - -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` +For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. 
This provides a path to gradually migrate from the legacy solution to the new solution. -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. +**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. -## Creating your own custom functions +The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. -Some more actual code that works in kubernetes: +Some code that works in kubernetes: ```bash const ( @@ -47,7 +34,7 @@ __kubectl_get_resource() fi } -__custom_func() { +__kubectl_custom_func() { case ${last_command} in kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) __kubectl_get_resource @@ -74,110 +61,9 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, } ``` -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. 
in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. +The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! -# Specify custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify -a custom flag completion function with cobra.BashCompCustom: +Similarly, for flags: ```go annotation := make(map[string][]string) @@ -191,7 +77,7 @@ a custom flag completion function with cobra.BashCompCustom: cmd.Flags().AddFlag(flag) ``` -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` +In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` value, e.g.: ```bash @@ -205,17 +91,3 @@ __kubectl_get_namespaces() fi } ``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. 
- -$) aliasname -completion firstcommand secondcommand -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 000000000..8859b57c4 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,302 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. +__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ "${cur}" == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out[*]}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. 
+ __%[1]s_debug "Received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + + # Use printf to strip any trailing newline + local subdir + subdir=$(printf "%%s" "${out[0]}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + __%[1]s_handle_standard_completion_case + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = +} + +__%[1]s_handle_standard_completion_case() { + local tab comp + tab=$(printf '\t') + + local longest=0 + # Look for the longest completion so that we can format things nicely + while IFS='' read -r comp; do + # Strip any description before checking the length + comp=${comp%%%%$tab*} + # Only consider the completions that match + comp=$(compgen -W "$comp" -- "$cur") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${out[@]}") + + local completions=() + while IFS='' read -r comp; do + if [ -z "$comp" ]; then + continue + fi + + __%[1]s_debug "Original comp: $comp" + comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")" + __%[1]s_debug "Final comp: $comp" + completions+=("$comp") + done < <(printf "%%s\n" "${out[@]}") + + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") + + # If there is a single completion left, remove the description text + if [ ${#COMPREPLY[*]} -eq 1 ]; then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%% *}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY=() + COMPREPLY+=("$comp") + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while [[ $((--idx)) -ge 0 ]]; do + COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab + tab=$(printf '\t') + local comp="$1" + local longest=$2 + + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. 
+ maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. + if [[ $maxdesclength -gt 8 ]]; then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if [ $maxdesclength -gt 0 ]; then + if [ ${#desc} -gt $maxdesclength ]; then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + fi + + # Must use printf to escape all special characters + printf "%%q" "${comp}" +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n "=:" || return + else + __%[1]s_init_completion -n "=:" || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index 7010fd15b..d6cbfd719 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -19,10 +19,12 @@ package cobra import ( "fmt" "io" + "os" "reflect" "strconv" "strings" "text/template" + "time" "unicode" ) @@ -51,11 +53,17 @@ var EnableCommandSorting = true // if the CLI is started from explorer.exe. // To disable the mousetrap, just set this variable to blank string (""). // Works only on Microsoft Windows. -var MousetrapHelpText string = `This is a command line tool. +var MousetrapHelpText = `This is a command line tool. You need to open cmd.exe and run it from there. ` +// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows +// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. 
+// To disable the mousetrap, just set MousetrapHelpText to blank string (""). +// Works only on Microsoft Windows. +var MousetrapDisplayDuration = 5 * time.Second + // AddTemplateFunc adds a template function that's available to Usage and Help // template generation. func AddTemplateFunc(name string, tmplFunc interface{}) { @@ -198,3 +206,17 @@ func stringInSlice(a string, list []string) bool { } return false } + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. +func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 34d1bf367..2cc18891d 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -17,6 +17,7 @@ package cobra import ( "bytes" + "context" "fmt" "io" "os" @@ -36,6 +37,14 @@ type FParseErrWhitelist flag.ParseErrorsWhitelist // definition to ensure usability. type Command struct { // Use is the one-line usage message. + // Recommended syntax is as follow: + // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + // ... indicates that you can specify multiple values for the previous argument. + // | indicates mutually exclusive information. You can use the argument to the left of the separator or the + // argument to the right of the separator. You cannot use both arguments in a single use of the command. + // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + // optional, they are enclosed in brackets ([ ]). + // Example: add [-F file | -D dir]... [-f format] profile Use string // Aliases is an array of aliases that can be used instead of the first word in Use. @@ -54,33 +63,36 @@ type Command struct { // Example is examples of how to use the command. Example string - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions ValidArgs []string + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. + // It is a dynamic version of using ValidArgs. + // Only one of ValidArgs and ValidArgsFunction can be used for a command. + ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) // Expected arguments Args PositionalArgs // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, + // These are not suggested to the user in the shell completion, // but accepted if entered manually. ArgAliases []string - // BashCompletionFunction is custom functions used by the bash autocompletion generator. + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction BashCompletionFunction string // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. 
- Hidden bool - // Annotations are key/value pairs that can be used by applications to identify or // group commands. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. + // will print content of the "Version" variable. A shorthand "v" flag will also be added if the + // command does not define one. Version string // The *Run functions are executed in the following order: @@ -112,53 +124,6 @@ type Command struct { // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - //FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - // args is actual args parsed from flags. args []string // flagErrorBuf contains all error messages from pflag. @@ -177,8 +142,6 @@ type Command struct { // that we can use on every pflag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - // output is an output writer defined by user. - output io.Writer // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. @@ -195,6 +158,76 @@ type Command struct { helpCommand *Command // versionTemplate is the version template defined by user. 
versionTemplate string + + // inReader is a reader defined by the user that replaces stdin + inReader io.Reader + // outWriter is a writer defined by the user that replaces stdout + outWriter io.Writer + // errWriter is a writer defined by the user that replaces stderr + errWriter io.Writer + + //FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int +} + +// Context returns underlying command context. If command wasn't +// executed with ExecuteContext Context returns Background context. +func (c *Command) Context() context.Context { + return c.ctx } // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden @@ -205,8 +238,28 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { - c.output = output + c.outWriter = output + c.errWriter = output +} + +// SetOut sets the destination for usage messages. +// If newOut is nil, os.Stdout is used. +func (c *Command) SetOut(newOut io.Writer) { + c.outWriter = newOut +} + +// SetErr sets the destination for error messages. +// If newErr is nil, os.Stderr is used. +func (c *Command) SetErr(newErr io.Writer) { + c.errWriter = newErr +} + +// SetIn sets the source for input data +// If newIn is nil, os.Stdin is used. +func (c *Command) SetIn(newIn io.Reader) { + c.inReader = newIn } // SetUsageFunc sets usage function. Usage can be defined by application. 
@@ -267,9 +320,19 @@ func (c *Command) OutOrStderr() io.Writer { return c.getOut(os.Stderr) } +// ErrOrStderr returns output to stderr +func (c *Command) ErrOrStderr() io.Writer { + return c.getErr(os.Stderr) +} + +// InOrStdin returns input to stdin +func (c *Command) InOrStdin() io.Reader { + return c.getIn(os.Stdin) +} + func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return c.output + if c.outWriter != nil { + return c.outWriter } if c.HasParent() { return c.parent.getOut(def) @@ -277,6 +340,26 @@ func (c *Command) getOut(def io.Writer) io.Writer { return def } +func (c *Command) getErr(def io.Writer) io.Writer { + if c.errWriter != nil { + return c.errWriter + } + if c.HasParent() { + return c.parent.getErr(def) + } + return def +} + +func (c *Command) getIn(def io.Reader) io.Reader { + if c.inReader != nil { + return c.inReader + } + if c.HasParent() { + return c.parent.getIn(def) + } + return def +} + // UsageFunc returns either the function set by SetUsageFunc for this command // or a parent, or it returns a default usage function. func (c *Command) UsageFunc() (f func(*Command) error) { @@ -290,7 +373,7 @@ func (c *Command) UsageFunc() (f func(*Command) error) { c.mergePersistentFlags() err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) if err != nil { - c.Println(err) + c.PrintErrln(err) } return err } @@ -314,9 +397,11 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + // The help should be sent to stdout + // See https://github.com/spf13/cobra/issues/1002 err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) if err != nil { - c.Println(err) + c.PrintErrln(err) } } } @@ -329,13 +414,22 @@ func (c *Command) Help() error { return nil } -// UsageString return usage string. +// UsageString returns usage string. func (c *Command) UsageString() string { - tmpOutput := c.output + // Storing normal writers + tmpOutput := c.outWriter + tmpErr := c.errWriter + bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput + c.outWriter = bb + c.errWriter = bb + + CheckErr(c.Usage()) + + // Setting things back to normal + c.outWriter = tmpOutput + c.errWriter = tmpErr + return bb.String() } @@ -793,6 +887,14 @@ func (c *Command) preRun() { } } +// ExecuteContext is the same as Execute(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContext(ctx context.Context) error { + c.ctx = ctx + return c.Execute() +} + // Execute uses the args (os.Args[1:] by default) // and run through the command tree finding appropriate matches // for commands and then corresponding flags. @@ -801,8 +903,20 @@ func (c *Command) Execute() error { return err } +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + // ExecuteC executes the command. 
func (c *Command) ExecuteC() (cmd *Command, err error) { + if c.ctx == nil { + c.ctx = context.Background() + } + // Regardless of what command execute is called on, run on Root only if c.HasParent() { return c.Root().ExecuteC() @@ -813,19 +927,21 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { preExecHookFn(c) } - // initialize help as the last point possible to allow for user - // overriding + // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.initDefaultCompletionCmd() - var args []string + args := c.args // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { args = os.Args[1:] - } else { - args = c.args } + // initialize the hidden command to be used for shell completion + c.initCompleteCmd(args) + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -838,8 +954,8 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + c.PrintErrln("Error:", err.Error()) + c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err } @@ -849,6 +965,12 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { cmd.commandCalledAs.name = cmd.Name() } + // We have to pass global context to children command + // if context is present on the parent command. + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + err = cmd.execute(flags) if err != nil { // Always show help if requested, even if SilenceErrors is in @@ -858,13 +980,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { return cmd, nil } - // If root command has SilentErrors flagged, + // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.Println("Error:", err.Error()) + c.PrintErrln("Error:", err.Error()) } - // If root command has SilentUsage flagged, + // If root command has SilenceUsage flagged, // all subcommands should respect it if !cmd.SilenceUsage && !c.SilenceUsage { c.Println(cmd.UsageString()) @@ -881,6 +1003,10 @@ func (c *Command) ValidateArgs(args []string) error { } func (c *Command) validateRequiredFlags() error { + if c.DisableFlagParsing { + return nil + } + flags := c.Flags() missingFlagNames := []string{} flags.VisitAll(func(pflag *flag.Flag) { @@ -932,7 +1058,11 @@ func (c *Command) InitDefaultVersionFlag() { } else { usage += c.Name() } - c.Flags().Bool("version", false, usage) + if c.Flags().ShorthandLookup("v") == nil { + c.Flags().BoolP("version", "v", false, usage) + } else { + c.Flags().Bool("version", false, usage) + } } } @@ -950,15 +1080,33 @@ func (c *Command) InitDefaultHelpCmd() { Short: "Help about any command", Long: `Help provides help for any command in the application. Simply type ` + c.Name() + ` help [path to command] for full details.`, - + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + var completions []string + cmd, _, e := c.Root().Find(args) + if e != nil { + return nil, ShellCompDirectiveNoFileComp + } + if cmd == nil { + // Root help command. 
+ cmd = c.Root() + } + for _, subCmd := range cmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + } + } + return completions, ShellCompDirectiveNoFileComp + }, Run: func(c *Command, args []string) { cmd, _, e := c.Root().Find(args) if cmd == nil || e != nil { c.Printf("Unknown help topic %#q\n", args) - c.Root().Usage() + CheckErr(c.Root().Usage()) } else { cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.Help() + CheckErr(cmd.Help()) } }, } @@ -1070,6 +1218,21 @@ func (c *Command) Printf(format string, i ...interface{}) { c.Print(fmt.Sprintf(format, i...)) } +// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErr(i ...interface{}) { + fmt.Fprint(c.ErrOrStderr(), i...) +} + +// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrln(i ...interface{}) { + c.PrintErr(fmt.Sprintln(i...)) +} + +// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrf(format string, i ...interface{}) { + c.PrintErr(fmt.Sprintf(format, i...)) +} + // CommandPath returns the full path to this command. func (c *Command) CommandPath() string { if c.HasParent() { @@ -1335,7 +1498,7 @@ func (c *Command) LocalFlags() *flag.FlagSet { return c.lflags } -// InheritedFlags returns all flags which were inherited from parents commands. +// InheritedFlags returns all flags which were inherited from parent commands. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() @@ -1470,7 +1633,7 @@ func (c *Command) ParseFlags(args []string) error { beforeErrorBufLen := c.flagErrorBuf.Len() c.mergePersistentFlags() - //do it here after merging all flags and just before parse + // do it here after merging all flags and just before parse c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) err := c.Flags().Parse(args) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index edec728e4..8768b1736 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -3,6 +3,7 @@ package cobra import ( + "fmt" "os" "time" @@ -14,7 +15,12 @@ var preExecHookFn = preExecHook func preExecHook(c *Command) { if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) + if MousetrapDisplayDuration > 0 { + time.Sleep(MousetrapDisplayDuration) + } else { + c.Println("Press return to continue...") + fmt.Scanln() + } os.Exit(1) } } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go new file mode 100644 index 000000000..b849b9c84 --- /dev/null +++ b/vendor/github.com/spf13/cobra/completions.go @@ -0,0 +1,781 @@ +package cobra + +import ( + "fmt" + "os" + "strings" + "sync" + + "github.com/spf13/pflag" +) + +const ( + // ShellCompRequestCmd is the name of the hidden command that is used to request + // completion results from the program. It is used by the shell completion scripts. + ShellCompRequestCmd = "__complete" + // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request + // completion results without their description. 
It is used by the shell completion scripts. + ShellCompNoDescRequestCmd = "__completeNoDesc" +) + +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. +var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} + +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + +// ShellCompDirective is a bit map representing the different behaviors the shell +// can be instructed to have once completions have been provided. +type ShellCompDirective int + +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + +const ( + // ShellCompDirectiveError indicates an error occurred and completions should be ignored. + ShellCompDirectiveError ShellCompDirective = 1 << iota + + // ShellCompDirectiveNoSpace indicates that the shell should not add a space + // after the completion even if there is a single completion provided. + ShellCompDirectiveNoSpace + + // ShellCompDirectiveNoFileComp indicates that the shell should not provide + // file completion even when no completion is provided. + ShellCompDirectiveNoFileComp + + // ShellCompDirectiveFilterFileExt indicates that the provided completions + // should be used as file extension filters. + // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() + // is a shortcut to using this directive explicitly. The BashCompFilenameExt + // annotation can also be used to obtain the same behavior for flags. + ShellCompDirectiveFilterFileExt + + // ShellCompDirectiveFilterDirs indicates that only directory names should + // be provided in file completion. To request directory names within another + // directory, the returned completions should specify the directory within + // which to search. The BashCompSubdirsInDir annotation can be used to + // obtain the same behavior but only for flags. + ShellCompDirectiveFilterDirs + + // =========================================================================== + + // All directives using iota should be above this one. + // For internal use. + shellCompDirectiveMaxValue + + // ShellCompDirectiveDefault indicates to let the shell perform its default + // behavior after completions have been provided. + // This one must be last to avoid messing up the iota count. + ShellCompDirectiveDefault ShellCompDirective = 0 +) + +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. 
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + +// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { + flag := c.Flag(flagName) + if flag == nil { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) + } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + + if _, exists := flagCompletionFunctions[flag]; exists { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) + } + flagCompletionFunctions[flag] = f + return nil +} + +// Returns a string listing the different directive enabled in the specified parameter +func (d ShellCompDirective) string() string { + var directives []string + if d&ShellCompDirectiveError != 0 { + directives = append(directives, "ShellCompDirectiveError") + } + if d&ShellCompDirectiveNoSpace != 0 { + directives = append(directives, "ShellCompDirectiveNoSpace") + } + if d&ShellCompDirectiveNoFileComp != 0 { + directives = append(directives, "ShellCompDirectiveNoFileComp") + } + if d&ShellCompDirectiveFilterFileExt != 0 { + directives = append(directives, "ShellCompDirectiveFilterFileExt") + } + if d&ShellCompDirectiveFilterDirs != 0 { + directives = append(directives, "ShellCompDirectiveFilterDirs") + } + if len(directives) == 0 { + directives = append(directives, "ShellCompDirectiveDefault") + } + + if d >= shellCompDirectiveMaxValue { + return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) + } + return strings.Join(directives, ", ") +} + +// Adds a special hidden command that can be used to request custom completions. +func (c *Command) initCompleteCmd(args []string) { + completeCmd := &Command{ + Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), + Aliases: []string{ShellCompNoDescRequestCmd}, + DisableFlagsInUseLine: true, + Hidden: true, + DisableFlagParsing: true, + Args: MinimumNArgs(1), + Short: "Request shell completion choices for the specified command-line", + Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", + "to request completion choices for the specified command-line.", ShellCompRequestCmd), + Run: func(cmd *Command, args []string) { + finalCmd, completions, directive, err := cmd.getCompletions(args) + if err != nil { + CompErrorln(err.Error()) + // Keep going for multiple reasons: + // 1- There could be some valid completions even though there was an error + // 2- Even without completions, we need to print the directive + } + + noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + for _, comp := range completions { + if noDescriptions { + // Remove any description that may be included following a tab character. + comp = strings.Split(comp, "\t")[0] + } + + // Make sure we only write the first line to the output. + // This is needed if a description contains a linebreak. + // Otherwise the shell scripts will interpret the other lines as new flags + // and could therefore provide a wrong completion. + comp = strings.Split(comp, "\n")[0] + + // Finally trim the completion. This is especially important to get rid + // of a trailing tab when there are no description following it. 
+ // For example, a sub-command without a description should not be completed + // with a tab at the end (or else zsh will show a -- following it + // although there is no description). + comp = strings.TrimSpace(comp) + + // Print each possible completion to stdout for the completion script to consume. + fmt.Fprintln(finalCmd.OutOrStdout(), comp) + } + + // As the last printout, print the completion directive for the completion script to parse. + // The directive integer must be that last character following a single colon (:). + // The completion script expects : + fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + + // Print some helpful info to stderr for the user to understand. + // Output from stderr must be ignored by the completion script. + fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) + }, + } + c.AddCommand(completeCmd) + subCmd, _, err := c.Find(args) + if err != nil || subCmd.Name() != ShellCompRequestCmd { + // Only create this special command if it is actually being called. + // This reduces possible side-effects of creating such a command; + // for example, having this command would cause problems to a + // cobra program that only consists of the root command, since this + // command would cause the root command to suddenly have a subcommand. + c.RemoveCommand(completeCmd) + } +} + +func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { + // The last argument, which is not completely typed by the user, + // should not be part of the list of arguments + toComplete := args[len(args)-1] + trimmedArgs := args[:len(args)-1] + + var finalCmd *Command + var finalArgs []string + var err error + // Find the real command for which completion must be performed + // check if we need to traverse here to parse local flags on parent commands + if c.Root().TraverseChildren { + finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) + } else { + finalCmd, finalArgs, err = c.Root().Find(trimmedArgs) + } + if err != nil { + // Unable to find the real command. E.g., someInvalidCmd + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + } + finalCmd.ctx = c.ctx + + // Check if we are doing flag value completion before parsing the flags. + // This is important because if we are completing a flag value, we need to also + // remove the flag name argument from the list of finalArgs or else the parsing + // could fail due to an invalid value (incomplete) for the flag. + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. 
+ flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() + + // Parse the flags early so we can check if required flags are set + if err = finalCmd.ParseFlags(finalArgs); err != nil { + return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + } + + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + if flag != nil && flagCompletion { + // Check if we are completing a flag value subject to annotations + if validExts, present := flag.Annotations[BashCompFilenameExt]; present { + if len(validExts) != 0 { + // File completion filtered by extensions + return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil + } + + // The annotation requests simple file completion. There is no reason to do + // that since it is the default behavior anyway. Let's ignore this annotation + // in case the program also registered a completion function for this flag. + // Even though it is a mistake on the program's side, let's be nice when we can. + } + + if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { + if len(subDir) == 1 { + // Directory completion from within a directory + return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil + } + // Directory completion + return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + } + } + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires + // the flag name to be complete + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { + var completions []string + + // First check for required flags + completions = completeRequireFlags(finalCmd, toComplete) + + // If we have not found any required flags, only then can we show regular flags + if len(completions) == 0 { + doCompleteFlags := func(flag *pflag.Flag) { + if !flag.Changed || + strings.Contains(flag.Value.Type(), "Slice") || + strings.Contains(flag.Value.Type(), "Array") { + // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + // we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + } + + directive := ShellCompDirectiveNoFileComp + if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { + // If there is a single completion, the shell usually adds a space + // after the completion. 
We don't want that if the flag ends with an = + directive = ShellCompDirectiveNoSpace + } + return finalCmd, completions, directive, nil + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + var completions []string + directive := ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } + + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp + } + } + } + + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) + + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } + } + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } + } + } + } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil + } + + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. + } + + // Find the completion function for the flag or command + var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() + completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() + } else { + completionFn = finalCmd.ValidArgsFunction + } + if completionFn != nil { + // Go custom completion defined for this flag or command. + // Call the registered completion function to get the completions. 
+ var comps []string + comps, directive = completionFn(finalCmd, finalArgs, toComplete) + completions = append(completions, comps...) + } + + return finalCmd, completions, directive, nil +} + +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { + if nonCompletableFlag(flag) { + return []string{} + } + + var completions []string + flagName := "--" + flag.Name + if strings.HasPrefix(flagName, toComplete) { + // Flag without the = + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + + // Why suggest both long forms: --flag and --flag= ? + // This forces the user to *always* have to type either an = or a space after the flag name. + // Let's be nice and avoid making users have to do that. + // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. + // The = form will still work, we just won't suggest it. + // This also makes the list of suggested flags shorter as we avoid all the = forms. + // + // if len(flag.NoOptDefVal) == 0 { + // // Flag requires a value, so it can be suffixed with = + // flagName += "=" + // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // } + } + + flagName = "-" + flag.Shorthand + if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + } + + return completions +} + +func completeRequireFlags(finalCmd *Command, toComplete string) []string { + var completions []string + + doCompleteRequiredFlags := func(flag *pflag.Flag) { + if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { + if !flag.Changed { + // If the flag is not already present, we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + + return completions +} + +func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { + if finalCmd.DisableFlagParsing { + // We only do flag completion if we are allowed to parse flags + // This is important for commands which have requested to do their own flag completion. + return nil, args, lastArg, nil + } + + var flagName string + trimmedArgs := args + flagWithEqual := false + orgLastArg := lastArg + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as that function + // requires the flag name to be complete + if len(lastArg) > 0 && lastArg[0] == '-' { + if index := strings.Index(lastArg, "="); index >= 0 { + // Flag with an = + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } + lastArg = lastArg[index+1:] + flagWithEqual = true + } else { + // Normal flag completion + return nil, args, lastArg, nil + } + } + + if len(flagName) == 0 { + if len(args) > 0 { + prevArg := args[len(args)-1] + if isFlagArg(prevArg) { + // Only consider the case where the flag does not contain an =. + // If the flag contains an = it means it has already been fully processed, + // so we don't need to deal with it here. + if index := strings.Index(prevArg, "="); index < 0 { + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } + // Remove the uncompleted flag or else there could be an error created + // for an invalid value for that flag + trimmedArgs = args[:len(args)-1] + } + } + } + } + + if len(flagName) == 0 { + // Not doing flag completion + return nil, trimmedArgs, lastArg, nil + } + + flag := findFlag(finalCmd, flagName) + if flag == nil { + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} + } + + if !flagWithEqual { + if len(flag.NoOptDefVal) != 0 { + // We had assumed dealing with a two-word flag but the flag is a boolean flag. + // In that case, there is no value following it, so we are not really doing flag completion. + // Reset everything to do noun completion. + trimmedArgs = args + flag = nil + } + } + + return flag, trimmedArgs, lastArg, nil +} + +// initDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) initDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(` +Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. 
+ +To load completions in your current shell session: +$ source <(%[1]s completion bash) + +To load completions for every new session, execute once: +Linux: + $ %[1]s completion bash > /etc/bash_completion.d/%[1]s +MacOS: + $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. + `, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + +$ echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions for every new session, execute once: +# Linux: +$ %[1]s completion zsh > "${fpath[1]}/_%[1]s" +# macOS: +$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(` +Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: +$ %[1]s completion fish | source + +To load completions for every new session, execute once: +$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(` +Generate the autocompletion script for powershell. + +To load completions in your current shell session: +PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + +func findFlag(cmd *Command, name string) *pflag.Flag { + flagSet := cmd.Flags() + if len(name) == 1 { + // First convert the short flag into a long flag + // as the cmd.Flag() search only accepts long flags + if short := flagSet.ShorthandLookup(name); short != nil { + name = short.Name + } else { + set := cmd.InheritedFlags() + if short = set.ShorthandLookup(name); short != nil { + name = short.Name + } else { + return nil + } + } + } + return cmd.Flag(name) +} + +// CompDebug prints the specified string to the same file as where the +// completion script prints its logs. +// Note that completion printouts should never be on stdout as they would +// be wrongly interpreted as actual completion choices by the completion script. +func CompDebug(msg string, printToStdErr bool) { + msg = fmt.Sprintf("[Debug] %s", msg) + + // Such logs are only printed when the user has set the environment + // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. + if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { + f, err := os.OpenFile(path, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + defer f.Close() + WriteStringAndCheck(f, msg) + } + } + + if printToStdErr { + // Must print to stderr for this not to be read by the completion script. + fmt.Fprint(os.Stderr, msg) + } +} + +// CompDebugln prints the specified string with a newline at the end +// to the same file as where the completion script prints its logs. +// Such logs are only printed when the user has set the environment +// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. +func CompDebugln(msg string, printToStdErr bool) { + CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) +} + +// CompError prints the specified completion message to stderr. +func CompError(msg string) { + msg = fmt.Sprintf("[Error] %s", msg) + CompDebug(msg, true) +} + +// CompErrorln prints the specified completion message to stderr with a newline at the end. 
+func CompErrorln(msg string) { + CompError(fmt.Sprintf("%s\n", msg)) +} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go new file mode 100644 index 000000000..bb57fd568 --- /dev/null +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -0,0 +1,219 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.Replace(nameForVar, "-", "_", -1) + nameForVar = strings.Replace(nameForVar, ":", "_", -1) + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +function __%[1]s_debug + set -l file "$BASH_COMP_DEBUG_FILE" + if test -n "$file" + echo "$argv" >> $file + end +end + +function __%[1]s_perform_completion + __%[1]s_debug "Starting __%[1]s_perform_completion" + + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) + + __%[1]s_debug "args: $args" + __%[1]s_debug "last arg: $lastArg" + + set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg" + + __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) + + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. + # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] + + # For Fish, when completing a flag with an = (e.g., -n=) + # completions must be prefixed with the flag + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") + + __%[1]s_debug "Comps: $comps" + __%[1]s_debug "DirectiveLine: $directiveLine" + __%[1]s_debug "flagPrefix: $flagPrefix" + + for comp in $comps + printf "%%s%%s\n" "$flagPrefix" "$comp" + end + + printf "%%s\n" "$directiveLine" +end + +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed +function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + + # Start fresh + set --erase __%[1]s_comp_results + + set -l results (__%[1]s_perform_completion) + __%[1]s_debug "Completion results: $results" + + if test -z "$results" + __%[1]s_debug "No completion, probably due to a failure" + # Might as well do file completion, in case it helps + return 1 + end + + set -l directive (string sub --start 2 $results[-1]) + set --global __%[1]s_comp_results $results[1..-2] + + __%[1]s_debug "Completions are: $__%[1]s_comp_results" + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d + + if test -z "$directive" + set directive 0 + end + + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + 
if test $compErr -eq 1 + __%[1]s_debug "Received error directive: aborting." + # Might as well do file completion, in case it helps + return 1 + end + + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + if test $filefilter -eq 1; or test $dirfilter -eq 1 + __%[1]s_debug "File extension filtering or directory filtering not supported" + # Do full file completion instead + return 1 + end + + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + + __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" + + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. + if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" + + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. + __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end + end + + return 0 +end + +# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves +# so we can properly delete any completions provided by another script. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end + +# Remove any pre-existing completions for the program since we will be handling all of them. 
+complete -c %[2]s -e + +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. +complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' + +`, nameForVar, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +// GenFishCompletion generates fish completion file and writes to the passed writer. +func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genFishComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +// GenFishCompletionFile generates fish completion file. +func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenFishCompletion(outFile, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md new file mode 100644 index 000000000..19b2ed129 --- /dev/null +++ b/vendor/github.com/spf13/cobra/fish_completions.md @@ -0,0 +1,4 @@ +## Generating Fish Completions For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod new file mode 100644 index 000000000..1fb9439dd --- /dev/null +++ b/vendor/github.com/spf13/cobra/go.mod @@ -0,0 +1,11 @@ +module github.com/spf13/cobra + +go 1.14 + +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.0 + github.com/inconshreveable/mousetrap v1.0.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.8.1 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum new file mode 100644 index 000000000..3e22df29a --- /dev/null +++ b/vendor/github.com/spf13/cobra/go.sum @@ -0,0 +1,592 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= 
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go 
v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod 
h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod 
h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= 
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= 
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 
h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api 
v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go new file mode 100644 index 000000000..59234c09f --- /dev/null +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -0,0 +1,285 @@ +// The generated scripts require PowerShell v5.0+ (which comes Windows 10, but +// can be downloaded separately for windows 7 or 8.1). + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- + +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } +} + +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` +} + +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. + # This happens because the $Command does not include the last space. 
+ if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[3]d + $ShellCompDirectiveNoSpace=%[4]d + $ShellCompDirectiveNoFileComp=%[5]d + $ShellCompDirectiveFilterFileExt=%[6]d + $ShellCompDirectiveFilterDirs=%[7]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. + $Program,$Arguments = $Command.Split(" ",2) + $RequestComp="$Program %[2]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" +`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } + + __%[1]s_debug "Calling $RequestComp" + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + $Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. 
+ # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. + "" + return + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporary because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object + + switch ($Mode) { + + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value at the bottom of the suggestions. + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. 
+ # Description will not be shown because thats not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + return c.genPowerShellCompletionFile(filename, false) +} + +// GenPowerShellCompletion generates powershell completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletion(w io.Writer) error { + return c.genPowerShellCompletion(w, false) +} + +// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. +func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { + return c.genPowerShellCompletionFile(filename, true) +} + +// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { + return c.genPowerShellCompletion(w, true) +} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md new file mode 100644 index 000000000..c449f1e5c --- /dev/null +++ b/vendor/github.com/spf13/cobra/powershell_completions.md @@ -0,0 +1,3 @@ +# Generating PowerShell Completions For Your Own cobra.Command + +Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. 
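For quick reference, the exported PowerShell generators added above mirror the other shells' APIs. A minimal sketch of calling them from application code, assuming a `rootCmd` and an output file name chosen purely for illustration (neither is part of this patch), might look like:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{Use: "myapp"}

func main() {
	// Write the completion script (with descriptions) to a file that users can
	// dot-source from their PowerShell profile.
	if err := rootCmd.GenPowerShellCompletionFileWithDesc("myapp.ps1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Alternatively, stream the description-less variant to stdout.
	if err := rootCmd.GenPowerShellCompletion(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```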
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md new file mode 100644 index 000000000..d98a71e36 --- /dev/null +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -0,0 +1,38 @@ +## Projects using Cobra + +- [Arduino CLI](https://github.com/arduino/arduino-cli) +- [Bleve](http://www.blevesearch.com/) +- [CockroachDB](http://www.cockroachlabs.com/) +- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) +- [Delve](https://github.com/derekparker/delve) +- [Docker (distribution)](https://github.com/docker/distribution) +- [Etcd](https://etcd.io/) +- [Gardener](https://github.com/gardener/gardenctl) +- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) +- [Git Bump](https://github.com/erdaltsksn/git-bump) +- [Github CLI](https://github.com/cli/cli) +- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) +- [Golangci-lint](https://golangci-lint.run) +- [GopherJS](http://www.gopherjs.org/) +- [Helm](https://helm.sh) +- [Hugo](https://gohugo.io) +- [Istio](https://istio.io) +- [Kool](https://github.com/kool-dev/kool) +- [Kubernetes](http://kubernetes.io/) +- [Linkerd](https://linkerd.io/) +- [Mattermost-server](https://github.com/mattermost/mattermost-server) +- [Metal Stack CLI](https://github.com/metal-stack/metalctl) +- [Moby (former Docker)](https://github.com/moby/moby) +- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +- [OpenShift](https://www.openshift.com/) +- [Ory Hydra](https://github.com/ory/hydra) +- [Ory Kratos](https://github.com/ory/kratos) +- [Pouch](https://github.com/alibaba/pouch) +- [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +- [Prototool](https://github.com/uber/prototool) +- [Random](https://github.com/erdaltsksn/random) +- [Rclone](https://rclone.org/) +- [Skaffold](https://skaffold.dev/) +- [Tendermint](https://github.com/tendermint/tendermint) +- [Twitch CLI](https://github.com/twitchdev/twitch-cli) +- [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go new file mode 100644 index 000000000..d99bf91e5 --- /dev/null +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -0,0 +1,84 @@ +package cobra + +import ( + "github.com/spf13/pflag" +) + +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired instructs the various shell completion implementations to +// prioritize the named persistent flag when performing completion, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, +// and causes your command to report an error if invoked without the flag. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for the named flag to the specified file extensions. 
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// The bash completion script will call the bash function f for the flag. +// +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename instructs the various shell completion +// implementations to limit completions for the named persistent flag to the +// specified file extensions. +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for the named flag to the specified file extensions. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// The bash completion script will call the bash function f for the flag. +// +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// limit completions for the named flag to directory names. +func (c *Command) MarkFlagDirname(name string) error { + return MarkFlagDirname(c.Flags(), name) +} + +// MarkPersistentFlagDirname instructs the various shell completion +// implementations to limit completions for the named persistent flag to +// directory names. +func (c *Command) MarkPersistentFlagDirname(name string) error { + return MarkFlagDirname(c.PersistentFlags(), name) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// limit completions for the named flag to directory names. +func MarkFlagDirname(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) +} diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md new file mode 100644 index 000000000..4ba06a11c --- /dev/null +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -0,0 +1,546 @@ +# Generating shell completions + +Cobra can generate shell completions for multiple shells. +The currently supported shells are: +- Bash +- Zsh +- fish +- PowerShell + +Cobra will automatically provide your program with a fully functional `completion` command, +similarly to how it provides the `help` command. + +## Creating your own completion command + +If you do not wish to use the default `completion` command, you can choose to +provide your own, which will take precedence over the default one. (This also provides +backwards-compatibility with programs that already have their own `completion` command.) 
+ +If you are using the generator, you can create a completion command by running + +```bash +cobra add completion +``` +and then modifying the generated `cmd/completion.go` file to look something like this +(writing the shell script to stdout allows the most flexible use): + +```go +var completionCmd = &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", + Long: `To load completions: + +Bash: + + $ source <(yourprogram completion bash) + + # To load completions for each session, execute once: + # Linux: + $ yourprogram completion bash > /etc/bash_completion.d/yourprogram + # macOS: + $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ yourprogram completion fish | source + + # To load completions for each session, execute once: + $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + +PowerShell: + + PS> yourprogram completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> yourprogram completion powershell > yourprogram.ps1 + # and source this file from your PowerShell profile. +`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.ExactValidArgs(1), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + }, +} +``` + +**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. + +## Adapting the default completion command + +Cobra provides a few options for the default `completion` command. To configure such options you must set +the `CompletionOptions` field on the *root* command. + +To tell Cobra *not* to provide the default `completion` command: +``` +rootCmd.CompletionOptions.DisableDefaultCmd = true +``` + +To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: +``` +rootCmd.CompletionOptions.DisableNoDescFlag = true +``` + +To tell Cobra to completely disable descriptions for completions: +``` +rootCmd.CompletionOptions.DisableDescriptions = true +``` + +# Customizing completions + +The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. + +## Completion of nouns + +### Static completion of nouns + +Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. +For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. 
+Some simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		cobra.CheckErr(RunGet(f, out, cmd, args))
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like:
+
+```bash
+$ kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+#### Aliases for nouns
+
+If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+	...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+$ kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
+replication controllers following `rc`.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution time. In a similar fashion to static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+	Use:   "status RELEASE_NAME",
+	Short: "Display the status of the named release",
+	Long:  status_long,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return RunGet(args[0])
+	},
+	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		if len(args) != 0 {
+			return nil, cobra.ShellCompDirectiveNoFileComp
+		}
+		return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+	},
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`; then this dynamic completion will give results like:
+
+```bash
+$ helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that let you control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator, such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`.
+```go
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies none of the other directives).
+ShellCompDirectiveDefault
+
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace + +// Indicates that the shell should not provide file completion even when +// no completion is provided. +ShellCompDirectiveNoFileComp + +// Indicates that the returned completions should be used as file extension filters. +// For example, to complete only files of the form *.json or *.yaml: +// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt +// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() +// is a shortcut to using this directive explicitly. +// +ShellCompDirectiveFilterFileExt + +// Indicates that only directory names should be provided in file completion. +// For example: +// return nil, ShellCompDirectiveFilterDirs +// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. +// +// To request directory names within another directory, the returned completions +// should specify a single directory name within which to search. For example, +// to complete directories within "themes/": +// return []string{"themes"}, ShellCompDirectiveFilterDirs +// +ShellCompDirectiveFilterDirs +``` + +***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. + +#### Debugging + +Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly: +```bash +$ helm __complete status har +harbor +:4 +Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr +``` +***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: +```bash +$ helm __complete status "" +harbor +notary +rook +thanos +:4 +Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr +``` +Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: +```go +// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE +// is set to a file path) and optionally prints to stderr. +cobra.CompDebug(msg string, printToStdErr bool) { +cobra.CompDebugln(msg string, printToStdErr bool) + +// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE +// is set to a file path) and to stderr. +cobra.CompError(msg string) +cobra.CompErrorln(msg string) +``` +***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. + +## Completions for flags + +### Mark flags as required + +Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. 
You can mark a flag as 'Required' like so: + +```go +cmd.MarkFlagRequired("pod") +cmd.MarkFlagRequired("container") +``` + +and you'll get something like + +```bash +$ kubectl exec [tab][tab] +-c --container= -p --pod= +``` + +### Specify dynamic flag completion + +As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. + +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault +}) +``` +Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: + +```bash +$ helm status --output [tab][tab] +json table yaml +``` + +#### Debugging + +You can also easily debug your Go completion code for flags: +```bash +$ helm __complete status --output "" +json +table +yaml +:4 +Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr +``` +***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. + +### Specify valid filename extensions for flags that take a filename + +To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: +```go +flagName := "output" +cmd.MarkFlagFilename(flagName, "yaml", "json") +``` +or +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) +``` + +### Limit flag completions to directory names + +To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: +```go +flagName := "output" +cmd.MarkFlagDirname(flagName) +``` +or +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveFilterDirs +}) +``` +To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs +}) +``` +### Descriptions for completions + +Cobra provides support for completion descriptions. Such descriptions are supported for each shell +(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). +For commands and flags, Cobra will provide the descriptions automatically, based on usage information. 
+For example, using zsh: +``` +$ helm s[tab] +search -- search for a keyword in charts +show -- show information of a chart +status -- displays the status of the named release +``` +while using fish: +``` +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) +``` + +Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: +```go +ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp +}} +``` +or +```go +ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} +``` +## Bash completions + +### Dependencies + +The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) + +### Aliases + +You can also configure `bash` aliases for your program and they will also support completions. + +```bash +alias aliasname=origcommand +complete -o default -F __start_origcommand aliasname + +# and now when you run `aliasname` completion will make +# suggestions as it did for `origcommand`. + +$ aliasname +completion firstcommand secondcommand +``` +### Bash legacy dynamic completions + +For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. +Please refer to [Bash Completions](bash_completions.md) for details. + +### Bash completion V2 + +Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling +`GenBashCompletion()` or `GenBashCompletionFile()`. + +A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or +`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion +(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion +solution described in this document. +Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash +completion V2 solution which provides the following extra features: +- Supports completion descriptions (like the other shells) +- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) +- Streamlined user experience thanks to a completion behavior aligned with the other shells + +`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` +you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra +will provide the description automatically based on usage information. You can choose to make this option configurable by +your users. 
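As a rough sketch of making that choice configurable, a hand-rolled `completion bash` subcommand could forward a hypothetical `--no-descriptions` flag to `GenBashCompletionV2()` (snippet style follows the surrounding examples; the flag name and command wiring are assumptions for illustration):

```go
var bashCompletionCmd = &cobra.Command{
	Use:   "bash",
	Short: "Generate the bash completion script (V2)",
	Args:  cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		noDesc, err := cmd.Flags().GetBool("no-descriptions")
		if err != nil {
			return err
		}
		// Pass includeDesc based on the user's choice.
		return cmd.Root().GenBashCompletionV2(os.Stdout, !noDesc)
	},
}

func init() {
	bashCompletionCmd.Flags().Bool("no-descriptions", false, "disable completion descriptions")
}
```

The sample output below shows the difference the descriptions make.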
+
+```
+# With descriptions
+$ helm s[tab][tab]
+search (search for a keyword in charts) status (display the status of the named release)
+show (show information of a chart)
+
+# Without descriptions
+$ helm s[tab][tab]
+search show status
+```
+**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
+## Zsh completions
+
+Cobra supports native zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` and be named
+`_<yourprogram>`. You will need to start a new shell for the completions to become available.
+
+Zsh supports descriptions for completions. Cobra will provide the description automatically,
+based on usage information. Cobra provides a way to completely disable such descriptions by
+using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
+this a configurable option for your users.
+```
+# With descriptions
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
+  * You should instead use `RegisterFlagCompletionFunc()`.
+
+### Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
+Please refer to [Zsh Completions](zsh_completions.md) for details.
+
+## fish completions
+
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+```
+# With descriptions
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
+ * You should instead use `RegisterFlagCompletionFunc()`. +* The following flag completion annotations are not supported and will be ignored for `fish`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `fish`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) + +## PowerShell completions + +Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. + +The script is designed to support all three PowerShell completion modes: + +* TabCompleteNext (default windows style - on each key press the next option is displayed) +* Complete (works like bash) +* MenuComplete (works like zsh) + +You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. + +Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. + +``` +# With descriptions and Mode 'Complete' +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) + +# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. +$ helm s[tab] +search show status + +search for a keyword in charts + +# Without descriptions +$ helm s[tab] +search show status +``` + +### Limitations + +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). +* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. + * You should instead use `RegisterFlagCompletionFunc()`. 
+* The following flag completion annotations are not supported and will be ignored for `powershell`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `powershell`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md new file mode 100644 index 000000000..311abce28 --- /dev/null +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -0,0 +1,637 @@ +# User Guide + +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. + +## Using the Cobra Library + +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. +You will optionally provide additional commands as you see fit. + +### Create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. + cfgFile string + userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. 
+func Execute() error { + return rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigType("yaml") + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Returning and handling errors + +If you wish to return an error to the caller of a command, `RunE` can be used. + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(tryCmd) +} + +var tryCmd = &cobra.Command{ + Use: "try", + Short: "Try and possibly fail at something", + RunE: func(cmd *cobra.Command, args []string) error { + if err := someFunc(); err != nil { + return err + } + return nil + }, +} +``` + +The error can then be caught at the execute function call. + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. 
+ +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. + +```go +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, +when the `--author` flag is not provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +Or, for persistent flags: +```go +rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkPersistentFlagRequired("region") +``` + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field +of `Command`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. +- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. + +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires a color argument") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. 
In this case the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Echo: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra help + + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. + + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. 
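For example, a replacement help command might be wired up roughly like this; it is a sketch only, and the body simply reuses Cobra's default rendering where custom output would normally go:

```go
rootCmd.SetHelpCommand(&cobra.Command{
	Use:   "help [command]",
	Short: "Help about any command",
	Run: func(cmd *cobra.Command, args []string) {
		// Find the command the user asked about, falling back to the root.
		target, _, err := cmd.Root().Find(args)
		if err != nil || target == nil {
			target = cmd.Root()
		}
		// Custom rendering would go here; this just reuses the default template.
		cobra.CheckErr(target.Help())
	},
})
```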
+ +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. 
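As a rough illustration only (the `cobra/doc` subpackage used here is separate from the core library and is not part of this patch excerpt; the application name and output directory are arbitrary):

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "myapp", Short: "An example application"}

	// Render one Markdown page per command, including flags and subcommands.
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```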
Read more about it in the [docs generation documentation](doc/README.md). + +## Generating shell completions + +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 889c22e27..1afec30ea 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -5,122 +5,254 @@ import ( "fmt" "io" "os" - "strings" ) -// GenZshCompletionFile generates zsh completion file. +// GenZshCompletionFile generates zsh completion file including descriptions. func (c *Command) GenZshCompletionFile(filename string) error { + return c.genZshCompletionFile(filename, true) +} + +// GenZshCompletion generates zsh completion file including descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletion(w io.Writer) error { + return c.genZshCompletion(w, true) +} + +// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. +func (c *Command) GenZshCompletionFileNoDesc(filename string) error { + return c.genZshCompletionFile(filename, false) +} + +// GenZshCompletionNoDesc generates zsh completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { + return c.genZshCompletion(w, false) +} + +// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was +// not consistent with Bash completion. It has therefore been disabled. +// Instead, when no other completion is specified, file completion is done by +// default for every argument. One can disable file completion on a per-argument +// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. +// To achieve file extension filtering, one can use ValidArgsFunction and +// ShellCompDirectiveFilterFileExt. +// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { + return nil +} + +// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore +// been disabled. +// To achieve the same behavior across all shells, one can use +// ValidArgs (for the first argument only) or ValidArgsFunction for +// any argument (can include the first one also). +// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { + return nil +} + +func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { outFile, err := os.Create(filename) if err != nil { return err } defer outFile.Close() - return c.GenZshCompletion(outFile) + return c.genZshCompletion(outFile, includeDesc) } -// GenZshCompletion generates a zsh completion file and writes to the passed writer. 
-func (c *Command) GenZshCompletion(w io.Writer) error { +func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { buf := new(bytes.Buffer) - - writeHeader(buf, c) - maxDepth := maxDepth(c) - writeLevelMapping(buf, maxDepth) - writeLevelCases(buf, maxDepth, c) - + genZshComp(buf, c.Name(), includeDesc) _, err := buf.WriteTo(w) return err } -func writeHeader(w io.Writer, cmd *Command) { - fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) -} - -func maxDepth(c *Command) int { - if len(c.Commands()) == 0 { - return 0 +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd } - maxDepthSub := 0 - for _, s := range c.Commands() { - subDepth := maxDepth(s) - if subDepth > maxDepthSub { - maxDepthSub = subDepth - } - } - return 1 + maxDepthSub -} + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s -func writeLevelMapping(w io.Writer, numLevels int) { - fmt.Fprintln(w, `_arguments \`) - for i := 1; i <= numLevels; i++ { - fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) - fmt.Fprintln(w) - } - fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") - fmt.Fprintln(w) +# zsh completion for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + local file="$BASH_COMP_DEBUG_FILE" + if [[ -n ${file} ]]; then + echo "$*" >> "${file}" + fi } -func writeLevelCases(w io.Writer, maxDepth int, root *Command) { - fmt.Fprintln(w, "case $state in") - defer fmt.Fprintln(w, "esac") +_%[1]s() +{ + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d - for i := 1; i <= maxDepth; i++ { - fmt.Fprintf(w, " level%d)\n", i) - writeLevel(w, root, i) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") -} + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local -a completions -func writeLevel(w io.Writer, root *Command, i int) { - fmt.Fprintf(w, " case $words[%d] in\n", i) - defer fmt.Fprintln(w, " esac") + __%[1]s_debug "\n========= starting completion logic ==========" + __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" - commands := filterByLevel(root, i) - byParent := groupByParent(commands) + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CURRENT location, so we need + # to truncate the command-line ($words) up to the $CURRENT location. + # (We cannot use $CURSOR as its value does not work when a command is an alias.) 
+ words=("${=words[1,CURRENT]}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," - for p, c := range byParent { - names := names(c) - fmt.Fprintf(w, " %s)\n", p) - fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") + lastParam=${words[-1]} + lastChar=${lastParam[-1]} + __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" -} + # For zsh, when completing a flag with an = (e.g., %[1]s -n=) + # completions must be prefixed with the flag + setopt local_options BASH_REMATCH + if [[ "${lastParam}" =~ '-.*=' ]]; then + # We are dealing with a flag with an = + flagPrefix="-P ${BASH_REMATCH}" + fi -func filterByLevel(c *Command, l int) []*Command { - cs := make([]*Command, 0) - if l == 0 { - cs = append(cs, c) - return cs - } - for _, s := range c.Commands() { - cs = append(cs, filterByLevel(s, l-1)...) - } - return cs -} + # Prepare the command to obtain completions + requestComp="${words[1]} %[2]s ${words[2,-1]}" + if [ "${lastChar}" = "" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go completion code. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi -func groupByParent(commands []*Command) map[string][]*Command { - m := make(map[string][]*Command) - for _, c := range commands { - parent := c.Parent() - if parent == nil { - continue - } - m[parent.Name()] = append(m[parent.Name()], c) - } - return m + __%[1]s_debug "About to call: eval ${requestComp}" + + # Use eval to handle any environment variables and such + out=$(eval ${requestComp} 2>/dev/null) + __%[1]s_debug "completion output: ${out}" + + # Extract the directive integer following a : from the last line + local lastLine + while IFS='\n' read -r line; do + lastLine=${line} + done < <(printf "%%s\n" "${out[@]}") + __%[1]s_debug "last line: ${lastLine}" + + if [ "${lastLine[1]}" = : ]; then + directive=${lastLine[2,-1]} + # Remove the directive including the : and the newline + local suffix + (( suffix=${#lastLine}+2)) + out=${out[1,-$suffix]} + else + # There is no directive specified. Leave $out as is. + __%[1]s_debug "No directive found. Setting do default" + directive=0 + fi + + __%[1]s_debug "directive: ${directive}" + __%[1]s_debug "completions: ${out}" + __%[1]s_debug "flagPrefix: ${flagPrefix}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + __%[1]s_debug "Completion received error. Ignoring completions." + return + fi + + while IFS='\n' read -r comp; do + if [ -n "$comp" ]; then + # If requested, completions are returned with a description. + # The description is preceded by a TAB character. + # For zsh's _describe, we need to use a : instead of a TAB. + # We first need to escape any : as part of the completion itself. + comp=${comp//:/\\:} + + local tab=$(printf '\t') + comp=${comp//$tab/:} + + __%[1]s_debug "Adding completion: ${comp}" + completions+=${comp} + lastComp=$comp + fi + done < <(printf "%%s\n" "${out[@]}") + + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." 
+ noSpace="-S ''" + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local filteringCmd + filteringCmd='_files' + for filter in ${completions[@]}; do + if [ ${filter[1]} != '*' ]; then + # zsh requires a glob pattern to do file filtering + filter="\*.$filter" + fi + filteringCmd+=" -g $filter" + done + filteringCmd+=" ${flagPrefix}" + + __%[1]s_debug "File filtering command: $filteringCmd" + _arguments '*:filename:'"$filteringCmd" + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subDir + subdir="${completions[1]}" + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "${subdir}" >/dev/null 2>&1 + else + __%[1]s_debug "Listing directories in ." + fi + + local result + _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? + if [ -n "$subdir" ]; then + popd >/dev/null 2>&1 + fi + return $result + else + __%[1]s_debug "Calling _describe" + if eval _describe "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. + _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi + fi } -func names(commands []*Command) []string { - ns := make([]string, len(commands)) - for i, c := range commands { - ns[i] = c.Name() - } - return ns +# don't run the completion function when being source-ed or eval-ed +if [ "$funcstack[1]" = "_%[1]s" ]; then + _%[1]s +fi +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) } diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md new file mode 100644 index 000000000..7cff61787 --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.md @@ -0,0 +1,48 @@ +## Generating Zsh Completion For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + +## Zsh completions standardization + +Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. + +### Deprecation summary + +See further below for more details on these deprecations. + +* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. +* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. +* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction`. 
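
The deprecation notes above point to `ValidArgsFunction` together with `ShellCompDirectiveFilterFileExt` (or `ShellCompDirectiveNoFileComp`) as the portable replacement. The following is a minimal sketch of that replacement, not part of the vendored file: the `apply` command name, the `yaml`/`yml` extensions, and the `dry-run`/`force` completion words are illustrative assumptions only.

```go
// Illustrative sketch (assumed example, not from the vendored sources):
// completes *.yaml / *.yml files for the first positional argument and a
// fixed word list for subsequent arguments, replacing the deprecated
// MarkZshCompPositionalArgumentFile / MarkZshCompPositionalArgumentWords.
package main

import "github.com/spf13/cobra"

func newApplyCmd() *cobra.Command {
	return &cobra.Command{
		Use: "apply [manifest]",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			if len(args) == 0 {
				// First argument: restrict file completion to these extensions.
				return []string{"yaml", "yml"}, cobra.ShellCompDirectiveFilterFileExt
			}
			// Later arguments: offer fixed words and disable file completion.
			return []string{"dry-run", "force"}, cobra.ShellCompDirectiveNoFileComp
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
}

func main() {
	root := &cobra.Command{Use: "example"}
	root.AddCommand(newApplyCmd())
	_ = root.Execute()
}
```

Because the completion logic lives in `ValidArgsFunction` rather than in a zsh-specific marker, the same function drives completion for bash, zsh, fish and PowerShell, which is what the standardization described below is about.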
+ +### Behavioral changes + +**Noun completion** +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| +|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| +`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| +|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| +|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| + +**Flag-value completion** + +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| +|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| +|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| +|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| +|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| +|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| + +**Improvements** + +* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) +* File completion by default if no other completions found +* Handling of required flags +* File extension filtering no longer mutually exclusive with bash usage +* Completion of directory names *within* another directory +* Support for `=` form of flags diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore new file mode 100644 index 000000000..a71f88af8 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.bench +go.sum \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE new file mode 100644 index 000000000..4527efb9c 
--- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md new file mode 100644 index 000000000..932a23fc6 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/README.md @@ -0,0 +1,148 @@ +jWalterWeatherman +================= + +Seamless printing to the terminal (stdout) and logging to a io.Writer +(file) that’s as easy to use as fmt.Println. + +![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg) +Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422) + +JWW is primarily a wrapper around the excellent standard log library. It +provides a few advantages over using the standard log library alone. + +1. Ready to go out of the box. +2. One library for both printing to the terminal and logging (to files). +3. Really easy to log to either a temp file or a file you specify. + + +I really wanted a very straightforward library that could seamlessly do +the following things. + +1. Replace all the println, printf, etc statements thoughout my code with + something more useful +2. Allow the user to easily control what levels are printed to stdout +3. Allow the user to easily control what levels are logged +4. Provide an easy mechanism (like fmt.Println) to print info to the user + which can be easily logged as well +5. Due to 2 & 3 provide easy verbose mode for output and logs +6. Not have any unnecessary initialization cruft. Just use it. + +# Usage + +## Step 1. Use it +Put calls throughout your source based on type of feedback. +No initialization or setup needs to happen. Just start calling things. + +Available Loggers are: + + * TRACE + * DEBUG + * INFO + * WARN + * ERROR + * CRITICAL + * FATAL + +These each are loggers based on the log standard library and follow the +standard usage. Eg. + +```go + import ( + jww "github.com/spf13/jwalterweatherman" + ) + + ... + + if err != nil { + + // This is a pretty serious error and the user should know about + // it. It will be printed to the terminal as well as logged under the + // default thresholds. 
+ + jww.ERROR.Println(err) + } + + if err2 != nil { + // This error isn’t going to materially change the behavior of the + // application, but it’s something that may not be what the user + // expects. Under the default thresholds, Warn will be logged, but + // not printed to the terminal. + + jww.WARN.Println(err2) + } + + // Information that’s relevant to what’s happening, but not very + // important for the user. Under the default thresholds this will be + // discarded. + + jww.INFO.Printf("information %q", response) + +``` + +NOTE: You can also use the library in a non-global setting by creating an instance of a Notebook: + +```go +notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) +notepad.WARN.Println("Some warning"") +``` + +_Why 7 levels?_ + +Maybe you think that 7 levels are too much for any application... and you +are probably correct. Just because there are seven levels doesn’t mean +that you should be using all 7 levels. Pick the right set for your needs. +Remember they only have to mean something to your project. + +## Step 2. Optionally configure JWW + +Under the default thresholds : + + * Debug, Trace & Info goto /dev/null + * Warn and above is logged (when a log file/io.Writer is provided) + * Error and above is printed to the terminal (stdout) + +### Changing the thresholds + +The threshold can be changed at any time, but will only affect calls that +execute after the change was made. + +This is very useful if your application has a verbose mode. Of course you +can decide what verbose means to you or even have multiple levels of +verbosity. + + +```go + import ( + jww "github.com/spf13/jwalterweatherman" + ) + + if Verbose { + jww.SetLogThreshold(jww.LevelTrace) + jww.SetStdoutThreshold(jww.LevelInfo) + } +``` + +Note that JWW's own internal output uses log levels as well, so set the log +level before making any other calls if you want to see what it's up to. + + +### Setting a log file + +JWW can log to any `io.Writer`: + + +```go + + jww.SetLogOutput(customWriter) + +``` + + +# More information + +This is an early release. I’ve been using it for a while and this is the +third interface I’ve tried. I like this one pretty well, but no guarantees +that it won’t change a bit. + +I wrote this for use in [hugo](https://gohugo.io). If you are looking +for a static website engine that’s super fast please checkout Hugo. diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go new file mode 100644 index 000000000..a018c15c4 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go @@ -0,0 +1,111 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. 
+ +package jwalterweatherman + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +var ( + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + defaultNotepad *Notepad +) + +func reloadDefaultNotepad() { + TRACE = defaultNotepad.TRACE + DEBUG = defaultNotepad.DEBUG + INFO = defaultNotepad.INFO + WARN = defaultNotepad.WARN + ERROR = defaultNotepad.ERROR + CRITICAL = defaultNotepad.CRITICAL + FATAL = defaultNotepad.FATAL + + LOG = defaultNotepad.LOG + FEEDBACK = defaultNotepad.FEEDBACK +} + +func init() { + defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) + reloadDefaultNotepad() +} + +// SetLogThreshold set the log threshold for the default notepad. Trace by default. +func SetLogThreshold(threshold Threshold) { + defaultNotepad.SetLogThreshold(threshold) + reloadDefaultNotepad() +} + +// SetLogOutput set the log output for the default notepad. Discarded by default. +func SetLogOutput(handle io.Writer) { + defaultNotepad.SetLogOutput(handle) + reloadDefaultNotepad() +} + +// SetStdoutThreshold set the standard output threshold for the default notepad. +// Info by default. +func SetStdoutThreshold(threshold Threshold) { + defaultNotepad.SetStdoutThreshold(threshold) + reloadDefaultNotepad() +} + +// SetStdoutOutput set the stdout output for the default notepad. Default is stdout. +func SetStdoutOutput(handle io.Writer) { + defaultNotepad.outHandle = handle + defaultNotepad.init() + reloadDefaultNotepad() +} + +// SetPrefix set the prefix for the default logger. Empty by default. +func SetPrefix(prefix string) { + defaultNotepad.SetPrefix(prefix) + reloadDefaultNotepad() +} + +// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default. +func SetFlags(flags int) { + defaultNotepad.SetFlags(flags) + reloadDefaultNotepad() +} + +// SetLogListeners configures the default logger with one or more log listeners. +func SetLogListeners(l ...LogListener) { + defaultNotepad.logListeners = l + defaultNotepad.init() + reloadDefaultNotepad() +} + +// Level returns the current global log threshold. +func LogThreshold() Threshold { + return defaultNotepad.logThreshold +} + +// Level returns the current global output threshold. +func StdoutThreshold() Threshold { + return defaultNotepad.stdoutThreshold +} + +// GetStdoutThreshold returns the defined Treshold for the log logger. +func GetLogThreshold() Threshold { + return defaultNotepad.GetLogThreshold() +} + +// GetStdoutThreshold returns the Treshold for the stdout logger. +func GetStdoutThreshold() Threshold { + return defaultNotepad.GetStdoutThreshold() +} diff --git a/vendor/github.com/spf13/jwalterweatherman/go.mod b/vendor/github.com/spf13/jwalterweatherman/go.mod new file mode 100644 index 000000000..1dbcfd3e8 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/go.mod @@ -0,0 +1,7 @@ +module github.com/spf13/jwalterweatherman + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go new file mode 100644 index 000000000..41285f3dc --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go @@ -0,0 +1,46 @@ +// Copyright © 2016 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "io" + "sync/atomic" +) + +// Counter is an io.Writer that increments a counter on Write. +type Counter struct { + count uint64 +} + +func (c *Counter) incr() { + atomic.AddUint64(&c.count, 1) +} + +// Reset resets the counter. +func (c *Counter) Reset() { + atomic.StoreUint64(&c.count, 0) +} + +// Count returns the current count. +func (c *Counter) Count() uint64 { + return atomic.LoadUint64(&c.count) +} + +func (c *Counter) Write(p []byte) (n int, err error) { + c.incr() + return len(p), nil +} + +// LogCounter creates a LogListener that counts log statements >= the given threshold. +func LogCounter(counter *Counter, t1 Threshold) LogListener { + return func(t2 Threshold) io.Writer { + if t2 < t1 { + // Not interested in this threshold. + return nil + } + return counter + } +} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go new file mode 100644 index 000000000..cc7957bf7 --- /dev/null +++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go @@ -0,0 +1,225 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package jwalterweatherman + +import ( + "fmt" + "io" + "io/ioutil" + "log" +) + +type Threshold int + +func (t Threshold) String() string { + return prefixes[t] +} + +const ( + LevelTrace Threshold = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelCritical + LevelFatal +) + +var prefixes map[Threshold]string = map[Threshold]string{ + LevelTrace: "TRACE", + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarn: "WARN", + LevelError: "ERROR", + LevelCritical: "CRITICAL", + LevelFatal: "FATAL", +} + +// Notepad is where you leave a note! +type Notepad struct { + TRACE *log.Logger + DEBUG *log.Logger + INFO *log.Logger + WARN *log.Logger + ERROR *log.Logger + CRITICAL *log.Logger + FATAL *log.Logger + + LOG *log.Logger + FEEDBACK *Feedback + + loggers [7]**log.Logger + logHandle io.Writer + outHandle io.Writer + logThreshold Threshold + stdoutThreshold Threshold + prefix string + flags int + + logListeners []LogListener +} + +// A LogListener can ble supplied to a Notepad to listen on log writes for a given +// threshold. This can be used to capture log events in unit tests and similar. +// Note that this function will be invoked once for each log threshold. If +// the given threshold is not of interest to you, return nil. +// Note that these listeners will receive log events for a given threshold, even +// if the current configuration says not to log it. That way you can count ERRORs even +// if you don't print them to the console. +type LogListener func(t Threshold) io.Writer + +// NewNotepad creates a new Notepad. 
+func NewNotepad( + outThreshold Threshold, + logThreshold Threshold, + outHandle, logHandle io.Writer, + prefix string, flags int, + logListeners ...LogListener, +) *Notepad { + + n := &Notepad{logListeners: logListeners} + + n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} + n.outHandle = outHandle + n.logHandle = logHandle + n.stdoutThreshold = outThreshold + n.logThreshold = logThreshold + + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + + n.flags = flags + + n.LOG = log.New(n.logHandle, + "LOG: ", + n.flags) + n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} + + n.init() + return n +} + +// init creates the loggers for each level depending on the notepad thresholds. +func (n *Notepad) init() { + logAndOut := io.MultiWriter(n.outHandle, n.logHandle) + + for t, logger := range n.loggers { + threshold := Threshold(t) + prefix := n.prefix + threshold.String() + " " + + switch { + case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: + *logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags) + + case threshold >= n.logThreshold: + *logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags) + + case threshold >= n.stdoutThreshold: + *logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags) + + default: + *logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags) + } + } +} + +func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer { + if len(n.logListeners) == 0 { + return handle + } + writers := []io.Writer{handle} + for _, l := range n.logListeners { + w := l(t) + if w != nil { + writers = append(writers, w) + } + } + + if len(writers) == 1 { + return handle + } + + return io.MultiWriter(writers...) +} + +// SetLogThreshold changes the threshold above which messages are written to the +// log file. +func (n *Notepad) SetLogThreshold(threshold Threshold) { + n.logThreshold = threshold + n.init() +} + +// SetLogOutput changes the file where log messages are written. +func (n *Notepad) SetLogOutput(handle io.Writer) { + n.logHandle = handle + n.init() +} + +// GetStdoutThreshold returns the defined Treshold for the log logger. +func (n *Notepad) GetLogThreshold() Threshold { + return n.logThreshold +} + +// SetStdoutThreshold changes the threshold above which messages are written to the +// standard output. +func (n *Notepad) SetStdoutThreshold(threshold Threshold) { + n.stdoutThreshold = threshold + n.init() +} + +// GetStdoutThreshold returns the Treshold for the stdout logger. +func (n *Notepad) GetStdoutThreshold() Threshold { + return n.stdoutThreshold +} + +// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between +// brackets at the beginning of the line. An empty prefix won't be displayed at all. +func (n *Notepad) SetPrefix(prefix string) { + if len(prefix) != 0 { + n.prefix = "[" + prefix + "] " + } else { + n.prefix = "" + } + n.init() +} + +// SetFlags choose which flags the logger will display (after prefix and message +// level). See the package log for more informations on this. +func (n *Notepad) SetFlags(flags int) { + n.flags = flags + n.init() +} + +// Feedback writes plainly to the outHandle while +// logging with the standard extra information (date, file, etc). 
+type Feedback struct { + out *log.Logger + log *log.Logger +} + +func (fb *Feedback) Println(v ...interface{}) { + fb.output(fmt.Sprintln(v...)) +} + +func (fb *Feedback) Printf(format string, v ...interface{}) { + fb.output(fmt.Sprintf(format, v...)) +} + +func (fb *Feedback) Print(v ...interface{}) { + fb.output(fmt.Sprint(v...)) +} + +func (fb *Feedback) output(s string) { + if fb.out != nil { + fb.out.Output(2, s) + } + if fb.log != nil { + fb.log.Output(2, s) + } +} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index f8a63b308..00d04cb9b 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,8 +3,9 @@ sudo: false language: go go: - - 1.7.3 - - 1.8.1 + - 1.9.x + - 1.10.x + - 1.11.x - tip matrix: @@ -12,7 +13,7 @@ matrix: - go: tip install: - - go get github.com/golang/lint/golint + - go get golang.org/x/lint/golint - export PATH=$GOPATH/bin:$PATH - go install ./... diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index b052414d1..7eacc5bdb 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a7..3731370d6 100644 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index aa126e44d..a0b2679f7 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc1..badadda53 100644 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 5cc710ccd..24a5036e9 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) @@ -925,13 +937,16 @@ func stripUnknownFlagValue(args []string) []string { } first := args[0] - if first[0] == '-' { + if len(first) > 0 && first[0] == '-' { //--unknown --next-flag ... return args } //--unknown arg ... (args will be arg ...) - return args[1:] + if len(args) > 1 { + return args[1:] + } + return nil } func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 000000000..caa352741 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. 
+// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 000000000..85bf3073d --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 000000000..b2287eec1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 000000000..ff128ff06 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 000000000..25464638f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edde..e71c39d91 100644 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe3..775faae4f 100644 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s *ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index fa7bc6018..4894af818 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + 
return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc08..3cb2e69db 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go new file mode 100644 index 000000000..5ceda3965 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt Value +type stringToIntValue struct { + value *map[string]int + changed bool +} + +func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { + ssv := new(stringToIntValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToIntValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToIntValue) Type() string { + return "stringToInt" +} + +func (s *stringToIntValue) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.Itoa(v)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToIntConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.Atoi(kv[1]) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt return the map[string]int value of a flag with the given name +func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { + val, err := f.getFlagType(name, "stringToInt", stringToIntConv) + if err != nil { + return map[string]int{}, err + } + return val.(map[string]int), nil +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + f.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToIntVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]int variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) +} + +// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. +func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { + CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, "", value, usage) + return &p +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { + p := map[string]int{} + f.StringToIntVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt(name string, value map[string]int, usage string) *map[string]int { + return CommandLine.StringToIntP(name, "", value, usage) +} + +// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { + return CommandLine.StringToIntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 000000000..a807a04a0 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64Var defines a string flag with specified name, default value, and usage string. +// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
+// The value of each argument will not try to be separated by comma +func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) +} + +// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. +func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { + CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, "", value, usage) + return &p +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + p := map[string]int64{} + f.StringToInt64VarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToInt64 defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]int64 variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, "", value, usage) +} + +// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { + return CommandLine.StringToInt64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go new file mode 100644 index 000000000..890a01afc --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -0,0 +1,160 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "fmt" + "strings" +) + +// -- stringToString Value +type stringToStringValue struct { + value *map[string]string + changed bool +} + +func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { + ssv := new(stringToStringValue) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToStringValue) Set(val string) error { + var ss []string + n := strings.Count(val, "=") + switch n { + case 0: + return fmt.Errorf("%s must be formatted as key=value", val) + case 1: + ss = append(ss, strings.Trim(val, `"`)) + default: + r := csv.NewReader(strings.NewReader(val)) + var err error + ss, err = r.Read() + if err != nil { + return err + } + } + + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToStringValue) Type() string { + return "stringToString" +} + +func (s *stringToStringValue) String() string { + records := make([]string, 0, len(*s.value)>>1) + for k, v := range *s.value { + records = append(records, k+"="+v) + } + + var buf bytes.Buffer + w := csv.NewWriter(&buf) + if err := w.Write(records); err != nil { + panic(err) + } + w.Flush() + return "[" + strings.TrimSpace(buf.String()) + "]" +} + +func stringToStringConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]string{}, nil + } + r := csv.NewReader(strings.NewReader(val)) + ss, err := r.Read() + if err != nil { + return nil, err + } + out := make(map[string]string, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + out[kv[0]] = kv[1] + } + return out, nil +} + +// GetStringToString return the map[string]string value of a flag with the given name +func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { + val, err := f.getFlagType(name, "stringToString", stringToStringConv) + if err != nil { + return map[string]string{}, err + } + return val.(map[string]string), nil +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + f.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToStringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a map[string]string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) +} + +// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. +func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { + CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, "", value, usage) + return &p +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + p := map[string]string{} + f.StringToStringVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringToString defines a string flag with specified name, default value, and usage string. +// The return value is the address of a map[string]string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringToString(name string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, "", value, usage) +} + +// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { + return CommandLine.StringToStringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go index edd94c600..5fa924835 100644 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig new file mode 100644 index 000000000..63afcbcdd --- /dev/null +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile, *.mk}] +indent_style = tab diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore new file mode 100644 index 000000000..896250839 --- /dev/null +++ b/vendor/github.com/spf13/viper/.gitignore @@ -0,0 +1,5 @@ +/.idea/ +/bin/ +/build/ +/var/ +/vendor/ diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml new file mode 100644 index 000000000..4f970acb1 --- /dev/null +++ b/vendor/github.com/spf13/viper/.golangci.yml @@ -0,0 +1,93 @@ +run: + timeout: 5m + +linters-settings: + gci: + local-prefixes: github.com/spf13/viper + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/spf13/viper + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - dupl + - durationcheck + - exhaustive + - exportloopref + - gci + - goconst + - gofmt + - gofumpt + - goimports + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - wastedassign + - whitespace + + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - gocritic + # - gocyclo + # - godot + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - 
wrapcheck + + # unused + # - depguard + # - goheader + # - gomodguard + + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE new file mode 100644 index 000000000..4527efb9c --- /dev/null +++ b/vendor/github.com/spf13/viper/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Steve Francia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile new file mode 100644 index 000000000..b0f9acf24 --- /dev/null +++ b/vendor/github.com/spf13/viper/Makefile @@ -0,0 +1,76 @@ +# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html + +OS = $(shell uname | tr A-Z a-z) +export PATH := $(abspath bin/):${PATH} + +# Build variables +BUILD_DIR ?= build +export CGO_ENABLED ?= 0 +export GOOS = $(shell go env GOOS) +ifeq (${VERBOSE}, 1) +ifeq ($(filter -v,${GOARGS}),) + GOARGS += -v +endif +TEST_FORMAT = short-verbose +endif + +# Dependency versions +GOTESTSUM_VERSION = 1.6.4 +GOLANGCI_VERSION = 1.40.1 + +# Add the ability to override some variables +# Use with care +-include override.mk + +.PHONY: clear +clear: ## Clear the working area and the project + rm -rf bin/ + +.PHONY: check +check: test lint ## Run tests and linters + +bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION} + @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum +bin/gotestsum-${GOTESTSUM_VERSION}: + @mkdir -p bin + curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION} + +TEST_PKGS ?= ./... +.PHONY: test +test: TEST_FORMAT ?= short +test: SHELL = /bin/bash +test: export CGO_ENABLED=1 +test: bin/gotestsum ## Run tests + @mkdir -p ${BUILD_DIR} + bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) 
+ +bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} + @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint +bin/golangci-lint-${GOLANGCI_VERSION}: + @mkdir -p bin + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} + @mv bin/golangci-lint "$@" + +.PHONY: lint +lint: bin/golangci-lint ## Run linter + bin/golangci-lint run + +.PHONY: fix +fix: bin/golangci-lint ## Fix lint violations + bin/golangci-lint run --fix + +# Add custom targets here +-include custom.mk + +.PHONY: list +list: ## List all make targets + @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort + +.PHONY: help +.DEFAULT_GOAL := help +help: + @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +# Variable outputting/exporting rules +var-%: ; @echo $($*) +varexport-%: ; @echo $*=$($*) diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md new file mode 100644 index 000000000..f409b1519 --- /dev/null +++ b/vendor/github.com/spf13/viper/README.md @@ -0,0 +1,865 @@ +> ## Viper v2 feedback +> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 +> +> **Thank you!** + +![Viper](.github/logo.png?raw=true) + +[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) +[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) + +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) +[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.14-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) + +**Go configuration with fangs!** + +Many Go projects are built using Viper including: + +* [Hugo](http://gohugo.io) +* [EMC RexRay](http://rexray.readthedocs.org/en/stable/) +* [Imgur’s Incus](https://github.com/Imgur/incus) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +* [Docker Notary](https://github.com/docker/Notary) +* [BloomApi](https://www.bloomapi.com/) +* [doctl](https://github.com/digitalocean/doctl) +* [Clairctl](https://github.com/jgsqware/clairctl) +* [Mercure](https://mercure.rocks) + + +## Install + +```shell +go get github.com/spf13/viper +``` + +**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. + + +## What is Viper? + +Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed +to work within an application, and can handle all types of configuration needs +and formats. 
It supports: + +* setting defaults +* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files +* live watching and re-reading of config files (optional) +* reading from environment variables +* reading from remote config systems (etcd or Consul), and watching changes +* reading from command line flags +* reading from buffer +* setting explicit values + +Viper can be thought of as a registry for all of your applications configuration needs. + + +## Why Viper? + +When building a modern application, you don’t want to worry about +configuration file formats; you want to focus on building awesome software. +Viper is here to help with that. + +Viper does the following for you: + +1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats. +2. Provide a mechanism to set default values for your different configuration options. +3. Provide a mechanism to set override values for options specified through command line flags. +4. Provide an alias system to easily rename parameters without breaking existing code. +5. Make it easy to tell the difference between when a user has provided a command line or config file which is the same as the default. + +Viper uses the following precedence order. Each item takes precedence over the item below it: + + * explicit call to `Set` + * flag + * env + * config + * key/value store + * default + +**Important:** Viper configuration keys are case insensitive. +There are ongoing discussions about making that optional. + + +## Putting Values into Viper + +### Establishing Defaults + +A good configuration system will support default values. A default value is not +required for a key, but it’s useful in the event that a key hasn't been set via +config file, environment variable, remote configuration or flag. + +Examples: + +```go +viper.SetDefault("ContentDir", "content") +viper.SetDefault("LayoutDir", "layouts") +viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"}) +``` + +### Reading Config Files + +Viper requires minimal configuration so it knows where to look for config files. +Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but +currently a single Viper instance only supports a single configuration file. +Viper does not default to any configuration search paths leaving defaults decision +to an application. + +Here is an example of how to use Viper to search for and read a configuration file. +None of the specific paths are required, but at least one path should be provided +where a configuration file is expected. 
+ +```go +viper.SetConfigName("config") // name of config file (without extension) +viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name +viper.AddConfigPath("/etc/appname/") // path to look for the config file in +viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths +viper.AddConfigPath(".") // optionally look for config in the working directory +err := viper.ReadInConfig() // Find and read the config file +if err != nil { // Handle errors reading the config file + panic(fmt.Errorf("Fatal error config file: %w \n", err)) +} +``` + +You can handle the specific case where no config file is found like this: + +```go +if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } +} + +// Config file found and successfully parsed +``` + +*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmaticaly. For those configuration files that lie in the home of the user without any extension like `.bashrc` + +### Writing Config Files + +Reading from config files is useful, but at times you want to store all modifications made at run time. +For that, a bunch of commands are available, each with its own purpose: + +* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists. +* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists. +* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists. +* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists. + +As a rule of the thumb, everything marked with safe won't overwrite any file, but just create if not existent, whilst the default behavior is to create or truncate. + +A small examples section: + +```go +viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName' +viper.SafeWriteConfig() +viper.WriteConfigAs("/path/to/my/.config") +viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written +viper.SafeWriteConfigAs("/path/to/my/.other_config") +``` + +### Watching and re-reading config files + +Viper supports the ability to have your application live read a config file while running. + +Gone are the days of needing to restart a server to have a config take effect, +viper powered applications can read an update to a config file while running and +not miss a beat. + +Simply tell the viper instance to watchConfig. +Optionally you can provide a function for Viper to run each time a change occurs. + +**Make sure you add all of the configPaths prior to calling `WatchConfig()`** + +```go +viper.WatchConfig() +viper.OnConfigChange(func(e fsnotify.Event) { + fmt.Println("Config file changed:", e.Name) +}) +``` + +### Reading Config from io.Reader + +Viper predefines many configuration sources such as files, environment +variables, flags, and remote K/V store, but you are not bound to them. You can +also implement your own required configuration source and feed it to viper. 
+ +```go +viper.SetConfigType("yaml") // or viper.SetConfigType("YAML") + +// any approach to require this configuration into your program. +var yamlExample = []byte(` +Hacker: true +name: steve +hobbies: +- skateboarding +- snowboarding +- go +clothing: + jacket: leather + trousers: denim +age: 35 +eyes : brown +beard: true +`) + +viper.ReadConfig(bytes.NewBuffer(yamlExample)) + +viper.Get("name") // this would be "steve" +``` + +### Setting Overrides + +These could be from a command line flag, or from your own application logic. + +```go +viper.Set("Verbose", true) +viper.Set("LogFile", LogFile) +``` + +### Registering and Using Aliases + +Aliases permit a single value to be referenced by multiple keys + +```go +viper.RegisterAlias("loud", "Verbose") + +viper.Set("verbose", true) // same result as next line +viper.Set("loud", true) // same result as prior line + +viper.GetBool("loud") // true +viper.GetBool("verbose") // true +``` + +### Working with Environment Variables + +Viper has full support for environment variables. This enables 12 factor +applications out of the box. There are five methods that exist to aid working +with ENV: + + * `AutomaticEnv()` + * `BindEnv(string...) : error` + * `SetEnvPrefix(string)` + * `SetEnvKeyReplacer(string...) *strings.Replacer` + * `AllowEmptyEnv(bool)` + +_When working with ENV variables, it’s important to recognize that Viper +treats ENV variables as case sensitive._ + +Viper provides a mechanism to try to ensure that ENV variables are unique. By +using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from +the environment variables. Both `BindEnv` and `AutomaticEnv` will use this +prefix. + +`BindEnv` takes one or more parameters. The first parameter is the key name, the +rest are the name of the environment variables to bind to this key. If more than +one are provided, they will take precedence in the specified order. The name of +the environment variable is case sensitive. If the ENV variable name is not provided, then +Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), +it **does not** automatically add the prefix. For example if the second parameter is "id", +Viper will look for the ENV variable "ID". + +One important thing to recognize when working with ENV variables is that the +value will be read each time it is accessed. Viper does not fix the value when +the `BindEnv` is called. + +`AutomaticEnv` is a powerful helper especially when combined with +`SetEnvPrefix`. When called, Viper will check for an environment variable any +time a `viper.Get` request is made. It will apply the following rules. It will +check for an environment variable with a name matching the key uppercased and +prefixed with the `EnvPrefix` if set. + +`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env +keys to an extent. This is useful if you want to use `-` or something in your +`Get()` calls, but want your environmental variables to use `_` delimiters. An +example of using it can be found in `viper_test.go`. + +Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function. +Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic. + +By default empty environment variables are considered unset and will fall back to +the next configuration source. 
To treat empty environment variables as set, use +the `AllowEmptyEnv` method. + +#### Env example + +```go +SetEnvPrefix("spf") // will be uppercased automatically +BindEnv("id") + +os.Setenv("SPF_ID", "13") // typically done outside of the app + +id := Get("id") // 13 +``` + +### Working with Flags + +Viper has the ability to bind to flags. Specifically, Viper supports `Pflags` +as used in the [Cobra](https://github.com/spf13/cobra) library. + +Like `BindEnv`, the value is not set when the binding method is called, but when +it is accessed. This means you can bind as early as you want, even in an +`init()` function. + +For individual flags, the `BindPFlag()` method provides this functionality. + +Example: + +```go +serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) +``` + +You can also bind an existing set of pflags (pflag.FlagSet): + +Example: + +```go +pflag.Int("flagname", 1234, "help message for flagname") + +pflag.Parse() +viper.BindPFlags(pflag.CommandLine) + +i := viper.GetInt("flagname") // retrieve values from viper instead of pflag +``` + +The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude +the use of other packages that use the [flag](https://golang.org/pkg/flag/) +package from the standard library. The pflag package can handle the flags +defined for the flag package by importing these flags. This is accomplished +by a calling a convenience function provided by the pflag package called +AddGoFlagSet(). + +Example: + +```go +package main + +import ( + "flag" + "github.com/spf13/pflag" +) + +func main() { + + // using standard library "flag" package + flag.Int("flagname", 1234, "help message for flagname") + + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + viper.BindPFlags(pflag.CommandLine) + + i := viper.GetInt("flagname") // retrieve value from viper + + ... +} +``` + +#### Flag interfaces + +Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`. + +`FlagValue` represents a single flag. This is a very simple example on how to implement this interface: + +```go +type myFlag struct {} +func (f myFlag) HasChanged() bool { return false } +func (f myFlag) Name() string { return "my-flag-name" } +func (f myFlag) ValueString() string { return "my-flag-value" } +func (f myFlag) ValueType() string { return "string" } +``` + +Once your flag implements this interface, you can simply tell Viper to bind it: + +```go +viper.BindFlagValue("my-flag-name", myFlag{}) +``` + +`FlagValueSet` represents a group of flags. This is a very simple example on how to implement this interface: + +```go +type myFlagSet struct { + flags []myFlag +} + +func (f myFlagSet) VisitAll(fn func(FlagValue)) { + for _, flag := range flags { + fn(flag) + } +} +``` + +Once your flag set implements this interface, you can simply tell Viper to bind it: + +```go +fSet := myFlagSet{ + flags: []myFlag{myFlag{}, myFlag{}}, +} +viper.BindFlagValues("my-flags", fSet) +``` + +### Remote Key/Value Store Support + +To enable remote support in Viper, do a blank import of the `viper/remote` +package: + +`import _ "github.com/spf13/viper/remote"` + +Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path +in a Key/Value store such as etcd or Consul. These values take precedence over +default values, but are overridden by configuration values retrieved from disk, +flags, or environment variables. 
+ +Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +configuration from the K/V store, which means that you can store your +configuration values encrypted and have them automatically decrypted if you have +the correct gpg keyring. Encryption is optional. + +You can use remote configuration in conjunction with local configuration, or +independently of it. + +`crypt` has a command-line helper that you can use to put configurations in your +K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. + +```bash +$ go get github.com/bketelsen/crypt/bin/crypt +$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json +``` + +Confirm that your value was set: + +```bash +$ crypt get -plaintext /config/hugo.json +``` + +See the `crypt` documentation for examples of how to set encrypted values, or +how to use Consul. + +### Remote Key/Value Store Example - Unencrypted + +#### etcd +```go +viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" +err := viper.ReadRemoteConfig() +``` + +#### Consul +You need to set a key to Consul key/value storage with JSON value containing your desired config. +For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: + +```json +{ + "port": 8080, + "hostname": "myhostname.com" +} +``` + +```go +viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY") +viper.SetConfigType("json") // Need to explicitly set this to json +err := viper.ReadRemoteConfig() + +fmt.Println(viper.Get("port")) // 8080 +fmt.Println(viper.Get("hostname")) // myhostname.com +``` + +#### Firestore + +```go +viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document") +viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml" +err := viper.ReadRemoteConfig() +``` + +Of course, you're allowed to use `SecureRemoteProvider` also + +### Remote Key/Value Store Example - Encrypted + +```go +viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" +err := viper.ReadRemoteConfig() +``` + +### Watching Changes in etcd - Unencrypted + +```go +// alternatively, you can create a new viper instance. +var runtime_viper = viper.New() + +runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml") +runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" + +// read from remote config the first time. +err := runtime_viper.ReadRemoteConfig() + +// unmarshal config +runtime_viper.Unmarshal(&runtime_conf) + +// open a goroutine to watch remote changes forever +go func(){ + for { + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. 
you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) + } +}() +``` + +## Getting Values From Viper + +In Viper, there are a few ways to get a value depending on the value’s type. +The following functions and methods exist: + + * `Get(key string) : interface{}` + * `GetBool(key string) : bool` + * `GetFloat64(key string) : float64` + * `GetInt(key string) : int` + * `GetIntSlice(key string) : []int` + * `GetString(key string) : string` + * `GetStringMap(key string) : map[string]interface{}` + * `GetStringMapString(key string) : map[string]string` + * `GetStringSlice(key string) : []string` + * `GetTime(key string) : time.Time` + * `GetDuration(key string) : time.Duration` + * `IsSet(key string) : bool` + * `AllSettings() : map[string]interface{}` + +One important thing to recognize is that each Get function will return a zero +value if it’s not found. To check if a given key exists, the `IsSet()` method +has been provided. + +Example: +```go +viper.GetString("logfile") // case-insensitive Setting & Getting +if viper.GetBool("verbose") { + fmt.Println("verbose enabled") +} +``` +### Accessing nested keys + +The accessor methods also accept formatted paths to deeply nested keys. For +example, if the following JSON file is loaded: + +```json +{ + "host": { + "address": "localhost", + "port": 5799 + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +``` + +Viper can access a nested field by passing a `.` delimited path of keys: + +```go +GetString("datastore.metric.host") // (returns "127.0.0.1") +``` + +This obeys the precedence rules established above; the search for the path +will cascade through the remaining configuration registries until found. + +For example, given this configuration file, both `datastore.metric.host` and +`datastore.metric.port` are already defined (and may be overridden). If in addition +`datastore.metric.protocol` was defined in the defaults, Viper would also find it. + +However, if `datastore.metric` was overridden (by a flag, an environment variable, +the `Set()` method, …) with an immediate value, then all sub-keys of +`datastore.metric` become undefined, they are “shadowed” by the higher-priority +configuration level. + +Viper can access array indices by using numbers in the path. For example: + +```json +{ + "host": { + "address": "localhost", + "ports": [ + 5799, + 6029 + ] + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +GetInt("host.ports.1") // returns 6029 + +``` + +Lastly, if there exists a key that matches the delimited key path, its value +will be returned instead. E.g. + +```json +{ + "datastore.metric.host": "0.0.0.0", + "host": { + "address": "localhost", + "port": 5799 + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +GetString("datastore.metric.host") // returns "0.0.0.0" +``` + +### Extracting a sub-tree + +When developing reusable modules, it's often useful to extract a subset of the configuration +and pass it to a module. This way the module can be instantiated more than once, with different configurations. 
+ +For example, an application might use multiple different cache stores for different purposes: + +```yaml +cache: + cache1: + max-items: 100 + item-size: 64 + cache2: + max-items: 200 + item-size: 80 +``` + +We could pass the cache name to a module (eg. `NewCache("cache1")`), +but it would require weird concatenation for accessing config keys and would be less separated from the global config. + +So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration: + +```go +cache1Config := viper.Sub("cache.cache1") +if cache1Config == nil { // Sub returns nil if the key cannot be found + panic("cache configuration not found") +} + +cache1 := NewCache(cache1Config) +``` + +**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found. + +Internally, the `NewCache` function can address `max-items` and `item-size` keys directly: + +```go +func NewCache(v *Viper) *Cache { + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } +} +``` + +The resulting code is easy to test, since it's decoupled from the main config structure, +and easier to reuse (for the same reason). + + +### Unmarshaling + +You also have the option of Unmarshaling all or a specific value to a struct, map, +etc. + +There are two methods to do this: + + * `Unmarshal(rawVal interface{}) : error` + * `UnmarshalKey(key string, rawVal interface{}) : error` + +Example: + +```go +type config struct { + Port int + Name string + PathMap string `mapstructure:"path_map"` +} + +var C config + +err := viper.Unmarshal(&C) +if err != nil { + t.Fatalf("unable to decode into struct, %v", err) +} +``` + +If you want to unmarshal configuration where the keys themselves contain dot (the default key delimiter), +you have to change the delimiter: + +```go +v := viper.NewWithOptions(viper.KeyDelimiter("::")) + +v.SetDefault("chart::values", map[string]interface{}{ + "ingress": map[string]interface{}{ + "annotations": map[string]interface{}{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, +}) + +type config struct { + Chart struct{ + Values map[string]interface{} + } +} + +var C config + +v.Unmarshal(&C) +``` + +Viper also supports unmarshaling into embedded structs: + +```go +/* +Example config: + +module: + enabled: true + token: 89h3f98hbwf987h3f98wenf89ehf +*/ +type config struct { + Module struct { + Enabled bool + + moduleConfig `mapstructure:",squash"` + } +} + +// moduleConfig could be in a module specific package +type moduleConfig struct { + Token string +} + +var C config + +err := viper.Unmarshal(&C) +if err != nil { + t.Fatalf("unable to decode into struct, %v", err) +} +``` + +Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. + +### Marshalling to string + +You may need to marshal all the settings held in viper into a string rather than write them to a file. +You can use your favorite format's marshaller with the config returned by `AllSettings()`. + +```go +import ( + yaml "gopkg.in/yaml.v2" + // ... +) + +func yamlStringSettings() string { + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) +} +``` + +## Viper or Vipers? + +Viper comes ready to use out of the box. 
There is no configuration or +initialization needed to begin using Viper. Since most applications will want +to use a single central repository for their configuration, the viper package +provides this. It is similar to a singleton. + +In all of the examples above, they demonstrate using viper in its singleton +style approach. + +### Working with multiple vipers + +You can also create many different vipers for use in your application. Each will +have its own unique set of configurations and values. Each can read from a +different config file, key value store, etc. All of the functions that viper +package supports are mirrored as methods on a viper. + +Example: + +```go +x := viper.New() +y := viper.New() + +x.SetDefault("ContentDir", "content") +y.SetDefault("ContentDir", "foobar") + +//... +``` + +When working with multiple vipers, it is up to the user to keep track of the +different vipers. + + +## Q & A + +### Why is it called “Viper”? + +A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe)) +to [Cobra](https://github.com/spf13/cobra). While both can operate completely +independently, together they make a powerful pair to handle much of your +application foundation needs. + +### Why is it called “Cobra”? + +Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? + +### Does Viper support case sensitive keys? + +**tl;dr:** No. + +Viper merges configuration from various sources, many of which are either case insensitive or uses different casing than the rest of the sources (eg. env vars). +In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive. + +There has been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much. + +You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9 + +### Is it safe to concurrently read and write to a viper? + +No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic. + +## Troubleshooting + +See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md new file mode 100644 index 000000000..096277af7 --- /dev/null +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -0,0 +1,23 @@ +# Troubleshooting + +## Unmarshaling doesn't work + +The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags. + +## Cannot find package + +Viper installation seems to fail a lot lately with the following (or a similar) error: + +``` +cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: +/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT) +/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH) +``` + +As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. 
+Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). + +The solution is easy: switch to using Go Modules. +Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. + +**tl;dr* `export GO111MODULE=on` diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go new file mode 100644 index 000000000..b5ddbf5d4 --- /dev/null +++ b/vendor/github.com/spf13/viper/flags.go @@ -0,0 +1,57 @@ +package viper + +import "github.com/spf13/pflag" + +// FlagValueSet is an interface that users can implement +// to bind a set of flags to viper. +type FlagValueSet interface { + VisitAll(fn func(FlagValue)) +} + +// FlagValue is an interface that users can implement +// to bind different flags to viper. +type FlagValue interface { + HasChanged() bool + Name() string + ValueString() string + ValueType() string +} + +// pflagValueSet is a wrapper around *pflag.ValueSet +// that implements FlagValueSet. +type pflagValueSet struct { + flags *pflag.FlagSet +} + +// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet. +func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { + p.flags.VisitAll(func(flag *pflag.Flag) { + fn(pflagValue{flag}) + }) +} + +// pflagValue is a wrapper aroung *pflag.flag +// that implements FlagValue +type pflagValue struct { + flag *pflag.Flag +} + +// HasChanged returns whether the flag has changes or not. +func (p pflagValue) HasChanged() bool { + return p.flag.Changed +} + +// Name returns the name of the flag. +func (p pflagValue) Name() string { + return p.flag.Name +} + +// ValueString returns the value of the flag as a string. +func (p pflagValue) ValueString() string { + return p.flag.Value.String() +} + +// ValueType returns the type of the flag as a string. 
+func (p pflagValue) ValueType() string { + return p.flag.Value.Type() +} diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod new file mode 100644 index 000000000..145e0a100 --- /dev/null +++ b/vendor/github.com/spf13/viper/go.mod @@ -0,0 +1,21 @@ +module github.com/spf13/viper + +go 1.12 + +require ( + github.com/bketelsen/crypt v0.0.4 + github.com/fsnotify/fsnotify v1.4.9 + github.com/hashicorp/hcl v1.0.0 + github.com/magiconair/properties v1.8.5 + github.com/mitchellh/mapstructure v1.4.1 + github.com/pelletier/go-toml v1.9.3 + github.com/smartystreets/goconvey v1.6.4 // indirect + github.com/spf13/afero v1.6.0 + github.com/spf13/cast v1.3.1 + github.com/spf13/jwalterweatherman v1.1.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.7.0 + github.com/subosito/gotenv v1.2.0 + gopkg.in/ini.v1 v1.62.0 + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum new file mode 100644 index 000000000..27730e2aa --- /dev/null +++ b/vendor/github.com/spf13/viper/go.sum @@ -0,0 +1,632 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= 
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= 
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.1.0 
h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod 
h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go new file mode 100644 index 000000000..cee6b2429 --- /dev/null +++ b/vendor/github.com/spf13/viper/util.go @@ -0,0 +1,230 @@ +// Copyright © 2014 Steve Francia . + +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Viper is an application configuration system. +// It believes that applications can be configured a variety of ways +// via flags, ENVIRONMENT variables, configuration files retrieved +// from the file system, or a remote key/value store. + +package viper + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "unicode" + + "github.com/spf13/afero" + "github.com/spf13/cast" + jww "github.com/spf13/jwalterweatherman" +) + +// ConfigParseError denotes failing to parse configuration file. +type ConfigParseError struct { + err error +} + +// Error returns the formatted configuration error. +func (pe ConfigParseError) Error() string { + return fmt.Sprintf("While parsing config: %s", pe.err.Error()) +} + +// toCaseInsensitiveValue checks if the value is a map; +// if so, create a copy and lower-case the keys recursively. +func toCaseInsensitiveValue(value interface{}) interface{} { + switch v := value.(type) { + case map[interface{}]interface{}: + value = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + value = copyAndInsensitiviseMap(v) + } + + return value +} + +// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of +// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { + nm := make(map[string]interface{}) + + for key, val := range m { + lkey := strings.ToLower(key) + switch v := val.(type) { + case map[interface{}]interface{}: + nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + nm[lkey] = copyAndInsensitiviseMap(v) + default: + nm[lkey] = v + } + } + + return nm +} + +func insensitiviseMap(m map[string]interface{}) { + for key, val := range m { + switch val.(type) { + case map[interface{}]interface{}: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]interface{})) + case map[string]interface{}: + // nested map: recursively insensitivise + insensitiviseMap(val.(map[string]interface{})) + } + + lower := strings.ToLower(key) + if key != lower { + // remove old key (not lower-cased) + delete(m, key) + } + // update map + m[lower] = val + } +} + +func absPathify(inPath string) string { + jww.INFO.Println("Trying to resolve absolute path to", inPath) + + if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { + inPath = userHomeDir() + inPath[5:] + } + + if strings.HasPrefix(inPath, "$") { + end := strings.Index(inPath, string(os.PathSeparator)) + + var value, suffix string + if end == -1 { + value = os.Getenv(inPath[1:]) + } else { + value = os.Getenv(inPath[1:end]) + suffix = inPath[end:] + } + + inPath = value + suffix + } + + if filepath.IsAbs(inPath) { + return filepath.Clean(inPath) + } + + p, err := filepath.Abs(inPath) + if err == nil { + return filepath.Clean(p) + } + + jww.ERROR.Println("Couldn't discover absolute path") + jww.ERROR.Println(err) + return "" +} + +// Check if file Exists +func exists(fs afero.Fs, path string) (bool, error) { + stat, err := fs.Stat(path) + if err == nil { + return !stat.IsDir(), nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func userHomeDir() string { + if runtime.GOOS == "windows" { + home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + return home + } + return os.Getenv("HOME") +} + +func safeMul(a, b uint) uint { + c := a * b + if a > 1 && b > 1 && c/b != a { + return 0 + } + return c +} + +// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes +func parseSizeInBytes(sizeStr string) uint { + sizeStr = strings.TrimSpace(sizeStr) + lastChar := len(sizeStr) - 1 + multiplier := uint(1) + + if lastChar > 0 { + if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { + if lastChar > 1 { + switch unicode.ToLower(rune(sizeStr[lastChar-1])) { + case 'k': + multiplier = 1 << 10 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'm': + multiplier = 1 << 20 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + case 'g': + multiplier = 1 << 30 + sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) + default: + multiplier = 1 + sizeStr = strings.TrimSpace(sizeStr[:lastChar]) + } + } + } + } + + size := cast.ToInt(sizeStr) + if size < 0 { + size = 0 + } + + return safeMul(uint(size), multiplier) +} + +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. 
+// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go new file mode 100644 index 000000000..e8c04627b --- /dev/null +++ b/vendor/github.com/spf13/viper/viper.go @@ -0,0 +1,2169 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Viper is an application configuration system. +// It believes that applications can be configured a variety of ways +// via flags, ENVIRONMENT variables, configuration files retrieved +// from the file system, or a remote key/value store. + +// Each item takes precedence over the item below it: + +// overrides +// flag +// env +// config +// key/value store +// default + +package viper + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" + "github.com/magiconair/properties" + "github.com/mitchellh/mapstructure" + "github.com/pelletier/go-toml" + "github.com/spf13/afero" + "github.com/spf13/cast" + jww "github.com/spf13/jwalterweatherman" + "github.com/spf13/pflag" + "github.com/subosito/gotenv" + "gopkg.in/ini.v1" + "gopkg.in/yaml.v2" +) + +// ConfigMarshalError happens when failing to marshal the configuration. +type ConfigMarshalError struct { + err error +} + +// Error returns the formatted configuration error. +func (e ConfigMarshalError) Error() string { + return fmt.Sprintf("While marshaling config: %s", e.err.Error()) +} + +var v *Viper + +type RemoteResponse struct { + Value []byte + Error error +} + +func init() { + v = New() +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +// RemoteConfig is optional, see the remote package +var RemoteConfig remoteConfigFactory + +// UnsupportedConfigError denotes encountering an unsupported +// configuration filetype. +type UnsupportedConfigError string + +// Error returns the formatted configuration error. +func (str UnsupportedConfigError) Error() string { + return fmt.Sprintf("Unsupported Config Type %q", string(str)) +} + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. +type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. 
+func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +// ConfigFileNotFoundError denotes failing to find configuration file. +type ConfigFileNotFoundError struct { + name, locations string +} + +// Error returns the formatted configuration error. +func (fnfe ConfigFileNotFoundError) Error() string { + return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) +} + +// ConfigFileAlreadyExistsError denotes failure to write new configuration file. +type ConfigFileAlreadyExistsError string + +// Error returns the formatted error when configuration already exists. +func (faee ConfigFileAlreadyExistsError) Error() string { + return fmt.Sprintf("Config File %q Already Exists", string(faee)) +} + +// A DecoderConfigOption can be passed to viper.Unmarshal to configure +// mapstructure.DecoderConfig options +type DecoderConfigOption func(*mapstructure.DecoderConfig) + +// DecodeHook returns a DecoderConfigOption which overrides the default +// DecoderConfig.DecodeHook value, the default is: +// +// mapstructure.ComposeDecodeHookFunc( +// mapstructure.StringToTimeDurationHookFunc(), +// mapstructure.StringToSliceHookFunc(","), +// ) +func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { + return func(c *mapstructure.DecoderConfig) { + c.DecodeHook = hook + } +} + +// Viper is a prioritized configuration registry. It +// maintains a set of configuration sources, fetches +// values to populate those, and provides them according +// to the source's priority. +// The priority of the sources is the following: +// 1. overrides +// 2. flags +// 3. env. variables +// 4. config file +// 5. key/value store +// 6. defaults +// +// For example, if values from the following sources were loaded: +// +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } +// +// The resulting config will have the following values: +// +// { +// "secret": "somesecretkey", +// "user": "root", +// "endpoint": "https://localhost" +// } +// +// Note: Vipers are not safe for concurrent Get() and Set() operations. +type Viper struct { + // Delimiter that separates a list of keys + // used to access a nested value in one go + keyDelim string + + // A set of paths to look for the config file in + configPaths []string + + // The filesystem to read config from. 
+ fs afero.Fs + + // A set of remote providers to search for the configuration + remoteProviders []*defaultRemoteProvider + + // Name of file to look for inside the path + configName string + configFile string + configType string + configPermissions os.FileMode + envPrefix string + + // Specific commands for ini parsing + iniLoadOptions ini.LoadOptions + + automaticEnvApplied bool + envKeyReplacer StringReplacer + allowEmptyEnv bool + + config map[string]interface{} + override map[string]interface{} + defaults map[string]interface{} + kvstore map[string]interface{} + pflags map[string]FlagValue + env map[string][]string + aliases map[string]string + typeByDefValue bool + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + properties *properties.Properties + + onConfigChange func(fsnotify.Event) +} + +// New returns an initialized Viper instance. +func New() *Viper { + v := new(Viper) + v.keyDelim = "." + v.configName = "config" + v.configPermissions = os.FileMode(0644) + v.fs = afero.NewOsFs() + v.config = make(map[string]interface{}) + v.override = make(map[string]interface{}) + v.defaults = make(map[string]interface{}) + v.kvstore = make(map[string]interface{}) + v.pflags = make(map[string]FlagValue) + v.env = make(map[string][]string) + v.aliases = make(map[string]string) + v.typeByDefValue = false + + return v +} + +// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney. +// If you're unfamiliar with this style, +// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and +// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis. +type Option interface { + apply(v *Viper) +} + +type optionFunc func(v *Viper) + +func (fn optionFunc) apply(v *Viper) { + fn(v) +} + +// KeyDelimiter sets the delimiter used for determining key parts. +// By default it's value is ".". +func KeyDelimiter(d string) Option { + return optionFunc(func(v *Viper) { + v.keyDelim = d + }) +} + +// StringReplacer applies a set of replacements to a string. +type StringReplacer interface { + // Replace returns a copy of s with all replacements performed. + Replace(s string) string +} + +// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. +func EnvKeyReplacer(r StringReplacer) Option { + return optionFunc(func(v *Viper) { + v.envKeyReplacer = r + }) +} + +// NewWithOptions creates a new Viper instance. +func NewWithOptions(opts ...Option) *Viper { + v := New() + + for _, opt := range opts { + opt.apply(v) + } + + return v +} + +// Reset is intended for testing, will reset all to default settings. +// In the public interface for the viper package so applications +// can use it in their testing as well. 
+func Reset() { + v = New() + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// Optional secretKeyring to unencrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// SupportedExts are universally supported extensions. +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + +// SupportedRemoteProviders are universally supported remote providers. +var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} + +func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } +func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { + v.onConfigChange = run +} + +func WatchConfig() { v.WatchConfig() } + +func (v *Viper) WatchConfig() { + initWG := sync.WaitGroup{} + initWG.Add(1) + go func() { + watcher, err := newWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way + filename, err := v.getConfigFile() + if err != nil { + log.Printf("error: %v\n", err) + initWG.Done() + return + } + + configFile := filepath.Clean(filename) + configDir, _ := filepath.Split(configFile) + realConfigFile, _ := filepath.EvalSymlinks(filename) + + eventsWG := sync.WaitGroup{} + eventsWG.Add(1) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { // 'Events' channel is closed + eventsWG.Done() + return + } + currentConfigFile, _ := filepath.EvalSymlinks(filename) + // we only care about the config file with the following cases: + // 1 - if the config file was modified or created + // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) + const writeOrCreateMask = fsnotify.Write | fsnotify.Create + if (filepath.Clean(event.Name) == configFile && + event.Op&writeOrCreateMask != 0) || + (currentConfigFile != "" && currentConfigFile != realConfigFile) { + realConfigFile = currentConfigFile + err := v.ReadInConfig() + if err != nil { + log.Printf("error reading config file: %v\n", err) + } + if v.onConfigChange != nil { + v.onConfigChange(event) + } + } else if filepath.Clean(event.Name) == configFile && + event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + eventsWG.Done() + return + } + + case err, ok := <-watcher.Errors: + if ok { // 'Errors' channel is not closed + log.Printf("watcher error: %v\n", err) + } + eventsWG.Done() + return + } + } + }() + watcher.Add(configDir) + initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... + eventsWG.Wait() // now, wait for event loop to end in this go-routine... 
+ }() + initWG.Wait() // make sure that the go routine above fully ended before returning +} + +// SetConfigFile explicitly defines the path, name and extension of the config file. +// Viper will use this and not check any of the config paths. +func SetConfigFile(in string) { v.SetConfigFile(in) } + +func (v *Viper) SetConfigFile(in string) { + if in != "" { + v.configFile = in + } +} + +// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. +// E.g. if your prefix is "spf", the env registry will look for env +// variables that start with "SPF_". +func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } + +func (v *Viper) SetEnvPrefix(in string) { + if in != "" { + v.envPrefix = in + } +} + +func (v *Viper) mergeWithEnvPrefix(in string) string { + if v.envPrefix != "" { + return strings.ToUpper(v.envPrefix + "_" + in) + } + + return strings.ToUpper(in) +} + +// AllowEmptyEnv tells Viper to consider set, +// but empty environment variables as valid values instead of falling back. +// For backward compatibility reasons this is false by default. +func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } + +func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { + v.allowEmptyEnv = allowEmptyEnv +} + +// TODO: should getEnv logic be moved into find(). Can generalize the use of +// rewriting keys many things, Ex: Get('someKey') -> some_key +// (camel case to snake case for JSON keys perhaps) + +// getEnv is a wrapper around os.Getenv which replaces characters in the original +// key. This allows env vars which have different keys than the config object +// keys. +func (v *Viper) getEnv(key string) (string, bool) { + if v.envKeyReplacer != nil { + key = v.envKeyReplacer.Replace(key) + } + + val, ok := os.LookupEnv(key) + + return val, ok && (v.allowEmptyEnv || val != "") +} + +// ConfigFileUsed returns the file used to populate the config registry. +func ConfigFileUsed() string { return v.ConfigFileUsed() } +func (v *Viper) ConfigFileUsed() string { return v.configFile } + +// AddConfigPath adds a path for Viper to search for the config file in. +// Can be called multiple times to define multiple search paths. +func AddConfigPath(in string) { v.AddConfigPath(in) } + +func (v *Viper) AddConfigPath(in string) { + if in != "" { + absin := absPathify(in) + jww.INFO.Println("adding", absin, "to paths to search") + if !stringInSlice(absin, v.configPaths) { + v.configPaths = append(v.configPaths, absin) + } + } +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "consul" or "firestore" are currently supported. +// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} + +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "consul" or "firestore" are currently supported. +// endpoint is the url. etcd requires http://ip:port consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp" +// Secure Remote Providers are implemented with github.com/bketelsen/crypt +func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !stringInSlice(provider, SupportedRemoteProviders) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// searchMap recursively searches for a value for path in source map. +// Returns nil if not found. +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + next, ok := source[path[0]] + if ok { + // Fast path + if len(path) == 1 { + return next + } + + // Nested case + switch next.(type) { + case map[interface{}]interface{}: + return v.searchMap(cast.ToStringMap(next), path[1:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + return v.searchMap(next.(map[string]interface{}), path[1:]) + default: + // got a value but nested key expected, return "nil" for not found + return nil + } + } + return nil +} + +// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. 
+// +// While searchMap() considers each path element as a single map key or slice index, this +// function searches for, and prioritizes, merged path elements. +// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" +// is also defined, this latter value is returned for path ["foo", "bar"]. +// +// This should be useful only at config level (other maps may not contain dots +// in their keys). +// +// Note: This assumes that the path entries and map keys are lower cased. +func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + // search for path prefixes, starting from the longest one + for i := len(path); i > 0; i-- { + prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) + + var val interface{} + switch sourceIndexable := source.(type) { + case []interface{}: + val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) + case map[string]interface{}: + val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) + } + if val != nil { + return val + } + } + + // not found + return nil +} + +// searchSliceWithPathPrefixes searches for a value for path in sourceSlice +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchSliceWithPathPrefixes( + sourceSlice []interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + // if the prefixKey is not a number or it is out of bounds of the slice + index, err := strconv.Atoi(prefixKey) + if err != nil || len(sourceSlice) <= index { + return nil + } + + next := sourceSlice[index] + + // Fast path + if pathIndex == len(path) { + return next + } + + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + +// searchMapWithPathPrefixes searches for a value for path in sourceMap +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchMapWithPathPrefixes( + sourceMap map[string]interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + next, ok := sourceMap[prefixKey] + if !ok { + return nil + } + + // Fast path + if pathIndex == len(path) { + return next + } + + // Nested case + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + +// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere +// on its path in the map. 
+// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { + var parentVal interface{} + for i := 1; i < len(path); i++ { + parentVal = v.searchMap(m, path[0:i]) + if parentVal == nil { + // not found, no need to add more path elements + return "" + } + switch parentVal.(type) { + case map[interface{}]interface{}: + continue + case map[string]interface{}: + continue + default: + // parentVal is a regular value which shadows "path" + return strings.Join(path[0:i], v.keyDelim) + } + } + return "" +} + +// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere +// in a sub-path of the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { + // unify input map + var m map[string]interface{} + switch mi.(type) { + case map[string]string, map[string]FlagValue: + m = cast.ToStringMap(mi) + default: + return "" + } + + // scan paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := m[parentKey]; ok { + return parentKey + } + } + return "" +} + +// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere +// in the environment, when automatic env is on. +// e.g., if "foo.bar" has a value in the environment, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInAutoEnv(path []string) string { + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok { + return parentKey + } + } + return "" +} + +// SetTypeByDefaultValue enables or disables the inference of a key value's +// type when the Get function is used based upon a key's default value as +// opposed to the value returned based on the normal fetch logic. +// +// For example, if a key has a default value of []string{} and the same key +// is set via an environment variable to "a b c", a call to the Get function +// would return a string slice for the key if the key's type is inferred by +// the default value and the Get function would return: +// +// []string {"a", "b", "c"} +// +// Otherwise the Get function would return: +// +// "a b c" +func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } + +func (v *Viper) SetTypeByDefaultValue(enable bool) { + v.typeByDefValue = enable +} + +// GetViper gets the global Viper instance. +func GetViper() *Viper { + return v +} + +// Get can retrieve any value given the key to use. +// Get is case-insensitive for a key. +// Get has the behavior of returning the value associated with the first +// place from where it is set. Viper will check in the following order: +// override, flag, env, config file, key/value store, default +// +// Get returns an interface. For a specific value use one of the Get____ methods. +func Get(key string) interface{} { return v.Get(key) } + +func (v *Viper) Get(key string) interface{} { + lcaseKey := strings.ToLower(key) + val := v.find(lcaseKey, true) + if val == nil { + return nil + } + + if v.typeByDefValue { + // TODO(bep) this branch isn't covered by a single test. 
+ valType := val + path := strings.Split(lcaseKey, v.keyDelim) + defVal := v.searchMap(v.defaults, path) + if defVal != nil { + valType = defVal + } + + switch valType.(type) { + case bool: + return cast.ToBool(val) + case string: + return cast.ToString(val) + case int32, int16, int8, int: + return cast.ToInt(val) + case uint: + return cast.ToUint(val) + case uint32: + return cast.ToUint32(val) + case uint64: + return cast.ToUint64(val) + case int64: + return cast.ToInt64(val) + case float64, float32: + return cast.ToFloat64(val) + case time.Time: + return cast.ToTime(val) + case time.Duration: + return cast.ToDuration(val) + case []string: + return cast.ToStringSlice(val) + case []int: + return cast.ToIntSlice(val) + } + } + + return val +} + +// Sub returns new Viper instance representing a sub tree of this instance. +// Sub is case-insensitive for a key. +func Sub(key string) *Viper { return v.Sub(key) } + +func (v *Viper) Sub(key string) *Viper { + subv := New() + data := v.Get(key) + if data == nil { + return nil + } + + if reflect.TypeOf(data).Kind() == reflect.Map { + subv.config = cast.ToStringMap(data) + return subv + } + return nil +} + +// GetString returns the value associated with the key as a string. +func GetString(key string) string { return v.GetString(key) } + +func (v *Viper) GetString(key string) string { + return cast.ToString(v.Get(key)) +} + +// GetBool returns the value associated with the key as a boolean. +func GetBool(key string) bool { return v.GetBool(key) } + +func (v *Viper) GetBool(key string) bool { + return cast.ToBool(v.Get(key)) +} + +// GetInt returns the value associated with the key as an integer. +func GetInt(key string) int { return v.GetInt(key) } + +func (v *Viper) GetInt(key string) int { + return cast.ToInt(v.Get(key)) +} + +// GetInt32 returns the value associated with the key as an integer. +func GetInt32(key string) int32 { return v.GetInt32(key) } + +func (v *Viper) GetInt32(key string) int32 { + return cast.ToInt32(v.Get(key)) +} + +// GetInt64 returns the value associated with the key as an integer. +func GetInt64(key string) int64 { return v.GetInt64(key) } + +func (v *Viper) GetInt64(key string) int64 { + return cast.ToInt64(v.Get(key)) +} + +// GetUint returns the value associated with the key as an unsigned integer. +func GetUint(key string) uint { return v.GetUint(key) } + +func (v *Viper) GetUint(key string) uint { + return cast.ToUint(v.Get(key)) +} + +// GetUint32 returns the value associated with the key as an unsigned integer. +func GetUint32(key string) uint32 { return v.GetUint32(key) } + +func (v *Viper) GetUint32(key string) uint32 { + return cast.ToUint32(v.Get(key)) +} + +// GetUint64 returns the value associated with the key as an unsigned integer. +func GetUint64(key string) uint64 { return v.GetUint64(key) } + +func (v *Viper) GetUint64(key string) uint64 { + return cast.ToUint64(v.Get(key)) +} + +// GetFloat64 returns the value associated with the key as a float64. +func GetFloat64(key string) float64 { return v.GetFloat64(key) } + +func (v *Viper) GetFloat64(key string) float64 { + return cast.ToFloat64(v.Get(key)) +} + +// GetTime returns the value associated with the key as time. +func GetTime(key string) time.Time { return v.GetTime(key) } + +func (v *Viper) GetTime(key string) time.Time { + return cast.ToTime(v.Get(key)) +} + +// GetDuration returns the value associated with the key as a duration. 
+func GetDuration(key string) time.Duration { return v.GetDuration(key) } + +func (v *Viper) GetDuration(key string) time.Duration { + return cast.ToDuration(v.Get(key)) +} + +// GetIntSlice returns the value associated with the key as a slice of int values. +func GetIntSlice(key string) []int { return v.GetIntSlice(key) } + +func (v *Viper) GetIntSlice(key string) []int { + return cast.ToIntSlice(v.Get(key)) +} + +// GetStringSlice returns the value associated with the key as a slice of strings. +func GetStringSlice(key string) []string { return v.GetStringSlice(key) } + +func (v *Viper) GetStringSlice(key string) []string { + return cast.ToStringSlice(v.Get(key)) +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } + +func (v *Viper) GetStringMap(key string) map[string]interface{} { + return cast.ToStringMap(v.Get(key)) +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } + +func (v *Viper) GetStringMapString(key string) map[string]string { + return cast.ToStringMapString(v.Get(key)) +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } + +func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { + return cast.ToStringMapStringSlice(v.Get(key)) +} + +// GetSizeInBytes returns the size of the value associated with the given key +// in bytes. +func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } + +func (v *Viper) GetSizeInBytes(key string) uint { + sizeStr := cast.ToString(v.Get(key)) + return parseSizeInBytes(sizeStr) +} + +// UnmarshalKey takes a single key and unmarshals it into a Struct. +func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { + return v.UnmarshalKey(key, rawVal, opts...) +} + +func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { + return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) +} + +// Unmarshal unmarshals the config into a Struct. Make sure that the tags +// on the fields of the structure are properly set. +func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { + return v.Unmarshal(rawVal, opts...) 
+} + +func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { + return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) +} + +// defaultDecoderConfig returns default mapstructure.DecoderConfig with support +// of time.Duration values & string slices +func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { + c := &mapstructure.DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ), + } + for _, opt := range opts { + opt(c) + } + return c +} + +// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality +func decode(input interface{}, config *mapstructure.DecoderConfig) error { + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + return decoder.Decode(input) +} + +// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent +// in the destination struct. +func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { + return v.UnmarshalExact(rawVal, opts...) +} + +func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { + config := defaultDecoderConfig(rawVal, opts...) + config.ErrorUnused = true + + return decode(v.AllSettings(), config) +} + +// BindPFlags binds a full flag set to the configuration, using each flag's long +// name as the config key. +func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } + +func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { + return v.BindFlagValues(pflagValueSet{flags}) +} + +// BindPFlag binds a specific key to a pflag (as used by cobra). +// Example (where serverCmd is a Cobra instance): +// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) +// +func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } + +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } + return v.BindFlagValue(key, pflagValue{flag}) +} + +// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long +// name as the config key. +func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } + +func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { + flags.VisitAll(func(flag FlagValue) { + if err = v.BindFlagValue(flag.Name(), flag); err != nil { + return + } + }) + return nil +} + +// BindFlagValue binds a specific key to a FlagValue. +func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } + +func (v *Viper) BindFlagValue(key string, flag FlagValue) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } + v.pflags[strings.ToLower(key)] = flag + return nil +} + +// BindEnv binds a Viper key to an ENV variable. +// ENV variables are case sensitive. +// If only a key is provided, it will use the env key matching the key, uppercased. +// If more arguments are provided, they will represent the env variable names that +// should bind to this key and will be taken in the specified order. +// EnvPrefix will be used when set when env name is not provided. +func BindEnv(input ...string) error { return v.BindEnv(input...)
} + +func (v *Viper) BindEnv(input ...string) error { + if len(input) == 0 { + return fmt.Errorf("missing key to bind to") + } + + key := strings.ToLower(input[0]) + + if len(input) == 1 { + v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) + } else { + v.env[key] = append(v.env[key], input[1:]...) + } + + return nil +} + +// Given a key, find the value. +// +// Viper will check to see if an alias exists first. +// Viper will then check in the following order: +// flag, env, config file, key/value store. +// Lastly, if no value was found and flagDefault is true, and if the key +// corresponds to a flag, the flag's default value is returned. +// +// Note: this assumes a lower-cased key given. +func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { + var ( + val interface{} + exists bool + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + ) + + // compute the path through the nested maps to the nested value + if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { + return nil + } + + // if the requested key is an alias, then return the proper key + lcaseKey = v.realKey(lcaseKey) + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + + // Set() override first + val = v.searchMap(v.override, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { + return nil + } + + // PFlag override next + flag, exists := v.pflags[lcaseKey] + if exists && flag.HasChanged() { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice", "stringArray": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + case "intSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToIntSlice(res) + case "stringToString": + return stringToStringConv(flag.ValueString()) + default: + return flag.ValueString() + } + } + if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { + return nil + } + + // Env override next + if v.automaticEnvApplied { + // even if it hasn't been registered, if automaticEnv is used, + // check any Get request + if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok { + return val + } + if nested && v.isPathShadowedInAutoEnv(path) != "" { + return nil + } + } + envkeys, exists := v.env[lcaseKey] + if exists { + for _, envkey := range envkeys { + if val, ok := v.getEnv(envkey); ok { + return val + } + } + } + if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { + return nil + } + + // Config file next + val = v.searchIndexableWithPathPrefixes(v.config, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { + return nil + } + + // K/V store next + val = v.searchMap(v.kvstore, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { + return nil + } + + // Default next + val = v.searchMap(v.defaults, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { + return nil + } + + if flagDefault { + // last chance: if no value is found and a flag does exist for the key, + // get the flag's default value even if the flag's value has not been set. 
+ if flag, exists := v.pflags[lcaseKey]; exists { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice", "stringArray": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + case "intSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToIntSlice(res) + case "stringToString": + return stringToStringConv(flag.ValueString()) + default: + return flag.ValueString() + } + } + // last item, no need to check shadowing + } + + return nil +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 +// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap +func stringToStringConv(val string) interface{} { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]interface{}{} + } + r := csv.NewReader(strings.NewReader(val)) + ss, err := r.Read() + if err != nil { + return nil + } + out := make(map[string]interface{}, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil + } + out[kv[0]] = kv[1] + } + return out +} + +// IsSet checks to see if the key has been set in any of the data locations. +// IsSet is case-insensitive for a key. +func IsSet(key string) bool { return v.IsSet(key) } + +func (v *Viper) IsSet(key string) bool { + lcaseKey := strings.ToLower(key) + val := v.find(lcaseKey, false) + return val != nil +} + +// AutomaticEnv makes Viper check if environment variables match any of the existing keys +// (config, default or flags). If matching env vars are found, they are loaded into Viper. +func AutomaticEnv() { v.AutomaticEnv() } + +func (v *Viper) AutomaticEnv() { + v.automaticEnvApplied = true +} + +// SetEnvKeyReplacer sets the strings.Replacer on the viper object +// Useful for mapping an environmental variable to a key that does +// not match it. +func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } + +func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { + v.envKeyReplacer = r +} + +// RegisterAlias creates an alias that provides another accessor for the same key. +// This enables one to change a name without breaking the application. +func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } + +func (v *Viper) RegisterAlias(alias string, key string) { + v.registerAlias(alias, strings.ToLower(key)) +} + +func (v *Viper) registerAlias(alias string, key string) { + alias = strings.ToLower(alias) + if alias != key && alias != v.realKey(key) { + _, exists := v.aliases[alias] + + if !exists { + // if we alias something that exists in one of the maps to another + // name, we'll never be able to get that value using the original + // name, so move the config value to the new realkey. 
+ if val, ok := v.config[alias]; ok { + delete(v.config, alias) + v.config[key] = val + } + if val, ok := v.kvstore[alias]; ok { + delete(v.kvstore, alias) + v.kvstore[key] = val + } + if val, ok := v.defaults[alias]; ok { + delete(v.defaults, alias) + v.defaults[key] = val + } + if val, ok := v.override[alias]; ok { + delete(v.override, alias) + v.override[key] = val + } + v.aliases[alias] = key + } + } else { + jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + } +} + +func (v *Viper) realKey(key string) string { + newkey, exists := v.aliases[key] + if exists { + jww.DEBUG.Println("Alias", key, "to", newkey) + return v.realKey(newkey) + } + return key +} + +// InConfig checks to see if the given key (or an alias) is in the config file. +func InConfig(key string) bool { return v.InConfig(key) } + +func (v *Viper) InConfig(key string) bool { + // if the requested key is an alias, then return the proper key + key = v.realKey(key) + + _, exists := v.config[key] + return exists +} + +// SetDefault sets the default value for this key. +// SetDefault is case-insensitive for a key. +// Default only used when no value is provided by the user via flag, config or ENV. +func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } + +func (v *Viper) SetDefault(key string, value interface{}) { + // If alias passed in, then set the proper default + key = v.realKey(strings.ToLower(key)) + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value +} + +// Set sets the value for the key in the override register. +// Set is case-insensitive for a key. +// Will be used instead of values obtained via +// flags, config file, ENV, default, or key/value store. +func Set(key string, value interface{}) { v.Set(key, value) } + +func (v *Viper) Set(key string, value interface{}) { + // If alias passed in, then set the proper override + key = v.realKey(strings.ToLower(key)) + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.override, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value +} + +// ReadInConfig will discover and load the configuration file from disk +// and key/value stores, searching in one of the defined paths. +func ReadInConfig() error { return v.ReadInConfig() } + +func (v *Viper) ReadInConfig() error { + jww.INFO.Println("Attempting to read in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + jww.DEBUG.Println("Reading file: ", filename) + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + config := make(map[string]interface{}) + + err = v.unmarshalReader(bytes.NewReader(file), config) + if err != nil { + return err + } + + v.config = config + return nil +} + +// MergeInConfig merges a new configuration with an existing config. 
+func MergeInConfig() error { return v.MergeInConfig() } + +func (v *Viper) MergeInConfig() error { + jww.INFO.Println("Attempting to merge in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + + if !stringInSlice(v.getConfigType(), SupportedExts) { + return UnsupportedConfigError(v.getConfigType()) + } + + file, err := afero.ReadFile(v.fs, filename) + if err != nil { + return err + } + + return v.MergeConfig(bytes.NewReader(file)) +} + +// ReadConfig will read a configuration file, setting existing keys to nil if the +// key does not exist in the file. +func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } + +func (v *Viper) ReadConfig(in io.Reader) error { + v.config = make(map[string]interface{}) + return v.unmarshalReader(in, v.config) +} + +// MergeConfig merges a new configuration with an existing config. +func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } + +func (v *Viper) MergeConfig(in io.Reader) error { + cfg := make(map[string]interface{}) + if err := v.unmarshalReader(in, cfg); err != nil { + return err + } + return v.MergeConfigMap(cfg) +} + +// MergeConfigMap merges the configuration from the map given with an existing config. +// Note that the map given may be modified. +func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } + +func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { + if v.config == nil { + v.config = make(map[string]interface{}) + } + insensitiviseMap(cfg) + mergeMaps(cfg, v.config, nil) + return nil +} + +// WriteConfig writes the current configuration to a file. +func WriteConfig() error { return v.WriteConfig() } + +func (v *Viper) WriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, true) +} + +// SafeWriteConfig writes current configuration to file only if the file does not exist. +func SafeWriteConfig() error { return v.SafeWriteConfig() } + +func (v *Viper) SafeWriteConfig() error { + if len(v.configPaths) < 1 { + return errors.New("missing configuration for 'configPath'") + } + return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType)) +} + +// WriteConfigAs writes current configuration to a given filename. +func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } + +func (v *Viper) WriteConfigAs(filename string) error { + return v.writeConfig(filename, true) +} + +// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } + +func (v *Viper) SafeWriteConfigAs(filename string) error { + alreadyExists, err := afero.Exists(v.fs, filename) + if alreadyExists && err == nil { + return ConfigFileAlreadyExistsError(filename) + } + return v.writeConfig(filename, false) +} + +func (v *Viper) writeConfig(filename string, force bool) error { + jww.INFO.Println("Attempting to write configuration to file.") + var configType string + + ext := filepath.Ext(filename) + if ext != "" { + configType = ext[1:] + } else { + configType = v.configType + } + if configType == "" { + return fmt.Errorf("config type could not be determined for %s", filename) + } + + if !stringInSlice(configType, SupportedExts) { + return UnsupportedConfigError(configType) + } + if v.config == nil { + v.config = make(map[string]interface{}) + } + flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY + if !force { + flags |= os.O_EXCL + } + f, err := v.fs.OpenFile(filename, flags, v.configPermissions) + if err != nil { + return err + } + defer f.Close() + + if err := v.marshalWriter(f, configType); err != nil { + return err + } + + return f.Sync() +} + +// Unmarshal a Reader into a map. +// Should probably be an unexported function. +func unmarshalReader(in io.Reader, c map[string]interface{}) error { + return v.unmarshalReader(in, c) +} + +func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { + buf := new(bytes.Buffer) + buf.ReadFrom(in) + + switch strings.ToLower(v.getConfigType()) { + case "yaml", "yml": + if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "json": + if err := json.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "hcl": + obj, err := hcl.Parse(buf.String()) + if err != nil { + return ConfigParseError{err} + } + if err = hcl.DecodeObject(&c, obj); err != nil { + return ConfigParseError{err} + } + + case "toml": + tree, err := toml.LoadReader(buf) + if err != nil { + return ConfigParseError{err} + } + tmap := tree.ToMap() + for k, v := range tmap { + c[k] = v + } + + case "dotenv", "env": + env, err := gotenv.StrictParse(buf) + if err != nil { + return ConfigParseError{err} + } + for k, v := range env { + c[k] = v + } + + case "properties", "props", "prop": + v.properties = properties.NewProperties() + var err error + if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { + return ConfigParseError{err} + } + for _, key := range v.properties.Keys() { + value, _ := v.properties.Get(key) + // recursively build nested maps + path := strings.Split(key, ".") + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(c, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + + case "ini": + cfg := ini.Empty(v.iniLoadOptions) + err := cfg.Append(buf.Bytes()) + if err != nil { + return ConfigParseError{err} + } + sections := cfg.Sections() + for i := 0; i < len(sections); i++ { + section := sections[i] + keys := section.Keys() + for j := 0; j < len(keys); j++ { + key := keys[j] + value := cfg.Section(section.Name()).Key(key.Name()).String() + c[section.Name()+"."+key.Name()] = value + } + } + } + + insensitiviseMap(c) + return nil +} + +// Marshal a map into Writer. 
+func (v *Viper) marshalWriter(f afero.File, configType string) error { + c := v.AllSettings() + switch configType { + case "json": + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return ConfigMarshalError{err} + } + _, err = f.WriteString(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + + case "hcl": + b, err := json.Marshal(c) + if err != nil { + return ConfigMarshalError{err} + } + ast, err := hcl.Parse(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + err = printer.Fprint(f, ast.Node) + if err != nil { + return ConfigMarshalError{err} + } + + case "prop", "props", "properties": + if v.properties == nil { + v.properties = properties.NewProperties() + } + p := v.properties + for _, key := range v.AllKeys() { + _, _, err := p.Set(key, v.GetString(key)) + if err != nil { + return ConfigMarshalError{err} + } + } + _, err := p.WriteComment(f, "#", properties.UTF8) + if err != nil { + return ConfigMarshalError{err} + } + + case "dotenv", "env": + lines := []string{} + for _, key := range v.AllKeys() { + envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) + val := v.Get(key) + lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) + } + s := strings.Join(lines, "\n") + if _, err := f.WriteString(s); err != nil { + return ConfigMarshalError{err} + } + + case "toml": + t, err := toml.TreeFromMap(c) + if err != nil { + return ConfigMarshalError{err} + } + s := t.String() + if _, err := f.WriteString(s); err != nil { + return ConfigMarshalError{err} + } + + case "yaml", "yml": + b, err := yaml.Marshal(c) + if err != nil { + return ConfigMarshalError{err} + } + if _, err = f.WriteString(string(b)); err != nil { + return ConfigMarshalError{err} + } + + case "ini": + keys := v.AllKeys() + cfg := ini.Empty() + ini.PrettyFormat = false + for i := 0; i < len(keys); i++ { + key := keys[i] + lastSep := strings.LastIndex(key, ".") + sectionName := key[:(lastSep)] + keyName := key[(lastSep + 1):] + if sectionName == "default" { + sectionName = "" + } + cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key)) + } + cfg.WriteTo(f) + } + return nil +} + +func keyExists(k string, m map[string]interface{}) string { + lk := strings.ToLower(k) + for mk := range m { + lmk := strings.ToLower(mk) + if lmk == lk { + return mk + } + } + return "" +} + +func castToMapStringInterface( + src map[interface{}]interface{}) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[fmt.Sprintf("%v", k)] = v + } + return tgt +} + +func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +func castMapStringToMapInterface(src map[string]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's +// insistence on parsing nested structures as `map[interface{}]interface{}` +// instead of using a `string` as the key for nest structures beyond one level +// deep. Both map types are supported as there is a go-yaml fork that uses +// `map[string]interface{}` instead. 
+func mergeMaps( + src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + for sk, sv := range src { + tk := keyExists(sk, tgt) + if tk == "" { + jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + tv, ok := tgt[tk] + if !ok { + jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + tgt[sk] = sv + if itgt != nil { + itgt[sk] = sv + } + continue + } + + svType := reflect.TypeOf(sv) + tvType := reflect.TypeOf(tv) + if tvType != nil && svType != tvType { // Allow for the target to be nil + jww.ERROR.Printf( + "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + continue + } + + jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", + sk, svType, tvType, sv, tv) + + switch ttv := tv.(type) { + case map[interface{}]interface{}: + jww.TRACE.Printf("merging maps (must convert)") + tsv := sv.(map[interface{}]interface{}) + ssv := castToMapStringInterface(tsv) + stv := castToMapStringInterface(ttv) + mergeMaps(ssv, stv, ttv) + case map[string]interface{}: + jww.TRACE.Printf("merging maps") + mergeMaps(sv.(map[string]interface{}), ttv, nil) + default: + jww.TRACE.Printf("setting value") + tgt[tk] = sv + if itgt != nil { + itgt[tk] = sv + } + } + } +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + jww.ERROR.Printf("get remote config: %s", err) + + continue + } + + v.kvstore = val + + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + // Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
+func (v *Viper) watchKeyValueConfig() error { + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a v.keyDelim separator +func AllKeys() []string { return v.AllKeys() } + +func (v *Viper) AllKeys() []string { + m := map[string]bool{} + // add all paths, by order of descending priority to ensure correct shadowing + m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") + m = v.flattenAndMergeMap(m, v.override, "") + m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) + m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) + m = v.flattenAndMergeMap(m, v.config, "") + m = v.flattenAndMergeMap(m, v.kvstore, "") + m = v.flattenAndMergeMap(m, v.defaults, "") + + // convert set of paths to list + a := make([]string, 0, len(m)) + for x := range m { + a = append(a, x) + } + return a +} + +// flattenAndMergeMap recursively flattens the given map into a map[string]bool +// of key paths (used as a set, easier to manipulate than a []string): +// - each path is merged into a single key string, delimited with v.keyDelim +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// The resulting set of paths is merged to the given shadow set at the same time. +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { + if shadow != nil && prefix != "" && shadow[prefix] { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]bool) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += v.keyDelim + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = true + continue + } + // recursively merge to shadow map + shadow = v.flattenAndMergeMap(shadow, m2, fullKey) + } + return shadow +} + +// mergeFlatMap merges the given maps, excluding values of the second map +// shadowed by values from the first map. +func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { + // scan keys +outer: + for k := range m { + path := strings.Split(k, v.keyDelim) + // scan intermediate paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if shadow[parentKey] { + // path is shadowed, continue + continue outer + } + } + // add key + shadow[strings.ToLower(k)] = true + } + return shadow +} + +// AllSettings merges all settings and returns them as a map[string]interface{}. 
+func AllSettings() map[string]interface{} { return v.AllSettings() } + +func (v *Viper) AllSettings() map[string]interface{} { + m := map[string]interface{}{} + // start from the list of keys, and construct the map one value at a time + for _, k := range v.AllKeys() { + value := v.Get(k) + if value == nil { + // should not happen, since AllKeys() returns only keys holding a value, + // check just in case anything changes + continue + } + path := strings.Split(k, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(m, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + return m +} + +// SetFs sets the filesystem to use to read configuration. +func SetFs(fs afero.Fs) { v.SetFs(fs) } + +func (v *Viper) SetFs(fs afero.Fs) { + v.fs = fs +} + +// SetConfigName sets name for the config file. +// Does not include extension. +func SetConfigName(in string) { v.SetConfigName(in) } + +func (v *Viper) SetConfigName(in string) { + if in != "" { + v.configName = in + v.configFile = "" + } +} + +// SetConfigType sets the type of the configuration returned by the +// remote source, e.g. "json". +func SetConfigType(in string) { v.SetConfigType(in) } + +func (v *Viper) SetConfigType(in string) { + if in != "" { + v.configType = in + } +} + +// SetConfigPermissions sets the permissions for the config file. +func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } + +func (v *Viper) SetConfigPermissions(perm os.FileMode) { + v.configPermissions = perm.Perm() +} + +// IniLoadOptions sets the load options for ini parsing. +func IniLoadOptions(in ini.LoadOptions) Option { + return optionFunc(func(v *Viper) { + v.iniLoadOptions = in + }) +} + +func (v *Viper) getConfigType() string { + if v.configType != "" { + return v.configType + } + + cf, err := v.getConfigFile() + if err != nil { + return "" + } + + ext := filepath.Ext(cf) + + if len(ext) > 1 { + return ext[1:] + } + + return "" +} + +func (v *Viper) getConfigFile() (string, error) { + if v.configFile == "" { + cf, err := v.findConfigFile() + if err != nil { + return "", err + } + v.configFile = cf + } + return v.configFile, nil +} + +func (v *Viper) searchInPath(in string) (filename string) { + jww.DEBUG.Println("Searching for config in ", in) + for _, ext := range SupportedExts { + jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + if v.configType != "" { + if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { + return filepath.Join(in, v.configName) + } + } + + return "" +} + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + jww.INFO.Println("Searching for config in ", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +// Debug prints all configuration registries for debugging +// purposes. 
+func Debug() { v.Debug() } + +func (v *Viper) Debug() { + fmt.Printf("Aliases:\n%#v\n", v.aliases) + fmt.Printf("Override:\n%#v\n", v.override) + fmt.Printf("PFlags:\n%#v\n", v.pflags) + fmt.Printf("Env:\n%#v\n", v.env) + fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) + fmt.Printf("Config:\n%#v\n", v.config) + fmt.Printf("Defaults:\n%#v\n", v.defaults) +} diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go new file mode 100644 index 000000000..c433a8fa4 --- /dev/null +++ b/vendor/github.com/spf13/viper/watch.go @@ -0,0 +1,11 @@ +// +build !js + +package viper + +import "github.com/fsnotify/fsnotify" + +type watcher = fsnotify.Watcher + +func newWatcher() (*watcher, error) { + return fsnotify.NewWatcher() +} diff --git a/vendor/github.com/spf13/viper/watch_wasm.go b/vendor/github.com/spf13/viper/watch_wasm.go new file mode 100644 index 000000000..8e47e6a91 --- /dev/null +++ b/vendor/github.com/spf13/viper/watch_wasm.go @@ -0,0 +1,30 @@ +// +build js,wasm + +package viper + +import ( + "errors" + + "github.com/fsnotify/fsnotify" +) + +type watcher struct { + Events chan fsnotify.Event + Errors chan error +} + +func (*watcher) Close() error { + return nil +} + +func (*watcher) Add(name string) error { + return nil +} + +func (*watcher) Remove(name string) error { + return nil +} + +func newWatcher() (*watcher, error) { + return &watcher{}, errors.New("fsnotify is not supported on WASM") +} diff --git a/vendor/github.com/ssgreg/nlreturn/v2/LICENSE b/vendor/github.com/ssgreg/nlreturn/v2/LICENSE new file mode 100644 index 000000000..0a5b4d106 --- /dev/null +++ b/vendor/github.com/ssgreg/nlreturn/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Grigory Zubankov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go b/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go new file mode 100644 index 000000000..52318ccfd --- /dev/null +++ b/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go @@ -0,0 +1,86 @@ +package nlreturn + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +const ( + linterName = "nlreturn" + linterDoc = `Linter requires a new line before return and branch statements except when the return is alone inside a statement group (such as an if statement) to increase code clarity.` +) + +// NewAnalyzer returns a new nlreturn analyzer. 
+func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: linterName, + Doc: linterDoc, + Run: run, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + switch c := node.(type) { + case *ast.CaseClause: + inspectBlock(pass, c.Body) + case *ast.CommClause: + inspectBlock(pass, c.Body) + case *ast.BlockStmt: + inspectBlock(pass, c.List) + } + + return true + }) + } + + return nil, nil +} + +func inspectBlock(pass *analysis.Pass, block []ast.Stmt) { + for i, stmt := range block { + switch stmt.(type) { + case *ast.BranchStmt, *ast.ReturnStmt: + if i == 0 { + return + } + + if line(pass, stmt.Pos())-line(pass, block[i-1].End()) <= 1 { + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + Message: fmt.Sprintf("%s with no blank line before", name(stmt)), + SuggestedFixes: []analysis.SuggestedFix{ + { + TextEdits: []analysis.TextEdit{ + { + Pos: stmt.Pos(), + NewText: []byte("\n"), + End: stmt.Pos(), + }, + }, + }, + }, + }) + } + } + } +} + +func name(stmt ast.Stmt) string { + switch c := stmt.(type) { + case *ast.BranchStmt: + return c.Tok.String() + case *ast.ReturnStmt: + return "return" + default: + return "unknown" + } +} + +func line(pass *analysis.Pass, pos token.Pos) int { + return pass.Fset.Position(pos).Line +} diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml new file mode 100644 index 000000000..010d4ccd5 --- /dev/null +++ b/vendor/github.com/stretchr/objx/.codeclimate.yml @@ -0,0 +1,13 @@ +engines: + gofmt: + enabled: true + golint: + enabled: true + govet: + enabled: true + +exclude_patterns: +- ".github/" +- "vendor/" +- "codegen/" +- "doc.go" diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore new file mode 100644 index 000000000..ea58090bd --- /dev/null +++ b/vendor/github.com/stretchr/objx/.gitignore @@ -0,0 +1,11 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml new file mode 100644 index 000000000..a63efa59d --- /dev/null +++ b/vendor/github.com/stretchr/objx/.travis.yml @@ -0,0 +1,25 @@ +language: go +go: + - 1.8 + - 1.9 + - tip + +env: + global: + - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00 + +before_script: + - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter + - chmod +x ./cc-test-reporter + - ./cc-test-reporter before-build + +install: +- go get github.com/go-task/task/cmd/task + +script: +- task dl-deps +- task lint +- task test-coverage + +after_script: + - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock new file mode 100644 index 000000000..eebe342a9 --- /dev/null +++ b/vendor/github.com/stretchr/objx/Gopkg.lock @@ -0,0 +1,30 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" + version = "v1.2.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml new file mode 100644 index 000000000..d70f1570b --- /dev/null +++ b/vendor/github.com/stretchr/objx/Gopkg.toml @@ -0,0 +1,8 @@ +[prune] + unused-packages = true + non-go = true + go-tests = true + +[[constraint]] + name = "github.com/stretchr/testify" + version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 000000000..44d4d9d5a --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md new file mode 100644 index 000000000..be5750c94 --- /dev/null +++ b/vendor/github.com/stretchr/objx/README.md @@ -0,0 +1,80 @@ +# Objx +[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) +[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) +[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) +[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) +[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) + +Objx - Go package for dealing with maps, slices, JSON and other data. 
+ +Get started: + +- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) +- Check out the API Documentation http://godoc.org/github.com/stretchr/objx + +## Overview +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. + +### Pattern +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. + +### Reading data +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +### Ranging +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } + +## Installation +To install Objx, use go get: + + go get github.com/stretchr/objx + +### Staying up to date +To update Objx to the latest version, run: + + go get -u github.com/stretchr/objx + +### Supported go versions +We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment. + +## Contributing +Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml new file mode 100644 index 000000000..f8035641f --- /dev/null +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -0,0 +1,32 @@ +default: + deps: [test] + +dl-deps: + desc: Downloads cli dependencies + cmds: + - go get -u github.com/golang/lint/golint + - go get -u github.com/golang/dep/cmd/dep + +update-deps: + desc: Updates dependencies + cmds: + - dep ensure + - dep ensure -update + +lint: + desc: Runs golint + cmds: + - go fmt $(go list ./... | grep -v /vendor/) + - go vet $(go list ./... | grep -v /vendor/) + - golint $(ls *.go | grep -v "doc.go") + silent: true + +test: + desc: Runs go tests + cmds: + - go test -race . + +test-coverage: + desc: Runs go tests and calucates test coverage + cmds: + - go test -coverprofile=c.out . 
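The objx README above documents its access pattern only in fragments. As a reading aid before the accessors.go implementation that follows, here is a minimal, self-contained sketch that stitches those same documented calls together (MustFromJSON, Get with dot and array notation, typed getters with defaults, and the Is* checks); it assumes nothing beyond what the README itself describes and is illustrative only, not part of the vendored files.

    package main

    import (
        "fmt"

        "github.com/stretchr/objx"
    )

    func main() {
        // MustFromJSON panics on invalid JSON; FromJSON returns an error instead.
        m := objx.MustFromJSON(`{"name": "Mat", "places": [{"city": "London"}]}`)

        // Typed getters fall back to a zero value, or to an explicit default,
        // when the key is missing or holds a different type.
        name := m.Get("name").Str()
        nickname := m.Get("nickname").Str(name) // no nickname set, so name is returned

        // Dot and array notation reach into nested structures.
        city := m.Get("places[0].city").Str()

        // Is* methods check the underlying type before committing to it.
        if m.Get("name").IsStr() {
            fmt.Println(name, nickname, city)
        }
    }

The access helper in accessors.go below is what resolves the "places[0].city" selector: it splits on the path separator, peels off one array index per segment via arrayAccesRegex, and recurses into the remaining path.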
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 000000000..204356a22 --- /dev/null +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,148 @@ +package objx + +import ( + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet bool) interface{} { + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + if index >= len(array) { + return nil + } + return array[index] + } + return nil + + case string: + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + if strings.Contains(thisSel, "[") { + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + if len(arrayMatches) > 0 { + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + } + } + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } + current = curMSI[thisSel] + default: + current = nil + } + // do we need to access the item of an array? 
+ if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + } + } + } + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet) + } + } + return current +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + return 0 + } + return value +} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go new file mode 100644 index 000000000..f9eb42a25 --- /dev/null +++ b/vendor/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. + SignatureSeparator = "_" +) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 000000000..5e020f310 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,108 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, err = encoder.Write([]byte(jsonData)) + if err != nil { + return "", err + } + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 000000000..6d6af1a83 --- /dev/null +++ b/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,66 @@ +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ +package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 000000000..406bc8926 --- /dev/null +++ b/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,190 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// +// Returns nil if any key argument is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +func MSI(keyAndValuePairs ...interface{}) Map { + newMap := Map{} + keyAndValuePairsLen := len(keyAndValuePairs) + if keyAndValuePairsLen%2 != 0 { + return nil + } + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + return nil + } + newMap[keyString] = value + } + return newMap +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + if err != nil { + return Nil, err + } + return New(data), nil +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. 
+// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + m := Map{} + for k, vals := range vals { + m[k] = vals[0] + } + return m, nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 000000000..c3400a3f7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,77 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + if !contains(exclude, k) { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := Map{} + for k, v := range m { + copied[k] = v + } + return copied +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := Map{} + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return newMap +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 000000000..692be8e2a --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,12 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security key +func HashWithKey(data, key string) string { + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 000000000..d9e0b479a --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 000000000..202a91f8c --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2501 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. 
+func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. 
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. 
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. 
+func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. 
+func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. 
+func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. 
+func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. 
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. 
+func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. 
+func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. 
+func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. 
+func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. 
The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. 
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. 
+func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. 
+func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. 
+func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 000000000..e4b4a1433 --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,53 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index dc200395c..41649d267 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -13,12 +13,42 @@ const ( compareGreater ) +var ( + intType = reflect.TypeOf(int(1)) + int8Type = reflect.TypeOf(int8(1)) + int16Type = reflect.TypeOf(int16(1)) + int32Type = reflect.TypeOf(int32(1)) + int64Type = reflect.TypeOf(int64(1)) + + uintType = reflect.TypeOf(uint(1)) + uint8Type = reflect.TypeOf(uint8(1)) + uint16Type = reflect.TypeOf(uint16(1)) + uint32Type = reflect.TypeOf(uint32(1)) + uint64Type = reflect.TypeOf(uint64(1)) + + float32Type = reflect.TypeOf(float32(1)) + float64Type = reflect.TypeOf(float64(1)) + + stringType = reflect.TypeOf("") +) + func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + obj1Value := reflect.ValueOf(obj1) + obj2Value := reflect.ValueOf(obj2) + + // throughout this switch we try and avoid calling .Convert() if possible, + // as this has a pretty big performance impact switch kind { case reflect.Int: { - intobj1 := obj1.(int) - intobj2 := obj2.(int) + intobj1, ok := obj1.(int) + if !ok { + intobj1 = obj1Value.Convert(intType).Interface().(int) + } + intobj2, ok := obj2.(int) + if !ok { + intobj2 = obj2Value.Convert(intType).Interface().(int) + } if intobj1 > intobj2 { return compareGreater, true } @@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int8: { - int8obj1 := obj1.(int8) - int8obj2 := obj2.(int8) + int8obj1, ok := obj1.(int8) + if !ok { + int8obj1 = obj1Value.Convert(int8Type).Interface().(int8) + } + int8obj2, ok := obj2.(int8) + if !ok { + int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) + } if int8obj1 > int8obj2 { return compareGreater, true } @@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int16: { - int16obj1 := obj1.(int16) - int16obj2 := obj2.(int16) + int16obj1, ok := obj1.(int16) + if !ok { + int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) + } + int16obj2, ok := obj2.(int16) + if 
!ok { + int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) + } if int16obj1 > int16obj2 { return compareGreater, true } @@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int32: { - int32obj1 := obj1.(int32) - int32obj2 := obj2.(int32) + int32obj1, ok := obj1.(int32) + if !ok { + int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) + } + int32obj2, ok := obj2.(int32) + if !ok { + int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) + } if int32obj1 > int32obj2 { return compareGreater, true } @@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Int64: { - int64obj1 := obj1.(int64) - int64obj2 := obj2.(int64) + int64obj1, ok := obj1.(int64) + if !ok { + int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) + } + int64obj2, ok := obj2.(int64) + if !ok { + int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) + } if int64obj1 > int64obj2 { return compareGreater, true } @@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint: { - uintobj1 := obj1.(uint) - uintobj2 := obj2.(uint) + uintobj1, ok := obj1.(uint) + if !ok { + uintobj1 = obj1Value.Convert(uintType).Interface().(uint) + } + uintobj2, ok := obj2.(uint) + if !ok { + uintobj2 = obj2Value.Convert(uintType).Interface().(uint) + } if uintobj1 > uintobj2 { return compareGreater, true } @@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint8: { - uint8obj1 := obj1.(uint8) - uint8obj2 := obj2.(uint8) + uint8obj1, ok := obj1.(uint8) + if !ok { + uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) + } + uint8obj2, ok := obj2.(uint8) + if !ok { + uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) + } if uint8obj1 > uint8obj2 { return compareGreater, true } @@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint16: { - uint16obj1 := obj1.(uint16) - uint16obj2 := obj2.(uint16) + uint16obj1, ok := obj1.(uint16) + if !ok { + uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) + } + uint16obj2, ok := obj2.(uint16) + if !ok { + uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) + } if uint16obj1 > uint16obj2 { return compareGreater, true } @@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint32: { - uint32obj1 := obj1.(uint32) - uint32obj2 := obj2.(uint32) + uint32obj1, ok := obj1.(uint32) + if !ok { + uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) + } + uint32obj2, ok := obj2.(uint32) + if !ok { + uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) + } if uint32obj1 > uint32obj2 { return compareGreater, true } @@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Uint64: { - uint64obj1 := obj1.(uint64) - uint64obj2 := obj2.(uint64) + uint64obj1, ok := obj1.(uint64) + if !ok { + uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) + } + uint64obj2, ok := obj2.(uint64) + if !ok { + uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) + } if uint64obj1 > uint64obj2 { return compareGreater, true } @@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float32: { - float32obj1 := obj1.(float32) - float32obj2 := obj2.(float32) + 
float32obj1, ok := obj1.(float32) + if !ok { + float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) + } + float32obj2, ok := obj2.(float32) + if !ok { + float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) + } if float32obj1 > float32obj2 { return compareGreater, true } @@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.Float64: { - float64obj1 := obj1.(float64) - float64obj2 := obj2.(float64) + float64obj1, ok := obj1.(float64) + if !ok { + float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) + } + float64obj2, ok := obj2.(float64) + if !ok { + float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) + } if float64obj1 > float64obj2 { return compareGreater, true } @@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { } case reflect.String: { - stringobj1 := obj1.(string) - stringobj2 := obj2.(string) + stringobj1, ok := obj1.(string) + if !ok { + stringobj1 = obj1Value.Convert(stringType).Interface().(string) + } + stringobj2, ok := obj2.(string) + if !ok { + stringobj2 = obj2Value.Convert(stringType).Interface().(string) + } if stringobj1 > stringobj2 { return compareGreater, true } @@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) } +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) +} + +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + zero := reflect.Zero(reflect.TypeOf(e)) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) +} + func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 49370eb16..4dfd1229a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. 
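The compare() changes above fall back to reflect's Convert when the dynamic type is a named variant of a basic kind, and Positive/Negative build on compareTwoValues against the argument's zero value. A hedged sketch of what that enables in a test; Greater and LessOrEqual are existing testify helpers, and the Millis type is invented for illustration.

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    // Millis is a named int64; its Kind is Int64 but its dynamic type is
    // not int64, so the old type assertions would have panicked. The
    // Convert fallback above handles it.
    type Millis int64

    func TestOrderingAndSign(t *testing.T) {
        assert.Greater(t, Millis(250), Millis(100))
        assert.LessOrEqual(t, Millis(100), Millis(100))

        // New in this patch: sign checks against the zero value of the
        // argument's own type.
        assert.Positive(t, 3.14)
        assert.Negative(t, Millis(-1))
    }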
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // @@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) } +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Negative(t, e, append([]interface{}{msg}, args...)...) 
+} + // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) } +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Positive(t, e, append([]interface{}{msg}, args...)...) +} + // Regexpf asserts that a specified regexp matches a string. // // assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 9db889427..25337a6f0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIs(a.t, err, target, msgAndArgs...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). 
// // actualObj, err := SomeFunction() @@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasing(a.t, object, msgAndArgs...) +} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasing(a.t, object, msgAndArgs...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasing(a.t, object, msgAndArgs...) +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasing(a.t, object, msgAndArgs...) 
+} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i return Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Negativef(a.t, e, msg, args...) +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b return Panicsf(a.t, f, msg, args...) } +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positive(a.t, e, msgAndArgs...) +} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. 
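assertion_forward.go mirrors each package-level helper as a method on *Assertions so a test can bind t once via assert.New. A short sketch using only helpers added in this patch; assert.New itself is part of the existing package.

    package demo

    import (
        "errors"
        "fmt"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestForwardedHelpers(t *testing.T) {
        a := assert.New(t) // every method below forwards to the package-level helper

        base := errors.New("boom")
        wrapped := fmt.Errorf("saving failed: %w", base)

        a.ErrorIs(wrapped, base)
        a.NotErrorIs(wrapped, errors.New("other"))
        a.Positive(42)
        a.Negativef(-1.5, "expected a deficit for account %s", "acme")
        a.IsIncreasing([]int{1, 2, 3})
    }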
// // a.Regexp(regexp.MustCompile("start"), "it's starting") diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 000000000..1c3b47182 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,81 @@ +package assert + +import ( + "fmt" + "reflect" +) + +// isOrdered checks that collection contains orderable elements. +func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + objKind := reflect.TypeOf(object).Kind() + if objKind != reflect.Slice && objKind != reflect.Array { + return false + } + + objValue := reflect.ValueOf(object) + objLen := objValue.Len() + + if objLen <= 1 { + return true + } + + value := objValue.Index(0) + valueInterface := value.Interface() + firstValueKind := value.Kind() + + for i := 1; i < objLen; i++ { + prevValue := value + prevValueInterface := valueInterface + + value = objValue.Index(i) + valueInterface = value.Interface() + + compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) + + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + } + + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) + } + } + + return true +} + +// IsIncreasing asserts that the collection is increasing +// +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} + +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 914a10d83..bcac4401f 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -172,8 +172,8 @@ func isTest(name, prefix string) bool { if len(name) == len(prefix) { // 
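In assertion_order.go above, isOrdered walks the slice pairwise through compare(), and the four exported assertions differ only in which CompareType results they allow. A sketch of the four; the doc comments write []float, which is not a Go type, so []float64 is used here.

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestOrderAssertions(t *testing.T) {
        assert.IsIncreasing(t, []int{1, 2, 3})          // every step strictly <
        assert.IsNonDecreasing(t, []float64{1, 1, 2})   // <= between neighbours is enough
        assert.IsDecreasing(t, []string{"c", "b", "a"}) // strictly >, lexicographic for strings
        assert.IsNonIncreasing(t, []int8{3, 3, 1})      // >= between neighbours is enough

        // Anything that is not a slice or array makes isOrdered return
        // false before any element comparison happens.
    }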
"Test" is ok return true } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) + r, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(r) } func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { @@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{ DisableCapacities: true, SortKeys: true, DisableMethods: true, + MaxDepth: 10, } type tHelper interface { @@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } } } + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ + "expected: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.Is(err, target) { + return true + } + + var expectedText string + if target != nil { + expectedText = target.Error() + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", expectedText, chain, + ), msgAndArgs...) +} + +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ + "expected: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + +func buildErrorChainString(err error) string { + if err == nil { + return "" + } + + e := errors.Unwrap(err) + chain := fmt.Sprintf("%q", err.Error()) + for e != nil { + chain += fmt.Sprintf("\n\t%q", e.Error()) + e = errors.Unwrap(e) + } + return chain +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 000000000..7324128ef --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. 
+// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 000000000..e2e6a2d23 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,1008 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // Holds the caller info for the On() call + callerInfo []string + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) + + // PanicMsg holds msg to be used to mock panic on the function call + // if the PanicMsg is set to a non nil string the function call will panic + // irrespective of other settings + PanicMsg *string +} + +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + callerInfo: callerInfo, + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + PanicMsg: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. 
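Following the package documentation above, a minimal mock implementation looks like this; the Storer interface and the MockStorer name are invented for illustration and are reused by the later sketches.

    package demo

    import "github.com/stretchr/testify/mock"

    // Storer is a hypothetical interface the code under test depends on.
    type Storer interface {
        SavePersonDetails(firstname, lastname string, age int) (int, error)
    }

    // MockStorer embeds mock.Mock and forwards each call to Called,
    // exactly as the package documentation describes.
    type MockStorer struct {
        mock.Mock
    }

    func (m *MockStorer) SavePersonDetails(firstname, lastname string, age int) (int, error) {
        args := m.Called(firstname, lastname, age)
        return args.Int(0), args.Error(1)
    }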
+// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Panic specifies if the functon call should fail and the panic message +// +// Mock.On("DoSomething").Panic("test panic") +func (c *Call) Panic(msg string) *Call { + c.lock() + defer c.unlock() + + c.PanicMsg = &msg + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method (such as an unmarshaler) that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +//go:noinline +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // test is An optional variable that holds the test struct, to be used when an + // invalid mock call was made. + test TestingT + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. 
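The Call setters above (Return, Once/Twice/Times, After, WaitUntil, Run, Maybe, Panic) all mutate the same expectation under the parent mock's mutex and return the Call for chaining. A sketch, reusing the hypothetical MockStorer from the earlier sketch; repeated On calls for the same arguments are consumed in order as their Repeatability is used up.

    package demo

    import (
        "errors"
        "time"

        "github.com/stretchr/testify/mock"
    )

    func configureExpectations(m *MockStorer) {
        // The first two matching calls succeed, with a simulated 10ms delay.
        m.On("SavePersonDetails", "Ada", "Lovelace", 36).
            Return(1815, nil).
            Twice().
            After(10 * time.Millisecond)

        // The next matching call fails once.
        m.On("SavePersonDetails", "Ada", "Lovelace", 36).
            Return(0, errors.New("storage full")).
            Once()

        // Optional expectation: AssertExpectations does not complain if
        // this one is never used.
        m.On("SavePersonDetails", mock.Anything, mock.Anything, mock.AnythingOfType("int")).
            Return(0, nil).
            Maybe()
    }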
Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// Test sets the test struct variable of the mock object +func (m *Mock) Test(t TestingT) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.test = t +} + +// fail fails the current test with the given formatted format and args. +// In case that a test was defined, it uses the test APIs for failing a test, +// otherwise it uses panic. +func (m *Mock) fail(format string, args ...interface{}) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.test == nil { + panic(fmt.Sprintf(format, args...)) + } + m.test.Errorf(format, args...) + m.test.FailNow() +} + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, assert.CallerInfo(), arguments...) + m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + var expectedCall *Call + + for i, call := range m.ExpectedCalls { + if call.Method == method { + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + expectedCall = call + if call.Repeatability > -1 { + return i, call + } + } + } + } + + return -1, expectedCall +} + +type matchCandidate struct { + call *Call + mismatch string + diffCount int +} + +func (c matchCandidate) isBetterMatchThan(other matchCandidate) bool { + if c.call == nil { + return false + } + if other.call == nil { + return true + } + + if c.diffCount > other.diffCount { + return false + } + if c.diffCount < other.diffCount { + return true + } + + if c.call.Repeatability > 0 && other.call.Repeatability <= 0 { + return true + } + return false +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { + var bestMatch matchCandidate + + for _, call := range m.expectedCalls() { + if call.Method == method { + + errInfo, tempDiffCount := call.Arguments.Diff(arguments) + tempCandidate := matchCandidate{ + call: call, + mismatch: errInfo, + diffCount: tempDiffCount, + } + if tempCandidate.isBetterMatchThan(bestMatch) { + bestMatch = tempCandidate + } + } + } + + return bestMatch.call, bestMatch.mismatch +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. 
+func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + //TODO: could combine expected and closes in single loop + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // expected call found but it has already been called with repeatable times + if call != nil { + m.mutex.Unlock() + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + closestCall, mismatch := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestCall != nil { + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + callString(methodName, arguments, true), + callString(methodName, closestCall.Arguments, true), + diffArguments(closestCall.Arguments, arguments), + strings.TrimSpace(mismatch), + ) + } else { + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + panicMsg := call.PanicMsg + m.mutex.Unlock() + if panicMsg != nil { + panic(*panicMsg) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. 
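Called derives the method name from runtime.Caller, which is why mocked methods invoke it directly; MethodCalled, shown above, is the explicit variant for when the immediate caller is not the mocked method itself. A hypothetical sketch (the Delete method and its closure are invented for illustration):

    package demo

    // Delete is a hypothetical mocked method whose recording happens
    // inside a closure; runtime.Caller would report the closure's name
    // ("...Delete.func1"), so MethodCalled names the method explicitly.
    func (m *MockStorer) Delete(id int) error {
        record := func() error {
            args := m.MethodCalled("Delete", id)
            return args.Error(0)
        }
        return record()
    }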
+func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. +func (m *Mock) AssertExpectations(t TestingT) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if !m.methodWasCalled(methodName, arguments) { + var calledWithArgs []string + for _, call := range m.calls() { + calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) + } + if len(calledWithArgs) == 0 { + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) + } + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) + } + return true +} + +// AssertNotCalled asserts that the method was not called. 
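The assertion helpers above compare m.Calls against m.ExpectedCalls. A sketch of a complete test, again reusing the hypothetical MockStorer:

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    func TestStorerIsUsedAsExpected(t *testing.T) {
        m := &MockStorer{}
        m.Test(t) // report failures through t instead of panicking
        m.On("SavePersonDetails", "Grace", "Hopper", 36).Return(1906, nil)

        // ... exercise the code under test ...
        year, err := m.SavePersonDetails("Grace", "Hopper", 36)
        if err != nil || year != 1906 {
            t.Fatalf("unexpected result: %d, %v", year, err)
        }

        m.AssertCalled(t, "SavePersonDetails", "Grace", "Hopper", 36)
        m.AssertNumberOfCalls(t, "SavePersonDetails", 1)
        m.AssertNotCalled(t, "SavePersonDetails", mock.Anything, mock.Anything, 99)
        m.AssertExpectations(t)
    }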
+// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if m.methodWasCalled(methodName, arguments) { + return assert.Fail(t, "Should not have called with given arguments", + fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) + } + return true +} + +// IsMethodCallable checking that the method can be called +// If the method was called more than `Repeatability` return false +func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + + for _, v := range m.ExpectedCalls { + if v.Method != methodName { + continue + } + if len(arguments) != len(v.Arguments) { + continue + } + if v.Repeatability < v.totalCalls { + continue + } + if isArgsEqual(v.Arguments, arguments) { + return true + } + } + return false +} + +// isArgsEqual compares arguments +func isArgsEqual(expected Arguments, args []interface{}) bool { + if len(expected) != len(args) { + return false + } + for i, v := range args { + if !reflect.DeepEqual(expected[i], v) { + return false + } + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// IsTypeArgument is a struct that contains the type of an argument +// for use when type checking. This is an alternative to AnythingOfType. +// Used in Diff and Assert. +type IsTypeArgument struct { + t interface{} +} + +// IsType returns an IsTypeArgument object containing the type to check for. +// You can provide a zero-value of the type to check. This is an +// alternative to AnythingOfType. Used in Diff and Assert. +// +// For example: +// Assert(t, IsType(""), IsType(0)) +func IsType(t interface{}) *IsTypeArgument { + return &IsTypeArgument{t: t} +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. 
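Anything, AnythingOfType and IsType, defined above, are the built-in argument matchers recognised by Arguments.Diff. A sketch of the three on the hypothetical MockStorer:

    package demo

    import "github.com/stretchr/testify/mock"

    func configureMatchers(m *MockStorer) {
        m.On("SavePersonDetails",
            mock.Anything,                 // firstname: never inspected
            mock.AnythingOfType("string"), // lastname: dynamic type name must be "string"
            mock.IsType(0),                // age: same dynamic type as the zero value 0 (int)
        ).Return(2000, nil)
    }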
+ fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. +func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. 
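MatchedBy wraps a one-argument predicate in an argumentMatcher, and Diff calls Matches instead of comparing values. A sketch with an invented request type:

    package demo

    import "github.com/stretchr/testify/mock"

    // saveRequest is a hypothetical argument type used only for illustration.
    type saveRequest struct {
        Firstname string
        Age       int
    }

    // Save is a hypothetical mocked method taking a struct pointer.
    func (m *MockStorer) Save(req *saveRequest) error {
        args := m.Called(req)
        return args.Error(0)
    }

    func expectAdultsOnly(m *MockStorer) {
        // The predicate must take exactly one argument of the expected
        // type and return bool, otherwise MatchedBy panics.
        m.On("Save", mock.MatchedBy(func(req *saveRequest) bool {
            return req != nil && req.Age >= 18
        })).Return(nil)
    }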
+func (args Arguments) Diff(objects []interface{}) (string, int) { + //TODO: could return string as error and nil for No difference + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + var actualFmt, expectedFmt string + + if len(objects) <= i { + actual = "(Missing)" + actualFmt = "(Missing)" + } else { + actual = objects[i] + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) + } + + if len(args) <= i { + expected = "(Missing)" + expectedFmt = "(Missing)" + } else { + expected = args[i] + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + + } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { + t := expected.(*IsTypeArgument).t + if reflect.TypeOf(t) != reflect.TypeOf(actual) { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) + } + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. 
+func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index ec4624b28..51820df2e 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -256,6 +256,54 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { t.FailNow() } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorIs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ErrorIsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -806,6 +854,126 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl t.FailNow() } +// IsDecreasing asserts that the collection is decreasing +// +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) +func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsDecreasing(t, object, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// IsDecreasingf asserts that the collection is decreasing +// +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsDecreasingf(t, object, msg, args...) { + return + } + t.FailNow() +} + +// IsIncreasing asserts that the collection is increasing +// +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) +func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsIncreasing(t, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsIncreasingf asserts that the collection is increasing +// +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsIncreasingf(t, object, msg, args...) { + return + } + t.FailNow() +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) +func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonDecreasing(t, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonDecreasingf(t, object, msg, args...) { + return + } + t.FailNow() +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) +func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonIncreasing(t, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNonIncreasingf(t, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. 
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -944,6 +1112,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter t.FailNow() } +// Negative asserts that the specified element is negative +// +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) +func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Negative(t, e, msgAndArgs...) { + return + } + t.FailNow() +} + +// Negativef asserts that the specified element is negative +// +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") +func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Negativef(t, e, msg, args...) { + return + } + t.FailNow() +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -1200,6 +1396,30 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorIs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorIsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1446,6 +1666,34 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} t.FailNow() } +// Positive asserts that the specified element is positive +// +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) +func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Positive(t, e, msgAndArgs...) { + return + } + t.FailNow() +} + +// Positivef asserts that the specified element is positive +// +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") +func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Positivef(t, e, msg, args...) { + return + } + t.FailNow() +} + // Regexp asserts that a specified regexp matches a string. // // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 103d7dcb6..ed54a9d83 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -205,6 +205,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { Error(a.t, err, msgAndArgs...) } +// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. 
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorAs(a.t, err, target, msgAndArgs...) +} + +// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. +// This is a wrapper for errors.As. +func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorAsf(a.t, err, target, msg, args...) +} + +// ErrorIs asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorIs(a.t, err, target, msgAndArgs...) +} + +// ErrorIsf asserts that at least one of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + ErrorIsf(a.t, err, target, msg, args...) +} + // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() @@ -632,6 +668,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } +// IsDecreasing asserts that the collection is decreasing +// +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) +func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsDecreasing(a.t, object, msgAndArgs...) +} + +// IsDecreasingf asserts that the collection is decreasing +// +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsDecreasingf(a.t, object, msg, args...) +} + +// IsIncreasing asserts that the collection is increasing +// +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) +func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsIncreasing(a.t, object, msgAndArgs...) +} + +// IsIncreasingf asserts that the collection is increasing +// +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsIncreasingf(a.t, object, msg, args...) +} + +// IsNonDecreasing asserts that the collection is not decreasing +// +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) +func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonDecreasing(a.t, object, msgAndArgs...) 
+} + +// IsNonDecreasingf asserts that the collection is not decreasing +// +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonDecreasingf(a.t, object, msg, args...) +} + +// IsNonIncreasing asserts that the collection is not increasing +// +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) +func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonIncreasing(a.t, object, msgAndArgs...) +} + +// IsNonIncreasingf asserts that the collection is not increasing +// +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNonIncreasingf(a.t, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -740,6 +872,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i Lessf(a.t, e1, e2, msg, args...) } +// Negative asserts that the specified element is negative +// +// a.Negative(-1) +// a.Negative(-1.23) +func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Negative(a.t, e, msgAndArgs...) +} + +// Negativef asserts that the specified element is negative +// +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") +func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Negativef(a.t, e, msg, args...) +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // @@ -942,6 +1096,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } +// NotErrorIs asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorIs(a.t, err, target, msgAndArgs...) +} + +// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// This is a wrapper for errors.Is. +func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorIsf(a.t, err, target, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1134,6 +1306,28 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa Panicsf(a.t, f, msg, args...) 
} +// Positive asserts that the specified element is positive +// +// a.Positive(1) +// a.Positive(1.23) +func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Positive(a.t, e, msgAndArgs...) +} + +// Positivef asserts that the specified element is positive +// +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") +func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Positivef(a.t, e, msg, args...) +} + // Regexp asserts that a specified regexp matches a string. // // a.Regexp(regexp.MustCompile("start"), "it's starting") diff --git a/vendor/github.com/subosito/gotenv/.env b/vendor/github.com/subosito/gotenv/.env new file mode 100644 index 000000000..6405eca71 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.env @@ -0,0 +1 @@ +HELLO=world diff --git a/vendor/github.com/subosito/gotenv/.env.invalid b/vendor/github.com/subosito/gotenv/.env.invalid new file mode 100644 index 000000000..016d5e0ce --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.env.invalid @@ -0,0 +1 @@ +lol$wut diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore new file mode 100644 index 000000000..2b8d45610 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.gitignore @@ -0,0 +1,3 @@ +*.test +*.out +annotate.json diff --git a/vendor/github.com/subosito/gotenv/.travis.yml b/vendor/github.com/subosito/gotenv/.travis.yml new file mode 100644 index 000000000..3370d5f40 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.x +os: + - linux + - osx +script: + - go test -test.v -coverprofile=coverage.out -covermode=count +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md new file mode 100644 index 000000000..67f687382 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/CHANGELOG.md @@ -0,0 +1,47 @@ +# Changelog + +## [1.2.0] - 2019-08-03 + +### Added + +- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`. +- Add more tests to be 100% coverage. +- Add CHANGELOG +- Add more OS for the test: OSX and Windows + +### Changed + +- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv). +- Updated README with mentions to all available functions + +### Removed + +- Remove `ErrFormat` +- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper. + +## [1.1.1] - 2018-06-05 + +### Changed + +- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding) + +## [1.1.0] - 2017-03-20 + +### Added + +- Supports carriage return in env +- Handle files with UTF-8 BOM + +### Changed + +- Whitespace handling + +### Fixed + +- Incorrect variable expansion +- Handling escaped '$' characters + +## [1.0.0] - 2014-10-05 + +First stable release. 
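Stepping back to the testify upgrade earlier in this patch: the newly vendored `require` helpers (`ErrorIs`, `ErrorAs`, `NotErrorIs`, `Positive`, `Negative`, and the `IsIncreasing`/`IsDecreasing` family) behave like the existing require assertions and call `FailNow` on the first failed check. Below is a minimal sketch of a test exercising a few of them; the sentinel error and the values are illustrative only and not taken from this repository:

```go
package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errNotFound is an illustrative sentinel error, not part of this patch.
var errNotFound = errors.New("not found")

func TestNewRequireHelpers(t *testing.T) {
	err := fmt.Errorf("lookup failed: %w", errNotFound)

	// ErrorIs and NotErrorIs wrap errors.Is over the whole wrapped chain.
	require.ErrorIs(t, err, errNotFound)
	require.NotErrorIs(t, err, errors.New("some other error"))

	// Sign and ordering helpers added in this testify version.
	require.Positive(t, 42)
	require.Negative(t, -3.14)
	require.IsDecreasing(t, []int{3, 2, 1})
}
```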
+
diff --git a/vendor/github.com/subosito/gotenv/LICENSE b/vendor/github.com/subosito/gotenv/LICENSE
new file mode 100644
index 000000000..f64ccaedc
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Alif Rachmawadi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md
new file mode 100644
index 000000000..d610cdf0b
--- /dev/null
+++ b/vendor/github.com/subosito/gotenv/README.md
@@ -0,0 +1,131 @@
+# gotenv
+
+[![Build Status](https://travis-ci.org/subosito/gotenv.svg?branch=master)](https://travis-ci.org/subosito/gotenv)
+[![Build status](https://ci.appveyor.com/api/projects/status/wb2e075xkfl0m0v2/branch/master?svg=true)](https://ci.appveyor.com/project/subosito/gotenv/branch/master)
+[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv)
+[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv)
+[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv)
+
+Load environment variables dynamically in Go.
+
+## Usage
+
+Put the gotenv package on your `import` statement:
+
+```go
+import "github.com/subosito/gotenv"
+```
+
+To modify your app environment variables, `gotenv` exposes two main functions:
+
+- `gotenv.Load`
+- `gotenv.Apply`
+
+By default, `gotenv.Load` will look for a file called `.env` in the current working directory.
+
+Behind the scenes, it will then load the `.env` file and export the valid variables as environment variables. Make sure you call the method as early as possible to ensure it loads all variables, e.g. in an `init()` function.
+
+Once loaded, you can use `os.Getenv()` to get the value of a variable.
+
+Let's say you have a `.env` file:
+
+```
+APP_ID=1234567
+APP_SECRET=abcdef
+```
+
+Here's an example app:
+
+```go
+package main
+
+import (
+	"github.com/subosito/gotenv"
+	"log"
+	"os"
+)
+
+func init() {
+	gotenv.Load()
+}
+
+func main() {
+	log.Println(os.Getenv("APP_ID"))     // "1234567"
+	log.Println(os.Getenv("APP_SECRET")) // "abcdef"
+}
+```
+
+You can also load files other than `.env` if you wish. Just supply their filenames when calling `Load()`.
It will load them in order, and the first value set for a variable wins:
+```go
+gotenv.Load(".env.production", "credentials")
+```
+
+While `gotenv.Load` loads entries from a `.env` file, `gotenv.Apply` allows you to use any `io.Reader`:
+
+```go
+gotenv.Apply(strings.NewReader("APP_ID=1234567"))
+
+log.Println(os.Getenv("APP_ID"))
+// Output: "1234567"
+```
+
+Both `gotenv.Load` and `gotenv.Apply` **DO NOT** override existing environment variables. If you want to override existing ones, see the section below.
+
+### Environment Overrides
+
+Besides the functions above, `gotenv` also provides functions that override existing variables:
+
+- `gotenv.OverLoad`
+- `gotenv.OverApply`
+
+
+Here's an example of the override behavior:
+
+```go
+os.Setenv("HELLO", "world")
+
+// NOTE: when using Apply, the existing value is preserved
+gotenv.Apply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "world"
+
+// NOTE: when using OverApply, the existing value is overridden
+gotenv.OverApply(strings.NewReader("HELLO=universe"))
+fmt.Println(os.Getenv("HELLO"))
+// Output: "universe"
+```
+
+### Throw a Panic
+
+Both `gotenv.Load` and `gotenv.OverLoad` return an error when something goes wrong, e.g. when the env file does not exist. To make them easier to use, `gotenv` also provides the `gotenv.Must` helper, which panics when an error is returned.
+
+```go
+err := gotenv.Load(".env-is-not-exist")
+fmt.Println("error", err)
+// error: open .env-is-not-exist: no such file or directory
+
+gotenv.Must(gotenv.Load, ".env-is-not-exist")
+// it will throw a panic
+// panic: open .env-is-not-exist: no such file or directory
+```
+
+### Another Scenario
+
+In case you just want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` functions as public API so you can use them.
+
+```go
+// import "strings"
+
+pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
+// gotenv.Env{"FOO": "test", "BAR": "test"}
+
+pairs, err := gotenv.StrictParse(strings.NewReader(`FOO="bar"`))
+// gotenv.Env{"FOO": "bar"}
+```
+
+`Parse` ignores invalid lines and returns an `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines.
+
+## Notes
+
+The gotenv package is a Go port of the [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be as compatible as possible.
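To tie these pieces together, here is a minimal sketch combining `Must`, `Load`, and `StrictParse` with explicit error handling; the `.env.local` filename is purely illustrative and not part of the vendored package:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// Must panics if Load fails, e.g. when .env.local is missing or contains invalid lines.
	gotenv.Must(gotenv.Load, ".env.local")

	// StrictParse returns (Env, error); invalid lines are reported instead of skipped.
	env, err := gotenv.StrictParse(strings.NewReader("FOO=test\nBAR=$FOO"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env["BAR"]) // "test" - $FOO is expanded while parsing
}
```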
diff --git a/vendor/github.com/subosito/gotenv/appveyor.yml b/vendor/github.com/subosito/gotenv/appveyor.yml new file mode 100644 index 000000000..33b4c4046 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/appveyor.yml @@ -0,0 +1,9 @@ +build: off +clone_folder: c:\gopath\src\github.com\subosito\gotenv +environment: + GOPATH: c:\gopath +stack: go 1.10 +before_test: + - go get -t +test_script: + - go test -v -cover -race diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go new file mode 100644 index 000000000..745a34489 --- /dev/null +++ b/vendor/github.com/subosito/gotenv/gotenv.go @@ -0,0 +1,265 @@ +// Package gotenv provides functionality to dynamically load the environment variables +package gotenv + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +const ( + // Pattern for detecting valid line format + linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z` + + // Pattern for detecting valid variable within a value + variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)` +) + +// Env holds key/value pair of valid environment variable +type Env map[string]string + +/* +Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist. +When it's called with no argument, it will load `.env` file on the current path and set the environment variables. +Otherwise, it will loop over the filenames parameter and set the proper environment variables. +*/ +func Load(filenames ...string) error { + return loadenv(false, filenames...) +} + +/* +OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables. +*/ +func OverLoad(filenames ...string) error { + return loadenv(true, filenames...) +} + +/* +Must is wrapper function that will panic when supplied function returns an error. +*/ +func Must(fn func(filenames ...string) error, filenames ...string) { + if err := fn(filenames...); err != nil { + panic(err.Error()) + } +} + +/* +Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist. +*/ +func Apply(r io.Reader) error { + return parset(r, false) +} + +/* +OverApply is a function to load an io Reader then export and override the valid variables into environment variables. +*/ +func OverApply(r io.Reader) error { + return parset(r, true) +} + +func loadenv(override bool, filenames ...string) error { + if len(filenames) == 0 { + filenames = []string{".env"} + } + + for _, filename := range filenames { + f, err := os.Open(filename) + if err != nil { + return err + } + + err = parset(f, override) + if err != nil { + return err + } + + f.Close() + } + + return nil +} + +// parse and set :) +func parset(r io.Reader, override bool) error { + env, err := StrictParse(r) + if err != nil { + return err + } + + for key, val := range env { + setenv(key, val, override) + } + + return nil +} + +func setenv(key, val string, override bool) { + if override { + os.Setenv(key, val) + } else { + if _, present := os.LookupEnv(key); !present { + os.Setenv(key, val) + } + } +} + +// Parse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. 
+// This function is skipping any invalid lines and only processing the valid one. +func Parse(r io.Reader) Env { + env, _ := StrictParse(r) + return env +} + +// StrictParse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. +// It expands the value of a variable from the environment variable but does not set the value to the environment itself. +// This function is returning an error if there are any invalid lines. +func StrictParse(r io.Reader) (Env, error) { + env := make(Env) + scanner := bufio.NewScanner(r) + + i := 1 + bom := string([]byte{239, 187, 191}) + + for scanner.Scan() { + line := scanner.Text() + + if i == 1 { + line = strings.TrimPrefix(line, bom) + } + + i++ + + err := parseLine(line, env) + if err != nil { + return env, err + } + } + + return env, nil +} + +func parseLine(s string, env Env) error { + rl := regexp.MustCompile(linePattern) + rm := rl.FindStringSubmatch(s) + + if len(rm) == 0 { + return checkFormat(s, env) + } + + key := rm[1] + val := rm[2] + + // determine if string has quote prefix + hdq := strings.HasPrefix(val, `"`) + + // determine if string has single quote prefix + hsq := strings.HasPrefix(val, `'`) + + // trim whitespace + val = strings.Trim(val, " ") + + // remove quotes '' or "" + rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`) + val = rq.ReplaceAllString(val, "$2") + + if hdq { + val = strings.Replace(val, `\n`, "\n", -1) + val = strings.Replace(val, `\r`, "\r", -1) + + // Unescape all characters except $ so variables can be escaped properly + re := regexp.MustCompile(`\\([^$])`) + val = re.ReplaceAllString(val, "$1") + } + + rv := regexp.MustCompile(variablePattern) + fv := func(s string) string { + return varReplacement(s, hsq, env) + } + + val = rv.ReplaceAllStringFunc(val, fv) + val = parseVal(val, env) + + env[key] = val + return nil +} + +func parseExport(st string, env Env) error { + if strings.HasPrefix(st, "export") { + vs := strings.SplitN(st, " ", 2) + + if len(vs) > 1 { + if _, ok := env[vs[1]]; !ok { + return fmt.Errorf("line `%s` has an unset variable", st) + } + } + } + + return nil +} + +func varReplacement(s string, hsq bool, env Env) string { + if strings.HasPrefix(s, "\\") { + return strings.TrimPrefix(s, "\\") + } + + if hsq { + return s + } + + sn := `(\$)(\{?([A-Z0-9_]+)\}?)` + rn := regexp.MustCompile(sn) + mn := rn.FindStringSubmatch(s) + + if len(mn) == 0 { + return s + } + + v := mn[3] + + replace, ok := env[v] + if !ok { + replace = os.Getenv(v) + } + + return replace +} + +func checkFormat(s string, env Env) error { + st := strings.TrimSpace(s) + + if (st == "") || strings.HasPrefix(st, "#") { + return nil + } + + if err := parseExport(st, env); err != nil { + return err + } + + return fmt.Errorf("line `%s` doesn't match format", s) +} + +func parseVal(val string, env Env) string { + if strings.Contains(val, "=") { + if !(val == "\n" || val == "\r") { + kv := strings.Split(val, "\n") + + if len(kv) == 1 { + kv = strings.Split(val, "\r") + } + + if len(kv) > 1 { + val = kv[0] + + for i := 1; i < len(kv); i++ { + parseLine(kv[i], env) + } + } + } + } + + return val +} diff --git a/vendor/github.com/tdakkota/asciicheck/.gitignore b/vendor/github.com/tdakkota/asciicheck/.gitignore new file mode 100644 index 000000000..cf875a711 --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/.gitignore @@ -0,0 +1,33 @@ +# IntelliJ project files +.idea +*.iml +out +gen + +# Go template +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + 
+# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.idea/$CACHE_FILE$ +.idea/$PRODUCT_WORKSPACE_FILE$ +.idea/.gitignore +.idea/codeStyles +.idea/deployment.xml +.idea/inspectionProfiles/ +.idea/kotlinc.xml +.idea/misc.xml +.idea/modules.xml +asciicheck.iml +go.sum diff --git a/vendor/github.com/tdakkota/asciicheck/LICENSE b/vendor/github.com/tdakkota/asciicheck/LICENSE new file mode 100644 index 000000000..48a60cf1c --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 tdakkota + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tdakkota/asciicheck/README.md b/vendor/github.com/tdakkota/asciicheck/README.md new file mode 100644 index 000000000..a7ff5884f --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/README.md @@ -0,0 +1,72 @@ +# asciicheck [![Go Report Card](https://goreportcard.com/badge/github.com/tdakkota/asciicheck)](https://goreportcard.com/report/github.com/tdakkota/asciicheck) [![codecov](https://codecov.io/gh/tdakkota/asciicheck/branch/master/graph/badge.svg)](https://codecov.io/gh/tdakkota/asciicheck) ![Go](https://github.com/tdakkota/asciicheck/workflows/Go/badge.svg) +Simple linter to check that your code does not contain non-ASCII identifiers + +# Install + +``` +go get -u github.com/tdakkota/asciicheck/cmd/asciicheck +``` + +# Reason to use +So, do you see this code? Looks correct, isn't it? + +```go +package main + +import "fmt" + +type TеstStruct struct{} + +func main() { + s := TestStruct{} + fmt.Println(s) +} +``` +But if you try to run it, you will get an error: +``` +./prog.go:8:7: undefined: TestStruct +``` +What? `TestStruct` is defined above, but compiler thinks diffrent. Why? + +**Answer**: +Because `TestStruct` is not `TеstStruct`. 
+``` +type TеstStruct struct{} + ^ this 'e' (U+0435) is not 'e' (U+0065) +``` + +# Usage +asciicheck uses [`singlechecker`](https://pkg.go.dev/golang.org/x/tools/go/analysis/singlechecker) package to run: + +``` +asciicheck: checks that all code identifiers does not have non-ASCII symbols in the name + +Usage: asciicheck [-flag] [package] + + +Flags: + -V print version and exit + -all + no effect (deprecated) + -c int + display offending line with this many lines of context (default -1) + -cpuprofile string + write CPU profile to this file + -debug string + debug flags, any subset of "fpstv" + -fix + apply all suggested fixes + -flags + print analyzer flags in JSON + -json + emit JSON output + -memprofile string + write memory profile to this file + -source + no effect (deprecated) + -tags string + no effect (deprecated) + -trace string + write trace log to this file + -v no effect (deprecated) +``` diff --git a/vendor/github.com/tdakkota/asciicheck/ascii.go b/vendor/github.com/tdakkota/asciicheck/ascii.go new file mode 100644 index 000000000..9e70c391d --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/ascii.go @@ -0,0 +1,18 @@ +package asciicheck + +import "unicode" + +func isASCII(s string) (rune, bool) { + if len(s) == 1 { + return []rune(s)[0], s[0] <= unicode.MaxASCII + } + + r := []rune(s) + for i := 0; i < len(s); i++ { + if r[i] > unicode.MaxASCII { + return r[i], false + } + } + + return 0, true +} diff --git a/vendor/github.com/tdakkota/asciicheck/asciicheck.go b/vendor/github.com/tdakkota/asciicheck/asciicheck.go new file mode 100644 index 000000000..690728022 --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/asciicheck.go @@ -0,0 +1,49 @@ +package asciicheck + +import ( + "fmt" + "go/ast" + "golang.org/x/tools/go/analysis" +) + +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "asciicheck", + Doc: "checks that all code identifiers does not have non-ASCII symbols in the name", + Run: run, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + alreadyViewed := map[*ast.Object]struct{}{} + ast.Inspect( + file, func(node ast.Node) bool { + cb(pass, node, alreadyViewed) + return true + }, + ) + } + + return nil, nil +} + +func cb(pass *analysis.Pass, n ast.Node, m map[*ast.Object]struct{}) { + if v, ok := n.(*ast.Ident); ok { + if _, ok := m[v.Obj]; ok { + return + } else { + m[v.Obj] = struct{}{} + } + + ch, ascii := isASCII(v.Name) + if !ascii { + pass.Report( + analysis.Diagnostic{ + Pos: v.Pos(), + Message: fmt.Sprintf("identifier \"%s\" contain non-ASCII character: %#U", v.Name, ch), + }, + ) + } + } +} diff --git a/vendor/github.com/tdakkota/asciicheck/go.mod b/vendor/github.com/tdakkota/asciicheck/go.mod new file mode 100644 index 000000000..43aa5d806 --- /dev/null +++ b/vendor/github.com/tdakkota/asciicheck/go.mod @@ -0,0 +1,5 @@ +module github.com/tdakkota/asciicheck + +go 1.13 + +require golang.org/x/tools v0.0.0-20200414032229-332987a829c3 diff --git a/vendor/github.com/tetafro/godot/.gitignore b/vendor/github.com/tetafro/godot/.gitignore new file mode 100644 index 000000000..db77fd15d --- /dev/null +++ b/vendor/github.com/tetafro/godot/.gitignore @@ -0,0 +1,4 @@ +/dist/ +/vendor/ +/godot +/profile.out diff --git a/vendor/github.com/tetafro/godot/.godot.yaml b/vendor/github.com/tetafro/godot/.godot.yaml new file mode 100644 index 000000000..af36858f9 --- /dev/null +++ b/vendor/github.com/tetafro/godot/.godot.yaml @@ -0,0 +1,16 @@ +# Which comments to check: +# declarations - for top 
level declaration comments (default); +# toplevel - for top level comments; +# all - for all comments. +scope: declarations + +# List of regexps for excluding particular comment lines from check. +# Example: exclude comments which contain numbers. +exclude: + # - '[0-9]+' + +# Check periods at the end of sentences. +period: true + +# Check that first letter of each sentence is capital. +capital: false diff --git a/vendor/github.com/tetafro/godot/.golangci.yml b/vendor/github.com/tetafro/godot/.golangci.yml new file mode 100644 index 000000000..2b799b265 --- /dev/null +++ b/vendor/github.com/tetafro/godot/.golangci.yml @@ -0,0 +1,67 @@ +run: + concurrency: 2 + deadline: 5m + +skip-dirs: + - path: ./testdata/ + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - bodyclose + - depguard + - dogsled + - dupl + - funlen + - gochecknoinits + - goconst + - gocritic + - gocyclo + - godot + - gofmt + - gofumpt + - goimports + - golint + - gomnd + - gomodguard + - goprintffuncname + - gosec + - lll + - maligned + - misspell + - nakedret + - nestif + - prealloc + - rowserrcheck + - scopelint + - stylecheck + - unconvert + - unparam + - whitespace + +linters-settings: + godot: + check-all: true + +issues: + exclude-use-default: false + exclude-rules: + - path: _test\.go + linters: + - dupl + - errcheck + - funlen + - gosec + - path: cmd/godot/main\.go + linters: + - gomnd diff --git a/vendor/github.com/tetafro/godot/.goreleaser.yml b/vendor/github.com/tetafro/godot/.goreleaser.yml new file mode 100644 index 000000000..c0fc2b6b1 --- /dev/null +++ b/vendor/github.com/tetafro/godot/.goreleaser.yml @@ -0,0 +1,11 @@ +builds: + - dir: ./cmd/godot +checksum: + name_template: checksums.txt +snapshot: + name_template: "{{ .Tag }}" +changelog: + sort: asc + filters: + exclude: + - '^Merge pull request' diff --git a/vendor/github.com/tetafro/godot/LICENSE b/vendor/github.com/tetafro/godot/LICENSE new file mode 100644 index 000000000..120c6d502 --- /dev/null +++ b/vendor/github.com/tetafro/godot/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Denis Krivak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/tetafro/godot/Makefile b/vendor/github.com/tetafro/godot/Makefile new file mode 100644 index 000000000..f167051e8 --- /dev/null +++ b/vendor/github.com/tetafro/godot/Makefile @@ -0,0 +1,25 @@ +.PHONY: dep +dep: + go mod tidy && go mod verify + +.PHONY: test +test: + go test ./... + +.PHONY: cover +cover: + go test -coverprofile cover.out ./... + go tool cover -html=cover.out + rm -f cover.out + +.PHONY: lint +lint: + golangci-lint run + +.PHONY: build +build: + go build -o godot ./cmd/godot + +.PHONY: release +release: + goreleaser release --rm-dist diff --git a/vendor/github.com/tetafro/godot/README.md b/vendor/github.com/tetafro/godot/README.md new file mode 100644 index 000000000..ff3516e6c --- /dev/null +++ b/vendor/github.com/tetafro/godot/README.md @@ -0,0 +1,84 @@ +# godot + +[![License](http://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://raw.githubusercontent.com/tetafro/godot/master/LICENSE) +[![Github CI](https://img.shields.io/github/workflow/status/tetafro/godot/Test)](https://github.com/tetafro/godot/actions?query=workflow%3ATest) +[![Go Report](https://goreportcard.com/badge/github.com/tetafro/godot)](https://goreportcard.com/report/github.com/tetafro/godot) +[![Codecov](https://codecov.io/gh/tetafro/godot/branch/master/graph/badge.svg)](https://codecov.io/gh/tetafro/godot) + +Linter that checks if all top-level comments contain a period at the +end of the last sentence if needed. + +[CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments#comment-sentences) quote: + +> Comments should begin with the name of the thing being described +> and end in a period + +## Install + +*NOTE: Godot is available as a part of [GolangCI Lint](https://github.com/golangci/golangci-lint) +(disabled by default).* + +Build from source + +```sh +go get -u github.com/tetafro/godot/cmd/godot +``` + +or download binary from [releases page](https://github.com/tetafro/godot/releases). + +## Config + +You can specify options using config file. Use default name `.godot.yaml`, or +set it using `-c filename.yaml` argument. If no config provided the following +defaults are used: + +```yaml +# Which comments to check: +# declarations - for top level declaration comments (default); +# toplevel - for top level comments; +# all - for all comments. +scope: declarations + +# List pf regexps for excluding particular comment lines from check. +exclude: + +# Check periods at the end of sentences. +period: true + +# Check that first letter of each sentence is capital. +capital: false +``` + +## Run + +```sh +godot ./myproject +``` + +Autofix flags are also available + +```sh +godot -f ./myproject # fix issues and print the result +godot -w ./myproject # fix issues and replace the original file +``` + +See all flags with `godot -h`. + +## Example + +Code + +```go +package math + +// Sum sums two integers +func Sum(a, b int) int { + return a + b // result +} +``` + +Output + +```sh +Comment should end in a period: math/math.go:3:1 +``` diff --git a/vendor/github.com/tetafro/godot/checks.go b/vendor/github.com/tetafro/godot/checks.go new file mode 100644 index 000000000..e9b43f0a5 --- /dev/null +++ b/vendor/github.com/tetafro/godot/checks.go @@ -0,0 +1,269 @@ +package godot + +import ( + "go/token" + "regexp" + "strings" + "unicode" +) + +// Error messages. +const ( + noPeriodMessage = "Comment should end in a period" + noCapitalMessage = "Sentence should start with a capital letter" +) + +var ( + // List of valid sentence ending. 
+	// A sentence can be inside parenthesis, and therefore ends with parenthesis.
+	lastChars = []string{".", "?", "!", ".)", "?)", "!)", specialReplacer}
+
+	// Abbreviations to exclude from capital letters check.
+	abbreviations = []string{"i.e.", "i. e.", "e.g.", "e. g."}
+
+	// Special tags in comments like "// nolint:", or "// +k8s:".
+	tags = regexp.MustCompile(`^\+?[a-z0-9]+:`)
+
+	// Special hashtags in comments like "// #nosec".
+	hashtags = regexp.MustCompile(`^#[a-z]+($|\s)`)
+
+	// URL at the end of the line.
+	endURL = regexp.MustCompile(`[a-z]+://[^\s]+$`)
+)
+
+// checkComments checks every comment according to the rules from
+// `settings` argument.
+func checkComments(comments []comment, settings Settings) []Issue {
+	var issues []Issue // nolint: prealloc
+	for _, c := range comments {
+		if settings.Period {
+			if iss := checkCommentForPeriod(c); iss != nil {
+				issues = append(issues, *iss)
+			}
+		}
+		if settings.Capital {
+			if iss := checkCommentForCapital(c); len(iss) > 0 {
+				issues = append(issues, iss...)
+			}
+		}
+	}
+	return issues
+}
+
+// checkCommentForPeriod checks that the last sentence of the comment ends
+// in a period.
+func checkCommentForPeriod(c comment) *Issue {
+	pos, ok := checkPeriod(c.text)
+	if ok {
+		return nil
+	}
+
+	// Shift position by the length of comment's special symbols: /* or //
+	isBlock := strings.HasPrefix(c.lines[0], "/*")
+	if (isBlock && pos.line == 1) || !isBlock {
+		pos.column += 2
+	}
+
+	iss := Issue{
+		Pos: token.Position{
+			Filename: c.start.Filename,
+			Offset:   c.start.Offset,
+			Line:     pos.line + c.start.Line - 1,
+			Column:   pos.column + c.start.Column - 1,
+		},
+		Message: noPeriodMessage,
+	}
+
+	// Make a replacement. Use `pos.line` to get an original line from
+	// attached lines. Use `iss.Pos.Column` because it's a position in
+	// the original line.
+	original := []rune(c.lines[pos.line-1])
+	iss.Replacement = string(original[:iss.Pos.Column-1]) + "." +
+		string(original[iss.Pos.Column-1:])
+
+	// Save replacement to raw lines to be able to combine it with
+	// further replacements
+	c.lines[pos.line-1] = iss.Replacement
+
+	return &iss
+}
+
+// checkCommentForCapital checks that each sentence of the comment starts with
+// a capital letter.
+// nolint: unparam
+func checkCommentForCapital(c comment) []Issue {
+	pp := checkCapital(c.text, c.decl)
+	if len(pp) == 0 {
+		return nil
+	}
+
+	issues := make([]Issue, len(pp))
+	for i, pos := range pp {
+		// Shift position by the length of comment's special symbols: /* or //
+		isBlock := strings.HasPrefix(c.lines[0], "/*")
+		if (isBlock && pos.line == 1) || !isBlock {
+			pos.column += 2
+		}
+
+		iss := Issue{
+			Pos: token.Position{
+				Filename: c.start.Filename,
+				Offset:   c.start.Offset,
+				Line:     pos.line + c.start.Line - 1,
+				Column:   pos.column + c.start.Column - 1,
+			},
+			Message: noCapitalMessage,
+		}
+
+		// Make a replacement. Use `pos.line` to get an original line from
+		// attached lines. Use `iss.Pos.Column` because it's a position in
+		// the original line.
+		rep := []rune(c.lines[pos.line-1])
+		rep[iss.Pos.Column-1] = unicode.ToTitle(rep[iss.Pos.Column-1])
+		iss.Replacement = string(rep)
+
+		// Save replacement to raw lines to be able to combine it with
+		// further replacements
+		c.lines[pos.line-1] = iss.Replacement
+
+		issues[i] = iss
+	}
+
+	return issues
+}
+
+// checkPeriod checks that the last sentence of the text ends in a period.
+// NOTE: Returned position is a position inside given text, not in the
+// original file.
+func checkPeriod(comment string) (pos position, ok bool) { + // Check last non-empty line + var found bool + var line string + lines := strings.Split(comment, "\n") + for i := len(lines) - 1; i >= 0; i-- { + line = strings.TrimRightFunc(lines[i], unicode.IsSpace) + if line == "" { + continue + } + found = true + pos.line = i + 1 + break + } + // All lines are empty + if !found { + return position{}, true + } + // Correct line + if hasSuffix(line, lastChars) { + return position{}, true + } + + pos.column = len([]rune(line)) + 1 + return pos, false +} + +// checkCapital checks that each sentense of the text starts with +// a capital letter. +// NOTE: First letter is not checked in declaration comments, because they +// can describe unexported functions, which start from small letter. +func checkCapital(comment string, skipFirst bool) (pp []position) { + // Remove common abbreviations from the comment + for _, abbr := range abbreviations { + repl := strings.ReplaceAll(abbr, ".", "_") + comment = strings.ReplaceAll(comment, abbr, repl) + } + + // List of states during the scan: `empty` - nothing special, + // `endChar` - found one of sentence ending chars (.!?), + // `endOfSentence` - found `endChar`, and then space or newline. + const empty, endChar, endOfSentence = 1, 2, 3 + + pos := position{line: 1} + state := endOfSentence + if skipFirst { + state = empty + } + for _, r := range comment { + s := string(r) + + pos.column++ + if s == "\n" { + pos.line++ + pos.column = 0 + if state == endChar { + state = endOfSentence + } + continue + } + if s == "." || s == "!" || s == "?" { + state = endChar + continue + } + if s == ")" && state == endChar { + continue + } + if s == " " { + if state == endChar { + state = endOfSentence + } + continue + } + if state == endOfSentence && unicode.IsLower(r) { + pp = append(pp, position{line: pos.line, column: pos.column}) + } + state = empty + } + return pp +} + +// isSpecialBlock checks that given block of comment lines is special and +// shouldn't be checked as a regular sentence. +func isSpecialBlock(comment string) bool { + // Skip cgo code blocks + // TODO: Find a better way to detect cgo code + if strings.HasPrefix(comment, "/*") && (strings.Contains(comment, "#include") || + strings.Contains(comment, "#define")) { + return true + } + return false +} + +// isSpecialBlock checks that given comment line is special and +// shouldn't be checked as a regular sentence. 
+func isSpecialLine(comment string) bool { + // Skip cgo export tags: https://golang.org/cmd/cgo/#hdr-C_references_to_Go + if strings.HasPrefix(comment, "//export ") { + return true + } + + comment = strings.TrimPrefix(comment, "//") + comment = strings.TrimPrefix(comment, "/*") + + // Don't check comments starting with space indentation - they may + // contain code examples, which shouldn't end with period + if strings.HasPrefix(comment, " ") || + strings.HasPrefix(comment, " \t") || + strings.HasPrefix(comment, "\t") { + return true + } + + // Skip tags and URLs + comment = strings.TrimSpace(comment) + if tags.MatchString(comment) || + hashtags.MatchString(comment) || + endURL.MatchString(comment) || + strings.HasPrefix(comment, "+build") { + return true + } + + return false +} + +func hasSuffix(s string, suffixes []string) bool { + for _, suffix := range suffixes { + if strings.HasSuffix(s, suffix) { + return true + } + } + return false +} diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go new file mode 100644 index 000000000..6153772bd --- /dev/null +++ b/vendor/github.com/tetafro/godot/getters.go @@ -0,0 +1,283 @@ +package godot + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "io/ioutil" + "regexp" + "strings" +) + +var ( + errEmptyInput = errors.New("empty input") + errUnsuitableInput = errors.New("unsuitable input") +) + +// specialReplacer is a replacer for some types of special lines in comments, +// which shouldn't be checked. For example, if comment ends with a block of +// code it should not necessarily have a period at the end. +const specialReplacer = "" + +type parsedFile struct { + fset *token.FileSet + file *ast.File + lines []string +} + +func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { + if file == nil || fset == nil || len(file.Comments) == 0 { + return nil, errEmptyInput + } + + pf := parsedFile{ + fset: fset, + file: file, + } + + var err error + + // Read original file. This is necessary for making a replacements for + // inline comments. I couldn't find a better way to get original line + // with code and comment without reading the file. Function `Format` + // from "go/format" won't help here if the original file is not gofmt-ed. + pf.lines, err = readFile(file, fset) + if err != nil { + return nil, fmt.Errorf("read file: %v", err) + } + + // Dirty hack. For some cases Go generates temporary files during + // compilation process if there is a cgo block in the source file. Some of + // these temporary files are just copies of original source files but with + // new generated comments at the top. Because of them the content differs + // from AST. For some reason it differs only in golangci-lint. I failed to + // find out the exact description of the process, so let's just skip files + // generated by cgo. + if isCgoGenerated(pf.lines) { + return nil, errUnsuitableInput + } + + // Check consistency to avoid checking slice indexes in each function + lastComment := pf.file.Comments[len(pf.file.Comments)-1] + if p := pf.fset.Position(lastComment.End()); len(pf.lines) < p.Line { + return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) + } + + return &pf, nil +} + +// getComments extracts comments from a file. 
+func (pf *parsedFile) getComments(scope Scope, exclude []*regexp.Regexp) []comment { + var comments []comment + decl := pf.getDeclarationComments(exclude) + switch scope { + case AllScope: + // All comments + comments = pf.getAllComments(exclude) + case TopLevelScope: + // All top level comments and comments from the inside + // of top level blocks + comments = append( + pf.getBlockComments(exclude), + pf.getTopLevelComments(exclude)..., + ) + default: + // Top level declaration comments and comments from the inside + // of top level blocks + comments = append(pf.getBlockComments(exclude), decl...) + } + + // Set `decl` flag + setDecl(comments, decl) + + return comments +} + +// getBlockComments gets comments from the inside of top level blocks: +// var (...), const (...). +func (pf *parsedFile) getBlockComments(exclude []*regexp.Regexp) []comment { + var comments []comment + for _, decl := range pf.file.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + // No parenthesis == no block + if d.Lparen == 0 { + continue + } + for _, c := range pf.file.Comments { + if c == nil || len(c.List) == 0 { + continue + } + // Skip comments outside this block + if d.Lparen > c.Pos() || c.Pos() > d.Rparen { + continue + } + // Skip comments that are not top-level for this block + // (the block itself is top level, so comments inside this block + // would be on column 2) + // nolint: gomnd + if pf.fset.Position(c.Pos()).Column != 2 { + continue + } + firstLine := pf.fset.Position(c.Pos()).Line + lastLine := pf.fset.Position(c.End()).Line + comments = append(comments, comment{ + lines: pf.lines[firstLine-1 : lastLine], + text: getText(c, exclude), + start: pf.fset.Position(c.List[0].Slash), + }) + } + } + return comments +} + +// getTopLevelComments gets all top level comments. +func (pf *parsedFile) getTopLevelComments(exclude []*regexp.Regexp) []comment { + var comments []comment // nolint: prealloc + for _, c := range pf.file.Comments { + if c == nil || len(c.List) == 0 { + continue + } + if pf.fset.Position(c.Pos()).Column != 1 { + continue + } + firstLine := pf.fset.Position(c.Pos()).Line + lastLine := pf.fset.Position(c.End()).Line + comments = append(comments, comment{ + lines: pf.lines[firstLine-1 : lastLine], + text: getText(c, exclude), + start: pf.fset.Position(c.List[0].Slash), + }) + } + return comments +} + +// getDeclarationComments gets top level declaration comments. +func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment { + var comments []comment // nolint: prealloc + for _, decl := range pf.file.Decls { + var cg *ast.CommentGroup + switch d := decl.(type) { + case *ast.GenDecl: + cg = d.Doc + case *ast.FuncDecl: + cg = d.Doc + } + + if cg == nil || len(cg.List) == 0 { + continue + } + + firstLine := pf.fset.Position(cg.Pos()).Line + lastLine := pf.fset.Position(cg.End()).Line + comments = append(comments, comment{ + lines: pf.lines[firstLine-1 : lastLine], + text: getText(cg, exclude), + start: pf.fset.Position(cg.List[0].Slash), + }) + } + return comments +} + +// getAllComments gets every single comment from the file. 
+func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { + var comments []comment //nolint: prealloc + for _, c := range pf.file.Comments { + if c == nil || len(c.List) == 0 { + continue + } + firstLine := pf.fset.Position(c.Pos()).Line + lastLine := pf.fset.Position(c.End()).Line + comments = append(comments, comment{ + lines: pf.lines[firstLine-1 : lastLine], + start: pf.fset.Position(c.List[0].Slash), + text: getText(c, exclude), + }) + } + return comments +} + +// getText extracts text from comment. If comment is a special block +// (e.g., CGO code), a block of empty lines is returned. If comment contains +// special lines (e.g., tags or indented code examples), they are replaced +// with `specialReplacer` to skip checks for it. +// The result can be multiline. +func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { + if len(comment.List) == 1 && + strings.HasPrefix(comment.List[0].Text, "/*") && + isSpecialBlock(comment.List[0].Text) { + return "" + } + + for _, c := range comment.List { + text := c.Text + isBlock := false + if strings.HasPrefix(c.Text, "/*") { + isBlock = true + text = strings.TrimPrefix(text, "/*") + text = strings.TrimSuffix(text, "*/") + } + for _, line := range strings.Split(text, "\n") { + if isSpecialLine(line) { + s += specialReplacer + "\n" + continue + } + if !isBlock { + line = strings.TrimPrefix(line, "//") + } + if matchAny(line, exclude) { + s += specialReplacer + "\n" + continue + } + s += line + "\n" + } + } + if len(s) == 0 { + return "" + } + return s[:len(s)-1] // trim last "\n" +} + +// readFile reads file and returns it's lines as strings. +func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { + fname := fset.File(file.Package) + f, err := ioutil.ReadFile(fname.Name()) + if err != nil { + return nil, err + } + return strings.Split(string(f), "\n"), nil +} + +// setDecl sets `decl` flag to comments which are declaration comments. +func setDecl(comments, decl []comment) { + for _, d := range decl { + for i, c := range comments { + if d.start == c.start { + comments[i].decl = true + break + } + } + } +} + +// matchAny checks if string matches any of given regexps. 
+func matchAny(s string, rr []*regexp.Regexp) bool { + for _, re := range rr { + if re.MatchString(s) { + return true + } + } + return false +} + +func isCgoGenerated(lines []string) bool { + for i := range lines { + if strings.Contains(lines[i], "Code generated by cmd/cgo") { + return true + } + } + return false +} diff --git a/vendor/github.com/tetafro/godot/go.mod b/vendor/github.com/tetafro/godot/go.mod new file mode 100644 index 000000000..86ead9453 --- /dev/null +++ b/vendor/github.com/tetafro/godot/go.mod @@ -0,0 +1,5 @@ +module github.com/tetafro/godot + +go 1.16 + +require gopkg.in/yaml.v2 v2.4.0 diff --git a/vendor/github.com/tetafro/godot/go.sum b/vendor/github.com/tetafro/godot/go.sum new file mode 100644 index 000000000..dd0bc19f1 --- /dev/null +++ b/vendor/github.com/tetafro/godot/go.sum @@ -0,0 +1,4 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/tetafro/godot/godot.go b/vendor/github.com/tetafro/godot/godot.go new file mode 100644 index 000000000..dcc515b9c --- /dev/null +++ b/vendor/github.com/tetafro/godot/godot.go @@ -0,0 +1,135 @@ +// Package godot checks if comments contain a period at the end of the last +// sentence if needed. +package godot + +import ( + "fmt" + "go/ast" + "go/token" + "io/ioutil" + "os" + "regexp" + "sort" + "strings" +) + +// NOTE: Line and column indexes are 1-based. + +// NOTE: Errors `invalid line number inside comment...` should never happen. +// Their goal is to prevent panic, if there's a bug with array indexes. + +// Issue contains a description of linting error and a recommended replacement. +type Issue struct { + Pos token.Position + Message string + Replacement string +} + +// position is a position inside a comment (might be multiline comment). +type position struct { + line int + column int +} + +// comment is an internal representation of AST comment entity with additional +// data attached. The latter is used for creating a full replacement for +// the line with issues. +type comment struct { + lines []string // unmodified lines from file + text string // concatenated `lines` with special parts excluded + start token.Position // position of the first symbol in comment + decl bool // whether comment is a special one (should not be checked) +} + +// Run runs this linter on the provided code. +func Run(file *ast.File, fset *token.FileSet, settings Settings) ([]Issue, error) { + pf, err := newParsedFile(file, fset) + if err == errEmptyInput || err == errUnsuitableInput { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("parse input file: %v", err) + } + + exclude := make([]*regexp.Regexp, len(settings.Exclude)) + for i := 0; i < len(settings.Exclude); i++ { + exclude[i], err = regexp.Compile(settings.Exclude[i]) + if err != nil { + return nil, fmt.Errorf("invalid regexp: %v", err) + } + } + + comments := pf.getComments(settings.Scope, exclude) + issues := checkComments(comments, settings) + sortIssues(issues) + + return issues, nil +} + +// Fix fixes all issues and returns new version of file content. 
+func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([]byte, error) { + // Read file + content, err := ioutil.ReadFile(path) // nolint: gosec + if err != nil { + return nil, fmt.Errorf("read file: %v", err) + } + if len(content) == 0 { + return nil, nil + } + + issues, err := Run(file, fset, settings) + if err != nil { + return nil, fmt.Errorf("run linter: %v", err) + } + + // slice -> map + m := map[int]Issue{} + for _, iss := range issues { + m[iss.Pos.Line] = iss + } + + // Replace lines from issues + fixed := make([]byte, 0, len(content)) + for i, line := range strings.Split(string(content), "\n") { + newline := line + if iss, ok := m[i+1]; ok { + newline = iss.Replacement + } + fixed = append(fixed, []byte(newline+"\n")...) + } + fixed = fixed[:len(fixed)-1] // trim last "\n" + + return fixed, nil +} + +// Replace rewrites original file with it's fixed version. +func Replace(path string, file *ast.File, fset *token.FileSet, settings Settings) error { + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("check file: %v", err) + } + mode := info.Mode() + + fixed, err := Fix(path, file, fset, settings) + if err != nil { + return fmt.Errorf("fix issues: %v", err) + } + + if err := ioutil.WriteFile(path, fixed, mode); err != nil { + return fmt.Errorf("write file: %v", err) + } + return nil +} + +// sortIssues sorts by filename, line and column. +func sortIssues(iss []Issue) { + sort.Slice(iss, func(i, j int) bool { + if iss[i].Pos.Filename != iss[j].Pos.Filename { + return iss[i].Pos.Filename < iss[j].Pos.Filename + } + if iss[i].Pos.Line != iss[j].Pos.Line { + return iss[i].Pos.Line < iss[j].Pos.Line + } + return iss[i].Pos.Column < iss[j].Pos.Column + }) +} diff --git a/vendor/github.com/tetafro/godot/settings.go b/vendor/github.com/tetafro/godot/settings.go new file mode 100644 index 000000000..b71bf5d58 --- /dev/null +++ b/vendor/github.com/tetafro/godot/settings.go @@ -0,0 +1,29 @@ +package godot + +// Settings contains linter settings. +type Settings struct { + // Which comments to check (top level declarations, top level, all). + Scope Scope + + // Regexp for excluding particular comment lines from check. + Exclude []string + + // Check periods at the end of sentences. + Period bool + + // Check that first letter of each sentence is capital. + Capital bool +} + +// Scope sets which comments should be checked. +type Scope string + +// List of available check scopes. +const ( + // DeclScope is for top level declaration comments. + DeclScope Scope = "declarations" + // TopLevelScope is for all top level comments. + TopLevelScope Scope = "toplevel" + // AllScope is for all comments. 
+ AllScope Scope = "all" +) diff --git a/vendor/github.com/timakin/bodyclose/LICENSE b/vendor/github.com/timakin/bodyclose/LICENSE new file mode 100644 index 000000000..6957f1889 --- /dev/null +++ b/vendor/github.com/timakin/bodyclose/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Seiji Takahashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go new file mode 100644 index 000000000..145d5409e --- /dev/null +++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -0,0 +1,368 @@ +package bodyclose + +import ( + "fmt" + "go/ast" + "go/types" + "strconv" + "strings" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +var Analyzer = &analysis.Analyzer{ + Name: "bodyclose", + Doc: Doc, + Run: new(runner).run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, +} + +const ( + Doc = "bodyclose checks whether HTTP response body is closed successfully" + + nethttpPath = "net/http" + closeMethod = "Close" +) + +type runner struct { + pass *analysis.Pass + resObj types.Object + resTyp *types.Pointer + bodyObj types.Object + closeMthd *types.Func + skipFile map[*ast.File]bool +} + +// run executes an analysis for the pass. The receiver is passed +// by value because this func is called in parallel for different passes. 
+func (r runner) run(pass *analysis.Pass) (interface{}, error) { + r.pass = pass + funcs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + + r.resObj = analysisutil.LookupFromImports(pass.Pkg.Imports(), nethttpPath, "Response") + if r.resObj == nil { + // skip checking + return nil, nil + } + + resNamed, ok := r.resObj.Type().(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot find http.Response") + } + r.resTyp = types.NewPointer(resNamed) + + resStruct, ok := r.resObj.Type().Underlying().(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot find http.Response") + } + for i := 0; i < resStruct.NumFields(); i++ { + field := resStruct.Field(i) + if field.Id() == "Body" { + r.bodyObj = field + } + } + if r.bodyObj == nil { + return nil, fmt.Errorf("cannot find the object http.Response.Body") + } + bodyNamed := r.bodyObj.Type().(*types.Named) + bodyItrf := bodyNamed.Underlying().(*types.Interface) + for i := 0; i < bodyItrf.NumMethods(); i++ { + bmthd := bodyItrf.Method(i) + if bmthd.Id() == closeMethod { + r.closeMthd = bmthd + } + } + + r.skipFile = map[*ast.File]bool{} + for _, f := range funcs { + if r.noImportedNetHTTP(f) { + // skip this + continue + } + + // skip if the function is just referenced + var isreffunc bool + for i := 0; i < f.Signature.Results().Len(); i++ { + if f.Signature.Results().At(i).Type().String() == r.resTyp.String() { + isreffunc = true + } + } + if isreffunc { + continue + } + + for _, b := range f.Blocks { + for i := range b.Instrs { + pos := b.Instrs[i].Pos() + if r.isopen(b, i) { + pass.Reportf(pos, "response body must be closed") + } + } + } + } + + return nil, nil +} + +func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { + call, ok := r.getReqCall(b.Instrs[i]) + if !ok { + return false + } + + if len(*call.Referrers()) == 0 { + return true + } + cRefs := *call.Referrers() + for _, cRef := range cRefs { + val, ok := r.getResVal(cRef) + if !ok { + continue + } + + if len(*val.Referrers()) == 0 { + return true + } + resRefs := *val.Referrers() + for _, resRef := range resRefs { + switch resRef := resRef.(type) { + case *ssa.Store: // Call in Closure function + if len(*resRef.Addr.Referrers()) == 0 { + return true + } + + for _, aref := range *resRef.Addr.Referrers() { + if c, ok := aref.(*ssa.MakeClosure); ok { + f := c.Fn.(*ssa.Function) + if r.noImportedNetHTTP(f) { + // skip this + return false + } + called := r.isClosureCalled(c) + + return r.calledInFunc(f, called) + } + + } + case *ssa.Call: // Indirect function call + if f, ok := resRef.Call.Value.(*ssa.Function); ok { + for _, b := range f.Blocks { + for i := range b.Instrs { + return r.isopen(b, i) + } + } + } + case *ssa.FieldAddr: // Normal reference to response entity + if resRef.Referrers() == nil { + return true + } + + bRefs := *resRef.Referrers() + + for _, bRef := range bRefs { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + if len(*bOp.Referrers()) == 0 { + return true + } + ccalls := *bOp.Referrers() + for _, ccall := range ccalls { + if r.isCloseCall(ccall) { + return false + } + } + } + } + } + } + + return true +} + +func (r *runner) getReqCall(instr ssa.Instruction) (*ssa.Call, bool) { + call, ok := instr.(*ssa.Call) + if !ok { + return nil, false + } + if !strings.Contains(call.Type().String(), r.resTyp.String()) { + return nil, false + } + return call, true +} + +func (r *runner) getResVal(instr ssa.Instruction) (ssa.Value, bool) { + switch instr := instr.(type) { + case *ssa.FieldAddr: + if instr.X.Type().String() == r.resTyp.String() { + 
return instr.X.(ssa.Value), true + } + case ssa.Value: + if instr.Type().String() == r.resTyp.String() { + return instr, true + } + } + return nil, false +} + +func (r *runner) getBodyOp(instr ssa.Instruction) (*ssa.UnOp, bool) { + op, ok := instr.(*ssa.UnOp) + if !ok { + return nil, false + } + if op.Type() != r.bodyObj.Type() { + return nil, false + } + return op, true +} + +func (r *runner) isCloseCall(ccall ssa.Instruction) bool { + switch ccall := ccall.(type) { + case *ssa.Defer: + if ccall.Call.Method != nil && ccall.Call.Method.Name() == r.closeMthd.Name() { + return true + } + case *ssa.Call: + if ccall.Call.Method != nil && ccall.Call.Method.Name() == r.closeMthd.Name() { + return true + } + case *ssa.ChangeInterface: + if ccall.Type().String() == "io.Closer" { + closeMtd := ccall.Type().Underlying().(*types.Interface).Method(0) + crs := *ccall.Referrers() + for _, cs := range crs { + if cs, ok := cs.(*ssa.Defer); ok { + if val, ok := cs.Common().Value.(*ssa.Function); ok { + for _, b := range val.Blocks { + for _, instr := range b.Instrs { + if c, ok := instr.(*ssa.Call); ok { + if c.Call.Method == closeMtd { + return true + } + } + } + } + } + } + + if returnOp, ok := cs.(*ssa.Return); ok { + for _, resultValue := range returnOp.Results { + if resultValue.Type().String() == "io.Closer" { + return true + } + } + } + } + } + case *ssa.Return: + for _, resultValue := range ccall.Results { + if resultValue.Type().String() == "io.ReadCloser" { + return true + } + } + } + return false +} + +func (r *runner) isClosureCalled(c *ssa.MakeClosure) bool { + refs := *c.Referrers() + if len(refs) == 0 { + return false + } + for _, ref := range refs { + switch ref.(type) { + case *ssa.Call, *ssa.Defer: + return true + } + } + return false +} + +func (r *runner) noImportedNetHTTP(f *ssa.Function) (ret bool) { + obj := f.Object() + if obj == nil { + return false + } + + file := analysisutil.File(r.pass, obj.Pos()) + if file == nil { + return false + } + + if skip, has := r.skipFile[file]; has { + return skip + } + defer func() { + r.skipFile[file] = ret + }() + + for _, impt := range file.Imports { + path, err := strconv.Unquote(impt.Path.Value) + if err != nil { + continue + } + path = analysisutil.RemoveVendor(path) + if path == nethttpPath { + return false + } + } + + return true +} + +func (r *runner) calledInFunc(f *ssa.Function, called bool) bool { + for _, b := range f.Blocks { + for i, instr := range b.Instrs { + switch instr := instr.(type) { + case *ssa.UnOp: + refs := *instr.Referrers() + if len(refs) == 0 { + return true + } + for _, r := range refs { + if v, ok := r.(ssa.Value); ok { + if ptr, ok := v.Type().(*types.Pointer); !ok || !isNamedType(ptr.Elem(), "io", "ReadCloser") { + continue + } + vrefs := *v.Referrers() + for _, vref := range vrefs { + if vref, ok := vref.(*ssa.UnOp); ok { + vrefs := *vref.Referrers() + if len(vrefs) == 0 { + return true + } + for _, vref := range vrefs { + if c, ok := vref.(*ssa.Call); ok { + if c.Call.Method != nil && c.Call.Method.Name() == closeMethod { + return !called + } + } + } + } + } + } + + } + default: + return r.isopen(b, i) || !called + } + } + } + return false +} + +// isNamedType reports whether t is the named type path.name. 
+func isNamedType(t types.Type, path, name string) bool { + n, ok := t.(*types.Named) + if !ok { + return false + } + obj := n.Obj() + return obj.Name() == name && obj.Pkg() != nil && obj.Pkg().Path() == path +} diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/LICENSE b/vendor/github.com/tomarrell/wrapcheck/v2/LICENSE new file mode 100644 index 000000000..b5d9d30d3 --- /dev/null +++ b/vendor/github.com/tomarrell/wrapcheck/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Tom Arrell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go new file mode 100644 index 000000000..3b445e295 --- /dev/null +++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -0,0 +1,337 @@ +package wrapcheck + +import ( + "go/ast" + "go/token" + "go/types" + "log" + "os" + "strings" + + "github.com/gobwas/glob" + "golang.org/x/tools/go/analysis" +) + +var ( + DefaultIgnoreSigs = []string{ + ".Errorf(", + "errors.New(", + "errors.Unwrap(", + ".Wrap(", + ".Wrapf(", + ".WithMessage(", + ".WithMessagef(", + ".WithStack(", + } +) + +// WrapcheckConfig is the set of configuration values which configure the +// behaviour of the linter. +type WrapcheckConfig struct { + // IgnoreSigs defines a list of substrings which if contained within the + // signature of the function call returning the error, will be ignored. This + // allows you to specify functions that wrapcheck will not report as + // unwrapped. + // + // For example, an ingoredSig of `[]string{"errors.New("}` will ignore errors + // returned from the stdlib package error's function: + // + // `func errors.New(message string) error` + // + // Due to the signature containing the substring `errors.New(`. + // + // Note: Setting this value will intentionally override the default ignored + // sigs. To achieve the same behaviour as default, you should add the default + // list to your config. + IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` + + // IgnorePackageGlobs defines a list of globs which, if matching the package + // of the function returning the error, will ignore the error when doing + // wrapcheck analysis. + // + // This is useful for broadly ignoring packages and subpackages from wrapcheck + // analysis. 
For example, to ignore all errors from all packages and + // subpackages of "encoding" you may include the configuration: + // + // -- .wrapcheck.yaml + // ignorePackageGlobs: + // - encoding/* + IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs" yaml:"ignorePackageGlobs"` +} + +func NewDefaultConfig() WrapcheckConfig { + return WrapcheckConfig{ + IgnoreSigs: DefaultIgnoreSigs, + IgnorePackageGlobs: []string{}, + } +} + +func NewAnalyzer(cfg WrapcheckConfig) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "wrapcheck", + Doc: "Checks that errors returned from external packages are wrapped", + Run: run(cfg), + } +} + +func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { + return func(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + ast.Inspect(file, func(n ast.Node) bool { + if _, ok := n.(*ast.AssignStmt); ok { + return true + } + + ret, ok := n.(*ast.ReturnStmt) + if !ok { + return true + } + + if len(ret.Results) < 1 { + return true + } + + // Iterate over the values to be returned looking for errors + for _, expr := range ret.Results { + // Check if the return expression is a function call, if it is, we need + // to handle it by checking the return params of the function. + retFn, ok := expr.(*ast.CallExpr) + if ok { + // If the return type of the function is a single error. This will not + // match an error within multiple return values, for that, the below + // tuple check is required. + if isError(pass.TypesInfo.TypeOf(expr)) { + reportUnwrapped(pass, retFn, retFn.Pos(), cfg) + return true + } + + // Check if one of the return values from the function is an error + tup, ok := pass.TypesInfo.TypeOf(expr).(*types.Tuple) + if !ok { + return true + } + + // Iterate over the return values of the function looking for error + // types + for i := 0; i < tup.Len(); i++ { + v := tup.At(i) + if v == nil { + return true + } + if isError(v.Type()) { + reportUnwrapped(pass, retFn, expr.Pos(), cfg) + return true + } + } + } + + if !isError(pass.TypesInfo.TypeOf(expr)) { + continue + } + + ident, ok := expr.(*ast.Ident) + if !ok { + return true + } + + var ( + call *ast.CallExpr + ) + + // Attempt to find the most recent short assign + if shortAss := prevErrAssign(pass, file, ident); shortAss != nil { + call, ok = shortAss.Rhs[0].(*ast.CallExpr) + if !ok { + return true + } + } else if isUnresolved(file, ident) { + // TODO Check if the identifier is unresolved, and try to resolve it in + // another file. + return true + } else { + // Check for ValueSpec nodes in order to locate a possible var + // declaration. + if ident.Obj == nil { + return true + } + + vSpec, ok := ident.Obj.Decl.(*ast.ValueSpec) + if !ok { + // We couldn't find a short or var assign for this error return. + // This is an error. Where did this identifier come from? Possibly a + // function param. + // + // TODO decide how to handle this case, whether to follow function + // param back, or assert wrapping at call site. + + return true + } + + if len(vSpec.Values) < 1 { + return true + } + + call, ok = vSpec.Values[0].(*ast.CallExpr) + if !ok { + return true + } + } + + // Make sure there is a call identified as producing the error being + // returned, otherwise just bail + if call == nil { + return true + } + + reportUnwrapped(pass, call, ident.NamePos, cfg) + } + + return true + }) + } + + return nil, nil + } +} + +// Report unwrapped takes a call expression and an identifier and reports +// if the call is unwrapped. 
+func reportUnwrapped(pass *analysis.Pass, call *ast.CallExpr, tokenPos token.Pos, cfg WrapcheckConfig) { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + // Check for ignored signatures + fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() + if contains(cfg.IgnoreSigs, fnSig) { + return + } + + // Check if the underlying type of the "x" in x.y.z is an interface, as + // errors returned from interface types should be wrapped. + if isInterface(pass, sel) { + pass.Reportf(tokenPos, "error returned from interface method should be wrapped: sig: %s", fnSig) + return + } + + // Check whether the function being called comes from another package, + // as functions called across package boundaries which returns errors + // should be wrapped + if isFromOtherPkg(pass, sel, cfg) { + pass.Reportf(tokenPos, "error returned from external package is unwrapped: sig: %s", fnSig) + return + } +} + +// isInterface returns whether the function call is one defined on an interface. +func isInterface(pass *analysis.Pass, sel *ast.SelectorExpr) bool { + _, ok := pass.TypesInfo.TypeOf(sel.X).Underlying().(*types.Interface) + + return ok +} + +func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, config WrapcheckConfig) bool { + // The package of the function that we are calling which returns the error + fn := pass.TypesInfo.ObjectOf(sel.Sel) + + for _, globString := range config.IgnorePackageGlobs { + g, err := glob.Compile(globString) + if err != nil { + log.Printf("unable to parse glob: %s\n", globString) + os.Exit(1) + } + + if g.Match(fn.Pkg().Path()) { + return false + } + } + + // If it's not a package name, then we should check the selector to make sure + // that it's an identifier from the same package + if pass.Pkg.Path() == fn.Pkg().Path() { + return false + } + + return true +} + +// prevErrAssign traverses the AST of a file looking for the most recent +// assignment to an error declaration which is specified by the returnIdent +// identifier. +// +// This only returns short form assignments and reassignments, e.g. `:=` and +// `=`. This does not include `var` statements. This function will return nil if +// the only declaration is a `var` (aka ValueSpec) declaration. +func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident) *ast.AssignStmt { + // A slice containing all the assignments which contain an identifer + // referring to the source declaration of the error. This is to catch + // cases where err is defined once, and then reassigned multiple times + // within the same block. In these cases, we should check the method of + // the most recent call. + var assigns []*ast.AssignStmt + + // Find all assignments which have the same declaration + ast.Inspect(file, func(n ast.Node) bool { + if ass, ok := n.(*ast.AssignStmt); ok { + for _, expr := range ass.Lhs { + if !isError(pass.TypesInfo.TypeOf(expr)) { + continue + } + if assIdent, ok := expr.(*ast.Ident); ok { + if assIdent.Obj == nil || returnIdent.Obj == nil { + // If we can't find the Obj for one of the identifiers, just skip + // it. 
+ return true + } else if assIdent.Obj.Decl == returnIdent.Obj.Decl { + assigns = append(assigns, ass) + } + } + } + } + + return true + }) + + // Iterate through the assignments, comparing the token positions to + // find the assignment that directly precedes the return position + var mostRecentAssign *ast.AssignStmt + + for _, ass := range assigns { + if ass.Pos() > returnIdent.Pos() { + break + } + mostRecentAssign = ass + } + + return mostRecentAssign +} + +func contains(slice []string, el string) bool { + for _, s := range slice { + if strings.Contains(el, s) { + return true + } + } + + return false +} + +// isError returns whether or not the provided type interface is an error +func isError(typ types.Type) bool { + if typ == nil { + return false + } + + return typ.String() == "error" +} + +func isUnresolved(file *ast.File, ident *ast.Ident) bool { + for _, unresolvedIdent := range file.Unresolved { + if unresolvedIdent.Pos() == ident.Pos() { + return true + } + } + + return false +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/.editorconfig b/vendor/github.com/tommy-muehle/go-mnd/v2/.editorconfig new file mode 100644 index 000000000..fe2c20fb0 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/.editorconfig @@ -0,0 +1,21 @@ +root = true + +[*] +charset = utf-8 +indent_size = 4 +indent_style = space +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*.md] +trim_trailing_whitespace = false + +[*.json] +indent_size = 2 + +[*.{yaml,yml}] +indent_size = 2 + +[Makefile] +indent_style = tab diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/.gitattributes b/vendor/github.com/tommy-muehle/go-mnd/v2/.gitattributes new file mode 100644 index 000000000..005358190 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/.gitattributes @@ -0,0 +1,9 @@ +/.gitattributes export-ignore +/.gitignore export-ignore +/.editorconfig export-ignore +/.goreleaser.yml export-ignore +/.github/ export-ignore +/examples/ export-ignore +/testdata/ export-ignore +/tools/ export-ignore +/Makefile export-ignore diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/.gitignore b/vendor/github.com/tommy-muehle/go-mnd/v2/.gitignore new file mode 100644 index 000000000..abc11b330 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/.gitignore @@ -0,0 +1,3 @@ +build/ +dist/ +coverage.txt diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/.goreleaser.yml b/vendor/github.com/tommy-muehle/go-mnd/v2/.goreleaser.yml new file mode 100644 index 000000000..47cbca5e5 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/.goreleaser.yml @@ -0,0 +1,29 @@ +builds: + - main: ./cmd/mnd/main.go + binary: mnd + goos: + - windows + - darwin + - linux + goarch: + - amd64 + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.buildTime={{.Date}}`. 
+ +archives: + - format: tar.gz + format_overrides: + - goos: windows + format: zip + +brews: + - name: mnd + tap: + owner: tommy-muehle + name: homebrew-tap + folder: Formula + homepage: https://github.com/tommy-muehle/go-mnd + description: Magic number detector for Go + test: | + system "#{bin}/mnd --version" + install: | + bin.install "mnd" diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/Dockerfile b/vendor/github.com/tommy-muehle/go-mnd/v2/Dockerfile new file mode 100644 index 000000000..bb8e2b7f4 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/Dockerfile @@ -0,0 +1,17 @@ +ARG GO_VERSION=1.15 + +FROM golang:${GO_VERSION}-alpine AS builder +RUN apk add --update --no-cache make git curl gcc libc-dev +RUN mkdir -p /build +WORKDIR /build +COPY . /build/ +RUN go mod download +RUN go build -o go-mnd cmd/mnd/main.go + +FROM golang:${GO_VERSION}-alpine +RUN apk add --update --no-cache bash git gcc libc-dev +COPY --from=builder /build/go-mnd /bin/go-mnd +COPY entrypoint.sh /bin/entrypoint.sh +VOLUME /app +WORKDIR /app +ENTRYPOINT ["/bin/entrypoint.sh"] diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/LICENSE b/vendor/github.com/tommy-muehle/go-mnd/v2/LICENSE new file mode 100644 index 000000000..8825fad20 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 Tommy Muehle + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/Makefile b/vendor/github.com/tommy-muehle/go-mnd/v2/Makefile new file mode 100644 index 000000000..b8a32316b --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/Makefile @@ -0,0 +1,32 @@ +GIT_TAG?= $(shell git describe --abbrev=0) + +GO_VERSION = 1.15 +BUILDFLAGS := '-w -s' + +IMAGE_REPO = "tommymuehle" +BIN = "go-mnd" + +clean: + rm -rf build dist coverage.txt + +test: + go test -race ./... + +test-coverage: + go test -race -coverprofile=coverage.txt -covermode=atomic -coverpkg=./checks,./config + +build: + go build -o build/$(BIN) cmd/mnd/main.go + +image: + @echo "Building the Docker image..." + docker build --rm -t $(IMAGE_REPO)/$(BIN):$(GIT_TAG) --build-arg GO_VERSION=$(GO_VERSION) . + docker tag $(IMAGE_REPO)/$(BIN):$(GIT_TAG) $(IMAGE_REPO)/$(BIN):$(GIT_TAG) + docker tag $(IMAGE_REPO)/$(BIN):$(GIT_TAG) $(IMAGE_REPO)/$(BIN):latest + +image-push: image + @echo "Pushing the Docker image..." 
+ docker push $(IMAGE_REPO)/$(BIN):$(GIT_TAG) + docker push $(IMAGE_REPO)/$(BIN):latest + +.PHONY: clean test test-coverage build image image-push diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/README.md b/vendor/github.com/tommy-muehle/go-mnd/v2/README.md new file mode 100644 index 000000000..6e3a55573 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/README.md @@ -0,0 +1,230 @@ +# go-mnd - Magic number detector for Golang + + + +A vet analyzer to detect magic numbers. + +> **What is a magic number?** +> A magic number is a numeric literal that is not defined as a constant, but which may change, and therefore can be hard to update. It's considered a bad programming practice to use numbers directly in any source code without an explanation. It makes programs harder to read, understand, and maintain. + +## Project status + +![CI](https://github.com/tommy-muehle/go-mnd/workflows/CI/badge.svg) +[![Go Report Card](https://goreportcard.com/badge/github.com/tommy-muehle/go-mnd)](https://goreportcard.com/report/github.com/tommy-muehle/go-mnd) +[![codecov](https://codecov.io/gh/tommy-muehle/go-mnd/branch/master/graph/badge.svg)](https://codecov.io/gh/tommy-muehle/go-mnd) + +## Install + +### Local + +This analyzer requires Golang in version >= 1.12 because it's depends on the **go/analysis** API. + +``` +go get -u github.com/tommy-muehle/go-mnd/v2/cmd/mnd +``` + +### Github action + +You can run go-mnd as a GitHub action as follows: + +``` +name: Example workflow +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v2 + - name: Run go-mnd + uses: tommy-muehle/go-mnd@master + with: + args: ./... +``` + +### GitLab CI + +You can run go-mnd inside a GitLab CI pipeline as follows: + +``` +stages: + - lint + +go:lint:mnd: + stage: lint + needs: [] + image: golang:latest + before_script: + - go get -u github.com/tommy-muehle/go-mnd/cmd/mnd + - go mod tidy + - go mod vendor + script: + - go vet -vettool $(which mnd) ./... +``` + +### Homebrew + +To install with [Homebrew](https://brew.sh/), run: + +``` +brew tap tommy-muehle/tap && brew install tommy-muehle/tap/mnd +``` + +### Docker + +To get the latest available Docker image: + +``` +docker pull tommymuehle/go-mnd +``` + +### Windows + +On Windows download the [latest release](https://github.com/tommy-muehle/go-mnd/releases). + +## Usage + +[![asciicast](https://asciinema.org/a/231021.svg)](https://asciinema.org/a/231021) + +``` +go vet -vettool $(which mnd) ./... +``` + +or directly + +``` +mnd ./... +``` + +or via Docker + +``` +docker run --rm -v "$PWD":/app -w /app tommymuehle/go-mnd:latest ./... +``` + +## Options + +The ```-checks``` option let's you define a comma separated list of checks. + +The ```-ignored-numbers``` option let's you define a comma separated list of numbers to ignore. +For example: `-ignored-numbers=1000,10_000,3.14159264` + +The ```-ignored-functions``` option let's you define a comma separated list of function name regexp patterns to exclude. +For example: `-ignored-functions=math.*,http.StatusText` + +The ```-ignored-files``` option let's you define a comma separated list of filename regexp patterns to exclude. +For example: `-ignored-files=magic_.*.go,.*_numbers.go` + +## Checks + +By default this detector analyses arguments, assigns, cases, conditions, operations and return statements. 
+ +* argument + +``` +t := http.StatusText(200) +``` + +* assign + +``` +c := &http.Client{ + Timeout: 5 * time.Second, +} +``` + +* case + +``` +switch x { + case 3: +} +``` + +* condition + +``` +if x > 7 { +} +``` + +* operation + +``` +var x, y int +y = 10 * x +``` + +* return + +``` +return 3 +``` + +## Excludes + +By default the numbers 0 and 1 as well as test files are excluded! + +### Further known excludes + +The function "Date" in the "Time" package. + +``` +t := time.Date(2017, time.September, 26, 12, 13, 14, 0, time.UTC) +``` + +Additional custom excludes can be defined via option flag. + +## Development + +### Build + +You can build the binary with: + +``` +make +``` + +### Tests + +You can run all unit tests using: + +``` +make test +``` + +And with coverage report: + +``` +make test-coverage +``` + +### Docker image + +You can also build locally the docker image by using the command: + +``` +make image +``` + +## Stickers + +

+ [sticker images]

+ +Just drop me a message via Twitter DM or email if you want some go-mnd stickers +for you or your Gopher usergroup. + +## License + +The MIT License (MIT). Please see [LICENSE](LICENSE) for more information. diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/action.yml b/vendor/github.com/tommy-muehle/go-mnd/v2/action.yml new file mode 100644 index 000000000..3a1f8eb11 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/action.yml @@ -0,0 +1,19 @@ +name: 'go-mnd' +description: 'Runs the Golang magic number detector' +author: '@tommy-muehle' + +inputs: + args: + description: 'Arguments for go-mnd' + required: true + default: '-h' + +runs: + using: 'docker' + image: 'Dockerfile' + args: + - ${{ inputs.args }} + +branding: + icon: 'check-circle' + color: 'blue' diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/analyzer.go b/vendor/github.com/tommy-muehle/go-mnd/v2/analyzer.go new file mode 100644 index 000000000..bf658f42d --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/analyzer.go @@ -0,0 +1,118 @@ +package magic_numbers + +import ( + "flag" + "go/ast" + "strings" + + "github.com/tommy-muehle/go-mnd/v2/checks" + "github.com/tommy-muehle/go-mnd/v2/config" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `magic number detector` + +var Analyzer = &analysis.Analyzer{ + Name: "mnd", + Doc: Doc, + Run: run, + Flags: options(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, +} + +type Checker interface { + NodeFilter() []ast.Node + Check(n ast.Node) +} + +func options() flag.FlagSet { + options := flag.NewFlagSet("", flag.ExitOnError) + options.String("excludes", "", "deprecated: use ignored-files instead") + options.String("ignored-files", "", "comma separated list of file patterns to exclude from analysis") + options.String("ignored-functions", "", "comma separated list of function patterns to exclude from analysis") + options.String("ignored-numbers", "", "comma separated list of numbers to exclude from analysis") + options.String( + "checks", + checks.ArgumentCheck+","+ + checks.CaseCheck+","+ + checks.ConditionCheck+","+ + checks.OperationCheck+","+ + checks.ReturnCheck+","+ + checks.AssignCheck, + "comma separated list of checks", + ) + + return *options +} + +func run(pass *analysis.Pass) (interface{}, error) { + var ignoredFiles string + + ignoredFiles = strings.Join( + []string{ + pass.Analyzer.Flags.Lookup("excludes").Value.String(), // is deprecated + pass.Analyzer.Flags.Lookup("ignored-files").Value.String(), + }, + ",", + ) + + if ignoredFiles == "," { + ignoredFiles = "" + } + + conf := config.WithOptions( + config.WithCustomChecks(pass.Analyzer.Flags.Lookup("checks").Value.String()), + config.WithIgnoredFiles(ignoredFiles), + config.WithIgnoredFunctions(pass.Analyzer.Flags.Lookup("ignored-functions").Value.String()), + config.WithIgnoredNumbers(pass.Analyzer.Flags.Lookup("ignored-numbers").Value.String()), + ) + + var checker []Checker + if conf.IsCheckEnabled(checks.ArgumentCheck) { + checker = append(checker, checks.NewArgumentAnalyzer(pass, conf)) + } + + if conf.IsCheckEnabled(checks.CaseCheck) { + checker = append(checker, checks.NewCaseAnalyzer(pass, conf)) + } + + if conf.IsCheckEnabled(checks.ConditionCheck) { + checker = append(checker, checks.NewConditionAnalyzer(pass, conf)) + } + + if conf.IsCheckEnabled(checks.OperationCheck) { + checker = append(checker, checks.NewOperationAnalyzer(pass, conf)) + } + + if 
conf.IsCheckEnabled(checks.ReturnCheck) { + checker = append(checker, checks.NewReturnAnalyzer(pass, conf)) + } + + if conf.IsCheckEnabled(checks.AssignCheck) { + checker = append(checker, checks.NewAssignAnalyzer(pass, conf)) + } + + i := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + for _, c := range checker { + c := c + i.Preorder(c.NodeFilter(), func(node ast.Node) { + for _, exclude := range conf.IgnoredFiles { + if exclude.String() == "" { + continue + } + if exclude.MatchString(pass.Fset.Position(node.Pos()).Filename) { + return + } + } + + c.Check(node) + }) + } + + return nil, nil +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/argument.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/argument.go new file mode 100644 index 000000000..df6ad676d --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/argument.go @@ -0,0 +1,121 @@ +package checks + +import ( + "go/ast" + "go/token" + "strconv" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/tommy-muehle/go-mnd/v2/config" +) + +const ArgumentCheck = "argument" + +// constantDefinitions is used to save lines (by number) which contain a constant definition. +var constantDefinitions = map[string]bool{} +var mu sync.RWMutex + +type ArgumentAnalyzer struct { + config *config.Config + pass *analysis.Pass +} + +func NewArgumentAnalyzer(pass *analysis.Pass, config *config.Config) *ArgumentAnalyzer { + return &ArgumentAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *ArgumentAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.GenDecl)(nil), + (*ast.CallExpr)(nil), + } +} + +func (a *ArgumentAnalyzer) Check(n ast.Node) { + switch expr := n.(type) { + case *ast.CallExpr: + a.checkCallExpr(expr) + case *ast.GenDecl: + if expr.Tok != token.CONST { + return + } + + for _, x := range expr.Specs { + pos := a.pass.Fset.Position(x.Pos()) + + mu.Lock() + constantDefinitions[pos.Filename+":"+strconv.Itoa(pos.Line)] = true + mu.Unlock() + } + } +} + +func (a *ArgumentAnalyzer) checkCallExpr(expr *ast.CallExpr) { + pos := a.pass.Fset.Position(expr.Pos()) + + mu.RLock() + ok := constantDefinitions[pos.Filename+":"+strconv.Itoa(pos.Line)] + mu.RUnlock() + + if ok { + return + } + + switch f := expr.Fun.(type) { + case *ast.SelectorExpr: + switch prefix := f.X.(type) { + case *ast.Ident: + if a.config.IsIgnoredFunction(prefix.Name + "." 
+ f.Sel.Name) { + return + } + } + } + + for i, arg := range expr.Args { + switch x := arg.(type) { + case *ast.BasicLit: + if !a.isMagicNumber(x) { + continue + } + // If it's a magic number and has no previous element, report it + if i == 0 { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ArgumentCheck) + } else { + // Otherwise check all args + switch expr.Args[i].(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ArgumentCheck) + } + } + } + case *ast.BinaryExpr: + a.checkBinaryExpr(x) + } + } +} + +func (a *ArgumentAnalyzer) checkBinaryExpr(expr *ast.BinaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ArgumentCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, ArgumentCheck) + } + } +} + +func (a *ArgumentAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/assign.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/assign.go new file mode 100644 index 000000000..f930d0880 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/assign.go @@ -0,0 +1,86 @@ +package checks + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + config "github.com/tommy-muehle/go-mnd/v2/config" +) + +const AssignCheck = "assign" + +type AssignAnalyzer struct { + pass *analysis.Pass + config *config.Config +} + +func NewAssignAnalyzer(pass *analysis.Pass, config *config.Config) *AssignAnalyzer { + return &AssignAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *AssignAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.KeyValueExpr)(nil), + (*ast.AssignStmt)(nil), + } +} + +func (a *AssignAnalyzer) Check(n ast.Node) { + switch expr := n.(type) { + case *ast.KeyValueExpr: + switch x := expr.Value.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, AssignCheck) + } + case *ast.BinaryExpr: + a.checkBinaryExpr(x) + } + case *ast.AssignStmt: + for _, e := range expr.Rhs { + switch y := e.(type) { + case *ast.UnaryExpr: + a.checkUnaryExpr(y) + case *ast.BinaryExpr: + switch x := y.Y.(type) { + case *ast.UnaryExpr: + a.checkUnaryExpr(x) + } + } + } + } +} + +func (a *AssignAnalyzer) checkUnaryExpr(expr *ast.UnaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, AssignCheck) + } + } +} + +func (a *AssignAnalyzer) checkBinaryExpr(expr *ast.BinaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, AssignCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, AssignCheck) + } + } +} + +func (a *AssignAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/case.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/case.go new file mode 100644 index 000000000..228cab4b8 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/case.go @@ -0,0 +1,68 @@ +package checks + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + 
config "github.com/tommy-muehle/go-mnd/v2/config" +) + +const CaseCheck = "case" + +type CaseAnalyzer struct { + pass *analysis.Pass + config *config.Config +} + +func NewCaseAnalyzer(pass *analysis.Pass, config *config.Config) *CaseAnalyzer { + return &CaseAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *CaseAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.CaseClause)(nil), + } +} + +func (a *CaseAnalyzer) Check(n ast.Node) { + caseClause, ok := n.(*ast.CaseClause) + if !ok { + return + } + + for _, c := range caseClause.List { + switch x := c.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, CaseCheck) + } + case *ast.BinaryExpr: + a.checkBinaryExpr(x) + } + } +} + +func (a *CaseAnalyzer) checkBinaryExpr(expr *ast.BinaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, CaseCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, CaseCheck) + } + } +} + +func (a *CaseAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/checks.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/checks.go new file mode 100644 index 000000000..deff0c7bf --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/checks.go @@ -0,0 +1,3 @@ +package checks + +const reportMsg = "Magic number: %v, in <%s> detected" diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/condition.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/condition.go new file mode 100644 index 000000000..20f892ede --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/condition.go @@ -0,0 +1,55 @@ +package checks + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + config "github.com/tommy-muehle/go-mnd/v2/config" +) + +const ConditionCheck = "condition" + +type ConditionAnalyzer struct { + pass *analysis.Pass + config *config.Config +} + +func NewConditionAnalyzer(pass *analysis.Pass, config *config.Config) *ConditionAnalyzer { + return &ConditionAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *ConditionAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.IfStmt)(nil), + } +} + +func (a *ConditionAnalyzer) Check(n ast.Node) { + expr, ok := n.(*ast.IfStmt).Cond.(*ast.BinaryExpr) + if !ok { + return + } + + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ConditionCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, ConditionCheck) + } + } +} + +func (a *ConditionAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/operation.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/operation.go new file mode 100644 index 000000000..ddf3a0363 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/operation.go @@ -0,0 +1,77 @@ +package checks + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + config "github.com/tommy-muehle/go-mnd/v2/config" +) + +const OperationCheck = "operation" + +type OperationAnalyzer struct { + pass 
*analysis.Pass + config *config.Config +} + +func NewOperationAnalyzer(pass *analysis.Pass, config *config.Config) *OperationAnalyzer { + return &OperationAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *OperationAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.ParenExpr)(nil), + } +} + +func (a *OperationAnalyzer) Check(n ast.Node) { + switch expr := n.(type) { + case *ast.ParenExpr: + switch x := expr.X.(type) { + case *ast.BinaryExpr: + a.checkBinaryExpr(x) + } + case *ast.AssignStmt: + for _, y := range expr.Rhs { + switch x := y.(type) { + case *ast.BinaryExpr: + switch xExpr := x.X.(type) { + case *ast.BinaryExpr: + a.checkBinaryExpr(xExpr) + } + switch yExpr := x.Y.(type) { + case *ast.BinaryExpr: + a.checkBinaryExpr(yExpr) + } + + a.checkBinaryExpr(x) + } + } + } +} + +func (a *OperationAnalyzer) checkBinaryExpr(expr *ast.BinaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, OperationCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, OperationCheck) + } + } +} + +func (a *OperationAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/checks/return.go b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/return.go new file mode 100644 index 000000000..bc53940c7 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/checks/return.go @@ -0,0 +1,68 @@ +package checks + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + config "github.com/tommy-muehle/go-mnd/v2/config" +) + +const ReturnCheck = "return" + +type ReturnAnalyzer struct { + pass *analysis.Pass + config *config.Config +} + +func NewReturnAnalyzer(pass *analysis.Pass, config *config.Config) *ReturnAnalyzer { + return &ReturnAnalyzer{ + pass: pass, + config: config, + } +} + +func (a *ReturnAnalyzer) NodeFilter() []ast.Node { + return []ast.Node{ + (*ast.ReturnStmt)(nil), + } +} + +func (a *ReturnAnalyzer) Check(n ast.Node) { + stmt, ok := n.(*ast.ReturnStmt) + if !ok { + return + } + + for _, expr := range stmt.Results { + switch x := expr.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ReturnCheck) + } + case *ast.BinaryExpr: + a.checkBinaryExpr(x) + } + } +} + +func (a *ReturnAnalyzer) checkBinaryExpr(expr *ast.BinaryExpr) { + switch x := expr.X.(type) { + case *ast.BasicLit: + if a.isMagicNumber(x) { + a.pass.Reportf(x.Pos(), reportMsg, x.Value, ReturnCheck) + } + } + + switch y := expr.Y.(type) { + case *ast.BasicLit: + if a.isMagicNumber(y) { + a.pass.Reportf(y.Pos(), reportMsg, y.Value, ReturnCheck) + } + } +} + +func (a *ReturnAnalyzer) isMagicNumber(l *ast.BasicLit) bool { + return (l.Kind == token.FLOAT || l.Kind == token.INT) && !a.config.IsIgnoredNumber(l.Value) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/config/config.go b/vendor/github.com/tommy-muehle/go-mnd/v2/config/config.go new file mode 100644 index 000000000..a4681e37d --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/config/config.go @@ -0,0 +1,118 @@ +package config + +import ( + "regexp" + "strings" +) + +type Config struct { + Checks map[string]bool + IgnoredNumbers map[string]struct{} + IgnoredFunctions []*regexp.Regexp + IgnoredFiles []*regexp.Regexp +} + +type Option func(config 
*Config) + +func DefaultConfig() *Config { + return &Config{ + Checks: map[string]bool{}, + IgnoredNumbers: map[string]struct{}{ + "0": {}, + "0.0": {}, + "1": {}, + "1.0": {}, + }, + IgnoredFiles: []*regexp.Regexp{ + regexp.MustCompile(`_test.go`), + }, + IgnoredFunctions: []*regexp.Regexp{ + regexp.MustCompile(`time.Date`), + }, + } +} + +func WithOptions(options ...Option) *Config { + c := DefaultConfig() + + for _, option := range options { + option(c) + } + + return c +} + +func WithIgnoredFunctions(excludes string) Option { + return func(config *Config) { + if excludes == "" { + return + } + + for _, exclude := range strings.Split(excludes, ",") { + config.IgnoredFunctions = append(config.IgnoredFunctions, regexp.MustCompile(exclude)) + } + } +} + +func WithIgnoredFiles(excludes string) Option { + return func(config *Config) { + if excludes == "" { + return + } + + for _, exclude := range strings.Split(excludes, ",") { + config.IgnoredFiles = append(config.IgnoredFiles, regexp.MustCompile(exclude)) + } + } +} + +func WithIgnoredNumbers(numbers string) Option { + return func(config *Config) { + if numbers == "" { + return + } + + for _, number := range strings.Split(numbers, ",") { + config.IgnoredNumbers[config.removeDigitSeparator(number)] = struct{}{} + } + } +} + +func WithCustomChecks(checks string) Option { + return func(config *Config) { + if checks == "" { + return + } + + for name, _ := range config.Checks { + config.Checks[name] = false + } + + for _, name := range strings.Split(checks, ",") { + config.Checks[name] = true + } + } +} + +func (c *Config) IsCheckEnabled(name string) bool { + return c.Checks[name] +} + +func (c *Config) IsIgnoredNumber(number string) bool { + _, ok := c.IgnoredNumbers[c.removeDigitSeparator(number)] + return ok +} + +func (c *Config) IsIgnoredFunction(f string) bool { + for _, pattern := range c.IgnoredFunctions { + if pattern.MatchString(f) { + return true + } + } + + return false +} + +func (c *Config) removeDigitSeparator(number string) string { + return strings.Replace(number, "_", "", -1) +} diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/entrypoint.sh b/vendor/github.com/tommy-muehle/go-mnd/v2/entrypoint.sh new file mode 100644 index 000000000..cabc2f63d --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/entrypoint.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Expand the arguments into an array of strings. This is required because the GitHub action +# provides all arguments concatenated as a single string. 
+ARGS=("$@") + +/bin/go-mnd "${ARGS[*]}" diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/go.mod b/vendor/github.com/tommy-muehle/go-mnd/v2/go.mod new file mode 100644 index 000000000..8e7c18e22 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/go.mod @@ -0,0 +1,9 @@ +module github.com/tommy-muehle/go-mnd/v2 + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65 +) diff --git a/vendor/github.com/tommy-muehle/go-mnd/v2/go.sum b/vendor/github.com/tommy-muehle/go-mnd/v2/go.sum new file mode 100644 index 000000000..991a43759 --- /dev/null +++ b/vendor/github.com/tommy-muehle/go-mnd/v2/go.sum @@ -0,0 +1,28 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65 h1:1KSbntBked74wYsKq0jzXYy7ZwcjAUtrl7EmPE97Iiw= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git 
a/vendor/github.com/ultraware/funlen/LICENSE b/vendor/github.com/ultraware/funlen/LICENSE new file mode 100644 index 000000000..dca75556d --- /dev/null +++ b/vendor/github.com/ultraware/funlen/LICENSE @@ -0,0 +1,7 @@ +Copyright 2018 Ultraware Consultancy and Development B.V. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md new file mode 100644 index 000000000..aaf348521 --- /dev/null +++ b/vendor/github.com/ultraware/funlen/README.md @@ -0,0 +1,9 @@ +# Funlen linter + +Funlen is a linter that checks for long functions. It can checks both on the number of lines and the number of statements. + +The default limits are 60 lines and 40 statements. You can configure these. + +## Installation guide + +Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go new file mode 100644 index 000000000..2ba353002 --- /dev/null +++ b/vendor/github.com/ultraware/funlen/main.go @@ -0,0 +1,104 @@ +package funlen + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" +) + +const ( + defaultLineLimit = 60 + defaultStmtLimit = 40 +) + +// Run runs this linter on the provided code +func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Message { + if lineLimit == 0 { + lineLimit = defaultLineLimit + } + if stmtLimit == 0 { + stmtLimit = defaultStmtLimit + } + + var msgs []Message + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. 
cgo + continue + } + + if stmtLimit > 0 { + if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { + msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit)) + continue + } + } + + if lineLimit > 0 { + if lines := getLines(fset, decl); lines > lineLimit { + msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) + } + } + } + + return msgs +} + +// Message contains a message +type Message struct { + Pos token.Position + Message string +} + +func makeLineMessage(fset *token.FileSet, funcInfo *ast.Ident, lines, lineLimit int) Message { + return Message{ + fset.Position(funcInfo.Pos()), + fmt.Sprintf("Function '%s' is too long (%d > %d)\n", funcInfo.Name, lines, lineLimit), + } +} + +func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit int) Message { + return Message{ + fset.Position(funcInfo.Pos()), + fmt.Sprintf("Function '%s' has too many statements (%d > %d)\n", funcInfo.Name, stmts, stmtLimit), + } +} + +func getLines(fset *token.FileSet, f *ast.FuncDecl) int { // nolint: interfacer + return fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 +} + +func parseStmts(s []ast.Stmt) (total int) { + for _, v := range s { + total++ + switch stmt := v.(type) { + case *ast.BlockStmt: + total += parseStmts(stmt.List) - 1 + case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, + *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + total += parseBodyListStmts(stmt) + case *ast.CaseClause: + total += parseStmts(stmt.Body) + case *ast.AssignStmt: + total += checkInlineFunc(stmt.Rhs[0]) + case *ast.GoStmt: + total += checkInlineFunc(stmt.Call.Fun) + case *ast.DeferStmt: + total += checkInlineFunc(stmt.Call.Fun) + } + } + return +} + +func checkInlineFunc(stmt ast.Expr) int { + if block, ok := stmt.(*ast.FuncLit); ok { + return parseStmts(block.Body.List) + } + return 0 +} + +func parseBodyListStmts(t interface{}) int { + i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() + return parseStmts(i.([]ast.Stmt)) +} diff --git a/vendor/github.com/ultraware/whitespace/LICENSE b/vendor/github.com/ultraware/whitespace/LICENSE new file mode 100644 index 000000000..dca75556d --- /dev/null +++ b/vendor/github.com/ultraware/whitespace/LICENSE @@ -0,0 +1,7 @@ +Copyright 2018 Ultraware Consultancy and Development B.V. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
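The funlen package vendored above is normally driven through golangci-lint, but its exported API is small enough to call directly. The following is a minimal, illustrative sketch only — it assumes the import path `github.com/ultraware/funlen` (matching the vendor directory) and uses a placeholder file name; passing 0 for both limits falls back to the package defaults of 60 lines and 40 statements:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"github.com/ultraware/funlen"
)

func main() {
	fset := token.NewFileSet()
	// Parse the file whose functions we want to measure (placeholder name).
	file, err := parser.ParseFile(fset, "main.go", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Zero limits make Run fall back to its defaults (60 lines, 40 statements).
	for _, msg := range funlen.Run(file, fset, 0, 0) {
		// Message strings produced by funlen already end with a newline.
		fmt.Printf("%s: %s", msg.Pos, msg.Message)
	}
}
```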
diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md new file mode 100644 index 000000000..aed9a485f --- /dev/null +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -0,0 +1,7 @@ +# Whitespace linter + +Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc. + +## Installation guide + +Whitespace is included in [https://github.com/golangci/golangci-lint/](golangci-lint). Install it and enable whitespace. diff --git a/vendor/github.com/ultraware/whitespace/main.go b/vendor/github.com/ultraware/whitespace/main.go new file mode 100644 index 000000000..c36086c0e --- /dev/null +++ b/vendor/github.com/ultraware/whitespace/main.go @@ -0,0 +1,158 @@ +package whitespace + +import ( + "go/ast" + "go/token" +) + +// Message contains a message +type Message struct { + Pos token.Position + Type MessageType + Message string +} + +// MessageType describes what should happen to fix the warning +type MessageType uint8 + +// List of MessageTypes +const ( + MessageTypeLeading MessageType = iota + 1 + MessageTypeTrailing + MessageTypeAddAfter +) + +// Settings contains settings for edge-cases +type Settings struct { + MultiIf bool + MultiFunc bool +} + +// Run runs this linter on the provided code +func Run(file *ast.File, fset *token.FileSet, settings Settings) []Message { + var messages []Message + + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo + continue + } + + vis := visitor{file.Comments, fset, nil, make(map[*ast.BlockStmt]bool), settings} + ast.Walk(&vis, decl) + + messages = append(messages, vis.messages...) + } + + return messages +} + +type visitor struct { + comments []*ast.CommentGroup + fset *token.FileSet + messages []Message + wantNewline map[*ast.BlockStmt]bool + settings Settings +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + return v + } + + if stmt, ok := node.(*ast.IfStmt); ok && v.settings.MultiIf { + checkMultiLine(v, stmt.Body, stmt.Cond) + } + + if stmt, ok := node.(*ast.FuncDecl); ok && v.settings.MultiFunc { + checkMultiLine(v, stmt.Body, stmt.Type) + } + + if stmt, ok := node.(*ast.BlockStmt); ok { + wantNewline := v.wantNewline[stmt] + + comments := v.comments + if wantNewline { + comments = nil // Comments also count as a newline if we want a newline + } + first, last := firstAndLast(comments, v.fset, stmt.Pos(), stmt.End(), stmt.List) + + startMsg := checkStart(v.fset, stmt.Lbrace, first) + + if wantNewline && startMsg == nil { + v.messages = append(v.messages, Message{v.fset.Position(stmt.Pos()), MessageTypeAddAfter, `multi-line statement should be followed by a newline`}) + } else if !wantNewline && startMsg != nil { + v.messages = append(v.messages, *startMsg) + } + + if msg := checkEnd(v.fset, stmt.Rbrace, last); msg != nil { + v.messages = append(v.messages, *msg) + } + } + + return v +} + +func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { + start, end := posLine(v.fset, stmtStart.Pos()), posLine(v.fset, stmtStart.End()) + + if end > start { // Check only multi line conditions + v.wantNewline[body] = true + } +} + +func posLine(fset *token.FileSet, pos token.Pos) int { + return fset.Position(pos).Line +} + +func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, start, end token.Pos, stmts []ast.Stmt) (ast.Node, ast.Node) { + if len(stmts) == 0 { + return nil, nil + } + + first, last := 
ast.Node(stmts[0]), ast.Node(stmts[len(stmts)-1]) + + for _, c := range comments { + if posLine(fset, c.Pos()) == posLine(fset, start) || posLine(fset, c.End()) == posLine(fset, end) { + continue + } + + if c.Pos() < start || c.End() > end { + continue + } + if c.Pos() < first.Pos() { + first = c + } + if c.End() > last.End() { + last = c + } + } + + return first, last +} + +func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { + if first == nil { + return nil + } + + if posLine(fset, start)+1 < posLine(fset, first.Pos()) { + pos := fset.Position(start) + return &Message{pos, MessageTypeLeading, `unnecessary leading newline`} + } + + return nil +} + +func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { + if last == nil { + return nil + } + + if posLine(fset, end)-1 > posLine(fset, last.End()) { + pos := fset.Position(end) + return &Message{pos, MessageTypeTrailing, `unnecessary trailing newline`} + } + + return nil +} diff --git a/vendor/github.com/uudashr/gocognit/LICENSE b/vendor/github.com/uudashr/gocognit/LICENSE new file mode 100644 index 000000000..75d4b9c98 --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Nuruddin Ashr + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/uudashr/gocognit/README.md b/vendor/github.com/uudashr/gocognit/README.md new file mode 100644 index 000000000..4a8846907 --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -0,0 +1,185 @@ +[![GoDoc](https://godoc.org/github.com/uudashr/gocognit?status.svg)](https://godoc.org/github.com/uudashr/gocognit) +# Gocognit +Gocognit calculates cognitive complexities of functions in Go source code. A measurement of how hard does the code is intuitively to understand. 
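In other words, it scores how hard a function is to understand intuitively. Besides the `gocognit` command described below, the package exports `Complexity` and `ComplexityStats` (see `gocognit.go` later in this patch), so it can also be driven programmatically. A minimal, illustrative sketch — the file name is a placeholder and passing `nil` as the initial stats slice is simply the easiest choice:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"github.com/uudashr/gocognit"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "main.go", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// ComplexityStats appends one Stat (package, function, complexity, position)
	// per top-level function declaration in the file.
	for _, stat := range gocognit.ComplexityStats(f, fset, nil) {
		fmt.Println(stat)
	}
}
```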
+
+## Understanding the complexity
+
+Given code using an `if` statement:
+```go
+func GetWords(number int) string {
+    if number == 1 { // +1
+        return "one"
+    } else if number == 2 { // +1
+        return "a couple"
+    } else if number == 3 { // +1
+        return "a few"
+    } else { // +1
+        return "lots"
+    }
+} // Cognitive complexity = 4
+```
+
+The code above can be refactored using a `switch` statement:
+```go
+func GetWords(number int) string {
+    switch number { // +1
+    case 1:
+        return "one"
+    case 2:
+        return "a couple"
+    case 3:
+        return "a few"
+    default:
+        return "lots"
+    }
+} // Cognitive complexity = 1
+```
+
+As you can see, both versions do the same thing, but the second one is easier to understand; that is why its cognitive complexity score is lower than the first one's.
+
+## Comparison with cyclomatic complexity
+
+### Example 1
+#### Cyclomatic complexity
+```go
+func GetWords(number int) string { // +1
+    switch number {
+    case 1: // +1
+        return "one"
+    case 2: // +1
+        return "a couple"
+    case 3: // +1
+        return "a few"
+    default:
+        return "lots"
+    }
+} // Cyclomatic complexity = 4
+```
+
+#### Cognitive complexity
+```go
+func GetWords(number int) string {
+    switch number { // +1
+    case 1:
+        return "one"
+    case 2:
+        return "a couple"
+    case 3:
+        return "a few"
+    default:
+        return "lots"
+    }
+} // Cognitive complexity = 1
+```
+
+Cognitive complexity gives a lower score than cyclomatic complexity here.
+
+### Example 2
+#### Cyclomatic complexity
+```go
+func SumOfPrimes(max int) int { // +1
+    var total int
+
+OUT:
+    for i := 1; i < max; i++ { // +1
+        for j := 2; j < i; j++ { // +1
+            if i%j == 0 { // +1
+                continue OUT
+            }
+        }
+        total += i
+    }
+
+    return total
+} // Cyclomatic complexity = 4
+```
+
+#### Cognitive complexity
+```go
+func SumOfPrimes(max int) int {
+    var total int
+
+OUT:
+    for i := 1; i < max; i++ { // +1
+        for j := 2; j < i; j++ { // +2 (nesting = 1)
+            if i%j == 0 { // +3 (nesting = 2)
+                continue OUT // +1
+            }
+        }
+        total += i
+    }
+
+    return total
+} // Cognitive complexity = 7
+```
+
+Cognitive complexity gives a higher score than cyclomatic complexity here.
+
+## Rules
+
+The cognitive complexity of a function is calculated according to the
+following rules:
+> Note: these rules are specific to Go; please see the [original whitepaper](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) for a more complete reference.
+
+### Increments
+There is an increment for each of the following:
+1. `if`, `else if`, `else`
+2. `switch`, `select`
+3. `for`
+4. `goto` LABEL, `break` LABEL, `continue` LABEL
+5. sequence of binary logical operators
+6. each method in a recursion cycle
+
+### Nesting level
+The following structures increment the nesting level:
+1. `if`, `else if`, `else`
+2. `switch`, `select`
+3. `for`
+4. function literal or lambda
+
+### Nesting increments
+The following structures receive a nesting increment commensurate with their nested depth inside nesting structures:
+1. `if`
+2. `switch`, `select`
+3. `for`
+
+## Installation
+```
+$ go get github.com/uudashr/gocognit/cmd/gocognit
+```
+
+## Usage
+
+```
+$ gocognit
+Calculate cognitive complexities of Go functions.
+Usage:
+    gocognit [flags] ...
+Flags:
+    -over N   show functions with complexity > N only and
+              return exit code 1 if the set is non-empty
+    -top N    show the top N most complex functions only
+    -avg      show the average complexity over all functions,
+              not depending on whether -over or -top are set
+The output fields for each line are:
+<complexity> <package> <function> <file:row:column>
+```
+
+Examples:
+
+```
+$ gocognit .
+$ gocognit main.go +$ gocognit -top 10 src/ +$ gocognit -over 25 docker +$ gocognit -avg . +``` + +The output fields for each line are: +``` + +``` + +## Related project +- [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on. +- [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. \ No newline at end of file diff --git a/vendor/github.com/uudashr/gocognit/doc.go b/vendor/github.com/uudashr/gocognit/doc.go new file mode 100644 index 000000000..ae3d0a226 --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/doc.go @@ -0,0 +1,2 @@ +// Package gocognit defines Analyzer other utilities to checks and calculate the complexity of function based on "cognitive complexity" methods. +package gocognit diff --git a/vendor/github.com/uudashr/gocognit/go.mod b/vendor/github.com/uudashr/gocognit/go.mod new file mode 100644 index 000000000..749f228ac --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/go.mod @@ -0,0 +1,5 @@ +module github.com/uudashr/gocognit + +go 1.16 + +require golang.org/x/tools v0.1.4 diff --git a/vendor/github.com/uudashr/gocognit/go.sum b/vendor/github.com/uudashr/gocognit/go.sum new file mode 100644 index 000000000..40fff4fa9 --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/go.sum @@ -0,0 +1,27 @@ +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.4 
h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go b/vendor/github.com/uudashr/gocognit/gocognit.go new file mode 100644 index 000000000..0687f5e2e --- /dev/null +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -0,0 +1,385 @@ +package gocognit + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// Stat is statistic of the complexity. +type Stat struct { + PkgName string + FuncName string + Complexity int + Pos token.Position +} + +func (s Stat) String() string { + return fmt.Sprintf("%d %s %s %s", s.Complexity, s.PkgName, s.FuncName, s.Pos) +} + +// ComplexityStats builds the complexity statistics. +func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { + for _, decl := range f.Decls { + if fn, ok := decl.(*ast.FuncDecl); ok { + stats = append(stats, Stat{ + PkgName: f.Name.Name, + FuncName: funcName(fn), + Complexity: Complexity(fn), + Pos: fset.Position(fn.Pos()), + }) + } + } + return stats +} + +// funcName returns the name representation of a function or method: +// "(Type).Name" for methods or simply "Name" for functions. +func funcName(fn *ast.FuncDecl) string { + if fn.Recv != nil { + if fn.Recv.NumFields() > 0 { + typ := fn.Recv.List[0].Type + return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) + } + } + return fn.Name.Name +} + +// recvString returns a string representation of recv of the +// form "T", "*T", or "BADRECV" (if not a proper receiver type). +func recvString(recv ast.Expr) string { + switch t := recv.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return "*" + recvString(t.X) + } + return "BADRECV" +} + +// Complexity calculates the cognitive complexity of a function. 
+func Complexity(fn *ast.FuncDecl) int { + v := complexityVisitor{ + name: fn.Name, + } + + ast.Walk(&v, fn) + return v.complexity +} + +type complexityVisitor struct { + name *ast.Ident + complexity int + nesting int + elseNodes map[ast.Node]bool + calculatedExprs map[ast.Expr]bool +} + +func (v *complexityVisitor) incNesting() { + v.nesting++ +} + +func (v *complexityVisitor) decNesting() { + v.nesting-- +} + +func (v *complexityVisitor) incComplexity() { + v.complexity++ +} + +func (v *complexityVisitor) nestIncComplexity() { + v.complexity += (v.nesting + 1) +} + +func (v *complexityVisitor) markAsElseNode(n ast.Node) { + if v.elseNodes == nil { + v.elseNodes = make(map[ast.Node]bool) + } + + v.elseNodes[n] = true +} + +func (v *complexityVisitor) markedAsElseNode(n ast.Node) bool { + if v.elseNodes == nil { + return false + } + + return v.elseNodes[n] +} + +func (v *complexityVisitor) markCalculated(e ast.Expr) { + if v.calculatedExprs == nil { + v.calculatedExprs = make(map[ast.Expr]bool) + } + + v.calculatedExprs[e] = true +} + +func (v *complexityVisitor) isCalculated(e ast.Expr) bool { + if v.calculatedExprs == nil { + return false + } + + return v.calculatedExprs[e] +} + +// Visit implements the ast.Visitor interface. +func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { + switch n := n.(type) { + case *ast.IfStmt: + return v.visitIfStmt(n) + case *ast.SwitchStmt: + return v.visitSwitchStmt(n) + case *ast.TypeSwitchStmt: + return v.visitTypeSwitchStmt(n) + case *ast.SelectStmt: + return v.visitSelectStmt(n) + case *ast.ForStmt: + return v.visitForStmt(n) + case *ast.RangeStmt: + return v.visitRangeStmt(n) + case *ast.FuncLit: + return v.visitFuncLit(n) + case *ast.BranchStmt: + return v.visitBranchStmt(n) + case *ast.BinaryExpr: + return v.visitBinaryExpr(n) + case *ast.CallExpr: + return v.visitCallExpr(n) + } + return v +} + +func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { + v.incIfComplexity(n) + + if n := n.Init; n != nil { + ast.Walk(v, n) + } + + ast.Walk(v, n.Cond) + + pure := !v.markedAsElseNode(n) // pure `if` statement, not an `else if` + if pure { + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + } else { + ast.Walk(v, n.Body) + } + + if _, ok := n.Else.(*ast.BlockStmt); ok { + v.incComplexity() + + ast.Walk(v, n.Else) + } else if _, ok := n.Else.(*ast.IfStmt); ok { + v.markAsElseNode(n.Else) + ast.Walk(v, n.Else) + } + + return nil +} + +func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { + v.nestIncComplexity() + + if n := n.Init; n != nil { + ast.Walk(v, n) + } + + if n := n.Tag; n != nil { + ast.Walk(v, n) + } + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visitor { + v.nestIncComplexity() + + if n := n.Init; n != nil { + ast.Walk(v, n) + } + + if n := n.Assign; n != nil { + ast.Walk(v, n) + } + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) visitSelectStmt(n *ast.SelectStmt) ast.Visitor { + v.nestIncComplexity() + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { + v.nestIncComplexity() + + if n := n.Init; n != nil { + ast.Walk(v, n) + } + + if n := n.Cond; n != nil { + ast.Walk(v, n) + } + + if n := n.Post; n != nil { + ast.Walk(v, n) + } + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) 
visitRangeStmt(n *ast.RangeStmt) ast.Visitor { + v.nestIncComplexity() + + if n := n.Key; n != nil { + ast.Walk(v, n) + } + + if n := n.Value; n != nil { + ast.Walk(v, n) + } + + ast.Walk(v, n.X) + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) visitFuncLit(n *ast.FuncLit) ast.Visitor { + ast.Walk(v, n.Type) + + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() + return nil +} + +func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { + if n.Label != nil { + v.incComplexity() + } + return v +} + +func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { + if (n.Op == token.LAND || n.Op == token.LOR) && !v.isCalculated(n) { + ops := v.collectBinaryOps(n) + + var lastOp token.Token + for _, op := range ops { + if lastOp != op { + v.incComplexity() + lastOp = op + } + } + } + return v +} + +func (v *complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { + if callIdent, ok := n.Fun.(*ast.Ident); ok { + obj, name := callIdent.Obj, callIdent.Name + if obj == v.name.Obj && name == v.name.Name { + // called by same function directly (direct recursion) + v.incComplexity() + } + } + return v +} + +func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { + v.markCalculated(exp) + switch exp := exp.(type) { + case *ast.BinaryExpr: + return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) + case *ast.ParenExpr: + // interest only on what inside paranthese + return v.collectBinaryOps(exp.X) + default: + return []token.Token{} + } +} + +func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { + if v.markedAsElseNode(n) { + v.incComplexity() + } else { + v.nestIncComplexity() + } +} + +func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) []token.Token { + var out []token.Token + if len(x) != 0 { + out = append(out, x...) + } + out = append(out, op) + if len(y) != 0 { + out = append(out, y...) + } + return out +} + +const Doc = `Find complex function using cognitive complexity calculation. + +The gocognit analysis repots functions or methods which the complexity is over +than the specified limit.` + +// Analyzer reports a diagnostic for every function or method which is +// too complex specified by its -over flag. 
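// Analyzer wires the check into the standard go/analysis framework: it depends on
// the inspect analyzer's AST inspector, visits every function declaration, and
// reports those whose cognitive complexity exceeds the -over flag (default 0).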
+var Analyzer = &analysis.Analyzer{ + Name: "gocognit", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +var ( + over int // -over flag +) + +func init() { + Analyzer.Flags.IntVar(&over, "over", over, "show functions with complexity > N only") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + fnDecl := n.(*ast.FuncDecl) + + fnName := funcName(fnDecl) + fnComplexity := Complexity(fnDecl) + + if fnComplexity > over { + pass.Reportf(fnDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over) + } + }) + + return nil, nil +} diff --git a/vendor/github.com/yeya24/promlinter/.gitignore b/vendor/github.com/yeya24/promlinter/.gitignore new file mode 100644 index 000000000..bffb9a029 --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/.gitignore @@ -0,0 +1,20 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# binary +bin + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea diff --git a/vendor/github.com/yeya24/promlinter/LICENSE b/vendor/github.com/yeya24/promlinter/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/yeya24/promlinter/Makefile b/vendor/github.com/yeya24/promlinter/Makefile new file mode 100644 index 000000000..d50db1ee7 --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/Makefile @@ -0,0 +1,39 @@ +GOOS := $(if $(GOOS),$(GOOS),linux) +GOARCH := $(if $(GOARCH),$(GOARCH),amd64) +GO=CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) GO111MODULE=on go +GOVERSION = $(shell $(GO) version | cut -c 14- | cut -d' ' -f1) + +GIT_COMMIT = $(shell git rev-parse --short HEAD) + +BUILDFLAGS ?= + +ifeq ($(shell expr ${GOVERSION} \>= 1.14), 1) + BUILDFLAGS += -mod=mod +endif + +PACKAGE_LIST := go list ./... +PACKAGES := $$($(PACKAGE_LIST)) +FILES_TO_FMT := $(shell find . -path -prune -o -name '*.go' -print) + +all: format build test + +format: vet fmt + +fmt: + @echo "gofmt" + @gofmt -w ${FILES_TO_FMT} + @git diff --exit-code . + +build: mod + $(GO) build ${BUILDFLAGS} -o ./bin/promlinter cmd/promlinter/main.go + +vet: + $(GO) vet ${BUILDFLAGS} ./... 
+ +mod: + @echo "go mod tidy" + $(GO) mod tidy + @git diff --exit-code -- go.mod + +test: + $(GO) test ${BUILDFLAGS} ./... -cover $(PACKAGES) diff --git a/vendor/github.com/yeya24/promlinter/README.md b/vendor/github.com/yeya24/promlinter/README.md new file mode 100644 index 000000000..c7e664103 --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/README.md @@ -0,0 +1,74 @@ +# promlinter + +A linter for checking Prometheus metrics name via promlint. + +![example](assets/example.png) + +## Installation + +### Build from source + +#### Requirements + +- Go >= 1.13 +- make + +``` bash +git clone https://github.com/yeya24/promlinter.git +make build +``` + +Then you can find the `promlinter` binary file in the `./bin` directory. + +### Download from release + +TBD + +## Usage + +``` bash +promlinter -h + +usage: promlinter [] [...] + +Prometheus metrics linter for Go code. + +This tool can cover most of the patterns of metrics naming issues, but it cannot detect metric values that can only be determined in the runtime. + +By default it doesn't output parsing failures, if you want to see them, you can add --strict flag to enable it. + +It is also supported to disable the lint functions using repeated flag --disable. Current supported functions are: + + [Help]: Help detects issues related to the help text for a metric. + + [MetricUnits]: MetricUnits detects issues with metric unit names. + + [Counter]: Counter detects issues specific to counters, as well as patterns that should only be used with counters. + + [HistogramSummaryReserved]: HistogramSummaryReserved detects when other types of metrics use names or labels reserved for use by histograms and/or summaries. + + [MetricTypeInName]: MetricTypeInName detects when metric types are included in the metric name. + + [ReservedChars]: ReservedChars detects colons in metric names. + + [CamelCase]: CamelCase detects metric names and label names written in camelCase. + + [UnitAbbreviations]: UnitAbbreviations detects abbreviated units in the metric name. + +Flags: + -h, --help Show context-sensitive help (also try --help-long and --help-man). + --version Show application version. + -s, --strict Strict mode. If true, linter will output more issues including parsing failures. + -d, --disable=DISABLE ... Disable lint functions (repeated).Supported options: Help, Counter, MetricUnits, HistogramSummaryReserved, MetricTypeInName, + ReservedChars, CamelCase, UnitAbbreviations + +Args: + [] Files to lint. 
+ +``` + +## Run tests + +``` bash +make test +``` diff --git a/vendor/github.com/yeya24/promlinter/go.mod b/vendor/github.com/yeya24/promlinter/go.mod new file mode 100644 index 000000000..941cabdda --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/go.mod @@ -0,0 +1,9 @@ +module github.com/yeya24/promlinter + +go 1.14 + +require ( + github.com/prometheus/client_golang v1.7.1 + github.com/prometheus/client_model v0.2.0 + gopkg.in/alecthomas/kingpin.v2 v2.2.6 +) diff --git a/vendor/github.com/yeya24/promlinter/go.sum b/vendor/github.com/yeya24/promlinter/go.sum new file mode 100644 index 000000000..303d57dc3 --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/go.sum @@ -0,0 +1,114 @@ +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/yeya24/promlinter/promlinter.go b/vendor/github.com/yeya24/promlinter/promlinter.go new file mode 100644 index 000000000..898336a6f --- /dev/null +++ b/vendor/github.com/yeya24/promlinter/promlinter.go @@ -0,0 +1,664 @@ +package promlinter + +import ( + "fmt" + "go/ast" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil/promlint" + dto "github.com/prometheus/client_model/go" +) + +var ( + metricsType map[string]dto.MetricType + constMetricArgNum map[string]int + validOptsFields map[string]bool + lintFuncText map[string][]string + LintFuncNames []string +) + +func init() { + metricsType = map[string]dto.MetricType{ + "Counter": dto.MetricType_COUNTER, + "NewCounter": dto.MetricType_COUNTER, + "NewCounterVec": dto.MetricType_COUNTER, + "Gauge": dto.MetricType_GAUGE, + "NewGauge": dto.MetricType_GAUGE, + "NewGaugeVec": dto.MetricType_GAUGE, + "NewHistogram": dto.MetricType_HISTOGRAM, + "NewHistogramVec": dto.MetricType_HISTOGRAM, + "NewSummary": dto.MetricType_SUMMARY, + "NewSummaryVec": dto.MetricType_SUMMARY, + } + + constMetricArgNum = map[string]int{ + "MustNewConstMetric": 3, + "MustNewHistogram": 4, + "MustNewSummary": 4, + "NewLazyConstMetric": 3, + } + + // Doesn't contain ConstLabels since we don't need this field here. + validOptsFields = map[string]bool{ + "Name": true, + "Namespace": true, + "Subsystem": true, + "Help": true, + } + + lintFuncText = map[string][]string{ + "Help": {"no help text"}, + "MetricUnits": {"use base unit"}, + "Counter": {"counter metrics should"}, + "HistogramSummaryReserved": {"non-histogram", "non-summary"}, + "MetricTypeInName": {"metric name should not include type"}, + "ReservedChars": {"metric names should not contain ':'"}, + "CamelCase": {"'snake_case' not 'camelCase'"}, + "lintUnitAbbreviations": {"metric names should not contain abbreviated units"}, + } + + LintFuncNames = []string{"Help", "MetricUnits", "Counter", "HistogramSummaryReserved", + "MetricTypeInName", "ReservedChars", "CamelCase", "lintUnitAbbreviations"} +} + +type Setting struct { + Strict bool + DisabledLintFuncs []string +} + +// Issue contains metric name, error text and metric position. 
+type Issue struct { + Text string + Metric string + Pos token.Position +} + +type MetricFamilyWithPos struct { + MetricFamily *dto.MetricFamily + Pos token.Position +} + +type visitor struct { + fs *token.FileSet + metrics []MetricFamilyWithPos + issues []Issue + strict bool +} + +type opt struct { + namespace string + subsystem string + name string +} + +func RunList(fs *token.FileSet, files []*ast.File, strict bool) []MetricFamilyWithPos { + v := &visitor{ + fs: fs, + metrics: make([]MetricFamilyWithPos, 0), + issues: make([]Issue, 0), + strict: strict, + } + + for _, file := range files { + ast.Walk(v, file) + } + + sort.Slice(v.metrics, func(i, j int) bool { + return v.metrics[i].Pos.String() < v.metrics[j].Pos.String() + }) + return v.metrics +} + +func RunLint(fs *token.FileSet, files []*ast.File, s Setting) []Issue { + v := &visitor{ + fs: fs, + metrics: make([]MetricFamilyWithPos, 0), + issues: make([]Issue, 0), + strict: s.Strict, + } + + for _, file := range files { + ast.Walk(v, file) + } + + // lint metrics + for _, mfp := range v.metrics { + problems, err := promlint.NewWithMetricFamilies([]*dto.MetricFamily{mfp.MetricFamily}).Lint() + if err != nil { + panic(err) + } + + for _, p := range problems { + for _, disabledFunc := range s.DisabledLintFuncs { + for _, pattern := range lintFuncText[disabledFunc] { + if strings.Contains(p.Text, pattern) { + goto END + } + } + } + + v.issues = append(v.issues, Issue{ + Pos: mfp.Pos, + Metric: p.Metric, + Text: p.Text, + }) + + END: + } + } + + sort.Slice(v.issues, func(i, j int) bool { + return v.issues[i].Pos.String() < v.issues[j].Pos.String() + }) + return v.issues +} + +func (v *visitor) Visit(n ast.Node) ast.Visitor { + if n == nil { + return v + } + + switch t := n.(type) { + case *ast.CallExpr: + return v.parseCallerExpr(t) + + case *ast.SendStmt: + return v.parseSendMetricChanExpr(t) + } + + return v +} + +func (v *visitor) parseCallerExpr(call *ast.CallExpr) ast.Visitor { + var ( + metricType dto.MetricType + methodName string + ok bool + ) + + switch stmt := call.Fun.(type) { + + /* + That's the case of setting alias . to client_golang/prometheus or promauto package. + + import . "github.com/prometheus/client_golang/prometheus" + metric := NewCounter(CounterOpts{}) + */ + case *ast.Ident: + if stmt.Name == "NewCounterFunc" { + return v.parseOpts(call.Args[0], dto.MetricType_COUNTER) + } + + if stmt.Name == "NewGaugeFunc" { + return v.parseOpts(call.Args[0], dto.MetricType_GAUGE) + } + + if metricType, ok = metricsType[stmt.Name]; !ok { + return v + } + methodName = stmt.Name + + /* + This case covers the most of cases to initialize metrics. + + prometheus.NewCounter(CounterOpts{}) + + promauto.With(nil).NewCounter(CounterOpts{}) + + factory := promauto.With(nil) + factory.NewCounter(CounterOpts{}) + + prometheus.NewCounterFunc() + */ + case *ast.SelectorExpr: + if stmt.Sel.Name == "NewCounterFunc" { + return v.parseOpts(call.Args[0], dto.MetricType_COUNTER) + } + + if stmt.Sel.Name == "NewGaugeFunc" { + return v.parseOpts(call.Args[0], dto.MetricType_GAUGE) + } + + if stmt.Sel.Name == "NewFamilyGenerator" && len(call.Args) == 5 { + return v.parseKSMMetrics(call.Args[0], call.Args[1], call.Args[2]) + } + + if metricType, ok = metricsType[stmt.Sel.Name]; !ok { + return v + } + methodName = stmt.Sel.Name + + default: + return v + } + + argNum := 1 + if strings.HasSuffix(methodName, "Vec") { + argNum = 2 + } + // The methods used to initialize metrics should have at least one arg. 
+	if len(call.Args) < argNum && v.strict {
+		v.issues = append(v.issues, Issue{
+			Pos:    v.fs.Position(call.Pos()),
+			Metric: "",
+			Text:   fmt.Sprintf("%s should have at least %d arguments", methodName, argNum),
+		})
+		return v
+	}
+
+	return v.parseOpts(call.Args[0], metricType)
+}
+
+func (v *visitor) parseOpts(optArg ast.Node, metricType dto.MetricType) ast.Visitor {
+	// position for the first arg of the CallExpr
+	optsPosition := v.fs.Position(optArg.Pos())
+	opts, help := v.parseOptsExpr(optArg)
+	if opts == nil {
+		return v
+	}
+	currentMetric := dto.MetricFamily{
+		Type: &metricType,
+		Help: help,
+	}
+
+	metricName := prometheus.BuildFQName(opts.namespace, opts.subsystem, opts.name)
+	// We skip the invalid metric if the name is an empty string.
+	// This kind of metric declaration might be used as a stub metric
+	// https://github.com/thanos-io/thanos/blob/main/cmd/thanos/tools_bucket.go#L538.
+	if metricName == "" {
+		return v
+	}
+	currentMetric.Name = &metricName
+
+	v.metrics = append(v.metrics, MetricFamilyWithPos{MetricFamily: &currentMetric, Pos: optsPosition})
+	return v
+}
+
+// Parser for kube-state-metrics generators.
+func (v *visitor) parseKSMMetrics(nameArg ast.Node, helpArg ast.Node, metricTypeArg ast.Node) ast.Visitor {
+	optsPosition := v.fs.Position(nameArg.Pos())
+	currentMetric := dto.MetricFamily{}
+	name, ok := v.parseValue("name", nameArg)
+	if !ok {
+		return v
+	}
+	currentMetric.Name = &name
+
+	help, ok := v.parseValue("help", helpArg)
+	if !ok {
+		return v
+	}
+	currentMetric.Help = &help
+
+	switch stmt := metricTypeArg.(type) {
+	case *ast.SelectorExpr:
+		if metricType, ok := metricsType[stmt.Sel.Name]; !ok {
+			return v
+		} else {
+			currentMetric.Type = &metricType
+		}
+	}
+
+	v.metrics = append(v.metrics, MetricFamilyWithPos{MetricFamily: &currentMetric, Pos: optsPosition})
+	return v
+}
+
+func (v *visitor) parseSendMetricChanExpr(chExpr *ast.SendStmt) ast.Visitor {
+	var (
+		ok             bool
+		requiredArgNum int
+		methodName     string
+		metricType     dto.MetricType
+	)
+
+	call, ok := chExpr.Value.(*ast.CallExpr)
+	if !ok {
+		return v
+	}
+
+	switch stmt := call.Fun.(type) {
+	case *ast.Ident:
+		if requiredArgNum, ok = constMetricArgNum[stmt.Name]; !ok {
+			return v
+		}
+		methodName = stmt.Name
+
+	case *ast.SelectorExpr:
+		if requiredArgNum, ok = constMetricArgNum[stmt.Sel.Name]; !ok {
+			return v
+		}
+		methodName = stmt.Sel.Name
+	}
+
+	if len(call.Args) < requiredArgNum && v.strict {
+		v.issues = append(v.issues, Issue{
+			Metric: "",
+			Pos:    v.fs.Position(call.Pos()),
+			Text:   fmt.Sprintf("%s should have at least %d arguments", methodName, requiredArgNum),
+		})
+		return v
+	}
+
+	name, help := v.parseConstMetricOptsExpr(call.Args[0])
+	if name == nil {
+		return v
+	}
+
+	metric := &dto.MetricFamily{
+		Name: name,
+		Help: help,
+	}
+	switch methodName {
+	case "MustNewConstMetric", "NewLazyConstMetric":
+		switch t := call.Args[1].(type) {
+		case *ast.Ident:
+			metric.Type = getConstMetricType(t.Name)
+		case *ast.SelectorExpr:
+			metric.Type = getConstMetricType(t.Sel.Name)
+		}
+
+	case "MustNewHistogram":
+		metricType = dto.MetricType_HISTOGRAM
+		metric.Type = &metricType
+	case "MustNewSummary":
+		metricType = dto.MetricType_SUMMARY
+		metric.Type = &metricType
+	}
+
+	v.metrics = append(v.metrics, MetricFamilyWithPos{MetricFamily: metric, Pos: v.fs.Position(call.Pos())})
+	return v
+}
+
+func (v *visitor) parseOptsExpr(n ast.Node) (*opt, *string) {
+	switch stmt := n.(type) {
+	case *ast.CompositeLit:
+		return v.parseCompositeOpts(stmt)
+
+	case *ast.Ident:
+		if stmt.Obj != nil {
+ if decl, ok := stmt.Obj.Decl.(*ast.AssignStmt); ok && len(decl.Rhs) > 0 { + if t, ok := decl.Rhs[0].(*ast.CompositeLit); ok { + return v.parseCompositeOpts(t) + } + } + } + + case *ast.UnaryExpr: + return v.parseOptsExpr(stmt.X) + } + + return nil, nil +} + +func (v *visitor) parseCompositeOpts(stmt *ast.CompositeLit) (*opt, *string) { + metricOption := &opt{} + var help *string + for _, elt := range stmt.Elts { + kvExpr, ok := elt.(*ast.KeyValueExpr) + if !ok { + continue + } + object, ok := kvExpr.Key.(*ast.Ident) + if !ok { + continue + } + + if _, ok := validOptsFields[object.Name]; !ok { + continue + } + + // If failed to parse field value, stop parsing. + stringLiteral, ok := v.parseValue(object.Name, kvExpr.Value) + if !ok { + return nil, nil + } + + switch object.Name { + case "Namespace": + metricOption.namespace = stringLiteral + case "Subsystem": + metricOption.subsystem = stringLiteral + case "Name": + metricOption.name = stringLiteral + case "Help": + help = &stringLiteral + } + } + + return metricOption, help +} + +func (v *visitor) parseValue(object string, n ast.Node) (string, bool) { + switch t := n.(type) { + + // make sure it is string literal value + case *ast.BasicLit: + if t.Kind == token.STRING { + return mustUnquote(t.Value), true + } + + return "", false + + case *ast.Ident: + if t.Obj == nil { + return "", false + } + + if vs, ok := t.Obj.Decl.(*ast.ValueSpec); ok { + return v.parseValue(object, vs) + } + + case *ast.ValueSpec: + if len(t.Values) == 0 { + return "", false + } + return v.parseValue(object, t.Values[0]) + + // For binary expr, we only support adding two strings like `foo` + `bar`. + case *ast.BinaryExpr: + if t.Op == token.ADD { + x, ok := v.parseValue(object, t.X) + if !ok { + return "", false + } + + y, ok := v.parseValue(object, t.Y) + if !ok { + return "", false + } + + return x + y, true + } + + // We can only cover some basic cases here + case *ast.CallExpr: + return v.parseValueCallExpr(object, t) + + default: + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(n.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing %s with type %T is not supported", object, t), + }) + } + } + + return "", false +} + +func (v *visitor) parseValueCallExpr(object string, call *ast.CallExpr) (string, bool) { + var ( + methodName string + namespace string + subsystem string + name string + ok bool + ) + switch expr := call.Fun.(type) { + case *ast.SelectorExpr: + methodName = expr.Sel.Name + case *ast.Ident: + methodName = expr.Name + default: + return "", false + } + + if methodName == "BuildFQName" && len(call.Args) == 3 { + namespace, ok = v.parseValue("namespace", call.Args[0]) + if !ok { + return "", false + } + subsystem, ok = v.parseValue("subsystem", call.Args[1]) + if !ok { + return "", false + } + name, ok = v.parseValue("name", call.Args[2]) + if !ok { + return "", false + } + return prometheus.BuildFQName(namespace, subsystem, name), true + } + + if v.strict { + v.issues = append(v.issues, Issue{ + Metric: "", + Pos: v.fs.Position(call.Pos()), + Text: fmt.Sprintf("parsing %s with function %s is not supported", object, methodName), + }) + } + + return "", false +} + +func (v *visitor) parseConstMetricOptsExpr(n ast.Node) (*string, *string) { + switch stmt := n.(type) { + case *ast.CallExpr: + return v.parseNewDescCallExpr(stmt) + + case *ast.Ident: + if stmt.Obj != nil { + switch t := stmt.Obj.Decl.(type) { + case *ast.AssignStmt: + if len(t.Rhs) > 0 { + if call, ok := t.Rhs[0].(*ast.CallExpr); ok { + return 
v.parseNewDescCallExpr(call) + } + } + case *ast.ValueSpec: + if len(t.Values) > 0 { + if call, ok := t.Values[0].(*ast.CallExpr); ok { + return v.parseNewDescCallExpr(call) + } + } + } + + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(stmt.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing desc of type %T is not supported", stmt.Obj.Decl), + }) + } + } + + default: + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(stmt.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing desc of type %T is not supported", stmt), + }) + } + } + + return nil, nil +} + +func (v *visitor) parseNewDescCallExpr(call *ast.CallExpr) (*string, *string) { + var ( + help string + name string + ok bool + ) + + switch expr := call.Fun.(type) { + case *ast.Ident: + if expr.Name != "NewDesc" { + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(expr.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing desc with function %s is not supported", expr.Name), + }) + } + return nil, nil + } + case *ast.SelectorExpr: + if expr.Sel.Name != "NewDesc" { + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(expr.Sel.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing desc with function %s is not supported", expr.Sel.Name), + }) + } + return nil, nil + } + default: + if v.strict { + v.issues = append(v.issues, Issue{ + Pos: v.fs.Position(expr.Pos()), + Metric: "", + Text: fmt.Sprintf("parsing desc of %T is not supported", expr), + }) + } + return nil, nil + } + + // k8s.io/component-base/metrics.NewDesc has 6 args + // while prometheus.NewDesc has 4 args + if len(call.Args) < 4 && v.strict { + v.issues = append(v.issues, Issue{ + Metric: "", + Pos: v.fs.Position(call.Pos()), + Text: "NewDesc should have at least 4 args", + }) + return nil, nil + } + + name, ok = v.parseValue("fqName", call.Args[0]) + if !ok { + return nil, nil + } + help, ok = v.parseValue("help", call.Args[1]) + if !ok { + return nil, nil + } + + return &name, &help +} + +func mustUnquote(str string) string { + stringLiteral, err := strconv.Unquote(str) + if err != nil { + panic(err) + } + + return stringLiteral +} + +func getConstMetricType(name string) *dto.MetricType { + metricType := dto.MetricType_UNTYPED + if name == "CounterValue" { + metricType = dto.MetricType_COUNTER + } else if name == "GaugeValue" { + metricType = dto.MetricType_GAUGE + } + + return &metricType +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go index 2fc1ec031..a014ac92a 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego package argon2 diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 74a6e7332..b2cc05150 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build amd64,!gccgo,!appengine +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go index baf7b551d..167c59d2d 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine gccgo +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package argon2 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 4d31dd0fd..56bfaaa17 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.7,amd64,!gccgo,!appengine +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 5593b1b3d..4b9daa18d 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.7,amd64,!gccgo,!appengine +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego #include "textflag.h" @@ -282,14 +283,12 @@ TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment MOVQ blocks_len+32(FP), DI MOVQ SP, DX - MOVQ SP, R9 - ADDQ $31, R9 - ANDQ $~31, R9 - MOVQ R9, SP + ADDQ $31, DX + ANDQ $~31, DX - MOVQ CX, 16(SP) + MOVQ CX, 16(DX) XORQ CX, CX - MOVQ CX, 24(SP) + MOVQ CX, 24(DX) VMOVDQU ·AVX2_c40<>(SB), Y4 VMOVDQU ·AVX2_c48<>(SB), Y5 @@ -301,33 +300,33 @@ TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment MOVQ 0(BX), R8 MOVQ 8(BX), R9 - MOVQ R9, 8(SP) + MOVQ R9, 8(DX) loop: ADDQ $128, R8 - MOVQ R8, 0(SP) + MOVQ R8, 0(DX) CMPQ R8, $128 JGE noinc INCQ R9 - MOVQ R9, 8(SP) + MOVQ R9, 8(DX) noinc: VMOVDQA Y8, Y0 VMOVDQA Y9, Y1 VMOVDQA Y6, Y2 - VPXOR 0(SP), Y7, Y3 + VPXOR 0(DX), Y7, Y3 LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(SP) - VMOVDQA Y13, 64(SP) - VMOVDQA Y14, 96(SP) - VMOVDQA Y15, 128(SP) + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(SP) - VMOVDQA Y13, 192(SP) - VMOVDQA Y14, 224(SP) - VMOVDQA Y15, 256(SP) + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() @@ -347,8 +346,8 @@ noinc: LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) - ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) VPXOR Y0, Y8, Y8 VPXOR Y1, Y9, Y9 @@ -366,7 +365,6 @@ noinc: VMOVDQU Y9, 32(AX) VZEROUPPER - MOVQ DX, SP RET #define VPUNPCKLQDQ_X2_X2_X15 
BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA @@ -584,11 +582,9 @@ TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment MOVQ blocks_base+24(FP), SI MOVQ blocks_len+32(FP), DI - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 VMOVDQU ·AVX_c40<>(SB), X0 VMOVDQU ·AVX_c48<>(SB), X1 @@ -596,8 +592,8 @@ TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment VMOVDQA X1, X9 VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) VMOVDQU 0(AX), X10 VMOVDQU 16(AX), X11 @@ -624,35 +620,35 @@ noinc: VMOVDQU ·AVX_iv2<>(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(SP), X7 + VMOVDQA 0(R10), X7 LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(SP) - VMOVDQA X13, 32(SP) - VMOVDQA X14, 48(SP) - VMOVDQA X15, 64(SP) + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX() LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(SP) - VMOVDQA X13, 96(SP) - VMOVDQA X14, 112(SP) - VMOVDQA X15, 128(SP) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(SP) - VMOVDQA X13, 160(SP) - VMOVDQA X14, 176(SP) - VMOVDQA X15, 192(SP) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX() LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(SP) - VMOVDQA X13, 224(SP) - VMOVDQA X14, 240(SP) - VMOVDQA X15, 256(SP) + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() @@ -712,14 +708,14 @@ noinc: HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) SHUFFLE_AVX_INV() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) SHUFFLE_AVX_INV() VMOVDQU 32(AX), X14 @@ -746,5 +742,4 @@ noinc: MOVQ R9, 8(BX) VZEROUPPER - MOVQ BP, SP RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index 30e2fcd58..5fa1b3284 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !go1.7,amd64,!gccgo,!appengine +//go:build !go1.7 && amd64 && gc && !purego +// +build !go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index 578e947b3..ae75eb9af 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego #include "textflag.h" @@ -118,15 +119,13 @@ TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment MOVQ blocks_base+24(FP), SI MOVQ blocks_len+32(FP), DI - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) MOVOU ·c40<>(SB), X13 MOVOU ·c48<>(SB), X14 @@ -156,35 +155,35 @@ noinc: MOVOU ·iv2<>(SB), X6 PXOR X8, X6 - MOVO 0(SP), X7 + MOVO 0(R10), X7 LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(SP) - MOVO X9, 32(SP) - MOVO X10, 48(SP) - MOVO X11, 64(SP) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(SP) - MOVO X9, 96(SP) - MOVO X10, 112(SP) - MOVO X11, 128(SP) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(SP) - MOVO X9, 160(SP) - MOVO X10, 176(SP) - MOVO X11, 192(SP) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(SP) - MOVO X9, 224(SP) - MOVO X10, 240(SP) - MOVO X11, 256(SP) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) @@ -244,14 +243,14 @@ noinc: HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 
240(R10), 256(R10), X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) MOVOU 32(AX), X10 @@ -277,5 +276,4 @@ noinc: MOVQ R8, 0(BX) MOVQ R9, 8(BX) - MOVQ BP, SP RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index da156a1ba..b0137cdf0 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine gccgo +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index efd689af4..9d8633963 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.9 // +build go1.9 package blake2b diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index e7de24ee6..c79aa73f2 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -137,11 +137,13 @@ func trimOWS(x string) string { // contains token amongst its comma-separated tokens, ASCII // case-insensitively. func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') { + if tokenEqual(trimOWS(v[:comma]), token) { + return true + } + v = v[comma+1:] } - return tokenEqual(v, token) + return tokenEqual(trimOWS(v), token) } // lowerASCII returns the ASCII lowercase version of b. diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 000000000..190f12234 --- /dev/null +++ b/vendor/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 000000000..851224595 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. 
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+    autotools-dev libtool pkg-config zlib1g-dev \
+    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+    automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+    autoconf automake autotools-dev \
+    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+    cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget https://curl.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 000000000..55fd826f7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 000000000..360d5aa37
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but shares a lot of code
+  and is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go
new file mode 100644
index 000000000..c9a0cf3b4
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ciphers.go
@@ -0,0 +1,641 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+// A list of the possible cipher suite ids.
Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + 
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 
= 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 
0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + 
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 
0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000..3a67636fe --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,278 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. 
share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + traceGetConn(req, addr) + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + traceGetConn(req, addr) + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + _ incomparable + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. 
Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + _ incomparable + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
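Illustrative sketch (not part of the vendored file): the pool above coalesces duplicate work per key twice, once for dials (dialCall) and once for NewClientConn upgrades (addConnCall). The same pattern, reduced to a minimal standalone form with hypothetical names (inflightGroup, doCall) and string results instead of *ClientConn, looks like this:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type call struct {
	done chan struct{}
	res  string
	err  error
}

type inflightGroup struct {
	mu    sync.Mutex
	calls map[string]*call
}

// doCall runs fn at most once per key at a time; concurrent callers for the
// same key block on the first caller's done channel and share its result.
func (g *inflightGroup) doCall(key string, fn func() (string, error)) (string, error) {
	g.mu.Lock()
	if c, ok := g.calls[key]; ok {
		g.mu.Unlock()
		<-c.done
		return c.res, c.err
	}
	c := &call{done: make(chan struct{})}
	if g.calls == nil {
		g.calls = make(map[string]*call)
	}
	g.calls[key] = c
	g.mu.Unlock()

	c.res, c.err = fn()
	close(c.done)

	g.mu.Lock()
	delete(g.calls, key)
	g.mu.Unlock()
	return c.res, c.err
}

func main() {
	var (
		g     inflightGroup
		dials int32
		wg    sync.WaitGroup
	)
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All three callers ask for the same key at once.
			_, _ = g.doCall("example.org:443", func() (string, error) {
				atomic.AddInt32(&dials, 1)
				time.Sleep(10 * time.Millisecond) // stand-in for a real dial
				return "conn", nil
			})
		}()
	}
	wg.Wait()
	fmt.Println("dials:", atomic.LoadInt32(&dials)) // typically 1: the dial was shared
}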
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000..71f2c4631 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,133 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." 
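Illustrative sketch (not part of the vendored file): how the exported error types above compose. ErrCode is the raw wire code, ConnectionError fatally scopes a code to the whole connection, and StreamError scopes it to one stream; all of them print their registry names via ErrCode.String.

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// Codes print their registry names; unknown values fall back to hex.
	fmt.Println(http2.ErrCodeFlowControl) // FLOW_CONTROL_ERROR
	fmt.Println(http2.ErrCode(0x99))      // unknown error code 0x99

	// A connection-level error versus a per-stream error.
	fmt.Println(http2.ConnectionError(http2.ErrCodeProtocol)) // connection error: PROTOCOL_ERROR
	fmt.Println(http2.StreamError{StreamID: 3, Code: http2.ErrCodeRefusedStream})
	// stream error: stream ID 3; REFUSED_STREAM
}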
+type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. +type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000..b51f0e0cf --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + _ incomparable + + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true + } + return false +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000..514c126c5 --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1614 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
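Illustrative sketch (not part of the vendored file): the overflow guard in (*flow).add above, re-implemented on a bare int32 so it can be tested in isolation. addWindow is a hypothetical name; the real type also mirrors every change into the shared connection-level flow.

package main

import "fmt"

// addWindow mirrors the guard in (*flow).add: it returns the updated window
// and false when adding n would overflow int32.
func addWindow(window, n int32) (int32, bool) {
	sum := window + n
	if (sum > n) == (window > 0) {
		return sum, true
	}
	return window, false
}

func main() {
	fmt.Println(addWindow(65535, 1<<20)) // 1114111 true
	fmt.Println(addWindow(1<<31-1, 1))   // 2147483647 false (would overflow)
}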
+ +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
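Illustrative sketch (not part of the vendored file): Flags is a plain bitmask, so Has reports whether every requested bit is set. Using the exported constants:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	f := http2.FlagDataEndStream | http2.FlagDataPadded // 0x1 | 0x8

	fmt.Println(f.Has(http2.FlagDataEndStream))                        // true
	fmt.Println(f.Has(http2.FlagDataEndStream | http2.FlagDataPadded)) // true: all bits present
	fmt.Println(f.Has(http2.FlagHeadersPriority))                      // false: 0x20 is not set
}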
+// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. 
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. 
This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. 
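Illustrative sketch (not part of the vendored file): because a Framer both writes and reads, a DATA frame can be round-tripped through one in-memory buffer. Stream ID 1 and the payload are arbitrary here.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // write into and read back from the same buffer

	if err := fr.WriteData(1, true, []byte("hello")); err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	df := f.(*http2.DataFrame)
	fmt.Printf("stream=%d end=%v data=%q\n", df.StreamID, df.StreamEnded(), df.Data())
	// stream=1 end=true data="hello"
}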
+// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } + f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true + } + } + return 0, false +} + +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). 
+func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { + return err + } + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. 
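Illustrative sketch (not part of the vendored file): a SETTINGS round trip. The frame keeps the raw 6-byte records; Setting, Value and ForeachSetting decode them on demand. The setting values below are arbitrary.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	err := fr.WriteSettings(
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 65535},
	)
	if err != nil {
		panic(err)
	}

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	sf := f.(*http2.SettingsFrame)

	if v, ok := sf.Value(http2.SettingMaxFrameSize); ok {
		fmt.Println("max frame size:", v) // max frame size: 1048576
	}
	_ = sf.ForeachSetting(func(s http2.Setting) error {
		fmt.Println(s) // e.g. [MAX_FRAME_SIZE = 1048576]
		return nil
	})
}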
+func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. 
+ Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
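Illustrative sketch (not part of the vendored file): WriteHeaders expects an already HPACK-encoded block fragment, and setting Framer.ReadMetaHeaders makes ReadFrame hand back the decoded MetaHeadersFrame. The header values below are arbitrary.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// Encode a minimal request header block by hand.
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
	} {
		if err := enc.WriteField(hf); err != nil {
			panic(err)
		}
	}

	err := fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: block.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
	if err != nil {
		panic(err)
	}

	// With ReadMetaHeaders set, ReadFrame decodes HEADERS (+ CONTINUATION)
	// into a MetaHeadersFrame.
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println(mh.PseudoValue("path"), mh.StreamEnded()) // / true
}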
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. 
+// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. + // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. 
+ BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. 
+// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httpguts.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if 
err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..5bf62b032 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000..9933c9f8c --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
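A minimal, self-contained sketch of the pattern the file below implements (an illustrative re-implementation, since the vendored helpers are unexported): parse the current goroutine ID out of runtime.Stack output and panic when code runs on an unexpected goroutine, gated by the same DEBUG_HTTP2_GOROUTINES switch.

package main

import (
	"bytes"
	"fmt"
	"os"
	"runtime"
	"strconv"
)

var debugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

// goroutineID extracts the current goroutine's ID from the first line of
// runtime.Stack output ("goroutine 4707 [...]"), the same trick used below.
func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	n, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		panic(err)
	}
	return n
}

func main() {
	owner := goroutineID() // the goroutine that "owns" some state
	done := make(chan struct{})
	go func() {
		defer close(done)
		// checkNotOn-style assertion: this code must NOT run on the owner.
		if debugGoroutines && goroutineID() == owner {
			panic("running on the wrong goroutine")
		}
		fmt.Println("worker goroutine", goroutineID(), "is not the owner", owner)
	}()
	<-done
}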
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+	if !DebugGoroutines {
+		return 0
+	}
+	return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 000000000..c3ff3fa1c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,88 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package http2 + +import ( + "net/http" + "strings" + "sync" +) + +var ( + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case +) + +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 000000000..97f17831f --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. 
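For illustration (not part of the vendored file), the deferred "Header Table Size Update" described above can be observed through the exported Encoder API; the sizes and field values below are arbitrary:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Plain field: no table size update is pending yet.
	enc.WriteField(hpack.HeaderField{Name: "cache-control", Value: "no-cache"})
	fmt.Printf("first field:  % x\n", buf.Bytes())

	// Shrink the dynamic table. As the documentation above notes, the size
	// update bytes are emitted at the start of the next WriteField, before
	// the field itself.
	buf.Reset()
	enc.SetMaxDynamicTableSize(256)
	enc.WriteField(hpack.HeaderField{Name: "cache-control", Value: "no-cache"})
	fmt.Printf("after shrink: % x\n", buf.Bytes())
}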
+func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. +func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. 
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000..85f18a2b0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,504 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. 
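Before the decoder types that follow, a worked example of the prefixed integer representation implemented by appendVarInt above (RFC 7541, Section 5.1). Since appendVarInt is unexported, the helper below is a standalone re-implementation for illustration; the name encodeVarInt is ours, not part of the package.

package main

import "fmt"

// encodeVarInt mirrors the appendVarInt logic above: an integer encoded with
// an n-bit prefix, with 7-bit continuation bytes for the remainder.
func encodeVarInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541 Appendix C.1.2: encoding 1337 with a 5-bit prefix
	// yields the bytes 0x1f 0x9a 0x0a.
	fmt.Printf("% x\n", encodeVarInt(nil, 5, 1337)) // prints: 1f 9a 0a
}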
+type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer + + firstField bool // processing the first field of the header block +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + firstField: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. 
If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. + if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. 
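As a usage sketch (not part of the vendored file), a header block produced by the Encoder can be fed back through the Decoder, either via DecodeFull (defined next) or the incremental Write/Close path shown here; the field values are illustrative:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a small header block...
	var block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/plain"})

	// ...and decode it again. The emit callback runs once per decoded field,
	// during Write, as described above.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("decoded %s = %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(block.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}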
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + d.firstField = true + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + d.firstField = false + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. 
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
+	// beginning of the first header block following the change to the dynamic table size.
+	if !d.firstField && d.dynTab.size > 0 {
+		return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
+	}
+
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway.
This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 000000000..a1ab2f056 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,229 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + rootHuffmanNode := getRootHuffmanNode() + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
+type node struct {
+	_ incomparable
+
+	// children is non-nil for internal nodes
+	children *[256]*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits in this code
+	sym     byte  // the symbol in question
+}
+
+func newInternalNode() *node {
+	return &node{children: new([256]*node)}
+}
+
+var (
+	buildRootOnce       sync.Once
+	lazyRootHuffmanNode *node
+)
+
+func getRootHuffmanNode() *node {
+	buildRootOnce.Do(buildRootHuffmanNode)
+	return lazyRootHuffmanNode
+}
+
+func buildRootHuffmanNode() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	lazyRootHuffmanNode = newInternalNode()
+	// allocate a leaf node for each of the 256 symbols
+	leaves := new([256]node)
+
+	for sym, code := range huffmanCodes {
+		codeLen := huffmanCodeLen[sym]
+
+		cur := lazyRootHuffmanNode
+		for codeLen > 8 {
+			codeLen -= 8
+			i := uint8(code >> codeLen)
+			if cur.children[i] == nil {
+				cur.children[i] = newInternalNode()
+			}
+			cur = cur.children[i]
+		}
+		shift := 8 - codeLen
+		start, end := int(uint8(code<<shift)), int(1<<shift)
+
+		leaves[sym].sym = byte(sym)
+		leaves[sym].codeLen = codeLen
+		for i := start; i < start+end; i++ {
+			cur.children[i] = &leaves[sym]
+		}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is round up to byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..a66cfbea6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in hpack, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation. In
+	// practice, we will have one dynamic table per HTTP/2 connection.
If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
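The 1-based indexing described here can be seen end to end through the exported Decoder: the bytes in the sketch below are "Indexed Header Field" representations (high bit set) that point at entries 2 and 7 of the static table listed further down. This is an illustrative example, not part of the vendored file; idToIndex, defined next, performs the id-to-index conversion it relies on.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	d := hpack.NewDecoder(4096, func(hpack.HeaderField) {})
	// 0x82 -> static index 2 (:method: GET), 0x87 -> static index 7 (:scheme: https).
	fields, err := d.DecodeFull([]byte{0x82, 0x87})
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}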
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..5571ccfd2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,385 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
+// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
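A small sketch (not part of the vendored file) of how Setting values travel through the Framer; the particular IDs and values are arbitrary, and Valid, defined next, is what the package uses elsewhere to reject out-of-range values in received settings:

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var conn bytes.Buffer
	fr := http2.NewFramer(&conn, &conn)

	// Write a SETTINGS frame carrying a couple of parameters...
	err := fr.WriteSettings(
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 128},
	)
	if err != nil {
		log.Fatal(err)
	}

	// ...and read it back, iterating the settings it carries.
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	sf := f.(*http2.SettingsFrame)
	sf.ForeachSetting(func(s http2.Setting) error {
		fmt.Println(s) // uses Setting.String, e.g. [MAX_FRAME_SIZE = 1048576]
		return nil
	})
}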
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httpguts.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httpguts.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +func httpCodeString(code int) string { + switch code { + case 200: + return "200" + case 404: + return "404" + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. +type bufferedWriter struct { + _ incomparable + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. 
Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + _ incomparable + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..cc0baa819 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..2a5399ec4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,168 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + unread int // bytes unread when done + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return p.unread + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + p.unread += len(d) + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. 
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + if p.b != nil { + p.unread += p.b.Len() + } + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..e125bbd2a --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2984 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
+ maxQueuedControlFrames = 10000 +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. 
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// maxQueuedControlFrames is the maximum number of control frames like +// SETTINGS, PING and RST_STREAM that will be queued for writing before +// the connection is closed to prevent memory exhaustion attacks. +func (s *Server) maxQueuedControlFrames() int { + // TODO: if anybody asks, add a Server field, and remember to define the + // behavior of negative values. + return maxQueuedControlFrames +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } + conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) context() context.Context { + if o != nil && o.Context != nil { + return o.Context + } + return context.Background() +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. 
+// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. 
+ } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(opts.context()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx context.Context + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ queuedControlFrames int // control frames in the writeSched queue + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. 
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx context.Context + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + buildCommonHeaderMapsOnce() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + _ incomparable + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // If the peer is causing us to generate a lot of control frames, + // but not reading them from us, assume they are trying to make us + // run out of memory. + if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + sc.vlogf("http2: too many control frames in send queue, closing connection") + return + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + if wr.isControl() { + sc.queuedControlFrames++ + // For extra safety, detect wraparounds, which should not happen, + // and pull the plug. + if sc.queuedControlFrames < 0 { + sc.conn.Close() + } + } + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. 
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr: wr, err: err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. 
+// +// If a frame isn't being written and we need to send one, the best frame +// to send is selected by writeSched. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + if wr.isControl() { + sc.queuedControlFrames-- + } + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY with an error code (non-graceful shutdown), the +// connection will close after goAwayTimeout. +// +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. 
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." 
+ return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be + // acknowledged individually, even if multiple are received before the ACK. 
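+ // Note: needToSendSettingsAck is a single flag, so SETTINGS frames that
+ // arrive while another frame write is still in flight may end up being
+ // acknowledged by a single ACK, which is what the TODO above refers to.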
+ sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + id := f.Header().StreamID + if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) { + // Discard all DATA frames if the GOAWAY is due to an + // error, or: + // + // Section 6.8: After sending a GOAWAY frame, the sender + // can discard frames for streams initiated by the + // receiver with identifiers higher than the identified + // last stream. + return nil + } + + data := f.Data() + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 6.1: "DATA frames MUST be associated with a + // stream. If a DATA frame is received whose stream + // identifier field is 0x0, the recipient MUST respond + // with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + // + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." 
+ if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + sc.sendWindowUpdate(nil, int(f.Length)-wrote) + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). 
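+// If the peer declared a Content-Length that doesn't match the number of
+// DATA bytes actually received, the mismatch is surfaced to the handler as
+// an error on the request body rather than a normal io.EOF.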
+func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. 
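+ // REFUSED_STREAM signals that the stream was closed before any
+ // processing happened, so the client may safely retry the request
+ // (RFC 7540, Section 8.1.4).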
+ return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !httpguts.ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := context.WithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { + req.ContentLength = int64(cl) + } else { + req.ContentLength = 0 + } + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = req.WithContext(st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
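+// runHandler recovers from panics in the handler: the panicking stream is
+// reset (handlerPanicRST) rather than the whole connection being torn down,
+// and the panic is logged unless it is http.ErrAbortHandler.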
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if e != nil && e != http.ErrAbortHandler { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>

") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + _ incomparable + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } + +func (rws *responseWriterState) hasNonemptyTrailers() bool { + for _, trailer := range rws.trailers { + if _, ok := rws.handlerHeader[trailer]; ok { + return true + } + } + return false +} + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. 
+ rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + if cl, err := strconv.ParseUint(clen, 10, 63); err == nil { + rws.sentContentLen = int64(cl) + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + // If the Content-Encoding is non-blank, we shouldn't + // sniff the body. See Issue golang.org/issue/31753. + ce := rws.snapHeader.Get("Content-Encoding") + hasCE := len(ce) > 0 + if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + // only send trailers if they have actually been defined by the + // server handler. + hasNonemptyTrailers := rws.hasNonemptyTrailers() + endStream := rws.handlerDone && !hasNonemptyTrailers + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. 
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && hasNonemptyTrailers { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 7230 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclaring them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. 
+ rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. 
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +var _ http.Pusher = (*responseWriter)(nil) + +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + if opts == nil { + opts = new(http.PushOptions) + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
+ switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiated. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. 
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..7688d72c3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2760 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "net/http/httptrace" + "net/textproto" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. 
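+ // (1 << 30 bytes is 1 GiB of additional connection-level receive window.)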
+ transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an unlimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // StrictMaxConcurrentStreams controls whether the server's + // SETTINGS_MAX_CONCURRENT_STREAMS should be respected + // globally. If false, new TCP connections are created to the + // server as needed to keep each under the per-connection + // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the + // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as + // a global limit and callers of RoundTrip block when needed, + // waiting for their turn. + StrictMaxConcurrentStreams bool + + // ReadIdleTimeout is the timeout after which a health check using ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response will is considered a received frame, so if + // there is no other traffic on the connection, the health check will + // be performed every ReadIdleTimeout interval. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + // Defaults to 15s. + PingTimeout time.Duration + + // t1, if non-nil, is the standard library Transport using + // this transport. 
Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +func (t *Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout + +} + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It returns an error if t1 has already been HTTP/2-enabled. +// +// Use ConfigureTransports instead to configure the HTTP/2 Transport. +func ConfigureTransport(t1 *http.Transport) error { + _, err := ConfigureTransports(t1) + return err +} + +// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2. +// It returns a new HTTP/2 Transport for further configuration. +// It returns an error if t1 has already been HTTP/2-enabled. +func ConfigureTransports(t1 *http.Transport) (*Transport, error) { + return configureTransports(t1) +} + +func configureTransports(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. 
+type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closing bool + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + lastIdle time.Time // time last idle + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + trace *httptrace.ClientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. 
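+// Cancellation can come either from the legacy req.Cancel channel or from
+// the request's context; if the request has neither, awaitRequestCancel
+// returns nil immediately without blocking.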
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := req.Context() + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. 
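+	//
+	// Illustrative use only (not part of the upstream sources): a caller that
+	// wants to reuse an existing connection or fail fast might write
+	//
+	//	res, err := t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
+	//	if err == ErrNoCachedConn {
+	//		// fall back to another transport, or dial explicitly
+	//	}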
+ OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + traceGotConn(req, cc, reused) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-req.Context().Done(): + return nil, req.Context().Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || req.Body == http.NoBody { + return req, nil + } + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. + if req.GetBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
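+		// (Added note: net/http populates GetBody automatically, since Go 1.8,
+		// for bodies built from *bytes.Buffer, *bytes.Reader and
+		// *strings.Reader, so such requests can be replayed here.)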
+ body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig.Clone() + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return t.t1.ExpectContinueTimeout +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, t.disableKeepAlives()) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if t.AllowHTTP { + cc.nextStreamID = 3 + } + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + cc.Close() + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received. + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + err := cc.Ping(ctx) + if err != nil { + cc.closeForLostPing() + cc.t.connPool().MarkDead(cc) + return + } +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. 
+type clientConnIdleState struct {
+	canTakeNewRequest bool
+	freshConn         bool // whether it's unused by any previous request
+}
+
+func (cc *ClientConn) idleState() clientConnIdleState {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cc.idleStateLocked()
+}
+
+func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
+	if cc.singleUse && cc.nextStreamID > 1 {
+		return
+	}
+	var maxConcurrentOkay bool
+	if cc.t.StrictMaxConcurrentStreams {
+		// We'll tell the caller we can take a new request to
+		// prevent the caller from dialing a new TCP
+		// connection, but then we'll block later before
+		// writing it.
+		maxConcurrentOkay = true
+	} else {
+		maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
+	}
+
+	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+		!cc.tooIdleLocked()
+	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
+	return
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	st := cc.idleStateLocked()
+	return st.canTakeNewRequest
+}
+
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+	// The Round(0) strips the monotonic clock reading so the
+	// times are compared based on their wall time. We don't want
+	// to reuse a connection that's been sitting idle during
+	// VM/laptop suspend if monotonic time was also frozen.
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+	cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeIfIdle() {
+	cc.mu.Lock()
+	if len(cc.streams) > 0 {
+		cc.mu.Unlock()
+		return
+	}
+	cc.closed = true
+	nextID := cc.nextStreamID
+	// TODO: do clients send GOAWAY too? maybe? Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
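+//
+// A minimal illustrative sketch (not part of the upstream sources): bound the
+// wait with a deadline and fall back to Close if it expires:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := cc.Shutdown(ctx); err != nil {
+//		cc.Close()
+//	}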
+func (cc *ClientConn) Shutdown(ctx context.Context) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *ClientConn) closeForError(err error) error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + return cc.closeForError(err) +} + +// closes the client connection immediately. In-flight requests are interrupted. +func (cc *ClientConn) closeForLostPing() error { + err := errors.New("http2: client connection lost") + return cc.closeForError(err) +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
+var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = httptrace.ContextClientTrace(req.Context()) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + defer func() { + cc.wmu.Lock() + werr := cc.werr + cc.wmu.Unlock() + if werr != nil { + cc.Close() + } + }() + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := req.Context() + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
+ bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + if hasBody && !bodyWritten { + <-bodyWriter.resc + } + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + <-bodyWriter.resc + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + bodyWritten = true + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), err + } + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } + return errClientConnUnusable + } + cc.lastIdle = time.Time{} + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
+ if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") + + errReqBodyTooLong = errors.New("http2: request body larger than specified content length") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + remainLen := actualContentLength(req) + hasContentLen := remainLen != -1 + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf[:len(buf)-1]) + if hasContentLen { + remainLen -= int64(n) + if remainLen == 0 && err == nil { + // The request body's Content-Length was predeclared and + // we just finished reading it all, but the underlying io.Reader + // returned the final chunk with a nil error (which is one of + // the two valid things a Reader can do at EOF). Because we'd prefer + // to send the END_STREAM bit early, double-check that we're actually + // at EOF. Subsequent reads should return (0, EOF) at this point. + // If either value is different, we return an error in one of two ways below. 
+ var n1 int + n1, err = body.Read(buf[n:]) + remainLen -= int64(n1) + } + if remainLen < 0 { + err = errReqBodyTooLong + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + return err + } + } + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +// requires cc.mu be held. 
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if strings.EqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. 
+ for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + _ incomparable + res *http.Response + err error +} + +// requires cc.mu be held. 
+func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + cc.lastIdle = time.Now() + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + _ incomparable + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t *time.Timer + if readIdleTimeout != 0 { + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() + } + for { + f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. 
+ // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 1xx responses). +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + regularFields := f.RegularFields() + strs := make([]string, len(regularFields)) + header := make(http.Header, len(regularFields)) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range regularFields { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + vv := header[key] + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. 
+ vv, strs = strs[:1:1], strs[1:] + vv[0] = hf.Value + header[key] = vv + } else { + header[key] = append(vv, hf.Value) + } + } + } + + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil { + res.ContentLength = int64(cl) + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + res.Uncompressed = true + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. 
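+//
+// From a caller's point of view this is simply http.Response.Body; as with any
+// response body, it should be drained and closed so the stream's flow-control
+// window is released promptly. Illustrative only (not part of the upstream
+// sources):
+//
+//	res, err := t.RoundTrip(req)
+//	if err != nil { /* handle */ }
+//	defer res.Body.Close()
+//	io.Copy(ioutil.Discard, res.Body)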
+type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. 
+ + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. 
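+			// (Added note: math.MaxInt32 == 2^31-1, so the check below rejects
+			// exactly the values the spec forbids.)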
+ if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. 
But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTripErr() error { return rt.err } +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + _ incomparable + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httpguts.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
+ const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + if s.timer.Stop() { + s.resc <- nil + } + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converting panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.Transport.RoundTrip(req) + if isNoCachedConnError(err) { + return nil, http.ErrSkipAltProtocol + } + return res, err +} + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + ci.Reused = reused + cc.mu.Lock() + ci.WasIdle = len(cc.streams) == 0 && reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *httptrace.ClientTrace) 
{ + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..3849bc263 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) 
+} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. 
+ endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // upper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000..f24d2b1e7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,248 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. No frames should be discarded except by CloseStream. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// isControl reports whether wr is a control frame for MaxQueuedControlFrames +// purposes. That includes non-stream frames and RST_STREAM frames. +func (wr FrameWriteRequest) isControl() bool { + return wr.stream == nil +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). 
Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. +func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. 
+func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. +func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 000000000..2618b2c11 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this function returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..9a7b9e581 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
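A package-internal sketch (not in this patch) of the ordering guarantee documented for Pop: frames queued on stream 0 are always drained before stream-specific frames, regardless of push order. All identifiers used here are defined in this file and in write.go/writesched.go above.

// exampleRandomSchedulerOrdering would live inside package http2, since
// FrameWriteRequest's fields are unexported.
func exampleRandomSchedulerOrdering() {
	ws := NewRandomWriteScheduler()
	// An RST_STREAM for stream 3 (StreamError satisfies writeFramer).
	ws.Push(FrameWriteRequest{write: StreamError{StreamID: 3, Code: ErrCodeCancel}})
	// A SETTINGS ACK, which has no stream and lands on the zero queue.
	ws.Push(FrameWriteRequest{write: writeSettingsAck{}})

	first, _ := ws.Pop()  // the SETTINGS ACK: the zero queue is checked first
	second, _ := ws.Pop() // then the RST_STREAM queued for stream 3
	_, _ = first, second
}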
+ for streamID, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..dc5225b6d --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. 
Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. 
+func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. 
The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. 
+type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. 
+ data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the 
list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..3ebf6f2da --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index f77701fe8..abbec2d44 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -154,14 +154,13 @@ var MIPS64X struct { // For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, // since there are no optional categories. There are some exceptions that also // require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). -// The struct is padded to avoid false sharing. +// those as well. The struct is padded to avoid false sharing. var PPC64 struct { _ CacheLinePad HasDARN bool // Hardware random number generator (requires kernel enablement) HasSCV bool // Syscall vectored (requires kernel enablement) IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 28b521643..8aaeef545 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -20,6 +20,7 @@ func archInit() { PPC64.IsPOWER8 = true } if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true PPC64.IsPOWER9 = true } diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 579d2d735..474efad0e 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -76,7 +76,7 @@ arguments can be passed to the kernel. The third is for low-level use by the ForkExec wrapper. Unlike the first two, it does not call into the scheduler to let it know that a system call is running. -When porting Go to an new architecture/OS, this file must be implemented for +When porting Go to a new architecture/OS, this file must be implemented for each GOOS/GOARCH pair. ### mksysnum @@ -107,7 +107,7 @@ prototype can be exported (capitalized) or not. Adding a new syscall often just requires adding a new `//sys` function prototype with the desired arguments and a capitalized name so it is exported. However, if you want the interface to the syscall to be different, often one will make an -unexported `//sys` prototype, an then write a custom wrapper in +unexported `//sys` prototype, and then write a custom wrapper in `syscall_${GOOS}.go`. ### types files @@ -137,7 +137,7 @@ some `#if/#elif` macros in your include statements. This script is used to generate the system's various constants. This doesn't just include the error numbers and error strings, but also the signal numbers -an a wide variety of miscellaneous constants. The constants come from the list +and a wide variety of miscellaneous constants. The constants come from the list of include files in the `includes_${uname}` variable. A regex then picks out the desired `#define` statements, and generates the corresponding Go constants. The error numbers and strings are generated from `#include `, and the diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index 7f29275fa..e0fcd9b3d 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index 98ebfad9d..d702d4adc 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 007358af8..6e6afcaa1 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -239,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -258,6 +259,7 @@ struct ltchars { #include #include +#include #include #if defined(__sparc__) @@ -501,6 +503,9 @@ ccflags="$@" $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || + $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || @@ -558,6 +563,7 @@ ccflags="$@" $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /^AUDIT_RECORD_MAGIC/ && @@ -593,6 +599,9 @@ ccflags="$@" $2 == "HID_MAX_DESCRIPTOR_SIZE" || $2 ~ /^_?HIDIOC/ || $2 ~ /^BUS_(USB|HIL|BLUETOOTH|VIRTUAL)$/ || + $2 ~ /^MTD/ || + $2 ~ /^OTP/ || + $2 ~ /^MEM/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9945e5f96..23f6b5760 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,6 +13,7 @@ package unix import ( + "fmt" "runtime" "syscall" "unsafe" @@ -398,6 +399,38 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. 
+ return buf[:n/SizeofKinfoProc], nil +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) /* diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 2dd7c8e34..41b91fdfb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -904,6 +904,46 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil } +type SockaddrNFC struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + raw RawSockaddrNFC +} + +func (sa *SockaddrNFC) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFC, nil +} + +type SockaddrNFCLLCP struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + DestinationSAP uint8 + SourceSAP uint8 + ServiceName string + raw RawSockaddrNFCLLCP +} + +func (sa *SockaddrNFCLLCP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + sa.raw.Dsap = sa.DestinationSAP + sa.raw.Ssap = sa.SourceSAP + if len(sa.ServiceName) > len(sa.raw.Service_name) { + return nil, 0, EINVAL + } + copy(sa.raw.Service_name[:], sa.ServiceName) + sa.raw.SetServiceNameLen(len(sa.ServiceName)) + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFCLLCP, nil +} + var socketProtocol = func(fd int) (int, error) { return GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) } @@ -1144,6 +1184,37 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil } + case AF_NFC: + proto, err := socketProtocol(fd) + if err != nil { + return nil, err + } + switch proto { + case NFC_SOCKPROTO_RAW: + pp := (*RawSockaddrNFC)(unsafe.Pointer(rsa)) + sa := &SockaddrNFC{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + } + return sa, nil + case NFC_SOCKPROTO_LLCP: + pp := (*RawSockaddrNFCLLCP)(unsafe.Pointer(rsa)) + if uint64(pp.Service_name_len) > uint64(len(pp.Service_name)) { + return nil, EINVAL + } + sa := &SockaddrNFCLLCP{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + DestinationSAP: pp.Dsap, + SourceSAP: pp.Ssap, + ServiceName: string(pp.Service_name[:pp.Service_name_len]), + } + return sa, nil + default: + return nil, EINVAL + } } return nil, EAFNOSUPPORT } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 7b52e5d8a..b430536c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -378,6 +378,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 28b764115..85cd97da0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -172,6 +172,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = 
uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 68877728e..39a864d4e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -256,6 +256,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 7ed703476..7f27ebf2f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -207,6 +207,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 06dec06fa..27aee81d9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -217,6 +217,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 8f0d0a5b5..3a5621e37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -229,6 +229,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 7e65e088d..cf0d36f76 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -215,6 +215,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 0b1f0d6da..5259a5fea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -100,6 +100,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sysnb pipe(p *[2]_C_int) (err error) 
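The new SockaddrNFCLLCP type and the per-architecture SetServiceNameLen helpers above are consumed through the usual Socket/Bind path on Linux. A rough sketch follows; the adapter index, the service name, and the choice of SOCK_STREAM with NFC_PROTO_NFC_DEP are illustrative assumptions rather than anything prescribed by this patch:

//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Open an NFC LLCP socket; SOCK_STREAM is assumed here for the sketch.
	fd, err := unix.Socket(unix.AF_NFC, unix.SOCK_STREAM, unix.NFC_SOCKPROTO_LLCP)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	// Bind to a hypothetical adapter 0. SetServiceNameLen is invoked inside
	// sockaddr(), so callers only fill the Go-level fields.
	sa := &unix.SockaddrNFCLLCP{
		DeviceIdx:   0,
		NFCProtocol: unix.NFC_PROTO_NFC_DEP,
		ServiceName: "urn:nfc:sn:example",
	}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatalf("bind: %v", err)
	}
}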
func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index ce9bcd317..8ef821e5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -188,6 +188,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index a1e45694b..a1c0574b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -129,6 +129,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + // Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. // mmap2 also requires arguments to be passed in a struct; it is currently not exposed in . func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 49055a3cf..de14b8898 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -116,6 +116,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 991996b60..5bb48ef54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e644eaf5e..11e570979 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 9c7c5e165..440900112 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1297,6 +1297,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b265abb25..64520d312 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 3df99f285..99e9a0e06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1276,6 +1276,11 @@ const ( SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 218d39906..4c8377114 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 47572aaa6..52f5bbc14 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1406,6 +1406,10 @@ const ( MCAST_LEAVE_SOURCE_GROUP = 0x2f MCAST_MSFILTER = 0x30 MCAST_UNBLOCK_SOURCE = 0x2c + MEMGETREGIONINFO = 0xc0104d08 + MEMREADOOB64 = 0xc0184d16 + MEMWRITE = 0xc0304d18 + MEMWRITEOOB64 = 0xc0184d15 MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 @@ -1494,7 +1498,35 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_ABSENT = 0x0 + MTD_BIT_WRITEABLE = 0x800 + MTD_CAP_NANDFLASH = 0x400 + MTD_CAP_NORFLASH = 0xc00 + MTD_CAP_NVRAM = 0x1c00 + MTD_CAP_RAM = 0x1c00 + MTD_CAP_ROM = 0x0 + MTD_DATAFLASH = 0x6 MTD_INODE_FS_MAGIC = 0x11307854 + MTD_MAX_ECCPOS_ENTRIES = 0x40 + MTD_MAX_OOBFREE_ENTRIES = 0x8 + MTD_MLCNANDFLASH = 0x8 + MTD_NANDECC_AUTOPLACE = 0x2 + MTD_NANDECC_AUTOPL_USR = 0x4 + MTD_NANDECC_OFF = 0x0 + MTD_NANDECC_PLACE = 0x1 + MTD_NANDECC_PLACEONLY = 0x3 + MTD_NANDFLASH = 0x4 + MTD_NORFLASH = 0x3 + MTD_NO_ERASE = 0x1000 + MTD_OTP_FACTORY = 0x1 + MTD_OTP_OFF = 0x0 + MTD_OTP_USER = 0x2 + MTD_POWERUP_LOCK = 0x2000 + MTD_RAM = 0x1 + MTD_ROM = 0x2 + MTD_SLC_ON_MLC_EMULATION = 0x4000 + MTD_UBIVOLUME = 0x7 + MTD_WRITEABLE = 0x400 NAME_MAX = 0xff NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 @@ -1534,6 +1566,59 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFC_ATR_REQ_GB_MAXSIZE = 0x30 + NFC_ATR_REQ_MAXSIZE = 0x40 + NFC_ATR_RES_GB_MAXSIZE = 0x2f + NFC_ATR_RES_MAXSIZE = 0x40 + NFC_COMM_ACTIVE = 0x0 + NFC_COMM_PASSIVE = 0x1 + NFC_DEVICE_NAME_MAXSIZE = 0x8 + NFC_DIRECTION_RX = 0x0 + NFC_DIRECTION_TX = 0x1 + NFC_FIRMWARE_NAME_MAXSIZE = 0x20 + NFC_GB_MAXSIZE = 0x30 + NFC_GENL_MCAST_EVENT_NAME = "events" + NFC_GENL_NAME = "nfc" + NFC_GENL_VERSION = 0x1 + NFC_HEADER_SIZE = 0x1 + NFC_ISO15693_UID_MAXSIZE = 0x8 + NFC_LLCP_MAX_SERVICE_NAME = 0x3f + NFC_LLCP_MIUX = 0x1 + NFC_LLCP_REMOTE_LTO = 0x3 + NFC_LLCP_REMOTE_MIU = 0x2 + NFC_LLCP_REMOTE_RW = 0x4 + NFC_LLCP_RW = 0x0 + NFC_NFCID1_MAXSIZE = 0xa + NFC_NFCID2_MAXSIZE = 0x8 + NFC_NFCID3_MAXSIZE = 0xa + 
NFC_PROTO_FELICA = 0x3 + NFC_PROTO_FELICA_MASK = 0x8 + NFC_PROTO_ISO14443 = 0x4 + NFC_PROTO_ISO14443_B = 0x6 + NFC_PROTO_ISO14443_B_MASK = 0x40 + NFC_PROTO_ISO14443_MASK = 0x10 + NFC_PROTO_ISO15693 = 0x7 + NFC_PROTO_ISO15693_MASK = 0x80 + NFC_PROTO_JEWEL = 0x1 + NFC_PROTO_JEWEL_MASK = 0x2 + NFC_PROTO_MAX = 0x8 + NFC_PROTO_MIFARE = 0x2 + NFC_PROTO_MIFARE_MASK = 0x4 + NFC_PROTO_NFC_DEP = 0x5 + NFC_PROTO_NFC_DEP_MASK = 0x20 + NFC_RAW_HEADER_SIZE = 0x2 + NFC_RF_INITIATOR = 0x0 + NFC_RF_NONE = 0x2 + NFC_RF_TARGET = 0x1 + NFC_SENSB_RES_MAXSIZE = 0xc + NFC_SENSF_RES_MAXSIZE = 0x12 + NFC_SE_DISABLED = 0x0 + NFC_SE_EMBEDDED = 0x2 + NFC_SE_ENABLED = 0x1 + NFC_SE_UICC = 0x1 + NFC_SOCKPROTO_LLCP = 0x1 + NFC_SOCKPROTO_MAX = 0x2 + NFC_SOCKPROTO_RAW = 0x0 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1959,6 +2044,11 @@ const ( QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 + RAW_PAYLOAD_DIGITAL = 0x3 + RAW_PAYLOAD_HCI = 0x2 + RAW_PAYLOAD_LLCP = 0x0 + RAW_PAYLOAD_NCI = 0x1 + RAW_PAYLOAD_PROPRIETARY = 0x4 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 RENAME_EXCHANGE = 0x2 @@ -2194,6 +2284,12 @@ const ( SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 SECURITYFS_MAGIC = 0x73636673 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_MAX = 0x4 + SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e91a1a957..09fc559ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -123,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -132,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a9cbac644..75730cc22 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -123,6 +125,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -132,6 +147,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + 
OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d74f3c15a..127cf17ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index e1538995b..957ca1ff1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -124,6 +126,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -133,6 +148,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 5e8e71ff8..314a2054f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND 
= 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index e670ee148..457e8de97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index dd11eacb8..33cd28f6b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index a0a5b22ae..0e085ba14 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x1000 O_CLOEXEC = 0x80000 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index d9530e5fb..1b5928cff 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -60,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc00c4d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NL2 = 0x200 NL3 = 0x300 @@ -132,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index e60102f6a..f3a41d6ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -60,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -132,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 838ff4ea6..6a5a555d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -60,6 +60,8 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 ECHOE = 0x2 ECHOK = 0x4 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 @@ -132,6 +147,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 7cc98f09c..a4da67edb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 6d30e6fd8..a7028e0ef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -60,6 +60,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -121,6 +123,19 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -130,6 +145,10 @@ const ( NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d O_APPEND = 0x400 O_ASYNC = 0x2000 O_CLOEXEC = 0x80000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index d5e2dc94f..ed3b3286c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -63,6 +63,8 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + ECCGETLAYOUT = 0x41484d11 + ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 ECHOE = 0x10 ECHOK = 0x20 @@ -126,6 +128,19 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MEMERASE = 0x80084d02 + MEMERASE64 = 0x80104d14 + MEMGETBADBLOCK = 0x80084d0b + MEMGETINFO = 0x40204d01 + MEMGETOOBSEL = 0x40c84d0a + MEMGETREGIONCOUNT = 0x40044d07 + MEMISLOCKED = 0x40084d17 + MEMLOCK = 0x80084d05 + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x80084d0c + MEMUNLOCK = 0x80084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 @@ -135,6 +150,10 @@ const ( NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 + OTPGETREGIONCOUNT = 0x80044d0e + OTPGETREGIONINFO = 0x800c4d0f + OTPLOCK = 0x400c4d10 + OTPSELECT = 0x40044d0d O_APPEND = 0x8 O_ASYNC = 0x40 O_CLOEXEC = 0x400000 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2673e6c59..4c8dc0ba2 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 1465cbcff..96f0e6ae2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 
+ P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 1d049d7a1..d0ba8e9b8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -431,6 +431,9 @@ type Winsize struct { const ( AT_FDCWD = 0xfffafdcd AT_SYMLINK_NOFOLLOW = 0x1 + AT_REMOVEDIR = 0x2 + AT_EACCESS = 0x4 + AT_SYMLINK_FOLLOW = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index c51bc88ff..1f99c024a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -672,9 +672,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 395b69187..ddf0305a5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -675,9 +675,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index d3f9d2541..dce0a5c80 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -656,9 +656,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 434d6e8e8..e23244702 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -653,9 +653,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 087323591..c9d7eb41e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -351,6 +351,13 @@ type RawSockaddrIUCV struct { Name [8]int8 } +type RawSockaddrNFC struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 +} + type _Socklen uint32 type Linger struct { @@ -464,6 +471,7 @@ const ( SizeofSockaddrL2TPIP = 0x10 SizeofSockaddrL2TPIP6 = 0x20 SizeofSockaddrIUCV = 0x20 + SizeofSockaddrNFC = 0x10 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -1765,6 +1773,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 @@ -3742,3 +3752,158 @@ const ( NLMSGERR_ATTR_OFFS = 0x2 NLMSGERR_ATTR_COOKIE = 0x3 ) + +type ( + 
EraseInfo struct { + Start uint32 + Length uint32 + } + EraseInfo64 struct { + Start uint64 + Length uint64 + } + MtdOobBuf struct { + Start uint32 + Length uint32 + Ptr *uint8 + } + MtdOobBuf64 struct { + Start uint64 + Pad uint32 + Length uint32 + Ptr uint64 + } + MtdWriteReq struct { + Start uint64 + Len uint64 + Ooblen uint64 + Data uint64 + Oob uint64 + Mode uint8 + _ [7]uint8 + } + MtdInfo struct { + Type uint8 + Flags uint32 + Size uint32 + Erasesize uint32 + Writesize uint32 + Oobsize uint32 + _ uint64 + } + RegionInfo struct { + Offset uint32 + Erasesize uint32 + Numblocks uint32 + Regionindex uint32 + } + OtpInfo struct { + Start uint32 + Length uint32 + Locked uint32 + } + NandOobinfo struct { + Useecc uint32 + Eccbytes uint32 + Oobfree [8][2]uint32 + Eccpos [32]uint32 + } + NandOobfree struct { + Offset uint32 + Length uint32 + } + NandEcclayout struct { + Eccbytes uint32 + Eccpos [64]uint32 + Oobavail uint32 + Oobfree [8]NandOobfree + } + MtdEccStats struct { + Corrected uint32 + Failed uint32 + Badblocks uint32 + Bbtblocks uint32 + } +) + +const ( + MTD_OPS_PLACE_OOB = 0x0 + MTD_OPS_AUTO_OOB = 0x1 + MTD_OPS_RAW = 0x2 +) + +const ( + MTD_FILE_MODE_NORMAL = 0x0 + MTD_FILE_MODE_OTP_FACTORY = 0x1 + MTD_FILE_MODE_OTP_USER = 0x2 + MTD_FILE_MODE_RAW = 0x3 +) + +const ( + NFC_CMD_UNSPEC = 0x0 + NFC_CMD_GET_DEVICE = 0x1 + NFC_CMD_DEV_UP = 0x2 + NFC_CMD_DEV_DOWN = 0x3 + NFC_CMD_DEP_LINK_UP = 0x4 + NFC_CMD_DEP_LINK_DOWN = 0x5 + NFC_CMD_START_POLL = 0x6 + NFC_CMD_STOP_POLL = 0x7 + NFC_CMD_GET_TARGET = 0x8 + NFC_EVENT_TARGETS_FOUND = 0x9 + NFC_EVENT_DEVICE_ADDED = 0xa + NFC_EVENT_DEVICE_REMOVED = 0xb + NFC_EVENT_TARGET_LOST = 0xc + NFC_EVENT_TM_ACTIVATED = 0xd + NFC_EVENT_TM_DEACTIVATED = 0xe + NFC_CMD_LLC_GET_PARAMS = 0xf + NFC_CMD_LLC_SET_PARAMS = 0x10 + NFC_CMD_ENABLE_SE = 0x11 + NFC_CMD_DISABLE_SE = 0x12 + NFC_CMD_LLC_SDREQ = 0x13 + NFC_EVENT_LLC_SDRES = 0x14 + NFC_CMD_FW_DOWNLOAD = 0x15 + NFC_EVENT_SE_ADDED = 0x16 + NFC_EVENT_SE_REMOVED = 0x17 + NFC_EVENT_SE_CONNECTIVITY = 0x18 + NFC_EVENT_SE_TRANSACTION = 0x19 + NFC_CMD_GET_SE = 0x1a + NFC_CMD_SE_IO = 0x1b + NFC_CMD_ACTIVATE_TARGET = 0x1c + NFC_CMD_VENDOR = 0x1d + NFC_CMD_DEACTIVATE_TARGET = 0x1e + NFC_ATTR_UNSPEC = 0x0 + NFC_ATTR_DEVICE_INDEX = 0x1 + NFC_ATTR_DEVICE_NAME = 0x2 + NFC_ATTR_PROTOCOLS = 0x3 + NFC_ATTR_TARGET_INDEX = 0x4 + NFC_ATTR_TARGET_SENS_RES = 0x5 + NFC_ATTR_TARGET_SEL_RES = 0x6 + NFC_ATTR_TARGET_NFCID1 = 0x7 + NFC_ATTR_TARGET_SENSB_RES = 0x8 + NFC_ATTR_TARGET_SENSF_RES = 0x9 + NFC_ATTR_COMM_MODE = 0xa + NFC_ATTR_RF_MODE = 0xb + NFC_ATTR_DEVICE_POWERED = 0xc + NFC_ATTR_IM_PROTOCOLS = 0xd + NFC_ATTR_TM_PROTOCOLS = 0xe + NFC_ATTR_LLC_PARAM_LTO = 0xf + NFC_ATTR_LLC_PARAM_RW = 0x10 + NFC_ATTR_LLC_PARAM_MIUX = 0x11 + NFC_ATTR_SE = 0x12 + NFC_ATTR_LLC_SDP = 0x13 + NFC_ATTR_FIRMWARE_NAME = 0x14 + NFC_ATTR_SE_INDEX = 0x15 + NFC_ATTR_SE_TYPE = 0x16 + NFC_ATTR_SE_AID = 0x17 + NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS = 0x18 + NFC_ATTR_SE_APDU = 0x19 + NFC_ATTR_TARGET_ISO15693_DSFID = 0x1a + NFC_ATTR_TARGET_ISO15693_UID = 0x1b + NFC_ATTR_SE_PARAMS = 0x1c + NFC_ATTR_VENDOR_ID = 0x1d + NFC_ATTR_VENDOR_SUBCMD = 0x1e + NFC_ATTR_VENDOR_DATA = 0x1f + NFC_SDP_ATTR_UNSPEC = 0x0 + NFC_SDP_ATTR_URI = 0x1 + NFC_SDP_ATTR_SAP = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 4d4d283de..235c62e46 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -128,6 +128,17 @@ const ( FADV_NOREUSE = 0x5 ) +type 
RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -160,9 +171,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 8a2eed5ec..99b1e5b6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -130,6 +130,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 94b34add6..cc8bba791 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -166,9 +177,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 2143de4d5..fa8fe3a75 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index a40216eee..e7fb8d9b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -133,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + 
SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index e834b069f..2fa61d593 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index e31083b04..7f3639933 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 42811f7fb..f3c20cb86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -133,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index af7a72017..885d27950 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -166,9 +177,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2a3afbaef..a94eb8e18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -132,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP 
struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index c0de30a65..659e32ebd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -132,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 74faf2e91..ab8ec604f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 9a8f0c2c6..3ec08237f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -130,6 +130,17 @@ const ( FADV_NOREUSE = 0x7 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 72cdda75b..23d474470 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -167,9 +178,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 
0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index b10e73abf..2fd2060e6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -445,8 +445,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 28ed6d55a..6a5a1a8ae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -453,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 4ba196ebe..84cc8d01e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -450,8 +450,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index dd642bd9c..c844e7096 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -453,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 1fdb0e5fa..2a8b1e6f7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -438,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e2fc93c7c..b1759cf70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -438,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 8d34b5a2f..e807de206 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -439,8 +439,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go 
b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ea8f1a0d9..ff3aecaee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -432,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ec6e8bc3f..9ecda6917 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -432,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go new file mode 100644 index 000000000..af3af60db --- /dev/null +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows +// +build go1.9 + +package windows + +import "syscall" + +type Errno = syscall.Errno +type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go new file mode 100644 index 000000000..115341fba --- /dev/null +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -0,0 +1,416 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// We need to use LoadLibrary and GetProcAddress from the Go runtime, because +// the these symbols are loaded by the system linker and are required to +// dynamically load additional symbols. Note that in the Go runtime, these +// return syscall.Handle and syscall.Errno, but these are the same, in fact, +// as windows.Handle and windows.Errno, and we intend to keep these the same. + +//go:linkname syscall_loadlibrary syscall.loadlibrary +func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) + +//go:linkname syscall_getprocaddress syscall.getprocaddress +func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) + +// DLLError describes reasons for DLL load failures. +type DLLError struct { + Err error + ObjName string + Msg string +} + +func (e *DLLError) Error() string { return e.Msg } + +func (e *DLLError) Unwrap() error { return e.Err } + +// A DLL implements access to a single DLL. +type DLL struct { + Name string + Handle Handle +} + +// LoadDLL loads DLL file into memory. +// +// Warning: using LoadDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use LazyDLL +// with System set to true, or use LoadLibraryEx directly. 
+func LoadDLL(name string) (dll *DLL, err error) { + namep, err := UTF16PtrFromString(name) + if err != nil { + return nil, err + } + h, e := syscall_loadlibrary(namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to load " + name + ": " + e.Error(), + } + } + d := &DLL{ + Name: name, + Handle: h, + } + return d, nil +} + +// MustLoadDLL is like LoadDLL but panics if load operation failes. +func MustLoadDLL(name string) *DLL { + d, e := LoadDLL(name) + if e != nil { + panic(e) + } + return d +} + +// FindProc searches DLL d for procedure named name and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProc(name string) (proc *Proc, err error) { + namep, err := BytePtrFromString(name) + if err != nil { + return nil, err + } + a, e := syscall_getprocaddress(d.Handle, namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProc is like FindProc but panics if search fails. +func (d *DLL) MustFindProc(name string) *Proc { + p, e := d.FindProc(name) + if e != nil { + panic(e) + } + return p +} + +// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { + a, e := GetProcAddressByOrdinal(d.Handle, ordinal) + name := "#" + itoa(int(ordinal)) + if e != nil { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. +func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { + p, e := d.FindProcByOrdinal(ordinal) + if e != nil { + panic(e) + } + return p +} + +// Release unloads DLL d from memory. +func (d *DLL) Release() (err error) { + return FreeLibrary(d.Handle) +} + +// A Proc implements access to a procedure inside a DLL. +type Proc struct { + Dll *DLL + Name string + addr uintptr +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +func (p *Proc) Addr() uintptr { + return p.addr +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. 
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + switch len(a) { + case 0: + return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) + case 1: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) + case 2: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) + case 3: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) + case 4: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) + case 5: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) + case 6: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) + case 7: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) + case 8: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) + case 9: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) + case 10: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) + case 11: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) + case 12: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) + case 13: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) + case 14: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) + case 15: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + default: + panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") + } +} + +// A LazyDLL implements access to a single DLL. +// It will delay the load of the DLL until the first +// call to its Handle method or to one of its +// LazyProc's Addr method. +type LazyDLL struct { + Name string + + // System determines whether the DLL must be loaded from the + // Windows System directory, bypassing the normal DLL search + // path. + System bool + + mu sync.Mutex + dll *DLL // non nil once DLL is loaded +} + +// Load loads DLL file d.Name into memory. It returns an error if fails. +// Load will not try to load DLL, if it is already loaded into memory. +func (d *LazyDLL) Load() error { + // Non-racy version of: + // if d.dll != nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { + return nil + } + d.mu.Lock() + defer d.mu.Unlock() + if d.dll != nil { + return nil + } + + // kernel32.dll is special, since it's where LoadLibraryEx comes from. + // The kernel already special-cases its name, so it's always + // loaded from system32. + var dll *DLL + var err error + if d.Name == "kernel32.dll" { + dll, err = LoadDLL(d.Name) + } else { + dll, err = loadLibraryEx(d.Name, d.System) + } + if err != nil { + return err + } + + // Non-racy version of: + // d.dll = dll + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) + return nil +} + +// mustLoad is like Load but panics if search fails. +func (d *LazyDLL) mustLoad() { + e := d.Load() + if e != nil { + panic(e) + } +} + +// Handle returns d's module handle. 
+func (d *LazyDLL) Handle() uintptr { + d.mustLoad() + return uintptr(d.dll.Handle) +} + +// NewProc returns a LazyProc for accessing the named procedure in the DLL d. +func (d *LazyDLL) NewProc(name string) *LazyProc { + return &LazyProc{l: d, Name: name} +} + +// NewLazyDLL creates new LazyDLL associated with DLL file. +func NewLazyDLL(name string) *LazyDLL { + return &LazyDLL{Name: name} +} + +// NewLazySystemDLL is like NewLazyDLL, but will only +// search Windows System directory for the DLL if name is +// a base name (like "advapi32.dll"). +func NewLazySystemDLL(name string) *LazyDLL { + return &LazyDLL{Name: name, System: true} +} + +// A LazyProc implements access to a procedure inside a LazyDLL. +// It delays the lookup until the Addr method is called. +type LazyProc struct { + Name string + + mu sync.Mutex + l *LazyDLL + proc *Proc +} + +// Find searches DLL for procedure named p.Name. It returns +// an error if search fails. Find will not search procedure, +// if it is already found and loaded into memory. +func (p *LazyProc) Find() error { + // Non-racy version of: + // if p.proc == nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { + p.mu.Lock() + defer p.mu.Unlock() + if p.proc == nil { + e := p.l.Load() + if e != nil { + return e + } + proc, e := p.l.dll.FindProc(p.Name) + if e != nil { + return e + } + // Non-racy version of: + // p.proc = proc + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) + } + } + return nil +} + +// mustFind is like Find but panics if search fails. +func (p *LazyProc) mustFind() { + e := p.Find() + if e != nil { + panic(e) + } +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. +func (p *LazyProc) Addr() uintptr { + p.mustFind() + return p.proc.Addr() +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + p.mustFind() + return p.proc.Call(a...) +} + +var canDoSearchSystem32Once struct { + sync.Once + v bool +} + +func initCanDoSearchSystem32() { + // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: + // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows + // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on + // systems that have KB2533623 installed. To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx." 
+ canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + systemdir, err := GetSystemDirectory() + if err != nil { + return nil, err + } + loadDLL = systemdir + "\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s new file mode 100644 index 000000000..fdbbbcd31 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.12 +// +build !go1.12 + +// This file is here to allow bodyless functions with go:linkname for Go 1.11 +// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 000000000..92ac05ff4 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,54 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import ( + "syscall" + "unsafe" +) + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +// Returns a default environment associated with the token, rather than the current +// process. If inheritExisting is true, then this environment also inherits the +// environment of the current process. 
+func (token Token) Environ(inheritExisting bool) (env []string, err error) { + var block *uint16 + err = CreateEnvironmentBlock(&block, token, inheritExisting) + if err != nil { + return nil, err + } + defer DestroyEnvironmentBlock(block) + blockp := uintptr(unsafe.Pointer(block)) + for { + entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) + if len(entry) == 0 { + break + } + env = append(env, entry) + blockp += 2 * (uintptr(len(entry)) + 1) + } + return env, nil +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 000000000..40af946e1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 000000000..7a11e83b7 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,195 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +import ( + errorspkg "errors" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. 
+func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line, +// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, +// or any program that uses CommandLineToArgv. +func ComposeCommandLine(args []string) string { + var commandLine string + for i := range args { + if i > 0 { + commandLine += " " + } + commandLine += EscapeArg(args[i]) + } + return commandLine +} + +// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, +// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that +// command lines are passed around. +func DecomposeCommandLine(commandLine string) ([]string, error) { + if len(commandLine) == 0 { + return []string{}, nil + } + var argc int32 + argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + if err != nil { + return nil, err + } + defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string + for _, v := range (*argv)[:argc] { + args = append(args, UTF16ToString((*v)[:])) + } + return args, nil +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} + +// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes. +func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) { + var size uintptr + err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) + if err != ERROR_INSUFFICIENT_BUFFER { + if err == nil { + return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") + } + return nil, err + } + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. + al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))} + err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) + if err != nil { + return nil, err + } + return al, err +} + +// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. 
+// Note that the value passed to this function will be copied into memory +// allocated by LocalAlloc, the contents of which should not contain any +// Go-managed pointers, even if the passed value itself is a Go-managed +// pointer. +func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return err + } + var src, dst []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + hdr.Data = value + hdr.Cap = int(size) + hdr.Len = int(size) + hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + hdr.Data = unsafe.Pointer(alloc) + hdr.Cap = int(size) + hdr.Len = int(size) + copy(dst, src) + al.heapAllocations = append(al.heapAllocations, alloc) + return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil) +} + +// Delete frees ProcThreadAttributeList's resources. +func (al *ProcThreadAttributeListContainer) Delete() { + deleteProcThreadAttributeList(al.data) + for i := range al.heapAllocations { + LocalFree(Handle(al.heapAllocations[i])) + } + al.heapAllocations = nil +} + +// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. +func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList { + return al.data +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 000000000..1adb60739 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,37 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x00000001 + PAGE_READONLY = 0x00000002 + PAGE_READWRITE = 0x00000004 + PAGE_WRITECOPY = 0x00000008 + PAGE_EXECUTE = 0x00000010 + PAGE_EXECUTE_READ = 0x00000020 + PAGE_EXECUTE_READWRITE = 0x00000040 + PAGE_EXECUTE_WRITECOPY = 0x00000080 + PAGE_GUARD = 0x00000100 + PAGE_NOCACHE = 0x00000200 + PAGE_WRITECOMBINE = 0x00000400 + PAGE_TARGETS_INVALID = 0x40000000 + PAGE_TARGETS_NO_UPDATE = 0x40000000 + + QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 + QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 + QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 + QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 +) diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash new file mode 100644 index 000000000..58e0188fb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -0,0 +1,70 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
+ +set -e +shopt -s nullglob + +winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" +[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } + +declare -A errors + +{ + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." + echo + echo "package windows" + echo "import \"syscall\"" + echo "const (" + + while read -r line; do + unset vtype + if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + else + continue + fi + [[ -n $key && -n $value ]] || continue + [[ -z ${errors["$key"]} ]] || continue + errors["$key"]="$value" + if [[ -v vtype ]]; then + if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then + vtype="" + elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then + vtype="Handle" + else + vtype="syscall.Errno" + fi + last_vtype="$vtype" + else + vtype="" + if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then + value="S_OK" + elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then + value="ERROR_SUCCESS" + fi + fi + + echo "$key $vtype = $value" + done < "$winerror" + + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + + echo ")" +} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 000000000..ab8924e93 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." 
+ echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 000000000..328e3b2ac --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build generate + +package windows + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 000000000..a74e3e24b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 000000000..e44a3cbf6 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 000000000..111c10d3a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,1443 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. +func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 + SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID + SECURITY_AUTHENTICATED_USER_RID = 0xb + 
SECURITY_RESTRICTED_CODE_RID = 0xc + SECURITY_NT_NON_UNIQUE_RID = 0x15 +) + +// Predefined domain-relative RIDs for local groups. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + +//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW +//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW +//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW +//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid +//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid +//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid +//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid +//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid +//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid +//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount +//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority +//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid + +// The security identifier (SID) structure is a variable-length +// structure used to uniquely identify users or groups. +type SID struct{} + +// StringToSid converts a string-format security identifier +// SID into a valid, functional SID. 
+func StringToSid(s string) (*SID, error) { + var sid *SID + p, e := UTF16PtrFromString(s) + if e != nil { + return nil, e + } + e = ConvertStringSidToSid(p, &sid) + if e != nil { + return nil, e + } + defer LocalFree((Handle)(unsafe.Pointer(sid))) + return sid.Copy() +} + +// LookupSID retrieves a security identifier SID for the account +// and the name of the domain on which the account was found. +// System specify target computer to search. +func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { + if len(account) == 0 { + return nil, "", 0, syscall.EINVAL + } + acc, e := UTF16PtrFromString(account) + if e != nil { + return nil, "", 0, e + } + var sys *uint16 + if len(system) > 0 { + sys, e = UTF16PtrFromString(system) + if e != nil { + return nil, "", 0, e + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]byte, n) + db := make([]uint16, dn) + sid = (*SID)(unsafe.Pointer(&b[0])) + e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) + if e == nil { + return sid, UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, "", 0, e + } + if n <= uint32(len(b)) { + return nil, "", 0, e + } + } +} + +// String converts SID to a string format suitable for display, storage, or transmission. +func (sid *SID) String() string { + var s *uint16 + e := ConvertSidToStringSid(sid, &s) + if e != nil { + return "" + } + defer LocalFree((Handle)(unsafe.Pointer(s))) + return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]) +} + +// Len returns the length, in bytes, of a valid security identifier SID. +func (sid *SID) Len() int { + return int(GetLengthSid(sid)) +} + +// Copy creates a duplicate of security identifier SID. +func (sid *SID) Copy() (*SID, error) { + b := make([]byte, sid.Len()) + sid2 := (*SID)(unsafe.Pointer(&b[0])) + e := CopySid(uint32(len(b)), sid2, sid) + if e != nil { + return nil, e + } + return sid2, nil +} + +// IdentifierAuthority returns the identifier authority of the SID. +func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { + return *getSidIdentifierAuthority(sid) +} + +// SubAuthorityCount returns the number of sub-authorities in the SID. +func (sid *SID) SubAuthorityCount() uint8 { + return *getSidSubAuthorityCount(sid) +} + +// SubAuthority returns the sub-authority of the SID as specified by +// the index, which must be less than sid.SubAuthorityCount(). +func (sid *SID) SubAuthority(idx uint32) uint32 { + if idx >= uint32(sid.SubAuthorityCount()) { + panic("sub-authority index out of range") + } + return *getSidSubAuthority(sid, idx) +} + +// IsValid returns whether the SID has a valid revision and length. +func (sid *SID) IsValid() bool { + return isValidSid(sid) +} + +// Equals compares two SIDs for equality. +func (sid *SID) Equals(sid2 *SID) bool { + return EqualSid(sid, sid2) +} + +// IsWellKnown determines whether the SID matches the well-known sidType. +func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { + return isWellKnownSid(sid, sidType) +} + +// LookupAccount retrieves the name of the account for this SID +// and the name of the first domain on which this SID is found. +// System specify target computer to search for. 
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { + var sys *uint16 + if len(system) > 0 { + sys, err = UTF16PtrFromString(system) + if err != nil { + return "", "", 0, err + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]uint16, n) + db := make([]uint16, dn) + e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) + if e == nil { + return UTF16ToString(b), UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", "", 0, e + } + if n <= uint32(len(b)) { + return "", "", 0, e + } + } +} + +// Various types of pre-specified SIDs that can be synthesized and compared at runtime. +type WELL_KNOWN_SID_TYPE uint32 + +const ( + WinNullSid = 0 + WinWorldSid = 1 + WinLocalSid = 2 + WinCreatorOwnerSid = 3 + WinCreatorGroupSid = 4 + WinCreatorOwnerServerSid = 5 + WinCreatorGroupServerSid = 6 + WinNtAuthoritySid = 7 + WinDialupSid = 8 + WinNetworkSid = 9 + WinBatchSid = 10 + WinInteractiveSid = 11 + WinServiceSid = 12 + WinAnonymousSid = 13 + WinProxySid = 14 + WinEnterpriseControllersSid = 15 + WinSelfSid = 16 + WinAuthenticatedUserSid = 17 + WinRestrictedCodeSid = 18 + WinTerminalServerSid = 19 + WinRemoteLogonIdSid = 20 + WinLogonIdsSid = 21 + WinLocalSystemSid = 22 + WinLocalServiceSid = 23 + WinNetworkServiceSid = 24 + WinBuiltinDomainSid = 25 + WinBuiltinAdministratorsSid = 26 + WinBuiltinUsersSid = 27 + WinBuiltinGuestsSid = 28 + WinBuiltinPowerUsersSid = 29 + WinBuiltinAccountOperatorsSid = 30 + WinBuiltinSystemOperatorsSid = 31 + WinBuiltinPrintOperatorsSid = 32 + WinBuiltinBackupOperatorsSid = 33 + WinBuiltinReplicatorSid = 34 + WinBuiltinPreWindows2000CompatibleAccessSid = 35 + WinBuiltinRemoteDesktopUsersSid = 36 + WinBuiltinNetworkConfigurationOperatorsSid = 37 + WinAccountAdministratorSid = 38 + WinAccountGuestSid = 39 + WinAccountKrbtgtSid = 40 + WinAccountDomainAdminsSid = 41 + WinAccountDomainUsersSid = 42 + WinAccountDomainGuestsSid = 43 + WinAccountComputersSid = 44 + WinAccountControllersSid = 45 + WinAccountCertAdminsSid = 46 + WinAccountSchemaAdminsSid = 47 + WinAccountEnterpriseAdminsSid = 48 + WinAccountPolicyAdminsSid = 49 + WinAccountRasAndIasServersSid = 50 + WinNTLMAuthenticationSid = 51 + WinDigestAuthenticationSid = 52 + WinSChannelAuthenticationSid = 53 + WinThisOrganizationSid = 54 + WinOtherOrganizationSid = 55 + WinBuiltinIncomingForestTrustBuildersSid = 56 + WinBuiltinPerfMonitoringUsersSid = 57 + WinBuiltinPerfLoggingUsersSid = 58 + WinBuiltinAuthorizationAccessSid = 59 + WinBuiltinTerminalServerLicenseServersSid = 60 + WinBuiltinDCOMUsersSid = 61 + WinBuiltinIUsersSid = 62 + WinIUserSid = 63 + WinBuiltinCryptoOperatorsSid = 64 + WinUntrustedLabelSid = 65 + WinLowLabelSid = 66 + WinMediumLabelSid = 67 + WinHighLabelSid = 68 + WinSystemLabelSid = 69 + WinWriteRestrictedCodeSid = 70 + WinCreatorOwnerRightsSid = 71 + WinCacheablePrincipalsGroupSid = 72 + WinNonCacheablePrincipalsGroupSid = 73 + WinEnterpriseReadonlyControllersSid = 74 + WinAccountReadonlyControllersSid = 75 + WinBuiltinEventLogReadersGroup = 76 + WinNewEnterpriseReadonlyControllersSid = 77 + WinBuiltinCertSvcDComAccessGroup = 78 + WinMediumPlusLabelSid = 79 + WinLocalLogonSid = 80 + WinConsoleLogonSid = 81 + WinThisOrganizationCertificateSid = 82 + WinApplicationPackageAuthoritySid = 83 + WinBuiltinAnyPackageSid = 84 + WinCapabilityInternetClientSid = 85 + WinCapabilityInternetClientServerSid = 86 + WinCapabilityPrivateNetworkClientServerSid = 87 + WinCapabilityPicturesLibrarySid = 88 + 
WinCapabilityVideosLibrarySid = 89 + WinCapabilityMusicLibrarySid = 90 + WinCapabilityDocumentsLibrarySid = 91 + WinCapabilitySharedUserCertificatesSid = 92 + WinCapabilityEnterpriseAuthenticationSid = 93 + WinCapabilityRemovableStorageSid = 94 + WinBuiltinRDSRemoteAccessServersSid = 95 + WinBuiltinRDSEndpointServersSid = 96 + WinBuiltinRDSManagementServersSid = 97 + WinUserModeDriversSid = 98 + WinBuiltinHyperVAdminsSid = 99 + WinAccountCloneableControllersSid = 100 + WinBuiltinAccessControlAssistanceOperatorsSid = 101 + WinBuiltinRemoteManagementUsersSid = 102 + WinAuthenticationAuthorityAssertedSid = 103 + WinAuthenticationServiceAssertedSid = 104 + WinLocalAccountSid = 105 + WinLocalAccountAndAdministratorSid = 106 + WinAccountProtectedUsersSid = 107 + WinCapabilityAppointmentsSid = 108 + WinCapabilityContactsSid = 109 + WinAccountDefaultSystemManagedSid = 110 + WinBuiltinDefaultSystemManagedGroupSid = 111 + WinBuiltinStorageReplicaAdminsSid = 112 + WinAccountKeyAdminsSid = 113 + WinAccountEnterpriseKeyAdminsSid = 114 + WinAuthenticationKeyTrustSid = 115 + WinAuthenticationKeyPropertyMFASid = 116 + WinAuthenticationKeyPropertyAttestationSid = 117 + WinAuthenticationFreshKeyAuthSid = 118 + WinBuiltinDeviceOwnersSid = 119 +) + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the local machine. +func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { + return CreateWellKnownDomainSid(sidType, nil) +} + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the domain specified by the domainSid parameter. +func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { + n := uint32(50) + for { + b := make([]byte, n) + sid := (*SID)(unsafe.Pointer(&b[0])) + err := createWellKnownSid(sidType, domainSid, sid, &n) + if err == nil { + return sid, nil + } + if err != ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + if n <= uint32(len(b)) { + return nil, err + } + } +} + +const ( + // do not reorder + TOKEN_ASSIGN_PRIMARY = 1 << iota + TOKEN_DUPLICATE + TOKEN_IMPERSONATE + TOKEN_QUERY + TOKEN_QUERY_SOURCE + TOKEN_ADJUST_PRIVILEGES + TOKEN_ADJUST_GROUPS + TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_SESSIONID + + TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + TOKEN_ASSIGN_PRIMARY | + TOKEN_DUPLICATE | + TOKEN_IMPERSONATE | + TOKEN_QUERY | + TOKEN_QUERY_SOURCE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT | + TOKEN_ADJUST_SESSIONID + TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY + TOKEN_WRITE = STANDARD_RIGHTS_WRITE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE +) + +const ( + // do not reorder + TokenUser = 1 + iota + TokenGroups + TokenPrivileges + TokenOwner + TokenPrimaryGroup + TokenDefaultDacl + TokenSource + TokenType + TokenImpersonationLevel + TokenStatistics + TokenRestrictedSids + TokenSessionId + TokenGroupsAndPrivileges + TokenSessionReference + TokenSandBoxInert + TokenAuditPolicy + TokenOrigin + TokenElevationType + TokenLinkedToken + TokenElevation + TokenHasRestrictions + TokenAccessInformation + TokenVirtualizationAllowed + TokenVirtualizationEnabled + TokenIntegrityLevel + TokenUIAccess + TokenMandatoryPolicy + TokenLogonSid + MaxTokenInfoClass +) + +// Group attributes inside of Tokengroups.Groups[i].Attributes +const ( + SE_GROUP_MANDATORY = 0x00000001 + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 + SE_GROUP_ENABLED = 
0x00000004 + SE_GROUP_OWNER = 0x00000008 + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 + SE_GROUP_INTEGRITY = 0x00000020 + SE_GROUP_INTEGRITY_ENABLED = 0x00000040 + SE_GROUP_LOGON_ID = 0xC0000000 + SE_GROUP_RESOURCE = 0x20000000 + SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED +) + +// Privilege attributes +const ( + SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 + SE_PRIVILEGE_ENABLED = 0x00000002 + SE_PRIVILEGE_REMOVED = 0x00000004 + SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 + SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS +) + +// Token types +const ( + TokenPrimary = 1 + TokenImpersonation = 2 +) + +// Impersonation levels +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUIDAndAttributes struct { + Luid LUID + Attributes uint32 +} + +type SIDAndAttributes struct { + Sid *SID + Attributes uint32 +} + +type Tokenuser struct { + User SIDAndAttributes +} + +type Tokenprimarygroup struct { + PrimaryGroup *SID +} + +type Tokengroups struct { + GroupCount uint32 + Groups [1]SIDAndAttributes // Use AllGroups() for iterating. +} + +// AllGroups returns a slice that can be used to iterate over the groups in g. +func (g *Tokengroups) AllGroups() []SIDAndAttributes { + return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +type Tokenprivileges struct { + PrivilegeCount uint32 + Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. +} + +// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { + return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] +} + +type Tokenmandatorylabel struct { + Label SIDAndAttributes +} + +func (tml *Tokenmandatorylabel) Size() uint32 { + return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) +} + +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys isTokenRestricted(tokenHandle Token) (ret bool, err error) [!failretval] = advapi32.IsTokenRestricted +//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges +//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups +//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation +//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx +//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW +//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW + +// An access token contains the security information for a logon session. +// The system creates an access token when a user logs on, and every +// process executed on behalf of the user has a copy of the token. +// The token identifies the user, the user's groups, and the user's +// privileges. The system uses the token to control access to securable +// objects and to control the ability of the user to perform various +// system-related operations on the local computer. +type Token Handle + +// OpenCurrentProcessToken opens an access token associated with current +// process with TOKEN_QUERY access. It is a real token that needs to be closed. +// +// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) +// with the desired access instead, or use GetCurrentProcessToken for a +// TOKEN_QUERY token. 
+func OpenCurrentProcessToken() (Token, error) { + var token Token + err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) + return token, err +} + +// GetCurrentProcessToken returns the access token associated with +// the current process. It is a pseudo token that does not need +// to be closed. +func GetCurrentProcessToken() Token { + return Token(^uintptr(4 - 1)) +} + +// GetCurrentThreadToken return the access token associated with +// the current thread. It is a pseudo token that does not need +// to be closed. +func GetCurrentThreadToken() Token { + return Token(^uintptr(5 - 1)) +} + +// GetCurrentThreadEffectiveToken returns the effective access token +// associated with the current thread. It is a pseudo token that does +// not need to be closed. +func GetCurrentThreadEffectiveToken() Token { + return Token(^uintptr(6 - 1)) +} + +// Close releases access to access token. +func (t Token) Close() error { + return CloseHandle(Handle(t)) +} + +// getInfo retrieves a specified type of information about an access token. +func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +// GetTokenUser retrieves access token t user account information. +func (t Token) GetTokenUser() (*Tokenuser, error) { + i, e := t.getInfo(TokenUser, 50) + if e != nil { + return nil, e + } + return (*Tokenuser)(i), nil +} + +// GetTokenGroups retrieves group accounts associated with access token t. +func (t Token) GetTokenGroups() (*Tokengroups, error) { + i, e := t.getInfo(TokenGroups, 50) + if e != nil { + return nil, e + } + return (*Tokengroups)(i), nil +} + +// GetTokenPrimaryGroup retrieves access token t primary group information. +// A pointer to a SID structure representing a group that will become +// the primary group of any objects created by a process using this access token. +func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { + i, e := t.getInfo(TokenPrimaryGroup, 50) + if e != nil { + return nil, e + } + return (*Tokenprimarygroup)(i), nil +} + +// GetUserProfileDirectory retrieves path to the +// root directory of the access token t user's profile. +func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsElevated returns whether the current token is elevated from a UAC perspective. +func (token Token) IsElevated() bool { + var isElevated uint32 + var outLen uint32 + err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) + if err != nil { + return false + } + return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 +} + +// GetLinkedToken returns the linked token, which may be an elevated UAC token. 
+func (token Token) GetLinkedToken() (Token, error) { + var linkedToken Token + var outLen uint32 + err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) + if err != nil { + return Token(0), err + } + return linkedToken, nil +} + +// GetSystemDirectory retrieves the path to current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`. +func GetSystemDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetWindowsDirectory retrieves the path to current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server. +func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} + +// IsRestricted reports whether the access token t is a restricted token. +func (t Token) IsRestricted() (isRestricted bool, err error) { + isRestricted, err = isTokenRestricted(t) + if !isRestricted && err == syscall.EINVAL { + // If err is EINVAL, this returned ERROR_SUCCESS indicating a non-restricted token. 
+ err = nil + } + return +} + +const ( + WTS_CONSOLE_CONNECT = 0x1 + WTS_CONSOLE_DISCONNECT = 0x2 + WTS_REMOTE_CONNECT = 0x3 + WTS_REMOTE_DISCONNECT = 0x4 + WTS_SESSION_LOGON = 0x5 + WTS_SESSION_LOGOFF = 0x6 + WTS_SESSION_LOCK = 0x7 + WTS_SESSION_UNLOCK = 0x8 + WTS_SESSION_REMOTE_CONTROL = 0x9 + WTS_SESSION_CREATE = 0xa + WTS_SESSION_TERMINATE = 0xb +) + +const ( + WTSActive = 0 + WTSConnected = 1 + WTSConnectQuery = 2 + WTSShadow = 3 + WTSDisconnected = 4 + WTSIdle = 5 + WTSListen = 6 + WTSReset = 7 + WTSDown = 8 + WTSInit = 9 +) + +type WTSSESSION_NOTIFICATION struct { + Size uint32 + SessionID uint32 +} + +type WTS_SESSION_INFO struct { + SessionID uint32 + WindowStationName *uint16 + State uint32 +} + +//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken +//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW +//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory + +type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 +} + +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *SID + group *SID + sacl *ACL + dacl *ACL +} + +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. +const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + InheritHandle uint32 +} + +type SE_OBJECT_TYPE uint32 + +// Constants for type SE_OBJECT_TYPE +const ( + SE_UNKNOWN_OBJECT_TYPE = 0 + SE_FILE_OBJECT = 1 + SE_SERVICE = 2 + SE_PRINTER = 3 + SE_REGISTRY_KEY = 4 + SE_LMSHARE = 5 + SE_KERNEL_OBJECT = 6 + SE_WINDOW_OBJECT = 7 + SE_DS_OBJECT = 8 + SE_DS_OBJECT_ALL = 9 + SE_PROVIDER_DEFINED_OBJECT = 10 + SE_WMIGUID_OBJECT = 11 + SE_REGISTRY_WOW64_32KEY = 12 + SE_REGISTRY_WOW64_64KEY = 13 +) + +type SECURITY_INFORMATION uint32 + +// Constants for type SECURITY_INFORMATION +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// Constants for type SECURITY_DESCRIPTOR_CONTROL +const ( + SE_OWNER_DEFAULTED = 0x0001 + SE_GROUP_DEFAULTED = 0x0002 + SE_DACL_PRESENT = 0x0004 + SE_DACL_DEFAULTED = 0x0008 + SE_SACL_PRESENT = 0x0010 + SE_SACL_DEFAULTED = 0x0020 + SE_DACL_AUTO_INHERIT_REQ = 0x0100 + SE_SACL_AUTO_INHERIT_REQ = 0x0200 + SE_DACL_AUTO_INHERITED = 0x0400 + SE_SACL_AUTO_INHERITED = 0x0800 + SE_DACL_PROTECTED = 0x1000 + SE_SACL_PROTECTED = 0x2000 + SE_RM_CONTROL_VALID = 0x4000 + SE_SELF_RELATIVE = 0x8000 +) + +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + 
STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +type ACCESS_MODE uint32 + +// Constants for type ACCESS_MODE +const ( + NOT_USED_ACCESS = 0 + GRANT_ACCESS = 1 + SET_ACCESS = 2 + DENY_ACCESS = 3 + REVOKE_ACCESS = 4 + SET_AUDIT_SUCCESS = 5 + SET_AUDIT_FAILURE = 6 +) + +// Constants for AceFlags and Inheritance fields +const ( + NO_INHERITANCE = 0x0 + SUB_OBJECTS_ONLY_INHERIT = 0x1 + SUB_CONTAINERS_ONLY_INHERIT = 0x2 + SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 + INHERIT_NO_PROPAGATE = 0x4 + INHERIT_ONLY = 0x8 + INHERITED_ACCESS_ENTRY = 0x10 + INHERITED_PARENT = 0x10000000 + INHERITED_GRANDPARENT = 0x20000000 + OBJECT_INHERIT_ACE = 0x1 + CONTAINER_INHERIT_ACE = 0x2 + NO_PROPAGATE_INHERIT_ACE = 0x4 + INHERIT_ONLY_ACE = 0x8 + INHERITED_ACE = 0x10 + VALID_INHERIT_FLAGS = 0x1F +) + +type MULTIPLE_TRUSTEE_OPERATION uint32 + +// Constants for MULTIPLE_TRUSTEE_OPERATION +const ( + NO_MULTIPLE_TRUSTEE = 0 + TRUSTEE_IS_IMPERSONATE = 1 +) + +type TRUSTEE_FORM uint32 + +// Constants for TRUSTEE_FORM +const ( + TRUSTEE_IS_SID = 0 + TRUSTEE_IS_NAME = 1 + TRUSTEE_BAD_FORM = 2 + TRUSTEE_IS_OBJECTS_AND_SID = 3 + TRUSTEE_IS_OBJECTS_AND_NAME = 4 +) + +type TRUSTEE_TYPE uint32 + +// Constants for TRUSTEE_TYPE +const ( + TRUSTEE_IS_UNKNOWN = 0 + TRUSTEE_IS_USER = 1 + TRUSTEE_IS_GROUP = 2 + TRUSTEE_IS_DOMAIN = 3 + TRUSTEE_IS_ALIAS = 4 + TRUSTEE_IS_WELL_KNOWN_GROUP = 5 + TRUSTEE_IS_DELETED = 6 + TRUSTEE_IS_INVALID = 7 + TRUSTEE_IS_COMPUTER = 8 +) + +// Constants for ObjectsPresent field +const ( + ACE_OBJECT_TYPE_PRESENT = 0x1 + ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 +) + +type EXPLICIT_ACCESS struct { + AccessPermissions ACCESS_MASK + AccessMode ACCESS_MODE + Inheritance uint32 + Trustee TRUSTEE +} + +// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
+type TrusteeValue uintptr + +func TrusteeValueFromString(str string) TrusteeValue { + return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) +} +func TrusteeValueFromSID(sid *SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(sid)) +} +func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndSid)) +} +func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndName)) +} + +type TRUSTEE struct { + MultipleTrustee *TRUSTEE + MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION + TrusteeForm TRUSTEE_FORM + TrusteeType TRUSTEE_TYPE + TrusteeValue TrusteeValue +} + +type OBJECTS_AND_SID struct { + ObjectsPresent uint32 + ObjectTypeGuid GUID + InheritedObjectTypeGuid GUID + Sid *SID +} + +type OBJECTS_AND_NAME struct { + ObjectsPresent uint32 + ObjectType SE_OBJECT_TYPE + ObjectTypeName *uint16 + InheritedObjectTypeName *uint16 + Name *uint16 +} + +//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo +//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW +//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW +//sys SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity + +//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW +//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor + +//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl +//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl +//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl +//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner +//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup +//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength +//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = 
advapi32.GetSecurityDescriptorRMControl +//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor + +//sys setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl +//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl +//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl +//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner +//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup +//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl + +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW + +//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD +//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD + +//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW + +// Control returns the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { + err = getSecurityDescriptorControl(sd, &control, &revision) + return +} + +// SetControl sets the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { + return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) +} + +// RMControl returns the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { + err = getSecurityDescriptorRMControl(sd, &control) + return +} + +// SetRMControl sets the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { + setSecurityDescriptorRMControl(sd, &rmControl) +} + +// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil +// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. 
+func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetDACL sets the absolute security descriptor DACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) +} + +// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil +// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetSACL sets the absolute security descriptor SACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) +} + +// Owner returns the security descriptor owner and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { + err = getSecurityDescriptorOwner(sd, &owner, &defaulted) + return +} + +// SetOwner sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { + return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) +} + +// Group returns the security descriptor group and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { + err = getSecurityDescriptorGroup(sd, &group, &defaulted) + return +} + +// SetGroup sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { + return setSecurityDescriptorGroup(absoluteSD, group, defaulted) +} + +// Length returns the length of the security descriptor. +func (sd *SECURITY_DESCRIPTOR) Length() uint32 { + return getSecurityDescriptorLength(sd) +} + +// IsValid returns whether the security descriptor is valid. +func (sd *SECURITY_DESCRIPTOR) IsValid() bool { + return isValidSecurityDescriptor(sd) +} + +// String returns the SDDL form of the security descriptor, with a function signature that can be +// used with %v formatting directives. +func (sd *SECURITY_DESCRIPTOR) String() string { + var sddl *uint16 + err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) + if err != nil { + return "" + } + defer LocalFree(Handle(unsafe.Pointer(sddl))) + return UTF16PtrToString(sddl) +} + +// ToAbsolute converts a self-relative security descriptor into an absolute one. +func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := selfRelativeSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE == 0 { + err = ERROR_INVALID_PARAMETER + return + } + var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 + err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, + nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeAbsoluteSD is expected to fail, but it succeeds. 
+ return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if absoluteSDSize > 0 { + absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + } + var ( + dacl *ACL + sacl *ACL + owner *SID + group *SID + ) + if daclSize > 0 { + dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + } + if saclSize > 0 { + sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + } + if ownerSize > 0 { + owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + } + if groupSize > 0 { + group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + } + err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, + dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + return +} + +// ToSelfRelative converts an absolute security descriptor into a self-relative one. +func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := absoluteSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE != 0 { + err = ERROR_INVALID_PARAMETER + return + } + var selfRelativeSDSize uint32 + err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeSelfRelativeSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if selfRelativeSDSize > 0 { + selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) + } + err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) + return +} + +func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } + + var src []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + h.Data = unsafe.Pointer(selfRelativeSD) + h.Len = sdLen + h.Cap = sdLen + + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + + copy(dst, src) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) +} + +// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a +// self-relative security descriptor object allocated on the Go heap. +func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetSecurityInfo queries the security information for a given handle and returns the self-relative security +// descriptor result on the Go heap. 
+func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security +// descriptor result on the Go heap. +func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and +// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor +// result on the Go heap. +func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + var winHeapSDSize uint32 + var firstAccessEntry *EXPLICIT_ACCESS + if len(accessEntries) > 0 { + firstAccessEntry = &accessEntries[0] + } + var firstAuditEntry *EXPLICIT_ACCESS + if len(auditEntries) > 0 { + firstAuditEntry = &auditEntries[0] + } + err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// NewSecurityDescriptor creates and initializes a new absolute security descriptor. +func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + absoluteSD = &SECURITY_DESCRIPTOR{} + err = initializeSecurityDescriptor(absoluteSD, 1) + return +} + +// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. +// Both explicitEntries and mergedACL are optional and can be nil. +func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { + var firstExplicitEntry *EXPLICIT_ACCESS + if len(explicitEntries) > 0 { + firstExplicitEntry = &explicitEntries[0] + } + var winHeapACL *ACL + err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) + aclBytes := make([]byte, winHeapACL.aclSize) + copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)]) + return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 000000000..b269850d0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,237 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SC_ACTION_NONE = 0 + SC_ACTION_RESTART = 1 + SC_ACTION_REBOOT = 2 + SC_ACTION_RUN_COMMAND = 3 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + SERVICE_ACCEPT_PRESHUTDOWN = 256 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + SERVICE_CONTROL_PRESHUTDOWN = 15 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 + SERVICE_CONFIG_SERVICE_SID_INFO = 5 + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 + SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 + SERVICE_CONFIG_TRIGGER_INFO = 8 + SERVICE_CONFIG_PREFERRED_NODE = 9 + SERVICE_CONFIG_LAUNCH_PROTECTED = 12 + + SERVICE_SID_TYPE_NONE = 0 + SERVICE_SID_TYPE_UNRESTRICTED = 1 + SERVICE_SID_TYPE_RESTRICTED = 
2 | SERVICE_SID_TYPE_UNRESTRICTED + + SC_ENUM_PROCESS_INFO = 0 + + SERVICE_NOTIFY_STATUS_CHANGE = 2 + SERVICE_NOTIFY_STOPPED = 0x00000001 + SERVICE_NOTIFY_START_PENDING = 0x00000002 + SERVICE_NOTIFY_STOP_PENDING = 0x00000004 + SERVICE_NOTIFY_RUNNING = 0x00000008 + SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 + SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 + SERVICE_NOTIFY_PAUSED = 0x00000040 + SERVICE_NOTIFY_CREATED = 0x00000080 + SERVICE_NOTIFY_DELETED = 0x00000100 + SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 + + SC_EVENT_DATABASE_CHANGE = 0 + SC_EVENT_PROPERTY_CHANGE = 1 + SC_EVENT_STATUS_CHANGE = 2 +) + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +type SERVICE_NOTIFY struct { + Version uint32 + NotifyCallback uintptr + Context uintptr + NotificationStatus uint32 + ServiceStatus SERVICE_STATUS_PROCESS + NotificationTriggered uint32 + ServiceNames *uint16 +} + +type SERVICE_FAILURE_ACTIONS struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions *SC_ACTION +} + +type SC_ACTION struct { + Type uint32 + Delay uint32 +} + +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys 
ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW +//sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? +//sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? diff --git a/vendor/golang.org/x/sys/windows/setupapierrors_windows.go b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go new file mode 100644 index 000000000..1681810e0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapierrors_windows.go @@ -0,0 +1,100 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import "syscall" + +const ( + ERROR_EXPECTED_SECTION_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE syscall.Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG syscall.Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX syscall.Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP syscall.Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND syscall.Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS syscall.Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED syscall.Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY syscall.Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT syscall.Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY syscall.Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE syscall.Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH syscall.Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS syscall.Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST syscall.Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE syscall.Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED syscall.Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT syscall.Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS syscall.Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING syscall.Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME syscall.Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE syscall.Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER syscall.Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE syscall.Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED 
syscall.Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER syscall.Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS syscall.Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON syscall.Errno = 0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG syscall.Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL syscall.Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER syscall.Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE syscall.Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST syscall.Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET syscall.Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE syscall.Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 syscall.Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT syscall.Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED syscall.Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION syscall.Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR syscall.Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF syscall.Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES syscall.Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE syscall.Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED syscall.Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER syscall.Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED syscall.Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED syscall.Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH syscall.Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE syscall.Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY syscall.Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED syscall.Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE syscall.Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG syscall.Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED syscall.Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW syscall.Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE syscall.Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE syscall.Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED syscall.Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE syscall.Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go new 
file mode 100644 index 000000000..917cc2aae --- /dev/null +++ b/vendor/golang.org/x/sys/windows/str.go @@ -0,0 +1,22 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + itoa(-val) + } + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 000000000..6122f557a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,112 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "bytes" + "strings" + "syscall" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. 
+func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + var s []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + h.Data = unsafe.Pointer(p) + h.Len = n + h.Cap = n + + return string(s) +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 000000000..1215b2ae2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1672 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. + +package windows + +import ( + errorspkg "errors" + "fmt" + "runtime" + "sync" + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +type Handle uintptr +type HWND uintptr + +const ( + InvalidHandle = ^Handle(0) + InvalidHWND = ^HWND(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. + FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 + + // Flags for LockFileEx. + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 + + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. 
If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func UTF16FromString(s string) ([]uint16, error) {
+	for i := 0; i < len(s); i++ {
+		if s[i] == 0 {
+			return nil, syscall.EINVAL
+		}
+	}
+	return utf16.Encode([]rune(s + "\x00")), nil
+}
+
+// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
+// with a terminating NUL and any bytes after the NUL removed.
+func UTF16ToString(s []uint16) string {
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead.
+// If s contains a NUL byte this function panics instead of
+// returning an error.
+func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] }
+
+// UTF16PtrFromString returns pointer to the UTF-16 encoding of
+// the UTF-8 string s, with a terminating NUL added. If s
+// contains a NUL byte at any location, it returns (nil, syscall.EINVAL).
+func UTF16PtrFromString(s string) (*uint16, error) {
+	a, err := UTF16FromString(s)
+	if err != nil {
+		return nil, err
+	}
+	return &a[0], nil
+}
+
+// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string.
+// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated
+// at a zero word; if the zero word is not present, the program may crash.
+func UTF16PtrToString(p *uint16) string {
+	if p == nil {
+		return ""
+	}
+	if *p == 0 {
+		return ""
+	}
+
+	// Find NUL terminator.
+	n := 0
+	for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
+		ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
+	}
+
+	var s []uint16
+	h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
+	h.Data = unsafe.Pointer(p)
+	h.Len = n
+	h.Cap = n
+
+	return string(utf16.Decode(s))
+}
+
+func Getpagesize() int { return 4096 }
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallback(fn interface{}) uintptr {
+	return syscall.NewCallback(fn)
+}
+
+// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW +//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW +//sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState +//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) +//sys SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) +//sys GetCurrentDirectory(buflen uint32, buf 
*uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW +//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList +//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList +//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW +//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow +//sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath 
+//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock +//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize 
uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW +//sys FindNextChangeNotification(handle Handle) (err error) +//sys FindCloseChangeNotification(handle Handle) (err error) +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore +//sys CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) = crypt32.CertDuplicateCertificateContext +//sys PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain 
*CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW +//sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension +//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore +//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore +//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey +//sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject +//sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject +//sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData +//sys CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData +//sys WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue +//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys 
SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW +//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW +//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex +//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx +//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW +//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject +//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject +//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode +//sys ResumeThread(thread Handle) (ret uint32, err error) 
[failretval==0xffffffff] = kernel32.ResumeThread +//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass +//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject +//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) +//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) +//sys GetProcessId(process Handle) (id uint32, err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW +//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost +//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) +//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW +//sys FindVolumeClose(findVolume Handle) (err error) +//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW +//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys 
GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW +//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW +//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters +//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters +//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString +//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 +//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid +//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject +//sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages +//sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages +//sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages +//sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource + +// Process Status API (PSAPI) +//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses + +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = 
ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess + +// syscall interface implementation for other packages + +// GetCurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentProcess for the same Handle without the nil +// error. +func GetCurrentProcess() (Handle, error) { + return CurrentProcess(), nil +} + +// CurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } + +// GetCurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentThread for the same Handle without the nil +// error. +func GetCurrentThread() (Handle, error) { + return CurrentThread(), nil +} + +// CurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. 
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + var attrs uint32 = FILE_ATTRIBUTE_NORMAL + if perm&S_IWRITE == 0 { + attrs = FILE_ATTRIBUTE_READONLY + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + if raceenabled { + if done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + if raceenabled && done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(done)) + } + return int(done), nil +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + CloseOnExec(r) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + 
} + return string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func DurationSinceBoot() time.Duration { + return time.Duration(getTickCount64()) * time.Millisecond +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return 
procSetFileCompletionNotificationModes.Find() +} + +func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + // Every other win32 array API takes arguments as "pointer, count", except for this function. So we + // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore + // trivially stub this ourselves. + + var handlePtr *Handle + if len(handles) > 0 { + handlePtr = &handles[0] + } + return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) +} + +// net api calls + +const socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto +//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, 
tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. 
+var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [100]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type RawSockaddrUnix struct { + Family uint16 + Path [UNIX_PATH_MAX]int8 +} + +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, syscall.EINVAL + } + if n == len(sa.raw.Path) && name[0] != '@' { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := int32(2) + if n > 0 { + sl += int32(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. 
+ n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + rsa, l, err := to.sockaddr() + if err != nil { + return err + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } 
+ return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. 
+type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } + +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) + n = int(n32) + if err != nil { + return + } + from, err = rsa.Sockaddr() + return +} + +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { + ptr, l, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, int32(flags), ptr, l) +} + +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. +// Use SetsockoptLinger instead. + +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { + v := int32(0) + l := int32(unsafe.Sizeof(v)) + err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l) + return int(v), err +} + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(GetCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. 
+ var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. 
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} + +// GUIDFromString parses a string in the form of +// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. +func GUIDFromString(str string) (GUID, error) { + guid := GUID{} + str16, err := syscall.UTF16PtrFromString(str) + if err != nil { + return guid, err + } + err = clsidFromString(str16, &guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// GenerateGUID creates a new random GUID. +func GenerateGUID() (GUID, error) { + guid := GUID{} + err := coCreateGuid(&guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// String returns the canonical string form of the GUID, +// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +func (guid GUID) String() string { + var str [100]uint16 + chars := stringFromGUID2(&guid, &str[0], int32(len(str))) + if chars <= 1 { + return "" + } + return string(utf16.Decode(str[:chars-1])) +} + +// KnownFolderPath returns a well-known folder path for the current user, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + return Token(0).KnownFolderPath(folderID, flags) +} + +// KnownFolderPath returns a well-known folder path for the user token, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + var p *uint16 + err := shGetKnownFolderPath(folderID, flags, t, &p) + if err != nil { + return "", err + } + defer CoTaskMemFree(unsafe.Pointer(p)) + return UTF16PtrToString(p), nil +} + +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics but is affected by the application compatibility layer. +func RtlGetVersion() *OsVersionInfoEx { + info := &OsVersionInfoEx{} + info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) + // According to documentation, this function always succeeds. + // The function doesn't even check the validity of the + // osVersionInfoSize member. Disassembling ntdll.dll indicates + // that the documentation is indeed correct about that. 
+ _ = rtlGetVersion(info) + return info +} + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. +func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} + +// GetProcessPreferredUILanguages retrieves the process preferred UI languages. +func GetProcessPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getProcessPreferredUILanguages) +} + +// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread. +func GetThreadPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getThreadPreferredUILanguages) +} + +// GetUserPreferredUILanguages retrieves information about the user preferred UI languages. +func GetUserPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getUserPreferredUILanguages) +} + +// GetSystemPreferredUILanguages retrieves the system preferred UI languages. +func GetSystemPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getSystemPreferredUILanguages) +} + +func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) { + size := uint32(128) + for { + var numLanguages uint32 + buf := make([]uint16, size) + err := f(flags, &numLanguages, &buf[0], &size) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + buf = buf[:size] + if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0" + return []string{}, nil + } + if buf[len(buf)-1] == 0 { + buf = buf[:len(buf)-1] // remove terminating null + } + languages := make([]string, 0, numLanguages) + from := 0 + for i, c := range buf { + if c == 0 { + languages = append(languages, string(utf16.Decode(buf[from:i]))) + from = i + 1 + } + } + return languages, nil + } +} + +func SetConsoleCursorPosition(console Handle, position Coord) error { + return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) +} + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
+func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. +func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 000000000..17f03312d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,2775 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "net" + "syscall" + "unsafe" +) + +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + +const ( + // Invented values to support what package os expects. 
+ O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + FILE_LIST_DIRECTORY = 0x00000001 + FILE_APPEND_DATA = 0x00000004 + FILE_WRITE_ATTRIBUTES = 0x00000100 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 + FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 + FILE_FLAG_OPEN_NO_RECALL = 0x00100000 + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_SESSION_AWARE = 0x00800000 + FILE_FLAG_POSIX_SEMANTICS = 0x01000000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 + FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 + FILE_FLAG_RANDOM_ACCESS = 0x10000000 + FILE_FLAG_NO_BUFFERING = 0x20000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_FLAG_WRITE_THROUGH = 0x80000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_COMPUTERNAME_LENGTH = 15 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_ABANDONED = 
0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. + CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // attributes for ProcThreadAttributeList + PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 + PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 + PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 + PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 + PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 + PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 + PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 + PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // filters for ReadDirectoryChangesW and FindFirstChangeNotificationW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + /* certenrolld_begin -- PROV_RSA_*/ + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 
+ PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + + /* dwFlags definitions for CryptAcquireContext */ + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + /* Flags for PFXImportCertStore */ + CRYPT_EXPORTABLE = 0x00000001 + CRYPT_USER_PROTECTED = 0x00000002 + CRYPT_USER_KEYSET = 0x00001000 + PKCS12_PREFER_CNG_KSP = 0x00000100 + PKCS12_ALWAYS_CNG_KSP = 0x00000200 + PKCS12_ALLOW_OVERWRITE_KEY = 0x00004000 + PKCS12_NO_PERSIST_KEY = 0x00008000 + PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 + + /* Flags for CryptAcquireCertificatePrivateKey */ + CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 + CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 + CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 + CRYPT_ACQUIRE_NO_HEALING = 0x00000008 + CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 + CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 + CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK = 0x00070000 + CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 + CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 + CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 + + /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ + AT_KEYEXCHANGE = 1 + AT_SIGNATURE = 2 + CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF + + /* Default usage match type is AND with value zero */ + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + /* msgAndCertEncodingType values for CertOpenStore function */ + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + /* storeProvider values for CertOpenStore function */ + CERT_STORE_PROV_MSG = 1 + CERT_STORE_PROV_MEMORY = 2 + CERT_STORE_PROV_FILE = 3 + CERT_STORE_PROV_REG = 4 + CERT_STORE_PROV_PKCS7 = 5 + CERT_STORE_PROV_SERIALIZED = 6 + CERT_STORE_PROV_FILENAME_A = 7 + CERT_STORE_PROV_FILENAME_W = 8 + CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W + CERT_STORE_PROV_SYSTEM_A = 9 + CERT_STORE_PROV_SYSTEM_W = 10 + CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W + CERT_STORE_PROV_COLLECTION = 11 + CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 + CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 + CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W + CERT_STORE_PROV_PHYSICAL_W = 14 + CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W + CERT_STORE_PROV_SMART_CARD_W = 15 + CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W + CERT_STORE_PROV_LDAP_W = 16 + CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W + CERT_STORE_PROV_PKCS12 = 17 + + /* store characteristics (low WORD of flag) for CertOpenStore function */ + CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 + CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + CERT_STORE_DELETE_FLAG = 0x00000010 + CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 + CERT_STORE_SHARE_STORE_FLAG = 0x00000040 + CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 + CERT_STORE_MANIFOLD_FLAG = 0x00000100 + CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 + CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 + CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 + CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 + CERT_STORE_CREATE_NEW_FLAG = 0x00002000 + CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 + CERT_STORE_READONLY_FLAG = 0x00008000 + + /* store locations (high WORD of flag) for CertOpenStore function */ + CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 + CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 + CERT_SYSTEM_STORE_CURRENT_SERVICE = 
0x00040000 + CERT_SYSTEM_STORE_SERVICES = 0x00050000 + CERT_SYSTEM_STORE_USERS = 0x00060000 + CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 + CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 + CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 + + /* Miscellaneous high-WORD flags for CertOpenStore function */ + CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 + CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 + CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 + CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 + CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 + CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 + CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 + CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 + CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 + CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 + CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 + + /* addDisposition values for CertAddCertificateContextToStore function */ + CERT_STORE_ADD_NEW = 1 + CERT_STORE_ADD_USE_EXISTING = 2 + CERT_STORE_ADD_REPLACE_EXISTING = 3 + CERT_STORE_ADD_ALWAYS = 4 + CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 + CERT_STORE_ADD_NEWER = 6 + CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 + + /* ErrorStatus values for CertTrustStatus struct */ + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 + CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 + CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 + CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 + CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + /* InfoStatus values for CertTrustStatus struct */ + CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 + CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 + CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 + CERT_TRUST_IS_SELF_SIGNED = 0x00000008 + CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 + CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400 + CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 + CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 + CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 + CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 + CERT_TRUST_IS_CA_TRUSTED = 0x00004000 + CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + + /* Certificate Information Flags */ + CERT_INFO_VERSION_FLAG = 1 + CERT_INFO_SERIAL_NUMBER_FLAG = 2 + CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 + CERT_INFO_ISSUER_FLAG = 4 + CERT_INFO_NOT_BEFORE_FLAG = 5 + CERT_INFO_NOT_AFTER_FLAG = 6 + CERT_INFO_SUBJECT_FLAG = 7 + CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 + CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 + 
CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 + CERT_INFO_EXTENSION_FLAG = 11 + + /* dwFindType for CertFindCertificateInStore */ + CERT_COMPARE_MASK = 0xFFFF + CERT_COMPARE_SHIFT = 16 + CERT_COMPARE_ANY = 0 + CERT_COMPARE_SHA1_HASH = 1 + CERT_COMPARE_NAME = 2 + CERT_COMPARE_ATTR = 3 + CERT_COMPARE_MD5_HASH = 4 + CERT_COMPARE_PROPERTY = 5 + CERT_COMPARE_PUBLIC_KEY = 6 + CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH + CERT_COMPARE_NAME_STR_A = 7 + CERT_COMPARE_NAME_STR_W = 8 + CERT_COMPARE_KEY_SPEC = 9 + CERT_COMPARE_ENHKEY_USAGE = 10 + CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE + CERT_COMPARE_SUBJECT_CERT = 11 + CERT_COMPARE_ISSUER_OF = 12 + CERT_COMPARE_EXISTING = 13 + CERT_COMPARE_SIGNATURE_HASH = 14 + CERT_COMPARE_KEY_IDENTIFIER = 15 + CERT_COMPARE_CERT_ID = 16 + CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 + CERT_COMPARE_PUBKEY_MD5_HASH = 18 + CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 + CERT_COMPARE_HASH_STR = 20 + CERT_COMPARE_HAS_PRIVATE_KEY = 21 + CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) + CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) + CERT_FIND_HASH = CERT_FIND_SHA1_HASH + CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) + CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) + CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME<> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 + + // The Microsoft documentation for this struct¹ describes three additional + // fields: dwFileType, dwCreatorType, and wFinderFlags. However, those fields + // are empirically only present in the macOS port of the Win32 API,² and thus + // not needed for binaries built for Windows. + // + // ¹ https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-win32_find_dataw describe + // ² https://golang.org/issue/42637#issuecomment-760715755. +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. 
+ copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type StartupInfoEx struct { + StartupInfo + ProcThreadAttributeList *ProcThreadAttributeList +} + +// ProcThreadAttributeList is a placeholder type to represent a PROC_THREAD_ATTRIBUTE_LIST. +// +// To create a *ProcThreadAttributeList, use NewProcThreadAttributeList, update +// it with ProcThreadAttributeListContainer.Update, free its memory using +// ProcThreadAttributeListContainer.Delete, and access the list itself using +// ProcThreadAttributeListContainer.List. +type ProcThreadAttributeList struct{} + +type ProcThreadAttributeListContainer struct { + data *ProcThreadAttributeList + heapAllocations []uintptr +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. 
+ +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_NETBIOS = 17 + AF_INET6 = 23 + AF_IRDA = 26 + AF_BTH = 32 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_RDM = 4 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_ICMP = 1 + IPPROTO_IGMP = 2 + BTHPROTO_RFCOMM = 3 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + IPPROTO_IPV6 = 41 + IPPROTO_ICMPV6 = 58 + IPPROTO_RM = 113 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_RCVTIMEO = 0x1006 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_HDRINCL = 0x2 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + IP_PKTINFO = 0x13 + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_PKTINFO = 0x13 + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Flags for WSASocket +const ( + WSA_FLAG_OVERLAPPED = 0x01 + WSA_FLAG_MULTIPOINT_C_ROOT = 0x02 + WSA_FLAG_MULTIPOINT_C_LEAF = 0x04 + WSA_FLAG_MULTIPOINT_D_ROOT = 0x08 + WSA_FLAG_MULTIPOINT_D_LEAF = 0x10 + WSA_FLAG_ACCESS_SYSTEM_SECURITY = 0x40 + WSA_FLAG_NO_HANDLE_INHERIT = 0x80 + WSA_FLAG_REGISTERED_IO = 0x100 +) + +// Invented values to support what package os expects. 
+const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + // flags inside DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 + Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. 
+ +type SockaddrGen [24]byte + +type InterfaceInfo struct { + Flags uint32 + Address SockaddrGen + BroadcastAddress SockaddrGen + Netmask SockaddrGen +} + +type IpAddressString struct { + String [16]byte +} + +type IpMaskString IpAddressString + +type IpAddrString struct { + Next *IpAddrString + IpAddress IpAddressString + IpMask IpMaskString + Context uint32 +} + +const MAX_ADAPTER_NAME_LENGTH = 256 +const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +const MAX_ADAPTER_ADDRESS_LENGTH = 8 + +type IpAdapterInfo struct { + Next *IpAdapterInfo + ComboIndex uint32 + AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte + Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte + AddressLength uint32 + Address [MAX_ADAPTER_ADDRESS_LENGTH]byte + Index uint32 + Type uint32 + DhcpEnabled uint32 + CurrentIpAddress *IpAddrString + IpAddressList IpAddrString + GatewayList IpAddrString + DhcpServer IpAddrString + HaveWins bool + PrimaryWinsServer IpAddrString + SecondaryWinsServer IpAddrString + LeaseObtained int64 + LeaseExpires int64 +} + +const MAXLEN_PHYSADDR = 8 +const MAX_INTERFACE_NAME_LEN = 256 +const MAXLEN_IFDESCR = 256 + +type MibIfRow struct { + Name [MAX_INTERFACE_NAME_LEN]uint16 + Index uint32 + Type uint32 + Mtu uint32 + Speed uint32 + PhysAddrLen uint32 + PhysAddr [MAXLEN_PHYSADDR]byte + AdminStatus uint32 + OperStatus uint32 + LastChange uint32 + InOctets uint32 + InUcastPkts uint32 + InNUcastPkts uint32 + InDiscards uint32 + InErrors uint32 + InUnknownProtos uint32 + OutOctets uint32 + OutUcastPkts uint32 + OutNUcastPkts uint32 + OutDiscards uint32 + OutErrors uint32 + OutQLen uint32 + DescrLen uint32 + Descr [MAXLEN_IFDESCR]byte +} + +type CertInfo struct { + Version uint32 + SerialNumber CryptIntegerBlob + SignatureAlgorithm CryptAlgorithmIdentifier + Issuer CertNameBlob + NotBefore Filetime + NotAfter Filetime + Subject CertNameBlob + SubjectPublicKeyInfo CertPublicKeyInfo + IssuerUniqueId CryptBitBlob + SubjectUniqueId CryptBitBlob + CountExtensions uint32 + Extensions *CertExtension +} + +type CertExtension struct { + ObjId *byte + Critical int32 + Value CryptObjidBlob +} + +type CryptAlgorithmIdentifier struct { + ObjId *byte + Parameters CryptObjidBlob +} + +type CertPublicKeyInfo struct { + Algorithm CryptAlgorithmIdentifier + PublicKey CryptBitBlob +} + +type DataBlob struct { + Size uint32 + Data *byte +} +type CryptIntegerBlob DataBlob +type CryptUintBlob DataBlob +type CryptObjidBlob DataBlob +type CertNameBlob DataBlob +type CertRdnValueBlob DataBlob +type CertBlob DataBlob +type CrlBlob DataBlob +type CryptDataBlob DataBlob +type CryptHashBlob DataBlob +type CryptDigestBlob DataBlob +type CryptDerBlob DataBlob +type CryptAttrBlob DataBlob + +type CryptBitBlob struct { + Size uint32 + Data *byte + UnusedBits uint32 +} + +type CertContext struct { + EncodingType uint32 + EncodedCert *byte + Length uint32 + CertInfo *CertInfo + Store Handle +} + +type CertChainContext struct { + Size uint32 + TrustStatus CertTrustStatus + ChainCount uint32 + Chains **CertSimpleChain + LowerQualityChainCount uint32 + LowerQualityChains **CertChainContext + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertTrustListInfo struct { + // Not implemented +} + +type CertSimpleChain struct { + Size uint32 + TrustStatus CertTrustStatus + NumElements uint32 + Elements **CertChainElement + TrustListInfo *CertTrustListInfo + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertChainElement struct { + Size uint32 + CertContext *CertContext + TrustStatus 
CertTrustStatus + RevocationInfo *CertRevocationInfo + IssuanceUsage *CertEnhKeyUsage + ApplicationUsage *CertEnhKeyUsage + ExtendedErrorInfo *uint16 +} + +type CertRevocationCrlInfo struct { + // Not implemented +} + +type CertRevocationInfo struct { + Size uint32 + RevocationResult uint32 + RevocationOid *byte + OidSpecificInfo Pointer + HasFreshnessTime uint32 + FreshnessTime uint32 + CrlInfo *CertRevocationCrlInfo +} + +type CertTrustStatus struct { + ErrorStatus uint32 + InfoStatus uint32 +} + +type CertUsageMatch struct { + Type uint32 + Usage CertEnhKeyUsage +} + +type CertEnhKeyUsage struct { + Length uint32 + UsageIdentifiers **byte +} + +type CertChainPara struct { + Size uint32 + RequestedUsage CertUsageMatch + RequstedIssuancePolicy CertUsageMatch + URLRetrievalTimeout uint32 + CheckRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 + CacheResync *Filetime +} + +type CertChainPolicyPara struct { + Size uint32 + Flags uint32 + ExtraPolicyPara Pointer +} + +type SSLExtraCertChainPolicyPara struct { + Size uint32 + AuthType uint32 + Checks uint32 + ServerName *uint16 +} + +type CertChainPolicyStatus struct { + Size uint32 + Error uint32 + ChainIndex uint32 + ElementIndex uint32 + ExtraPolicyStatus Pointer +} + +type CertPolicyInfo struct { + Identifier *byte + CountQualifiers uint32 + Qualifiers *CertPolicyQualifierInfo +} + +type CertPoliciesInfo struct { + Count uint32 + PolicyInfos *CertPolicyInfo +} + +type CertPolicyQualifierInfo struct { + // Not implemented +} + +type CertStrongSignPara struct { + Size uint32 + InfoChoice uint32 + InfoOrSerializedInfoOrOID unsafe.Pointer +} + +type CryptProtectPromptStruct struct { + Size uint32 + PromptFlags uint32 + App HWND + Prompt *uint16 +} + +type CertChainFindByIssuerPara struct { + Size uint32 + UsageIdentifier *byte + KeySpec uint32 + AcquirePrivateKeyFlags uint32 + IssuerCount uint32 + Issuer Pointer + FindCallback Pointer + FindArg Pointer + IssuerChainIndex *uint32 + IssuerElementIndex *uint32 +} + +type WinTrustData struct { + Size uint32 + PolicyCallbackData uintptr + SIPClientData uintptr + UIChoice uint32 + RevocationChecks uint32 + UnionChoice uint32 + FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer + StateAction uint32 + StateData Handle + URLReference *uint16 + ProvFlags uint32 + UIContext uint32 + SignatureSettings *WinTrustSignatureSettings +} + +type WinTrustFileInfo struct { + Size uint32 + FilePath *uint16 + File Handle + KnownSubject *GUID +} + +type WinTrustSignatureSettings struct { + Size uint32 + Index uint32 + Flags uint32 + SecondarySigs uint32 + VerifiedSigIndex uint32 + CryptoPolicy *CertStrongSignPara +} + +const ( + // do not reorder + HKEY_CLASSES_ROOT = 0x80000000 + iota + HKEY_CURRENT_USER + HKEY_LOCAL_MACHINE + HKEY_USERS + HKEY_PERFORMANCE_DATA + HKEY_CURRENT_CONFIG + HKEY_DYN_DATA + + KEY_QUERY_VALUE = 1 + KEY_SET_VALUE = 2 + KEY_CREATE_SUB_KEY = 4 + KEY_ENUMERATE_SUB_KEYS = 8 + KEY_NOTIFY = 16 + KEY_CREATE_LINK = 32 + KEY_WRITE = 0x20006 + KEY_EXECUTE = 0x20019 + KEY_READ = 0x20019 + KEY_WOW64_64KEY = 0x0100 + KEY_WOW64_32KEY = 0x0200 + KEY_ALL_ACCESS = 0xf003f +) + +const ( + // do not reorder + REG_NONE = iota + REG_SZ + REG_EXPAND_SZ + REG_BINARY + REG_DWORD_LITTLE_ENDIAN + REG_DWORD_BIG_ENDIAN + REG_LINK + REG_MULTI_SZ + REG_RESOURCE_LIST + REG_FULL_RESOURCE_DESCRIPTOR + REG_RESOURCE_REQUIREMENTS_LIST + REG_QWORD_LITTLE_ENDIAN + REG_DWORD = REG_DWORD_LITTLE_ENDIAN + REG_QWORD = REG_QWORD_LITTLE_ENDIAN +) + +const ( + EVENT_MODIFY_STATE = 0x0002 + EVENT_ALL_ACCESS = 
STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + MUTANT_QUERY_STATE = 0x0001 + MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE + + SEMAPHORE_MODIFY_STATE = 0x0002 + SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + TIMER_QUERY_STATE = 0x0001 + TIMER_MODIFY_STATE = 0x0002 + TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE + + MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE + MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS + + CREATE_EVENT_MANUAL_RESET = 0x1 + CREATE_EVENT_INITIAL_SET = 0x2 + CREATE_MUTEX_INITIAL_OWNER = 0x1 +) + +type AddrinfoW struct { + Flags int32 + Family int32 + Socktype int32 + Protocol int32 + Addrlen uintptr + Canonname *uint16 + Addr uintptr + Next *AddrinfoW +} + +const ( + AI_PASSIVE = 1 + AI_CANONNAME = 2 + AI_NUMERICHOST = 4 +) + +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +var WSAID_CONNECTEX = GUID{ + 0x25a207b9, + 0xddf3, + 0x4660, + [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, +} + +var WSAID_WSASENDMSG = GUID{ + 0xa441e712, + 0x754f, + 0x43ca, + [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = GUID{ + 0xf689d7c8, + 0x6f1f, + 0x436b, + [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +const ( + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + FILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +const ( + WSAPROTOCOL_LEN = 255 + MAX_PROTOCOL_CHAIN = 7 + BASE_PROTOCOL = 1 + LAYERED_PROTOCOL = 0 + + XP1_CONNECTIONLESS = 0x00000001 + XP1_GUARANTEED_DELIVERY = 0x00000002 + XP1_GUARANTEED_ORDER = 0x00000004 + XP1_MESSAGE_ORIENTED = 0x00000008 + XP1_PSEUDO_STREAM = 0x00000010 + XP1_GRACEFUL_CLOSE = 0x00000020 + XP1_EXPEDITED_DATA = 0x00000040 + XP1_CONNECT_DATA = 0x00000080 + XP1_DISCONNECT_DATA = 0x00000100 + XP1_SUPPORT_BROADCAST = 0x00000200 + XP1_SUPPORT_MULTIPOINT = 0x00000400 + XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 + XP1_MULTIPOINT_DATA_PLANE = 0x00001000 + XP1_QOS_SUPPORTED = 0x00002000 + XP1_UNI_SEND = 0x00008000 + XP1_UNI_RECV = 0x00010000 + XP1_IFS_HANDLES = 0x00020000 + XP1_PARTIAL_MESSAGE = 0x00040000 + XP1_SAN_SUPPORT_SDP = 0x00080000 + + PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 + PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 + PFL_HIDDEN = 0x00000004 + PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 + PFL_NETWORKDIRECT_PROVIDER = 0x00000010 +) + +type WSAProtocolInfo struct { + ServiceFlags1 uint32 + ServiceFlags2 uint32 + ServiceFlags3 uint32 + ServiceFlags4 uint32 + ProviderFlags uint32 + ProviderId GUID + CatalogEntryId uint32 + ProtocolChain WSAProtocolChain + Version int32 + AddressFamily int32 + MaxSockAddr int32 + MinSockAddr int32 + SocketType int32 + Protocol int32 + ProtocolMaxOffset int32 + NetworkByteOrder int32 + SecurityScheme int32 + MessageSize uint32 + ProviderReserved uint32 + ProtocolName [WSAPROTOCOL_LEN + 1]uint16 +} + +type WSAProtocolChain struct { + ChainLen int32 + ChainEntries [MAX_PROTOCOL_CHAIN]uint32 +} + +type TCPKeepalive struct { + OnOff uint32 + Time uint32 + Interval uint32 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // 
GenericReparseBuffer + reparseBuffer byte +} + +const ( + FSCTL_GET_REPARSE_POINT = 0x900A8 + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 +) + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 +) + +// For MessageBox() +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 + MB_APPLMODAL = 0x00000000 + MB_SYSTEMMODAL = 0x00001000 + MB_TASKMODAL = 0x00002000 + MB_HELP = 0x00004000 + MB_NOFOCUS = 0x00008000 + MB_SETFOREGROUND = 0x00010000 + MB_DEFAULT_DESKTOP_ONLY = 0x00020000 + MB_TOPMOST = 0x00040000 + MB_RIGHT = 0x00080000 + MB_RTLREADING = 0x00100000 + MB_SERVICE_NOTIFICATION = 0x00200000 +) + +const ( + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. 
+func (addr *SocketAddress) IP() net.IP { + if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { + return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { + return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } + return nil +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. */ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +// Console related constants used for the mode parameter to SetConsoleMode. See +// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. + +const ( + ENABLE_PROCESSED_INPUT = 0x1 + ENABLE_LINE_INPUT = 0x2 + ENABLE_ECHO_INPUT = 0x4 + ENABLE_WINDOW_INPUT = 0x8 + ENABLE_MOUSE_INPUT = 0x10 + ENABLE_INSERT_MODE = 0x20 + ENABLE_QUICK_EDIT_MODE = 0x40 + ENABLE_EXTENDED_FLAGS = 0x80 + ENABLE_AUTO_POSITION = 0x100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 + + ENABLE_PROCESSED_OUTPUT = 0x1 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + DISABLE_NEWLINE_AUTO_RETURN = 0x8 + ENABLE_LVB_GRID_WORLDWIDE = 0x10 +) + +type Coord struct { + X int16 + Y int16 +} + +type SmallRect struct { + Left int16 + Top int16 + Right int16 + Bottom int16 +} + +// Used with GetConsoleScreenBuffer to retrieve information about a console +// screen buffer. See +// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +// for details. 
+ +type ConsoleScreenBufferInfo struct { + Size Coord + CursorPosition Coord + Attributes uint16 + Window SmallRect + MaximumWindowSize Coord +} + +const UNIX_PATH_MAX = 108 // defined in afunix.h + +const ( + // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags + JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 + JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 + JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 + JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 + JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 + JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 + JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 + JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 + JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 + JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 + JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 + JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 + JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 + JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 +) + +type IO_COUNTERS struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { + BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION + IoInfo IO_COUNTERS + ProcessMemoryLimit uintptr + JobMemoryLimit uintptr + PeakProcessMemoryUsed uintptr + PeakJobMemoryUsed uintptr +} + +const ( + // UIRestrictionsClass + JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 + JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 + JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 + JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 + JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 + JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 + JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 + JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 +) + +type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { + UIRestrictionsClass uint32 +} + +const ( + // JobObjectInformationClass + JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicLimitInformation = 2 + JobObjectBasicUIRestrictions = 4 + JobObjectCpuRateControlInformation = 15 + JobObjectEndOfJobTimeInformation = 6 + JobObjectExtendedLimitInformation = 9 + JobObjectGroupInformation = 11 + JobObjectGroupInformationEx = 14 + JobObjectLimitViolationInformation2 = 35 + JobObjectNetRateControlInformation = 32 + JobObjectNotificationLimitInformation = 12 + JobObjectNotificationLimitInformation2 = 34 + JobObjectSecurityLimitInformation = 5 +) + +const ( + KF_FLAG_DEFAULT = 0x00000000 + KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 + KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 + KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 + KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 + KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 + KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 + KF_FLAG_CREATE = 0x00008000 + KF_FLAG_DONT_VERIFY = 0x00004000 + KF_FLAG_DONT_UNEXPAND = 0x00002000 + KF_FLAG_NO_ALIAS = 0x00001000 + KF_FLAG_INIT = 0x00000800 + KF_FLAG_DEFAULT_PATH = 0x00000400 + KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 + KF_FLAG_SIMPLE_IDLIST = 0x00000100 + KF_FLAG_ALIAS_ONLY = 0x80000000 +) + +type OsVersionInfoEx struct { + osVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformId uint32 + CsdVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + _ byte +} + +const ( + EWX_LOGOFF = 0x00000000 + EWX_SHUTDOWN = 0x00000001 + EWX_REBOOT = 0x00000002 + EWX_FORCE = 0x00000004 + 
EWX_POWEROFF = 0x00000008 + EWX_FORCEIFHUNG = 0x00000010 + EWX_QUICKRESOLVE = 0x00000020 + EWX_RESTARTAPPS = 0x00000040 + EWX_HYBRID_SHUTDOWN = 0x00400000 + EWX_BOOTOPTIONS = 0x01000000 + + SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000 + SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000 + SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000 + SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000 + SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000 + SHTDN_REASON_FLAG_PLANNED = 0x80000000 + SHTDN_REASON_MAJOR_OTHER = 0x00000000 + SHTDN_REASON_MAJOR_NONE = 0x00000000 + SHTDN_REASON_MAJOR_HARDWARE = 0x00010000 + SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000 + SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000 + SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 + SHTDN_REASON_MAJOR_SYSTEM = 0x00050000 + SHTDN_REASON_MAJOR_POWER = 0x00060000 + SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000 + SHTDN_REASON_MINOR_OTHER = 0x00000000 + SHTDN_REASON_MINOR_NONE = 0x000000ff + SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001 + SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 + SHTDN_REASON_MINOR_UPGRADE = 0x00000003 + SHTDN_REASON_MINOR_RECONFIG = 0x00000004 + SHTDN_REASON_MINOR_HUNG = 0x00000005 + SHTDN_REASON_MINOR_UNSTABLE = 0x00000006 + SHTDN_REASON_MINOR_DISK = 0x00000007 + SHTDN_REASON_MINOR_PROCESSOR = 0x00000008 + SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009 + SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a + SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b + SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c + SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d + SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e + SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F + SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010 + SHTDN_REASON_MINOR_HOTFIX = 0x00000011 + SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012 + SHTDN_REASON_MINOR_SECURITY = 0x00000013 + SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014 + SHTDN_REASON_MINOR_WMI = 0x00000015 + SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016 + SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017 + SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018 + SHTDN_REASON_MINOR_MMC = 0x00000019 + SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a + SHTDN_REASON_MINOR_TERMSRV = 0x00000020 + SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021 + SHTDN_REASON_MINOR_DC_DEMOTION = 0x00000022 + SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE + SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED + SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff + + SHUTDOWN_NORETRY = 0x1 +) + +// Flags used for GetModuleHandleEx +const ( + GET_MODULE_HANDLE_EX_FLAG_PIN = 1 + GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 +) + +// MUI function flag values +const ( + MUI_LANGUAGE_ID = 0x4 + MUI_LANGUAGE_NAME = 0x8 + MUI_MERGE_SYSTEM_FALLBACK = 0x10 + MUI_MERGE_USER_FALLBACK = 0x20 + MUI_UI_FALLBACK = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK + MUI_THREAD_LANGUAGES = 0x40 + MUI_CONSOLE_FILTER = 0x100 + MUI_COMPLEX_SCRIPT_FILTER = 0x200 + MUI_RESET_FILTERS = 0x001 + MUI_USER_PREFERRED_UI_LANGUAGES = 0x10 + MUI_USE_INSTALLED_LANGUAGES = 0x20 + MUI_USE_SEARCH_ALL_LANGUAGES = 0x40 + MUI_LANG_NEUTRAL_PE_FILE = 0x100 + MUI_NON_LANG_NEUTRAL_FILE = 0x200 + MUI_MACHINE_LANGUAGE_SETTINGS = 0x400 + MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL = 0x001 + MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002 + MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI = 0x004 + MUI_QUERY_TYPE = 0x001 + MUI_QUERY_CHECKSUM = 0x002 + MUI_QUERY_LANGUAGE_NAME = 0x004 + MUI_QUERY_RESOURCE_TYPES = 0x008 + MUI_FILEINFO_VERSION = 0x001 + + MUI_FULL_LANGUAGE = 0x01 + 
MUI_PARTIAL_LANGUAGE = 0x02 + MUI_LIP_LANGUAGE = 0x04 + MUI_LANGUAGE_INSTALLED = 0x20 + MUI_LANGUAGE_LICENSED = 0x40 +) + +// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx +const ( + FileBasicInfo = 0 + FileStandardInfo = 1 + FileNameInfo = 2 + FileRenameInfo = 3 + FileDispositionInfo = 4 + FileAllocationInfo = 5 + FileEndOfFileInfo = 6 + FileStreamInfo = 7 + FileCompressionInfo = 8 + FileAttributeTagInfo = 9 + FileIdBothDirectoryInfo = 10 + FileIdBothDirectoryRestartInfo = 11 + FileIoPriorityHintInfo = 12 + FileRemoteProtocolInfo = 13 + FileFullDirectoryInfo = 14 + FileFullDirectoryRestartInfo = 15 + FileStorageInfo = 16 + FileAlignmentInfo = 17 + FileIdInfo = 18 + FileIdExtdDirectoryInfo = 19 + FileIdExtdDirectoryRestartInfo = 20 + FileDispositionInfoEx = 21 + FileRenameInfoEx = 22 + FileCaseSensitiveInfo = 23 + FileNormalizedNameInfo = 24 +) + +// LoadLibrary flags for determining from where to search for a DLL +const ( + DONT_RESOLVE_DLL_REFERENCES = 0x1 + LOAD_LIBRARY_AS_DATAFILE = 0x2 + LOAD_WITH_ALTERED_SEARCH_PATH = 0x8 + LOAD_IGNORE_CODE_AUTHZ_LEVEL = 0x10 + LOAD_LIBRARY_AS_IMAGE_RESOURCE = 0x20 + LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x40 + LOAD_LIBRARY_REQUIRE_SIGNED_TARGET = 0x80 + LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR = 0x100 + LOAD_LIBRARY_SEARCH_APPLICATION_DIR = 0x200 + LOAD_LIBRARY_SEARCH_USER_DIRS = 0x400 + LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x800 + LOAD_LIBRARY_SEARCH_DEFAULT_DIRS = 0x1000 + LOAD_LIBRARY_SAFE_CURRENT_DIRS = 0x00002000 + LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER = 0x00004000 + LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY = 0x00008000 +) + +// RegNotifyChangeKeyValue notifyFilter flags. +const ( + // REG_NOTIFY_CHANGE_NAME notifies the caller if a subkey is added or deleted. + REG_NOTIFY_CHANGE_NAME = 0x00000001 + + // REG_NOTIFY_CHANGE_ATTRIBUTES notifies the caller of changes to the attributes of the key, such as the security descriptor information. + REG_NOTIFY_CHANGE_ATTRIBUTES = 0x00000002 + + // REG_NOTIFY_CHANGE_LAST_SET notifies the caller of changes to a value of the key. This can include adding or deleting a value, or changing an existing value. + REG_NOTIFY_CHANGE_LAST_SET = 0x00000004 + + // REG_NOTIFY_CHANGE_SECURITY notifies the caller of changes to the security descriptor of the key. + REG_NOTIFY_CHANGE_SECURITY = 0x00000008 + + // REG_NOTIFY_THREAD_AGNOSTIC indicates that the lifetime of the registration must not be tied to the lifetime of the thread issuing the RegNotifyChangeKeyValue call. Note: This flag value is only supported in Windows 8 and later. + REG_NOTIFY_THREAD_AGNOSTIC = 0x10000000 +) + +type CommTimeouts struct { + ReadIntervalTimeout uint32 + ReadTotalTimeoutMultiplier uint32 + ReadTotalTimeoutConstant uint32 + WriteTotalTimeoutMultiplier uint32 + WriteTotalTimeoutConstant uint32 +} + +// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. +type NTUnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTString is an ANSI string for NT native APIs, corresponding to STRING. 
+type NTString struct { + Length uint16 + MaximumLength uint16 + Buffer *byte +} + +type LIST_ENTRY struct { + Flink *LIST_ENTRY + Blink *LIST_ENTRY +} + +type LDR_DATA_TABLE_ENTRY struct { + reserved1 [2]uintptr + InMemoryOrderLinks LIST_ENTRY + reserved2 [2]uintptr + DllBase uintptr + reserved3 [2]uintptr + FullDllName NTUnicodeString + reserved4 [8]byte + reserved5 [3]uintptr + reserved6 uintptr + TimeDateStamp uint32 +} + +type PEB_LDR_DATA struct { + reserved1 [8]byte + reserved2 [3]uintptr + InMemoryOrderModuleList LIST_ENTRY +} + +type CURDIR struct { + DosPath NTUnicodeString + Handle Handle +} + +type RTL_DRIVE_LETTER_CURDIR struct { + Flags uint16 + Length uint16 + TimeStamp uint32 + DosPath NTString +} + +type RTL_USER_PROCESS_PARAMETERS struct { + MaximumLength, Length uint32 + + Flags, DebugFlags uint32 + + ConsoleHandle Handle + ConsoleFlags uint32 + StandardInput, StandardOutput, StandardError Handle + + CurrentDirectory CURDIR + DllPath NTUnicodeString + ImagePathName NTUnicodeString + CommandLine NTUnicodeString + Environment unsafe.Pointer + + StartingX, StartingY, CountX, CountY, CountCharsX, CountCharsY, FillAttribute uint32 + + WindowFlags, ShowWindowFlags uint32 + WindowTitle, DesktopInfo, ShellInfo, RuntimeData NTUnicodeString + CurrentDirectories [32]RTL_DRIVE_LETTER_CURDIR + + EnvironmentSize, EnvironmentVersion uintptr + + PackageDependencyData unsafe.Pointer + ProcessGroupId uint32 + LoaderThreads uint32 + + RedirectionDllName NTUnicodeString + HeapPartitionName NTUnicodeString + DefaultThreadpoolCpuSetMasks uintptr + DefaultThreadpoolCpuSetMaskCount uint32 +} + +type PEB struct { + reserved1 [2]byte + BeingDebugged byte + BitField byte + reserved3 uintptr + ImageBaseAddress uintptr + Ldr *PEB_LDR_DATA + ProcessParameters *RTL_USER_PROCESS_PARAMETERS + reserved4 [3]uintptr + AtlThunkSListPtr uintptr + reserved5 uintptr + reserved6 uint32 + reserved7 uintptr + reserved8 uint32 + AtlThunkSListPtr32 uint32 + reserved9 [45]uintptr + reserved10 [96]byte + PostProcessInitRoutine uintptr + reserved11 [128]byte + reserved12 [1]uintptr + SessionId uint32 +} + +type OBJECT_ATTRIBUTES struct { + Length uint32 + RootDirectory Handle + ObjectName *NTUnicodeString + Attributes uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + SecurityQoS *SECURITY_QUALITY_OF_SERVICE +} + +// Values for the Attributes member of OBJECT_ATTRIBUTES. +const ( + OBJ_INHERIT = 0x00000002 + OBJ_PERMANENT = 0x00000010 + OBJ_EXCLUSIVE = 0x00000020 + OBJ_CASE_INSENSITIVE = 0x00000040 + OBJ_OPENIF = 0x00000080 + OBJ_OPENLINK = 0x00000100 + OBJ_KERNEL_HANDLE = 0x00000200 + OBJ_FORCE_ACCESS_CHECK = 0x00000400 + OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800 + OBJ_DONT_REPARSE = 0x00001000 + OBJ_VALID_ATTRIBUTES = 0x00001FF2 +) + +type IO_STATUS_BLOCK struct { + Status NTStatus + Information uintptr +} + +type RTLP_CURDIR_REF struct { + RefCount int32 + Handle Handle +} + +type RTL_RELATIVE_NAME struct { + RelativeName NTUnicodeString + ContainingDirectory Handle + CurDirRef *RTLP_CURDIR_REF +} + +const ( + // CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_SUPERSEDE = 0x00000000 + FILE_OPEN = 0x00000001 + FILE_CREATE = 0x00000002 + FILE_OPEN_IF = 0x00000003 + FILE_OVERWRITE = 0x00000004 + FILE_OVERWRITE_IF = 0x00000005 + FILE_MAXIMUM_DISPOSITION = 0x00000005 + + // CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile. 
+ FILE_DIRECTORY_FILE = 0x00000001 + FILE_WRITE_THROUGH = 0x00000002 + FILE_SEQUENTIAL_ONLY = 0x00000004 + FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008 + FILE_SYNCHRONOUS_IO_ALERT = 0x00000010 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_NON_DIRECTORY_FILE = 0x00000040 + FILE_CREATE_TREE_CONNECTION = 0x00000080 + FILE_COMPLETE_IF_OPLOCKED = 0x00000100 + FILE_NO_EA_KNOWLEDGE = 0x00000200 + FILE_OPEN_REMOTE_INSTANCE = 0x00000400 + FILE_RANDOM_ACCESS = 0x00000800 + FILE_DELETE_ON_CLOSE = 0x00001000 + FILE_OPEN_BY_FILE_ID = 0x00002000 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_NO_COMPRESSION = 0x00008000 + FILE_OPEN_REQUIRING_OPLOCK = 0x00010000 + FILE_DISALLOW_EXCLUSIVE = 0x00020000 + FILE_RESERVE_OPFILTER = 0x00100000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + FILE_OPEN_NO_RECALL = 0x00400000 + FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 + + // Parameter constants for NtCreateNamedPipeFile. + + FILE_PIPE_BYTE_STREAM_TYPE = 0x00000000 + FILE_PIPE_MESSAGE_TYPE = 0x00000001 + + FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000 + FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x00000002 + + FILE_PIPE_TYPE_VALID_MASK = 0x00000003 + + FILE_PIPE_BYTE_STREAM_MODE = 0x00000000 + FILE_PIPE_MESSAGE_MODE = 0x00000001 + + FILE_PIPE_QUEUE_OPERATION = 0x00000000 + FILE_PIPE_COMPLETE_OPERATION = 0x00000001 + + FILE_PIPE_INBOUND = 0x00000000 + FILE_PIPE_OUTBOUND = 0x00000001 + FILE_PIPE_FULL_DUPLEX = 0x00000002 + + FILE_PIPE_DISCONNECTED_STATE = 0x00000001 + FILE_PIPE_LISTENING_STATE = 0x00000002 + FILE_PIPE_CONNECTED_STATE = 0x00000003 + FILE_PIPE_CLOSING_STATE = 0x00000004 + + FILE_PIPE_CLIENT_END = 0x00000000 + FILE_PIPE_SERVER_END = 0x00000001 +) + +// ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. +const ( + ProcessBasicInformation = iota + ProcessQuotaLimits + ProcessIoCounters + ProcessVmCounters + ProcessTimes + ProcessBasePriority + ProcessRaisePriority + ProcessDebugPort + ProcessExceptionPort + ProcessAccessToken + ProcessLdtInformation + ProcessLdtSize + ProcessDefaultHardErrorMode + ProcessIoPortHandlers + ProcessPooledUsageAndLimits + ProcessWorkingSetWatch + ProcessUserModeIOPL + ProcessEnableAlignmentFaultFixup + ProcessPriorityClass + ProcessWx86Information + ProcessHandleCount + ProcessAffinityMask + ProcessPriorityBoost + ProcessDeviceMap + ProcessSessionInformation + ProcessForegroundInformation + ProcessWow64Information + ProcessImageFileName + ProcessLUIDDeviceMapsEnabled + ProcessBreakOnTermination + ProcessDebugObjectHandle + ProcessDebugFlags + ProcessHandleTracing + ProcessIoPriority + ProcessExecuteFlags + ProcessTlsInformation + ProcessCookie + ProcessImageInformation + ProcessCycleTime + ProcessPagePriority + ProcessInstrumentationCallback + ProcessThreadStackAllocation + ProcessWorkingSetWatchEx + ProcessImageFileNameWin32 + ProcessImageFileMapping + ProcessAffinityUpdateMode + ProcessMemoryAllocationMode + ProcessGroupInformation + ProcessTokenVirtualizationEnabled + ProcessConsoleHostProcess + ProcessWindowInformation + ProcessHandleInformation + ProcessMitigationPolicy + ProcessDynamicFunctionTableInformation + ProcessHandleCheckingMode + ProcessKeepAliveCount + ProcessRevokeFileHandles + ProcessWorkingSetControl + ProcessHandleTable + ProcessCheckStackExtentsMode + ProcessCommandLineInformation + ProcessProtectionInformation + ProcessMemoryExhaustion + ProcessFaultInformation + ProcessTelemetryIdInformation + ProcessCommitReleaseInformation + ProcessDefaultCpuSetsInformation + ProcessAllowedCpuSetsInformation + ProcessSubsystemProcess + 
ProcessJobMemoryInformation + ProcessInPrivate + ProcessRaiseUMExceptionOnInvalidHandleClose + ProcessIumChallengeResponse + ProcessChildProcessInformation + ProcessHighGraphicsPriorityInformation + ProcessSubsystemInformation + ProcessEnergyValues + ProcessActivityThrottleState + ProcessActivityThrottlePolicy + ProcessWin32kSyscallFilterInformation + ProcessDisableSystemAllowedCpuSets + ProcessWakeInformation + ProcessEnergyTrackingState + ProcessManageWritesToExecutableMemory + ProcessCaptureTrustletLiveDump + ProcessTelemetryCoverage + ProcessEnclaveInformation + ProcessEnableReadWriteVmLogging + ProcessUptimeInformation + ProcessImageSection + ProcessDebugAuthInformation + ProcessSystemResourceManagement + ProcessSequenceNumber + ProcessLoaderDetour + ProcessSecurityDomainInformation + ProcessCombineSecurityDomainsInformation + ProcessEnableLogging + ProcessLeapSecondInformation + ProcessFiberShadowStackAllocation + ProcessFreeFiberShadowStackAllocation + ProcessAltSystemCallInformation + ProcessDynamicEHContinuationTargets + ProcessDynamicEnforcedCetCompatibleRanges +) + +type PROCESS_BASIC_INFORMATION struct { + ExitStatus NTStatus + PebBaseAddress *PEB + AffinityMask uintptr + BasePriority int32 + UniqueProcessId uintptr + InheritedFromUniqueProcessId uintptr +} + +// Constants for LocalAlloc flags. +const ( + LMEM_FIXED = 0x0 + LMEM_MOVEABLE = 0x2 + LMEM_NOCOMPACT = 0x10 + LMEM_NODISCARD = 0x20 + LMEM_ZEROINIT = 0x40 + LMEM_MODIFY = 0x80 + LMEM_DISCARDABLE = 0xf00 + LMEM_VALID_FLAGS = 0xf72 + LMEM_INVALID_HANDLE = 0x8000 + LHND = LMEM_MOVEABLE | LMEM_ZEROINIT + LPTR = LMEM_FIXED | LMEM_ZEROINIT + NONZEROLHND = LMEM_MOVEABLE + NONZEROLPTR = LMEM_FIXED +) + +// Constants for the CreateNamedPipe-family of functions. +const ( + PIPE_ACCESS_INBOUND = 0x1 + PIPE_ACCESS_OUTBOUND = 0x2 + PIPE_ACCESS_DUPLEX = 0x3 + + PIPE_CLIENT_END = 0x0 + PIPE_SERVER_END = 0x1 + + PIPE_WAIT = 0x0 + PIPE_NOWAIT = 0x1 + PIPE_READMODE_BYTE = 0x0 + PIPE_READMODE_MESSAGE = 0x2 + PIPE_TYPE_BYTE = 0x0 + PIPE_TYPE_MESSAGE = 0x4 + PIPE_ACCEPT_REMOTE_CLIENTS = 0x0 + PIPE_REJECT_REMOTE_CLIENTS = 0x8 + + PIPE_UNLIMITED_INSTANCES = 255 +) + +// Constants for security attributes when opening named pipes. +const ( + SECURITY_ANONYMOUS = SecurityAnonymous << 16 + SECURITY_IDENTIFICATION = SecurityIdentification << 16 + SECURITY_IMPERSONATION = SecurityImpersonation << 16 + SECURITY_DELEGATION = SecurityDelegation << 16 + + SECURITY_CONTEXT_TRACKING = 0x40000 + SECURITY_EFFECTIVE_ONLY = 0x80000 + + SECURITY_SQOS_PRESENT = 0x100000 + SECURITY_VALID_SQOS_FLAGS = 0x1f0000 +) + +// ResourceID represents a 16-bit resource identifier, traditionally created with the MAKEINTRESOURCE macro. +type ResourceID uint16 + +// ResourceIDOrString must be either a ResourceID, to specify a resource or resource type by ID, +// or a string, to specify a resource or resource type by name. +type ResourceIDOrString interface{} + +// Predefined resource names and types. +var ( + // Predefined names. + CREATEPROCESS_MANIFEST_RESOURCE_ID ResourceID = 1 + ISOLATIONAWARE_MANIFEST_RESOURCE_ID ResourceID = 2 + ISOLATIONAWARE_NOSTATICIMPORT_MANIFEST_RESOURCE_ID ResourceID = 3 + ISOLATIONPOLICY_MANIFEST_RESOURCE_ID ResourceID = 4 + ISOLATIONPOLICY_BROWSER_MANIFEST_RESOURCE_ID ResourceID = 5 + MINIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 1 // inclusive + MAXIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 16 // inclusive + + // Predefined types. 
+ RT_CURSOR ResourceID = 1 + RT_BITMAP ResourceID = 2 + RT_ICON ResourceID = 3 + RT_MENU ResourceID = 4 + RT_DIALOG ResourceID = 5 + RT_STRING ResourceID = 6 + RT_FONTDIR ResourceID = 7 + RT_FONT ResourceID = 8 + RT_ACCELERATOR ResourceID = 9 + RT_RCDATA ResourceID = 10 + RT_MESSAGETABLE ResourceID = 11 + RT_GROUP_CURSOR ResourceID = 12 + RT_GROUP_ICON ResourceID = 14 + RT_VERSION ResourceID = 16 + RT_DLGINCLUDE ResourceID = 17 + RT_PLUGPLAY ResourceID = 19 + RT_VXD ResourceID = 20 + RT_ANICURSOR ResourceID = 21 + RT_ANIICON ResourceID = 22 + RT_HTML ResourceID = 23 + RT_MANIFEST ResourceID = 24 +) + +type COAUTHIDENTITY struct { + User *uint16 + UserLength uint32 + Domain *uint16 + DomainLength uint32 + Password *uint16 + PasswordLength uint32 + Flags uint32 +} + +type COAUTHINFO struct { + AuthnSvc uint32 + AuthzSvc uint32 + ServerPrincName *uint16 + AuthnLevel uint32 + ImpersonationLevel uint32 + AuthIdentityData *COAUTHIDENTITY + Capabilities uint32 +} + +type COSERVERINFO struct { + Reserved1 uint32 + Aame *uint16 + AuthInfo *COAUTHINFO + Reserved2 uint32 +} + +type BIND_OPTS3 struct { + CbStruct uint32 + Flags uint32 + Mode uint32 + TickCountDeadline uint32 + TrackFlags uint32 + ClassContext uint32 + Locale uint32 + ServerInfo *COSERVERINFO + Hwnd HWND +} + +const ( + CLSCTX_INPROC_SERVER = 0x1 + CLSCTX_INPROC_HANDLER = 0x2 + CLSCTX_LOCAL_SERVER = 0x4 + CLSCTX_INPROC_SERVER16 = 0x8 + CLSCTX_REMOTE_SERVER = 0x10 + CLSCTX_INPROC_HANDLER16 = 0x20 + CLSCTX_RESERVED1 = 0x40 + CLSCTX_RESERVED2 = 0x80 + CLSCTX_RESERVED3 = 0x100 + CLSCTX_RESERVED4 = 0x200 + CLSCTX_NO_CODE_DOWNLOAD = 0x400 + CLSCTX_RESERVED5 = 0x800 + CLSCTX_NO_CUSTOM_MARSHAL = 0x1000 + CLSCTX_ENABLE_CODE_DOWNLOAD = 0x2000 + CLSCTX_NO_FAILURE_LOG = 0x4000 + CLSCTX_DISABLE_AAA = 0x8000 + CLSCTX_ENABLE_AAA = 0x10000 + CLSCTX_FROM_DEFAULT_CONTEXT = 0x20000 + CLSCTX_ACTIVATE_32_BIT_SERVER = 0x40000 + CLSCTX_ACTIVATE_64_BIT_SERVER = 0x80000 + CLSCTX_ENABLE_CLOAKING = 0x100000 + CLSCTX_APPCONTAINER = 0x400000 + CLSCTX_ACTIVATE_AAA_AS_IU = 0x800000 + CLSCTX_PS_DLL = 0x80000000 + + COINIT_MULTITHREADED = 0x0 + COINIT_APARTMENTTHREADED = 0x2 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +// Flag for QueryFullProcessImageName. +const PROCESS_NAME_NATIVE = 1 diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go new file mode 100644 index 000000000..8bce3e2fc --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go new file mode 100644 index 000000000..fdddc0c70 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go new file mode 100644 index 000000000..321872c3e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm64.go b/vendor/golang.org/x/sys/windows/types_windows_arm64.go new file mode 100644 index 000000000..fdddc0c70 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_arm64.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go new file mode 100644 index 000000000..0cf658fbd --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -0,0 +1,9468 @@ +// Code generated by 'mkerrors.bash'; DO NOT EDIT. + +package windows + +import "syscall" + +const ( + FACILITY_NULL = 0 + FACILITY_RPC = 1 + FACILITY_DISPATCH = 2 + FACILITY_STORAGE = 3 + FACILITY_ITF = 4 + FACILITY_WIN32 = 7 + FACILITY_WINDOWS = 8 + FACILITY_SSPI = 9 + FACILITY_SECURITY = 9 + FACILITY_CONTROL = 10 + FACILITY_CERT = 11 + FACILITY_INTERNET = 12 + FACILITY_MEDIASERVER = 13 + FACILITY_MSMQ = 14 + FACILITY_SETUPAPI = 15 + FACILITY_SCARD = 16 + FACILITY_COMPLUS = 17 + FACILITY_AAF = 18 + FACILITY_URT = 19 + FACILITY_ACS = 20 + FACILITY_DPLAY = 21 + FACILITY_UMI = 22 + FACILITY_SXS = 23 + FACILITY_WINDOWS_CE = 24 + FACILITY_HTTP = 25 + FACILITY_USERMODE_COMMONLOG = 26 + FACILITY_WER = 27 + FACILITY_USERMODE_FILTER_MANAGER = 31 + FACILITY_BACKGROUNDCOPY = 32 + FACILITY_CONFIGURATION = 33 + FACILITY_WIA = 33 + FACILITY_STATE_MANAGEMENT = 34 + FACILITY_METADIRECTORY = 35 + FACILITY_WINDOWSUPDATE = 36 + FACILITY_DIRECTORYSERVICE = 37 + FACILITY_GRAPHICS = 38 + FACILITY_SHELL = 39 + FACILITY_NAP = 39 + FACILITY_TPM_SERVICES = 40 + FACILITY_TPM_SOFTWARE = 41 + FACILITY_UI = 42 + FACILITY_XAML = 43 + FACILITY_ACTION_QUEUE = 44 + FACILITY_PLA = 48 + FACILITY_WINDOWS_SETUP = 48 + FACILITY_FVE = 49 + FACILITY_FWP = 50 + FACILITY_WINRM = 51 + FACILITY_NDIS = 52 + FACILITY_USERMODE_HYPERVISOR = 53 + FACILITY_CMI = 54 + FACILITY_USERMODE_VIRTUALIZATION = 55 + FACILITY_USERMODE_VOLMGR = 56 + FACILITY_BCD = 57 + FACILITY_USERMODE_VHD = 58 + FACILITY_USERMODE_HNS = 59 + FACILITY_SDIAG = 60 + FACILITY_WEBSERVICES = 61 + FACILITY_WINPE = 61 + FACILITY_WPN = 62 + FACILITY_WINDOWS_STORE = 63 + FACILITY_INPUT = 64 + FACILITY_EAP = 66 + FACILITY_WINDOWS_DEFENDER = 80 + FACILITY_OPC = 81 + FACILITY_XPS = 82 + FACILITY_MBN = 84 + FACILITY_POWERSHELL = 84 + FACILITY_RAS = 83 + FACILITY_P2P_INT = 98 + FACILITY_P2P = 99 + FACILITY_DAF = 100 + FACILITY_BLUETOOTH_ATT = 101 + FACILITY_AUDIO = 102 + FACILITY_STATEREPOSITORY = 103 + FACILITY_VISUALCPP = 109 + FACILITY_SCRIPT = 112 + FACILITY_PARSE = 113 + FACILITY_BLB = 120 + FACILITY_BLB_CLI = 121 + FACILITY_WSBAPP = 122 + FACILITY_BLBUI = 128 + FACILITY_USN = 129 + FACILITY_USERMODE_VOLSNAP = 130 + FACILITY_TIERING = 131 + FACILITY_WSB_ONLINE = 133 + FACILITY_ONLINE_ID = 134 + FACILITY_DEVICE_UPDATE_AGENT = 135 + FACILITY_DRVSERVICING = 136 + FACILITY_DLS = 153 + FACILITY_DELIVERY_OPTIMIZATION = 208 + FACILITY_USERMODE_SPACES = 231 + FACILITY_USER_MODE_SECURITY_CORE = 232 + FACILITY_USERMODE_LICENSING = 234 + FACILITY_SOS = 160 + FACILITY_DEBUGGERS = 176 + FACILITY_SPP = 256 + FACILITY_RESTORE = 256 + FACILITY_DMSERVER = 256 + FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 + FACILITY_DEPLOYMENT_SERVICES_IMAGING = 
258 + FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 + FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 + FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 + FACILITY_DEPLOYMENT_SERVICES_PXE = 263 + FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 + FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 + FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 + FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 + FACILITY_LINGUISTIC_SERVICES = 305 + FACILITY_AUDIOSTREAMING = 1094 + FACILITY_ACCELERATOR = 1536 + FACILITY_WMAAECMA = 1996 + FACILITY_DIRECTMUSIC = 2168 + FACILITY_DIRECT3D10 = 2169 + FACILITY_DXGI = 2170 + FACILITY_DXGI_DDI = 2171 + FACILITY_DIRECT3D11 = 2172 + FACILITY_DIRECT3D11_DEBUG = 2173 + FACILITY_DIRECT3D12 = 2174 + FACILITY_DIRECT3D12_DEBUG = 2175 + FACILITY_LEAP = 2184 + FACILITY_AUDCLNT = 2185 + FACILITY_WINCODEC_DWRITE_DWM = 2200 + FACILITY_WINML = 2192 + FACILITY_DIRECT2D = 2201 + FACILITY_DEFRAG = 2304 + FACILITY_USERMODE_SDBUS = 2305 + FACILITY_JSCRIPT = 2306 + FACILITY_PIDGENX = 2561 + FACILITY_EAS = 85 + FACILITY_WEB = 885 + FACILITY_WEB_SOCKET = 886 + FACILITY_MOBILE = 1793 + FACILITY_SQLITE = 1967 + FACILITY_UTC = 1989 + FACILITY_WEP = 2049 + FACILITY_SYNCENGINE = 2050 + FACILITY_XBOX = 2339 + FACILITY_GAME = 2340 + FACILITY_PIX = 2748 + ERROR_SUCCESS syscall.Errno = 0 + NO_ERROR = 0 + SEC_E_OK Handle = 0x00000000 + ERROR_INVALID_FUNCTION syscall.Errno = 1 + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_ARENA_TRASHED syscall.Errno = 7 + ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 + ERROR_INVALID_BLOCK syscall.Errno = 9 + ERROR_BAD_ENVIRONMENT syscall.Errno = 10 + ERROR_BAD_FORMAT syscall.Errno = 11 + ERROR_INVALID_ACCESS syscall.Errno = 12 + ERROR_INVALID_DATA syscall.Errno = 13 + ERROR_OUTOFMEMORY syscall.Errno = 14 + ERROR_INVALID_DRIVE syscall.Errno = 15 + ERROR_CURRENT_DIRECTORY syscall.Errno = 16 + ERROR_NOT_SAME_DEVICE syscall.Errno = 17 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_WRITE_PROTECT syscall.Errno = 19 + ERROR_BAD_UNIT syscall.Errno = 20 + ERROR_NOT_READY syscall.Errno = 21 + ERROR_BAD_COMMAND syscall.Errno = 22 + ERROR_CRC syscall.Errno = 23 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_SEEK syscall.Errno = 25 + ERROR_NOT_DOS_DISK syscall.Errno = 26 + ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 + ERROR_OUT_OF_PAPER syscall.Errno = 28 + ERROR_WRITE_FAULT syscall.Errno = 29 + ERROR_READ_FAULT syscall.Errno = 30 + ERROR_GEN_FAILURE syscall.Errno = 31 + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_WRONG_DISK syscall.Errno = 34 + ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_HANDLE_DISK_FULL syscall.Errno = 39 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_REM_NOT_LIST syscall.Errno = 51 + ERROR_DUP_NAME syscall.Errno = 52 + ERROR_BAD_NETPATH syscall.Errno = 53 + ERROR_NETWORK_BUSY syscall.Errno = 54 + ERROR_DEV_NOT_EXIST syscall.Errno = 55 + ERROR_TOO_MANY_CMDS syscall.Errno = 56 + ERROR_ADAP_HDW_ERR syscall.Errno = 57 + ERROR_BAD_NET_RESP syscall.Errno = 58 + ERROR_UNEXP_NET_ERR syscall.Errno = 59 + ERROR_BAD_REM_ADAP syscall.Errno = 60 + ERROR_PRINTQ_FULL syscall.Errno = 61 + ERROR_NO_SPOOL_SPACE syscall.Errno = 62 + ERROR_PRINT_CANCELLED syscall.Errno = 63 + ERROR_NETNAME_DELETED syscall.Errno = 
64 + ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 + ERROR_BAD_DEV_TYPE syscall.Errno = 66 + ERROR_BAD_NET_NAME syscall.Errno = 67 + ERROR_TOO_MANY_NAMES syscall.Errno = 68 + ERROR_TOO_MANY_SESS syscall.Errno = 69 + ERROR_SHARING_PAUSED syscall.Errno = 70 + ERROR_REQ_NOT_ACCEP syscall.Errno = 71 + ERROR_REDIR_PAUSED syscall.Errno = 72 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_CANNOT_MAKE syscall.Errno = 82 + ERROR_FAIL_I24 syscall.Errno = 83 + ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 + ERROR_ALREADY_ASSIGNED syscall.Errno = 85 + ERROR_INVALID_PASSWORD syscall.Errno = 86 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_NET_WRITE_FAULT syscall.Errno = 88 + ERROR_NO_PROC_SLOTS syscall.Errno = 89 + ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 + ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 + ERROR_SEM_IS_SET syscall.Errno = 102 + ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 + ERROR_INVALID_AT_INTERRUPT_TIME syscall.Errno = 104 + ERROR_SEM_OWNER_DIED syscall.Errno = 105 + ERROR_SEM_USER_LIMIT syscall.Errno = 106 + ERROR_DISK_CHANGE syscall.Errno = 107 + ERROR_DRIVE_LOCKED syscall.Errno = 108 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_OPEN_FAILED syscall.Errno = 110 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_DISK_FULL syscall.Errno = 112 + ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 + ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 + ERROR_INVALID_CATEGORY syscall.Errno = 117 + ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 + ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_SEM_TIMEOUT syscall.Errno = 121 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_INVALID_LEVEL syscall.Errno = 124 + ERROR_NO_VOLUME_LABEL syscall.Errno = 125 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 + ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 + ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 + ERROR_NEGATIVE_SEEK syscall.Errno = 131 + ERROR_SEEK_ON_DEVICE syscall.Errno = 132 + ERROR_IS_JOIN_TARGET syscall.Errno = 133 + ERROR_IS_JOINED syscall.Errno = 134 + ERROR_IS_SUBSTED syscall.Errno = 135 + ERROR_NOT_JOINED syscall.Errno = 136 + ERROR_NOT_SUBSTED syscall.Errno = 137 + ERROR_JOIN_TO_JOIN syscall.Errno = 138 + ERROR_SUBST_TO_SUBST syscall.Errno = 139 + ERROR_JOIN_TO_SUBST syscall.Errno = 140 + ERROR_SUBST_TO_JOIN syscall.Errno = 141 + ERROR_BUSY_DRIVE syscall.Errno = 142 + ERROR_SAME_DRIVE syscall.Errno = 143 + ERROR_DIR_NOT_ROOT syscall.Errno = 144 + ERROR_DIR_NOT_EMPTY syscall.Errno = 145 + ERROR_IS_SUBST_PATH syscall.Errno = 146 + ERROR_IS_JOIN_PATH syscall.Errno = 147 + ERROR_PATH_BUSY syscall.Errno = 148 + ERROR_IS_SUBST_TARGET syscall.Errno = 149 + ERROR_SYSTEM_TRACE syscall.Errno = 150 + ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 + ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 + ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 + ERROR_LABEL_TOO_LONG syscall.Errno = 154 + ERROR_TOO_MANY_TCBS syscall.Errno = 155 + ERROR_SIGNAL_REFUSED syscall.Errno = 156 + ERROR_DISCARDED syscall.Errno = 157 + ERROR_NOT_LOCKED syscall.Errno = 158 + ERROR_BAD_THREADID_ADDR syscall.Errno = 159 + ERROR_BAD_ARGUMENTS syscall.Errno = 160 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_SIGNAL_PENDING syscall.Errno = 162 + ERROR_MAX_THRDS_REACHED syscall.Errno = 164 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_BUSY syscall.Errno = 170 + ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 + ERROR_CANCEL_VIOLATION 
syscall.Errno = 173 + ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 + ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 + ERROR_INVALID_ORDINAL syscall.Errno = 182 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 + ERROR_SEM_NOT_FOUND syscall.Errno = 187 + ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 + ERROR_INVALID_STACKSEG syscall.Errno = 189 + ERROR_INVALID_MODULETYPE syscall.Errno = 190 + ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 + ERROR_EXE_MARKED_INVALID syscall.Errno = 192 + ERROR_BAD_EXE_FORMAT syscall.Errno = 193 + ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 + ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 + ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 + ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 + ERROR_INVALID_SEGDPL syscall.Errno = 198 + ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 + ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 200 + ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 + ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_NO_SIGNAL_SENT syscall.Errno = 205 + ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 + ERROR_RING2_STACK_IN_USE syscall.Errno = 207 + ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 + ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 + ERROR_THREAD_1_INACTIVE syscall.Errno = 210 + ERROR_LOCKED syscall.Errno = 212 + ERROR_TOO_MANY_MODULES syscall.Errno = 214 + ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 + ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 + ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 + ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 + ERROR_FILE_CHECKED_OUT syscall.Errno = 220 + ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 + ERROR_BAD_FILE_TYPE syscall.Errno = 222 + ERROR_FILE_TOO_LARGE syscall.Errno = 223 + ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 + ERROR_VIRUS_INFECTED syscall.Errno = 225 + ERROR_VIRUS_DELETED syscall.Errno = 226 + ERROR_PIPE_LOCAL syscall.Errno = 229 + ERROR_BAD_PIPE syscall.Errno = 230 + ERROR_PIPE_BUSY syscall.Errno = 231 + ERROR_NO_DATA syscall.Errno = 232 + ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_NO_WORK_DONE syscall.Errno = 235 + ERROR_VC_DISCONNECTED syscall.Errno = 240 + ERROR_INVALID_EA_NAME syscall.Errno = 254 + ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 + WAIT_TIMEOUT syscall.Errno = 258 + ERROR_NO_MORE_ITEMS syscall.Errno = 259 + ERROR_CANNOT_COPY syscall.Errno = 266 + ERROR_DIRECTORY syscall.Errno = 267 + ERROR_EAS_DIDNT_FIT syscall.Errno = 275 + ERROR_EA_FILE_CORRUPT syscall.Errno = 276 + ERROR_EA_TABLE_FULL syscall.Errno = 277 + ERROR_INVALID_EA_HANDLE syscall.Errno = 278 + ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 + ERROR_NOT_OWNER syscall.Errno = 288 + ERROR_TOO_MANY_POSTS syscall.Errno = 298 + ERROR_PARTIAL_COPY syscall.Errno = 299 + ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 + ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 + ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 + ERROR_DELETE_PENDING syscall.Errno = 303 + ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 + ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 + ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 + ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 + ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 + ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 + ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 + ERROR_DUPLICATE_PRIVILEGES 
syscall.Errno = 311 + ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 + ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 + ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 + ERROR_INVALID_TOKEN syscall.Errno = 315 + ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 + ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 + ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 + ERROR_UNDEFINED_SCOPE syscall.Errno = 319 + ERROR_INVALID_CAP syscall.Errno = 320 + ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 + ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 + ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 + ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 + ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 + ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 + ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 + ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 + ERROR_BAD_DEVICE_PATH syscall.Errno = 330 + ERROR_TOO_MANY_DESCRIPTORS syscall.Errno = 331 + ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 + ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 + ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 + ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 + ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 + ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 + ERROR_FT_WRITE_FAILURE syscall.Errno = 338 + ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 + ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 + ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 + ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 + ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 + ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 + ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 + ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 + ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 + ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 + ERROR_ENCLAVE_FAILURE syscall.Errno = 349 + ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 + ERROR_FAIL_SHUTDOWN syscall.Errno = 351 + ERROR_FAIL_RESTART syscall.Errno = 352 + ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 + ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 + ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 + ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 + ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 + ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 + ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 + ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 + ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 + ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 + ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 + ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 + ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 + ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 + ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 + ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 + ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 + ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 + ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 + ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 + ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 + ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 + ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 + ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 + ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 + ERROR_CLOUD_FILE_ALREADY_CONNECTED 
syscall.Errno = 378 + ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 + ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 + ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 + ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 + ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 + ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 + ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 + ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 + ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 + ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 + ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 + ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 + ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 + ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 + ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 + ERROR_CLOUD_FILE_PROPERTY_CORRUPT syscall.Errno = 394 + ERROR_CLOUD_FILE_ACCESS_DENIED syscall.Errno = 395 + ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 + ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 + ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 + ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 + ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 + ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 + ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 + ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 + ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 + ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 + ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 + ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 + ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 + ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 + ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 + ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 + ERROR_FT_READ_FAILURE syscall.Errno = 415 + ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 + ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 + ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 + ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 + ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 + ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 + ERROR_TIME_SENSITIVE_THREAD syscall.Errno = 422 + ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 + ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 + ERROR_FILE_NOT_SUPPORTED syscall.Errno = 425 + ERROR_CLOUD_FILE_REQUEST_TIMEOUT syscall.Errno = 426 + ERROR_NO_TASK_QUEUE syscall.Errno = 427 + ERROR_SRC_SRV_DLL_LOAD_FAILED syscall.Errno = 428 + ERROR_NOT_SUPPORTED_WITH_BTT syscall.Errno = 429 + ERROR_ENCRYPTION_DISABLED syscall.Errno = 430 + ERROR_ENCRYPTING_METADATA_DISALLOWED syscall.Errno = 431 + ERROR_CANT_CLEAR_ENCRYPTION_FLAG syscall.Errno = 432 + ERROR_NO_SUCH_DEVICE syscall.Errno = 433 + ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 + ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 + ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 + ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 + ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 + ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 + ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 + ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 + ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno 
= 458 + ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 + ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 + ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 + ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 + ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 + ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 + ERROR_INVALID_ADDRESS syscall.Errno = 487 + ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 + ERROR_PARTITION_TERMINATING syscall.Errno = 1184 + ERROR_USER_PROFILE_LOAD syscall.Errno = 500 + ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 + ERROR_PIPE_CONNECTED syscall.Errno = 535 + ERROR_PIPE_LISTENING syscall.Errno = 536 + ERROR_VERIFIER_STOP syscall.Errno = 537 + ERROR_ABIOS_ERROR syscall.Errno = 538 + ERROR_WX86_WARNING syscall.Errno = 539 + ERROR_WX86_ERROR syscall.Errno = 540 + ERROR_TIMER_NOT_CANCELED syscall.Errno = 541 + ERROR_UNWIND syscall.Errno = 542 + ERROR_BAD_STACK syscall.Errno = 543 + ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 + ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 + ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 + ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 + ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 + ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 + ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 + ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 + ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 + ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 + ERROR_CANT_WAIT syscall.Errno = 554 + ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 + ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 + ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 + ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 + ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 + ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 + ERROR_INVALID_LDT_SIZE syscall.Errno = 561 + ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 + ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 + ERROR_TOO_MANY_THREADS syscall.Errno = 565 + ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 + ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 + ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 + ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 + ERROR_NET_OPEN_FAILED syscall.Errno = 570 + ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 + ERROR_CONTROL_C_EXIT syscall.Errno = 572 + ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 + ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 + ERROR_APP_INIT_FAILURE syscall.Errno = 575 + ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 + ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 + ERROR_NO_PAGEFILE syscall.Errno = 578 + ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 + ERROR_NO_EVENT_PAIR syscall.Errno = 580 + ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 + ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 + ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 + ERROR_FLOPPY_VOLUME syscall.Errno = 584 + ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 + ERROR_BACKUP_CONTROLLER syscall.Errno = 586 + ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 + ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 + ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 + ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 + ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 + ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 + ERROR_VDM_HARD_ERROR syscall.Errno = 593 + ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 + ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 + ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 + ERROR_CLIENT_SERVER_PARAMETERS_INVALID 
syscall.Errno = 597 + ERROR_NOT_TINY_STREAM syscall.Errno = 598 + ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 + ERROR_CONVERT_TO_LARGE syscall.Errno = 600 + ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 + ERROR_ALLOCATE_BUCKET syscall.Errno = 602 + ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 + ERROR_INVALID_VARIANT syscall.Errno = 604 + ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 + ERROR_AUDIT_FAILED syscall.Errno = 606 + ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 + ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 + ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 + ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 + ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 + ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 + ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 + ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 + ERROR_PWD_TOO_SHORT syscall.Errno = 615 + ERROR_PWD_TOO_RECENT syscall.Errno = 616 + ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 + ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 + ERROR_INVALID_HW_PROFILE syscall.Errno = 619 + ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 + ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 + ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 + ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 + ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 + ERROR_VALIDATE_CONTINUE syscall.Errno = 625 + ERROR_NO_MORE_MATCHES syscall.Errno = 626 + ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 + ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 + ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 + ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 + ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 + ERROR_NOINTERFACE syscall.Errno = 632 + ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 + ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 + ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 + ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 + ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 + ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 + ERROR_INSUFFICIENT_POWER syscall.Errno = 639 + ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 + ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 + ERROR_PORT_NOT_SET syscall.Errno = 642 + ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 + ERROR_RANGE_NOT_FOUND syscall.Errno = 644 + ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 + ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 + ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 + ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 + ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 + ERROR_MCA_OCCURED syscall.Errno = 651 + ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 + ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 + ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 + ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 + ERROR_HIBERNATION_FAILURE syscall.Errno = 656 + ERROR_PWD_TOO_LONG syscall.Errno = 657 + ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 + ERROR_ASSERTION_FAILURE syscall.Errno = 668 + ERROR_ACPI_ERROR syscall.Errno = 669 + ERROR_WOW_ASSERTION syscall.Errno = 670 + ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 + ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 + ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 + ERROR_PNP_INVALID_ID syscall.Errno = 674 + ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 + ERROR_HANDLES_CLOSED syscall.Errno = 676 + ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 + ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 + ERROR_MEDIA_CHECK syscall.Errno = 679 + ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 + 
ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 + ERROR_LONGJUMP syscall.Errno = 682 + ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 + ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 + ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 + ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 + ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 + ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 + ERROR_DBG_REPLY_LATER syscall.Errno = 689 + ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 + ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 + ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 + ERROR_DBG_CONTROL_C syscall.Errno = 693 + ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 + ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 + ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 + ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 + ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 + ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 + ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 + ERROR_RXACT_STATE_CREATED syscall.Errno = 701 + ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 + ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 + ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 + ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 + ERROR_RECEIVE_PARTIAL syscall.Errno = 707 + ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 + ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 709 + ERROR_EVENT_DONE syscall.Errno = 710 + ERROR_EVENT_PENDING syscall.Errno = 711 + ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712 + ERROR_FATAL_APP_EXIT syscall.Errno = 713 + ERROR_PREDEFINED_HANDLE syscall.Errno = 714 + ERROR_WAS_UNLOCKED syscall.Errno = 715 + ERROR_SERVICE_NOTIFICATION syscall.Errno = 716 + ERROR_WAS_LOCKED syscall.Errno = 717 + ERROR_LOG_HARD_ERROR syscall.Errno = 718 + ERROR_ALREADY_WIN32 syscall.Errno = 719 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720 + ERROR_NO_YIELD_PERFORMED syscall.Errno = 721 + ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722 + ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723 + ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724 + ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725 + ERROR_HIBERNATED syscall.Errno = 726 + ERROR_RESUME_HIBERNATION syscall.Errno = 727 + ERROR_FIRMWARE_UPDATED syscall.Errno = 728 + ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729 + ERROR_WAKE_SYSTEM syscall.Errno = 730 + ERROR_WAIT_1 syscall.Errno = 731 + ERROR_WAIT_2 syscall.Errno = 732 + ERROR_WAIT_3 syscall.Errno = 733 + ERROR_WAIT_63 syscall.Errno = 734 + ERROR_ABANDONED_WAIT_0 syscall.Errno = 735 + ERROR_ABANDONED_WAIT_63 syscall.Errno = 736 + ERROR_USER_APC syscall.Errno = 737 + ERROR_KERNEL_APC syscall.Errno = 738 + ERROR_ALERTED syscall.Errno = 739 + ERROR_ELEVATION_REQUIRED syscall.Errno = 740 + ERROR_REPARSE syscall.Errno = 741 + ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742 + ERROR_VOLUME_MOUNTED syscall.Errno = 743 + ERROR_RXACT_COMMITTED syscall.Errno = 744 + ERROR_NOTIFY_CLEANUP syscall.Errno = 745 + ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746 + ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747 + ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748 + ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749 + ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750 + ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751 + ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752 + ERROR_CRASH_DUMP syscall.Errno = 753 + ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754 + ERROR_REPARSE_OBJECT syscall.Errno = 755 + ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756 + 
ERROR_TRANSLATION_COMPLETE syscall.Errno = 757 + ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758 + ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759 + ERROR_PROCESS_IN_JOB syscall.Errno = 760 + ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761 + ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762 + ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763 + ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764 + ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765 + ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766 + ERROR_DBG_CONTINUE syscall.Errno = 767 + ERROR_CALLBACK_POP_STACK syscall.Errno = 768 + ERROR_COMPRESSION_DISABLED syscall.Errno = 769 + ERROR_CANTFETCHBACKWARDS syscall.Errno = 770 + ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771 + ERROR_ROWSNOTRELEASED syscall.Errno = 772 + ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773 + ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774 + ERROR_NOT_CAPABLE syscall.Errno = 775 + ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776 + ERROR_VERSION_PARSE_ERROR syscall.Errno = 777 + ERROR_BADSTARTPOSITION syscall.Errno = 778 + ERROR_MEMORY_HARDWARE syscall.Errno = 779 + ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780 + ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781 + ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782 + ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783 + ERROR_MCA_EXCEPTION syscall.Errno = 784 + ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785 + ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786 + ERROR_ABANDON_HIBERFILE syscall.Errno = 787 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789 + ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790 + ERROR_BAD_MCFG_TABLE syscall.Errno = 791 + ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792 + ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793 + ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794 + ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795 + ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796 + ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797 + ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798 + ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799 + ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800 + ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801 + ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802 + ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803 + ERROR_NO_ACE_CONDITION syscall.Errno = 804 + ERROR_INVALID_ACE_CONDITION syscall.Errno = 805 + ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806 + ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807 + ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808 + ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809 + ERROR_QUOTA_ACTIVITY syscall.Errno = 810 + ERROR_HANDLE_REVOKED syscall.Errno = 811 + ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812 + ERROR_CPU_SET_INVALID syscall.Errno = 813 + ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814 + ERROR_ENCLAVE_VIOLATION syscall.Errno = 815 + ERROR_EA_ACCESS_DENIED syscall.Errno = 994 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_INCOMPLETE syscall.Errno = 996 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_NOACCESS syscall.Errno = 998 + ERROR_SWAPERROR syscall.Errno = 999 + ERROR_STACK_OVERFLOW syscall.Errno = 1001 + ERROR_INVALID_MESSAGE syscall.Errno = 1002 + ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003 + ERROR_INVALID_FLAGS syscall.Errno = 1004 + ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005 + ERROR_FILE_INVALID syscall.Errno = 1006 + 
ERROR_FULLSCREEN_MODE syscall.Errno = 1007 + ERROR_NO_TOKEN syscall.Errno = 1008 + ERROR_BADDB syscall.Errno = 1009 + ERROR_BADKEY syscall.Errno = 1010 + ERROR_CANTOPEN syscall.Errno = 1011 + ERROR_CANTREAD syscall.Errno = 1012 + ERROR_CANTWRITE syscall.Errno = 1013 + ERROR_REGISTRY_RECOVERED syscall.Errno = 1014 + ERROR_REGISTRY_CORRUPT syscall.Errno = 1015 + ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016 + ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017 + ERROR_KEY_DELETED syscall.Errno = 1018 + ERROR_NO_LOG_SPACE syscall.Errno = 1019 + ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020 + ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021 + ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022 + ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051 + ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052 + ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053 + ERROR_SERVICE_NO_THREAD syscall.Errno = 1054 + ERROR_SERVICE_DATABASE_LOCKED syscall.Errno = 1055 + ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056 + ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057 + ERROR_SERVICE_DISABLED syscall.Errno = 1058 + ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059 + ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060 + ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061 + ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062 + ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063 + ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064 + ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_PROCESS_ABORTED syscall.Errno = 1067 + ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068 + ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069 + ERROR_SERVICE_START_HANG syscall.Errno = 1070 + ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071 + ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072 + ERROR_SERVICE_EXISTS syscall.Errno = 1073 + ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074 + ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075 + ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076 + ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077 + ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078 + ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079 + ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080 + ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081 + ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082 + ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083 + ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084 + ERROR_END_OF_MEDIA syscall.Errno = 1100 + ERROR_FILEMARK_DETECTED syscall.Errno = 1101 + ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102 + ERROR_SETMARK_DETECTED syscall.Errno = 1103 + ERROR_NO_DATA_DETECTED syscall.Errno = 1104 + ERROR_PARTITION_FAILURE syscall.Errno = 1105 + ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106 + ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107 + ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108 + ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109 + ERROR_MEDIA_CHANGED syscall.Errno = 1110 + ERROR_BUS_RESET syscall.Errno = 1111 + ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 + ERROR_DLL_INIT_FAILED syscall.Errno = 1114 + ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115 + ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116 + ERROR_IO_DEVICE syscall.Errno = 1117 + ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118 + ERROR_IRQ_BUSY syscall.Errno = 1119 + ERROR_MORE_WRITES syscall.Errno = 1120 + ERROR_COUNTER_TIMEOUT syscall.Errno = 1121 + ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122 + ERROR_FLOPPY_WRONG_CYLINDER 
syscall.Errno = 1123 + ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124 + ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125 + ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126 + ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127 + ERROR_DISK_RESET_FAILED syscall.Errno = 1128 + ERROR_EOM_OVERFLOW syscall.Errno = 1129 + ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130 + ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131 + ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132 + ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140 + ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141 + ERROR_TOO_MANY_LINKS syscall.Errno = 1142 + ERROR_OLD_WIN_VERSION syscall.Errno = 1150 + ERROR_APP_WRONG_OS syscall.Errno = 1151 + ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152 + ERROR_RMODE_APP syscall.Errno = 1153 + ERROR_INVALID_DLL syscall.Errno = 1154 + ERROR_NO_ASSOCIATION syscall.Errno = 1155 + ERROR_DDE_FAIL syscall.Errno = 1156 + ERROR_DLL_NOT_FOUND syscall.Errno = 1157 + ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158 + ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159 + ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160 + ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161 + ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162 + ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163 + ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164 + ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165 + ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166 + ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_NO_MATCH syscall.Errno = 1169 + ERROR_SET_NOT_FOUND syscall.Errno = 1170 + ERROR_POINT_NOT_FOUND syscall.Errno = 1171 + ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172 + ERROR_NO_VOLUME_ID syscall.Errno = 1173 + ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175 + ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176 + ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177 + ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178 + ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179 + ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180 + ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181 + ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190 + ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191 + ERROR_BAD_DEVICE syscall.Errno = 1200 + ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201 + ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202 + ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203 + ERROR_BAD_PROVIDER syscall.Errno = 1204 + ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205 + ERROR_BAD_PROFILE syscall.Errno = 1206 + ERROR_NOT_CONTAINER syscall.Errno = 1207 + ERROR_EXTENDED_ERROR syscall.Errno = 1208 + ERROR_INVALID_GROUPNAME syscall.Errno = 1209 + ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210 + ERROR_INVALID_EVENTNAME syscall.Errno = 1211 + ERROR_INVALID_DOMAINNAME syscall.Errno = 1212 + ERROR_INVALID_SERVICENAME syscall.Errno = 1213 + ERROR_INVALID_NETNAME syscall.Errno = 1214 + ERROR_INVALID_SHARENAME syscall.Errno = 1215 + ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216 + ERROR_INVALID_MESSAGENAME syscall.Errno = 1217 + ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218 + ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219 + ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220 + ERROR_DUP_DOMAINNAME syscall.Errno = 1221 + ERROR_NO_NETWORK syscall.Errno = 1222 + ERROR_CANCELLED syscall.Errno = 1223 + ERROR_USER_MAPPED_FILE syscall.Errno = 1224 + ERROR_CONNECTION_REFUSED syscall.Errno = 1225 + ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226 + ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227 + 
ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228 + ERROR_CONNECTION_INVALID syscall.Errno = 1229 + ERROR_CONNECTION_ACTIVE syscall.Errno = 1230 + ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231 + ERROR_HOST_UNREACHABLE syscall.Errno = 1232 + ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233 + ERROR_PORT_UNREACHABLE syscall.Errno = 1234 + ERROR_REQUEST_ABORTED syscall.Errno = 1235 + ERROR_CONNECTION_ABORTED syscall.Errno = 1236 + ERROR_RETRY syscall.Errno = 1237 + ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238 + ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239 + ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240 + ERROR_INCORRECT_ADDRESS syscall.Errno = 1241 + ERROR_ALREADY_REGISTERED syscall.Errno = 1242 + ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243 + ERROR_NOT_AUTHENTICATED syscall.Errno = 1244 + ERROR_NOT_LOGGED_ON syscall.Errno = 1245 + ERROR_CONTINUE syscall.Errno = 1246 + ERROR_ALREADY_INITIALIZED syscall.Errno = 1247 + ERROR_NO_MORE_DEVICES syscall.Errno = 1248 + ERROR_NO_SUCH_SITE syscall.Errno = 1249 + ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250 + ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251 + ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252 + ERROR_BAD_USER_PROFILE syscall.Errno = 1253 + ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254 + ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255 + ERROR_HOST_DOWN syscall.Errno = 1256 + ERROR_NON_ACCOUNT_SID syscall.Errno = 1257 + ERROR_NON_DOMAIN_SID syscall.Errno = 1258 + ERROR_APPHELP_BLOCK syscall.Errno = 1259 + ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260 + ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261 + ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262 + ERROR_PKINIT_FAILURE syscall.Errno = 1263 + ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264 + ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265 + ERROR_MACHINE_LOCKED syscall.Errno = 1271 + ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272 + ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273 + ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274 + ERROR_DRIVER_BLOCKED syscall.Errno = 1275 + ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276 + ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277 + ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278 + ERROR_RECOVERY_FAILURE syscall.Errno = 1279 + ERROR_ALREADY_FIBER syscall.Errno = 1280 + ERROR_ALREADY_THREAD syscall.Errno = 1281 + ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282 + ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283 + ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284 + ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285 + ERROR_VDM_DISALLOWED syscall.Errno = 1286 + ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287 + ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288 + ERROR_BEYOND_VDL syscall.Errno = 1289 + ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290 + ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291 + ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292 + ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293 + ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294 + ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295 + ERROR_CONTENT_BLOCKED syscall.Errno = 1296 + ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297 + ERROR_APP_HANG syscall.Errno = 1298 + ERROR_INVALID_LABEL syscall.Errno = 1299 + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + ERROR_SOME_NOT_MAPPED syscall.Errno = 1301 + ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302 + ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303 + ERROR_NULL_LM_PASSWORD syscall.Errno = 1304 + ERROR_UNKNOWN_REVISION syscall.Errno = 1305 + 
ERROR_REVISION_MISMATCH syscall.Errno = 1306 + ERROR_INVALID_OWNER syscall.Errno = 1307 + ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308 + ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309 + ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310 + ERROR_NO_LOGON_SERVERS syscall.Errno = 1311 + ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312 + ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + ERROR_INVALID_ACCOUNT_NAME syscall.Errno = 1315 + ERROR_USER_EXISTS syscall.Errno = 1316 + ERROR_NO_SUCH_USER syscall.Errno = 1317 + ERROR_GROUP_EXISTS syscall.Errno = 1318 + ERROR_NO_SUCH_GROUP syscall.Errno = 1319 + ERROR_MEMBER_IN_GROUP syscall.Errno = 1320 + ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321 + ERROR_LAST_ADMIN syscall.Errno = 1322 + ERROR_WRONG_PASSWORD syscall.Errno = 1323 + ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324 + ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325 + ERROR_LOGON_FAILURE syscall.Errno = 1326 + ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327 + ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328 + ERROR_INVALID_WORKSTATION syscall.Errno = 1329 + ERROR_PASSWORD_EXPIRED syscall.Errno = 1330 + ERROR_ACCOUNT_DISABLED syscall.Errno = 1331 + ERROR_NONE_MAPPED syscall.Errno = 1332 + ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333 + ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334 + ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335 + ERROR_INVALID_ACL syscall.Errno = 1336 + ERROR_INVALID_SID syscall.Errno = 1337 + ERROR_INVALID_SECURITY_DESCR syscall.Errno = 1338 + ERROR_BAD_INHERITANCE_ACL syscall.Errno = 1340 + ERROR_SERVER_DISABLED syscall.Errno = 1341 + ERROR_SERVER_NOT_DISABLED syscall.Errno = 1342 + ERROR_INVALID_ID_AUTHORITY syscall.Errno = 1343 + ERROR_ALLOTTED_SPACE_EXCEEDED syscall.Errno = 1344 + ERROR_INVALID_GROUP_ATTRIBUTES syscall.Errno = 1345 + ERROR_BAD_IMPERSONATION_LEVEL syscall.Errno = 1346 + ERROR_CANT_OPEN_ANONYMOUS syscall.Errno = 1347 + ERROR_BAD_VALIDATION_CLASS syscall.Errno = 1348 + ERROR_BAD_TOKEN_TYPE syscall.Errno = 1349 + ERROR_NO_SECURITY_ON_OBJECT syscall.Errno = 1350 + ERROR_CANT_ACCESS_DOMAIN_INFO syscall.Errno = 1351 + ERROR_INVALID_SERVER_STATE syscall.Errno = 1352 + ERROR_INVALID_DOMAIN_STATE syscall.Errno = 1353 + ERROR_INVALID_DOMAIN_ROLE syscall.Errno = 1354 + ERROR_NO_SUCH_DOMAIN syscall.Errno = 1355 + ERROR_DOMAIN_EXISTS syscall.Errno = 1356 + ERROR_DOMAIN_LIMIT_EXCEEDED syscall.Errno = 1357 + ERROR_INTERNAL_DB_CORRUPTION syscall.Errno = 1358 + ERROR_INTERNAL_ERROR syscall.Errno = 1359 + ERROR_GENERIC_NOT_MAPPED syscall.Errno = 1360 + ERROR_BAD_DESCRIPTOR_FORMAT syscall.Errno = 1361 + ERROR_NOT_LOGON_PROCESS syscall.Errno = 1362 + ERROR_LOGON_SESSION_EXISTS syscall.Errno = 1363 + ERROR_NO_SUCH_PACKAGE syscall.Errno = 1364 + ERROR_BAD_LOGON_SESSION_STATE syscall.Errno = 1365 + ERROR_LOGON_SESSION_COLLISION syscall.Errno = 1366 + ERROR_INVALID_LOGON_TYPE syscall.Errno = 1367 + ERROR_CANNOT_IMPERSONATE syscall.Errno = 1368 + ERROR_RXACT_INVALID_STATE syscall.Errno = 1369 + ERROR_RXACT_COMMIT_FAILURE syscall.Errno = 1370 + ERROR_SPECIAL_ACCOUNT syscall.Errno = 1371 + ERROR_SPECIAL_GROUP syscall.Errno = 1372 + ERROR_SPECIAL_USER syscall.Errno = 1373 + ERROR_MEMBERS_PRIMARY_GROUP syscall.Errno = 1374 + ERROR_TOKEN_ALREADY_IN_USE syscall.Errno = 1375 + ERROR_NO_SUCH_ALIAS syscall.Errno = 1376 + ERROR_MEMBER_NOT_IN_ALIAS syscall.Errno = 1377 + ERROR_MEMBER_IN_ALIAS syscall.Errno = 1378 + ERROR_ALIAS_EXISTS syscall.Errno = 1379 + ERROR_LOGON_NOT_GRANTED syscall.Errno = 1380 + ERROR_TOO_MANY_SECRETS 
syscall.Errno = 1381 + ERROR_SECRET_TOO_LONG syscall.Errno = 1382 + ERROR_INTERNAL_DB_ERROR syscall.Errno = 1383 + ERROR_TOO_MANY_CONTEXT_IDS syscall.Errno = 1384 + ERROR_LOGON_TYPE_NOT_GRANTED syscall.Errno = 1385 + ERROR_NT_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1386 + ERROR_NO_SUCH_MEMBER syscall.Errno = 1387 + ERROR_INVALID_MEMBER syscall.Errno = 1388 + ERROR_TOO_MANY_SIDS syscall.Errno = 1389 + ERROR_LM_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1390 + ERROR_NO_INHERITANCE syscall.Errno = 1391 + ERROR_FILE_CORRUPT syscall.Errno = 1392 + ERROR_DISK_CORRUPT syscall.Errno = 1393 + ERROR_NO_USER_SESSION_KEY syscall.Errno = 1394 + ERROR_LICENSE_QUOTA_EXCEEDED syscall.Errno = 1395 + ERROR_WRONG_TARGET_NAME syscall.Errno = 1396 + ERROR_MUTUAL_AUTH_FAILED syscall.Errno = 1397 + ERROR_TIME_SKEW syscall.Errno = 1398 + ERROR_CURRENT_DOMAIN_NOT_ALLOWED syscall.Errno = 1399 + ERROR_INVALID_WINDOW_HANDLE syscall.Errno = 1400 + ERROR_INVALID_MENU_HANDLE syscall.Errno = 1401 + ERROR_INVALID_CURSOR_HANDLE syscall.Errno = 1402 + ERROR_INVALID_ACCEL_HANDLE syscall.Errno = 1403 + ERROR_INVALID_HOOK_HANDLE syscall.Errno = 1404 + ERROR_INVALID_DWP_HANDLE syscall.Errno = 1405 + ERROR_TLW_WITH_WSCHILD syscall.Errno = 1406 + ERROR_CANNOT_FIND_WND_CLASS syscall.Errno = 1407 + ERROR_WINDOW_OF_OTHER_THREAD syscall.Errno = 1408 + ERROR_HOTKEY_ALREADY_REGISTERED syscall.Errno = 1409 + ERROR_CLASS_ALREADY_EXISTS syscall.Errno = 1410 + ERROR_CLASS_DOES_NOT_EXIST syscall.Errno = 1411 + ERROR_CLASS_HAS_WINDOWS syscall.Errno = 1412 + ERROR_INVALID_INDEX syscall.Errno = 1413 + ERROR_INVALID_ICON_HANDLE syscall.Errno = 1414 + ERROR_PRIVATE_DIALOG_INDEX syscall.Errno = 1415 + ERROR_LISTBOX_ID_NOT_FOUND syscall.Errno = 1416 + ERROR_NO_WILDCARD_CHARACTERS syscall.Errno = 1417 + ERROR_CLIPBOARD_NOT_OPEN syscall.Errno = 1418 + ERROR_HOTKEY_NOT_REGISTERED syscall.Errno = 1419 + ERROR_WINDOW_NOT_DIALOG syscall.Errno = 1420 + ERROR_CONTROL_ID_NOT_FOUND syscall.Errno = 1421 + ERROR_INVALID_COMBOBOX_MESSAGE syscall.Errno = 1422 + ERROR_WINDOW_NOT_COMBOBOX syscall.Errno = 1423 + ERROR_INVALID_EDIT_HEIGHT syscall.Errno = 1424 + ERROR_DC_NOT_FOUND syscall.Errno = 1425 + ERROR_INVALID_HOOK_FILTER syscall.Errno = 1426 + ERROR_INVALID_FILTER_PROC syscall.Errno = 1427 + ERROR_HOOK_NEEDS_HMOD syscall.Errno = 1428 + ERROR_GLOBAL_ONLY_HOOK syscall.Errno = 1429 + ERROR_JOURNAL_HOOK_SET syscall.Errno = 1430 + ERROR_HOOK_NOT_INSTALLED syscall.Errno = 1431 + ERROR_INVALID_LB_MESSAGE syscall.Errno = 1432 + ERROR_SETCOUNT_ON_BAD_LB syscall.Errno = 1433 + ERROR_LB_WITHOUT_TABSTOPS syscall.Errno = 1434 + ERROR_DESTROY_OBJECT_OF_OTHER_THREAD syscall.Errno = 1435 + ERROR_CHILD_WINDOW_MENU syscall.Errno = 1436 + ERROR_NO_SYSTEM_MENU syscall.Errno = 1437 + ERROR_INVALID_MSGBOX_STYLE syscall.Errno = 1438 + ERROR_INVALID_SPI_VALUE syscall.Errno = 1439 + ERROR_SCREEN_ALREADY_LOCKED syscall.Errno = 1440 + ERROR_HWNDS_HAVE_DIFF_PARENT syscall.Errno = 1441 + ERROR_NOT_CHILD_WINDOW syscall.Errno = 1442 + ERROR_INVALID_GW_COMMAND syscall.Errno = 1443 + ERROR_INVALID_THREAD_ID syscall.Errno = 1444 + ERROR_NON_MDICHILD_WINDOW syscall.Errno = 1445 + ERROR_POPUP_ALREADY_ACTIVE syscall.Errno = 1446 + ERROR_NO_SCROLLBARS syscall.Errno = 1447 + ERROR_INVALID_SCROLLBAR_RANGE syscall.Errno = 1448 + ERROR_INVALID_SHOWWIN_COMMAND syscall.Errno = 1449 + ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450 + ERROR_NONPAGED_SYSTEM_RESOURCES syscall.Errno = 1451 + ERROR_PAGED_SYSTEM_RESOURCES syscall.Errno = 1452 + ERROR_WORKING_SET_QUOTA syscall.Errno = 1453 + 
ERROR_PAGEFILE_QUOTA syscall.Errno = 1454 + ERROR_COMMITMENT_LIMIT syscall.Errno = 1455 + ERROR_MENU_ITEM_NOT_FOUND syscall.Errno = 1456 + ERROR_INVALID_KEYBOARD_HANDLE syscall.Errno = 1457 + ERROR_HOOK_TYPE_NOT_ALLOWED syscall.Errno = 1458 + ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION syscall.Errno = 1459 + ERROR_TIMEOUT syscall.Errno = 1460 + ERROR_INVALID_MONITOR_HANDLE syscall.Errno = 1461 + ERROR_INCORRECT_SIZE syscall.Errno = 1462 + ERROR_SYMLINK_CLASS_DISABLED syscall.Errno = 1463 + ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464 + ERROR_XML_PARSE_ERROR syscall.Errno = 1465 + ERROR_XMLDSIG_ERROR syscall.Errno = 1466 + ERROR_RESTART_APPLICATION syscall.Errno = 1467 + ERROR_WRONG_COMPARTMENT syscall.Errno = 1468 + ERROR_AUTHIP_FAILURE syscall.Errno = 1469 + ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470 + ERROR_NOT_GUI_PROCESS syscall.Errno = 1471 + ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500 + ERROR_EVENTLOG_CANT_START syscall.Errno = 1501 + ERROR_LOG_FILE_FULL syscall.Errno = 1502 + ERROR_EVENTLOG_FILE_CHANGED syscall.Errno = 1503 + ERROR_CONTAINER_ASSIGNED syscall.Errno = 1504 + ERROR_JOB_NO_CONTAINER syscall.Errno = 1505 + ERROR_INVALID_TASK_NAME syscall.Errno = 1550 + ERROR_INVALID_TASK_INDEX syscall.Errno = 1551 + ERROR_THREAD_ALREADY_IN_TASK syscall.Errno = 1552 + ERROR_INSTALL_SERVICE_FAILURE syscall.Errno = 1601 + ERROR_INSTALL_USEREXIT syscall.Errno = 1602 + ERROR_INSTALL_FAILURE syscall.Errno = 1603 + ERROR_INSTALL_SUSPEND syscall.Errno = 1604 + ERROR_UNKNOWN_PRODUCT syscall.Errno = 1605 + ERROR_UNKNOWN_FEATURE syscall.Errno = 1606 + ERROR_UNKNOWN_COMPONENT syscall.Errno = 1607 + ERROR_UNKNOWN_PROPERTY syscall.Errno = 1608 + ERROR_INVALID_HANDLE_STATE syscall.Errno = 1609 + ERROR_BAD_CONFIGURATION syscall.Errno = 1610 + ERROR_INDEX_ABSENT syscall.Errno = 1611 + ERROR_INSTALL_SOURCE_ABSENT syscall.Errno = 1612 + ERROR_INSTALL_PACKAGE_VERSION syscall.Errno = 1613 + ERROR_PRODUCT_UNINSTALLED syscall.Errno = 1614 + ERROR_BAD_QUERY_SYNTAX syscall.Errno = 1615 + ERROR_INVALID_FIELD syscall.Errno = 1616 + ERROR_DEVICE_REMOVED syscall.Errno = 1617 + ERROR_INSTALL_ALREADY_RUNNING syscall.Errno = 1618 + ERROR_INSTALL_PACKAGE_OPEN_FAILED syscall.Errno = 1619 + ERROR_INSTALL_PACKAGE_INVALID syscall.Errno = 1620 + ERROR_INSTALL_UI_FAILURE syscall.Errno = 1621 + ERROR_INSTALL_LOG_FAILURE syscall.Errno = 1622 + ERROR_INSTALL_LANGUAGE_UNSUPPORTED syscall.Errno = 1623 + ERROR_INSTALL_TRANSFORM_FAILURE syscall.Errno = 1624 + ERROR_INSTALL_PACKAGE_REJECTED syscall.Errno = 1625 + ERROR_FUNCTION_NOT_CALLED syscall.Errno = 1626 + ERROR_FUNCTION_FAILED syscall.Errno = 1627 + ERROR_INVALID_TABLE syscall.Errno = 1628 + ERROR_DATATYPE_MISMATCH syscall.Errno = 1629 + ERROR_UNSUPPORTED_TYPE syscall.Errno = 1630 + ERROR_CREATE_FAILED syscall.Errno = 1631 + ERROR_INSTALL_TEMP_UNWRITABLE syscall.Errno = 1632 + ERROR_INSTALL_PLATFORM_UNSUPPORTED syscall.Errno = 1633 + ERROR_INSTALL_NOTUSED syscall.Errno = 1634 + ERROR_PATCH_PACKAGE_OPEN_FAILED syscall.Errno = 1635 + ERROR_PATCH_PACKAGE_INVALID syscall.Errno = 1636 + ERROR_PATCH_PACKAGE_UNSUPPORTED syscall.Errno = 1637 + ERROR_PRODUCT_VERSION syscall.Errno = 1638 + ERROR_INVALID_COMMAND_LINE syscall.Errno = 1639 + ERROR_INSTALL_REMOTE_DISALLOWED syscall.Errno = 1640 + ERROR_SUCCESS_REBOOT_INITIATED syscall.Errno = 1641 + ERROR_PATCH_TARGET_NOT_FOUND syscall.Errno = 1642 + ERROR_PATCH_PACKAGE_REJECTED syscall.Errno = 1643 + ERROR_INSTALL_TRANSFORM_REJECTED syscall.Errno = 1644 + ERROR_INSTALL_REMOTE_PROHIBITED syscall.Errno = 1645 + 
ERROR_PATCH_REMOVAL_UNSUPPORTED syscall.Errno = 1646 + ERROR_UNKNOWN_PATCH syscall.Errno = 1647 + ERROR_PATCH_NO_SEQUENCE syscall.Errno = 1648 + ERROR_PATCH_REMOVAL_DISALLOWED syscall.Errno = 1649 + ERROR_INVALID_PATCH_XML syscall.Errno = 1650 + ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT syscall.Errno = 1651 + ERROR_INSTALL_SERVICE_SAFEBOOT syscall.Errno = 1652 + ERROR_FAIL_FAST_EXCEPTION syscall.Errno = 1653 + ERROR_INSTALL_REJECTED syscall.Errno = 1654 + ERROR_DYNAMIC_CODE_BLOCKED syscall.Errno = 1655 + ERROR_NOT_SAME_OBJECT syscall.Errno = 1656 + ERROR_STRICT_CFG_VIOLATION syscall.Errno = 1657 + ERROR_SET_CONTEXT_DENIED syscall.Errno = 1660 + ERROR_CROSS_PARTITION_VIOLATION syscall.Errno = 1661 + RPC_S_INVALID_STRING_BINDING syscall.Errno = 1700 + RPC_S_WRONG_KIND_OF_BINDING syscall.Errno = 1701 + RPC_S_INVALID_BINDING syscall.Errno = 1702 + RPC_S_PROTSEQ_NOT_SUPPORTED syscall.Errno = 1703 + RPC_S_INVALID_RPC_PROTSEQ syscall.Errno = 1704 + RPC_S_INVALID_STRING_UUID syscall.Errno = 1705 + RPC_S_INVALID_ENDPOINT_FORMAT syscall.Errno = 1706 + RPC_S_INVALID_NET_ADDR syscall.Errno = 1707 + RPC_S_NO_ENDPOINT_FOUND syscall.Errno = 1708 + RPC_S_INVALID_TIMEOUT syscall.Errno = 1709 + RPC_S_OBJECT_NOT_FOUND syscall.Errno = 1710 + RPC_S_ALREADY_REGISTERED syscall.Errno = 1711 + RPC_S_TYPE_ALREADY_REGISTERED syscall.Errno = 1712 + RPC_S_ALREADY_LISTENING syscall.Errno = 1713 + RPC_S_NO_PROTSEQS_REGISTERED syscall.Errno = 1714 + RPC_S_NOT_LISTENING syscall.Errno = 1715 + RPC_S_UNKNOWN_MGR_TYPE syscall.Errno = 1716 + RPC_S_UNKNOWN_IF syscall.Errno = 1717 + RPC_S_NO_BINDINGS syscall.Errno = 1718 + RPC_S_NO_PROTSEQS syscall.Errno = 1719 + RPC_S_CANT_CREATE_ENDPOINT syscall.Errno = 1720 + RPC_S_OUT_OF_RESOURCES syscall.Errno = 1721 + RPC_S_SERVER_UNAVAILABLE syscall.Errno = 1722 + RPC_S_SERVER_TOO_BUSY syscall.Errno = 1723 + RPC_S_INVALID_NETWORK_OPTIONS syscall.Errno = 1724 + RPC_S_NO_CALL_ACTIVE syscall.Errno = 1725 + RPC_S_CALL_FAILED syscall.Errno = 1726 + RPC_S_CALL_FAILED_DNE syscall.Errno = 1727 + RPC_S_PROTOCOL_ERROR syscall.Errno = 1728 + RPC_S_PROXY_ACCESS_DENIED syscall.Errno = 1729 + RPC_S_UNSUPPORTED_TRANS_SYN syscall.Errno = 1730 + RPC_S_UNSUPPORTED_TYPE syscall.Errno = 1732 + RPC_S_INVALID_TAG syscall.Errno = 1733 + RPC_S_INVALID_BOUND syscall.Errno = 1734 + RPC_S_NO_ENTRY_NAME syscall.Errno = 1735 + RPC_S_INVALID_NAME_SYNTAX syscall.Errno = 1736 + RPC_S_UNSUPPORTED_NAME_SYNTAX syscall.Errno = 1737 + RPC_S_UUID_NO_ADDRESS syscall.Errno = 1739 + RPC_S_DUPLICATE_ENDPOINT syscall.Errno = 1740 + RPC_S_UNKNOWN_AUTHN_TYPE syscall.Errno = 1741 + RPC_S_MAX_CALLS_TOO_SMALL syscall.Errno = 1742 + RPC_S_STRING_TOO_LONG syscall.Errno = 1743 + RPC_S_PROTSEQ_NOT_FOUND syscall.Errno = 1744 + RPC_S_PROCNUM_OUT_OF_RANGE syscall.Errno = 1745 + RPC_S_BINDING_HAS_NO_AUTH syscall.Errno = 1746 + RPC_S_UNKNOWN_AUTHN_SERVICE syscall.Errno = 1747 + RPC_S_UNKNOWN_AUTHN_LEVEL syscall.Errno = 1748 + RPC_S_INVALID_AUTH_IDENTITY syscall.Errno = 1749 + RPC_S_UNKNOWN_AUTHZ_SERVICE syscall.Errno = 1750 + EPT_S_INVALID_ENTRY syscall.Errno = 1751 + EPT_S_CANT_PERFORM_OP syscall.Errno = 1752 + EPT_S_NOT_REGISTERED syscall.Errno = 1753 + RPC_S_NOTHING_TO_EXPORT syscall.Errno = 1754 + RPC_S_INCOMPLETE_NAME syscall.Errno = 1755 + RPC_S_INVALID_VERS_OPTION syscall.Errno = 1756 + RPC_S_NO_MORE_MEMBERS syscall.Errno = 1757 + RPC_S_NOT_ALL_OBJS_UNEXPORTED syscall.Errno = 1758 + RPC_S_INTERFACE_NOT_FOUND syscall.Errno = 1759 + RPC_S_ENTRY_ALREADY_EXISTS syscall.Errno = 1760 + RPC_S_ENTRY_NOT_FOUND syscall.Errno = 1761 + 
RPC_S_NAME_SERVICE_UNAVAILABLE syscall.Errno = 1762 + RPC_S_INVALID_NAF_ID syscall.Errno = 1763 + RPC_S_CANNOT_SUPPORT syscall.Errno = 1764 + RPC_S_NO_CONTEXT_AVAILABLE syscall.Errno = 1765 + RPC_S_INTERNAL_ERROR syscall.Errno = 1766 + RPC_S_ZERO_DIVIDE syscall.Errno = 1767 + RPC_S_ADDRESS_ERROR syscall.Errno = 1768 + RPC_S_FP_DIV_ZERO syscall.Errno = 1769 + RPC_S_FP_UNDERFLOW syscall.Errno = 1770 + RPC_S_FP_OVERFLOW syscall.Errno = 1771 + RPC_X_NO_MORE_ENTRIES syscall.Errno = 1772 + RPC_X_SS_CHAR_TRANS_OPEN_FAIL syscall.Errno = 1773 + RPC_X_SS_CHAR_TRANS_SHORT_FILE syscall.Errno = 1774 + RPC_X_SS_IN_NULL_CONTEXT syscall.Errno = 1775 + RPC_X_SS_CONTEXT_DAMAGED syscall.Errno = 1777 + RPC_X_SS_HANDLES_MISMATCH syscall.Errno = 1778 + RPC_X_SS_CANNOT_GET_CALL_HANDLE syscall.Errno = 1779 + RPC_X_NULL_REF_POINTER syscall.Errno = 1780 + RPC_X_ENUM_VALUE_OUT_OF_RANGE syscall.Errno = 1781 + RPC_X_BYTE_COUNT_TOO_SMALL syscall.Errno = 1782 + RPC_X_BAD_STUB_DATA syscall.Errno = 1783 + ERROR_INVALID_USER_BUFFER syscall.Errno = 1784 + ERROR_UNRECOGNIZED_MEDIA syscall.Errno = 1785 + ERROR_NO_TRUST_LSA_SECRET syscall.Errno = 1786 + ERROR_NO_TRUST_SAM_ACCOUNT syscall.Errno = 1787 + ERROR_TRUSTED_DOMAIN_FAILURE syscall.Errno = 1788 + ERROR_TRUSTED_RELATIONSHIP_FAILURE syscall.Errno = 1789 + ERROR_TRUST_FAILURE syscall.Errno = 1790 + RPC_S_CALL_IN_PROGRESS syscall.Errno = 1791 + ERROR_NETLOGON_NOT_STARTED syscall.Errno = 1792 + ERROR_ACCOUNT_EXPIRED syscall.Errno = 1793 + ERROR_REDIRECTOR_HAS_OPEN_HANDLES syscall.Errno = 1794 + ERROR_PRINTER_DRIVER_ALREADY_INSTALLED syscall.Errno = 1795 + ERROR_UNKNOWN_PORT syscall.Errno = 1796 + ERROR_UNKNOWN_PRINTER_DRIVER syscall.Errno = 1797 + ERROR_UNKNOWN_PRINTPROCESSOR syscall.Errno = 1798 + ERROR_INVALID_SEPARATOR_FILE syscall.Errno = 1799 + ERROR_INVALID_PRIORITY syscall.Errno = 1800 + ERROR_INVALID_PRINTER_NAME syscall.Errno = 1801 + ERROR_PRINTER_ALREADY_EXISTS syscall.Errno = 1802 + ERROR_INVALID_PRINTER_COMMAND syscall.Errno = 1803 + ERROR_INVALID_DATATYPE syscall.Errno = 1804 + ERROR_INVALID_ENVIRONMENT syscall.Errno = 1805 + RPC_S_NO_MORE_BINDINGS syscall.Errno = 1806 + ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT syscall.Errno = 1807 + ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT syscall.Errno = 1808 + ERROR_NOLOGON_SERVER_TRUST_ACCOUNT syscall.Errno = 1809 + ERROR_DOMAIN_TRUST_INCONSISTENT syscall.Errno = 1810 + ERROR_SERVER_HAS_OPEN_HANDLES syscall.Errno = 1811 + ERROR_RESOURCE_DATA_NOT_FOUND syscall.Errno = 1812 + ERROR_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 1813 + ERROR_RESOURCE_NAME_NOT_FOUND syscall.Errno = 1814 + ERROR_RESOURCE_LANG_NOT_FOUND syscall.Errno = 1815 + ERROR_NOT_ENOUGH_QUOTA syscall.Errno = 1816 + RPC_S_NO_INTERFACES syscall.Errno = 1817 + RPC_S_CALL_CANCELLED syscall.Errno = 1818 + RPC_S_BINDING_INCOMPLETE syscall.Errno = 1819 + RPC_S_COMM_FAILURE syscall.Errno = 1820 + RPC_S_UNSUPPORTED_AUTHN_LEVEL syscall.Errno = 1821 + RPC_S_NO_PRINC_NAME syscall.Errno = 1822 + RPC_S_NOT_RPC_ERROR syscall.Errno = 1823 + RPC_S_UUID_LOCAL_ONLY syscall.Errno = 1824 + RPC_S_SEC_PKG_ERROR syscall.Errno = 1825 + RPC_S_NOT_CANCELLED syscall.Errno = 1826 + RPC_X_INVALID_ES_ACTION syscall.Errno = 1827 + RPC_X_WRONG_ES_VERSION syscall.Errno = 1828 + RPC_X_WRONG_STUB_VERSION syscall.Errno = 1829 + RPC_X_INVALID_PIPE_OBJECT syscall.Errno = 1830 + RPC_X_WRONG_PIPE_ORDER syscall.Errno = 1831 + RPC_X_WRONG_PIPE_VERSION syscall.Errno = 1832 + RPC_S_COOKIE_AUTH_FAILED syscall.Errno = 1833 + RPC_S_DO_NOT_DISTURB syscall.Errno = 1834 + RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED 
syscall.Errno = 1835 + RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH syscall.Errno = 1836 + RPC_S_GROUP_MEMBER_NOT_FOUND syscall.Errno = 1898 + EPT_S_CANT_CREATE syscall.Errno = 1899 + RPC_S_INVALID_OBJECT syscall.Errno = 1900 + ERROR_INVALID_TIME syscall.Errno = 1901 + ERROR_INVALID_FORM_NAME syscall.Errno = 1902 + ERROR_INVALID_FORM_SIZE syscall.Errno = 1903 + ERROR_ALREADY_WAITING syscall.Errno = 1904 + ERROR_PRINTER_DELETED syscall.Errno = 1905 + ERROR_INVALID_PRINTER_STATE syscall.Errno = 1906 + ERROR_PASSWORD_MUST_CHANGE syscall.Errno = 1907 + ERROR_DOMAIN_CONTROLLER_NOT_FOUND syscall.Errno = 1908 + ERROR_ACCOUNT_LOCKED_OUT syscall.Errno = 1909 + OR_INVALID_OXID syscall.Errno = 1910 + OR_INVALID_OID syscall.Errno = 1911 + OR_INVALID_SET syscall.Errno = 1912 + RPC_S_SEND_INCOMPLETE syscall.Errno = 1913 + RPC_S_INVALID_ASYNC_HANDLE syscall.Errno = 1914 + RPC_S_INVALID_ASYNC_CALL syscall.Errno = 1915 + RPC_X_PIPE_CLOSED syscall.Errno = 1916 + RPC_X_PIPE_DISCIPLINE_ERROR syscall.Errno = 1917 + RPC_X_PIPE_EMPTY syscall.Errno = 1918 + ERROR_NO_SITENAME syscall.Errno = 1919 + ERROR_CANT_ACCESS_FILE syscall.Errno = 1920 + ERROR_CANT_RESOLVE_FILENAME syscall.Errno = 1921 + RPC_S_ENTRY_TYPE_MISMATCH syscall.Errno = 1922 + RPC_S_NOT_ALL_OBJS_EXPORTED syscall.Errno = 1923 + RPC_S_INTERFACE_NOT_EXPORTED syscall.Errno = 1924 + RPC_S_PROFILE_NOT_ADDED syscall.Errno = 1925 + RPC_S_PRF_ELT_NOT_ADDED syscall.Errno = 1926 + RPC_S_PRF_ELT_NOT_REMOVED syscall.Errno = 1927 + RPC_S_GRP_ELT_NOT_ADDED syscall.Errno = 1928 + RPC_S_GRP_ELT_NOT_REMOVED syscall.Errno = 1929 + ERROR_KM_DRIVER_BLOCKED syscall.Errno = 1930 + ERROR_CONTEXT_EXPIRED syscall.Errno = 1931 + ERROR_PER_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1932 + ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1933 + ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED syscall.Errno = 1934 + ERROR_AUTHENTICATION_FIREWALL_FAILED syscall.Errno = 1935 + ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED syscall.Errno = 1936 + ERROR_NTLM_BLOCKED syscall.Errno = 1937 + ERROR_PASSWORD_CHANGE_REQUIRED syscall.Errno = 1938 + ERROR_LOST_MODE_LOGON_RESTRICTION syscall.Errno = 1939 + ERROR_INVALID_PIXEL_FORMAT syscall.Errno = 2000 + ERROR_BAD_DRIVER syscall.Errno = 2001 + ERROR_INVALID_WINDOW_STYLE syscall.Errno = 2002 + ERROR_METAFILE_NOT_SUPPORTED syscall.Errno = 2003 + ERROR_TRANSFORM_NOT_SUPPORTED syscall.Errno = 2004 + ERROR_CLIPPING_NOT_SUPPORTED syscall.Errno = 2005 + ERROR_INVALID_CMM syscall.Errno = 2010 + ERROR_INVALID_PROFILE syscall.Errno = 2011 + ERROR_TAG_NOT_FOUND syscall.Errno = 2012 + ERROR_TAG_NOT_PRESENT syscall.Errno = 2013 + ERROR_DUPLICATE_TAG syscall.Errno = 2014 + ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE syscall.Errno = 2015 + ERROR_PROFILE_NOT_FOUND syscall.Errno = 2016 + ERROR_INVALID_COLORSPACE syscall.Errno = 2017 + ERROR_ICM_NOT_ENABLED syscall.Errno = 2018 + ERROR_DELETING_ICM_XFORM syscall.Errno = 2019 + ERROR_INVALID_TRANSFORM syscall.Errno = 2020 + ERROR_COLORSPACE_MISMATCH syscall.Errno = 2021 + ERROR_INVALID_COLORINDEX syscall.Errno = 2022 + ERROR_PROFILE_DOES_NOT_MATCH_DEVICE syscall.Errno = 2023 + ERROR_CONNECTED_OTHER_PASSWORD syscall.Errno = 2108 + ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT syscall.Errno = 2109 + ERROR_BAD_USERNAME syscall.Errno = 2202 + ERROR_NOT_CONNECTED syscall.Errno = 2250 + ERROR_OPEN_FILES syscall.Errno = 2401 + ERROR_ACTIVE_CONNECTIONS syscall.Errno = 2402 + ERROR_DEVICE_IN_USE syscall.Errno = 2404 + ERROR_UNKNOWN_PRINT_MONITOR syscall.Errno = 3000 + ERROR_PRINTER_DRIVER_IN_USE syscall.Errno = 3001 + ERROR_SPOOL_FILE_NOT_FOUND 
syscall.Errno = 3002 + ERROR_SPL_NO_STARTDOC syscall.Errno = 3003 + ERROR_SPL_NO_ADDJOB syscall.Errno = 3004 + ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED syscall.Errno = 3005 + ERROR_PRINT_MONITOR_ALREADY_INSTALLED syscall.Errno = 3006 + ERROR_INVALID_PRINT_MONITOR syscall.Errno = 3007 + ERROR_PRINT_MONITOR_IN_USE syscall.Errno = 3008 + ERROR_PRINTER_HAS_JOBS_QUEUED syscall.Errno = 3009 + ERROR_SUCCESS_REBOOT_REQUIRED syscall.Errno = 3010 + ERROR_SUCCESS_RESTART_REQUIRED syscall.Errno = 3011 + ERROR_PRINTER_NOT_FOUND syscall.Errno = 3012 + ERROR_PRINTER_DRIVER_WARNED syscall.Errno = 3013 + ERROR_PRINTER_DRIVER_BLOCKED syscall.Errno = 3014 + ERROR_PRINTER_DRIVER_PACKAGE_IN_USE syscall.Errno = 3015 + ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND syscall.Errno = 3016 + ERROR_FAIL_REBOOT_REQUIRED syscall.Errno = 3017 + ERROR_FAIL_REBOOT_INITIATED syscall.Errno = 3018 + ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED syscall.Errno = 3019 + ERROR_PRINT_JOB_RESTART_REQUIRED syscall.Errno = 3020 + ERROR_INVALID_PRINTER_DRIVER_MANIFEST syscall.Errno = 3021 + ERROR_PRINTER_NOT_SHAREABLE syscall.Errno = 3022 + ERROR_REQUEST_PAUSED syscall.Errno = 3050 + ERROR_APPEXEC_CONDITION_NOT_SATISFIED syscall.Errno = 3060 + ERROR_APPEXEC_HANDLE_INVALIDATED syscall.Errno = 3061 + ERROR_APPEXEC_INVALID_HOST_GENERATION syscall.Errno = 3062 + ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION syscall.Errno = 3063 + ERROR_APPEXEC_INVALID_HOST_STATE syscall.Errno = 3064 + ERROR_APPEXEC_NO_DONOR syscall.Errno = 3065 + ERROR_APPEXEC_HOST_ID_MISMATCH syscall.Errno = 3066 + ERROR_APPEXEC_UNKNOWN_USER syscall.Errno = 3067 + ERROR_IO_REISSUE_AS_CACHED syscall.Errno = 3950 + ERROR_WINS_INTERNAL syscall.Errno = 4000 + ERROR_CAN_NOT_DEL_LOCAL_WINS syscall.Errno = 4001 + ERROR_STATIC_INIT syscall.Errno = 4002 + ERROR_INC_BACKUP syscall.Errno = 4003 + ERROR_FULL_BACKUP syscall.Errno = 4004 + ERROR_REC_NON_EXISTENT syscall.Errno = 4005 + ERROR_RPL_NOT_ALLOWED syscall.Errno = 4006 + PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED syscall.Errno = 4050 + PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO syscall.Errno = 4051 + PEERDIST_ERROR_MISSING_DATA syscall.Errno = 4052 + PEERDIST_ERROR_NO_MORE syscall.Errno = 4053 + PEERDIST_ERROR_NOT_INITIALIZED syscall.Errno = 4054 + PEERDIST_ERROR_ALREADY_INITIALIZED syscall.Errno = 4055 + PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 4056 + PEERDIST_ERROR_INVALIDATED syscall.Errno = 4057 + PEERDIST_ERROR_ALREADY_EXISTS syscall.Errno = 4058 + PEERDIST_ERROR_OPERATION_NOTFOUND syscall.Errno = 4059 + PEERDIST_ERROR_ALREADY_COMPLETED syscall.Errno = 4060 + PEERDIST_ERROR_OUT_OF_BOUNDS syscall.Errno = 4061 + PEERDIST_ERROR_VERSION_UNSUPPORTED syscall.Errno = 4062 + PEERDIST_ERROR_INVALID_CONFIGURATION syscall.Errno = 4063 + PEERDIST_ERROR_NOT_LICENSED syscall.Errno = 4064 + PEERDIST_ERROR_SERVICE_UNAVAILABLE syscall.Errno = 4065 + PEERDIST_ERROR_TRUST_FAILURE syscall.Errno = 4066 + ERROR_DHCP_ADDRESS_CONFLICT syscall.Errno = 4100 + ERROR_WMI_GUID_NOT_FOUND syscall.Errno = 4200 + ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201 + ERROR_WMI_ITEMID_NOT_FOUND syscall.Errno = 4202 + ERROR_WMI_TRY_AGAIN syscall.Errno = 4203 + ERROR_WMI_DP_NOT_FOUND syscall.Errno = 4204 + ERROR_WMI_UNRESOLVED_INSTANCE_REF syscall.Errno = 4205 + ERROR_WMI_ALREADY_ENABLED syscall.Errno = 4206 + ERROR_WMI_GUID_DISCONNECTED syscall.Errno = 4207 + ERROR_WMI_SERVER_UNAVAILABLE syscall.Errno = 4208 + ERROR_WMI_DP_FAILED syscall.Errno = 4209 + ERROR_WMI_INVALID_MOF syscall.Errno = 4210 + ERROR_WMI_INVALID_REGINFO syscall.Errno = 4211 + 
ERROR_WMI_ALREADY_DISABLED syscall.Errno = 4212 + ERROR_WMI_READ_ONLY syscall.Errno = 4213 + ERROR_WMI_SET_FAILURE syscall.Errno = 4214 + ERROR_NOT_APPCONTAINER syscall.Errno = 4250 + ERROR_APPCONTAINER_REQUIRED syscall.Errno = 4251 + ERROR_NOT_SUPPORTED_IN_APPCONTAINER syscall.Errno = 4252 + ERROR_INVALID_PACKAGE_SID_LENGTH syscall.Errno = 4253 + ERROR_INVALID_MEDIA syscall.Errno = 4300 + ERROR_INVALID_LIBRARY syscall.Errno = 4301 + ERROR_INVALID_MEDIA_POOL syscall.Errno = 4302 + ERROR_DRIVE_MEDIA_MISMATCH syscall.Errno = 4303 + ERROR_MEDIA_OFFLINE syscall.Errno = 4304 + ERROR_LIBRARY_OFFLINE syscall.Errno = 4305 + ERROR_EMPTY syscall.Errno = 4306 + ERROR_NOT_EMPTY syscall.Errno = 4307 + ERROR_MEDIA_UNAVAILABLE syscall.Errno = 4308 + ERROR_RESOURCE_DISABLED syscall.Errno = 4309 + ERROR_INVALID_CLEANER syscall.Errno = 4310 + ERROR_UNABLE_TO_CLEAN syscall.Errno = 4311 + ERROR_OBJECT_NOT_FOUND syscall.Errno = 4312 + ERROR_DATABASE_FAILURE syscall.Errno = 4313 + ERROR_DATABASE_FULL syscall.Errno = 4314 + ERROR_MEDIA_INCOMPATIBLE syscall.Errno = 4315 + ERROR_RESOURCE_NOT_PRESENT syscall.Errno = 4316 + ERROR_INVALID_OPERATION syscall.Errno = 4317 + ERROR_MEDIA_NOT_AVAILABLE syscall.Errno = 4318 + ERROR_DEVICE_NOT_AVAILABLE syscall.Errno = 4319 + ERROR_REQUEST_REFUSED syscall.Errno = 4320 + ERROR_INVALID_DRIVE_OBJECT syscall.Errno = 4321 + ERROR_LIBRARY_FULL syscall.Errno = 4322 + ERROR_MEDIUM_NOT_ACCESSIBLE syscall.Errno = 4323 + ERROR_UNABLE_TO_LOAD_MEDIUM syscall.Errno = 4324 + ERROR_UNABLE_TO_INVENTORY_DRIVE syscall.Errno = 4325 + ERROR_UNABLE_TO_INVENTORY_SLOT syscall.Errno = 4326 + ERROR_UNABLE_TO_INVENTORY_TRANSPORT syscall.Errno = 4327 + ERROR_TRANSPORT_FULL syscall.Errno = 4328 + ERROR_CONTROLLING_IEPORT syscall.Errno = 4329 + ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA syscall.Errno = 4330 + ERROR_CLEANER_SLOT_SET syscall.Errno = 4331 + ERROR_CLEANER_SLOT_NOT_SET syscall.Errno = 4332 + ERROR_CLEANER_CARTRIDGE_SPENT syscall.Errno = 4333 + ERROR_UNEXPECTED_OMID syscall.Errno = 4334 + ERROR_CANT_DELETE_LAST_ITEM syscall.Errno = 4335 + ERROR_MESSAGE_EXCEEDS_MAX_SIZE syscall.Errno = 4336 + ERROR_VOLUME_CONTAINS_SYS_FILES syscall.Errno = 4337 + ERROR_INDIGENOUS_TYPE syscall.Errno = 4338 + ERROR_NO_SUPPORTING_DRIVES syscall.Errno = 4339 + ERROR_CLEANER_CARTRIDGE_INSTALLED syscall.Errno = 4340 + ERROR_IEPORT_FULL syscall.Errno = 4341 + ERROR_FILE_OFFLINE syscall.Errno = 4350 + ERROR_REMOTE_STORAGE_NOT_ACTIVE syscall.Errno = 4351 + ERROR_REMOTE_STORAGE_MEDIA_ERROR syscall.Errno = 4352 + ERROR_NOT_A_REPARSE_POINT syscall.Errno = 4390 + ERROR_REPARSE_ATTRIBUTE_CONFLICT syscall.Errno = 4391 + ERROR_INVALID_REPARSE_DATA syscall.Errno = 4392 + ERROR_REPARSE_TAG_INVALID syscall.Errno = 4393 + ERROR_REPARSE_TAG_MISMATCH syscall.Errno = 4394 + ERROR_REPARSE_POINT_ENCOUNTERED syscall.Errno = 4395 + ERROR_APP_DATA_NOT_FOUND syscall.Errno = 4400 + ERROR_APP_DATA_EXPIRED syscall.Errno = 4401 + ERROR_APP_DATA_CORRUPT syscall.Errno = 4402 + ERROR_APP_DATA_LIMIT_EXCEEDED syscall.Errno = 4403 + ERROR_APP_DATA_REBOOT_REQUIRED syscall.Errno = 4404 + ERROR_SECUREBOOT_ROLLBACK_DETECTED syscall.Errno = 4420 + ERROR_SECUREBOOT_POLICY_VIOLATION syscall.Errno = 4421 + ERROR_SECUREBOOT_INVALID_POLICY syscall.Errno = 4422 + ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND syscall.Errno = 4423 + ERROR_SECUREBOOT_POLICY_NOT_SIGNED syscall.Errno = 4424 + ERROR_SECUREBOOT_NOT_ENABLED syscall.Errno = 4425 + ERROR_SECUREBOOT_FILE_REPLACED syscall.Errno = 4426 + ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED syscall.Errno = 4427 + 
ERROR_SECUREBOOT_POLICY_UNKNOWN syscall.Errno = 4428 + ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION syscall.Errno = 4429 + ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH syscall.Errno = 4430 + ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED syscall.Errno = 4431 + ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH syscall.Errno = 4432 + ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING syscall.Errno = 4433 + ERROR_SECUREBOOT_NOT_BASE_POLICY syscall.Errno = 4434 + ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY syscall.Errno = 4435 + ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED syscall.Errno = 4440 + ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED syscall.Errno = 4441 + ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED syscall.Errno = 4442 + ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED syscall.Errno = 4443 + ERROR_ALREADY_HAS_STREAM_ID syscall.Errno = 4444 + ERROR_SMR_GARBAGE_COLLECTION_REQUIRED syscall.Errno = 4445 + ERROR_WOF_WIM_HEADER_CORRUPT syscall.Errno = 4446 + ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT syscall.Errno = 4447 + ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT syscall.Errno = 4448 + ERROR_VOLUME_NOT_SIS_ENABLED syscall.Errno = 4500 + ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED syscall.Errno = 4550 + ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION syscall.Errno = 4551 + ERROR_SYSTEM_INTEGRITY_INVALID_POLICY syscall.Errno = 4552 + ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED syscall.Errno = 4553 + ERROR_SYSTEM_INTEGRITY_TOO_MANY_POLICIES syscall.Errno = 4554 + ERROR_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED syscall.Errno = 4555 + ERROR_VSM_NOT_INITIALIZED syscall.Errno = 4560 + ERROR_VSM_DMA_PROTECTION_NOT_IN_USE syscall.Errno = 4561 + ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED syscall.Errno = 4570 + ERROR_PLATFORM_MANIFEST_INVALID syscall.Errno = 4571 + ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED syscall.Errno = 4572 + ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED syscall.Errno = 4573 + ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND syscall.Errno = 4574 + ERROR_PLATFORM_MANIFEST_NOT_ACTIVE syscall.Errno = 4575 + ERROR_PLATFORM_MANIFEST_NOT_SIGNED syscall.Errno = 4576 + ERROR_DEPENDENT_RESOURCE_EXISTS syscall.Errno = 5001 + ERROR_DEPENDENCY_NOT_FOUND syscall.Errno = 5002 + ERROR_DEPENDENCY_ALREADY_EXISTS syscall.Errno = 5003 + ERROR_RESOURCE_NOT_ONLINE syscall.Errno = 5004 + ERROR_HOST_NODE_NOT_AVAILABLE syscall.Errno = 5005 + ERROR_RESOURCE_NOT_AVAILABLE syscall.Errno = 5006 + ERROR_RESOURCE_NOT_FOUND syscall.Errno = 5007 + ERROR_SHUTDOWN_CLUSTER syscall.Errno = 5008 + ERROR_CANT_EVICT_ACTIVE_NODE syscall.Errno = 5009 + ERROR_OBJECT_ALREADY_EXISTS syscall.Errno = 5010 + ERROR_OBJECT_IN_LIST syscall.Errno = 5011 + ERROR_GROUP_NOT_AVAILABLE syscall.Errno = 5012 + ERROR_GROUP_NOT_FOUND syscall.Errno = 5013 + ERROR_GROUP_NOT_ONLINE syscall.Errno = 5014 + ERROR_HOST_NODE_NOT_RESOURCE_OWNER syscall.Errno = 5015 + ERROR_HOST_NODE_NOT_GROUP_OWNER syscall.Errno = 5016 + ERROR_RESMON_CREATE_FAILED syscall.Errno = 5017 + ERROR_RESMON_ONLINE_FAILED syscall.Errno = 5018 + ERROR_RESOURCE_ONLINE syscall.Errno = 5019 + ERROR_QUORUM_RESOURCE syscall.Errno = 5020 + ERROR_NOT_QUORUM_CAPABLE syscall.Errno = 5021 + ERROR_CLUSTER_SHUTTING_DOWN syscall.Errno = 5022 + ERROR_INVALID_STATE syscall.Errno = 5023 + ERROR_RESOURCE_PROPERTIES_STORED syscall.Errno = 5024 + ERROR_NOT_QUORUM_CLASS syscall.Errno = 5025 + ERROR_CORE_RESOURCE syscall.Errno = 5026 + ERROR_QUORUM_RESOURCE_ONLINE_FAILED syscall.Errno = 5027 + ERROR_QUORUMLOG_OPEN_FAILED syscall.Errno = 5028 + ERROR_CLUSTERLOG_CORRUPT syscall.Errno = 5029 + ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE syscall.Errno = 5030 + 
ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE syscall.Errno = 5031 + ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND syscall.Errno = 5032 + ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE syscall.Errno = 5033 + ERROR_QUORUM_OWNER_ALIVE syscall.Errno = 5034 + ERROR_NETWORK_NOT_AVAILABLE syscall.Errno = 5035 + ERROR_NODE_NOT_AVAILABLE syscall.Errno = 5036 + ERROR_ALL_NODES_NOT_AVAILABLE syscall.Errno = 5037 + ERROR_RESOURCE_FAILED syscall.Errno = 5038 + ERROR_CLUSTER_INVALID_NODE syscall.Errno = 5039 + ERROR_CLUSTER_NODE_EXISTS syscall.Errno = 5040 + ERROR_CLUSTER_JOIN_IN_PROGRESS syscall.Errno = 5041 + ERROR_CLUSTER_NODE_NOT_FOUND syscall.Errno = 5042 + ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND syscall.Errno = 5043 + ERROR_CLUSTER_NETWORK_EXISTS syscall.Errno = 5044 + ERROR_CLUSTER_NETWORK_NOT_FOUND syscall.Errno = 5045 + ERROR_CLUSTER_NETINTERFACE_EXISTS syscall.Errno = 5046 + ERROR_CLUSTER_NETINTERFACE_NOT_FOUND syscall.Errno = 5047 + ERROR_CLUSTER_INVALID_REQUEST syscall.Errno = 5048 + ERROR_CLUSTER_INVALID_NETWORK_PROVIDER syscall.Errno = 5049 + ERROR_CLUSTER_NODE_DOWN syscall.Errno = 5050 + ERROR_CLUSTER_NODE_UNREACHABLE syscall.Errno = 5051 + ERROR_CLUSTER_NODE_NOT_MEMBER syscall.Errno = 5052 + ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS syscall.Errno = 5053 + ERROR_CLUSTER_INVALID_NETWORK syscall.Errno = 5054 + ERROR_CLUSTER_NODE_UP syscall.Errno = 5056 + ERROR_CLUSTER_IPADDR_IN_USE syscall.Errno = 5057 + ERROR_CLUSTER_NODE_NOT_PAUSED syscall.Errno = 5058 + ERROR_CLUSTER_NO_SECURITY_CONTEXT syscall.Errno = 5059 + ERROR_CLUSTER_NETWORK_NOT_INTERNAL syscall.Errno = 5060 + ERROR_CLUSTER_NODE_ALREADY_UP syscall.Errno = 5061 + ERROR_CLUSTER_NODE_ALREADY_DOWN syscall.Errno = 5062 + ERROR_CLUSTER_NETWORK_ALREADY_ONLINE syscall.Errno = 5063 + ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE syscall.Errno = 5064 + ERROR_CLUSTER_NODE_ALREADY_MEMBER syscall.Errno = 5065 + ERROR_CLUSTER_LAST_INTERNAL_NETWORK syscall.Errno = 5066 + ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS syscall.Errno = 5067 + ERROR_INVALID_OPERATION_ON_QUORUM syscall.Errno = 5068 + ERROR_DEPENDENCY_NOT_ALLOWED syscall.Errno = 5069 + ERROR_CLUSTER_NODE_PAUSED syscall.Errno = 5070 + ERROR_NODE_CANT_HOST_RESOURCE syscall.Errno = 5071 + ERROR_CLUSTER_NODE_NOT_READY syscall.Errno = 5072 + ERROR_CLUSTER_NODE_SHUTTING_DOWN syscall.Errno = 5073 + ERROR_CLUSTER_JOIN_ABORTED syscall.Errno = 5074 + ERROR_CLUSTER_INCOMPATIBLE_VERSIONS syscall.Errno = 5075 + ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED syscall.Errno = 5076 + ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED syscall.Errno = 5077 + ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 5078 + ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED syscall.Errno = 5079 + ERROR_CLUSTER_RESNAME_NOT_FOUND syscall.Errno = 5080 + ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED syscall.Errno = 5081 + ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST syscall.Errno = 5082 + ERROR_CLUSTER_DATABASE_SEQMISMATCH syscall.Errno = 5083 + ERROR_RESMON_INVALID_STATE syscall.Errno = 5084 + ERROR_CLUSTER_GUM_NOT_LOCKER syscall.Errno = 5085 + ERROR_QUORUM_DISK_NOT_FOUND syscall.Errno = 5086 + ERROR_DATABASE_BACKUP_CORRUPT syscall.Errno = 5087 + ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT syscall.Errno = 5088 + ERROR_RESOURCE_PROPERTY_UNCHANGEABLE syscall.Errno = 5089 + ERROR_NO_ADMIN_ACCESS_POINT syscall.Errno = 5090 + ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE syscall.Errno = 5890 + ERROR_CLUSTER_QUORUMLOG_NOT_FOUND syscall.Errno = 5891 + ERROR_CLUSTER_MEMBERSHIP_HALT syscall.Errno = 5892 + ERROR_CLUSTER_INSTANCE_ID_MISMATCH syscall.Errno = 5893 + ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP syscall.Errno = 5894 + 
ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH syscall.Errno = 5895 + ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP syscall.Errno = 5896 + ERROR_CLUSTER_PARAMETER_MISMATCH syscall.Errno = 5897 + ERROR_NODE_CANNOT_BE_CLUSTERED syscall.Errno = 5898 + ERROR_CLUSTER_WRONG_OS_VERSION syscall.Errno = 5899 + ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME syscall.Errno = 5900 + ERROR_CLUSCFG_ALREADY_COMMITTED syscall.Errno = 5901 + ERROR_CLUSCFG_ROLLBACK_FAILED syscall.Errno = 5902 + ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT syscall.Errno = 5903 + ERROR_CLUSTER_OLD_VERSION syscall.Errno = 5904 + ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME syscall.Errno = 5905 + ERROR_CLUSTER_NO_NET_ADAPTERS syscall.Errno = 5906 + ERROR_CLUSTER_POISONED syscall.Errno = 5907 + ERROR_CLUSTER_GROUP_MOVING syscall.Errno = 5908 + ERROR_CLUSTER_RESOURCE_TYPE_BUSY syscall.Errno = 5909 + ERROR_RESOURCE_CALL_TIMED_OUT syscall.Errno = 5910 + ERROR_INVALID_CLUSTER_IPV6_ADDRESS syscall.Errno = 5911 + ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION syscall.Errno = 5912 + ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS syscall.Errno = 5913 + ERROR_CLUSTER_PARTIAL_SEND syscall.Errno = 5914 + ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION syscall.Errno = 5915 + ERROR_CLUSTER_INVALID_STRING_TERMINATION syscall.Errno = 5916 + ERROR_CLUSTER_INVALID_STRING_FORMAT syscall.Errno = 5917 + ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS syscall.Errno = 5918 + ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS syscall.Errno = 5919 + ERROR_CLUSTER_NULL_DATA syscall.Errno = 5920 + ERROR_CLUSTER_PARTIAL_READ syscall.Errno = 5921 + ERROR_CLUSTER_PARTIAL_WRITE syscall.Errno = 5922 + ERROR_CLUSTER_CANT_DESERIALIZE_DATA syscall.Errno = 5923 + ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT syscall.Errno = 5924 + ERROR_CLUSTER_NO_QUORUM syscall.Errno = 5925 + ERROR_CLUSTER_INVALID_IPV6_NETWORK syscall.Errno = 5926 + ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK syscall.Errno = 5927 + ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP syscall.Errno = 5928 + ERROR_DEPENDENCY_TREE_TOO_COMPLEX syscall.Errno = 5929 + ERROR_EXCEPTION_IN_RESOURCE_CALL syscall.Errno = 5930 + ERROR_CLUSTER_RHS_FAILED_INITIALIZATION syscall.Errno = 5931 + ERROR_CLUSTER_NOT_INSTALLED syscall.Errno = 5932 + ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE syscall.Errno = 5933 + ERROR_CLUSTER_MAX_NODES_IN_CLUSTER syscall.Errno = 5934 + ERROR_CLUSTER_TOO_MANY_NODES syscall.Errno = 5935 + ERROR_CLUSTER_OBJECT_ALREADY_USED syscall.Errno = 5936 + ERROR_NONCORE_GROUPS_FOUND syscall.Errno = 5937 + ERROR_FILE_SHARE_RESOURCE_CONFLICT syscall.Errno = 5938 + ERROR_CLUSTER_EVICT_INVALID_REQUEST syscall.Errno = 5939 + ERROR_CLUSTER_SINGLETON_RESOURCE syscall.Errno = 5940 + ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE syscall.Errno = 5941 + ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED syscall.Errno = 5942 + ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR syscall.Errno = 5943 + ERROR_CLUSTER_GROUP_BUSY syscall.Errno = 5944 + ERROR_CLUSTER_NOT_SHARED_VOLUME syscall.Errno = 5945 + ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR syscall.Errno = 5946 + ERROR_CLUSTER_SHARED_VOLUMES_IN_USE syscall.Errno = 5947 + ERROR_CLUSTER_USE_SHARED_VOLUMES_API syscall.Errno = 5948 + ERROR_CLUSTER_BACKUP_IN_PROGRESS syscall.Errno = 5949 + ERROR_NON_CSV_PATH syscall.Errno = 5950 + ERROR_CSV_VOLUME_NOT_LOCAL syscall.Errno = 5951 + ERROR_CLUSTER_WATCHDOG_TERMINATING syscall.Errno = 5952 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES syscall.Errno = 5953 + ERROR_CLUSTER_INVALID_NODE_WEIGHT syscall.Errno = 5954 + ERROR_CLUSTER_RESOURCE_VETOED_CALL syscall.Errno = 5955 + 
ERROR_RESMON_SYSTEM_RESOURCES_LACKING syscall.Errno = 5956 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION syscall.Errno = 5957 + ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE syscall.Errno = 5958 + ERROR_CLUSTER_GROUP_QUEUED syscall.Errno = 5959 + ERROR_CLUSTER_RESOURCE_LOCKED_STATUS syscall.Errno = 5960 + ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED syscall.Errno = 5961 + ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS syscall.Errno = 5962 + ERROR_CLUSTER_DISK_NOT_CONNECTED syscall.Errno = 5963 + ERROR_DISK_NOT_CSV_CAPABLE syscall.Errno = 5964 + ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE syscall.Errno = 5965 + ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED syscall.Errno = 5966 + ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED syscall.Errno = 5967 + ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES syscall.Errno = 5968 + ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES syscall.Errno = 5969 + ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE syscall.Errno = 5970 + ERROR_CLUSTER_AFFINITY_CONFLICT syscall.Errno = 5971 + ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE syscall.Errno = 5972 + ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS syscall.Errno = 5973 + ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED syscall.Errno = 5974 + ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED syscall.Errno = 5975 + ERROR_CLUSTER_UPGRADE_IN_PROGRESS syscall.Errno = 5976 + ERROR_CLUSTER_UPGRADE_INCOMPLETE syscall.Errno = 5977 + ERROR_CLUSTER_NODE_IN_GRACE_PERIOD syscall.Errno = 5978 + ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT syscall.Errno = 5979 + ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER syscall.Errno = 5980 + ERROR_CLUSTER_RESOURCE_NOT_MONITORED syscall.Errno = 5981 + ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED syscall.Errno = 5982 + ERROR_CLUSTER_RESOURCE_IS_REPLICATED syscall.Errno = 5983 + ERROR_CLUSTER_NODE_ISOLATED syscall.Errno = 5984 + ERROR_CLUSTER_NODE_QUARANTINED syscall.Errno = 5985 + ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED syscall.Errno = 5986 + ERROR_CLUSTER_SPACE_DEGRADED syscall.Errno = 5987 + ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED syscall.Errno = 5988 + ERROR_CLUSTER_CSV_INVALID_HANDLE syscall.Errno = 5989 + ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR syscall.Errno = 5990 + ERROR_GROUPSET_NOT_AVAILABLE syscall.Errno = 5991 + ERROR_GROUPSET_NOT_FOUND syscall.Errno = 5992 + ERROR_GROUPSET_CANT_PROVIDE syscall.Errno = 5993 + ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND syscall.Errno = 5994 + ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY syscall.Errno = 5995 + ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION syscall.Errno = 5996 + ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS syscall.Errno = 5997 + ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME syscall.Errno = 5998 + ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE syscall.Errno = 5999 + ERROR_ENCRYPTION_FAILED syscall.Errno = 6000 + ERROR_DECRYPTION_FAILED syscall.Errno = 6001 + ERROR_FILE_ENCRYPTED syscall.Errno = 6002 + ERROR_NO_RECOVERY_POLICY syscall.Errno = 6003 + ERROR_NO_EFS syscall.Errno = 6004 + ERROR_WRONG_EFS syscall.Errno = 6005 + ERROR_NO_USER_KEYS syscall.Errno = 6006 + ERROR_FILE_NOT_ENCRYPTED syscall.Errno = 6007 + ERROR_NOT_EXPORT_FORMAT syscall.Errno = 6008 + ERROR_FILE_READ_ONLY syscall.Errno = 6009 + ERROR_DIR_EFS_DISALLOWED syscall.Errno = 6010 + ERROR_EFS_SERVER_NOT_TRUSTED syscall.Errno = 6011 + ERROR_BAD_RECOVERY_POLICY syscall.Errno = 6012 + ERROR_EFS_ALG_BLOB_TOO_BIG syscall.Errno = 6013 + ERROR_VOLUME_NOT_SUPPORT_EFS syscall.Errno = 6014 + ERROR_EFS_DISABLED syscall.Errno = 6015 + 
ERROR_EFS_VERSION_NOT_SUPPORT syscall.Errno = 6016 + ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE syscall.Errno = 6017 + ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER syscall.Errno = 6018 + ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE syscall.Errno = 6019 + ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE syscall.Errno = 6020 + ERROR_CS_ENCRYPTION_FILE_NOT_CSE syscall.Errno = 6021 + ERROR_ENCRYPTION_POLICY_DENIES_OPERATION syscall.Errno = 6022 + ERROR_WIP_ENCRYPTION_FAILED syscall.Errno = 6023 + ERROR_NO_BROWSER_SERVERS_FOUND syscall.Errno = 6118 + SCHED_E_SERVICE_NOT_LOCALSYSTEM syscall.Errno = 6200 + ERROR_LOG_SECTOR_INVALID syscall.Errno = 6600 + ERROR_LOG_SECTOR_PARITY_INVALID syscall.Errno = 6601 + ERROR_LOG_SECTOR_REMAPPED syscall.Errno = 6602 + ERROR_LOG_BLOCK_INCOMPLETE syscall.Errno = 6603 + ERROR_LOG_INVALID_RANGE syscall.Errno = 6604 + ERROR_LOG_BLOCKS_EXHAUSTED syscall.Errno = 6605 + ERROR_LOG_READ_CONTEXT_INVALID syscall.Errno = 6606 + ERROR_LOG_RESTART_INVALID syscall.Errno = 6607 + ERROR_LOG_BLOCK_VERSION syscall.Errno = 6608 + ERROR_LOG_BLOCK_INVALID syscall.Errno = 6609 + ERROR_LOG_READ_MODE_INVALID syscall.Errno = 6610 + ERROR_LOG_NO_RESTART syscall.Errno = 6611 + ERROR_LOG_METADATA_CORRUPT syscall.Errno = 6612 + ERROR_LOG_METADATA_INVALID syscall.Errno = 6613 + ERROR_LOG_METADATA_INCONSISTENT syscall.Errno = 6614 + ERROR_LOG_RESERVATION_INVALID syscall.Errno = 6615 + ERROR_LOG_CANT_DELETE syscall.Errno = 6616 + ERROR_LOG_CONTAINER_LIMIT_EXCEEDED syscall.Errno = 6617 + ERROR_LOG_START_OF_LOG syscall.Errno = 6618 + ERROR_LOG_POLICY_ALREADY_INSTALLED syscall.Errno = 6619 + ERROR_LOG_POLICY_NOT_INSTALLED syscall.Errno = 6620 + ERROR_LOG_POLICY_INVALID syscall.Errno = 6621 + ERROR_LOG_POLICY_CONFLICT syscall.Errno = 6622 + ERROR_LOG_PINNED_ARCHIVE_TAIL syscall.Errno = 6623 + ERROR_LOG_RECORD_NONEXISTENT syscall.Errno = 6624 + ERROR_LOG_RECORDS_RESERVED_INVALID syscall.Errno = 6625 + ERROR_LOG_SPACE_RESERVED_INVALID syscall.Errno = 6626 + ERROR_LOG_TAIL_INVALID syscall.Errno = 6627 + ERROR_LOG_FULL syscall.Errno = 6628 + ERROR_COULD_NOT_RESIZE_LOG syscall.Errno = 6629 + ERROR_LOG_MULTIPLEXED syscall.Errno = 6630 + ERROR_LOG_DEDICATED syscall.Errno = 6631 + ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS syscall.Errno = 6632 + ERROR_LOG_ARCHIVE_IN_PROGRESS syscall.Errno = 6633 + ERROR_LOG_EPHEMERAL syscall.Errno = 6634 + ERROR_LOG_NOT_ENOUGH_CONTAINERS syscall.Errno = 6635 + ERROR_LOG_CLIENT_ALREADY_REGISTERED syscall.Errno = 6636 + ERROR_LOG_CLIENT_NOT_REGISTERED syscall.Errno = 6637 + ERROR_LOG_FULL_HANDLER_IN_PROGRESS syscall.Errno = 6638 + ERROR_LOG_CONTAINER_READ_FAILED syscall.Errno = 6639 + ERROR_LOG_CONTAINER_WRITE_FAILED syscall.Errno = 6640 + ERROR_LOG_CONTAINER_OPEN_FAILED syscall.Errno = 6641 + ERROR_LOG_CONTAINER_STATE_INVALID syscall.Errno = 6642 + ERROR_LOG_STATE_INVALID syscall.Errno = 6643 + ERROR_LOG_PINNED syscall.Errno = 6644 + ERROR_LOG_METADATA_FLUSH_FAILED syscall.Errno = 6645 + ERROR_LOG_INCONSISTENT_SECURITY syscall.Errno = 6646 + ERROR_LOG_APPENDED_FLUSH_FAILED syscall.Errno = 6647 + ERROR_LOG_PINNED_RESERVATION syscall.Errno = 6648 + ERROR_INVALID_TRANSACTION syscall.Errno = 6700 + ERROR_TRANSACTION_NOT_ACTIVE syscall.Errno = 6701 + ERROR_TRANSACTION_REQUEST_NOT_VALID syscall.Errno = 6702 + ERROR_TRANSACTION_NOT_REQUESTED syscall.Errno = 6703 + ERROR_TRANSACTION_ALREADY_ABORTED syscall.Errno = 6704 + ERROR_TRANSACTION_ALREADY_COMMITTED syscall.Errno = 6705 + ERROR_TM_INITIALIZATION_FAILED syscall.Errno = 6706 + ERROR_RESOURCEMANAGER_READ_ONLY syscall.Errno = 6707 + 
ERROR_TRANSACTION_NOT_JOINED syscall.Errno = 6708 + ERROR_TRANSACTION_SUPERIOR_EXISTS syscall.Errno = 6709 + ERROR_CRM_PROTOCOL_ALREADY_EXISTS syscall.Errno = 6710 + ERROR_TRANSACTION_PROPAGATION_FAILED syscall.Errno = 6711 + ERROR_CRM_PROTOCOL_NOT_FOUND syscall.Errno = 6712 + ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER syscall.Errno = 6713 + ERROR_CURRENT_TRANSACTION_NOT_VALID syscall.Errno = 6714 + ERROR_TRANSACTION_NOT_FOUND syscall.Errno = 6715 + ERROR_RESOURCEMANAGER_NOT_FOUND syscall.Errno = 6716 + ERROR_ENLISTMENT_NOT_FOUND syscall.Errno = 6717 + ERROR_TRANSACTIONMANAGER_NOT_FOUND syscall.Errno = 6718 + ERROR_TRANSACTIONMANAGER_NOT_ONLINE syscall.Errno = 6719 + ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION syscall.Errno = 6720 + ERROR_TRANSACTION_NOT_ROOT syscall.Errno = 6721 + ERROR_TRANSACTION_OBJECT_EXPIRED syscall.Errno = 6722 + ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED syscall.Errno = 6723 + ERROR_TRANSACTION_RECORD_TOO_LONG syscall.Errno = 6724 + ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED syscall.Errno = 6725 + ERROR_TRANSACTION_INTEGRITY_VIOLATED syscall.Errno = 6726 + ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH syscall.Errno = 6727 + ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT syscall.Errno = 6728 + ERROR_TRANSACTION_MUST_WRITETHROUGH syscall.Errno = 6729 + ERROR_TRANSACTION_NO_SUPERIOR syscall.Errno = 6730 + ERROR_HEURISTIC_DAMAGE_POSSIBLE syscall.Errno = 6731 + ERROR_TRANSACTIONAL_CONFLICT syscall.Errno = 6800 + ERROR_RM_NOT_ACTIVE syscall.Errno = 6801 + ERROR_RM_METADATA_CORRUPT syscall.Errno = 6802 + ERROR_DIRECTORY_NOT_RM syscall.Errno = 6803 + ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE syscall.Errno = 6805 + ERROR_LOG_RESIZE_INVALID_SIZE syscall.Errno = 6806 + ERROR_OBJECT_NO_LONGER_EXISTS syscall.Errno = 6807 + ERROR_STREAM_MINIVERSION_NOT_FOUND syscall.Errno = 6808 + ERROR_STREAM_MINIVERSION_NOT_VALID syscall.Errno = 6809 + ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION syscall.Errno = 6810 + ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT syscall.Errno = 6811 + ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS syscall.Errno = 6812 + ERROR_REMOTE_FILE_VERSION_MISMATCH syscall.Errno = 6814 + ERROR_HANDLE_NO_LONGER_VALID syscall.Errno = 6815 + ERROR_NO_TXF_METADATA syscall.Errno = 6816 + ERROR_LOG_CORRUPTION_DETECTED syscall.Errno = 6817 + ERROR_CANT_RECOVER_WITH_HANDLE_OPEN syscall.Errno = 6818 + ERROR_RM_DISCONNECTED syscall.Errno = 6819 + ERROR_ENLISTMENT_NOT_SUPERIOR syscall.Errno = 6820 + ERROR_RECOVERY_NOT_NEEDED syscall.Errno = 6821 + ERROR_RM_ALREADY_STARTED syscall.Errno = 6822 + ERROR_FILE_IDENTITY_NOT_PERSISTENT syscall.Errno = 6823 + ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY syscall.Errno = 6824 + ERROR_CANT_CROSS_RM_BOUNDARY syscall.Errno = 6825 + ERROR_TXF_DIR_NOT_EMPTY syscall.Errno = 6826 + ERROR_INDOUBT_TRANSACTIONS_EXIST syscall.Errno = 6827 + ERROR_TM_VOLATILE syscall.Errno = 6828 + ERROR_ROLLBACK_TIMER_EXPIRED syscall.Errno = 6829 + ERROR_TXF_ATTRIBUTE_CORRUPT syscall.Errno = 6830 + ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6831 + ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED syscall.Errno = 6832 + ERROR_LOG_GROWTH_FAILED syscall.Errno = 6833 + ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE syscall.Errno = 6834 + ERROR_TXF_METADATA_ALREADY_PRESENT syscall.Errno = 6835 + ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET syscall.Errno = 6836 + ERROR_TRANSACTION_REQUIRED_PROMOTION syscall.Errno = 6837 + ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION syscall.Errno = 6838 + ERROR_TRANSACTIONS_NOT_FROZEN syscall.Errno = 6839 + ERROR_TRANSACTION_FREEZE_IN_PROGRESS syscall.Errno = 
6840 + ERROR_NOT_SNAPSHOT_VOLUME syscall.Errno = 6841 + ERROR_NO_SAVEPOINT_WITH_OPEN_FILES syscall.Errno = 6842 + ERROR_DATA_LOST_REPAIR syscall.Errno = 6843 + ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6844 + ERROR_TM_IDENTITY_MISMATCH syscall.Errno = 6845 + ERROR_FLOATED_SECTION syscall.Errno = 6846 + ERROR_CANNOT_ACCEPT_TRANSACTED_WORK syscall.Errno = 6847 + ERROR_CANNOT_ABORT_TRANSACTIONS syscall.Errno = 6848 + ERROR_BAD_CLUSTERS syscall.Errno = 6849 + ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6850 + ERROR_VOLUME_DIRTY syscall.Errno = 6851 + ERROR_NO_LINK_TRACKING_IN_TRANSACTION syscall.Errno = 6852 + ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION syscall.Errno = 6853 + ERROR_EXPIRED_HANDLE syscall.Errno = 6854 + ERROR_TRANSACTION_NOT_ENLISTED syscall.Errno = 6855 + ERROR_CTX_WINSTATION_NAME_INVALID syscall.Errno = 7001 + ERROR_CTX_INVALID_PD syscall.Errno = 7002 + ERROR_CTX_PD_NOT_FOUND syscall.Errno = 7003 + ERROR_CTX_WD_NOT_FOUND syscall.Errno = 7004 + ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY syscall.Errno = 7005 + ERROR_CTX_SERVICE_NAME_COLLISION syscall.Errno = 7006 + ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007 + ERROR_CTX_NO_OUTBUF syscall.Errno = 7008 + ERROR_CTX_MODEM_INF_NOT_FOUND syscall.Errno = 7009 + ERROR_CTX_INVALID_MODEMNAME syscall.Errno = 7010 + ERROR_CTX_MODEM_RESPONSE_ERROR syscall.Errno = 7011 + ERROR_CTX_MODEM_RESPONSE_TIMEOUT syscall.Errno = 7012 + ERROR_CTX_MODEM_RESPONSE_NO_CARRIER syscall.Errno = 7013 + ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE syscall.Errno = 7014 + ERROR_CTX_MODEM_RESPONSE_BUSY syscall.Errno = 7015 + ERROR_CTX_MODEM_RESPONSE_VOICE syscall.Errno = 7016 + ERROR_CTX_TD_ERROR syscall.Errno = 7017 + ERROR_CTX_WINSTATION_NOT_FOUND syscall.Errno = 7022 + ERROR_CTX_WINSTATION_ALREADY_EXISTS syscall.Errno = 7023 + ERROR_CTX_WINSTATION_BUSY syscall.Errno = 7024 + ERROR_CTX_BAD_VIDEO_MODE syscall.Errno = 7025 + ERROR_CTX_GRAPHICS_INVALID syscall.Errno = 7035 + ERROR_CTX_LOGON_DISABLED syscall.Errno = 7037 + ERROR_CTX_NOT_CONSOLE syscall.Errno = 7038 + ERROR_CTX_CLIENT_QUERY_TIMEOUT syscall.Errno = 7040 + ERROR_CTX_CONSOLE_DISCONNECT syscall.Errno = 7041 + ERROR_CTX_CONSOLE_CONNECT syscall.Errno = 7042 + ERROR_CTX_SHADOW_DENIED syscall.Errno = 7044 + ERROR_CTX_WINSTATION_ACCESS_DENIED syscall.Errno = 7045 + ERROR_CTX_INVALID_WD syscall.Errno = 7049 + ERROR_CTX_SHADOW_INVALID syscall.Errno = 7050 + ERROR_CTX_SHADOW_DISABLED syscall.Errno = 7051 + ERROR_CTX_CLIENT_LICENSE_IN_USE syscall.Errno = 7052 + ERROR_CTX_CLIENT_LICENSE_NOT_SET syscall.Errno = 7053 + ERROR_CTX_LICENSE_NOT_AVAILABLE syscall.Errno = 7054 + ERROR_CTX_LICENSE_CLIENT_INVALID syscall.Errno = 7055 + ERROR_CTX_LICENSE_EXPIRED syscall.Errno = 7056 + ERROR_CTX_SHADOW_NOT_RUNNING syscall.Errno = 7057 + ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE syscall.Errno = 7058 + ERROR_ACTIVATION_COUNT_EXCEEDED syscall.Errno = 7059 + ERROR_CTX_WINSTATIONS_DISABLED syscall.Errno = 7060 + ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED syscall.Errno = 7061 + ERROR_CTX_SESSION_IN_USE syscall.Errno = 7062 + ERROR_CTX_NO_FORCE_LOGOFF syscall.Errno = 7063 + ERROR_CTX_ACCOUNT_RESTRICTION syscall.Errno = 7064 + ERROR_RDP_PROTOCOL_ERROR syscall.Errno = 7065 + ERROR_CTX_CDM_CONNECT syscall.Errno = 7066 + ERROR_CTX_CDM_DISCONNECT syscall.Errno = 7067 + ERROR_CTX_SECURITY_LAYER_ERROR syscall.Errno = 7068 + ERROR_TS_INCOMPATIBLE_SESSIONS syscall.Errno = 7069 + ERROR_TS_VIDEO_SUBSYSTEM_ERROR syscall.Errno = 7070 + FRS_ERR_INVALID_API_SEQUENCE syscall.Errno = 8001 + FRS_ERR_STARTING_SERVICE syscall.Errno = 8002 + 
FRS_ERR_STOPPING_SERVICE syscall.Errno = 8003 + FRS_ERR_INTERNAL_API syscall.Errno = 8004 + FRS_ERR_INTERNAL syscall.Errno = 8005 + FRS_ERR_SERVICE_COMM syscall.Errno = 8006 + FRS_ERR_INSUFFICIENT_PRIV syscall.Errno = 8007 + FRS_ERR_AUTHENTICATION syscall.Errno = 8008 + FRS_ERR_PARENT_INSUFFICIENT_PRIV syscall.Errno = 8009 + FRS_ERR_PARENT_AUTHENTICATION syscall.Errno = 8010 + FRS_ERR_CHILD_TO_PARENT_COMM syscall.Errno = 8011 + FRS_ERR_PARENT_TO_CHILD_COMM syscall.Errno = 8012 + FRS_ERR_SYSVOL_POPULATE syscall.Errno = 8013 + FRS_ERR_SYSVOL_POPULATE_TIMEOUT syscall.Errno = 8014 + FRS_ERR_SYSVOL_IS_BUSY syscall.Errno = 8015 + FRS_ERR_SYSVOL_DEMOTE syscall.Errno = 8016 + FRS_ERR_INVALID_SERVICE_PARAMETER syscall.Errno = 8017 + DS_S_SUCCESS = ERROR_SUCCESS + ERROR_DS_NOT_INSTALLED syscall.Errno = 8200 + ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY syscall.Errno = 8201 + ERROR_DS_NO_ATTRIBUTE_OR_VALUE syscall.Errno = 8202 + ERROR_DS_INVALID_ATTRIBUTE_SYNTAX syscall.Errno = 8203 + ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED syscall.Errno = 8204 + ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS syscall.Errno = 8205 + ERROR_DS_BUSY syscall.Errno = 8206 + ERROR_DS_UNAVAILABLE syscall.Errno = 8207 + ERROR_DS_NO_RIDS_ALLOCATED syscall.Errno = 8208 + ERROR_DS_NO_MORE_RIDS syscall.Errno = 8209 + ERROR_DS_INCORRECT_ROLE_OWNER syscall.Errno = 8210 + ERROR_DS_RIDMGR_INIT_ERROR syscall.Errno = 8211 + ERROR_DS_OBJ_CLASS_VIOLATION syscall.Errno = 8212 + ERROR_DS_CANT_ON_NON_LEAF syscall.Errno = 8213 + ERROR_DS_CANT_ON_RDN syscall.Errno = 8214 + ERROR_DS_CANT_MOD_OBJ_CLASS syscall.Errno = 8215 + ERROR_DS_CROSS_DOM_MOVE_ERROR syscall.Errno = 8216 + ERROR_DS_GC_NOT_AVAILABLE syscall.Errno = 8217 + ERROR_SHARED_POLICY syscall.Errno = 8218 + ERROR_POLICY_OBJECT_NOT_FOUND syscall.Errno = 8219 + ERROR_POLICY_ONLY_IN_DS syscall.Errno = 8220 + ERROR_PROMOTION_ACTIVE syscall.Errno = 8221 + ERROR_NO_PROMOTION_ACTIVE syscall.Errno = 8222 + ERROR_DS_OPERATIONS_ERROR syscall.Errno = 8224 + ERROR_DS_PROTOCOL_ERROR syscall.Errno = 8225 + ERROR_DS_TIMELIMIT_EXCEEDED syscall.Errno = 8226 + ERROR_DS_SIZELIMIT_EXCEEDED syscall.Errno = 8227 + ERROR_DS_ADMIN_LIMIT_EXCEEDED syscall.Errno = 8228 + ERROR_DS_COMPARE_FALSE syscall.Errno = 8229 + ERROR_DS_COMPARE_TRUE syscall.Errno = 8230 + ERROR_DS_AUTH_METHOD_NOT_SUPPORTED syscall.Errno = 8231 + ERROR_DS_STRONG_AUTH_REQUIRED syscall.Errno = 8232 + ERROR_DS_INAPPROPRIATE_AUTH syscall.Errno = 8233 + ERROR_DS_AUTH_UNKNOWN syscall.Errno = 8234 + ERROR_DS_REFERRAL syscall.Errno = 8235 + ERROR_DS_UNAVAILABLE_CRIT_EXTENSION syscall.Errno = 8236 + ERROR_DS_CONFIDENTIALITY_REQUIRED syscall.Errno = 8237 + ERROR_DS_INAPPROPRIATE_MATCHING syscall.Errno = 8238 + ERROR_DS_CONSTRAINT_VIOLATION syscall.Errno = 8239 + ERROR_DS_NO_SUCH_OBJECT syscall.Errno = 8240 + ERROR_DS_ALIAS_PROBLEM syscall.Errno = 8241 + ERROR_DS_INVALID_DN_SYNTAX syscall.Errno = 8242 + ERROR_DS_IS_LEAF syscall.Errno = 8243 + ERROR_DS_ALIAS_DEREF_PROBLEM syscall.Errno = 8244 + ERROR_DS_UNWILLING_TO_PERFORM syscall.Errno = 8245 + ERROR_DS_LOOP_DETECT syscall.Errno = 8246 + ERROR_DS_NAMING_VIOLATION syscall.Errno = 8247 + ERROR_DS_OBJECT_RESULTS_TOO_LARGE syscall.Errno = 8248 + ERROR_DS_AFFECTS_MULTIPLE_DSAS syscall.Errno = 8249 + ERROR_DS_SERVER_DOWN syscall.Errno = 8250 + ERROR_DS_LOCAL_ERROR syscall.Errno = 8251 + ERROR_DS_ENCODING_ERROR syscall.Errno = 8252 + ERROR_DS_DECODING_ERROR syscall.Errno = 8253 + ERROR_DS_FILTER_UNKNOWN syscall.Errno = 8254 + ERROR_DS_PARAM_ERROR syscall.Errno = 8255 + ERROR_DS_NOT_SUPPORTED syscall.Errno = 8256 + 
ERROR_DS_NO_RESULTS_RETURNED syscall.Errno = 8257 + ERROR_DS_CONTROL_NOT_FOUND syscall.Errno = 8258 + ERROR_DS_CLIENT_LOOP syscall.Errno = 8259 + ERROR_DS_REFERRAL_LIMIT_EXCEEDED syscall.Errno = 8260 + ERROR_DS_SORT_CONTROL_MISSING syscall.Errno = 8261 + ERROR_DS_OFFSET_RANGE_ERROR syscall.Errno = 8262 + ERROR_DS_RIDMGR_DISABLED syscall.Errno = 8263 + ERROR_DS_ROOT_MUST_BE_NC syscall.Errno = 8301 + ERROR_DS_ADD_REPLICA_INHIBITED syscall.Errno = 8302 + ERROR_DS_ATT_NOT_DEF_IN_SCHEMA syscall.Errno = 8303 + ERROR_DS_MAX_OBJ_SIZE_EXCEEDED syscall.Errno = 8304 + ERROR_DS_OBJ_STRING_NAME_EXISTS syscall.Errno = 8305 + ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA syscall.Errno = 8306 + ERROR_DS_RDN_DOESNT_MATCH_SCHEMA syscall.Errno = 8307 + ERROR_DS_NO_REQUESTED_ATTS_FOUND syscall.Errno = 8308 + ERROR_DS_USER_BUFFER_TO_SMALL syscall.Errno = 8309 + ERROR_DS_ATT_IS_NOT_ON_OBJ syscall.Errno = 8310 + ERROR_DS_ILLEGAL_MOD_OPERATION syscall.Errno = 8311 + ERROR_DS_OBJ_TOO_LARGE syscall.Errno = 8312 + ERROR_DS_BAD_INSTANCE_TYPE syscall.Errno = 8313 + ERROR_DS_MASTERDSA_REQUIRED syscall.Errno = 8314 + ERROR_DS_OBJECT_CLASS_REQUIRED syscall.Errno = 8315 + ERROR_DS_MISSING_REQUIRED_ATT syscall.Errno = 8316 + ERROR_DS_ATT_NOT_DEF_FOR_CLASS syscall.Errno = 8317 + ERROR_DS_ATT_ALREADY_EXISTS syscall.Errno = 8318 + ERROR_DS_CANT_ADD_ATT_VALUES syscall.Errno = 8320 + ERROR_DS_SINGLE_VALUE_CONSTRAINT syscall.Errno = 8321 + ERROR_DS_RANGE_CONSTRAINT syscall.Errno = 8322 + ERROR_DS_ATT_VAL_ALREADY_EXISTS syscall.Errno = 8323 + ERROR_DS_CANT_REM_MISSING_ATT syscall.Errno = 8324 + ERROR_DS_CANT_REM_MISSING_ATT_VAL syscall.Errno = 8325 + ERROR_DS_ROOT_CANT_BE_SUBREF syscall.Errno = 8326 + ERROR_DS_NO_CHAINING syscall.Errno = 8327 + ERROR_DS_NO_CHAINED_EVAL syscall.Errno = 8328 + ERROR_DS_NO_PARENT_OBJECT syscall.Errno = 8329 + ERROR_DS_PARENT_IS_AN_ALIAS syscall.Errno = 8330 + ERROR_DS_CANT_MIX_MASTER_AND_REPS syscall.Errno = 8331 + ERROR_DS_CHILDREN_EXIST syscall.Errno = 8332 + ERROR_DS_OBJ_NOT_FOUND syscall.Errno = 8333 + ERROR_DS_ALIASED_OBJ_MISSING syscall.Errno = 8334 + ERROR_DS_BAD_NAME_SYNTAX syscall.Errno = 8335 + ERROR_DS_ALIAS_POINTS_TO_ALIAS syscall.Errno = 8336 + ERROR_DS_CANT_DEREF_ALIAS syscall.Errno = 8337 + ERROR_DS_OUT_OF_SCOPE syscall.Errno = 8338 + ERROR_DS_OBJECT_BEING_REMOVED syscall.Errno = 8339 + ERROR_DS_CANT_DELETE_DSA_OBJ syscall.Errno = 8340 + ERROR_DS_GENERIC_ERROR syscall.Errno = 8341 + ERROR_DS_DSA_MUST_BE_INT_MASTER syscall.Errno = 8342 + ERROR_DS_CLASS_NOT_DSA syscall.Errno = 8343 + ERROR_DS_INSUFF_ACCESS_RIGHTS syscall.Errno = 8344 + ERROR_DS_ILLEGAL_SUPERIOR syscall.Errno = 8345 + ERROR_DS_ATTRIBUTE_OWNED_BY_SAM syscall.Errno = 8346 + ERROR_DS_NAME_TOO_MANY_PARTS syscall.Errno = 8347 + ERROR_DS_NAME_TOO_LONG syscall.Errno = 8348 + ERROR_DS_NAME_VALUE_TOO_LONG syscall.Errno = 8349 + ERROR_DS_NAME_UNPARSEABLE syscall.Errno = 8350 + ERROR_DS_NAME_TYPE_UNKNOWN syscall.Errno = 8351 + ERROR_DS_NOT_AN_OBJECT syscall.Errno = 8352 + ERROR_DS_SEC_DESC_TOO_SHORT syscall.Errno = 8353 + ERROR_DS_SEC_DESC_INVALID syscall.Errno = 8354 + ERROR_DS_NO_DELETED_NAME syscall.Errno = 8355 + ERROR_DS_SUBREF_MUST_HAVE_PARENT syscall.Errno = 8356 + ERROR_DS_NCNAME_MUST_BE_NC syscall.Errno = 8357 + ERROR_DS_CANT_ADD_SYSTEM_ONLY syscall.Errno = 8358 + ERROR_DS_CLASS_MUST_BE_CONCRETE syscall.Errno = 8359 + ERROR_DS_INVALID_DMD syscall.Errno = 8360 + ERROR_DS_OBJ_GUID_EXISTS syscall.Errno = 8361 + ERROR_DS_NOT_ON_BACKLINK syscall.Errno = 8362 + ERROR_DS_NO_CROSSREF_FOR_NC syscall.Errno = 8363 + ERROR_DS_SHUTTING_DOWN 
syscall.Errno = 8364, followed by the remainder of the vendored, generated Windows error-code
constants from the golang.org/x/sys/windows package (zerrors_windows.go): directory-service
errors ERROR_DS_* (8365–8650), DNS errors DNS_ERROR_*/DNS_STATUS_* (9000–9996), Winsock errors
WSA* (10000–11033), IPsec/IKE errors ERROR_IPSEC_* (13000–13932), side-by-side errors
ERROR_SXS_* (14000–14111), event-log, MUI, MRM, package-deployment, and state errors
(15000–15864), and the COM/OLE HRESULT constants (SEVERITY_*, E_*, S_OK, CO_E_*, OLE_E_*,
DRAGDROP_*, CLASS_E_*, REGDB_*, CAT_E_*, CS_E_*, CACHE_E_*, OLEOBJ_E_*, CONVERT10_E_*,
CLIPBRD_E_*, MK_E_*, EVENT_E_*, TPC_*), each declared as a syscall.Errno or Handle value.
The generated listing continues with the transaction-coordinator constants, beginning at
XACT_E_FIRST syscall.Errno
= 0x8004D000 + XACT_E_LAST syscall.Errno = 0x8004D02B + XACT_S_FIRST syscall.Errno = 0x0004D000 + XACT_S_LAST syscall.Errno = 0x0004D010 + XACT_E_ALREADYOTHERSINGLEPHASE Handle = 0x8004D000 + XACT_E_CANTRETAIN Handle = 0x8004D001 + XACT_E_COMMITFAILED Handle = 0x8004D002 + XACT_E_COMMITPREVENTED Handle = 0x8004D003 + XACT_E_HEURISTICABORT Handle = 0x8004D004 + XACT_E_HEURISTICCOMMIT Handle = 0x8004D005 + XACT_E_HEURISTICDAMAGE Handle = 0x8004D006 + XACT_E_HEURISTICDANGER Handle = 0x8004D007 + XACT_E_ISOLATIONLEVEL Handle = 0x8004D008 + XACT_E_NOASYNC Handle = 0x8004D009 + XACT_E_NOENLIST Handle = 0x8004D00A + XACT_E_NOISORETAIN Handle = 0x8004D00B + XACT_E_NORESOURCE Handle = 0x8004D00C + XACT_E_NOTCURRENT Handle = 0x8004D00D + XACT_E_NOTRANSACTION Handle = 0x8004D00E + XACT_E_NOTSUPPORTED Handle = 0x8004D00F + XACT_E_UNKNOWNRMGRID Handle = 0x8004D010 + XACT_E_WRONGSTATE Handle = 0x8004D011 + XACT_E_WRONGUOW Handle = 0x8004D012 + XACT_E_XTIONEXISTS Handle = 0x8004D013 + XACT_E_NOIMPORTOBJECT Handle = 0x8004D014 + XACT_E_INVALIDCOOKIE Handle = 0x8004D015 + XACT_E_INDOUBT Handle = 0x8004D016 + XACT_E_NOTIMEOUT Handle = 0x8004D017 + XACT_E_ALREADYINPROGRESS Handle = 0x8004D018 + XACT_E_ABORTED Handle = 0x8004D019 + XACT_E_LOGFULL Handle = 0x8004D01A + XACT_E_TMNOTAVAILABLE Handle = 0x8004D01B + XACT_E_CONNECTION_DOWN Handle = 0x8004D01C + XACT_E_CONNECTION_DENIED Handle = 0x8004D01D + XACT_E_REENLISTTIMEOUT Handle = 0x8004D01E + XACT_E_TIP_CONNECT_FAILED Handle = 0x8004D01F + XACT_E_TIP_PROTOCOL_ERROR Handle = 0x8004D020 + XACT_E_TIP_PULL_FAILED Handle = 0x8004D021 + XACT_E_DEST_TMNOTAVAILABLE Handle = 0x8004D022 + XACT_E_TIP_DISABLED Handle = 0x8004D023 + XACT_E_NETWORK_TX_DISABLED Handle = 0x8004D024 + XACT_E_PARTNER_NETWORK_TX_DISABLED Handle = 0x8004D025 + XACT_E_XA_TX_DISABLED Handle = 0x8004D026 + XACT_E_UNABLE_TO_READ_DTC_CONFIG Handle = 0x8004D027 + XACT_E_UNABLE_TO_LOAD_DTC_PROXY Handle = 0x8004D028 + XACT_E_ABORTING Handle = 0x8004D029 + XACT_E_PUSH_COMM_FAILURE Handle = 0x8004D02A + XACT_E_PULL_COMM_FAILURE Handle = 0x8004D02B + XACT_E_LU_TX_DISABLED Handle = 0x8004D02C + XACT_E_CLERKNOTFOUND Handle = 0x8004D080 + XACT_E_CLERKEXISTS Handle = 0x8004D081 + XACT_E_RECOVERYINPROGRESS Handle = 0x8004D082 + XACT_E_TRANSACTIONCLOSED Handle = 0x8004D083 + XACT_E_INVALIDLSN Handle = 0x8004D084 + XACT_E_REPLAYREQUEST Handle = 0x8004D085 + XACT_S_ASYNC Handle = 0x0004D000 + XACT_S_DEFECT Handle = 0x0004D001 + XACT_S_READONLY Handle = 0x0004D002 + XACT_S_SOMENORETAIN Handle = 0x0004D003 + XACT_S_OKINFORM Handle = 0x0004D004 + XACT_S_MADECHANGESCONTENT Handle = 0x0004D005 + XACT_S_MADECHANGESINFORM Handle = 0x0004D006 + XACT_S_ALLNORETAIN Handle = 0x0004D007 + XACT_S_ABORTING Handle = 0x0004D008 + XACT_S_SINGLEPHASE Handle = 0x0004D009 + XACT_S_LOCALLY_OK Handle = 0x0004D00A + XACT_S_LASTRESOURCEMANAGER Handle = 0x0004D010 + CONTEXT_E_FIRST syscall.Errno = 0x8004E000 + CONTEXT_E_LAST syscall.Errno = 0x8004E02F + CONTEXT_S_FIRST syscall.Errno = 0x0004E000 + CONTEXT_S_LAST syscall.Errno = 0x0004E02F + CONTEXT_E_ABORTED Handle = 0x8004E002 + CONTEXT_E_ABORTING Handle = 0x8004E003 + CONTEXT_E_NOCONTEXT Handle = 0x8004E004 + CONTEXT_E_WOULD_DEADLOCK Handle = 0x8004E005 + CONTEXT_E_SYNCH_TIMEOUT Handle = 0x8004E006 + CONTEXT_E_OLDREF Handle = 0x8004E007 + CONTEXT_E_ROLENOTFOUND Handle = 0x8004E00C + CONTEXT_E_TMNOTAVAILABLE Handle = 0x8004E00F + CO_E_ACTIVATIONFAILED Handle = 0x8004E021 + CO_E_ACTIVATIONFAILED_EVENTLOGGED Handle = 0x8004E022 + CO_E_ACTIVATIONFAILED_CATALOGERROR Handle = 0x8004E023 + 
CO_E_ACTIVATIONFAILED_TIMEOUT Handle = 0x8004E024 + CO_E_INITIALIZATIONFAILED Handle = 0x8004E025 + CONTEXT_E_NOJIT Handle = 0x8004E026 + CONTEXT_E_NOTRANSACTION Handle = 0x8004E027 + CO_E_THREADINGMODEL_CHANGED Handle = 0x8004E028 + CO_E_NOIISINTRINSICS Handle = 0x8004E029 + CO_E_NOCOOKIES Handle = 0x8004E02A + CO_E_DBERROR Handle = 0x8004E02B + CO_E_NOTPOOLED Handle = 0x8004E02C + CO_E_NOTCONSTRUCTED Handle = 0x8004E02D + CO_E_NOSYNCHRONIZATION Handle = 0x8004E02E + CO_E_ISOLEVELMISMATCH Handle = 0x8004E02F + CO_E_CALL_OUT_OF_TX_SCOPE_NOT_ALLOWED Handle = 0x8004E030 + CO_E_EXIT_TRANSACTION_SCOPE_NOT_CALLED Handle = 0x8004E031 + OLE_S_USEREG Handle = 0x00040000 + OLE_S_STATIC Handle = 0x00040001 + OLE_S_MAC_CLIPFORMAT Handle = 0x00040002 + DRAGDROP_S_DROP Handle = 0x00040100 + DRAGDROP_S_CANCEL Handle = 0x00040101 + DRAGDROP_S_USEDEFAULTCURSORS Handle = 0x00040102 + DATA_S_SAMEFORMATETC Handle = 0x00040130 + VIEW_S_ALREADY_FROZEN Handle = 0x00040140 + CACHE_S_FORMATETC_NOTSUPPORTED Handle = 0x00040170 + CACHE_S_SAMECACHE Handle = 0x00040171 + CACHE_S_SOMECACHES_NOTUPDATED Handle = 0x00040172 + OLEOBJ_S_INVALIDVERB Handle = 0x00040180 + OLEOBJ_S_CANNOT_DOVERB_NOW Handle = 0x00040181 + OLEOBJ_S_INVALIDHWND Handle = 0x00040182 + INPLACE_S_TRUNCATED Handle = 0x000401A0 + CONVERT10_S_NO_PRESENTATION Handle = 0x000401C0 + MK_S_REDUCED_TO_SELF Handle = 0x000401E2 + MK_S_ME Handle = 0x000401E4 + MK_S_HIM Handle = 0x000401E5 + MK_S_US Handle = 0x000401E6 + MK_S_MONIKERALREADYREGISTERED Handle = 0x000401E7 + SCHED_S_TASK_READY Handle = 0x00041300 + SCHED_S_TASK_RUNNING Handle = 0x00041301 + SCHED_S_TASK_DISABLED Handle = 0x00041302 + SCHED_S_TASK_HAS_NOT_RUN Handle = 0x00041303 + SCHED_S_TASK_NO_MORE_RUNS Handle = 0x00041304 + SCHED_S_TASK_NOT_SCHEDULED Handle = 0x00041305 + SCHED_S_TASK_TERMINATED Handle = 0x00041306 + SCHED_S_TASK_NO_VALID_TRIGGERS Handle = 0x00041307 + SCHED_S_EVENT_TRIGGER Handle = 0x00041308 + SCHED_E_TRIGGER_NOT_FOUND Handle = 0x80041309 + SCHED_E_TASK_NOT_READY Handle = 0x8004130A + SCHED_E_TASK_NOT_RUNNING Handle = 0x8004130B + SCHED_E_SERVICE_NOT_INSTALLED Handle = 0x8004130C + SCHED_E_CANNOT_OPEN_TASK Handle = 0x8004130D + SCHED_E_INVALID_TASK Handle = 0x8004130E + SCHED_E_ACCOUNT_INFORMATION_NOT_SET Handle = 0x8004130F + SCHED_E_ACCOUNT_NAME_NOT_FOUND Handle = 0x80041310 + SCHED_E_ACCOUNT_DBASE_CORRUPT Handle = 0x80041311 + SCHED_E_NO_SECURITY_SERVICES Handle = 0x80041312 + SCHED_E_UNKNOWN_OBJECT_VERSION Handle = 0x80041313 + SCHED_E_UNSUPPORTED_ACCOUNT_OPTION Handle = 0x80041314 + SCHED_E_SERVICE_NOT_RUNNING Handle = 0x80041315 + SCHED_E_UNEXPECTEDNODE Handle = 0x80041316 + SCHED_E_NAMESPACE Handle = 0x80041317 + SCHED_E_INVALIDVALUE Handle = 0x80041318 + SCHED_E_MISSINGNODE Handle = 0x80041319 + SCHED_E_MALFORMEDXML Handle = 0x8004131A + SCHED_S_SOME_TRIGGERS_FAILED Handle = 0x0004131B + SCHED_S_BATCH_LOGON_PROBLEM Handle = 0x0004131C + SCHED_E_TOO_MANY_NODES Handle = 0x8004131D + SCHED_E_PAST_END_BOUNDARY Handle = 0x8004131E + SCHED_E_ALREADY_RUNNING Handle = 0x8004131F + SCHED_E_USER_NOT_LOGGED_ON Handle = 0x80041320 + SCHED_E_INVALID_TASK_HASH Handle = 0x80041321 + SCHED_E_SERVICE_NOT_AVAILABLE Handle = 0x80041322 + SCHED_E_SERVICE_TOO_BUSY Handle = 0x80041323 + SCHED_E_TASK_ATTEMPTED Handle = 0x80041324 + SCHED_S_TASK_QUEUED Handle = 0x00041325 + SCHED_E_TASK_DISABLED Handle = 0x80041326 + SCHED_E_TASK_NOT_V1_COMPAT Handle = 0x80041327 + SCHED_E_START_ON_DEMAND Handle = 0x80041328 + SCHED_E_TASK_NOT_UBPM_COMPAT Handle = 0x80041329 + SCHED_E_DEPRECATED_FEATURE_USED 
Handle = 0x80041330 + CO_E_CLASS_CREATE_FAILED Handle = 0x80080001 + CO_E_SCM_ERROR Handle = 0x80080002 + CO_E_SCM_RPC_FAILURE Handle = 0x80080003 + CO_E_BAD_PATH Handle = 0x80080004 + CO_E_SERVER_EXEC_FAILURE Handle = 0x80080005 + CO_E_OBJSRV_RPC_FAILURE Handle = 0x80080006 + MK_E_NO_NORMALIZED Handle = 0x80080007 + CO_E_SERVER_STOPPING Handle = 0x80080008 + MEM_E_INVALID_ROOT Handle = 0x80080009 + MEM_E_INVALID_LINK Handle = 0x80080010 + MEM_E_INVALID_SIZE Handle = 0x80080011 + CO_S_NOTALLINTERFACES Handle = 0x00080012 + CO_S_MACHINENAMENOTFOUND Handle = 0x00080013 + CO_E_MISSING_DISPLAYNAME Handle = 0x80080015 + CO_E_RUNAS_VALUE_MUST_BE_AAA Handle = 0x80080016 + CO_E_ELEVATION_DISABLED Handle = 0x80080017 + APPX_E_PACKAGING_INTERNAL Handle = 0x80080200 + APPX_E_INTERLEAVING_NOT_ALLOWED Handle = 0x80080201 + APPX_E_RELATIONSHIPS_NOT_ALLOWED Handle = 0x80080202 + APPX_E_MISSING_REQUIRED_FILE Handle = 0x80080203 + APPX_E_INVALID_MANIFEST Handle = 0x80080204 + APPX_E_INVALID_BLOCKMAP Handle = 0x80080205 + APPX_E_CORRUPT_CONTENT Handle = 0x80080206 + APPX_E_BLOCK_HASH_INVALID Handle = 0x80080207 + APPX_E_REQUESTED_RANGE_TOO_LARGE Handle = 0x80080208 + APPX_E_INVALID_SIP_CLIENT_DATA Handle = 0x80080209 + APPX_E_INVALID_KEY_INFO Handle = 0x8008020A + APPX_E_INVALID_CONTENTGROUPMAP Handle = 0x8008020B + APPX_E_INVALID_APPINSTALLER Handle = 0x8008020C + APPX_E_DELTA_BASELINE_VERSION_MISMATCH Handle = 0x8008020D + APPX_E_DELTA_PACKAGE_MISSING_FILE Handle = 0x8008020E + APPX_E_INVALID_DELTA_PACKAGE Handle = 0x8008020F + APPX_E_DELTA_APPENDED_PACKAGE_NOT_ALLOWED Handle = 0x80080210 + APPX_E_INVALID_PACKAGING_LAYOUT Handle = 0x80080211 + APPX_E_INVALID_PACKAGESIGNCONFIG Handle = 0x80080212 + APPX_E_RESOURCESPRI_NOT_ALLOWED Handle = 0x80080213 + APPX_E_FILE_COMPRESSION_MISMATCH Handle = 0x80080214 + APPX_E_INVALID_PAYLOAD_PACKAGE_EXTENSION Handle = 0x80080215 + APPX_E_INVALID_ENCRYPTION_EXCLUSION_FILE_LIST Handle = 0x80080216 + BT_E_SPURIOUS_ACTIVATION Handle = 0x80080300 + DISP_E_UNKNOWNINTERFACE Handle = 0x80020001 + DISP_E_MEMBERNOTFOUND Handle = 0x80020003 + DISP_E_PARAMNOTFOUND Handle = 0x80020004 + DISP_E_TYPEMISMATCH Handle = 0x80020005 + DISP_E_UNKNOWNNAME Handle = 0x80020006 + DISP_E_NONAMEDARGS Handle = 0x80020007 + DISP_E_BADVARTYPE Handle = 0x80020008 + DISP_E_EXCEPTION Handle = 0x80020009 + DISP_E_OVERFLOW Handle = 0x8002000A + DISP_E_BADINDEX Handle = 0x8002000B + DISP_E_UNKNOWNLCID Handle = 0x8002000C + DISP_E_ARRAYISLOCKED Handle = 0x8002000D + DISP_E_BADPARAMCOUNT Handle = 0x8002000E + DISP_E_PARAMNOTOPTIONAL Handle = 0x8002000F + DISP_E_BADCALLEE Handle = 0x80020010 + DISP_E_NOTACOLLECTION Handle = 0x80020011 + DISP_E_DIVBYZERO Handle = 0x80020012 + DISP_E_BUFFERTOOSMALL Handle = 0x80020013 + TYPE_E_BUFFERTOOSMALL Handle = 0x80028016 + TYPE_E_FIELDNOTFOUND Handle = 0x80028017 + TYPE_E_INVDATAREAD Handle = 0x80028018 + TYPE_E_UNSUPFORMAT Handle = 0x80028019 + TYPE_E_REGISTRYACCESS Handle = 0x8002801C + TYPE_E_LIBNOTREGISTERED Handle = 0x8002801D + TYPE_E_UNDEFINEDTYPE Handle = 0x80028027 + TYPE_E_QUALIFIEDNAMEDISALLOWED Handle = 0x80028028 + TYPE_E_INVALIDSTATE Handle = 0x80028029 + TYPE_E_WRONGTYPEKIND Handle = 0x8002802A + TYPE_E_ELEMENTNOTFOUND Handle = 0x8002802B + TYPE_E_AMBIGUOUSNAME Handle = 0x8002802C + TYPE_E_NAMECONFLICT Handle = 0x8002802D + TYPE_E_UNKNOWNLCID Handle = 0x8002802E + TYPE_E_DLLFUNCTIONNOTFOUND Handle = 0x8002802F + TYPE_E_BADMODULEKIND Handle = 0x800288BD + TYPE_E_SIZETOOBIG Handle = 0x800288C5 + TYPE_E_DUPLICATEID Handle = 0x800288C6 + TYPE_E_INVALIDID 
Handle = 0x800288CF + TYPE_E_TYPEMISMATCH Handle = 0x80028CA0 + TYPE_E_OUTOFBOUNDS Handle = 0x80028CA1 + TYPE_E_IOERROR Handle = 0x80028CA2 + TYPE_E_CANTCREATETMPFILE Handle = 0x80028CA3 + TYPE_E_CANTLOADLIBRARY Handle = 0x80029C4A + TYPE_E_INCONSISTENTPROPFUNCS Handle = 0x80029C83 + TYPE_E_CIRCULARTYPE Handle = 0x80029C84 + STG_E_INVALIDFUNCTION Handle = 0x80030001 + STG_E_FILENOTFOUND Handle = 0x80030002 + STG_E_PATHNOTFOUND Handle = 0x80030003 + STG_E_TOOMANYOPENFILES Handle = 0x80030004 + STG_E_ACCESSDENIED Handle = 0x80030005 + STG_E_INVALIDHANDLE Handle = 0x80030006 + STG_E_INSUFFICIENTMEMORY Handle = 0x80030008 + STG_E_INVALIDPOINTER Handle = 0x80030009 + STG_E_NOMOREFILES Handle = 0x80030012 + STG_E_DISKISWRITEPROTECTED Handle = 0x80030013 + STG_E_SEEKERROR Handle = 0x80030019 + STG_E_WRITEFAULT Handle = 0x8003001D + STG_E_READFAULT Handle = 0x8003001E + STG_E_SHAREVIOLATION Handle = 0x80030020 + STG_E_LOCKVIOLATION Handle = 0x80030021 + STG_E_FILEALREADYEXISTS Handle = 0x80030050 + STG_E_INVALIDPARAMETER Handle = 0x80030057 + STG_E_MEDIUMFULL Handle = 0x80030070 + STG_E_PROPSETMISMATCHED Handle = 0x800300F0 + STG_E_ABNORMALAPIEXIT Handle = 0x800300FA + STG_E_INVALIDHEADER Handle = 0x800300FB + STG_E_INVALIDNAME Handle = 0x800300FC + STG_E_UNKNOWN Handle = 0x800300FD + STG_E_UNIMPLEMENTEDFUNCTION Handle = 0x800300FE + STG_E_INVALIDFLAG Handle = 0x800300FF + STG_E_INUSE Handle = 0x80030100 + STG_E_NOTCURRENT Handle = 0x80030101 + STG_E_REVERTED Handle = 0x80030102 + STG_E_CANTSAVE Handle = 0x80030103 + STG_E_OLDFORMAT Handle = 0x80030104 + STG_E_OLDDLL Handle = 0x80030105 + STG_E_SHAREREQUIRED Handle = 0x80030106 + STG_E_NOTFILEBASEDSTORAGE Handle = 0x80030107 + STG_E_EXTANTMARSHALLINGS Handle = 0x80030108 + STG_E_DOCFILECORRUPT Handle = 0x80030109 + STG_E_BADBASEADDRESS Handle = 0x80030110 + STG_E_DOCFILETOOLARGE Handle = 0x80030111 + STG_E_NOTSIMPLEFORMAT Handle = 0x80030112 + STG_E_INCOMPLETE Handle = 0x80030201 + STG_E_TERMINATED Handle = 0x80030202 + STG_S_CONVERTED Handle = 0x00030200 + STG_S_BLOCK Handle = 0x00030201 + STG_S_RETRYNOW Handle = 0x00030202 + STG_S_MONITORING Handle = 0x00030203 + STG_S_MULTIPLEOPENS Handle = 0x00030204 + STG_S_CONSOLIDATIONFAILED Handle = 0x00030205 + STG_S_CANNOTCONSOLIDATE Handle = 0x00030206 + STG_S_POWER_CYCLE_REQUIRED Handle = 0x00030207 + STG_E_FIRMWARE_SLOT_INVALID Handle = 0x80030208 + STG_E_FIRMWARE_IMAGE_INVALID Handle = 0x80030209 + STG_E_DEVICE_UNRESPONSIVE Handle = 0x8003020A + STG_E_STATUS_COPY_PROTECTION_FAILURE Handle = 0x80030305 + STG_E_CSS_AUTHENTICATION_FAILURE Handle = 0x80030306 + STG_E_CSS_KEY_NOT_PRESENT Handle = 0x80030307 + STG_E_CSS_KEY_NOT_ESTABLISHED Handle = 0x80030308 + STG_E_CSS_SCRAMBLED_SECTOR Handle = 0x80030309 + STG_E_CSS_REGION_MISMATCH Handle = 0x8003030A + STG_E_RESETS_EXHAUSTED Handle = 0x8003030B + RPC_E_CALL_REJECTED Handle = 0x80010001 + RPC_E_CALL_CANCELED Handle = 0x80010002 + RPC_E_CANTPOST_INSENDCALL Handle = 0x80010003 + RPC_E_CANTCALLOUT_INASYNCCALL Handle = 0x80010004 + RPC_E_CANTCALLOUT_INEXTERNALCALL Handle = 0x80010005 + RPC_E_CONNECTION_TERMINATED Handle = 0x80010006 + RPC_E_SERVER_DIED Handle = 0x80010007 + RPC_E_CLIENT_DIED Handle = 0x80010008 + RPC_E_INVALID_DATAPACKET Handle = 0x80010009 + RPC_E_CANTTRANSMIT_CALL Handle = 0x8001000A + RPC_E_CLIENT_CANTMARSHAL_DATA Handle = 0x8001000B + RPC_E_CLIENT_CANTUNMARSHAL_DATA Handle = 0x8001000C + RPC_E_SERVER_CANTMARSHAL_DATA Handle = 0x8001000D + RPC_E_SERVER_CANTUNMARSHAL_DATA Handle = 0x8001000E + RPC_E_INVALID_DATA Handle = 0x8001000F + 
RPC_E_INVALID_PARAMETER Handle = 0x80010010 + RPC_E_CANTCALLOUT_AGAIN Handle = 0x80010011 + RPC_E_SERVER_DIED_DNE Handle = 0x80010012 + RPC_E_SYS_CALL_FAILED Handle = 0x80010100 + RPC_E_OUT_OF_RESOURCES Handle = 0x80010101 + RPC_E_ATTEMPTED_MULTITHREAD Handle = 0x80010102 + RPC_E_NOT_REGISTERED Handle = 0x80010103 + RPC_E_FAULT Handle = 0x80010104 + RPC_E_SERVERFAULT Handle = 0x80010105 + RPC_E_CHANGED_MODE Handle = 0x80010106 + RPC_E_INVALIDMETHOD Handle = 0x80010107 + RPC_E_DISCONNECTED Handle = 0x80010108 + RPC_E_RETRY Handle = 0x80010109 + RPC_E_SERVERCALL_RETRYLATER Handle = 0x8001010A + RPC_E_SERVERCALL_REJECTED Handle = 0x8001010B + RPC_E_INVALID_CALLDATA Handle = 0x8001010C + RPC_E_CANTCALLOUT_ININPUTSYNCCALL Handle = 0x8001010D + RPC_E_WRONG_THREAD Handle = 0x8001010E + RPC_E_THREAD_NOT_INIT Handle = 0x8001010F + RPC_E_VERSION_MISMATCH Handle = 0x80010110 + RPC_E_INVALID_HEADER Handle = 0x80010111 + RPC_E_INVALID_EXTENSION Handle = 0x80010112 + RPC_E_INVALID_IPID Handle = 0x80010113 + RPC_E_INVALID_OBJECT Handle = 0x80010114 + RPC_S_CALLPENDING Handle = 0x80010115 + RPC_S_WAITONTIMER Handle = 0x80010116 + RPC_E_CALL_COMPLETE Handle = 0x80010117 + RPC_E_UNSECURE_CALL Handle = 0x80010118 + RPC_E_TOO_LATE Handle = 0x80010119 + RPC_E_NO_GOOD_SECURITY_PACKAGES Handle = 0x8001011A + RPC_E_ACCESS_DENIED Handle = 0x8001011B + RPC_E_REMOTE_DISABLED Handle = 0x8001011C + RPC_E_INVALID_OBJREF Handle = 0x8001011D + RPC_E_NO_CONTEXT Handle = 0x8001011E + RPC_E_TIMEOUT Handle = 0x8001011F + RPC_E_NO_SYNC Handle = 0x80010120 + RPC_E_FULLSIC_REQUIRED Handle = 0x80010121 + RPC_E_INVALID_STD_NAME Handle = 0x80010122 + CO_E_FAILEDTOIMPERSONATE Handle = 0x80010123 + CO_E_FAILEDTOGETSECCTX Handle = 0x80010124 + CO_E_FAILEDTOOPENTHREADTOKEN Handle = 0x80010125 + CO_E_FAILEDTOGETTOKENINFO Handle = 0x80010126 + CO_E_TRUSTEEDOESNTMATCHCLIENT Handle = 0x80010127 + CO_E_FAILEDTOQUERYCLIENTBLANKET Handle = 0x80010128 + CO_E_FAILEDTOSETDACL Handle = 0x80010129 + CO_E_ACCESSCHECKFAILED Handle = 0x8001012A + CO_E_NETACCESSAPIFAILED Handle = 0x8001012B + CO_E_WRONGTRUSTEENAMESYNTAX Handle = 0x8001012C + CO_E_INVALIDSID Handle = 0x8001012D + CO_E_CONVERSIONFAILED Handle = 0x8001012E + CO_E_NOMATCHINGSIDFOUND Handle = 0x8001012F + CO_E_LOOKUPACCSIDFAILED Handle = 0x80010130 + CO_E_NOMATCHINGNAMEFOUND Handle = 0x80010131 + CO_E_LOOKUPACCNAMEFAILED Handle = 0x80010132 + CO_E_SETSERLHNDLFAILED Handle = 0x80010133 + CO_E_FAILEDTOGETWINDIR Handle = 0x80010134 + CO_E_PATHTOOLONG Handle = 0x80010135 + CO_E_FAILEDTOGENUUID Handle = 0x80010136 + CO_E_FAILEDTOCREATEFILE Handle = 0x80010137 + CO_E_FAILEDTOCLOSEHANDLE Handle = 0x80010138 + CO_E_EXCEEDSYSACLLIMIT Handle = 0x80010139 + CO_E_ACESINWRONGORDER Handle = 0x8001013A + CO_E_INCOMPATIBLESTREAMVERSION Handle = 0x8001013B + CO_E_FAILEDTOOPENPROCESSTOKEN Handle = 0x8001013C + CO_E_DECODEFAILED Handle = 0x8001013D + CO_E_ACNOTINITIALIZED Handle = 0x8001013F + CO_E_CANCEL_DISABLED Handle = 0x80010140 + RPC_E_UNEXPECTED Handle = 0x8001FFFF + ERROR_AUDITING_DISABLED Handle = 0xC0090001 + ERROR_ALL_SIDS_FILTERED Handle = 0xC0090002 + ERROR_BIZRULES_NOT_ENABLED Handle = 0xC0090003 + NTE_BAD_UID Handle = 0x80090001 + NTE_BAD_HASH Handle = 0x80090002 + NTE_BAD_KEY Handle = 0x80090003 + NTE_BAD_LEN Handle = 0x80090004 + NTE_BAD_DATA Handle = 0x80090005 + NTE_BAD_SIGNATURE Handle = 0x80090006 + NTE_BAD_VER Handle = 0x80090007 + NTE_BAD_ALGID Handle = 0x80090008 + NTE_BAD_FLAGS Handle = 0x80090009 + NTE_BAD_TYPE Handle = 0x8009000A + NTE_BAD_KEY_STATE Handle = 0x8009000B + 
NTE_BAD_HASH_STATE Handle = 0x8009000C + NTE_NO_KEY Handle = 0x8009000D + NTE_NO_MEMORY Handle = 0x8009000E + NTE_EXISTS Handle = 0x8009000F + NTE_PERM Handle = 0x80090010 + NTE_NOT_FOUND Handle = 0x80090011 + NTE_DOUBLE_ENCRYPT Handle = 0x80090012 + NTE_BAD_PROVIDER Handle = 0x80090013 + NTE_BAD_PROV_TYPE Handle = 0x80090014 + NTE_BAD_PUBLIC_KEY Handle = 0x80090015 + NTE_BAD_KEYSET Handle = 0x80090016 + NTE_PROV_TYPE_NOT_DEF Handle = 0x80090017 + NTE_PROV_TYPE_ENTRY_BAD Handle = 0x80090018 + NTE_KEYSET_NOT_DEF Handle = 0x80090019 + NTE_KEYSET_ENTRY_BAD Handle = 0x8009001A + NTE_PROV_TYPE_NO_MATCH Handle = 0x8009001B + NTE_SIGNATURE_FILE_BAD Handle = 0x8009001C + NTE_PROVIDER_DLL_FAIL Handle = 0x8009001D + NTE_PROV_DLL_NOT_FOUND Handle = 0x8009001E + NTE_BAD_KEYSET_PARAM Handle = 0x8009001F + NTE_FAIL Handle = 0x80090020 + NTE_SYS_ERR Handle = 0x80090021 + NTE_SILENT_CONTEXT Handle = 0x80090022 + NTE_TOKEN_KEYSET_STORAGE_FULL Handle = 0x80090023 + NTE_TEMPORARY_PROFILE Handle = 0x80090024 + NTE_FIXEDPARAMETER Handle = 0x80090025 + NTE_INVALID_HANDLE Handle = 0x80090026 + NTE_INVALID_PARAMETER Handle = 0x80090027 + NTE_BUFFER_TOO_SMALL Handle = 0x80090028 + NTE_NOT_SUPPORTED Handle = 0x80090029 + NTE_NO_MORE_ITEMS Handle = 0x8009002A + NTE_BUFFERS_OVERLAP Handle = 0x8009002B + NTE_DECRYPTION_FAILURE Handle = 0x8009002C + NTE_INTERNAL_ERROR Handle = 0x8009002D + NTE_UI_REQUIRED Handle = 0x8009002E + NTE_HMAC_NOT_SUPPORTED Handle = 0x8009002F + NTE_DEVICE_NOT_READY Handle = 0x80090030 + NTE_AUTHENTICATION_IGNORED Handle = 0x80090031 + NTE_VALIDATION_FAILED Handle = 0x80090032 + NTE_INCORRECT_PASSWORD Handle = 0x80090033 + NTE_ENCRYPTION_FAILURE Handle = 0x80090034 + NTE_DEVICE_NOT_FOUND Handle = 0x80090035 + NTE_USER_CANCELLED Handle = 0x80090036 + NTE_PASSWORD_CHANGE_REQUIRED Handle = 0x80090037 + NTE_NOT_ACTIVE_CONSOLE Handle = 0x80090038 + SEC_E_INSUFFICIENT_MEMORY Handle = 0x80090300 + SEC_E_INVALID_HANDLE Handle = 0x80090301 + SEC_E_UNSUPPORTED_FUNCTION Handle = 0x80090302 + SEC_E_TARGET_UNKNOWN Handle = 0x80090303 + SEC_E_INTERNAL_ERROR Handle = 0x80090304 + SEC_E_SECPKG_NOT_FOUND Handle = 0x80090305 + SEC_E_NOT_OWNER Handle = 0x80090306 + SEC_E_CANNOT_INSTALL Handle = 0x80090307 + SEC_E_INVALID_TOKEN Handle = 0x80090308 + SEC_E_CANNOT_PACK Handle = 0x80090309 + SEC_E_QOP_NOT_SUPPORTED Handle = 0x8009030A + SEC_E_NO_IMPERSONATION Handle = 0x8009030B + SEC_E_LOGON_DENIED Handle = 0x8009030C + SEC_E_UNKNOWN_CREDENTIALS Handle = 0x8009030D + SEC_E_NO_CREDENTIALS Handle = 0x8009030E + SEC_E_MESSAGE_ALTERED Handle = 0x8009030F + SEC_E_OUT_OF_SEQUENCE Handle = 0x80090310 + SEC_E_NO_AUTHENTICATING_AUTHORITY Handle = 0x80090311 + SEC_I_CONTINUE_NEEDED Handle = 0x00090312 + SEC_I_COMPLETE_NEEDED Handle = 0x00090313 + SEC_I_COMPLETE_AND_CONTINUE Handle = 0x00090314 + SEC_I_LOCAL_LOGON Handle = 0x00090315 + SEC_I_GENERIC_EXTENSION_RECEIVED Handle = 0x00090316 + SEC_E_BAD_PKGID Handle = 0x80090316 + SEC_E_CONTEXT_EXPIRED Handle = 0x80090317 + SEC_I_CONTEXT_EXPIRED Handle = 0x00090317 + SEC_E_INCOMPLETE_MESSAGE Handle = 0x80090318 + SEC_E_INCOMPLETE_CREDENTIALS Handle = 0x80090320 + SEC_E_BUFFER_TOO_SMALL Handle = 0x80090321 + SEC_I_INCOMPLETE_CREDENTIALS Handle = 0x00090320 + SEC_I_RENEGOTIATE Handle = 0x00090321 + SEC_E_WRONG_PRINCIPAL Handle = 0x80090322 + SEC_I_NO_LSA_CONTEXT Handle = 0x00090323 + SEC_E_TIME_SKEW Handle = 0x80090324 + SEC_E_UNTRUSTED_ROOT Handle = 0x80090325 + SEC_E_ILLEGAL_MESSAGE Handle = 0x80090326 + SEC_E_CERT_UNKNOWN Handle = 0x80090327 + SEC_E_CERT_EXPIRED Handle = 
0x80090328 + SEC_E_ENCRYPT_FAILURE Handle = 0x80090329 + SEC_E_DECRYPT_FAILURE Handle = 0x80090330 + SEC_E_ALGORITHM_MISMATCH Handle = 0x80090331 + SEC_E_SECURITY_QOS_FAILED Handle = 0x80090332 + SEC_E_UNFINISHED_CONTEXT_DELETED Handle = 0x80090333 + SEC_E_NO_TGT_REPLY Handle = 0x80090334 + SEC_E_NO_IP_ADDRESSES Handle = 0x80090335 + SEC_E_WRONG_CREDENTIAL_HANDLE Handle = 0x80090336 + SEC_E_CRYPTO_SYSTEM_INVALID Handle = 0x80090337 + SEC_E_MAX_REFERRALS_EXCEEDED Handle = 0x80090338 + SEC_E_MUST_BE_KDC Handle = 0x80090339 + SEC_E_STRONG_CRYPTO_NOT_SUPPORTED Handle = 0x8009033A + SEC_E_TOO_MANY_PRINCIPALS Handle = 0x8009033B + SEC_E_NO_PA_DATA Handle = 0x8009033C + SEC_E_PKINIT_NAME_MISMATCH Handle = 0x8009033D + SEC_E_SMARTCARD_LOGON_REQUIRED Handle = 0x8009033E + SEC_E_SHUTDOWN_IN_PROGRESS Handle = 0x8009033F + SEC_E_KDC_INVALID_REQUEST Handle = 0x80090340 + SEC_E_KDC_UNABLE_TO_REFER Handle = 0x80090341 + SEC_E_KDC_UNKNOWN_ETYPE Handle = 0x80090342 + SEC_E_UNSUPPORTED_PREAUTH Handle = 0x80090343 + SEC_E_DELEGATION_REQUIRED Handle = 0x80090345 + SEC_E_BAD_BINDINGS Handle = 0x80090346 + SEC_E_MULTIPLE_ACCOUNTS Handle = 0x80090347 + SEC_E_NO_KERB_KEY Handle = 0x80090348 + SEC_E_CERT_WRONG_USAGE Handle = 0x80090349 + SEC_E_DOWNGRADE_DETECTED Handle = 0x80090350 + SEC_E_SMARTCARD_CERT_REVOKED Handle = 0x80090351 + SEC_E_ISSUING_CA_UNTRUSTED Handle = 0x80090352 + SEC_E_REVOCATION_OFFLINE_C Handle = 0x80090353 + SEC_E_PKINIT_CLIENT_FAILURE Handle = 0x80090354 + SEC_E_SMARTCARD_CERT_EXPIRED Handle = 0x80090355 + SEC_E_NO_S4U_PROT_SUPPORT Handle = 0x80090356 + SEC_E_CROSSREALM_DELEGATION_FAILURE Handle = 0x80090357 + SEC_E_REVOCATION_OFFLINE_KDC Handle = 0x80090358 + SEC_E_ISSUING_CA_UNTRUSTED_KDC Handle = 0x80090359 + SEC_E_KDC_CERT_EXPIRED Handle = 0x8009035A + SEC_E_KDC_CERT_REVOKED Handle = 0x8009035B + SEC_I_SIGNATURE_NEEDED Handle = 0x0009035C + SEC_E_INVALID_PARAMETER Handle = 0x8009035D + SEC_E_DELEGATION_POLICY Handle = 0x8009035E + SEC_E_POLICY_NLTM_ONLY Handle = 0x8009035F + SEC_I_NO_RENEGOTIATION Handle = 0x00090360 + SEC_E_NO_CONTEXT Handle = 0x80090361 + SEC_E_PKU2U_CERT_FAILURE Handle = 0x80090362 + SEC_E_MUTUAL_AUTH_FAILED Handle = 0x80090363 + SEC_I_MESSAGE_FRAGMENT Handle = 0x00090364 + SEC_E_ONLY_HTTPS_ALLOWED Handle = 0x80090365 + SEC_I_CONTINUE_NEEDED_MESSAGE_OK Handle = 0x00090366 + SEC_E_APPLICATION_PROTOCOL_MISMATCH Handle = 0x80090367 + SEC_I_ASYNC_CALL_PENDING Handle = 0x00090368 + SEC_E_INVALID_UPN_NAME Handle = 0x80090369 + SEC_E_EXT_BUFFER_TOO_SMALL Handle = 0x8009036A + SEC_E_INSUFFICIENT_BUFFERS Handle = 0x8009036B + SEC_E_NO_SPM = SEC_E_INTERNAL_ERROR + SEC_E_NOT_SUPPORTED = SEC_E_UNSUPPORTED_FUNCTION + CRYPT_E_MSG_ERROR Handle = 0x80091001 + CRYPT_E_UNKNOWN_ALGO Handle = 0x80091002 + CRYPT_E_OID_FORMAT Handle = 0x80091003 + CRYPT_E_INVALID_MSG_TYPE Handle = 0x80091004 + CRYPT_E_UNEXPECTED_ENCODING Handle = 0x80091005 + CRYPT_E_AUTH_ATTR_MISSING Handle = 0x80091006 + CRYPT_E_HASH_VALUE Handle = 0x80091007 + CRYPT_E_INVALID_INDEX Handle = 0x80091008 + CRYPT_E_ALREADY_DECRYPTED Handle = 0x80091009 + CRYPT_E_NOT_DECRYPTED Handle = 0x8009100A + CRYPT_E_RECIPIENT_NOT_FOUND Handle = 0x8009100B + CRYPT_E_CONTROL_TYPE Handle = 0x8009100C + CRYPT_E_ISSUER_SERIALNUMBER Handle = 0x8009100D + CRYPT_E_SIGNER_NOT_FOUND Handle = 0x8009100E + CRYPT_E_ATTRIBUTES_MISSING Handle = 0x8009100F + CRYPT_E_STREAM_MSG_NOT_READY Handle = 0x80091010 + CRYPT_E_STREAM_INSUFFICIENT_DATA Handle = 0x80091011 + CRYPT_I_NEW_PROTECTION_REQUIRED Handle = 0x00091012 + CRYPT_E_BAD_LEN Handle = 0x80092001 
+ CRYPT_E_BAD_ENCODE Handle = 0x80092002 + CRYPT_E_FILE_ERROR Handle = 0x80092003 + CRYPT_E_NOT_FOUND Handle = 0x80092004 + CRYPT_E_EXISTS Handle = 0x80092005 + CRYPT_E_NO_PROVIDER Handle = 0x80092006 + CRYPT_E_SELF_SIGNED Handle = 0x80092007 + CRYPT_E_DELETED_PREV Handle = 0x80092008 + CRYPT_E_NO_MATCH Handle = 0x80092009 + CRYPT_E_UNEXPECTED_MSG_TYPE Handle = 0x8009200A + CRYPT_E_NO_KEY_PROPERTY Handle = 0x8009200B + CRYPT_E_NO_DECRYPT_CERT Handle = 0x8009200C + CRYPT_E_BAD_MSG Handle = 0x8009200D + CRYPT_E_NO_SIGNER Handle = 0x8009200E + CRYPT_E_PENDING_CLOSE Handle = 0x8009200F + CRYPT_E_REVOKED Handle = 0x80092010 + CRYPT_E_NO_REVOCATION_DLL Handle = 0x80092011 + CRYPT_E_NO_REVOCATION_CHECK Handle = 0x80092012 + CRYPT_E_REVOCATION_OFFLINE Handle = 0x80092013 + CRYPT_E_NOT_IN_REVOCATION_DATABASE Handle = 0x80092014 + CRYPT_E_INVALID_NUMERIC_STRING Handle = 0x80092020 + CRYPT_E_INVALID_PRINTABLE_STRING Handle = 0x80092021 + CRYPT_E_INVALID_IA5_STRING Handle = 0x80092022 + CRYPT_E_INVALID_X500_STRING Handle = 0x80092023 + CRYPT_E_NOT_CHAR_STRING Handle = 0x80092024 + CRYPT_E_FILERESIZED Handle = 0x80092025 + CRYPT_E_SECURITY_SETTINGS Handle = 0x80092026 + CRYPT_E_NO_VERIFY_USAGE_DLL Handle = 0x80092027 + CRYPT_E_NO_VERIFY_USAGE_CHECK Handle = 0x80092028 + CRYPT_E_VERIFY_USAGE_OFFLINE Handle = 0x80092029 + CRYPT_E_NOT_IN_CTL Handle = 0x8009202A + CRYPT_E_NO_TRUSTED_SIGNER Handle = 0x8009202B + CRYPT_E_MISSING_PUBKEY_PARA Handle = 0x8009202C + CRYPT_E_OBJECT_LOCATOR_OBJECT_NOT_FOUND Handle = 0x8009202D + CRYPT_E_OSS_ERROR Handle = 0x80093000 + OSS_MORE_BUF Handle = 0x80093001 + OSS_NEGATIVE_UINTEGER Handle = 0x80093002 + OSS_PDU_RANGE Handle = 0x80093003 + OSS_MORE_INPUT Handle = 0x80093004 + OSS_DATA_ERROR Handle = 0x80093005 + OSS_BAD_ARG Handle = 0x80093006 + OSS_BAD_VERSION Handle = 0x80093007 + OSS_OUT_MEMORY Handle = 0x80093008 + OSS_PDU_MISMATCH Handle = 0x80093009 + OSS_LIMITED Handle = 0x8009300A + OSS_BAD_PTR Handle = 0x8009300B + OSS_BAD_TIME Handle = 0x8009300C + OSS_INDEFINITE_NOT_SUPPORTED Handle = 0x8009300D + OSS_MEM_ERROR Handle = 0x8009300E + OSS_BAD_TABLE Handle = 0x8009300F + OSS_TOO_LONG Handle = 0x80093010 + OSS_CONSTRAINT_VIOLATED Handle = 0x80093011 + OSS_FATAL_ERROR Handle = 0x80093012 + OSS_ACCESS_SERIALIZATION_ERROR Handle = 0x80093013 + OSS_NULL_TBL Handle = 0x80093014 + OSS_NULL_FCN Handle = 0x80093015 + OSS_BAD_ENCRULES Handle = 0x80093016 + OSS_UNAVAIL_ENCRULES Handle = 0x80093017 + OSS_CANT_OPEN_TRACE_WINDOW Handle = 0x80093018 + OSS_UNIMPLEMENTED Handle = 0x80093019 + OSS_OID_DLL_NOT_LINKED Handle = 0x8009301A + OSS_CANT_OPEN_TRACE_FILE Handle = 0x8009301B + OSS_TRACE_FILE_ALREADY_OPEN Handle = 0x8009301C + OSS_TABLE_MISMATCH Handle = 0x8009301D + OSS_TYPE_NOT_SUPPORTED Handle = 0x8009301E + OSS_REAL_DLL_NOT_LINKED Handle = 0x8009301F + OSS_REAL_CODE_NOT_LINKED Handle = 0x80093020 + OSS_OUT_OF_RANGE Handle = 0x80093021 + OSS_COPIER_DLL_NOT_LINKED Handle = 0x80093022 + OSS_CONSTRAINT_DLL_NOT_LINKED Handle = 0x80093023 + OSS_COMPARATOR_DLL_NOT_LINKED Handle = 0x80093024 + OSS_COMPARATOR_CODE_NOT_LINKED Handle = 0x80093025 + OSS_MEM_MGR_DLL_NOT_LINKED Handle = 0x80093026 + OSS_PDV_DLL_NOT_LINKED Handle = 0x80093027 + OSS_PDV_CODE_NOT_LINKED Handle = 0x80093028 + OSS_API_DLL_NOT_LINKED Handle = 0x80093029 + OSS_BERDER_DLL_NOT_LINKED Handle = 0x8009302A + OSS_PER_DLL_NOT_LINKED Handle = 0x8009302B + OSS_OPEN_TYPE_ERROR Handle = 0x8009302C + OSS_MUTEX_NOT_CREATED Handle = 0x8009302D + OSS_CANT_CLOSE_TRACE_FILE Handle = 0x8009302E + CRYPT_E_ASN1_ERROR Handle = 
0x80093100 + CRYPT_E_ASN1_INTERNAL Handle = 0x80093101 + CRYPT_E_ASN1_EOD Handle = 0x80093102 + CRYPT_E_ASN1_CORRUPT Handle = 0x80093103 + CRYPT_E_ASN1_LARGE Handle = 0x80093104 + CRYPT_E_ASN1_CONSTRAINT Handle = 0x80093105 + CRYPT_E_ASN1_MEMORY Handle = 0x80093106 + CRYPT_E_ASN1_OVERFLOW Handle = 0x80093107 + CRYPT_E_ASN1_BADPDU Handle = 0x80093108 + CRYPT_E_ASN1_BADARGS Handle = 0x80093109 + CRYPT_E_ASN1_BADREAL Handle = 0x8009310A + CRYPT_E_ASN1_BADTAG Handle = 0x8009310B + CRYPT_E_ASN1_CHOICE Handle = 0x8009310C + CRYPT_E_ASN1_RULE Handle = 0x8009310D + CRYPT_E_ASN1_UTF8 Handle = 0x8009310E + CRYPT_E_ASN1_PDU_TYPE Handle = 0x80093133 + CRYPT_E_ASN1_NYI Handle = 0x80093134 + CRYPT_E_ASN1_EXTENDED Handle = 0x80093201 + CRYPT_E_ASN1_NOEOD Handle = 0x80093202 + CERTSRV_E_BAD_REQUESTSUBJECT Handle = 0x80094001 + CERTSRV_E_NO_REQUEST Handle = 0x80094002 + CERTSRV_E_BAD_REQUESTSTATUS Handle = 0x80094003 + CERTSRV_E_PROPERTY_EMPTY Handle = 0x80094004 + CERTSRV_E_INVALID_CA_CERTIFICATE Handle = 0x80094005 + CERTSRV_E_SERVER_SUSPENDED Handle = 0x80094006 + CERTSRV_E_ENCODING_LENGTH Handle = 0x80094007 + CERTSRV_E_ROLECONFLICT Handle = 0x80094008 + CERTSRV_E_RESTRICTEDOFFICER Handle = 0x80094009 + CERTSRV_E_KEY_ARCHIVAL_NOT_CONFIGURED Handle = 0x8009400A + CERTSRV_E_NO_VALID_KRA Handle = 0x8009400B + CERTSRV_E_BAD_REQUEST_KEY_ARCHIVAL Handle = 0x8009400C + CERTSRV_E_NO_CAADMIN_DEFINED Handle = 0x8009400D + CERTSRV_E_BAD_RENEWAL_CERT_ATTRIBUTE Handle = 0x8009400E + CERTSRV_E_NO_DB_SESSIONS Handle = 0x8009400F + CERTSRV_E_ALIGNMENT_FAULT Handle = 0x80094010 + CERTSRV_E_ENROLL_DENIED Handle = 0x80094011 + CERTSRV_E_TEMPLATE_DENIED Handle = 0x80094012 + CERTSRV_E_DOWNLEVEL_DC_SSL_OR_UPGRADE Handle = 0x80094013 + CERTSRV_E_ADMIN_DENIED_REQUEST Handle = 0x80094014 + CERTSRV_E_NO_POLICY_SERVER Handle = 0x80094015 + CERTSRV_E_WEAK_SIGNATURE_OR_KEY Handle = 0x80094016 + CERTSRV_E_KEY_ATTESTATION_NOT_SUPPORTED Handle = 0x80094017 + CERTSRV_E_ENCRYPTION_CERT_REQUIRED Handle = 0x80094018 + CERTSRV_E_UNSUPPORTED_CERT_TYPE Handle = 0x80094800 + CERTSRV_E_NO_CERT_TYPE Handle = 0x80094801 + CERTSRV_E_TEMPLATE_CONFLICT Handle = 0x80094802 + CERTSRV_E_SUBJECT_ALT_NAME_REQUIRED Handle = 0x80094803 + CERTSRV_E_ARCHIVED_KEY_REQUIRED Handle = 0x80094804 + CERTSRV_E_SMIME_REQUIRED Handle = 0x80094805 + CERTSRV_E_BAD_RENEWAL_SUBJECT Handle = 0x80094806 + CERTSRV_E_BAD_TEMPLATE_VERSION Handle = 0x80094807 + CERTSRV_E_TEMPLATE_POLICY_REQUIRED Handle = 0x80094808 + CERTSRV_E_SIGNATURE_POLICY_REQUIRED Handle = 0x80094809 + CERTSRV_E_SIGNATURE_COUNT Handle = 0x8009480A + CERTSRV_E_SIGNATURE_REJECTED Handle = 0x8009480B + CERTSRV_E_ISSUANCE_POLICY_REQUIRED Handle = 0x8009480C + CERTSRV_E_SUBJECT_UPN_REQUIRED Handle = 0x8009480D + CERTSRV_E_SUBJECT_DIRECTORY_GUID_REQUIRED Handle = 0x8009480E + CERTSRV_E_SUBJECT_DNS_REQUIRED Handle = 0x8009480F + CERTSRV_E_ARCHIVED_KEY_UNEXPECTED Handle = 0x80094810 + CERTSRV_E_KEY_LENGTH Handle = 0x80094811 + CERTSRV_E_SUBJECT_EMAIL_REQUIRED Handle = 0x80094812 + CERTSRV_E_UNKNOWN_CERT_TYPE Handle = 0x80094813 + CERTSRV_E_CERT_TYPE_OVERLAP Handle = 0x80094814 + CERTSRV_E_TOO_MANY_SIGNATURES Handle = 0x80094815 + CERTSRV_E_RENEWAL_BAD_PUBLIC_KEY Handle = 0x80094816 + CERTSRV_E_INVALID_EK Handle = 0x80094817 + CERTSRV_E_INVALID_IDBINDING Handle = 0x80094818 + CERTSRV_E_INVALID_ATTESTATION Handle = 0x80094819 + CERTSRV_E_KEY_ATTESTATION Handle = 0x8009481A + CERTSRV_E_CORRUPT_KEY_ATTESTATION Handle = 0x8009481B + CERTSRV_E_EXPIRED_CHALLENGE Handle = 0x8009481C + CERTSRV_E_INVALID_RESPONSE Handle 
= 0x8009481D + CERTSRV_E_INVALID_REQUESTID Handle = 0x8009481E + CERTSRV_E_REQUEST_PRECERTIFICATE_MISMATCH Handle = 0x8009481F + CERTSRV_E_PENDING_CLIENT_RESPONSE Handle = 0x80094820 + XENROLL_E_KEY_NOT_EXPORTABLE Handle = 0x80095000 + XENROLL_E_CANNOT_ADD_ROOT_CERT Handle = 0x80095001 + XENROLL_E_RESPONSE_KA_HASH_NOT_FOUND Handle = 0x80095002 + XENROLL_E_RESPONSE_UNEXPECTED_KA_HASH Handle = 0x80095003 + XENROLL_E_RESPONSE_KA_HASH_MISMATCH Handle = 0x80095004 + XENROLL_E_KEYSPEC_SMIME_MISMATCH Handle = 0x80095005 + TRUST_E_SYSTEM_ERROR Handle = 0x80096001 + TRUST_E_NO_SIGNER_CERT Handle = 0x80096002 + TRUST_E_COUNTER_SIGNER Handle = 0x80096003 + TRUST_E_CERT_SIGNATURE Handle = 0x80096004 + TRUST_E_TIME_STAMP Handle = 0x80096005 + TRUST_E_BAD_DIGEST Handle = 0x80096010 + TRUST_E_MALFORMED_SIGNATURE Handle = 0x80096011 + TRUST_E_BASIC_CONSTRAINTS Handle = 0x80096019 + TRUST_E_FINANCIAL_CRITERIA Handle = 0x8009601E + MSSIPOTF_E_OUTOFMEMRANGE Handle = 0x80097001 + MSSIPOTF_E_CANTGETOBJECT Handle = 0x80097002 + MSSIPOTF_E_NOHEADTABLE Handle = 0x80097003 + MSSIPOTF_E_BAD_MAGICNUMBER Handle = 0x80097004 + MSSIPOTF_E_BAD_OFFSET_TABLE Handle = 0x80097005 + MSSIPOTF_E_TABLE_TAGORDER Handle = 0x80097006 + MSSIPOTF_E_TABLE_LONGWORD Handle = 0x80097007 + MSSIPOTF_E_BAD_FIRST_TABLE_PLACEMENT Handle = 0x80097008 + MSSIPOTF_E_TABLES_OVERLAP Handle = 0x80097009 + MSSIPOTF_E_TABLE_PADBYTES Handle = 0x8009700A + MSSIPOTF_E_FILETOOSMALL Handle = 0x8009700B + MSSIPOTF_E_TABLE_CHECKSUM Handle = 0x8009700C + MSSIPOTF_E_FILE_CHECKSUM Handle = 0x8009700D + MSSIPOTF_E_FAILED_POLICY Handle = 0x80097010 + MSSIPOTF_E_FAILED_HINTS_CHECK Handle = 0x80097011 + MSSIPOTF_E_NOT_OPENTYPE Handle = 0x80097012 + MSSIPOTF_E_FILE Handle = 0x80097013 + MSSIPOTF_E_CRYPT Handle = 0x80097014 + MSSIPOTF_E_BADVERSION Handle = 0x80097015 + MSSIPOTF_E_DSIG_STRUCTURE Handle = 0x80097016 + MSSIPOTF_E_PCONST_CHECK Handle = 0x80097017 + MSSIPOTF_E_STRUCTURE Handle = 0x80097018 + ERROR_CRED_REQUIRES_CONFIRMATION Handle = 0x80097019 + NTE_OP_OK syscall.Errno = 0 + TRUST_E_PROVIDER_UNKNOWN Handle = 0x800B0001 + TRUST_E_ACTION_UNKNOWN Handle = 0x800B0002 + TRUST_E_SUBJECT_FORM_UNKNOWN Handle = 0x800B0003 + TRUST_E_SUBJECT_NOT_TRUSTED Handle = 0x800B0004 + DIGSIG_E_ENCODE Handle = 0x800B0005 + DIGSIG_E_DECODE Handle = 0x800B0006 + DIGSIG_E_EXTENSIBILITY Handle = 0x800B0007 + DIGSIG_E_CRYPTO Handle = 0x800B0008 + PERSIST_E_SIZEDEFINITE Handle = 0x800B0009 + PERSIST_E_SIZEINDEFINITE Handle = 0x800B000A + PERSIST_E_NOTSELFSIZING Handle = 0x800B000B + TRUST_E_NOSIGNATURE Handle = 0x800B0100 + CERT_E_EXPIRED Handle = 0x800B0101 + CERT_E_VALIDITYPERIODNESTING Handle = 0x800B0102 + CERT_E_ROLE Handle = 0x800B0103 + CERT_E_PATHLENCONST Handle = 0x800B0104 + CERT_E_CRITICAL Handle = 0x800B0105 + CERT_E_PURPOSE Handle = 0x800B0106 + CERT_E_ISSUERCHAINING Handle = 0x800B0107 + CERT_E_MALFORMED Handle = 0x800B0108 + CERT_E_UNTRUSTEDROOT Handle = 0x800B0109 + CERT_E_CHAINING Handle = 0x800B010A + TRUST_E_FAIL Handle = 0x800B010B + CERT_E_REVOKED Handle = 0x800B010C + CERT_E_UNTRUSTEDTESTROOT Handle = 0x800B010D + CERT_E_REVOCATION_FAILURE Handle = 0x800B010E + CERT_E_CN_NO_MATCH Handle = 0x800B010F + CERT_E_WRONG_USAGE Handle = 0x800B0110 + TRUST_E_EXPLICIT_DISTRUST Handle = 0x800B0111 + CERT_E_UNTRUSTEDCA Handle = 0x800B0112 + CERT_E_INVALID_POLICY Handle = 0x800B0113 + CERT_E_INVALID_NAME Handle = 0x800B0114 + SPAPI_E_EXPECTED_SECTION_NAME Handle = 0x800F0000 + SPAPI_E_BAD_SECTION_NAME_LINE Handle = 0x800F0001 + SPAPI_E_SECTION_NAME_TOO_LONG Handle = 
0x800F0002 + SPAPI_E_GENERAL_SYNTAX Handle = 0x800F0003 + SPAPI_E_WRONG_INF_STYLE Handle = 0x800F0100 + SPAPI_E_SECTION_NOT_FOUND Handle = 0x800F0101 + SPAPI_E_LINE_NOT_FOUND Handle = 0x800F0102 + SPAPI_E_NO_BACKUP Handle = 0x800F0103 + SPAPI_E_NO_ASSOCIATED_CLASS Handle = 0x800F0200 + SPAPI_E_CLASS_MISMATCH Handle = 0x800F0201 + SPAPI_E_DUPLICATE_FOUND Handle = 0x800F0202 + SPAPI_E_NO_DRIVER_SELECTED Handle = 0x800F0203 + SPAPI_E_KEY_DOES_NOT_EXIST Handle = 0x800F0204 + SPAPI_E_INVALID_DEVINST_NAME Handle = 0x800F0205 + SPAPI_E_INVALID_CLASS Handle = 0x800F0206 + SPAPI_E_DEVINST_ALREADY_EXISTS Handle = 0x800F0207 + SPAPI_E_DEVINFO_NOT_REGISTERED Handle = 0x800F0208 + SPAPI_E_INVALID_REG_PROPERTY Handle = 0x800F0209 + SPAPI_E_NO_INF Handle = 0x800F020A + SPAPI_E_NO_SUCH_DEVINST Handle = 0x800F020B + SPAPI_E_CANT_LOAD_CLASS_ICON Handle = 0x800F020C + SPAPI_E_INVALID_CLASS_INSTALLER Handle = 0x800F020D + SPAPI_E_DI_DO_DEFAULT Handle = 0x800F020E + SPAPI_E_DI_NOFILECOPY Handle = 0x800F020F + SPAPI_E_INVALID_HWPROFILE Handle = 0x800F0210 + SPAPI_E_NO_DEVICE_SELECTED Handle = 0x800F0211 + SPAPI_E_DEVINFO_LIST_LOCKED Handle = 0x800F0212 + SPAPI_E_DEVINFO_DATA_LOCKED Handle = 0x800F0213 + SPAPI_E_DI_BAD_PATH Handle = 0x800F0214 + SPAPI_E_NO_CLASSINSTALL_PARAMS Handle = 0x800F0215 + SPAPI_E_FILEQUEUE_LOCKED Handle = 0x800F0216 + SPAPI_E_BAD_SERVICE_INSTALLSECT Handle = 0x800F0217 + SPAPI_E_NO_CLASS_DRIVER_LIST Handle = 0x800F0218 + SPAPI_E_NO_ASSOCIATED_SERVICE Handle = 0x800F0219 + SPAPI_E_NO_DEFAULT_DEVICE_INTERFACE Handle = 0x800F021A + SPAPI_E_DEVICE_INTERFACE_ACTIVE Handle = 0x800F021B + SPAPI_E_DEVICE_INTERFACE_REMOVED Handle = 0x800F021C + SPAPI_E_BAD_INTERFACE_INSTALLSECT Handle = 0x800F021D + SPAPI_E_NO_SUCH_INTERFACE_CLASS Handle = 0x800F021E + SPAPI_E_INVALID_REFERENCE_STRING Handle = 0x800F021F + SPAPI_E_INVALID_MACHINENAME Handle = 0x800F0220 + SPAPI_E_REMOTE_COMM_FAILURE Handle = 0x800F0221 + SPAPI_E_MACHINE_UNAVAILABLE Handle = 0x800F0222 + SPAPI_E_NO_CONFIGMGR_SERVICES Handle = 0x800F0223 + SPAPI_E_INVALID_PROPPAGE_PROVIDER Handle = 0x800F0224 + SPAPI_E_NO_SUCH_DEVICE_INTERFACE Handle = 0x800F0225 + SPAPI_E_DI_POSTPROCESSING_REQUIRED Handle = 0x800F0226 + SPAPI_E_INVALID_COINSTALLER Handle = 0x800F0227 + SPAPI_E_NO_COMPAT_DRIVERS Handle = 0x800F0228 + SPAPI_E_NO_DEVICE_ICON Handle = 0x800F0229 + SPAPI_E_INVALID_INF_LOGCONFIG Handle = 0x800F022A + SPAPI_E_DI_DONT_INSTALL Handle = 0x800F022B + SPAPI_E_INVALID_FILTER_DRIVER Handle = 0x800F022C + SPAPI_E_NON_WINDOWS_NT_DRIVER Handle = 0x800F022D + SPAPI_E_NON_WINDOWS_DRIVER Handle = 0x800F022E + SPAPI_E_NO_CATALOG_FOR_OEM_INF Handle = 0x800F022F + SPAPI_E_DEVINSTALL_QUEUE_NONNATIVE Handle = 0x800F0230 + SPAPI_E_NOT_DISABLEABLE Handle = 0x800F0231 + SPAPI_E_CANT_REMOVE_DEVINST Handle = 0x800F0232 + SPAPI_E_INVALID_TARGET Handle = 0x800F0233 + SPAPI_E_DRIVER_NONNATIVE Handle = 0x800F0234 + SPAPI_E_IN_WOW64 Handle = 0x800F0235 + SPAPI_E_SET_SYSTEM_RESTORE_POINT Handle = 0x800F0236 + SPAPI_E_INCORRECTLY_COPIED_INF Handle = 0x800F0237 + SPAPI_E_SCE_DISABLED Handle = 0x800F0238 + SPAPI_E_UNKNOWN_EXCEPTION Handle = 0x800F0239 + SPAPI_E_PNP_REGISTRY_ERROR Handle = 0x800F023A + SPAPI_E_REMOTE_REQUEST_UNSUPPORTED Handle = 0x800F023B + SPAPI_E_NOT_AN_INSTALLED_OEM_INF Handle = 0x800F023C + SPAPI_E_INF_IN_USE_BY_DEVICES Handle = 0x800F023D + SPAPI_E_DI_FUNCTION_OBSOLETE Handle = 0x800F023E + SPAPI_E_NO_AUTHENTICODE_CATALOG Handle = 0x800F023F + SPAPI_E_AUTHENTICODE_DISALLOWED Handle = 0x800F0240 + SPAPI_E_AUTHENTICODE_TRUSTED_PUBLISHER Handle = 
0x800F0241 + SPAPI_E_AUTHENTICODE_TRUST_NOT_ESTABLISHED Handle = 0x800F0242 + SPAPI_E_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Handle = 0x800F0243 + SPAPI_E_SIGNATURE_OSATTRIBUTE_MISMATCH Handle = 0x800F0244 + SPAPI_E_ONLY_VALIDATE_VIA_AUTHENTICODE Handle = 0x800F0245 + SPAPI_E_DEVICE_INSTALLER_NOT_READY Handle = 0x800F0246 + SPAPI_E_DRIVER_STORE_ADD_FAILED Handle = 0x800F0247 + SPAPI_E_DEVICE_INSTALL_BLOCKED Handle = 0x800F0248 + SPAPI_E_DRIVER_INSTALL_BLOCKED Handle = 0x800F0249 + SPAPI_E_WRONG_INF_TYPE Handle = 0x800F024A + SPAPI_E_FILE_HASH_NOT_IN_CATALOG Handle = 0x800F024B + SPAPI_E_DRIVER_STORE_DELETE_FAILED Handle = 0x800F024C + SPAPI_E_UNRECOVERABLE_STACK_OVERFLOW Handle = 0x800F0300 + SPAPI_E_ERROR_NOT_INSTALLED Handle = 0x800F1000 + SCARD_S_SUCCESS = S_OK + SCARD_F_INTERNAL_ERROR Handle = 0x80100001 + SCARD_E_CANCELLED Handle = 0x80100002 + SCARD_E_INVALID_HANDLE Handle = 0x80100003 + SCARD_E_INVALID_PARAMETER Handle = 0x80100004 + SCARD_E_INVALID_TARGET Handle = 0x80100005 + SCARD_E_NO_MEMORY Handle = 0x80100006 + SCARD_F_WAITED_TOO_LONG Handle = 0x80100007 + SCARD_E_INSUFFICIENT_BUFFER Handle = 0x80100008 + SCARD_E_UNKNOWN_READER Handle = 0x80100009 + SCARD_E_TIMEOUT Handle = 0x8010000A + SCARD_E_SHARING_VIOLATION Handle = 0x8010000B + SCARD_E_NO_SMARTCARD Handle = 0x8010000C + SCARD_E_UNKNOWN_CARD Handle = 0x8010000D + SCARD_E_CANT_DISPOSE Handle = 0x8010000E + SCARD_E_PROTO_MISMATCH Handle = 0x8010000F + SCARD_E_NOT_READY Handle = 0x80100010 + SCARD_E_INVALID_VALUE Handle = 0x80100011 + SCARD_E_SYSTEM_CANCELLED Handle = 0x80100012 + SCARD_F_COMM_ERROR Handle = 0x80100013 + SCARD_F_UNKNOWN_ERROR Handle = 0x80100014 + SCARD_E_INVALID_ATR Handle = 0x80100015 + SCARD_E_NOT_TRANSACTED Handle = 0x80100016 + SCARD_E_READER_UNAVAILABLE Handle = 0x80100017 + SCARD_P_SHUTDOWN Handle = 0x80100018 + SCARD_E_PCI_TOO_SMALL Handle = 0x80100019 + SCARD_E_READER_UNSUPPORTED Handle = 0x8010001A + SCARD_E_DUPLICATE_READER Handle = 0x8010001B + SCARD_E_CARD_UNSUPPORTED Handle = 0x8010001C + SCARD_E_NO_SERVICE Handle = 0x8010001D + SCARD_E_SERVICE_STOPPED Handle = 0x8010001E + SCARD_E_UNEXPECTED Handle = 0x8010001F + SCARD_E_ICC_INSTALLATION Handle = 0x80100020 + SCARD_E_ICC_CREATEORDER Handle = 0x80100021 + SCARD_E_UNSUPPORTED_FEATURE Handle = 0x80100022 + SCARD_E_DIR_NOT_FOUND Handle = 0x80100023 + SCARD_E_FILE_NOT_FOUND Handle = 0x80100024 + SCARD_E_NO_DIR Handle = 0x80100025 + SCARD_E_NO_FILE Handle = 0x80100026 + SCARD_E_NO_ACCESS Handle = 0x80100027 + SCARD_E_WRITE_TOO_MANY Handle = 0x80100028 + SCARD_E_BAD_SEEK Handle = 0x80100029 + SCARD_E_INVALID_CHV Handle = 0x8010002A + SCARD_E_UNKNOWN_RES_MNG Handle = 0x8010002B + SCARD_E_NO_SUCH_CERTIFICATE Handle = 0x8010002C + SCARD_E_CERTIFICATE_UNAVAILABLE Handle = 0x8010002D + SCARD_E_NO_READERS_AVAILABLE Handle = 0x8010002E + SCARD_E_COMM_DATA_LOST Handle = 0x8010002F + SCARD_E_NO_KEY_CONTAINER Handle = 0x80100030 + SCARD_E_SERVER_TOO_BUSY Handle = 0x80100031 + SCARD_E_PIN_CACHE_EXPIRED Handle = 0x80100032 + SCARD_E_NO_PIN_CACHE Handle = 0x80100033 + SCARD_E_READ_ONLY_CARD Handle = 0x80100034 + SCARD_W_UNSUPPORTED_CARD Handle = 0x80100065 + SCARD_W_UNRESPONSIVE_CARD Handle = 0x80100066 + SCARD_W_UNPOWERED_CARD Handle = 0x80100067 + SCARD_W_RESET_CARD Handle = 0x80100068 + SCARD_W_REMOVED_CARD Handle = 0x80100069 + SCARD_W_SECURITY_VIOLATION Handle = 0x8010006A + SCARD_W_WRONG_CHV Handle = 0x8010006B + SCARD_W_CHV_BLOCKED Handle = 0x8010006C + SCARD_W_EOF Handle = 0x8010006D + SCARD_W_CANCELLED_BY_USER Handle = 0x8010006E + 
SCARD_W_CARD_NOT_AUTHENTICATED Handle = 0x8010006F + SCARD_W_CACHE_ITEM_NOT_FOUND Handle = 0x80100070 + SCARD_W_CACHE_ITEM_STALE Handle = 0x80100071 + SCARD_W_CACHE_ITEM_TOO_BIG Handle = 0x80100072 + COMADMIN_E_OBJECTERRORS Handle = 0x80110401 + COMADMIN_E_OBJECTINVALID Handle = 0x80110402 + COMADMIN_E_KEYMISSING Handle = 0x80110403 + COMADMIN_E_ALREADYINSTALLED Handle = 0x80110404 + COMADMIN_E_APP_FILE_WRITEFAIL Handle = 0x80110407 + COMADMIN_E_APP_FILE_READFAIL Handle = 0x80110408 + COMADMIN_E_APP_FILE_VERSION Handle = 0x80110409 + COMADMIN_E_BADPATH Handle = 0x8011040A + COMADMIN_E_APPLICATIONEXISTS Handle = 0x8011040B + COMADMIN_E_ROLEEXISTS Handle = 0x8011040C + COMADMIN_E_CANTCOPYFILE Handle = 0x8011040D + COMADMIN_E_NOUSER Handle = 0x8011040F + COMADMIN_E_INVALIDUSERIDS Handle = 0x80110410 + COMADMIN_E_NOREGISTRYCLSID Handle = 0x80110411 + COMADMIN_E_BADREGISTRYPROGID Handle = 0x80110412 + COMADMIN_E_AUTHENTICATIONLEVEL Handle = 0x80110413 + COMADMIN_E_USERPASSWDNOTVALID Handle = 0x80110414 + COMADMIN_E_CLSIDORIIDMISMATCH Handle = 0x80110418 + COMADMIN_E_REMOTEINTERFACE Handle = 0x80110419 + COMADMIN_E_DLLREGISTERSERVER Handle = 0x8011041A + COMADMIN_E_NOSERVERSHARE Handle = 0x8011041B + COMADMIN_E_DLLLOADFAILED Handle = 0x8011041D + COMADMIN_E_BADREGISTRYLIBID Handle = 0x8011041E + COMADMIN_E_APPDIRNOTFOUND Handle = 0x8011041F + COMADMIN_E_REGISTRARFAILED Handle = 0x80110423 + COMADMIN_E_COMPFILE_DOESNOTEXIST Handle = 0x80110424 + COMADMIN_E_COMPFILE_LOADDLLFAIL Handle = 0x80110425 + COMADMIN_E_COMPFILE_GETCLASSOBJ Handle = 0x80110426 + COMADMIN_E_COMPFILE_CLASSNOTAVAIL Handle = 0x80110427 + COMADMIN_E_COMPFILE_BADTLB Handle = 0x80110428 + COMADMIN_E_COMPFILE_NOTINSTALLABLE Handle = 0x80110429 + COMADMIN_E_NOTCHANGEABLE Handle = 0x8011042A + COMADMIN_E_NOTDELETEABLE Handle = 0x8011042B + COMADMIN_E_SESSION Handle = 0x8011042C + COMADMIN_E_COMP_MOVE_LOCKED Handle = 0x8011042D + COMADMIN_E_COMP_MOVE_BAD_DEST Handle = 0x8011042E + COMADMIN_E_REGISTERTLB Handle = 0x80110430 + COMADMIN_E_SYSTEMAPP Handle = 0x80110433 + COMADMIN_E_COMPFILE_NOREGISTRAR Handle = 0x80110434 + COMADMIN_E_COREQCOMPINSTALLED Handle = 0x80110435 + COMADMIN_E_SERVICENOTINSTALLED Handle = 0x80110436 + COMADMIN_E_PROPERTYSAVEFAILED Handle = 0x80110437 + COMADMIN_E_OBJECTEXISTS Handle = 0x80110438 + COMADMIN_E_COMPONENTEXISTS Handle = 0x80110439 + COMADMIN_E_REGFILE_CORRUPT Handle = 0x8011043B + COMADMIN_E_PROPERTY_OVERFLOW Handle = 0x8011043C + COMADMIN_E_NOTINREGISTRY Handle = 0x8011043E + COMADMIN_E_OBJECTNOTPOOLABLE Handle = 0x8011043F + COMADMIN_E_APPLID_MATCHES_CLSID Handle = 0x80110446 + COMADMIN_E_ROLE_DOES_NOT_EXIST Handle = 0x80110447 + COMADMIN_E_START_APP_NEEDS_COMPONENTS Handle = 0x80110448 + COMADMIN_E_REQUIRES_DIFFERENT_PLATFORM Handle = 0x80110449 + COMADMIN_E_CAN_NOT_EXPORT_APP_PROXY Handle = 0x8011044A + COMADMIN_E_CAN_NOT_START_APP Handle = 0x8011044B + COMADMIN_E_CAN_NOT_EXPORT_SYS_APP Handle = 0x8011044C + COMADMIN_E_CANT_SUBSCRIBE_TO_COMPONENT Handle = 0x8011044D + COMADMIN_E_EVENTCLASS_CANT_BE_SUBSCRIBER Handle = 0x8011044E + COMADMIN_E_LIB_APP_PROXY_INCOMPATIBLE Handle = 0x8011044F + COMADMIN_E_BASE_PARTITION_ONLY Handle = 0x80110450 + COMADMIN_E_START_APP_DISABLED Handle = 0x80110451 + COMADMIN_E_CAT_DUPLICATE_PARTITION_NAME Handle = 0x80110457 + COMADMIN_E_CAT_INVALID_PARTITION_NAME Handle = 0x80110458 + COMADMIN_E_CAT_PARTITION_IN_USE Handle = 0x80110459 + COMADMIN_E_FILE_PARTITION_DUPLICATE_FILES Handle = 0x8011045A + COMADMIN_E_CAT_IMPORTED_COMPONENTS_NOT_ALLOWED Handle = 0x8011045B + 
COMADMIN_E_AMBIGUOUS_APPLICATION_NAME Handle = 0x8011045C + COMADMIN_E_AMBIGUOUS_PARTITION_NAME Handle = 0x8011045D + COMADMIN_E_REGDB_NOTINITIALIZED Handle = 0x80110472 + COMADMIN_E_REGDB_NOTOPEN Handle = 0x80110473 + COMADMIN_E_REGDB_SYSTEMERR Handle = 0x80110474 + COMADMIN_E_REGDB_ALREADYRUNNING Handle = 0x80110475 + COMADMIN_E_MIG_VERSIONNOTSUPPORTED Handle = 0x80110480 + COMADMIN_E_MIG_SCHEMANOTFOUND Handle = 0x80110481 + COMADMIN_E_CAT_BITNESSMISMATCH Handle = 0x80110482 + COMADMIN_E_CAT_UNACCEPTABLEBITNESS Handle = 0x80110483 + COMADMIN_E_CAT_WRONGAPPBITNESS Handle = 0x80110484 + COMADMIN_E_CAT_PAUSE_RESUME_NOT_SUPPORTED Handle = 0x80110485 + COMADMIN_E_CAT_SERVERFAULT Handle = 0x80110486 + COMQC_E_APPLICATION_NOT_QUEUED Handle = 0x80110600 + COMQC_E_NO_QUEUEABLE_INTERFACES Handle = 0x80110601 + COMQC_E_QUEUING_SERVICE_NOT_AVAILABLE Handle = 0x80110602 + COMQC_E_NO_IPERSISTSTREAM Handle = 0x80110603 + COMQC_E_BAD_MESSAGE Handle = 0x80110604 + COMQC_E_UNAUTHENTICATED Handle = 0x80110605 + COMQC_E_UNTRUSTED_ENQUEUER Handle = 0x80110606 + MSDTC_E_DUPLICATE_RESOURCE Handle = 0x80110701 + COMADMIN_E_OBJECT_PARENT_MISSING Handle = 0x80110808 + COMADMIN_E_OBJECT_DOES_NOT_EXIST Handle = 0x80110809 + COMADMIN_E_APP_NOT_RUNNING Handle = 0x8011080A + COMADMIN_E_INVALID_PARTITION Handle = 0x8011080B + COMADMIN_E_SVCAPP_NOT_POOLABLE_OR_RECYCLABLE Handle = 0x8011080D + COMADMIN_E_USER_IN_SET Handle = 0x8011080E + COMADMIN_E_CANTRECYCLELIBRARYAPPS Handle = 0x8011080F + COMADMIN_E_CANTRECYCLESERVICEAPPS Handle = 0x80110811 + COMADMIN_E_PROCESSALREADYRECYCLED Handle = 0x80110812 + COMADMIN_E_PAUSEDPROCESSMAYNOTBERECYCLED Handle = 0x80110813 + COMADMIN_E_CANTMAKEINPROCSERVICE Handle = 0x80110814 + COMADMIN_E_PROGIDINUSEBYCLSID Handle = 0x80110815 + COMADMIN_E_DEFAULT_PARTITION_NOT_IN_SET Handle = 0x80110816 + COMADMIN_E_RECYCLEDPROCESSMAYNOTBEPAUSED Handle = 0x80110817 + COMADMIN_E_PARTITION_ACCESSDENIED Handle = 0x80110818 + COMADMIN_E_PARTITION_MSI_ONLY Handle = 0x80110819 + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_1_0_FORMAT Handle = 0x8011081A + COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_NONBASE_PARTITIONS Handle = 0x8011081B + COMADMIN_E_COMP_MOVE_SOURCE Handle = 0x8011081C + COMADMIN_E_COMP_MOVE_DEST Handle = 0x8011081D + COMADMIN_E_COMP_MOVE_PRIVATE Handle = 0x8011081E + COMADMIN_E_BASEPARTITION_REQUIRED_IN_SET Handle = 0x8011081F + COMADMIN_E_CANNOT_ALIAS_EVENTCLASS Handle = 0x80110820 + COMADMIN_E_PRIVATE_ACCESSDENIED Handle = 0x80110821 + COMADMIN_E_SAFERINVALID Handle = 0x80110822 + COMADMIN_E_REGISTRY_ACCESSDENIED Handle = 0x80110823 + COMADMIN_E_PARTITIONS_DISABLED Handle = 0x80110824 + WER_S_REPORT_DEBUG Handle = 0x001B0000 + WER_S_REPORT_UPLOADED Handle = 0x001B0001 + WER_S_REPORT_QUEUED Handle = 0x001B0002 + WER_S_DISABLED Handle = 0x001B0003 + WER_S_SUSPENDED_UPLOAD Handle = 0x001B0004 + WER_S_DISABLED_QUEUE Handle = 0x001B0005 + WER_S_DISABLED_ARCHIVE Handle = 0x001B0006 + WER_S_REPORT_ASYNC Handle = 0x001B0007 + WER_S_IGNORE_ASSERT_INSTANCE Handle = 0x001B0008 + WER_S_IGNORE_ALL_ASSERTS Handle = 0x001B0009 + WER_S_ASSERT_CONTINUE Handle = 0x001B000A + WER_S_THROTTLED Handle = 0x001B000B + WER_S_REPORT_UPLOADED_CAB Handle = 0x001B000C + WER_E_CRASH_FAILURE Handle = 0x801B8000 + WER_E_CANCELED Handle = 0x801B8001 + WER_E_NETWORK_FAILURE Handle = 0x801B8002 + WER_E_NOT_INITIALIZED Handle = 0x801B8003 + WER_E_ALREADY_REPORTING Handle = 0x801B8004 + WER_E_DUMP_THROTTLED Handle = 0x801B8005 + WER_E_INSUFFICIENT_CONSENT Handle = 0x801B8006 + WER_E_TOO_HEAVY Handle = 0x801B8007 + 
ERROR_FLT_IO_COMPLETE Handle = 0x001F0001 + ERROR_FLT_NO_HANDLER_DEFINED Handle = 0x801F0001 + ERROR_FLT_CONTEXT_ALREADY_DEFINED Handle = 0x801F0002 + ERROR_FLT_INVALID_ASYNCHRONOUS_REQUEST Handle = 0x801F0003 + ERROR_FLT_DISALLOW_FAST_IO Handle = 0x801F0004 + ERROR_FLT_INVALID_NAME_REQUEST Handle = 0x801F0005 + ERROR_FLT_NOT_SAFE_TO_POST_OPERATION Handle = 0x801F0006 + ERROR_FLT_NOT_INITIALIZED Handle = 0x801F0007 + ERROR_FLT_FILTER_NOT_READY Handle = 0x801F0008 + ERROR_FLT_POST_OPERATION_CLEANUP Handle = 0x801F0009 + ERROR_FLT_INTERNAL_ERROR Handle = 0x801F000A + ERROR_FLT_DELETING_OBJECT Handle = 0x801F000B + ERROR_FLT_MUST_BE_NONPAGED_POOL Handle = 0x801F000C + ERROR_FLT_DUPLICATE_ENTRY Handle = 0x801F000D + ERROR_FLT_CBDQ_DISABLED Handle = 0x801F000E + ERROR_FLT_DO_NOT_ATTACH Handle = 0x801F000F + ERROR_FLT_DO_NOT_DETACH Handle = 0x801F0010 + ERROR_FLT_INSTANCE_ALTITUDE_COLLISION Handle = 0x801F0011 + ERROR_FLT_INSTANCE_NAME_COLLISION Handle = 0x801F0012 + ERROR_FLT_FILTER_NOT_FOUND Handle = 0x801F0013 + ERROR_FLT_VOLUME_NOT_FOUND Handle = 0x801F0014 + ERROR_FLT_INSTANCE_NOT_FOUND Handle = 0x801F0015 + ERROR_FLT_CONTEXT_ALLOCATION_NOT_FOUND Handle = 0x801F0016 + ERROR_FLT_INVALID_CONTEXT_REGISTRATION Handle = 0x801F0017 + ERROR_FLT_NAME_CACHE_MISS Handle = 0x801F0018 + ERROR_FLT_NO_DEVICE_OBJECT Handle = 0x801F0019 + ERROR_FLT_VOLUME_ALREADY_MOUNTED Handle = 0x801F001A + ERROR_FLT_ALREADY_ENLISTED Handle = 0x801F001B + ERROR_FLT_CONTEXT_ALREADY_LINKED Handle = 0x801F001C + ERROR_FLT_NO_WAITER_FOR_REPLY Handle = 0x801F0020 + ERROR_FLT_REGISTRATION_BUSY Handle = 0x801F0023 + ERROR_HUNG_DISPLAY_DRIVER_THREAD Handle = 0x80260001 + DWM_E_COMPOSITIONDISABLED Handle = 0x80263001 + DWM_E_REMOTING_NOT_SUPPORTED Handle = 0x80263002 + DWM_E_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x80263003 + DWM_E_NOT_QUEUING_PRESENTS Handle = 0x80263004 + DWM_E_ADAPTER_NOT_FOUND Handle = 0x80263005 + DWM_S_GDI_REDIRECTION_SURFACE Handle = 0x00263005 + DWM_E_TEXTURE_TOO_LARGE Handle = 0x80263007 + DWM_S_GDI_REDIRECTION_SURFACE_BLT_VIA_GDI Handle = 0x00263008 + ERROR_MONITOR_NO_DESCRIPTOR Handle = 0x00261001 + ERROR_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT Handle = 0x00261002 + ERROR_MONITOR_INVALID_DESCRIPTOR_CHECKSUM Handle = 0xC0261003 + ERROR_MONITOR_INVALID_STANDARD_TIMING_BLOCK Handle = 0xC0261004 + ERROR_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED Handle = 0xC0261005 + ERROR_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK Handle = 0xC0261006 + ERROR_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK Handle = 0xC0261007 + ERROR_MONITOR_NO_MORE_DESCRIPTOR_DATA Handle = 0xC0261008 + ERROR_MONITOR_INVALID_DETAILED_TIMING_BLOCK Handle = 0xC0261009 + ERROR_MONITOR_INVALID_MANUFACTURE_DATE Handle = 0xC026100A + ERROR_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER Handle = 0xC0262000 + ERROR_GRAPHICS_INSUFFICIENT_DMA_BUFFER Handle = 0xC0262001 + ERROR_GRAPHICS_INVALID_DISPLAY_ADAPTER Handle = 0xC0262002 + ERROR_GRAPHICS_ADAPTER_WAS_RESET Handle = 0xC0262003 + ERROR_GRAPHICS_INVALID_DRIVER_MODEL Handle = 0xC0262004 + ERROR_GRAPHICS_PRESENT_MODE_CHANGED Handle = 0xC0262005 + ERROR_GRAPHICS_PRESENT_OCCLUDED Handle = 0xC0262006 + ERROR_GRAPHICS_PRESENT_DENIED Handle = 0xC0262007 + ERROR_GRAPHICS_CANNOTCOLORCONVERT Handle = 0xC0262008 + ERROR_GRAPHICS_DRIVER_MISMATCH Handle = 0xC0262009 + ERROR_GRAPHICS_PARTIAL_DATA_POPULATED Handle = 0x4026200A + ERROR_GRAPHICS_PRESENT_REDIRECTION_DISABLED Handle = 0xC026200B + ERROR_GRAPHICS_PRESENT_UNOCCLUDED Handle = 0xC026200C + ERROR_GRAPHICS_WINDOWDC_NOT_AVAILABLE Handle = 0xC026200D + 
ERROR_GRAPHICS_WINDOWLESS_PRESENT_DISABLED Handle = 0xC026200E + ERROR_GRAPHICS_PRESENT_INVALID_WINDOW Handle = 0xC026200F + ERROR_GRAPHICS_PRESENT_BUFFER_NOT_BOUND Handle = 0xC0262010 + ERROR_GRAPHICS_VAIL_STATE_CHANGED Handle = 0xC0262011 + ERROR_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN Handle = 0xC0262012 + ERROR_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED Handle = 0xC0262013 + ERROR_GRAPHICS_NO_VIDEO_MEMORY Handle = 0xC0262100 + ERROR_GRAPHICS_CANT_LOCK_MEMORY Handle = 0xC0262101 + ERROR_GRAPHICS_ALLOCATION_BUSY Handle = 0xC0262102 + ERROR_GRAPHICS_TOO_MANY_REFERENCES Handle = 0xC0262103 + ERROR_GRAPHICS_TRY_AGAIN_LATER Handle = 0xC0262104 + ERROR_GRAPHICS_TRY_AGAIN_NOW Handle = 0xC0262105 + ERROR_GRAPHICS_ALLOCATION_INVALID Handle = 0xC0262106 + ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE Handle = 0xC0262107 + ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED Handle = 0xC0262108 + ERROR_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION Handle = 0xC0262109 + ERROR_GRAPHICS_INVALID_ALLOCATION_USAGE Handle = 0xC0262110 + ERROR_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION Handle = 0xC0262111 + ERROR_GRAPHICS_ALLOCATION_CLOSED Handle = 0xC0262112 + ERROR_GRAPHICS_INVALID_ALLOCATION_INSTANCE Handle = 0xC0262113 + ERROR_GRAPHICS_INVALID_ALLOCATION_HANDLE Handle = 0xC0262114 + ERROR_GRAPHICS_WRONG_ALLOCATION_DEVICE Handle = 0xC0262115 + ERROR_GRAPHICS_ALLOCATION_CONTENT_LOST Handle = 0xC0262116 + ERROR_GRAPHICS_GPU_EXCEPTION_ON_DEVICE Handle = 0xC0262200 + ERROR_GRAPHICS_SKIP_ALLOCATION_PREPARATION Handle = 0x40262201 + ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY Handle = 0xC0262300 + ERROR_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED Handle = 0xC0262301 + ERROR_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED Handle = 0xC0262302 + ERROR_GRAPHICS_INVALID_VIDPN Handle = 0xC0262303 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE Handle = 0xC0262304 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET Handle = 0xC0262305 + ERROR_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED Handle = 0xC0262306 + ERROR_GRAPHICS_MODE_NOT_PINNED Handle = 0x00262307 + ERROR_GRAPHICS_INVALID_VIDPN_SOURCEMODESET Handle = 0xC0262308 + ERROR_GRAPHICS_INVALID_VIDPN_TARGETMODESET Handle = 0xC0262309 + ERROR_GRAPHICS_INVALID_FREQUENCY Handle = 0xC026230A + ERROR_GRAPHICS_INVALID_ACTIVE_REGION Handle = 0xC026230B + ERROR_GRAPHICS_INVALID_TOTAL_REGION Handle = 0xC026230C + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE Handle = 0xC0262310 + ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE Handle = 0xC0262311 + ERROR_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET Handle = 0xC0262312 + ERROR_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY Handle = 0xC0262313 + ERROR_GRAPHICS_MODE_ALREADY_IN_MODESET Handle = 0xC0262314 + ERROR_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET Handle = 0xC0262315 + ERROR_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET Handle = 0xC0262316 + ERROR_GRAPHICS_SOURCE_ALREADY_IN_SET Handle = 0xC0262317 + ERROR_GRAPHICS_TARGET_ALREADY_IN_SET Handle = 0xC0262318 + ERROR_GRAPHICS_INVALID_VIDPN_PRESENT_PATH Handle = 0xC0262319 + ERROR_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY Handle = 0xC026231A + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET Handle = 0xC026231B + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE Handle = 0xC026231C + ERROR_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET Handle = 0xC026231D + ERROR_GRAPHICS_NO_PREFERRED_MODE Handle = 0x0026231E + ERROR_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET Handle = 0xC026231F + ERROR_GRAPHICS_STALE_MODESET Handle = 0xC0262320 + ERROR_GRAPHICS_INVALID_MONITOR_SOURCEMODESET Handle = 0xC0262321 + ERROR_GRAPHICS_INVALID_MONITOR_SOURCE_MODE Handle = 0xC0262322 
+ ERROR_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN Handle = 0xC0262323 + ERROR_GRAPHICS_MODE_ID_MUST_BE_UNIQUE Handle = 0xC0262324 + ERROR_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION Handle = 0xC0262325 + ERROR_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES Handle = 0xC0262326 + ERROR_GRAPHICS_PATH_NOT_IN_TOPOLOGY Handle = 0xC0262327 + ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE Handle = 0xC0262328 + ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET Handle = 0xC0262329 + ERROR_GRAPHICS_INVALID_MONITORDESCRIPTORSET Handle = 0xC026232A + ERROR_GRAPHICS_INVALID_MONITORDESCRIPTOR Handle = 0xC026232B + ERROR_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET Handle = 0xC026232C + ERROR_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET Handle = 0xC026232D + ERROR_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE Handle = 0xC026232E + ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE Handle = 0xC026232F + ERROR_GRAPHICS_RESOURCES_NOT_RELATED Handle = 0xC0262330 + ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE Handle = 0xC0262331 + ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE Handle = 0xC0262332 + ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET Handle = 0xC0262333 + ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER Handle = 0xC0262334 + ERROR_GRAPHICS_NO_VIDPNMGR Handle = 0xC0262335 + ERROR_GRAPHICS_NO_ACTIVE_VIDPN Handle = 0xC0262336 + ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY Handle = 0xC0262337 + ERROR_GRAPHICS_MONITOR_NOT_CONNECTED Handle = 0xC0262338 + ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY Handle = 0xC0262339 + ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE Handle = 0xC026233A + ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE Handle = 0xC026233B + ERROR_GRAPHICS_INVALID_STRIDE Handle = 0xC026233C + ERROR_GRAPHICS_INVALID_PIXELFORMAT Handle = 0xC026233D + ERROR_GRAPHICS_INVALID_COLORBASIS Handle = 0xC026233E + ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE Handle = 0xC026233F + ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY Handle = 0xC0262340 + ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT Handle = 0xC0262341 + ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0xC0262342 + ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN Handle = 0xC0262343 + ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL Handle = 0xC0262344 + ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION Handle = 0xC0262345 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED Handle = 0xC0262346 + ERROR_GRAPHICS_INVALID_GAMMA_RAMP Handle = 0xC0262347 + ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED Handle = 0xC0262348 + ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED Handle = 0xC0262349 + ERROR_GRAPHICS_MODE_NOT_IN_MODESET Handle = 0xC026234A + ERROR_GRAPHICS_DATASET_IS_EMPTY Handle = 0x0026234B + ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET Handle = 0x0026234C + ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON Handle = 0xC026234D + ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE Handle = 0xC026234E + ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE Handle = 0xC026234F + ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS Handle = 0xC0262350 + ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED Handle = 0x00262351 + ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING Handle = 0xC0262352 + ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED Handle = 0xC0262353 + ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS Handle = 0xC0262354 + ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT Handle = 0xC0262355 + ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM Handle = 0xC0262356 + ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN Handle = 0xC0262357 + ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT 
Handle = 0xC0262358 + ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED Handle = 0xC0262359 + ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION Handle = 0xC026235A + ERROR_GRAPHICS_INVALID_CLIENT_TYPE Handle = 0xC026235B + ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET Handle = 0xC026235C + ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED Handle = 0xC0262400 + ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED Handle = 0xC0262401 + ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS Handle = 0x4026242F + ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER Handle = 0xC0262430 + ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED Handle = 0xC0262431 + ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED Handle = 0xC0262432 + ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY Handle = 0xC0262433 + ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED Handle = 0xC0262434 + ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON Handle = 0xC0262435 + ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE Handle = 0xC0262436 + ERROR_GRAPHICS_LEADLINK_START_DEFERRED Handle = 0x40262437 + ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER Handle = 0xC0262438 + ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY Handle = 0x40262439 + ERROR_GRAPHICS_START_DEFERRED Handle = 0x4026243A + ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED Handle = 0xC026243B + ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS Handle = 0x4026243C + ERROR_GRAPHICS_OPM_NOT_SUPPORTED Handle = 0xC0262500 + ERROR_GRAPHICS_COPP_NOT_SUPPORTED Handle = 0xC0262501 + ERROR_GRAPHICS_UAB_NOT_SUPPORTED Handle = 0xC0262502 + ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS Handle = 0xC0262503 + ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST Handle = 0xC0262505 + ERROR_GRAPHICS_OPM_INTERNAL_ERROR Handle = 0xC026250B + ERROR_GRAPHICS_OPM_INVALID_HANDLE Handle = 0xC026250C + ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH Handle = 0xC026250E + ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED Handle = 0xC026250F + ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED Handle = 0xC0262510 + ERROR_GRAPHICS_PVP_HFS_FAILED Handle = 0xC0262511 + ERROR_GRAPHICS_OPM_INVALID_SRM Handle = 0xC0262512 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP Handle = 0xC0262513 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP Handle = 0xC0262514 + ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA Handle = 0xC0262515 + ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET Handle = 0xC0262516 + ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH Handle = 0xC0262517 + ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE Handle = 0xC0262518 + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS Handle = 0xC026251A + ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC026251B + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS Handle = 0xC026251C + ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST Handle = 0xC026251D + ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR Handle = 0xC026251E + ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS Handle = 0xC026251F + ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED Handle = 0xC0262520 + ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST Handle = 0xC0262521 + ERROR_GRAPHICS_I2C_NOT_SUPPORTED Handle = 0xC0262580 + ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST Handle = 0xC0262581 + ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA Handle = 0xC0262582 + ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA Handle = 0xC0262583 + ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED Handle = 0xC0262584 + ERROR_GRAPHICS_DDCCI_INVALID_DATA Handle = 0xC0262585 + ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE Handle = 0xC0262586 + ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING Handle = 0xC0262587 + ERROR_GRAPHICS_MCA_INTERNAL_ERROR Handle = 0xC0262588 + 
ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND Handle = 0xC0262589 + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH Handle = 0xC026258A + ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM Handle = 0xC026258B + ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE Handle = 0xC026258C + ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS Handle = 0xC026258D + ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE Handle = 0xC02625D8 + ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION Handle = 0xC02625D9 + ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION Handle = 0xC02625DA + ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH Handle = 0xC02625DB + ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION Handle = 0xC02625DC + ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED Handle = 0xC02625DE + ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE Handle = 0xC02625DF + ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED Handle = 0xC02625E0 + ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME Handle = 0xC02625E1 + ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP Handle = 0xC02625E2 + ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED Handle = 0xC02625E3 + ERROR_GRAPHICS_INVALID_POINTER Handle = 0xC02625E4 + ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE Handle = 0xC02625E5 + ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL Handle = 0xC02625E6 + ERROR_GRAPHICS_INTERNAL_ERROR Handle = 0xC02625E7 + ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC02605E8 + NAP_E_INVALID_PACKET Handle = 0x80270001 + NAP_E_MISSING_SOH Handle = 0x80270002 + NAP_E_CONFLICTING_ID Handle = 0x80270003 + NAP_E_NO_CACHED_SOH Handle = 0x80270004 + NAP_E_STILL_BOUND Handle = 0x80270005 + NAP_E_NOT_REGISTERED Handle = 0x80270006 + NAP_E_NOT_INITIALIZED Handle = 0x80270007 + NAP_E_MISMATCHED_ID Handle = 0x80270008 + NAP_E_NOT_PENDING Handle = 0x80270009 + NAP_E_ID_NOT_FOUND Handle = 0x8027000A + NAP_E_MAXSIZE_TOO_SMALL Handle = 0x8027000B + NAP_E_SERVICE_NOT_RUNNING Handle = 0x8027000C + NAP_S_CERT_ALREADY_PRESENT Handle = 0x0027000D + NAP_E_ENTITY_DISABLED Handle = 0x8027000E + NAP_E_NETSH_GROUPPOLICY_ERROR Handle = 0x8027000F + NAP_E_TOO_MANY_CALLS Handle = 0x80270010 + NAP_E_SHV_CONFIG_EXISTED Handle = 0x80270011 + NAP_E_SHV_CONFIG_NOT_FOUND Handle = 0x80270012 + NAP_E_SHV_TIMEOUT Handle = 0x80270013 + TPM_E_ERROR_MASK Handle = 0x80280000 + TPM_E_AUTHFAIL Handle = 0x80280001 + TPM_E_BADINDEX Handle = 0x80280002 + TPM_E_BAD_PARAMETER Handle = 0x80280003 + TPM_E_AUDITFAILURE Handle = 0x80280004 + TPM_E_CLEAR_DISABLED Handle = 0x80280005 + TPM_E_DEACTIVATED Handle = 0x80280006 + TPM_E_DISABLED Handle = 0x80280007 + TPM_E_DISABLED_CMD Handle = 0x80280008 + TPM_E_FAIL Handle = 0x80280009 + TPM_E_BAD_ORDINAL Handle = 0x8028000A + TPM_E_INSTALL_DISABLED Handle = 0x8028000B + TPM_E_INVALID_KEYHANDLE Handle = 0x8028000C + TPM_E_KEYNOTFOUND Handle = 0x8028000D + TPM_E_INAPPROPRIATE_ENC Handle = 0x8028000E + TPM_E_MIGRATEFAIL Handle = 0x8028000F + TPM_E_INVALID_PCR_INFO Handle = 0x80280010 + TPM_E_NOSPACE Handle = 0x80280011 + TPM_E_NOSRK Handle = 0x80280012 + TPM_E_NOTSEALED_BLOB Handle = 0x80280013 + TPM_E_OWNER_SET Handle = 0x80280014 + TPM_E_RESOURCES Handle = 0x80280015 + TPM_E_SHORTRANDOM Handle = 0x80280016 + TPM_E_SIZE Handle = 0x80280017 + TPM_E_WRONGPCRVAL Handle = 0x80280018 + TPM_E_BAD_PARAM_SIZE Handle = 0x80280019 + TPM_E_SHA_THREAD Handle = 0x8028001A + TPM_E_SHA_ERROR Handle = 0x8028001B + TPM_E_FAILEDSELFTEST Handle = 0x8028001C + TPM_E_AUTH2FAIL Handle = 0x8028001D + TPM_E_BADTAG Handle = 0x8028001E + TPM_E_IOERROR Handle = 0x8028001F + 
TPM_E_ENCRYPT_ERROR Handle = 0x80280020 + TPM_E_DECRYPT_ERROR Handle = 0x80280021 + TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022 + TPM_E_NO_ENDORSEMENT Handle = 0x80280023 + TPM_E_INVALID_KEYUSAGE Handle = 0x80280024 + TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025 + TPM_E_INVALID_POSTINIT Handle = 0x80280026 + TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027 + TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028 + TPM_E_BAD_MIGRATION Handle = 0x80280029 + TPM_E_BAD_SCHEME Handle = 0x8028002A + TPM_E_BAD_DATASIZE Handle = 0x8028002B + TPM_E_BAD_MODE Handle = 0x8028002C + TPM_E_BAD_PRESENCE Handle = 0x8028002D + TPM_E_BAD_VERSION Handle = 0x8028002E + TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F + TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030 + TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031 + TPM_E_NOTRESETABLE Handle = 0x80280032 + TPM_E_NOTLOCAL Handle = 0x80280033 + TPM_E_BAD_TYPE Handle = 0x80280034 + TPM_E_INVALID_RESOURCE Handle = 0x80280035 + TPM_E_NOTFIPS Handle = 0x80280036 + TPM_E_INVALID_FAMILY Handle = 0x80280037 + TPM_E_NO_NV_PERMISSION Handle = 0x80280038 + TPM_E_REQUIRES_SIGN Handle = 0x80280039 + TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A + TPM_E_AUTH_CONFLICT Handle = 0x8028003B + TPM_E_AREA_LOCKED Handle = 0x8028003C + TPM_E_BAD_LOCALITY Handle = 0x8028003D + TPM_E_READ_ONLY Handle = 0x8028003E + TPM_E_PER_NOWRITE Handle = 0x8028003F + TPM_E_FAMILYCOUNT Handle = 0x80280040 + TPM_E_WRITE_LOCKED Handle = 0x80280041 + TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 + TPM_E_INVALID_STRUCTURE Handle = 0x80280043 + TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 + TPM_E_BAD_COUNTER Handle = 0x80280045 + TPM_E_NOT_FULLWRITE Handle = 0x80280046 + TPM_E_CONTEXT_GAP Handle = 0x80280047 + TPM_E_MAXNVWRITES Handle = 0x80280048 + TPM_E_NOOPERATOR Handle = 0x80280049 + TPM_E_RESOURCEMISSING Handle = 0x8028004A + TPM_E_DELEGATE_LOCK Handle = 0x8028004B + TPM_E_DELEGATE_FAMILY Handle = 0x8028004C + TPM_E_DELEGATE_ADMIN Handle = 0x8028004D + TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E + TPM_E_OWNER_CONTROL Handle = 0x8028004F + TPM_E_DAA_RESOURCES Handle = 0x80280050 + TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 + TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 + TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 + TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 + TPM_E_DAA_STAGE Handle = 0x80280055 + TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 + TPM_E_DAA_WRONG_W Handle = 0x80280057 + TPM_E_BAD_HANDLE Handle = 0x80280058 + TPM_E_BAD_DELEGATE Handle = 0x80280059 + TPM_E_BADCONTEXT Handle = 0x8028005A + TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B + TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C + TPM_E_MA_DESTINATION Handle = 0x8028005D + TPM_E_MA_SOURCE Handle = 0x8028005E + TPM_E_MA_AUTHORITY Handle = 0x8028005F + TPM_E_PERMANENTEK Handle = 0x80280061 + TPM_E_BAD_SIGNATURE Handle = 0x80280062 + TPM_E_NOCONTEXTSPACE Handle = 0x80280063 + TPM_20_E_ASYMMETRIC Handle = 0x80280081 + TPM_20_E_ATTRIBUTES Handle = 0x80280082 + TPM_20_E_HASH Handle = 0x80280083 + TPM_20_E_VALUE Handle = 0x80280084 + TPM_20_E_HIERARCHY Handle = 0x80280085 + TPM_20_E_KEY_SIZE Handle = 0x80280087 + TPM_20_E_MGF Handle = 0x80280088 + TPM_20_E_MODE Handle = 0x80280089 + TPM_20_E_TYPE Handle = 0x8028008A + TPM_20_E_HANDLE Handle = 0x8028008B + TPM_20_E_KDF Handle = 0x8028008C + TPM_20_E_RANGE Handle = 0x8028008D + TPM_20_E_AUTH_FAIL Handle = 0x8028008E + TPM_20_E_NONCE Handle = 0x8028008F + TPM_20_E_PP Handle = 0x80280090 + TPM_20_E_SCHEME Handle = 0x80280092 + TPM_20_E_SIZE Handle = 0x80280095 + TPM_20_E_SYMMETRIC Handle = 0x80280096 + TPM_20_E_TAG Handle = 
0x80280097 + TPM_20_E_SELECTOR Handle = 0x80280098 + TPM_20_E_INSUFFICIENT Handle = 0x8028009A + TPM_20_E_SIGNATURE Handle = 0x8028009B + TPM_20_E_KEY Handle = 0x8028009C + TPM_20_E_POLICY_FAIL Handle = 0x8028009D + TPM_20_E_INTEGRITY Handle = 0x8028009F + TPM_20_E_TICKET Handle = 0x802800A0 + TPM_20_E_RESERVED_BITS Handle = 0x802800A1 + TPM_20_E_BAD_AUTH Handle = 0x802800A2 + TPM_20_E_EXPIRED Handle = 0x802800A3 + TPM_20_E_POLICY_CC Handle = 0x802800A4 + TPM_20_E_BINDING Handle = 0x802800A5 + TPM_20_E_CURVE Handle = 0x802800A6 + TPM_20_E_ECC_POINT Handle = 0x802800A7 + TPM_20_E_INITIALIZE Handle = 0x80280100 + TPM_20_E_FAILURE Handle = 0x80280101 + TPM_20_E_SEQUENCE Handle = 0x80280103 + TPM_20_E_PRIVATE Handle = 0x8028010B + TPM_20_E_HMAC Handle = 0x80280119 + TPM_20_E_DISABLED Handle = 0x80280120 + TPM_20_E_EXCLUSIVE Handle = 0x80280121 + TPM_20_E_ECC_CURVE Handle = 0x80280123 + TPM_20_E_AUTH_TYPE Handle = 0x80280124 + TPM_20_E_AUTH_MISSING Handle = 0x80280125 + TPM_20_E_POLICY Handle = 0x80280126 + TPM_20_E_PCR Handle = 0x80280127 + TPM_20_E_PCR_CHANGED Handle = 0x80280128 + TPM_20_E_UPGRADE Handle = 0x8028012D + TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E + TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F + TPM_20_E_REBOOT Handle = 0x80280130 + TPM_20_E_UNBALANCED Handle = 0x80280131 + TPM_20_E_COMMAND_SIZE Handle = 0x80280142 + TPM_20_E_COMMAND_CODE Handle = 0x80280143 + TPM_20_E_AUTHSIZE Handle = 0x80280144 + TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 + TPM_20_E_NV_RANGE Handle = 0x80280146 + TPM_20_E_NV_SIZE Handle = 0x80280147 + TPM_20_E_NV_LOCKED Handle = 0x80280148 + TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 + TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A + TPM_20_E_NV_SPACE Handle = 0x8028014B + TPM_20_E_NV_DEFINED Handle = 0x8028014C + TPM_20_E_BAD_CONTEXT Handle = 0x80280150 + TPM_20_E_CPHASH Handle = 0x80280151 + TPM_20_E_PARENT Handle = 0x80280152 + TPM_20_E_NEEDS_TEST Handle = 0x80280153 + TPM_20_E_NO_RESULT Handle = 0x80280154 + TPM_20_E_SENSITIVE Handle = 0x80280155 + TPM_E_COMMAND_BLOCKED Handle = 0x80280400 + TPM_E_INVALID_HANDLE Handle = 0x80280401 + TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 + TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 + TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 + TPM_E_RETRY Handle = 0x80280800 + TPM_E_NEEDS_SELFTEST Handle = 0x80280801 + TPM_E_DOING_SELFTEST Handle = 0x80280802 + TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 + TPM_20_E_CONTEXT_GAP Handle = 0x80280901 + TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 + TPM_20_E_SESSION_MEMORY Handle = 0x80280903 + TPM_20_E_MEMORY Handle = 0x80280904 + TPM_20_E_SESSION_HANDLES Handle = 0x80280905 + TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 + TPM_20_E_LOCALITY Handle = 0x80280907 + TPM_20_E_YIELDED Handle = 0x80280908 + TPM_20_E_CANCELED Handle = 0x80280909 + TPM_20_E_TESTING Handle = 0x8028090A + TPM_20_E_NV_RATE Handle = 0x80280920 + TPM_20_E_LOCKOUT Handle = 0x80280921 + TPM_20_E_RETRY Handle = 0x80280922 + TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 + TBS_E_INTERNAL_ERROR Handle = 0x80284001 + TBS_E_BAD_PARAMETER Handle = 0x80284002 + TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 + TBS_E_INVALID_CONTEXT Handle = 0x80284004 + TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 + TBS_E_IOERROR Handle = 0x80284006 + TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 + TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 + TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 + TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A + TBS_E_SERVICE_START_PENDING Handle = 0x8028400B + TBS_E_PPI_NOT_SUPPORTED 
Handle = 0x8028400C + TBS_E_COMMAND_CANCELED Handle = 0x8028400D + TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E + TBS_E_TPM_NOT_FOUND Handle = 0x8028400F + TBS_E_SERVICE_DISABLED Handle = 0x80284010 + TBS_E_NO_EVENT_LOG Handle = 0x80284011 + TBS_E_ACCESS_DENIED Handle = 0x80284012 + TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 + TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 + TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 + TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 + TPMAPI_E_INVALID_STATE Handle = 0x80290100 + TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 + TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 + TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 + TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 + TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 + TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 + TPMAPI_E_INTERNAL_ERROR Handle = 0x80290107 + TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 + TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 + TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A + TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B + TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C + TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D + TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E + TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F + TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 + TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 + TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 + TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 + TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 + TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 + TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 + TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 + TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 + TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 + TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A + TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B + TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C + TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D + TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E + TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F + TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 + TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 + TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 + TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 + TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 + TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 + TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 + TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 + TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 + TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 + TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A + TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B + TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C + TBSIMP_E_BUFFER_TOO_SMALL Handle = 0x80290200 + TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 + TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 + TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 + TBSIMP_E_TPM_ERROR Handle = 0x80290204 + TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 + TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 + TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 + TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 + TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 + TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A + TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B + TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C + TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D + TBSIMP_E_LIST_NOT_FOUND Handle 
= 0x8029020E + TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F + TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 + TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 + TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 + TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 + TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 + TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 + TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 + TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 + TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 + TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 + TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A + TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B + TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 + TPM_E_PPI_USER_ABORT Handle = 0x80290301 + TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 + TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 + TPM_E_PPI_BLOCKED_IN_BIOS Handle = 0x80290304 + TPM_E_PCP_ERROR_MASK Handle = 0x80290400 + TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 + TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 + TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 + TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 + TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 + TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 + TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 + TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 + TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 + TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A + TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B + TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C + TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E + TPM_E_KEY_NOT_LOADED Handle = 0x8029040F + TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 + TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411 + TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 + TPM_E_NOT_PCR_BOUND Handle = 0x80290413 + TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 + TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 + TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 + TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 + TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 + TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 + TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A + TPM_E_LOCKED_OUT Handle = 0x8029041B + TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C + TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D + TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E + TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F + TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 + TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 + TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 + TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 + TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 + PLA_E_DCS_NOT_FOUND Handle = 0x80300002 + PLA_E_DCS_IN_USE Handle = 0x803000AA + PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 + PLA_E_NO_MIN_DISK Handle = 0x80300070 + PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 + PLA_S_PROPERTY_IGNORED Handle = 0x00300100 + PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 + PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 + PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 + PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 + PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 + PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 + PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 + PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 + PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 + PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A + PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B + PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C + PLA_E_NO_DUPLICATES Handle = 0x8030010D + 
PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E + PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F + PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 + PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 + PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 + PLA_E_CABAPI_FAILURE Handle = 0x80300113 + FVE_E_LOCKED_VOLUME Handle = 0x80310000 + FVE_E_NOT_ENCRYPTED Handle = 0x80310001 + FVE_E_NO_TPM_BIOS Handle = 0x80310002 + FVE_E_NO_MBR_METRIC Handle = 0x80310003 + FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 + FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 + FVE_E_WRONG_BOOTMGR Handle = 0x80310006 + FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 + FVE_E_NOT_ACTIVATED Handle = 0x80310008 + FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 + FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A + FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B + FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C + FVE_E_AD_NO_VALUES Handle = 0x8031000D + FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E + FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F + FVE_E_BAD_INFORMATION Handle = 0x80310010 + FVE_E_TOO_SMALL Handle = 0x80310011 + FVE_E_SYSTEM_VOLUME Handle = 0x80310012 + FVE_E_FAILED_WRONG_FS Handle = 0x80310013 + FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 + FVE_E_NOT_SUPPORTED Handle = 0x80310015 + FVE_E_BAD_DATA Handle = 0x80310016 + FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 + FVE_E_TPM_NOT_OWNED Handle = 0x80310018 + FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 + FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A + FVE_E_CONV_READ Handle = 0x8031001B + FVE_E_CONV_WRITE Handle = 0x8031001C + FVE_E_KEY_REQUIRED Handle = 0x8031001D + FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E + FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F + FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 + FVE_E_PROTECTION_DISABLED Handle = 0x80310021 + FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 + FVE_E_FOREIGN_VOLUME Handle = 0x80310023 + FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 + FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 + FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 + FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 + FVE_E_NOT_OS_VOLUME Handle = 0x80310028 + FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 + FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A + FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B + FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C + FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D + FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E + FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 + FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 + FVE_E_RELATIVE_PATH Handle = 0x80310032 + FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 + FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 + FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 + FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 + FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 + FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 + FVE_E_NOT_DECRYPTED Handle = 0x80310039 + FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A + FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B + FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C + FVE_E_KEYFILE_INVALID Handle = 0x8031003D + FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E + FVE_E_TPM_DISABLED Handle = 0x8031003F + FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 + FVE_E_TPM_INVALID_PCR Handle = 0x80310041 + FVE_E_TPM_NO_VMK Handle = 0x80310042 + FVE_E_PIN_INVALID Handle = 0x80310043 + FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 + FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 + FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 + 
FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 + FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 + FVE_E_NO_LICENSE Handle = 0x80310049 + FVE_E_NOT_ON_STACK Handle = 0x8031004A + FVE_E_FS_MOUNTED Handle = 0x8031004B + FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C + FVE_E_DRY_RUN_FAILED Handle = 0x8031004D + FVE_E_REBOOT_REQUIRED Handle = 0x8031004E + FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F + FVE_E_RAW_ACCESS Handle = 0x80310050 + FVE_E_RAW_BLOCKED Handle = 0x80310051 + FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 + FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 + FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 + FVE_E_MOR_FAILED Handle = 0x80310055 + FVE_E_HIDDEN_VOLUME Handle = 0x80310056 + FVE_E_TRANSIENT_STATE Handle = 0x80310057 + FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 + FVE_E_VOLUME_HANDLE_OPEN Handle = 0x80310059 + FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A + FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B + FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C + FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D + FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E + FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F + FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 + FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 + FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 + FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 + FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 + FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 + FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 + FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 + FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068 + FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 + FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A + FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B + FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C + FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D + FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E + FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F + FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 + FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 + FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 + FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 + FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 + FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 + FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 + FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 + FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 + FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 + FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 + FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 + FVE_E_RECOVERY_PARTITION Handle = 0x80310082 + FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 + FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 + FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 + FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 + FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 + FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 + FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 + FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 + FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 + FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 + FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 + 
FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 + FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 + FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 + FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 + FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 + FVE_E_ENH_PIN_INVALID Handle = 0x80310099 + FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A + FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B + FVE_E_EFI_ONLY Handle = 0x8031009C + FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D + FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E + FVE_E_INVALID_NKP_CERT Handle = 0x8031009F + FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 + FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 + FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 + FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 + FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 + FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 + FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 + FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 + FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 + FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 + FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA + FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB + FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC + FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD + FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE + FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF + FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 + FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 + FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 + FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3 + FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 + FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 + FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 + FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 + FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 + FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 + FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA + FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB + FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC + FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD + FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE + FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF + FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 + FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 + FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 + FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 + FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 + FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 + FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 + FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 + FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 + FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 + FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA + FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB + FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC + FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD + FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE + FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF + FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 + FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 + FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 + 
FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 + FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 + FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 + FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 + FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 + FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 + FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_OS_DRIVE Handle = 0x803100DA + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_FIXED_DRIVE Handle = 0x803100DB + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_REMOVABLE_DRIVE Handle = 0x803100DC + FVE_E_KEY_ROTATION_NOT_SUPPORTED Handle = 0x803100DD + FVE_E_EXECUTE_REQUEST_SENT_TOO_SOON Handle = 0x803100DE + FVE_E_KEY_ROTATION_NOT_ENABLED Handle = 0x803100DF + FVE_E_DEVICE_NOT_JOINED Handle = 0x803100E0 + FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 + FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 + FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 + FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 + FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 + FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 + FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 + FWP_E_NOT_FOUND Handle = 0x80320008 + FWP_E_ALREADY_EXISTS Handle = 0x80320009 + FWP_E_IN_USE Handle = 0x8032000A + FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B + FWP_E_WRONG_SESSION Handle = 0x8032000C + FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D + FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E + FWP_E_TXN_ABORTED Handle = 0x8032000F + FWP_E_SESSION_ABORTED Handle = 0x80320010 + FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 + FWP_E_TIMEOUT Handle = 0x80320012 + FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 + FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 + FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 + FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 + FWP_E_BUILTIN_OBJECT Handle = 0x80320017 + FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 + FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 + FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A + FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B + FWP_E_NULL_POINTER Handle = 0x8032001C + FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D + FWP_E_INVALID_FLAGS Handle = 0x8032001E + FWP_E_INVALID_NET_MASK Handle = 0x8032001F + FWP_E_INVALID_RANGE Handle = 0x80320020 + FWP_E_INVALID_INTERVAL Handle = 0x80320021 + FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 + FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 + FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 + FWP_E_INVALID_WEIGHT Handle = 0x80320025 + FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 + FWP_E_TYPE_MISMATCH Handle = 0x80320027 + FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 + FWP_E_RESERVED Handle = 0x80320029 + FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A + FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B + FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C + FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D + FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E + FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F + FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 + FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 + FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 + FWP_E_NEVER_MATCH Handle = 0x80320033 + FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 + FWP_E_INVALID_PARAMETER Handle = 0x80320035 + FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 + FWP_E_CALLOUT_NOTIFICATION_FAILED Handle = 0x80320037 + FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 + FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 + FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A + 
FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B + FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C + FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D + FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E + FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F + FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 + FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 + FWP_E_INVALID_DNS_NAME Handle = 0x80320042 + FWP_E_STILL_ON Handle = 0x80320043 + FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 + FWP_E_DROP_NOICMP Handle = 0x80320104 + WS_S_ASYNC Handle = 0x003D0000 + WS_S_END Handle = 0x003D0001 + WS_E_INVALID_FORMAT Handle = 0x803D0000 + WS_E_OBJECT_FAULTED Handle = 0x803D0001 + WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 + WS_E_INVALID_OPERATION Handle = 0x803D0003 + WS_E_OPERATION_ABORTED Handle = 0x803D0004 + WS_E_ENDPOINT_ACCESS_DENIED Handle = 0x803D0005 + WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 + WS_E_OPERATION_ABANDONED Handle = 0x803D0007 + WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 + WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 + WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A + WS_E_ADDRESS_IN_USE Handle = 0x803D000B + WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C + WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D + WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E + WS_E_ENDPOINT_FAILURE Handle = 0x803D000F + WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 + WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 + WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 + WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 + WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 + WS_E_PROXY_FAILURE Handle = 0x803D0015 + WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 + WS_E_NOT_SUPPORTED Handle = 0x803D0017 + WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 + WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 + WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A + WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B + WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C + WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D + WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E + WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F + WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020 + WS_E_OTHER Handle = 0x803D0021 + WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 + WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 + ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 + ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 + ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 + ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 + ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 + ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 + ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 + ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A + ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B + ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C + ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D + ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB + ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F + ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 + ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 + ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 + ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 + ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 + ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 + ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 + ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A + 
ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B + ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C + ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D + ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E + ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F + ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 + ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 + ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A + ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B + ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C + ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D + ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E + ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F + ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 + ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 + ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED syscall.Errno = 0x80342000 + ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 + ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 + ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 + ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 + ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 + ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 + ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 + ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 + ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 + ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F + ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 + ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 + ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 + ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 + ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 + ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 + ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 + ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 + ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 + ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 + ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A + ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B + ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C + ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D + ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E + ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 + ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 + ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 + ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 + ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 + ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 + ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 + ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 + ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 + ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A + ERROR_HV_NO_DATA syscall.Errno = 0xC035001B + ERROR_HV_INACTIVE syscall.Errno = 0xC035001C + ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D + ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E + ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 + ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 + ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C + ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D + ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 
0xC035003E + ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F + ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 + ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 + ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 + ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 + ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 + ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 + ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 + ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 + ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F + ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 + ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 + ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 + ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 + ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 + ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 + ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 + ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 + ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 + ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 + ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 + ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 + ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 + ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 + ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A + ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B + ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C + ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D + ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E + ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F + ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 + ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 + ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 + ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 + ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 + ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 + ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 + ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 + ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 + ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 + ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A + ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B + ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C + ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D + ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E + ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F + ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 + ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 + ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 + ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 + ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 + ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 + ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 + ERROR_VID_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370027 + ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 + ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 + ERROR_VID_VTL_ACCESS_DENIED 
syscall.Errno = 0xC037002A + ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 + ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 + ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 + ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 + ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 + ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 + ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 + ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 + ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 + ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 + ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A + ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B + ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C + ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 0xC037010D + ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E + ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F + ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 + ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 + ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 + ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 + HCS_E_TERMINATED_DURING_START Handle = 0x80370100 + HCS_E_IMAGE_MISMATCH Handle = 0x80370101 + HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 + HCS_E_INVALID_STATE Handle = 0x80370105 + HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 + HCS_E_TERMINATED Handle = 0x80370107 + HCS_E_CONNECT_FAILED Handle = 0x80370108 + HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 + HCS_E_CONNECTION_CLOSED Handle = 0x8037010A + HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B + HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C + HCS_E_INVALID_JSON Handle = 0x8037010D + HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E + HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F + HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 + HCS_E_PROTOCOL_ERROR Handle = 0x80370111 + HCS_E_INVALID_LAYER Handle = 0x80370112 + HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 + HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 + HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115 + HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 + HCS_E_OPERATION_PENDING Handle = 0x80370117 + HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 + HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 + HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A + HCS_E_ACCESS_DENIED Handle = 0x8037011B + HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C + ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 + ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 + WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 + WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 + WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 + WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 + WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 + WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 + WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 + WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 + WHV_E_INVALID_VP_STATE Handle = 0x80370308 + WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 + ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 + ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 + ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 + ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 + ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 + 
ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 + ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 + ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 + ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 + ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 + ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 + ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 + ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 + ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A + ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B + ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C + ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D + ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E + ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F + ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 + ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 + ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 + ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 + ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 + ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 + ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 + ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 + ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A + ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B + ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C + ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D + ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E + ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F + ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 + ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 + ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022 + ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 + ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 + ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 + ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 + ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 + ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 + ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 + ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A + ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B + ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C + ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D + ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E + ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F + ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 + ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 + ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 + ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 + ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 + ERROR_VOLMGR_PACK_WITHOUT_QUORUM syscall.Errno = 0xC0380035 + ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036 + ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037 + ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 
0xC0380038 + ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039 + ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A + ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B + ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C + ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D + ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E + ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F + ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040 + ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041 + ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042 + ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043 + ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044 + ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045 + ERROR_VOLMGR_VOLUME_ID_INVALID syscall.Errno = 0xC0380046 + ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047 + ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048 + ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049 + ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A + ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B + ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C + ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D + ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E + ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F + ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050 + ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051 + ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052 + ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055 + ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056 + ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057 + ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058 + ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059 + ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A + ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno = 0xC038005B + ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C + ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 + ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 + ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 + ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 + ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 + ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 + ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 + ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 + ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 + ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 + ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 + ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 + ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A + ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B + ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C + ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D + ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E + ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F + ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 + ERROR_VHD_METADATA_WRITE_FAILURE 
syscall.Errno = 0xC03A0011 + ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 + ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 + ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 + ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 + ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 + ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 + ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 + ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 + ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A + ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B + ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C + ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D + ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E + ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F + ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 + ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 + ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 + ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 + ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 + ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 + ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 + ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 + ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 + ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 + ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A + ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 + ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 + HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 + HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 + HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 + HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 + HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 + HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 + HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 + HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 + HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 + HCN_E_INVALID_NETWORK Handle = 0x803B000A + HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B + HCN_E_INVALID_ENDPOINT Handle = 0x803B000C + HCN_E_INVALID_POLICY Handle = 0x803B000D + HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E + HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F + HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 + HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 + HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 + HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 + HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 + HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 + HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 + HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 + HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 + HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 + HCN_E_REGKEY_FAILURE Handle = 0x803B001A + HCN_E_INVALID_JSON Handle = 0x803B001B + HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C + HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D + HCN_E_INVALID_IP Handle = 0x803B001E + HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F + HCN_E_MANAGER_STOPPED Handle = 0x803B0020 + GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 + GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 + GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 + GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 + GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 + 
GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 + GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 + GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 + GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 + HCN_E_ICS_DISABLED Handle = 0x803B002A + HCN_E_ENDPOINT_NAMESPACE_ALREADY_EXISTS Handle = 0x803B002B + HCN_E_ENTITY_HAS_REFERENCES Handle = 0x803B002C + HCN_E_INVALID_INTERNAL_PORT Handle = 0x803B002D + HCN_E_NAMESPACE_ATTACH_FAILED Handle = 0x803B002E + HCN_E_ADDR_INVALID_OR_RESERVED Handle = 0x803B002F + SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 + SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 + SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 + SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 + SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 + SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 + SDIAG_E_DISABLED syscall.Errno = 0x803C0106 + SDIAG_E_TRUST syscall.Errno = 0x803C0107 + SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 + SDIAG_E_VERSION syscall.Errno = 0x803C0109 + SDIAG_E_RESOURCE syscall.Errno = 0x803C010A + SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B + WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 + WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 + WPN_E_INVALID_APP Handle = 0x803E0102 + WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 + WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 + WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 + WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 + WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 + WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 + WPN_E_CLOUD_DISABLED Handle = 0x803E0109 + WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 + WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A + WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B + WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C + WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 + WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 + WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 + WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 + WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 + WPN_E_TAG_SIZE Handle = 0x803E0116 + WPN_E_ACCESS_DENIED Handle = 0x803E0117 + WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 + WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 + WPN_E_DEV_ID_SIZE Handle = 0x803E0120 + WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A + WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B + WPN_E_OUT_OF_SESSION Handle = 0x803E0200 + WPN_E_POWER_SAVE Handle = 0x803E0201 + WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 + WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 + WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 + WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 + WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 + WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 + WPN_E_STORAGE_LOCKED Handle = 0x803E0208 + WPN_E_GROUP_SIZE Handle = 0x803E0209 + WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A + WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B + E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 + E_MBN_BAD_SIM Handle = 0x80548202 + E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 + E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 + E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 + E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 + E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 + E_MBN_RADIO_POWER_OFF Handle = 0x80548208 + E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 + E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A + E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B + E_MBN_INVALID_CACHE Handle = 0x8054820C + E_MBN_NOT_REGISTERED Handle = 0x8054820D + 
E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E + E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F + E_MBN_PIN_REQUIRED Handle = 0x80548210 + E_MBN_PIN_DISABLED Handle = 0x80548211 + E_MBN_FAILURE Handle = 0x80548212 + E_MBN_INVALID_PROFILE Handle = 0x80548218 + E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 + E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 + E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 + E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 + E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 + E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 + E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 + E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 + E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 + E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 + E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 + PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 + PEER_E_NOT_INITIALIZED Handle = 0x80630002 + PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 + PEER_E_NOT_LICENSED Handle = 0x80630004 + PEER_E_INVALID_GRAPH Handle = 0x80630010 + PEER_E_DBNAME_CHANGED Handle = 0x80630011 + PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 + PEER_E_GRAPH_NOT_READY Handle = 0x80630013 + PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 + PEER_E_GRAPH_IN_USE Handle = 0x80630015 + PEER_E_INVALID_DATABASE Handle = 0x80630016 + PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 + PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 + PEER_E_CONNECT_SELF Handle = 0x80630106 + PEER_E_ALREADY_LISTENING Handle = 0x80630107 + PEER_E_NODE_NOT_FOUND Handle = 0x80630108 + PEER_E_CONNECTION_FAILED Handle = 0x80630109 + PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A + PEER_E_CONNECTION_REFUSED Handle = 0x8063010B + PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 + PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 + PEER_E_NO_KEY_ACCESS Handle = 0x80630203 + PEER_E_GROUPS_EXIST Handle = 0x80630204 + PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 + PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 + PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 + PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 + PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 + PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 + PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 + PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 + PEER_E_INVALID_SEARCH Handle = 0x80630601 + PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 + PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 + PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 + PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 + PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 + PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 + PEER_E_NO_CLOUD Handle = 0x80631001 + PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 + PEER_E_INVALID_RECORD Handle = 0x80632010 + PEER_E_NOT_AUTHORIZED Handle = 0x80632020 + PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 + PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 + PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 + PEER_E_INVALID_PEER_NAME Handle = 0x80632050 + PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 + PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 + PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 + PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 + PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 + PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 + PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 + PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 + PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 + PEER_E_GROUP_NOT_READY Handle = 0x80632091 + PEER_E_GROUP_IN_USE Handle = 
0x80632092 + PEER_E_INVALID_GROUP Handle = 0x80632093 + PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 + PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095 + PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 + PEER_E_IDENTITY_DELETED Handle = 0x806320A0 + PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 + PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 + PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 + PEER_S_NO_EVENT_DATA Handle = 0x00630002 + PEER_S_ALREADY_CONNECTED Handle = 0x00632000 + PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 + PEER_S_NO_CONNECTIVITY Handle = 0x00630005 + PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 + PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 + PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 + PEER_E_NO_MORE Handle = 0x80634003 + PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 + PEER_E_INVITE_CANCELLED Handle = 0x80637000 + PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 + PEER_E_NOT_SIGNED_IN Handle = 0x80637003 + PEER_E_PRIVACY_DECLINED Handle = 0x80637004 + PEER_E_TIMEOUT Handle = 0x80637005 + PEER_E_INVALID_ADDRESS Handle = 0x80637007 + PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 + PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 + PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A + PEER_E_FW_DECLINED Handle = 0x8063700B + UI_E_CREATE_FAILED Handle = 0x802A0001 + UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 + UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 + UI_E_OBJECT_SEALED Handle = 0x802A0004 + UI_E_VALUE_NOT_SET Handle = 0x802A0005 + UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 + UI_E_INVALID_OUTPUT Handle = 0x802A0007 + UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 + UI_E_DIFFERENT_OWNER Handle = 0x802A0009 + UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A + UI_E_FP_OVERFLOW Handle = 0x802A000B + UI_E_WRONG_THREAD Handle = 0x802A000C + UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 + UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 + UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 + UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 + UI_E_LOOPS_OVERLAP Handle = 0x802A0105 + UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 + UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 + UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 + UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 + UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A + UI_E_INVALID_DIMENSION Handle = 0x802A010B + UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C + UI_E_WINDOW_CLOSED Handle = 0x802A0201 + E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 + E_BLUETOOTH_ATT_READ_NOT_PERMITTED Handle = 0x80650002 + E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 + E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 + E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 + E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 + E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C + E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D + E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F + E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 + E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 + E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 + E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 + 
E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 + E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 + E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 + E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 + STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 + STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 + STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 + STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 + STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 + STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 + STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 + STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 + STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A + STATEREPOSITORY_E_TRANSACTION_REQUIRED Handle = 0x8067000B + STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C + STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D + STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F + STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 + STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 + STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 + STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 + STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 + ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 + ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 + ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 + ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 + ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 + ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 + ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 + ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 + ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 + ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A + ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B + ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C + ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D + ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E + ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F + ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 + ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 + ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 0x80E70012 + ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 + ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 + ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 + ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 + ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 + ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 + ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 + ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 + ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 + ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 + ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 + ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 + ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 + ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A + ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 + ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 + ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 + ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 + 
ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 + ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 + ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 + ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 + ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 + ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 + ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A + DXGI_STATUS_OCCLUDED Handle = 0x087A0001 + DXGI_STATUS_CLIPPED Handle = 0x087A0002 + DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 + DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 + DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 + DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 + DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 + DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 + DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 + DXGI_ERROR_MORE_DATA Handle = 0x887A0003 + DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 + DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 + DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 + DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 + DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A + DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B + DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C + DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 + DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 + DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 + DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 + DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 + DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 + DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 + DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 + DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 + DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A + DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B + DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C + DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D + DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E + DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 + DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 + DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 + DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 + DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A + DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 + DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F + DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 + DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 + DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 + DXGI_ERROR_ALREADY_EXISTS Handle = 0x887A0036 + DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 + DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 + DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 + D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 + D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 + D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 + D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 + D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 + D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 + D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 + D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 + D2DERR_WRONG_STATE Handle = 0x88990001 + D2DERR_NOT_INITIALIZED Handle = 0x88990002 + D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 + D2DERR_SCANNER_FAILED Handle = 0x88990004 + D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 + D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 + D2DERR_ZERO_VECTOR Handle = 0x88990007 + D2DERR_INTERNAL_ERROR Handle = 
0x88990008 + D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 + D2DERR_INVALID_CALL Handle = 0x8899000A + D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B + D2DERR_RECREATE_TARGET Handle = 0x8899000C + D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D + D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E + D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F + D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 + D2DERR_BAD_NUMBER Handle = 0x88990011 + D2DERR_WRONG_FACTORY Handle = 0x88990012 + D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 + D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 + D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 + D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 + D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 + D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 + D2DERR_WIN32_ERROR Handle = 0x88990019 + D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A + D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B + D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C + D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D + D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E + D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F + D2DERR_CYCLIC_GRAPH Handle = 0x88990020 + D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 + D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 + D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 + D2DERR_INVALID_TARGET Handle = 0x88990024 + D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 + D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 + D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 + D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 + D2DERR_INVALID_PROPERTY Handle = 0x88990029 + D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A + D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B + D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C + D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D + D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E + DWRITE_E_FILEFORMAT Handle = 0x88985000 + DWRITE_E_UNEXPECTED Handle = 0x88985001 + DWRITE_E_NOFONT Handle = 0x88985002 + DWRITE_E_FILENOTFOUND Handle = 0x88985003 + DWRITE_E_FILEACCESS Handle = 0x88985004 + DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 + DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 + DWRITE_E_CACHEFORMAT Handle = 0x88985007 + DWRITE_E_CACHEVERSION Handle = 0x88985008 + DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 + DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A + DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B + DWRITE_E_NOCOLOR Handle = 0x8898500C + DWRITE_E_REMOTEFONT Handle = 0x8898500D + DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E + DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F + DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 + WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 + WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 + WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 + WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B + WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C + WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D + WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 + WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 + WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 + WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 + WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 + WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 + WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 + WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 + WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 + 
WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 + WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 + WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 + WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 + WINCODEC_ERR_BADHEADER Handle = 0x88982F61 + WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 + WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 + WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 + WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 + WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 + WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 + WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 + WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 + WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A + WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B + WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C + WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle = 0x88982F8D + WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E + WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F + WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 + WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 + WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 + WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 + WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 + WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 + WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 + MILERR_OBJECTBUSY Handle = 0x88980001 + MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 + MILERR_WIN32ERROR Handle = 0x88980003 + MILERR_SCANNER_FAILED Handle = 0x88980004 + MILERR_SCREENACCESSDENIED Handle = 0x88980005 + MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 + MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 + MILERR_ZEROVECTOR Handle = 0x88980008 + MILERR_TERMINATED Handle = 0x88980009 + MILERR_BADNUMBER Handle = 0x8898000A + MILERR_INTERNALERROR Handle = 0x88980080 + MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 + MILERR_INVALIDCALL Handle = 0x88980085 + MILERR_ALREADYLOCKED Handle = 0x88980086 + MILERR_NOTLOCKED Handle = 0x88980087 + MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 + MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 + MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A + MILERR_GENERIC_IGNORE Handle = 0x8898008B + MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C + MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D + MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E + MILERR_ALREADY_INITIALIZED Handle = 0x8898008F + MILERR_MISMATCHED_SIZE Handle = 0x88980090 + MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 + MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 + MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 + MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 + MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 + MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 + MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 + MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 + MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 + MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A + MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B + MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D + MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E + MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F + MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 + MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 + UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 + UCEERR_UNKNOWNPACKET Handle = 0x88980401 + UCEERR_ILLEGALPACKET Handle = 0x88980402 + UCEERR_MALFORMEDPACKET Handle = 0x88980403 + UCEERR_ILLEGALHANDLE 
Handle = 0x88980404 + UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 + UCEERR_RENDERTHREADFAILURE Handle = 0x88980406 + UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 + UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 + UCEERR_BLOCKSFULL Handle = 0x88980409 + UCEERR_MEMORYFAILURE Handle = 0x8898040A + UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B + UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C + UCEERR_OUTOFHANDLES Handle = 0x8898040D + UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E + UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F + UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 + UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 + UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 + UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 + UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 + UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 + UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 + UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 + UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 + UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 + UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 + UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 + UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 + UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 + MILAVERR_NOCLOCK Handle = 0x88980500 + MILAVERR_NOMEDIATYPE Handle = 0x88980501 + MILAVERR_NOVIDEOMIXER Handle = 0x88980502 + MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 + MILAVERR_NOREADYFRAMES Handle = 0x88980504 + MILAVERR_MODULENOTLOADED Handle = 0x88980505 + MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 + MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 + MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 + MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 + MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A + MILAVERR_SEEKFAILED Handle = 0x8898050B + MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C + MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D + MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E + MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E + MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F + MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 + MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 + MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 + MILEFFECTSERR_RESERVED Handle = 0x88980613 + MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 + MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 + MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 + MILEFFECTSERR_EFFECTHASNOCHILDREN Handle = 0x88980617 + MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 + MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 + MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A + MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B + DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 + DWMERR_THEME_FAILED Handle = 0x88980701 + DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 + DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 + DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 + DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 + ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 + ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 + ONL_E_INVALID_APPLICATION Handle = 0x80860003 + ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 + ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 + ONL_E_FORCESIGNIN Handle = 0x80860006 + ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 + ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 + 
ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 + ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A + ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B + ONL_E_ACTION_REQUIRED Handle = 0x8086000C + ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D + ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E + ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F + ONL_E_REQUEST_THROTTLED Handle = 0x80860010 + FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 + FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 + E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 + E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 + E_UAC_DISABLED Handle = 0x80270252 + E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 + E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 + E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 + E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 0x80270256 + E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 + S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 + S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 + E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A + E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B + E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C + E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D + E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 + E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 + E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 + E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 + E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 + E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 + E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 + E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 + E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 + E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 + E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 + E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 + E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 + E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 + E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 + E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 + E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 + E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 + E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 + E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 + E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 0x8802D003 + E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 + E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 + E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 + E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 + E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 + E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 + E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A + E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B + EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 + EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 + EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 + EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 + EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 + EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 + EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 + EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 
0x80550008 + EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A + EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C + EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D + WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 + WEB_E_INVALID_XML Handle = 0x83750002 + WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 + WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 + WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 + WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 + WEB_E_INVALID_JSON_STRING Handle = 0x83750007 + WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 + WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 + HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 + HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 + HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 + HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 + HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C + HTTP_E_STATUS_MOVED Handle = 0x8019012D + HTTP_E_STATUS_REDIRECT Handle = 0x8019012E + HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F + HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 + HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 + HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 + HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 + HTTP_E_STATUS_DENIED Handle = 0x80190191 + HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 + HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 + HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 + HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 + HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 + HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 + HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 + HTTP_E_STATUS_CONFLICT Handle = 0x80190199 + HTTP_E_STATUS_GONE Handle = 0x8019019A + HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B + HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C + HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D + HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E + HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F + HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 + HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 + HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 + HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 + HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 + HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 + HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 + HTTP_E_STATUS_VERSION_NOT_SUP Handle = 0x801901F9 + E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 + E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 + E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 + E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 + E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 + INPUT_E_OUT_OF_ORDER Handle = 0x80400000 + INPUT_E_REENTRANCY Handle = 0x80400001 + INPUT_E_MULTIMODAL Handle = 0x80400002 + INPUT_E_PACKET Handle = 0x80400003 + INPUT_E_FRAME Handle = 0x80400004 + INPUT_E_HISTORY Handle = 0x80400005 + INPUT_E_DEVICE_INFO Handle = 0x80400006 + INPUT_E_TRANSFORM Handle = 0x80400007 + INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 + INET_E_INVALID_URL Handle = 0x800C0002 + INET_E_NO_SESSION Handle = 0x800C0003 + INET_E_CANNOT_CONNECT Handle = 0x800C0004 + INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 + INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 + INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 + INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 + 
INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 + INET_E_NO_VALID_MEDIA Handle = 0x800C000A + INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B + INET_E_INVALID_REQUEST Handle = 0x800C000C + INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D + INET_E_SECURITY_PROBLEM Handle = 0x800C000E + INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F + INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 + INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 + INET_E_REDIRECT_FAILED Handle = 0x800C0014 + INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 + ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 + ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 + ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 + ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 + ERROR_IO_PREEMPTED Handle = 0x89010001 + JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 + WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x88010001 + WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 + WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 + WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 + WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 + WEP_E_NO_LICENSE Handle = 0x88010006 + WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 + WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 + WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 + ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 + ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 + ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 + ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 + ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 + ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 + ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 + ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 + ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 + ERROR_VHD_SHARED Handle = 0xC05CFF0A + ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B + ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C + ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 + ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 + WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 + WININET_E_TIMEOUT Handle = 0x80072EE2 + WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 + WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 + WININET_E_INVALID_URL Handle = 0x80072EE5 + WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 + WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 + WININET_E_PROTOCOL_NOT_FOUND Handle = 0x80072EE8 + WININET_E_INVALID_OPTION Handle = 0x80072EE9 + WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA + WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB + WININET_E_SHUTDOWN Handle = 0x80072EEC + WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED + WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE + WININET_E_LOGIN_FAILURE Handle = 0x80072EEF + WININET_E_INVALID_OPERATION Handle = 0x80072EF0 + WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 + WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 + WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 + WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 + WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 + WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 + WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 + WININET_E_NO_CONTEXT Handle = 0x80072EF8 + WININET_E_NO_CALLBACK Handle = 0x80072EF9 + WININET_E_REQUEST_PENDING Handle = 0x80072EFA + WININET_E_INCORRECT_FORMAT Handle = 
0x80072EFB + WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC + WININET_E_CANNOT_CONNECT Handle = 0x80072EFD + WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE + WININET_E_CONNECTION_RESET Handle = 0x80072EFF + WININET_E_FORCE_RETRY Handle = 0x80072F00 + WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 + WININET_E_NEED_UI Handle = 0x80072F02 + WININET_E_HANDLE_EXISTS Handle = 0x80072F04 + WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 + WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 + WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 + WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 + WININET_E_MIXED_SECURITY Handle = 0x80072F09 + WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A + WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B + WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C + WININET_E_INVALID_CA Handle = 0x80072F0D + WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E + WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F + WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 + WININET_E_DIALOG_PENDING Handle = 0x80072F11 + WININET_E_RETRY_DIALOG Handle = 0x80072F12 + WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 + WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 + WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 + WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 + WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 + WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 + WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 + WININET_E_INVALID_HEADER Handle = 0x80072F79 + WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A + WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B + WININET_E_REDIRECT_FAILED Handle = 0x80072F7C + WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D + WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E + WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F + WININET_E_DISCONNECTED Handle = 0x80072F83 + WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 + WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 + WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 + WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 + WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 + WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A + WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B + WININET_E_NOT_INITIALIZED Handle = 0x80072F8C + WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E + WININET_E_DECODING_FAILED Handle = 0x80072F8F + WININET_E_NOT_REDIRECTED Handle = 0x80072F80 + WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 + WININET_E_COOKIE_DECLINED Handle = 0x80072F82 + WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 + SQLITE_E_ERROR Handle = 0x87AF0001 + SQLITE_E_INTERNAL Handle = 0x87AF0002 + SQLITE_E_PERM Handle = 0x87AF0003 + SQLITE_E_ABORT Handle = 0x87AF0004 + SQLITE_E_BUSY Handle = 0x87AF0005 + SQLITE_E_LOCKED Handle = 0x87AF0006 + SQLITE_E_NOMEM Handle = 0x87AF0007 + SQLITE_E_READONLY Handle = 0x87AF0008 + SQLITE_E_INTERRUPT Handle = 0x87AF0009 + SQLITE_E_IOERR Handle = 0x87AF000A + SQLITE_E_CORRUPT Handle = 0x87AF000B + SQLITE_E_NOTFOUND Handle = 0x87AF000C + SQLITE_E_FULL Handle = 0x87AF000D + SQLITE_E_CANTOPEN Handle = 0x87AF000E + SQLITE_E_PROTOCOL Handle = 0x87AF000F + SQLITE_E_EMPTY Handle = 0x87AF0010 + SQLITE_E_SCHEMA Handle = 0x87AF0011 + SQLITE_E_TOOBIG Handle = 0x87AF0012 + SQLITE_E_CONSTRAINT Handle = 0x87AF0013 + SQLITE_E_MISMATCH Handle = 0x87AF0014 + SQLITE_E_MISUSE Handle = 0x87AF0015 + SQLITE_E_NOLFS Handle = 0x87AF0016 + SQLITE_E_AUTH Handle = 0x87AF0017 + SQLITE_E_FORMAT Handle = 0x87AF0018 + 
SQLITE_E_RANGE Handle = 0x87AF0019 + SQLITE_E_NOTADB Handle = 0x87AF001A + SQLITE_E_NOTICE Handle = 0x87AF001B + SQLITE_E_WARNING Handle = 0x87AF001C + SQLITE_E_ROW Handle = 0x87AF0064 + SQLITE_E_DONE Handle = 0x87AF0065 + SQLITE_E_IOERR_READ Handle = 0x87AF010A + SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A + SQLITE_E_IOERR_WRITE Handle = 0x87AF030A + SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A + SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A + SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A + SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A + SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A + SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A + SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A + SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A + SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A + SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A + SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A + SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A + SQLITE_E_IOERR_CLOSE Handle = 0x87AF100A + SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A + SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A + SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A + SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A + SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A + SQLITE_E_IOERR_SEEK Handle = 0x87AF160A + SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A + SQLITE_E_IOERR_MMAP Handle = 0x87AF180A + SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A + SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A + SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02 + SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03 + SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106 + SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105 + SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205 + SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E + SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E + SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E + SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E + SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B + SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108 + SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208 + SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308 + SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408 + SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204 + SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113 + SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213 + SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313 + SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413 + SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513 + SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613 + SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713 + SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813 + SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913 + SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13 + SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B + SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B + SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C + UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001 + UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002 + UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003 + UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004 + UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005 + UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006 + UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007 + UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008 + UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009 + UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A + UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B + UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C + UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D + UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E + UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F + 
UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010 + UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011 + UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012 + UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013 + UTC_E_SQM_INIT_FAILED Handle = 0x87C51014 + UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015 + UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016 + UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017 + UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018 + UTC_E_INVALID_FILTER Handle = 0x87C51019 + UTC_E_EXE_TERMINATED Handle = 0x87C5101A + UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B + UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C + UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D + UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E + UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F + UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020 + UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021 + UTC_E_TRIGGER_MISMATCH Handle = 0x87C51022 + UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023 + UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024 + UTC_E_DELAY_TERMINATED Handle = 0x87C51025 + UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026 + UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027 + UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028 + UTC_E_RPC_TIMEOUT Handle = 0x87C51029 + UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A + UTC_E_API_BUSY Handle = 0x87C5102B + UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C + UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D + UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E + UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F + UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030 + UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031 + UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032 + UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033 + UTC_E_BINARY_MISSING Handle = 0x87C51034 + UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035 + UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036 + UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037 + UTC_E_THROTTLED Handle = 0x87C51038 + UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039 + UTC_E_SCRIPT_MISSING Handle = 0x87C5103A + UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B + UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C + UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D + UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E + UTC_E_CERT_REV_FAILED Handle = 0x87C5103F + UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040 + UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 0x87C51041 + UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042 + UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043 + UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044 + UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045 + UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046 + UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047 + UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048 + UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049 + UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050 + UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051 + UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052 + UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053 + UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054 + UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055 + UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056 + UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057 + UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058 + UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 
0x87C51059 + UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A + UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B + UTC_E_SETREGKEYACTION_TYPE_NOT_APPROVED Handle = 0x87C5105C + WINML_ERR_INVALID_DEVICE Handle = 0x88900001 + WINML_ERR_INVALID_BINDING Handle = 0x88900002 + WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 + WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 + STATUS_WAIT_0 NTStatus = 0x00000000 + STATUS_SUCCESS NTStatus = 0x00000000 + STATUS_WAIT_1 NTStatus = 0x00000001 + STATUS_WAIT_2 NTStatus = 0x00000002 + STATUS_WAIT_3 NTStatus = 0x00000003 + STATUS_WAIT_63 NTStatus = 0x0000003F + STATUS_ABANDONED NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_0 NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_63 NTStatus = 0x000000BF + STATUS_USER_APC NTStatus = 0x000000C0 + STATUS_ALREADY_COMPLETE NTStatus = 0x000000FF + STATUS_KERNEL_APC NTStatus = 0x00000100 + STATUS_ALERTED NTStatus = 0x00000101 + STATUS_TIMEOUT NTStatus = 0x00000102 + STATUS_PENDING NTStatus = 0x00000103 + STATUS_REPARSE NTStatus = 0x00000104 + STATUS_MORE_ENTRIES NTStatus = 0x00000105 + STATUS_NOT_ALL_ASSIGNED NTStatus = 0x00000106 + STATUS_SOME_NOT_MAPPED NTStatus = 0x00000107 + STATUS_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0x00000108 + STATUS_VOLUME_MOUNTED NTStatus = 0x00000109 + STATUS_RXACT_COMMITTED NTStatus = 0x0000010A + STATUS_NOTIFY_CLEANUP NTStatus = 0x0000010B + STATUS_NOTIFY_ENUM_DIR NTStatus = 0x0000010C + STATUS_NO_QUOTAS_FOR_ACCOUNT NTStatus = 0x0000010D + STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED NTStatus = 0x0000010E + STATUS_PAGE_FAULT_TRANSITION NTStatus = 0x00000110 + STATUS_PAGE_FAULT_DEMAND_ZERO NTStatus = 0x00000111 + STATUS_PAGE_FAULT_COPY_ON_WRITE NTStatus = 0x00000112 + STATUS_PAGE_FAULT_GUARD_PAGE NTStatus = 0x00000113 + STATUS_PAGE_FAULT_PAGING_FILE NTStatus = 0x00000114 + STATUS_CACHE_PAGE_LOCKED NTStatus = 0x00000115 + STATUS_CRASH_DUMP NTStatus = 0x00000116 + STATUS_BUFFER_ALL_ZEROS NTStatus = 0x00000117 + STATUS_REPARSE_OBJECT NTStatus = 0x00000118 + STATUS_RESOURCE_REQUIREMENTS_CHANGED NTStatus = 0x00000119 + STATUS_TRANSLATION_COMPLETE NTStatus = 0x00000120 + STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY NTStatus = 0x00000121 + STATUS_NOTHING_TO_TERMINATE NTStatus = 0x00000122 + STATUS_PROCESS_NOT_IN_JOB NTStatus = 0x00000123 + STATUS_PROCESS_IN_JOB NTStatus = 0x00000124 + STATUS_VOLSNAP_HIBERNATE_READY NTStatus = 0x00000125 + STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY NTStatus = 0x00000126 + STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED NTStatus = 0x00000127 + STATUS_INTERRUPT_STILL_CONNECTED NTStatus = 0x00000128 + STATUS_PROCESS_CLONED NTStatus = 0x00000129 + STATUS_FILE_LOCKED_WITH_ONLY_READERS NTStatus = 0x0000012A + STATUS_FILE_LOCKED_WITH_WRITERS NTStatus = 0x0000012B + STATUS_VALID_IMAGE_HASH NTStatus = 0x0000012C + STATUS_VALID_CATALOG_HASH NTStatus = 0x0000012D + STATUS_VALID_STRONG_CODE_HASH NTStatus = 0x0000012E + STATUS_GHOSTED NTStatus = 0x0000012F + STATUS_DATA_OVERWRITTEN NTStatus = 0x00000130 + STATUS_RESOURCEMANAGER_READ_ONLY NTStatus = 0x00000202 + STATUS_RING_PREVIOUSLY_EMPTY NTStatus = 0x00000210 + STATUS_RING_PREVIOUSLY_FULL NTStatus = 0x00000211 + STATUS_RING_PREVIOUSLY_ABOVE_QUOTA NTStatus = 0x00000212 + STATUS_RING_NEWLY_EMPTY NTStatus = 0x00000213 + STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT NTStatus = 0x00000214 + STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE NTStatus = 0x00000215 + STATUS_OPLOCK_HANDLE_CLOSED NTStatus = 0x00000216 + STATUS_WAIT_FOR_OPLOCK NTStatus = 0x00000367 + STATUS_REPARSE_GLOBAL NTStatus = 0x00000368 + STATUS_FLT_IO_COMPLETE NTStatus = 0x001C0001 + 
STATUS_OBJECT_NAME_EXISTS NTStatus = 0x40000000 + STATUS_THREAD_WAS_SUSPENDED NTStatus = 0x40000001 + STATUS_WORKING_SET_LIMIT_RANGE NTStatus = 0x40000002 + STATUS_IMAGE_NOT_AT_BASE NTStatus = 0x40000003 + STATUS_RXACT_STATE_CREATED NTStatus = 0x40000004 + STATUS_SEGMENT_NOTIFICATION NTStatus = 0x40000005 + STATUS_LOCAL_USER_SESSION_KEY NTStatus = 0x40000006 + STATUS_BAD_CURRENT_DIRECTORY NTStatus = 0x40000007 + STATUS_SERIAL_MORE_WRITES NTStatus = 0x40000008 + STATUS_REGISTRY_RECOVERED NTStatus = 0x40000009 + STATUS_FT_READ_RECOVERY_FROM_BACKUP NTStatus = 0x4000000A + STATUS_FT_WRITE_RECOVERY NTStatus = 0x4000000B + STATUS_SERIAL_COUNTER_TIMEOUT NTStatus = 0x4000000C + STATUS_NULL_LM_PASSWORD NTStatus = 0x4000000D + STATUS_IMAGE_MACHINE_TYPE_MISMATCH NTStatus = 0x4000000E + STATUS_RECEIVE_PARTIAL NTStatus = 0x4000000F + STATUS_RECEIVE_EXPEDITED NTStatus = 0x40000010 + STATUS_RECEIVE_PARTIAL_EXPEDITED NTStatus = 0x40000011 + STATUS_EVENT_DONE NTStatus = 0x40000012 + STATUS_EVENT_PENDING NTStatus = 0x40000013 + STATUS_CHECKING_FILE_SYSTEM NTStatus = 0x40000014 + STATUS_FATAL_APP_EXIT NTStatus = 0x40000015 + STATUS_PREDEFINED_HANDLE NTStatus = 0x40000016 + STATUS_WAS_UNLOCKED NTStatus = 0x40000017 + STATUS_SERVICE_NOTIFICATION NTStatus = 0x40000018 + STATUS_WAS_LOCKED NTStatus = 0x40000019 + STATUS_LOG_HARD_ERROR NTStatus = 0x4000001A + STATUS_ALREADY_WIN32 NTStatus = 0x4000001B + STATUS_WX86_UNSIMULATE NTStatus = 0x4000001C + STATUS_WX86_CONTINUE NTStatus = 0x4000001D + STATUS_WX86_SINGLE_STEP NTStatus = 0x4000001E + STATUS_WX86_BREAKPOINT NTStatus = 0x4000001F + STATUS_WX86_EXCEPTION_CONTINUE NTStatus = 0x40000020 + STATUS_WX86_EXCEPTION_LASTCHANCE NTStatus = 0x40000021 + STATUS_WX86_EXCEPTION_CHAIN NTStatus = 0x40000022 + STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE NTStatus = 0x40000023 + STATUS_NO_YIELD_PERFORMED NTStatus = 0x40000024 + STATUS_TIMER_RESUME_IGNORED NTStatus = 0x40000025 + STATUS_ARBITRATION_UNHANDLED NTStatus = 0x40000026 + STATUS_CARDBUS_NOT_SUPPORTED NTStatus = 0x40000027 + STATUS_WX86_CREATEWX86TIB NTStatus = 0x40000028 + STATUS_MP_PROCESSOR_MISMATCH NTStatus = 0x40000029 + STATUS_HIBERNATED NTStatus = 0x4000002A + STATUS_RESUME_HIBERNATION NTStatus = 0x4000002B + STATUS_FIRMWARE_UPDATED NTStatus = 0x4000002C + STATUS_DRIVERS_LEAKING_LOCKED_PAGES NTStatus = 0x4000002D + STATUS_MESSAGE_RETRIEVED NTStatus = 0x4000002E + STATUS_SYSTEM_POWERSTATE_TRANSITION NTStatus = 0x4000002F + STATUS_ALPC_CHECK_COMPLETION_LIST NTStatus = 0x40000030 + STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION NTStatus = 0x40000031 + STATUS_ACCESS_AUDIT_BY_POLICY NTStatus = 0x40000032 + STATUS_ABANDON_HIBERFILE NTStatus = 0x40000033 + STATUS_BIZRULES_NOT_ENABLED NTStatus = 0x40000034 + STATUS_FT_READ_FROM_COPY NTStatus = 0x40000035 + STATUS_IMAGE_AT_DIFFERENT_BASE NTStatus = 0x40000036 + STATUS_PATCH_DEFERRED NTStatus = 0x40000037 + STATUS_HEURISTIC_DAMAGE_POSSIBLE NTStatus = 0x40190001 + STATUS_GUARD_PAGE_VIOLATION NTStatus = 0x80000001 + STATUS_DATATYPE_MISALIGNMENT NTStatus = 0x80000002 + STATUS_BREAKPOINT NTStatus = 0x80000003 + STATUS_SINGLE_STEP NTStatus = 0x80000004 + STATUS_BUFFER_OVERFLOW NTStatus = 0x80000005 + STATUS_NO_MORE_FILES NTStatus = 0x80000006 + STATUS_WAKE_SYSTEM_DEBUGGER NTStatus = 0x80000007 + STATUS_HANDLES_CLOSED NTStatus = 0x8000000A + STATUS_NO_INHERITANCE NTStatus = 0x8000000B + STATUS_GUID_SUBSTITUTION_MADE NTStatus = 0x8000000C + STATUS_PARTIAL_COPY NTStatus = 0x8000000D + STATUS_DEVICE_PAPER_EMPTY NTStatus = 0x8000000E + STATUS_DEVICE_POWERED_OFF NTStatus = 0x8000000F + 
STATUS_DEVICE_OFF_LINE NTStatus = 0x80000010 + STATUS_DEVICE_BUSY NTStatus = 0x80000011 + STATUS_NO_MORE_EAS NTStatus = 0x80000012 + STATUS_INVALID_EA_NAME NTStatus = 0x80000013 + STATUS_EA_LIST_INCONSISTENT NTStatus = 0x80000014 + STATUS_INVALID_EA_FLAG NTStatus = 0x80000015 + STATUS_VERIFY_REQUIRED NTStatus = 0x80000016 + STATUS_EXTRANEOUS_INFORMATION NTStatus = 0x80000017 + STATUS_RXACT_COMMIT_NECESSARY NTStatus = 0x80000018 + STATUS_NO_MORE_ENTRIES NTStatus = 0x8000001A + STATUS_FILEMARK_DETECTED NTStatus = 0x8000001B + STATUS_MEDIA_CHANGED NTStatus = 0x8000001C + STATUS_BUS_RESET NTStatus = 0x8000001D + STATUS_END_OF_MEDIA NTStatus = 0x8000001E + STATUS_BEGINNING_OF_MEDIA NTStatus = 0x8000001F + STATUS_MEDIA_CHECK NTStatus = 0x80000020 + STATUS_SETMARK_DETECTED NTStatus = 0x80000021 + STATUS_NO_DATA_DETECTED NTStatus = 0x80000022 + STATUS_REDIRECTOR_HAS_OPEN_HANDLES NTStatus = 0x80000023 + STATUS_SERVER_HAS_OPEN_HANDLES NTStatus = 0x80000024 + STATUS_ALREADY_DISCONNECTED NTStatus = 0x80000025 + STATUS_LONGJUMP NTStatus = 0x80000026 + STATUS_CLEANER_CARTRIDGE_INSTALLED NTStatus = 0x80000027 + STATUS_PLUGPLAY_QUERY_VETOED NTStatus = 0x80000028 + STATUS_UNWIND_CONSOLIDATE NTStatus = 0x80000029 + STATUS_REGISTRY_HIVE_RECOVERED NTStatus = 0x8000002A + STATUS_DLL_MIGHT_BE_INSECURE NTStatus = 0x8000002B + STATUS_DLL_MIGHT_BE_INCOMPATIBLE NTStatus = 0x8000002C + STATUS_STOPPED_ON_SYMLINK NTStatus = 0x8000002D + STATUS_CANNOT_GRANT_REQUESTED_OPLOCK NTStatus = 0x8000002E + STATUS_NO_ACE_CONDITION NTStatus = 0x8000002F + STATUS_DEVICE_SUPPORT_IN_PROGRESS NTStatus = 0x80000030 + STATUS_DEVICE_POWER_CYCLE_REQUIRED NTStatus = 0x80000031 + STATUS_NO_WORK_DONE NTStatus = 0x80000032 + STATUS_CLUSTER_NODE_ALREADY_UP NTStatus = 0x80130001 + STATUS_CLUSTER_NODE_ALREADY_DOWN NTStatus = 0x80130002 + STATUS_CLUSTER_NETWORK_ALREADY_ONLINE NTStatus = 0x80130003 + STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE NTStatus = 0x80130004 + STATUS_CLUSTER_NODE_ALREADY_MEMBER NTStatus = 0x80130005 + STATUS_FLT_BUFFER_TOO_SMALL NTStatus = 0x801C0001 + STATUS_FVE_PARTIAL_METADATA NTStatus = 0x80210001 + STATUS_FVE_TRANSIENT_STATE NTStatus = 0x80210002 + STATUS_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH NTStatus = 0x8000CF00 + STATUS_UNSUCCESSFUL NTStatus = 0xC0000001 + STATUS_NOT_IMPLEMENTED NTStatus = 0xC0000002 + STATUS_INVALID_INFO_CLASS NTStatus = 0xC0000003 + STATUS_INFO_LENGTH_MISMATCH NTStatus = 0xC0000004 + STATUS_ACCESS_VIOLATION NTStatus = 0xC0000005 + STATUS_IN_PAGE_ERROR NTStatus = 0xC0000006 + STATUS_PAGEFILE_QUOTA NTStatus = 0xC0000007 + STATUS_INVALID_HANDLE NTStatus = 0xC0000008 + STATUS_BAD_INITIAL_STACK NTStatus = 0xC0000009 + STATUS_BAD_INITIAL_PC NTStatus = 0xC000000A + STATUS_INVALID_CID NTStatus = 0xC000000B + STATUS_TIMER_NOT_CANCELED NTStatus = 0xC000000C + STATUS_INVALID_PARAMETER NTStatus = 0xC000000D + STATUS_NO_SUCH_DEVICE NTStatus = 0xC000000E + STATUS_NO_SUCH_FILE NTStatus = 0xC000000F + STATUS_INVALID_DEVICE_REQUEST NTStatus = 0xC0000010 + STATUS_END_OF_FILE NTStatus = 0xC0000011 + STATUS_WRONG_VOLUME NTStatus = 0xC0000012 + STATUS_NO_MEDIA_IN_DEVICE NTStatus = 0xC0000013 + STATUS_UNRECOGNIZED_MEDIA NTStatus = 0xC0000014 + STATUS_NONEXISTENT_SECTOR NTStatus = 0xC0000015 + STATUS_MORE_PROCESSING_REQUIRED NTStatus = 0xC0000016 + STATUS_NO_MEMORY NTStatus = 0xC0000017 + STATUS_CONFLICTING_ADDRESSES NTStatus = 0xC0000018 + STATUS_NOT_MAPPED_VIEW NTStatus = 0xC0000019 + STATUS_UNABLE_TO_FREE_VM NTStatus = 0xC000001A + STATUS_UNABLE_TO_DELETE_SECTION NTStatus = 0xC000001B + STATUS_INVALID_SYSTEM_SERVICE 
NTStatus = 0xC000001C + STATUS_ILLEGAL_INSTRUCTION NTStatus = 0xC000001D + STATUS_INVALID_LOCK_SEQUENCE NTStatus = 0xC000001E + STATUS_INVALID_VIEW_SIZE NTStatus = 0xC000001F + STATUS_INVALID_FILE_FOR_SECTION NTStatus = 0xC0000020 + STATUS_ALREADY_COMMITTED NTStatus = 0xC0000021 + STATUS_ACCESS_DENIED NTStatus = 0xC0000022 + STATUS_BUFFER_TOO_SMALL NTStatus = 0xC0000023 + STATUS_OBJECT_TYPE_MISMATCH NTStatus = 0xC0000024 + STATUS_NONCONTINUABLE_EXCEPTION NTStatus = 0xC0000025 + STATUS_INVALID_DISPOSITION NTStatus = 0xC0000026 + STATUS_UNWIND NTStatus = 0xC0000027 + STATUS_BAD_STACK NTStatus = 0xC0000028 + STATUS_INVALID_UNWIND_TARGET NTStatus = 0xC0000029 + STATUS_NOT_LOCKED NTStatus = 0xC000002A + STATUS_PARITY_ERROR NTStatus = 0xC000002B + STATUS_UNABLE_TO_DECOMMIT_VM NTStatus = 0xC000002C + STATUS_NOT_COMMITTED NTStatus = 0xC000002D + STATUS_INVALID_PORT_ATTRIBUTES NTStatus = 0xC000002E + STATUS_PORT_MESSAGE_TOO_LONG NTStatus = 0xC000002F + STATUS_INVALID_PARAMETER_MIX NTStatus = 0xC0000030 + STATUS_INVALID_QUOTA_LOWER NTStatus = 0xC0000031 + STATUS_DISK_CORRUPT_ERROR NTStatus = 0xC0000032 + STATUS_OBJECT_NAME_INVALID NTStatus = 0xC0000033 + STATUS_OBJECT_NAME_NOT_FOUND NTStatus = 0xC0000034 + STATUS_OBJECT_NAME_COLLISION NTStatus = 0xC0000035 + STATUS_PORT_DO_NOT_DISTURB NTStatus = 0xC0000036 + STATUS_PORT_DISCONNECTED NTStatus = 0xC0000037 + STATUS_DEVICE_ALREADY_ATTACHED NTStatus = 0xC0000038 + STATUS_OBJECT_PATH_INVALID NTStatus = 0xC0000039 + STATUS_OBJECT_PATH_NOT_FOUND NTStatus = 0xC000003A + STATUS_OBJECT_PATH_SYNTAX_BAD NTStatus = 0xC000003B + STATUS_DATA_OVERRUN NTStatus = 0xC000003C + STATUS_DATA_LATE_ERROR NTStatus = 0xC000003D + STATUS_DATA_ERROR NTStatus = 0xC000003E + STATUS_CRC_ERROR NTStatus = 0xC000003F + STATUS_SECTION_TOO_BIG NTStatus = 0xC0000040 + STATUS_PORT_CONNECTION_REFUSED NTStatus = 0xC0000041 + STATUS_INVALID_PORT_HANDLE NTStatus = 0xC0000042 + STATUS_SHARING_VIOLATION NTStatus = 0xC0000043 + STATUS_QUOTA_EXCEEDED NTStatus = 0xC0000044 + STATUS_INVALID_PAGE_PROTECTION NTStatus = 0xC0000045 + STATUS_MUTANT_NOT_OWNED NTStatus = 0xC0000046 + STATUS_SEMAPHORE_LIMIT_EXCEEDED NTStatus = 0xC0000047 + STATUS_PORT_ALREADY_SET NTStatus = 0xC0000048 + STATUS_SECTION_NOT_IMAGE NTStatus = 0xC0000049 + STATUS_SUSPEND_COUNT_EXCEEDED NTStatus = 0xC000004A + STATUS_THREAD_IS_TERMINATING NTStatus = 0xC000004B + STATUS_BAD_WORKING_SET_LIMIT NTStatus = 0xC000004C + STATUS_INCOMPATIBLE_FILE_MAP NTStatus = 0xC000004D + STATUS_SECTION_PROTECTION NTStatus = 0xC000004E + STATUS_EAS_NOT_SUPPORTED NTStatus = 0xC000004F + STATUS_EA_TOO_LARGE NTStatus = 0xC0000050 + STATUS_NONEXISTENT_EA_ENTRY NTStatus = 0xC0000051 + STATUS_NO_EAS_ON_FILE NTStatus = 0xC0000052 + STATUS_EA_CORRUPT_ERROR NTStatus = 0xC0000053 + STATUS_FILE_LOCK_CONFLICT NTStatus = 0xC0000054 + STATUS_LOCK_NOT_GRANTED NTStatus = 0xC0000055 + STATUS_DELETE_PENDING NTStatus = 0xC0000056 + STATUS_CTL_FILE_NOT_SUPPORTED NTStatus = 0xC0000057 + STATUS_UNKNOWN_REVISION NTStatus = 0xC0000058 + STATUS_REVISION_MISMATCH NTStatus = 0xC0000059 + STATUS_INVALID_OWNER NTStatus = 0xC000005A + STATUS_INVALID_PRIMARY_GROUP NTStatus = 0xC000005B + STATUS_NO_IMPERSONATION_TOKEN NTStatus = 0xC000005C + STATUS_CANT_DISABLE_MANDATORY NTStatus = 0xC000005D + STATUS_NO_LOGON_SERVERS NTStatus = 0xC000005E + STATUS_NO_SUCH_LOGON_SESSION NTStatus = 0xC000005F + STATUS_NO_SUCH_PRIVILEGE NTStatus = 0xC0000060 + STATUS_PRIVILEGE_NOT_HELD NTStatus = 0xC0000061 + STATUS_INVALID_ACCOUNT_NAME NTStatus = 0xC0000062 + STATUS_USER_EXISTS NTStatus = 
0xC0000063 + STATUS_NO_SUCH_USER NTStatus = 0xC0000064 + STATUS_GROUP_EXISTS NTStatus = 0xC0000065 + STATUS_NO_SUCH_GROUP NTStatus = 0xC0000066 + STATUS_MEMBER_IN_GROUP NTStatus = 0xC0000067 + STATUS_MEMBER_NOT_IN_GROUP NTStatus = 0xC0000068 + STATUS_LAST_ADMIN NTStatus = 0xC0000069 + STATUS_WRONG_PASSWORD NTStatus = 0xC000006A + STATUS_ILL_FORMED_PASSWORD NTStatus = 0xC000006B + STATUS_PASSWORD_RESTRICTION NTStatus = 0xC000006C + STATUS_LOGON_FAILURE NTStatus = 0xC000006D + STATUS_ACCOUNT_RESTRICTION NTStatus = 0xC000006E + STATUS_INVALID_LOGON_HOURS NTStatus = 0xC000006F + STATUS_INVALID_WORKSTATION NTStatus = 0xC0000070 + STATUS_PASSWORD_EXPIRED NTStatus = 0xC0000071 + STATUS_ACCOUNT_DISABLED NTStatus = 0xC0000072 + STATUS_NONE_MAPPED NTStatus = 0xC0000073 + STATUS_TOO_MANY_LUIDS_REQUESTED NTStatus = 0xC0000074 + STATUS_LUIDS_EXHAUSTED NTStatus = 0xC0000075 + STATUS_INVALID_SUB_AUTHORITY NTStatus = 0xC0000076 + STATUS_INVALID_ACL NTStatus = 0xC0000077 + STATUS_INVALID_SID NTStatus = 0xC0000078 + STATUS_INVALID_SECURITY_DESCR NTStatus = 0xC0000079 + STATUS_PROCEDURE_NOT_FOUND NTStatus = 0xC000007A + STATUS_INVALID_IMAGE_FORMAT NTStatus = 0xC000007B + STATUS_NO_TOKEN NTStatus = 0xC000007C + STATUS_BAD_INHERITANCE_ACL NTStatus = 0xC000007D + STATUS_RANGE_NOT_LOCKED NTStatus = 0xC000007E + STATUS_DISK_FULL NTStatus = 0xC000007F + STATUS_SERVER_DISABLED NTStatus = 0xC0000080 + STATUS_SERVER_NOT_DISABLED NTStatus = 0xC0000081 + STATUS_TOO_MANY_GUIDS_REQUESTED NTStatus = 0xC0000082 + STATUS_GUIDS_EXHAUSTED NTStatus = 0xC0000083 + STATUS_INVALID_ID_AUTHORITY NTStatus = 0xC0000084 + STATUS_AGENTS_EXHAUSTED NTStatus = 0xC0000085 + STATUS_INVALID_VOLUME_LABEL NTStatus = 0xC0000086 + STATUS_SECTION_NOT_EXTENDED NTStatus = 0xC0000087 + STATUS_NOT_MAPPED_DATA NTStatus = 0xC0000088 + STATUS_RESOURCE_DATA_NOT_FOUND NTStatus = 0xC0000089 + STATUS_RESOURCE_TYPE_NOT_FOUND NTStatus = 0xC000008A + STATUS_RESOURCE_NAME_NOT_FOUND NTStatus = 0xC000008B + STATUS_ARRAY_BOUNDS_EXCEEDED NTStatus = 0xC000008C + STATUS_FLOAT_DENORMAL_OPERAND NTStatus = 0xC000008D + STATUS_FLOAT_DIVIDE_BY_ZERO NTStatus = 0xC000008E + STATUS_FLOAT_INEXACT_RESULT NTStatus = 0xC000008F + STATUS_FLOAT_INVALID_OPERATION NTStatus = 0xC0000090 + STATUS_FLOAT_OVERFLOW NTStatus = 0xC0000091 + STATUS_FLOAT_STACK_CHECK NTStatus = 0xC0000092 + STATUS_FLOAT_UNDERFLOW NTStatus = 0xC0000093 + STATUS_INTEGER_DIVIDE_BY_ZERO NTStatus = 0xC0000094 + STATUS_INTEGER_OVERFLOW NTStatus = 0xC0000095 + STATUS_PRIVILEGED_INSTRUCTION NTStatus = 0xC0000096 + STATUS_TOO_MANY_PAGING_FILES NTStatus = 0xC0000097 + STATUS_FILE_INVALID NTStatus = 0xC0000098 + STATUS_ALLOTTED_SPACE_EXCEEDED NTStatus = 0xC0000099 + STATUS_INSUFFICIENT_RESOURCES NTStatus = 0xC000009A + STATUS_DFS_EXIT_PATH_FOUND NTStatus = 0xC000009B + STATUS_DEVICE_DATA_ERROR NTStatus = 0xC000009C + STATUS_DEVICE_NOT_CONNECTED NTStatus = 0xC000009D + STATUS_DEVICE_POWER_FAILURE NTStatus = 0xC000009E + STATUS_FREE_VM_NOT_AT_BASE NTStatus = 0xC000009F + STATUS_MEMORY_NOT_ALLOCATED NTStatus = 0xC00000A0 + STATUS_WORKING_SET_QUOTA NTStatus = 0xC00000A1 + STATUS_MEDIA_WRITE_PROTECTED NTStatus = 0xC00000A2 + STATUS_DEVICE_NOT_READY NTStatus = 0xC00000A3 + STATUS_INVALID_GROUP_ATTRIBUTES NTStatus = 0xC00000A4 + STATUS_BAD_IMPERSONATION_LEVEL NTStatus = 0xC00000A5 + STATUS_CANT_OPEN_ANONYMOUS NTStatus = 0xC00000A6 + STATUS_BAD_VALIDATION_CLASS NTStatus = 0xC00000A7 + STATUS_BAD_TOKEN_TYPE NTStatus = 0xC00000A8 + STATUS_BAD_MASTER_BOOT_RECORD NTStatus = 0xC00000A9 + STATUS_INSTRUCTION_MISALIGNMENT NTStatus = 
0xC00000AA + STATUS_INSTANCE_NOT_AVAILABLE NTStatus = 0xC00000AB + STATUS_PIPE_NOT_AVAILABLE NTStatus = 0xC00000AC + STATUS_INVALID_PIPE_STATE NTStatus = 0xC00000AD + STATUS_PIPE_BUSY NTStatus = 0xC00000AE + STATUS_ILLEGAL_FUNCTION NTStatus = 0xC00000AF + STATUS_PIPE_DISCONNECTED NTStatus = 0xC00000B0 + STATUS_PIPE_CLOSING NTStatus = 0xC00000B1 + STATUS_PIPE_CONNECTED NTStatus = 0xC00000B2 + STATUS_PIPE_LISTENING NTStatus = 0xC00000B3 + STATUS_INVALID_READ_MODE NTStatus = 0xC00000B4 + STATUS_IO_TIMEOUT NTStatus = 0xC00000B5 + STATUS_FILE_FORCED_CLOSED NTStatus = 0xC00000B6 + STATUS_PROFILING_NOT_STARTED NTStatus = 0xC00000B7 + STATUS_PROFILING_NOT_STOPPED NTStatus = 0xC00000B8 + STATUS_COULD_NOT_INTERPRET NTStatus = 0xC00000B9 + STATUS_FILE_IS_A_DIRECTORY NTStatus = 0xC00000BA + STATUS_NOT_SUPPORTED NTStatus = 0xC00000BB + STATUS_REMOTE_NOT_LISTENING NTStatus = 0xC00000BC + STATUS_DUPLICATE_NAME NTStatus = 0xC00000BD + STATUS_BAD_NETWORK_PATH NTStatus = 0xC00000BE + STATUS_NETWORK_BUSY NTStatus = 0xC00000BF + STATUS_DEVICE_DOES_NOT_EXIST NTStatus = 0xC00000C0 + STATUS_TOO_MANY_COMMANDS NTStatus = 0xC00000C1 + STATUS_ADAPTER_HARDWARE_ERROR NTStatus = 0xC00000C2 + STATUS_INVALID_NETWORK_RESPONSE NTStatus = 0xC00000C3 + STATUS_UNEXPECTED_NETWORK_ERROR NTStatus = 0xC00000C4 + STATUS_BAD_REMOTE_ADAPTER NTStatus = 0xC00000C5 + STATUS_PRINT_QUEUE_FULL NTStatus = 0xC00000C6 + STATUS_NO_SPOOL_SPACE NTStatus = 0xC00000C7 + STATUS_PRINT_CANCELLED NTStatus = 0xC00000C8 + STATUS_NETWORK_NAME_DELETED NTStatus = 0xC00000C9 + STATUS_NETWORK_ACCESS_DENIED NTStatus = 0xC00000CA + STATUS_BAD_DEVICE_TYPE NTStatus = 0xC00000CB + STATUS_BAD_NETWORK_NAME NTStatus = 0xC00000CC + STATUS_TOO_MANY_NAMES NTStatus = 0xC00000CD + STATUS_TOO_MANY_SESSIONS NTStatus = 0xC00000CE + STATUS_SHARING_PAUSED NTStatus = 0xC00000CF + STATUS_REQUEST_NOT_ACCEPTED NTStatus = 0xC00000D0 + STATUS_REDIRECTOR_PAUSED NTStatus = 0xC00000D1 + STATUS_NET_WRITE_FAULT NTStatus = 0xC00000D2 + STATUS_PROFILING_AT_LIMIT NTStatus = 0xC00000D3 + STATUS_NOT_SAME_DEVICE NTStatus = 0xC00000D4 + STATUS_FILE_RENAMED NTStatus = 0xC00000D5 + STATUS_VIRTUAL_CIRCUIT_CLOSED NTStatus = 0xC00000D6 + STATUS_NO_SECURITY_ON_OBJECT NTStatus = 0xC00000D7 + STATUS_CANT_WAIT NTStatus = 0xC00000D8 + STATUS_PIPE_EMPTY NTStatus = 0xC00000D9 + STATUS_CANT_ACCESS_DOMAIN_INFO NTStatus = 0xC00000DA + STATUS_CANT_TERMINATE_SELF NTStatus = 0xC00000DB + STATUS_INVALID_SERVER_STATE NTStatus = 0xC00000DC + STATUS_INVALID_DOMAIN_STATE NTStatus = 0xC00000DD + STATUS_INVALID_DOMAIN_ROLE NTStatus = 0xC00000DE + STATUS_NO_SUCH_DOMAIN NTStatus = 0xC00000DF + STATUS_DOMAIN_EXISTS NTStatus = 0xC00000E0 + STATUS_DOMAIN_LIMIT_EXCEEDED NTStatus = 0xC00000E1 + STATUS_OPLOCK_NOT_GRANTED NTStatus = 0xC00000E2 + STATUS_INVALID_OPLOCK_PROTOCOL NTStatus = 0xC00000E3 + STATUS_INTERNAL_DB_CORRUPTION NTStatus = 0xC00000E4 + STATUS_INTERNAL_ERROR NTStatus = 0xC00000E5 + STATUS_GENERIC_NOT_MAPPED NTStatus = 0xC00000E6 + STATUS_BAD_DESCRIPTOR_FORMAT NTStatus = 0xC00000E7 + STATUS_INVALID_USER_BUFFER NTStatus = 0xC00000E8 + STATUS_UNEXPECTED_IO_ERROR NTStatus = 0xC00000E9 + STATUS_UNEXPECTED_MM_CREATE_ERR NTStatus = 0xC00000EA + STATUS_UNEXPECTED_MM_MAP_ERROR NTStatus = 0xC00000EB + STATUS_UNEXPECTED_MM_EXTEND_ERR NTStatus = 0xC00000EC + STATUS_NOT_LOGON_PROCESS NTStatus = 0xC00000ED + STATUS_LOGON_SESSION_EXISTS NTStatus = 0xC00000EE + STATUS_INVALID_PARAMETER_1 NTStatus = 0xC00000EF + STATUS_INVALID_PARAMETER_2 NTStatus = 0xC00000F0 + STATUS_INVALID_PARAMETER_3 NTStatus = 0xC00000F1 + 
STATUS_INVALID_PARAMETER_4 NTStatus = 0xC00000F2 + STATUS_INVALID_PARAMETER_5 NTStatus = 0xC00000F3 + STATUS_INVALID_PARAMETER_6 NTStatus = 0xC00000F4 + STATUS_INVALID_PARAMETER_7 NTStatus = 0xC00000F5 + STATUS_INVALID_PARAMETER_8 NTStatus = 0xC00000F6 + STATUS_INVALID_PARAMETER_9 NTStatus = 0xC00000F7 + STATUS_INVALID_PARAMETER_10 NTStatus = 0xC00000F8 + STATUS_INVALID_PARAMETER_11 NTStatus = 0xC00000F9 + STATUS_INVALID_PARAMETER_12 NTStatus = 0xC00000FA + STATUS_REDIRECTOR_NOT_STARTED NTStatus = 0xC00000FB + STATUS_REDIRECTOR_STARTED NTStatus = 0xC00000FC + STATUS_STACK_OVERFLOW NTStatus = 0xC00000FD + STATUS_NO_SUCH_PACKAGE NTStatus = 0xC00000FE + STATUS_BAD_FUNCTION_TABLE NTStatus = 0xC00000FF + STATUS_VARIABLE_NOT_FOUND NTStatus = 0xC0000100 + STATUS_DIRECTORY_NOT_EMPTY NTStatus = 0xC0000101 + STATUS_FILE_CORRUPT_ERROR NTStatus = 0xC0000102 + STATUS_NOT_A_DIRECTORY NTStatus = 0xC0000103 + STATUS_BAD_LOGON_SESSION_STATE NTStatus = 0xC0000104 + STATUS_LOGON_SESSION_COLLISION NTStatus = 0xC0000105 + STATUS_NAME_TOO_LONG NTStatus = 0xC0000106 + STATUS_FILES_OPEN NTStatus = 0xC0000107 + STATUS_CONNECTION_IN_USE NTStatus = 0xC0000108 + STATUS_MESSAGE_NOT_FOUND NTStatus = 0xC0000109 + STATUS_PROCESS_IS_TERMINATING NTStatus = 0xC000010A + STATUS_INVALID_LOGON_TYPE NTStatus = 0xC000010B + STATUS_NO_GUID_TRANSLATION NTStatus = 0xC000010C + STATUS_CANNOT_IMPERSONATE NTStatus = 0xC000010D + STATUS_IMAGE_ALREADY_LOADED NTStatus = 0xC000010E + STATUS_ABIOS_NOT_PRESENT NTStatus = 0xC000010F + STATUS_ABIOS_LID_NOT_EXIST NTStatus = 0xC0000110 + STATUS_ABIOS_LID_ALREADY_OWNED NTStatus = 0xC0000111 + STATUS_ABIOS_NOT_LID_OWNER NTStatus = 0xC0000112 + STATUS_ABIOS_INVALID_COMMAND NTStatus = 0xC0000113 + STATUS_ABIOS_INVALID_LID NTStatus = 0xC0000114 + STATUS_ABIOS_SELECTOR_NOT_AVAILABLE NTStatus = 0xC0000115 + STATUS_ABIOS_INVALID_SELECTOR NTStatus = 0xC0000116 + STATUS_NO_LDT NTStatus = 0xC0000117 + STATUS_INVALID_LDT_SIZE NTStatus = 0xC0000118 + STATUS_INVALID_LDT_OFFSET NTStatus = 0xC0000119 + STATUS_INVALID_LDT_DESCRIPTOR NTStatus = 0xC000011A + STATUS_INVALID_IMAGE_NE_FORMAT NTStatus = 0xC000011B + STATUS_RXACT_INVALID_STATE NTStatus = 0xC000011C + STATUS_RXACT_COMMIT_FAILURE NTStatus = 0xC000011D + STATUS_MAPPED_FILE_SIZE_ZERO NTStatus = 0xC000011E + STATUS_TOO_MANY_OPENED_FILES NTStatus = 0xC000011F + STATUS_CANCELLED NTStatus = 0xC0000120 + STATUS_CANNOT_DELETE NTStatus = 0xC0000121 + STATUS_INVALID_COMPUTER_NAME NTStatus = 0xC0000122 + STATUS_FILE_DELETED NTStatus = 0xC0000123 + STATUS_SPECIAL_ACCOUNT NTStatus = 0xC0000124 + STATUS_SPECIAL_GROUP NTStatus = 0xC0000125 + STATUS_SPECIAL_USER NTStatus = 0xC0000126 + STATUS_MEMBERS_PRIMARY_GROUP NTStatus = 0xC0000127 + STATUS_FILE_CLOSED NTStatus = 0xC0000128 + STATUS_TOO_MANY_THREADS NTStatus = 0xC0000129 + STATUS_THREAD_NOT_IN_PROCESS NTStatus = 0xC000012A + STATUS_TOKEN_ALREADY_IN_USE NTStatus = 0xC000012B + STATUS_PAGEFILE_QUOTA_EXCEEDED NTStatus = 0xC000012C + STATUS_COMMITMENT_LIMIT NTStatus = 0xC000012D + STATUS_INVALID_IMAGE_LE_FORMAT NTStatus = 0xC000012E + STATUS_INVALID_IMAGE_NOT_MZ NTStatus = 0xC000012F + STATUS_INVALID_IMAGE_PROTECT NTStatus = 0xC0000130 + STATUS_INVALID_IMAGE_WIN_16 NTStatus = 0xC0000131 + STATUS_LOGON_SERVER_CONFLICT NTStatus = 0xC0000132 + STATUS_TIME_DIFFERENCE_AT_DC NTStatus = 0xC0000133 + STATUS_SYNCHRONIZATION_REQUIRED NTStatus = 0xC0000134 + STATUS_DLL_NOT_FOUND NTStatus = 0xC0000135 + STATUS_OPEN_FAILED NTStatus = 0xC0000136 + STATUS_IO_PRIVILEGE_FAILED NTStatus = 0xC0000137 + STATUS_ORDINAL_NOT_FOUND NTStatus = 
0xC0000138 + STATUS_ENTRYPOINT_NOT_FOUND NTStatus = 0xC0000139 + STATUS_CONTROL_C_EXIT NTStatus = 0xC000013A + STATUS_LOCAL_DISCONNECT NTStatus = 0xC000013B + STATUS_REMOTE_DISCONNECT NTStatus = 0xC000013C + STATUS_REMOTE_RESOURCES NTStatus = 0xC000013D + STATUS_LINK_FAILED NTStatus = 0xC000013E + STATUS_LINK_TIMEOUT NTStatus = 0xC000013F + STATUS_INVALID_CONNECTION NTStatus = 0xC0000140 + STATUS_INVALID_ADDRESS NTStatus = 0xC0000141 + STATUS_DLL_INIT_FAILED NTStatus = 0xC0000142 + STATUS_MISSING_SYSTEMFILE NTStatus = 0xC0000143 + STATUS_UNHANDLED_EXCEPTION NTStatus = 0xC0000144 + STATUS_APP_INIT_FAILURE NTStatus = 0xC0000145 + STATUS_PAGEFILE_CREATE_FAILED NTStatus = 0xC0000146 + STATUS_NO_PAGEFILE NTStatus = 0xC0000147 + STATUS_INVALID_LEVEL NTStatus = 0xC0000148 + STATUS_WRONG_PASSWORD_CORE NTStatus = 0xC0000149 + STATUS_ILLEGAL_FLOAT_CONTEXT NTStatus = 0xC000014A + STATUS_PIPE_BROKEN NTStatus = 0xC000014B + STATUS_REGISTRY_CORRUPT NTStatus = 0xC000014C + STATUS_REGISTRY_IO_FAILED NTStatus = 0xC000014D + STATUS_NO_EVENT_PAIR NTStatus = 0xC000014E + STATUS_UNRECOGNIZED_VOLUME NTStatus = 0xC000014F + STATUS_SERIAL_NO_DEVICE_INITED NTStatus = 0xC0000150 + STATUS_NO_SUCH_ALIAS NTStatus = 0xC0000151 + STATUS_MEMBER_NOT_IN_ALIAS NTStatus = 0xC0000152 + STATUS_MEMBER_IN_ALIAS NTStatus = 0xC0000153 + STATUS_ALIAS_EXISTS NTStatus = 0xC0000154 + STATUS_LOGON_NOT_GRANTED NTStatus = 0xC0000155 + STATUS_TOO_MANY_SECRETS NTStatus = 0xC0000156 + STATUS_SECRET_TOO_LONG NTStatus = 0xC0000157 + STATUS_INTERNAL_DB_ERROR NTStatus = 0xC0000158 + STATUS_FULLSCREEN_MODE NTStatus = 0xC0000159 + STATUS_TOO_MANY_CONTEXT_IDS NTStatus = 0xC000015A + STATUS_LOGON_TYPE_NOT_GRANTED NTStatus = 0xC000015B + STATUS_NOT_REGISTRY_FILE NTStatus = 0xC000015C + STATUS_NT_CROSS_ENCRYPTION_REQUIRED NTStatus = 0xC000015D + STATUS_DOMAIN_CTRLR_CONFIG_ERROR NTStatus = 0xC000015E + STATUS_FT_MISSING_MEMBER NTStatus = 0xC000015F + STATUS_ILL_FORMED_SERVICE_ENTRY NTStatus = 0xC0000160 + STATUS_ILLEGAL_CHARACTER NTStatus = 0xC0000161 + STATUS_UNMAPPABLE_CHARACTER NTStatus = 0xC0000162 + STATUS_UNDEFINED_CHARACTER NTStatus = 0xC0000163 + STATUS_FLOPPY_VOLUME NTStatus = 0xC0000164 + STATUS_FLOPPY_ID_MARK_NOT_FOUND NTStatus = 0xC0000165 + STATUS_FLOPPY_WRONG_CYLINDER NTStatus = 0xC0000166 + STATUS_FLOPPY_UNKNOWN_ERROR NTStatus = 0xC0000167 + STATUS_FLOPPY_BAD_REGISTERS NTStatus = 0xC0000168 + STATUS_DISK_RECALIBRATE_FAILED NTStatus = 0xC0000169 + STATUS_DISK_OPERATION_FAILED NTStatus = 0xC000016A + STATUS_DISK_RESET_FAILED NTStatus = 0xC000016B + STATUS_SHARED_IRQ_BUSY NTStatus = 0xC000016C + STATUS_FT_ORPHANING NTStatus = 0xC000016D + STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT NTStatus = 0xC000016E + STATUS_PARTITION_FAILURE NTStatus = 0xC0000172 + STATUS_INVALID_BLOCK_LENGTH NTStatus = 0xC0000173 + STATUS_DEVICE_NOT_PARTITIONED NTStatus = 0xC0000174 + STATUS_UNABLE_TO_LOCK_MEDIA NTStatus = 0xC0000175 + STATUS_UNABLE_TO_UNLOAD_MEDIA NTStatus = 0xC0000176 + STATUS_EOM_OVERFLOW NTStatus = 0xC0000177 + STATUS_NO_MEDIA NTStatus = 0xC0000178 + STATUS_NO_SUCH_MEMBER NTStatus = 0xC000017A + STATUS_INVALID_MEMBER NTStatus = 0xC000017B + STATUS_KEY_DELETED NTStatus = 0xC000017C + STATUS_NO_LOG_SPACE NTStatus = 0xC000017D + STATUS_TOO_MANY_SIDS NTStatus = 0xC000017E + STATUS_LM_CROSS_ENCRYPTION_REQUIRED NTStatus = 0xC000017F + STATUS_KEY_HAS_CHILDREN NTStatus = 0xC0000180 + STATUS_CHILD_MUST_BE_VOLATILE NTStatus = 0xC0000181 + STATUS_DEVICE_CONFIGURATION_ERROR NTStatus = 0xC0000182 + STATUS_DRIVER_INTERNAL_ERROR NTStatus = 0xC0000183 + 
STATUS_INVALID_DEVICE_STATE NTStatus = 0xC0000184 + STATUS_IO_DEVICE_ERROR NTStatus = 0xC0000185 + STATUS_DEVICE_PROTOCOL_ERROR NTStatus = 0xC0000186 + STATUS_BACKUP_CONTROLLER NTStatus = 0xC0000187 + STATUS_LOG_FILE_FULL NTStatus = 0xC0000188 + STATUS_TOO_LATE NTStatus = 0xC0000189 + STATUS_NO_TRUST_LSA_SECRET NTStatus = 0xC000018A + STATUS_NO_TRUST_SAM_ACCOUNT NTStatus = 0xC000018B + STATUS_TRUSTED_DOMAIN_FAILURE NTStatus = 0xC000018C + STATUS_TRUSTED_RELATIONSHIP_FAILURE NTStatus = 0xC000018D + STATUS_EVENTLOG_FILE_CORRUPT NTStatus = 0xC000018E + STATUS_EVENTLOG_CANT_START NTStatus = 0xC000018F + STATUS_TRUST_FAILURE NTStatus = 0xC0000190 + STATUS_MUTANT_LIMIT_EXCEEDED NTStatus = 0xC0000191 + STATUS_NETLOGON_NOT_STARTED NTStatus = 0xC0000192 + STATUS_ACCOUNT_EXPIRED NTStatus = 0xC0000193 + STATUS_POSSIBLE_DEADLOCK NTStatus = 0xC0000194 + STATUS_NETWORK_CREDENTIAL_CONFLICT NTStatus = 0xC0000195 + STATUS_REMOTE_SESSION_LIMIT NTStatus = 0xC0000196 + STATUS_EVENTLOG_FILE_CHANGED NTStatus = 0xC0000197 + STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT NTStatus = 0xC0000198 + STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT NTStatus = 0xC0000199 + STATUS_NOLOGON_SERVER_TRUST_ACCOUNT NTStatus = 0xC000019A + STATUS_DOMAIN_TRUST_INCONSISTENT NTStatus = 0xC000019B + STATUS_FS_DRIVER_REQUIRED NTStatus = 0xC000019C + STATUS_IMAGE_ALREADY_LOADED_AS_DLL NTStatus = 0xC000019D + STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING NTStatus = 0xC000019E + STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME NTStatus = 0xC000019F + STATUS_SECURITY_STREAM_IS_INCONSISTENT NTStatus = 0xC00001A0 + STATUS_INVALID_LOCK_RANGE NTStatus = 0xC00001A1 + STATUS_INVALID_ACE_CONDITION NTStatus = 0xC00001A2 + STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT NTStatus = 0xC00001A3 + STATUS_NOTIFICATION_GUID_ALREADY_DEFINED NTStatus = 0xC00001A4 + STATUS_INVALID_EXCEPTION_HANDLER NTStatus = 0xC00001A5 + STATUS_DUPLICATE_PRIVILEGES NTStatus = 0xC00001A6 + STATUS_NOT_ALLOWED_ON_SYSTEM_FILE NTStatus = 0xC00001A7 + STATUS_REPAIR_NEEDED NTStatus = 0xC00001A8 + STATUS_QUOTA_NOT_ENABLED NTStatus = 0xC00001A9 + STATUS_NO_APPLICATION_PACKAGE NTStatus = 0xC00001AA + STATUS_FILE_METADATA_OPTIMIZATION_IN_PROGRESS NTStatus = 0xC00001AB + STATUS_NOT_SAME_OBJECT NTStatus = 0xC00001AC + STATUS_FATAL_MEMORY_EXHAUSTION NTStatus = 0xC00001AD + STATUS_ERROR_PROCESS_NOT_IN_JOB NTStatus = 0xC00001AE + STATUS_CPU_SET_INVALID NTStatus = 0xC00001AF + STATUS_IO_DEVICE_INVALID_DATA NTStatus = 0xC00001B0 + STATUS_IO_UNALIGNED_WRITE NTStatus = 0xC00001B1 + STATUS_NETWORK_OPEN_RESTRICTION NTStatus = 0xC0000201 + STATUS_NO_USER_SESSION_KEY NTStatus = 0xC0000202 + STATUS_USER_SESSION_DELETED NTStatus = 0xC0000203 + STATUS_RESOURCE_LANG_NOT_FOUND NTStatus = 0xC0000204 + STATUS_INSUFF_SERVER_RESOURCES NTStatus = 0xC0000205 + STATUS_INVALID_BUFFER_SIZE NTStatus = 0xC0000206 + STATUS_INVALID_ADDRESS_COMPONENT NTStatus = 0xC0000207 + STATUS_INVALID_ADDRESS_WILDCARD NTStatus = 0xC0000208 + STATUS_TOO_MANY_ADDRESSES NTStatus = 0xC0000209 + STATUS_ADDRESS_ALREADY_EXISTS NTStatus = 0xC000020A + STATUS_ADDRESS_CLOSED NTStatus = 0xC000020B + STATUS_CONNECTION_DISCONNECTED NTStatus = 0xC000020C + STATUS_CONNECTION_RESET NTStatus = 0xC000020D + STATUS_TOO_MANY_NODES NTStatus = 0xC000020E + STATUS_TRANSACTION_ABORTED NTStatus = 0xC000020F + STATUS_TRANSACTION_TIMED_OUT NTStatus = 0xC0000210 + STATUS_TRANSACTION_NO_RELEASE NTStatus = 0xC0000211 + STATUS_TRANSACTION_NO_MATCH NTStatus = 0xC0000212 + STATUS_TRANSACTION_RESPONDED NTStatus = 0xC0000213 + STATUS_TRANSACTION_INVALID_ID NTStatus = 0xC0000214 + 
STATUS_TRANSACTION_INVALID_TYPE NTStatus = 0xC0000215 + STATUS_NOT_SERVER_SESSION NTStatus = 0xC0000216 + STATUS_NOT_CLIENT_SESSION NTStatus = 0xC0000217 + STATUS_CANNOT_LOAD_REGISTRY_FILE NTStatus = 0xC0000218 + STATUS_DEBUG_ATTACH_FAILED NTStatus = 0xC0000219 + STATUS_SYSTEM_PROCESS_TERMINATED NTStatus = 0xC000021A + STATUS_DATA_NOT_ACCEPTED NTStatus = 0xC000021B + STATUS_NO_BROWSER_SERVERS_FOUND NTStatus = 0xC000021C + STATUS_VDM_HARD_ERROR NTStatus = 0xC000021D + STATUS_DRIVER_CANCEL_TIMEOUT NTStatus = 0xC000021E + STATUS_REPLY_MESSAGE_MISMATCH NTStatus = 0xC000021F + STATUS_MAPPED_ALIGNMENT NTStatus = 0xC0000220 + STATUS_IMAGE_CHECKSUM_MISMATCH NTStatus = 0xC0000221 + STATUS_LOST_WRITEBEHIND_DATA NTStatus = 0xC0000222 + STATUS_CLIENT_SERVER_PARAMETERS_INVALID NTStatus = 0xC0000223 + STATUS_PASSWORD_MUST_CHANGE NTStatus = 0xC0000224 + STATUS_NOT_FOUND NTStatus = 0xC0000225 + STATUS_NOT_TINY_STREAM NTStatus = 0xC0000226 + STATUS_RECOVERY_FAILURE NTStatus = 0xC0000227 + STATUS_STACK_OVERFLOW_READ NTStatus = 0xC0000228 + STATUS_FAIL_CHECK NTStatus = 0xC0000229 + STATUS_DUPLICATE_OBJECTID NTStatus = 0xC000022A + STATUS_OBJECTID_EXISTS NTStatus = 0xC000022B + STATUS_CONVERT_TO_LARGE NTStatus = 0xC000022C + STATUS_RETRY NTStatus = 0xC000022D + STATUS_FOUND_OUT_OF_SCOPE NTStatus = 0xC000022E + STATUS_ALLOCATE_BUCKET NTStatus = 0xC000022F + STATUS_PROPSET_NOT_FOUND NTStatus = 0xC0000230 + STATUS_MARSHALL_OVERFLOW NTStatus = 0xC0000231 + STATUS_INVALID_VARIANT NTStatus = 0xC0000232 + STATUS_DOMAIN_CONTROLLER_NOT_FOUND NTStatus = 0xC0000233 + STATUS_ACCOUNT_LOCKED_OUT NTStatus = 0xC0000234 + STATUS_HANDLE_NOT_CLOSABLE NTStatus = 0xC0000235 + STATUS_CONNECTION_REFUSED NTStatus = 0xC0000236 + STATUS_GRACEFUL_DISCONNECT NTStatus = 0xC0000237 + STATUS_ADDRESS_ALREADY_ASSOCIATED NTStatus = 0xC0000238 + STATUS_ADDRESS_NOT_ASSOCIATED NTStatus = 0xC0000239 + STATUS_CONNECTION_INVALID NTStatus = 0xC000023A + STATUS_CONNECTION_ACTIVE NTStatus = 0xC000023B + STATUS_NETWORK_UNREACHABLE NTStatus = 0xC000023C + STATUS_HOST_UNREACHABLE NTStatus = 0xC000023D + STATUS_PROTOCOL_UNREACHABLE NTStatus = 0xC000023E + STATUS_PORT_UNREACHABLE NTStatus = 0xC000023F + STATUS_REQUEST_ABORTED NTStatus = 0xC0000240 + STATUS_CONNECTION_ABORTED NTStatus = 0xC0000241 + STATUS_BAD_COMPRESSION_BUFFER NTStatus = 0xC0000242 + STATUS_USER_MAPPED_FILE NTStatus = 0xC0000243 + STATUS_AUDIT_FAILED NTStatus = 0xC0000244 + STATUS_TIMER_RESOLUTION_NOT_SET NTStatus = 0xC0000245 + STATUS_CONNECTION_COUNT_LIMIT NTStatus = 0xC0000246 + STATUS_LOGIN_TIME_RESTRICTION NTStatus = 0xC0000247 + STATUS_LOGIN_WKSTA_RESTRICTION NTStatus = 0xC0000248 + STATUS_IMAGE_MP_UP_MISMATCH NTStatus = 0xC0000249 + STATUS_INSUFFICIENT_LOGON_INFO NTStatus = 0xC0000250 + STATUS_BAD_DLL_ENTRYPOINT NTStatus = 0xC0000251 + STATUS_BAD_SERVICE_ENTRYPOINT NTStatus = 0xC0000252 + STATUS_LPC_REPLY_LOST NTStatus = 0xC0000253 + STATUS_IP_ADDRESS_CONFLICT1 NTStatus = 0xC0000254 + STATUS_IP_ADDRESS_CONFLICT2 NTStatus = 0xC0000255 + STATUS_REGISTRY_QUOTA_LIMIT NTStatus = 0xC0000256 + STATUS_PATH_NOT_COVERED NTStatus = 0xC0000257 + STATUS_NO_CALLBACK_ACTIVE NTStatus = 0xC0000258 + STATUS_LICENSE_QUOTA_EXCEEDED NTStatus = 0xC0000259 + STATUS_PWD_TOO_SHORT NTStatus = 0xC000025A + STATUS_PWD_TOO_RECENT NTStatus = 0xC000025B + STATUS_PWD_HISTORY_CONFLICT NTStatus = 0xC000025C + STATUS_PLUGPLAY_NO_DEVICE NTStatus = 0xC000025E + STATUS_UNSUPPORTED_COMPRESSION NTStatus = 0xC000025F + STATUS_INVALID_HW_PROFILE NTStatus = 0xC0000260 + STATUS_INVALID_PLUGPLAY_DEVICE_PATH NTStatus = 
0xC0000261 + STATUS_DRIVER_ORDINAL_NOT_FOUND NTStatus = 0xC0000262 + STATUS_DRIVER_ENTRYPOINT_NOT_FOUND NTStatus = 0xC0000263 + STATUS_RESOURCE_NOT_OWNED NTStatus = 0xC0000264 + STATUS_TOO_MANY_LINKS NTStatus = 0xC0000265 + STATUS_QUOTA_LIST_INCONSISTENT NTStatus = 0xC0000266 + STATUS_FILE_IS_OFFLINE NTStatus = 0xC0000267 + STATUS_EVALUATION_EXPIRATION NTStatus = 0xC0000268 + STATUS_ILLEGAL_DLL_RELOCATION NTStatus = 0xC0000269 + STATUS_LICENSE_VIOLATION NTStatus = 0xC000026A + STATUS_DLL_INIT_FAILED_LOGOFF NTStatus = 0xC000026B + STATUS_DRIVER_UNABLE_TO_LOAD NTStatus = 0xC000026C + STATUS_DFS_UNAVAILABLE NTStatus = 0xC000026D + STATUS_VOLUME_DISMOUNTED NTStatus = 0xC000026E + STATUS_WX86_INTERNAL_ERROR NTStatus = 0xC000026F + STATUS_WX86_FLOAT_STACK_CHECK NTStatus = 0xC0000270 + STATUS_VALIDATE_CONTINUE NTStatus = 0xC0000271 + STATUS_NO_MATCH NTStatus = 0xC0000272 + STATUS_NO_MORE_MATCHES NTStatus = 0xC0000273 + STATUS_NOT_A_REPARSE_POINT NTStatus = 0xC0000275 + STATUS_IO_REPARSE_TAG_INVALID NTStatus = 0xC0000276 + STATUS_IO_REPARSE_TAG_MISMATCH NTStatus = 0xC0000277 + STATUS_IO_REPARSE_DATA_INVALID NTStatus = 0xC0000278 + STATUS_IO_REPARSE_TAG_NOT_HANDLED NTStatus = 0xC0000279 + STATUS_PWD_TOO_LONG NTStatus = 0xC000027A + STATUS_STOWED_EXCEPTION NTStatus = 0xC000027B + STATUS_CONTEXT_STOWED_EXCEPTION NTStatus = 0xC000027C + STATUS_REPARSE_POINT_NOT_RESOLVED NTStatus = 0xC0000280 + STATUS_DIRECTORY_IS_A_REPARSE_POINT NTStatus = 0xC0000281 + STATUS_RANGE_LIST_CONFLICT NTStatus = 0xC0000282 + STATUS_SOURCE_ELEMENT_EMPTY NTStatus = 0xC0000283 + STATUS_DESTINATION_ELEMENT_FULL NTStatus = 0xC0000284 + STATUS_ILLEGAL_ELEMENT_ADDRESS NTStatus = 0xC0000285 + STATUS_MAGAZINE_NOT_PRESENT NTStatus = 0xC0000286 + STATUS_REINITIALIZATION_NEEDED NTStatus = 0xC0000287 + STATUS_DEVICE_REQUIRES_CLEANING NTStatus = 0x80000288 + STATUS_DEVICE_DOOR_OPEN NTStatus = 0x80000289 + STATUS_ENCRYPTION_FAILED NTStatus = 0xC000028A + STATUS_DECRYPTION_FAILED NTStatus = 0xC000028B + STATUS_RANGE_NOT_FOUND NTStatus = 0xC000028C + STATUS_NO_RECOVERY_POLICY NTStatus = 0xC000028D + STATUS_NO_EFS NTStatus = 0xC000028E + STATUS_WRONG_EFS NTStatus = 0xC000028F + STATUS_NO_USER_KEYS NTStatus = 0xC0000290 + STATUS_FILE_NOT_ENCRYPTED NTStatus = 0xC0000291 + STATUS_NOT_EXPORT_FORMAT NTStatus = 0xC0000292 + STATUS_FILE_ENCRYPTED NTStatus = 0xC0000293 + STATUS_WAKE_SYSTEM NTStatus = 0x40000294 + STATUS_WMI_GUID_NOT_FOUND NTStatus = 0xC0000295 + STATUS_WMI_INSTANCE_NOT_FOUND NTStatus = 0xC0000296 + STATUS_WMI_ITEMID_NOT_FOUND NTStatus = 0xC0000297 + STATUS_WMI_TRY_AGAIN NTStatus = 0xC0000298 + STATUS_SHARED_POLICY NTStatus = 0xC0000299 + STATUS_POLICY_OBJECT_NOT_FOUND NTStatus = 0xC000029A + STATUS_POLICY_ONLY_IN_DS NTStatus = 0xC000029B + STATUS_VOLUME_NOT_UPGRADED NTStatus = 0xC000029C + STATUS_REMOTE_STORAGE_NOT_ACTIVE NTStatus = 0xC000029D + STATUS_REMOTE_STORAGE_MEDIA_ERROR NTStatus = 0xC000029E + STATUS_NO_TRACKING_SERVICE NTStatus = 0xC000029F + STATUS_SERVER_SID_MISMATCH NTStatus = 0xC00002A0 + STATUS_DS_NO_ATTRIBUTE_OR_VALUE NTStatus = 0xC00002A1 + STATUS_DS_INVALID_ATTRIBUTE_SYNTAX NTStatus = 0xC00002A2 + STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED NTStatus = 0xC00002A3 + STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS NTStatus = 0xC00002A4 + STATUS_DS_BUSY NTStatus = 0xC00002A5 + STATUS_DS_UNAVAILABLE NTStatus = 0xC00002A6 + STATUS_DS_NO_RIDS_ALLOCATED NTStatus = 0xC00002A7 + STATUS_DS_NO_MORE_RIDS NTStatus = 0xC00002A8 + STATUS_DS_INCORRECT_ROLE_OWNER NTStatus = 0xC00002A9 + STATUS_DS_RIDMGR_INIT_ERROR NTStatus = 0xC00002AA + 
STATUS_DS_OBJ_CLASS_VIOLATION NTStatus = 0xC00002AB + STATUS_DS_CANT_ON_NON_LEAF NTStatus = 0xC00002AC + STATUS_DS_CANT_ON_RDN NTStatus = 0xC00002AD + STATUS_DS_CANT_MOD_OBJ_CLASS NTStatus = 0xC00002AE + STATUS_DS_CROSS_DOM_MOVE_FAILED NTStatus = 0xC00002AF + STATUS_DS_GC_NOT_AVAILABLE NTStatus = 0xC00002B0 + STATUS_DIRECTORY_SERVICE_REQUIRED NTStatus = 0xC00002B1 + STATUS_REPARSE_ATTRIBUTE_CONFLICT NTStatus = 0xC00002B2 + STATUS_CANT_ENABLE_DENY_ONLY NTStatus = 0xC00002B3 + STATUS_FLOAT_MULTIPLE_FAULTS NTStatus = 0xC00002B4 + STATUS_FLOAT_MULTIPLE_TRAPS NTStatus = 0xC00002B5 + STATUS_DEVICE_REMOVED NTStatus = 0xC00002B6 + STATUS_JOURNAL_DELETE_IN_PROGRESS NTStatus = 0xC00002B7 + STATUS_JOURNAL_NOT_ACTIVE NTStatus = 0xC00002B8 + STATUS_NOINTERFACE NTStatus = 0xC00002B9 + STATUS_DS_RIDMGR_DISABLED NTStatus = 0xC00002BA + STATUS_DS_ADMIN_LIMIT_EXCEEDED NTStatus = 0xC00002C1 + STATUS_DRIVER_FAILED_SLEEP NTStatus = 0xC00002C2 + STATUS_MUTUAL_AUTHENTICATION_FAILED NTStatus = 0xC00002C3 + STATUS_CORRUPT_SYSTEM_FILE NTStatus = 0xC00002C4 + STATUS_DATATYPE_MISALIGNMENT_ERROR NTStatus = 0xC00002C5 + STATUS_WMI_READ_ONLY NTStatus = 0xC00002C6 + STATUS_WMI_SET_FAILURE NTStatus = 0xC00002C7 + STATUS_COMMITMENT_MINIMUM NTStatus = 0xC00002C8 + STATUS_REG_NAT_CONSUMPTION NTStatus = 0xC00002C9 + STATUS_TRANSPORT_FULL NTStatus = 0xC00002CA + STATUS_DS_SAM_INIT_FAILURE NTStatus = 0xC00002CB + STATUS_ONLY_IF_CONNECTED NTStatus = 0xC00002CC + STATUS_DS_SENSITIVE_GROUP_VIOLATION NTStatus = 0xC00002CD + STATUS_PNP_RESTART_ENUMERATION NTStatus = 0xC00002CE + STATUS_JOURNAL_ENTRY_DELETED NTStatus = 0xC00002CF + STATUS_DS_CANT_MOD_PRIMARYGROUPID NTStatus = 0xC00002D0 + STATUS_SYSTEM_IMAGE_BAD_SIGNATURE NTStatus = 0xC00002D1 + STATUS_PNP_REBOOT_REQUIRED NTStatus = 0xC00002D2 + STATUS_POWER_STATE_INVALID NTStatus = 0xC00002D3 + STATUS_DS_INVALID_GROUP_TYPE NTStatus = 0xC00002D4 + STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN NTStatus = 0xC00002D5 + STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN NTStatus = 0xC00002D6 + STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER NTStatus = 0xC00002D7 + STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER NTStatus = 0xC00002D8 + STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER NTStatus = 0xC00002D9 + STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER NTStatus = 0xC00002DA + STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER NTStatus = 0xC00002DB + STATUS_DS_HAVE_PRIMARY_MEMBERS NTStatus = 0xC00002DC + STATUS_WMI_NOT_SUPPORTED NTStatus = 0xC00002DD + STATUS_INSUFFICIENT_POWER NTStatus = 0xC00002DE + STATUS_SAM_NEED_BOOTKEY_PASSWORD NTStatus = 0xC00002DF + STATUS_SAM_NEED_BOOTKEY_FLOPPY NTStatus = 0xC00002E0 + STATUS_DS_CANT_START NTStatus = 0xC00002E1 + STATUS_DS_INIT_FAILURE NTStatus = 0xC00002E2 + STATUS_SAM_INIT_FAILURE NTStatus = 0xC00002E3 + STATUS_DS_GC_REQUIRED NTStatus = 0xC00002E4 + STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY NTStatus = 0xC00002E5 + STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS NTStatus = 0xC00002E6 + STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED NTStatus = 0xC00002E7 + STATUS_MULTIPLE_FAULT_VIOLATION NTStatus = 0xC00002E8 + STATUS_CURRENT_DOMAIN_NOT_ALLOWED NTStatus = 0xC00002E9 + STATUS_CANNOT_MAKE NTStatus = 0xC00002EA + STATUS_SYSTEM_SHUTDOWN NTStatus = 0xC00002EB + STATUS_DS_INIT_FAILURE_CONSOLE NTStatus = 0xC00002EC + STATUS_DS_SAM_INIT_FAILURE_CONSOLE NTStatus = 0xC00002ED + STATUS_UNFINISHED_CONTEXT_DELETED NTStatus = 0xC00002EE + STATUS_NO_TGT_REPLY NTStatus = 0xC00002EF + STATUS_OBJECTID_NOT_FOUND NTStatus = 0xC00002F0 + STATUS_NO_IP_ADDRESSES NTStatus = 0xC00002F1 + STATUS_WRONG_CREDENTIAL_HANDLE 
NTStatus = 0xC00002F2 + STATUS_CRYPTO_SYSTEM_INVALID NTStatus = 0xC00002F3 + STATUS_MAX_REFERRALS_EXCEEDED NTStatus = 0xC00002F4 + STATUS_MUST_BE_KDC NTStatus = 0xC00002F5 + STATUS_STRONG_CRYPTO_NOT_SUPPORTED NTStatus = 0xC00002F6 + STATUS_TOO_MANY_PRINCIPALS NTStatus = 0xC00002F7 + STATUS_NO_PA_DATA NTStatus = 0xC00002F8 + STATUS_PKINIT_NAME_MISMATCH NTStatus = 0xC00002F9 + STATUS_SMARTCARD_LOGON_REQUIRED NTStatus = 0xC00002FA + STATUS_KDC_INVALID_REQUEST NTStatus = 0xC00002FB + STATUS_KDC_UNABLE_TO_REFER NTStatus = 0xC00002FC + STATUS_KDC_UNKNOWN_ETYPE NTStatus = 0xC00002FD + STATUS_SHUTDOWN_IN_PROGRESS NTStatus = 0xC00002FE + STATUS_SERVER_SHUTDOWN_IN_PROGRESS NTStatus = 0xC00002FF + STATUS_NOT_SUPPORTED_ON_SBS NTStatus = 0xC0000300 + STATUS_WMI_GUID_DISCONNECTED NTStatus = 0xC0000301 + STATUS_WMI_ALREADY_DISABLED NTStatus = 0xC0000302 + STATUS_WMI_ALREADY_ENABLED NTStatus = 0xC0000303 + STATUS_MFT_TOO_FRAGMENTED NTStatus = 0xC0000304 + STATUS_COPY_PROTECTION_FAILURE NTStatus = 0xC0000305 + STATUS_CSS_AUTHENTICATION_FAILURE NTStatus = 0xC0000306 + STATUS_CSS_KEY_NOT_PRESENT NTStatus = 0xC0000307 + STATUS_CSS_KEY_NOT_ESTABLISHED NTStatus = 0xC0000308 + STATUS_CSS_SCRAMBLED_SECTOR NTStatus = 0xC0000309 + STATUS_CSS_REGION_MISMATCH NTStatus = 0xC000030A + STATUS_CSS_RESETS_EXHAUSTED NTStatus = 0xC000030B + STATUS_PASSWORD_CHANGE_REQUIRED NTStatus = 0xC000030C + STATUS_LOST_MODE_LOGON_RESTRICTION NTStatus = 0xC000030D + STATUS_PKINIT_FAILURE NTStatus = 0xC0000320 + STATUS_SMARTCARD_SUBSYSTEM_FAILURE NTStatus = 0xC0000321 + STATUS_NO_KERB_KEY NTStatus = 0xC0000322 + STATUS_HOST_DOWN NTStatus = 0xC0000350 + STATUS_UNSUPPORTED_PREAUTH NTStatus = 0xC0000351 + STATUS_EFS_ALG_BLOB_TOO_BIG NTStatus = 0xC0000352 + STATUS_PORT_NOT_SET NTStatus = 0xC0000353 + STATUS_DEBUGGER_INACTIVE NTStatus = 0xC0000354 + STATUS_DS_VERSION_CHECK_FAILURE NTStatus = 0xC0000355 + STATUS_AUDITING_DISABLED NTStatus = 0xC0000356 + STATUS_PRENT4_MACHINE_ACCOUNT NTStatus = 0xC0000357 + STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER NTStatus = 0xC0000358 + STATUS_INVALID_IMAGE_WIN_32 NTStatus = 0xC0000359 + STATUS_INVALID_IMAGE_WIN_64 NTStatus = 0xC000035A + STATUS_BAD_BINDINGS NTStatus = 0xC000035B + STATUS_NETWORK_SESSION_EXPIRED NTStatus = 0xC000035C + STATUS_APPHELP_BLOCK NTStatus = 0xC000035D + STATUS_ALL_SIDS_FILTERED NTStatus = 0xC000035E + STATUS_NOT_SAFE_MODE_DRIVER NTStatus = 0xC000035F + STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT NTStatus = 0xC0000361 + STATUS_ACCESS_DISABLED_BY_POLICY_PATH NTStatus = 0xC0000362 + STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER NTStatus = 0xC0000363 + STATUS_ACCESS_DISABLED_BY_POLICY_OTHER NTStatus = 0xC0000364 + STATUS_FAILED_DRIVER_ENTRY NTStatus = 0xC0000365 + STATUS_DEVICE_ENUMERATION_ERROR NTStatus = 0xC0000366 + STATUS_MOUNT_POINT_NOT_RESOLVED NTStatus = 0xC0000368 + STATUS_INVALID_DEVICE_OBJECT_PARAMETER NTStatus = 0xC0000369 + STATUS_MCA_OCCURED NTStatus = 0xC000036A + STATUS_DRIVER_BLOCKED_CRITICAL NTStatus = 0xC000036B + STATUS_DRIVER_BLOCKED NTStatus = 0xC000036C + STATUS_DRIVER_DATABASE_ERROR NTStatus = 0xC000036D + STATUS_SYSTEM_HIVE_TOO_LARGE NTStatus = 0xC000036E + STATUS_INVALID_IMPORT_OF_NON_DLL NTStatus = 0xC000036F + STATUS_DS_SHUTTING_DOWN NTStatus = 0x40000370 + STATUS_NO_SECRETS NTStatus = 0xC0000371 + STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY NTStatus = 0xC0000372 + STATUS_FAILED_STACK_SWITCH NTStatus = 0xC0000373 + STATUS_HEAP_CORRUPTION NTStatus = 0xC0000374 + STATUS_SMARTCARD_WRONG_PIN NTStatus = 0xC0000380 + STATUS_SMARTCARD_CARD_BLOCKED NTStatus = 0xC0000381 + 
STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED NTStatus = 0xC0000382 + STATUS_SMARTCARD_NO_CARD NTStatus = 0xC0000383 + STATUS_SMARTCARD_NO_KEY_CONTAINER NTStatus = 0xC0000384 + STATUS_SMARTCARD_NO_CERTIFICATE NTStatus = 0xC0000385 + STATUS_SMARTCARD_NO_KEYSET NTStatus = 0xC0000386 + STATUS_SMARTCARD_IO_ERROR NTStatus = 0xC0000387 + STATUS_DOWNGRADE_DETECTED NTStatus = 0xC0000388 + STATUS_SMARTCARD_CERT_REVOKED NTStatus = 0xC0000389 + STATUS_ISSUING_CA_UNTRUSTED NTStatus = 0xC000038A + STATUS_REVOCATION_OFFLINE_C NTStatus = 0xC000038B + STATUS_PKINIT_CLIENT_FAILURE NTStatus = 0xC000038C + STATUS_SMARTCARD_CERT_EXPIRED NTStatus = 0xC000038D + STATUS_DRIVER_FAILED_PRIOR_UNLOAD NTStatus = 0xC000038E + STATUS_SMARTCARD_SILENT_CONTEXT NTStatus = 0xC000038F + STATUS_PER_USER_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000401 + STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000402 + STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED NTStatus = 0xC0000403 + STATUS_DS_NAME_NOT_UNIQUE NTStatus = 0xC0000404 + STATUS_DS_DUPLICATE_ID_FOUND NTStatus = 0xC0000405 + STATUS_DS_GROUP_CONVERSION_ERROR NTStatus = 0xC0000406 + STATUS_VOLSNAP_PREPARE_HIBERNATE NTStatus = 0xC0000407 + STATUS_USER2USER_REQUIRED NTStatus = 0xC0000408 + STATUS_STACK_BUFFER_OVERRUN NTStatus = 0xC0000409 + STATUS_NO_S4U_PROT_SUPPORT NTStatus = 0xC000040A + STATUS_CROSSREALM_DELEGATION_FAILURE NTStatus = 0xC000040B + STATUS_REVOCATION_OFFLINE_KDC NTStatus = 0xC000040C + STATUS_ISSUING_CA_UNTRUSTED_KDC NTStatus = 0xC000040D + STATUS_KDC_CERT_EXPIRED NTStatus = 0xC000040E + STATUS_KDC_CERT_REVOKED NTStatus = 0xC000040F + STATUS_PARAMETER_QUOTA_EXCEEDED NTStatus = 0xC0000410 + STATUS_HIBERNATION_FAILURE NTStatus = 0xC0000411 + STATUS_DELAY_LOAD_FAILED NTStatus = 0xC0000412 + STATUS_AUTHENTICATION_FIREWALL_FAILED NTStatus = 0xC0000413 + STATUS_VDM_DISALLOWED NTStatus = 0xC0000414 + STATUS_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC0000415 + STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE NTStatus = 0xC0000416 + STATUS_INVALID_CRUNTIME_PARAMETER NTStatus = 0xC0000417 + STATUS_NTLM_BLOCKED NTStatus = 0xC0000418 + STATUS_DS_SRC_SID_EXISTS_IN_FOREST NTStatus = 0xC0000419 + STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST NTStatus = 0xC000041A + STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST NTStatus = 0xC000041B + STATUS_INVALID_USER_PRINCIPAL_NAME NTStatus = 0xC000041C + STATUS_FATAL_USER_CALLBACK_EXCEPTION NTStatus = 0xC000041D + STATUS_ASSERTION_FAILURE NTStatus = 0xC0000420 + STATUS_VERIFIER_STOP NTStatus = 0xC0000421 + STATUS_CALLBACK_POP_STACK NTStatus = 0xC0000423 + STATUS_INCOMPATIBLE_DRIVER_BLOCKED NTStatus = 0xC0000424 + STATUS_HIVE_UNLOADED NTStatus = 0xC0000425 + STATUS_COMPRESSION_DISABLED NTStatus = 0xC0000426 + STATUS_FILE_SYSTEM_LIMITATION NTStatus = 0xC0000427 + STATUS_INVALID_IMAGE_HASH NTStatus = 0xC0000428 + STATUS_NOT_CAPABLE NTStatus = 0xC0000429 + STATUS_REQUEST_OUT_OF_SEQUENCE NTStatus = 0xC000042A + STATUS_IMPLEMENTATION_LIMIT NTStatus = 0xC000042B + STATUS_ELEVATION_REQUIRED NTStatus = 0xC000042C + STATUS_NO_SECURITY_CONTEXT NTStatus = 0xC000042D + STATUS_PKU2U_CERT_FAILURE NTStatus = 0xC000042F + STATUS_BEYOND_VDL NTStatus = 0xC0000432 + STATUS_ENCOUNTERED_WRITE_IN_PROGRESS NTStatus = 0xC0000433 + STATUS_PTE_CHANGED NTStatus = 0xC0000434 + STATUS_PURGE_FAILED NTStatus = 0xC0000435 + STATUS_CRED_REQUIRES_CONFIRMATION NTStatus = 0xC0000440 + STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE NTStatus = 0xC0000441 + STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER NTStatus = 0xC0000442 + STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE NTStatus = 0xC0000443 + 
STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE NTStatus = 0xC0000444 + STATUS_CS_ENCRYPTION_FILE_NOT_CSE NTStatus = 0xC0000445 + STATUS_INVALID_LABEL NTStatus = 0xC0000446 + STATUS_DRIVER_PROCESS_TERMINATED NTStatus = 0xC0000450 + STATUS_AMBIGUOUS_SYSTEM_DEVICE NTStatus = 0xC0000451 + STATUS_SYSTEM_DEVICE_NOT_FOUND NTStatus = 0xC0000452 + STATUS_RESTART_BOOT_APPLICATION NTStatus = 0xC0000453 + STATUS_INSUFFICIENT_NVRAM_RESOURCES NTStatus = 0xC0000454 + STATUS_INVALID_SESSION NTStatus = 0xC0000455 + STATUS_THREAD_ALREADY_IN_SESSION NTStatus = 0xC0000456 + STATUS_THREAD_NOT_IN_SESSION NTStatus = 0xC0000457 + STATUS_INVALID_WEIGHT NTStatus = 0xC0000458 + STATUS_REQUEST_PAUSED NTStatus = 0xC0000459 + STATUS_NO_RANGES_PROCESSED NTStatus = 0xC0000460 + STATUS_DISK_RESOURCES_EXHAUSTED NTStatus = 0xC0000461 + STATUS_NEEDS_REMEDIATION NTStatus = 0xC0000462 + STATUS_DEVICE_FEATURE_NOT_SUPPORTED NTStatus = 0xC0000463 + STATUS_DEVICE_UNREACHABLE NTStatus = 0xC0000464 + STATUS_INVALID_TOKEN NTStatus = 0xC0000465 + STATUS_SERVER_UNAVAILABLE NTStatus = 0xC0000466 + STATUS_FILE_NOT_AVAILABLE NTStatus = 0xC0000467 + STATUS_DEVICE_INSUFFICIENT_RESOURCES NTStatus = 0xC0000468 + STATUS_PACKAGE_UPDATING NTStatus = 0xC0000469 + STATUS_NOT_READ_FROM_COPY NTStatus = 0xC000046A + STATUS_FT_WRITE_FAILURE NTStatus = 0xC000046B + STATUS_FT_DI_SCAN_REQUIRED NTStatus = 0xC000046C + STATUS_OBJECT_NOT_EXTERNALLY_BACKED NTStatus = 0xC000046D + STATUS_EXTERNAL_BACKING_PROVIDER_UNKNOWN NTStatus = 0xC000046E + STATUS_COMPRESSION_NOT_BENEFICIAL NTStatus = 0xC000046F + STATUS_DATA_CHECKSUM_ERROR NTStatus = 0xC0000470 + STATUS_INTERMIXED_KERNEL_EA_OPERATION NTStatus = 0xC0000471 + STATUS_TRIM_READ_ZERO_NOT_SUPPORTED NTStatus = 0xC0000472 + STATUS_TOO_MANY_SEGMENT_DESCRIPTORS NTStatus = 0xC0000473 + STATUS_INVALID_OFFSET_ALIGNMENT NTStatus = 0xC0000474 + STATUS_INVALID_FIELD_IN_PARAMETER_LIST NTStatus = 0xC0000475 + STATUS_OPERATION_IN_PROGRESS NTStatus = 0xC0000476 + STATUS_INVALID_INITIATOR_TARGET_PATH NTStatus = 0xC0000477 + STATUS_SCRUB_DATA_DISABLED NTStatus = 0xC0000478 + STATUS_NOT_REDUNDANT_STORAGE NTStatus = 0xC0000479 + STATUS_RESIDENT_FILE_NOT_SUPPORTED NTStatus = 0xC000047A + STATUS_COMPRESSED_FILE_NOT_SUPPORTED NTStatus = 0xC000047B + STATUS_DIRECTORY_NOT_SUPPORTED NTStatus = 0xC000047C + STATUS_IO_OPERATION_TIMEOUT NTStatus = 0xC000047D + STATUS_SYSTEM_NEEDS_REMEDIATION NTStatus = 0xC000047E + STATUS_APPX_INTEGRITY_FAILURE_CLR_NGEN NTStatus = 0xC000047F + STATUS_SHARE_UNAVAILABLE NTStatus = 0xC0000480 + STATUS_APISET_NOT_HOSTED NTStatus = 0xC0000481 + STATUS_APISET_NOT_PRESENT NTStatus = 0xC0000482 + STATUS_DEVICE_HARDWARE_ERROR NTStatus = 0xC0000483 + STATUS_FIRMWARE_SLOT_INVALID NTStatus = 0xC0000484 + STATUS_FIRMWARE_IMAGE_INVALID NTStatus = 0xC0000485 + STATUS_STORAGE_TOPOLOGY_ID_MISMATCH NTStatus = 0xC0000486 + STATUS_WIM_NOT_BOOTABLE NTStatus = 0xC0000487 + STATUS_BLOCKED_BY_PARENTAL_CONTROLS NTStatus = 0xC0000488 + STATUS_NEEDS_REGISTRATION NTStatus = 0xC0000489 + STATUS_QUOTA_ACTIVITY NTStatus = 0xC000048A + STATUS_CALLBACK_INVOKE_INLINE NTStatus = 0xC000048B + STATUS_BLOCK_TOO_MANY_REFERENCES NTStatus = 0xC000048C + STATUS_MARKED_TO_DISALLOW_WRITES NTStatus = 0xC000048D + STATUS_NETWORK_ACCESS_DENIED_EDP NTStatus = 0xC000048E + STATUS_ENCLAVE_FAILURE NTStatus = 0xC000048F + STATUS_PNP_NO_COMPAT_DRIVERS NTStatus = 0xC0000490 + STATUS_PNP_DRIVER_PACKAGE_NOT_FOUND NTStatus = 0xC0000491 + STATUS_PNP_DRIVER_CONFIGURATION_NOT_FOUND NTStatus = 0xC0000492 + STATUS_PNP_DRIVER_CONFIGURATION_INCOMPLETE NTStatus = 0xC0000493 
+ STATUS_PNP_FUNCTION_DRIVER_REQUIRED NTStatus = 0xC0000494 + STATUS_PNP_DEVICE_CONFIGURATION_PENDING NTStatus = 0xC0000495 + STATUS_DEVICE_HINT_NAME_BUFFER_TOO_SMALL NTStatus = 0xC0000496 + STATUS_PACKAGE_NOT_AVAILABLE NTStatus = 0xC0000497 + STATUS_DEVICE_IN_MAINTENANCE NTStatus = 0xC0000499 + STATUS_NOT_SUPPORTED_ON_DAX NTStatus = 0xC000049A + STATUS_FREE_SPACE_TOO_FRAGMENTED NTStatus = 0xC000049B + STATUS_DAX_MAPPING_EXISTS NTStatus = 0xC000049C + STATUS_CHILD_PROCESS_BLOCKED NTStatus = 0xC000049D + STATUS_STORAGE_LOST_DATA_PERSISTENCE NTStatus = 0xC000049E + STATUS_VRF_CFG_ENABLED NTStatus = 0xC000049F + STATUS_PARTITION_TERMINATING NTStatus = 0xC00004A0 + STATUS_EXTERNAL_SYSKEY_NOT_SUPPORTED NTStatus = 0xC00004A1 + STATUS_ENCLAVE_VIOLATION NTStatus = 0xC00004A2 + STATUS_FILE_PROTECTED_UNDER_DPL NTStatus = 0xC00004A3 + STATUS_VOLUME_NOT_CLUSTER_ALIGNED NTStatus = 0xC00004A4 + STATUS_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND NTStatus = 0xC00004A5 + STATUS_APPX_FILE_NOT_ENCRYPTED NTStatus = 0xC00004A6 + STATUS_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED NTStatus = 0xC00004A7 + STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET NTStatus = 0xC00004A8 + STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE NTStatus = 0xC00004A9 + STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER NTStatus = 0xC00004AA + STATUS_FT_READ_FAILURE NTStatus = 0xC00004AB + STATUS_PATCH_CONFLICT NTStatus = 0xC00004AC + STATUS_STORAGE_RESERVE_ID_INVALID NTStatus = 0xC00004AD + STATUS_STORAGE_RESERVE_DOES_NOT_EXIST NTStatus = 0xC00004AE + STATUS_STORAGE_RESERVE_ALREADY_EXISTS NTStatus = 0xC00004AF + STATUS_STORAGE_RESERVE_NOT_EMPTY NTStatus = 0xC00004B0 + STATUS_NOT_A_DAX_VOLUME NTStatus = 0xC00004B1 + STATUS_NOT_DAX_MAPPABLE NTStatus = 0xC00004B2 + STATUS_CASE_DIFFERING_NAMES_IN_DIR NTStatus = 0xC00004B3 + STATUS_FILE_NOT_SUPPORTED NTStatus = 0xC00004B4 + STATUS_NOT_SUPPORTED_WITH_BTT NTStatus = 0xC00004B5 + STATUS_ENCRYPTION_DISABLED NTStatus = 0xC00004B6 + STATUS_ENCRYPTING_METADATA_DISALLOWED NTStatus = 0xC00004B7 + STATUS_CANT_CLEAR_ENCRYPTION_FLAG NTStatus = 0xC00004B8 + STATUS_INVALID_TASK_NAME NTStatus = 0xC0000500 + STATUS_INVALID_TASK_INDEX NTStatus = 0xC0000501 + STATUS_THREAD_ALREADY_IN_TASK NTStatus = 0xC0000502 + STATUS_CALLBACK_BYPASS NTStatus = 0xC0000503 + STATUS_UNDEFINED_SCOPE NTStatus = 0xC0000504 + STATUS_INVALID_CAP NTStatus = 0xC0000505 + STATUS_NOT_GUI_PROCESS NTStatus = 0xC0000506 + STATUS_DEVICE_HUNG NTStatus = 0xC0000507 + STATUS_CONTAINER_ASSIGNED NTStatus = 0xC0000508 + STATUS_JOB_NO_CONTAINER NTStatus = 0xC0000509 + STATUS_DEVICE_UNRESPONSIVE NTStatus = 0xC000050A + STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B + STATUS_ATTRIBUTE_NOT_PRESENT NTStatus = 0xC000050C + STATUS_NOT_A_TIERED_VOLUME NTStatus = 0xC000050D + STATUS_ALREADY_HAS_STREAM_ID NTStatus = 0xC000050E + STATUS_JOB_NOT_EMPTY NTStatus = 0xC000050F + STATUS_ALREADY_INITIALIZED NTStatus = 0xC0000510 + STATUS_ENCLAVE_NOT_TERMINATED NTStatus = 0xC0000511 + STATUS_ENCLAVE_IS_TERMINATING NTStatus = 0xC0000512 + STATUS_SMB1_NOT_AVAILABLE NTStatus = 0xC0000513 + STATUS_SMR_GARBAGE_COLLECTION_REQUIRED NTStatus = 0xC0000514 + STATUS_INTERRUPTED NTStatus = 0xC0000515 + STATUS_THREAD_NOT_RUNNING NTStatus = 0xC0000516 + STATUS_FAIL_FAST_EXCEPTION NTStatus = 0xC0000602 + STATUS_IMAGE_CERT_REVOKED NTStatus = 0xC0000603 + STATUS_DYNAMIC_CODE_BLOCKED NTStatus = 0xC0000604 + STATUS_IMAGE_CERT_EXPIRED NTStatus = 0xC0000605 + STATUS_STRICT_CFG_VIOLATION NTStatus = 0xC0000606 + STATUS_SET_CONTEXT_DENIED NTStatus = 0xC000060A + 
STATUS_CROSS_PARTITION_VIOLATION NTStatus = 0xC000060B + STATUS_PORT_CLOSED NTStatus = 0xC0000700 + STATUS_MESSAGE_LOST NTStatus = 0xC0000701 + STATUS_INVALID_MESSAGE NTStatus = 0xC0000702 + STATUS_REQUEST_CANCELED NTStatus = 0xC0000703 + STATUS_RECURSIVE_DISPATCH NTStatus = 0xC0000704 + STATUS_LPC_RECEIVE_BUFFER_EXPECTED NTStatus = 0xC0000705 + STATUS_LPC_INVALID_CONNECTION_USAGE NTStatus = 0xC0000706 + STATUS_LPC_REQUESTS_NOT_ALLOWED NTStatus = 0xC0000707 + STATUS_RESOURCE_IN_USE NTStatus = 0xC0000708 + STATUS_HARDWARE_MEMORY_ERROR NTStatus = 0xC0000709 + STATUS_THREADPOOL_HANDLE_EXCEPTION NTStatus = 0xC000070A + STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED NTStatus = 0xC000070B + STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED NTStatus = 0xC000070C + STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED NTStatus = 0xC000070D + STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED NTStatus = 0xC000070E + STATUS_THREADPOOL_RELEASED_DURING_OPERATION NTStatus = 0xC000070F + STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING NTStatus = 0xC0000710 + STATUS_APC_RETURNED_WHILE_IMPERSONATING NTStatus = 0xC0000711 + STATUS_PROCESS_IS_PROTECTED NTStatus = 0xC0000712 + STATUS_MCA_EXCEPTION NTStatus = 0xC0000713 + STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE NTStatus = 0xC0000714 + STATUS_SYMLINK_CLASS_DISABLED NTStatus = 0xC0000715 + STATUS_INVALID_IDN_NORMALIZATION NTStatus = 0xC0000716 + STATUS_NO_UNICODE_TRANSLATION NTStatus = 0xC0000717 + STATUS_ALREADY_REGISTERED NTStatus = 0xC0000718 + STATUS_CONTEXT_MISMATCH NTStatus = 0xC0000719 + STATUS_PORT_ALREADY_HAS_COMPLETION_LIST NTStatus = 0xC000071A + STATUS_CALLBACK_RETURNED_THREAD_PRIORITY NTStatus = 0xC000071B + STATUS_INVALID_THREAD NTStatus = 0xC000071C + STATUS_CALLBACK_RETURNED_TRANSACTION NTStatus = 0xC000071D + STATUS_CALLBACK_RETURNED_LDR_LOCK NTStatus = 0xC000071E + STATUS_CALLBACK_RETURNED_LANG NTStatus = 0xC000071F + STATUS_CALLBACK_RETURNED_PRI_BACK NTStatus = 0xC0000720 + STATUS_CALLBACK_RETURNED_THREAD_AFFINITY NTStatus = 0xC0000721 + STATUS_LPC_HANDLE_COUNT_EXCEEDED NTStatus = 0xC0000722 + STATUS_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000723 + STATUS_KERNEL_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000724 + STATUS_ATTACHED_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000725 + STATUS_TRIGGERED_EXECUTABLE_MEMORY_WRITE NTStatus = 0xC0000726 + STATUS_DISK_REPAIR_DISABLED NTStatus = 0xC0000800 + STATUS_DS_DOMAIN_RENAME_IN_PROGRESS NTStatus = 0xC0000801 + STATUS_DISK_QUOTA_EXCEEDED NTStatus = 0xC0000802 + STATUS_DATA_LOST_REPAIR NTStatus = 0x80000803 + STATUS_CONTENT_BLOCKED NTStatus = 0xC0000804 + STATUS_BAD_CLUSTERS NTStatus = 0xC0000805 + STATUS_VOLUME_DIRTY NTStatus = 0xC0000806 + STATUS_DISK_REPAIR_REDIRECTED NTStatus = 0x40000807 + STATUS_DISK_REPAIR_UNSUCCESSFUL NTStatus = 0xC0000808 + STATUS_CORRUPT_LOG_OVERFULL NTStatus = 0xC0000809 + STATUS_CORRUPT_LOG_CORRUPTED NTStatus = 0xC000080A + STATUS_CORRUPT_LOG_UNAVAILABLE NTStatus = 0xC000080B + STATUS_CORRUPT_LOG_DELETED_FULL NTStatus = 0xC000080C + STATUS_CORRUPT_LOG_CLEARED NTStatus = 0xC000080D + STATUS_ORPHAN_NAME_EXHAUSTED NTStatus = 0xC000080E + STATUS_PROACTIVE_SCAN_IN_PROGRESS NTStatus = 0xC000080F + STATUS_ENCRYPTED_IO_NOT_POSSIBLE NTStatus = 0xC0000810 + STATUS_CORRUPT_LOG_UPLEVEL_RECORDS NTStatus = 0xC0000811 + STATUS_FILE_CHECKED_OUT NTStatus = 0xC0000901 + STATUS_CHECKOUT_REQUIRED NTStatus = 0xC0000902 + STATUS_BAD_FILE_TYPE NTStatus = 0xC0000903 + STATUS_FILE_TOO_LARGE NTStatus = 0xC0000904 + STATUS_FORMS_AUTH_REQUIRED NTStatus = 0xC0000905 + STATUS_VIRUS_INFECTED NTStatus = 
0xC0000906 + STATUS_VIRUS_DELETED NTStatus = 0xC0000907 + STATUS_BAD_MCFG_TABLE NTStatus = 0xC0000908 + STATUS_CANNOT_BREAK_OPLOCK NTStatus = 0xC0000909 + STATUS_BAD_KEY NTStatus = 0xC000090A + STATUS_BAD_DATA NTStatus = 0xC000090B + STATUS_NO_KEY NTStatus = 0xC000090C + STATUS_FILE_HANDLE_REVOKED NTStatus = 0xC0000910 + STATUS_WOW_ASSERTION NTStatus = 0xC0009898 + STATUS_INVALID_SIGNATURE NTStatus = 0xC000A000 + STATUS_HMAC_NOT_SUPPORTED NTStatus = 0xC000A001 + STATUS_AUTH_TAG_MISMATCH NTStatus = 0xC000A002 + STATUS_INVALID_STATE_TRANSITION NTStatus = 0xC000A003 + STATUS_INVALID_KERNEL_INFO_VERSION NTStatus = 0xC000A004 + STATUS_INVALID_PEP_INFO_VERSION NTStatus = 0xC000A005 + STATUS_HANDLE_REVOKED NTStatus = 0xC000A006 + STATUS_EOF_ON_GHOSTED_RANGE NTStatus = 0xC000A007 + STATUS_IPSEC_QUEUE_OVERFLOW NTStatus = 0xC000A010 + STATUS_ND_QUEUE_OVERFLOW NTStatus = 0xC000A011 + STATUS_HOPLIMIT_EXCEEDED NTStatus = 0xC000A012 + STATUS_PROTOCOL_NOT_SUPPORTED NTStatus = 0xC000A013 + STATUS_FASTPATH_REJECTED NTStatus = 0xC000A014 + STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED NTStatus = 0xC000A080 + STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR NTStatus = 0xC000A081 + STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR NTStatus = 0xC000A082 + STATUS_XML_PARSE_ERROR NTStatus = 0xC000A083 + STATUS_XMLDSIG_ERROR NTStatus = 0xC000A084 + STATUS_WRONG_COMPARTMENT NTStatus = 0xC000A085 + STATUS_AUTHIP_FAILURE NTStatus = 0xC000A086 + STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS NTStatus = 0xC000A087 + STATUS_DS_OID_NOT_FOUND NTStatus = 0xC000A088 + STATUS_INCORRECT_ACCOUNT_TYPE NTStatus = 0xC000A089 + STATUS_HASH_NOT_SUPPORTED NTStatus = 0xC000A100 + STATUS_HASH_NOT_PRESENT NTStatus = 0xC000A101 + STATUS_SECONDARY_IC_PROVIDER_NOT_REGISTERED NTStatus = 0xC000A121 + STATUS_GPIO_CLIENT_INFORMATION_INVALID NTStatus = 0xC000A122 + STATUS_GPIO_VERSION_NOT_SUPPORTED NTStatus = 0xC000A123 + STATUS_GPIO_INVALID_REGISTRATION_PACKET NTStatus = 0xC000A124 + STATUS_GPIO_OPERATION_DENIED NTStatus = 0xC000A125 + STATUS_GPIO_INCOMPATIBLE_CONNECT_MODE NTStatus = 0xC000A126 + STATUS_GPIO_INTERRUPT_ALREADY_UNMASKED NTStatus = 0x8000A127 + STATUS_CANNOT_SWITCH_RUNLEVEL NTStatus = 0xC000A141 + STATUS_INVALID_RUNLEVEL_SETTING NTStatus = 0xC000A142 + STATUS_RUNLEVEL_SWITCH_TIMEOUT NTStatus = 0xC000A143 + STATUS_SERVICES_FAILED_AUTOSTART NTStatus = 0x4000A144 + STATUS_RUNLEVEL_SWITCH_AGENT_TIMEOUT NTStatus = 0xC000A145 + STATUS_RUNLEVEL_SWITCH_IN_PROGRESS NTStatus = 0xC000A146 + STATUS_NOT_APPCONTAINER NTStatus = 0xC000A200 + STATUS_NOT_SUPPORTED_IN_APPCONTAINER NTStatus = 0xC000A201 + STATUS_INVALID_PACKAGE_SID_LENGTH NTStatus = 0xC000A202 + STATUS_LPAC_ACCESS_DENIED NTStatus = 0xC000A203 + STATUS_ADMINLESS_ACCESS_DENIED NTStatus = 0xC000A204 + STATUS_APP_DATA_NOT_FOUND NTStatus = 0xC000A281 + STATUS_APP_DATA_EXPIRED NTStatus = 0xC000A282 + STATUS_APP_DATA_CORRUPT NTStatus = 0xC000A283 + STATUS_APP_DATA_LIMIT_EXCEEDED NTStatus = 0xC000A284 + STATUS_APP_DATA_REBOOT_REQUIRED NTStatus = 0xC000A285 + STATUS_OFFLOAD_READ_FLT_NOT_SUPPORTED NTStatus = 0xC000A2A1 + STATUS_OFFLOAD_WRITE_FLT_NOT_SUPPORTED NTStatus = 0xC000A2A2 + STATUS_OFFLOAD_READ_FILE_NOT_SUPPORTED NTStatus = 0xC000A2A3 + STATUS_OFFLOAD_WRITE_FILE_NOT_SUPPORTED NTStatus = 0xC000A2A4 + STATUS_WOF_WIM_HEADER_CORRUPT NTStatus = 0xC000A2A5 + STATUS_WOF_WIM_RESOURCE_TABLE_CORRUPT NTStatus = 0xC000A2A6 + STATUS_WOF_FILE_RESOURCE_TABLE_CORRUPT NTStatus = 0xC000A2A7 + STATUS_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE NTStatus = 0xC000CE01 + 
STATUS_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT NTStatus = 0xC000CE02 + STATUS_FILE_SYSTEM_VIRTUALIZATION_BUSY NTStatus = 0xC000CE03 + STATUS_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN NTStatus = 0xC000CE04 + STATUS_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION NTStatus = 0xC000CE05 + STATUS_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT NTStatus = 0xC000CF00 + STATUS_CLOUD_FILE_PROVIDER_NOT_RUNNING NTStatus = 0xC000CF01 + STATUS_CLOUD_FILE_METADATA_CORRUPT NTStatus = 0xC000CF02 + STATUS_CLOUD_FILE_METADATA_TOO_LARGE NTStatus = 0xC000CF03 + STATUS_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE NTStatus = 0x8000CF04 + STATUS_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS NTStatus = 0x8000CF05 + STATUS_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED NTStatus = 0xC000CF06 + STATUS_NOT_A_CLOUD_FILE NTStatus = 0xC000CF07 + STATUS_CLOUD_FILE_NOT_IN_SYNC NTStatus = 0xC000CF08 + STATUS_CLOUD_FILE_ALREADY_CONNECTED NTStatus = 0xC000CF09 + STATUS_CLOUD_FILE_NOT_SUPPORTED NTStatus = 0xC000CF0A + STATUS_CLOUD_FILE_INVALID_REQUEST NTStatus = 0xC000CF0B + STATUS_CLOUD_FILE_READ_ONLY_VOLUME NTStatus = 0xC000CF0C + STATUS_CLOUD_FILE_CONNECTED_PROVIDER_ONLY NTStatus = 0xC000CF0D + STATUS_CLOUD_FILE_VALIDATION_FAILED NTStatus = 0xC000CF0E + STATUS_CLOUD_FILE_AUTHENTICATION_FAILED NTStatus = 0xC000CF0F + STATUS_CLOUD_FILE_INSUFFICIENT_RESOURCES NTStatus = 0xC000CF10 + STATUS_CLOUD_FILE_NETWORK_UNAVAILABLE NTStatus = 0xC000CF11 + STATUS_CLOUD_FILE_UNSUCCESSFUL NTStatus = 0xC000CF12 + STATUS_CLOUD_FILE_NOT_UNDER_SYNC_ROOT NTStatus = 0xC000CF13 + STATUS_CLOUD_FILE_IN_USE NTStatus = 0xC000CF14 + STATUS_CLOUD_FILE_PINNED NTStatus = 0xC000CF15 + STATUS_CLOUD_FILE_REQUEST_ABORTED NTStatus = 0xC000CF16 + STATUS_CLOUD_FILE_PROPERTY_CORRUPT NTStatus = 0xC000CF17 + STATUS_CLOUD_FILE_ACCESS_DENIED NTStatus = 0xC000CF18 + STATUS_CLOUD_FILE_INCOMPATIBLE_HARDLINKS NTStatus = 0xC000CF19 + STATUS_CLOUD_FILE_PROPERTY_LOCK_CONFLICT NTStatus = 0xC000CF1A + STATUS_CLOUD_FILE_REQUEST_CANCELED NTStatus = 0xC000CF1B + STATUS_CLOUD_FILE_PROVIDER_TERMINATED NTStatus = 0xC000CF1D + STATUS_NOT_A_CLOUD_SYNC_ROOT NTStatus = 0xC000CF1E + STATUS_CLOUD_FILE_REQUEST_TIMEOUT NTStatus = 0xC000CF1F + STATUS_ACPI_INVALID_OPCODE NTStatus = 0xC0140001 + STATUS_ACPI_STACK_OVERFLOW NTStatus = 0xC0140002 + STATUS_ACPI_ASSERT_FAILED NTStatus = 0xC0140003 + STATUS_ACPI_INVALID_INDEX NTStatus = 0xC0140004 + STATUS_ACPI_INVALID_ARGUMENT NTStatus = 0xC0140005 + STATUS_ACPI_FATAL NTStatus = 0xC0140006 + STATUS_ACPI_INVALID_SUPERNAME NTStatus = 0xC0140007 + STATUS_ACPI_INVALID_ARGTYPE NTStatus = 0xC0140008 + STATUS_ACPI_INVALID_OBJTYPE NTStatus = 0xC0140009 + STATUS_ACPI_INVALID_TARGETTYPE NTStatus = 0xC014000A + STATUS_ACPI_INCORRECT_ARGUMENT_COUNT NTStatus = 0xC014000B + STATUS_ACPI_ADDRESS_NOT_MAPPED NTStatus = 0xC014000C + STATUS_ACPI_INVALID_EVENTTYPE NTStatus = 0xC014000D + STATUS_ACPI_HANDLER_COLLISION NTStatus = 0xC014000E + STATUS_ACPI_INVALID_DATA NTStatus = 0xC014000F + STATUS_ACPI_INVALID_REGION NTStatus = 0xC0140010 + STATUS_ACPI_INVALID_ACCESS_SIZE NTStatus = 0xC0140011 + STATUS_ACPI_ACQUIRE_GLOBAL_LOCK NTStatus = 0xC0140012 + STATUS_ACPI_ALREADY_INITIALIZED NTStatus = 0xC0140013 + STATUS_ACPI_NOT_INITIALIZED NTStatus = 0xC0140014 + STATUS_ACPI_INVALID_MUTEX_LEVEL NTStatus = 0xC0140015 + STATUS_ACPI_MUTEX_NOT_OWNED NTStatus = 0xC0140016 + STATUS_ACPI_MUTEX_NOT_OWNER NTStatus = 0xC0140017 + STATUS_ACPI_RS_ACCESS NTStatus = 0xC0140018 + STATUS_ACPI_INVALID_TABLE NTStatus = 0xC0140019 + STATUS_ACPI_REG_HANDLER_FAILED NTStatus = 0xC0140020 + STATUS_ACPI_POWER_REQUEST_FAILED NTStatus 
= 0xC0140021 + STATUS_CTX_WINSTATION_NAME_INVALID NTStatus = 0xC00A0001 + STATUS_CTX_INVALID_PD NTStatus = 0xC00A0002 + STATUS_CTX_PD_NOT_FOUND NTStatus = 0xC00A0003 + STATUS_CTX_CDM_CONNECT NTStatus = 0x400A0004 + STATUS_CTX_CDM_DISCONNECT NTStatus = 0x400A0005 + STATUS_CTX_CLOSE_PENDING NTStatus = 0xC00A0006 + STATUS_CTX_NO_OUTBUF NTStatus = 0xC00A0007 + STATUS_CTX_MODEM_INF_NOT_FOUND NTStatus = 0xC00A0008 + STATUS_CTX_INVALID_MODEMNAME NTStatus = 0xC00A0009 + STATUS_CTX_RESPONSE_ERROR NTStatus = 0xC00A000A + STATUS_CTX_MODEM_RESPONSE_TIMEOUT NTStatus = 0xC00A000B + STATUS_CTX_MODEM_RESPONSE_NO_CARRIER NTStatus = 0xC00A000C + STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE NTStatus = 0xC00A000D + STATUS_CTX_MODEM_RESPONSE_BUSY NTStatus = 0xC00A000E + STATUS_CTX_MODEM_RESPONSE_VOICE NTStatus = 0xC00A000F + STATUS_CTX_TD_ERROR NTStatus = 0xC00A0010 + STATUS_CTX_LICENSE_CLIENT_INVALID NTStatus = 0xC00A0012 + STATUS_CTX_LICENSE_NOT_AVAILABLE NTStatus = 0xC00A0013 + STATUS_CTX_LICENSE_EXPIRED NTStatus = 0xC00A0014 + STATUS_CTX_WINSTATION_NOT_FOUND NTStatus = 0xC00A0015 + STATUS_CTX_WINSTATION_NAME_COLLISION NTStatus = 0xC00A0016 + STATUS_CTX_WINSTATION_BUSY NTStatus = 0xC00A0017 + STATUS_CTX_BAD_VIDEO_MODE NTStatus = 0xC00A0018 + STATUS_CTX_GRAPHICS_INVALID NTStatus = 0xC00A0022 + STATUS_CTX_NOT_CONSOLE NTStatus = 0xC00A0024 + STATUS_CTX_CLIENT_QUERY_TIMEOUT NTStatus = 0xC00A0026 + STATUS_CTX_CONSOLE_DISCONNECT NTStatus = 0xC00A0027 + STATUS_CTX_CONSOLE_CONNECT NTStatus = 0xC00A0028 + STATUS_CTX_SHADOW_DENIED NTStatus = 0xC00A002A + STATUS_CTX_WINSTATION_ACCESS_DENIED NTStatus = 0xC00A002B + STATUS_CTX_INVALID_WD NTStatus = 0xC00A002E + STATUS_CTX_WD_NOT_FOUND NTStatus = 0xC00A002F + STATUS_CTX_SHADOW_INVALID NTStatus = 0xC00A0030 + STATUS_CTX_SHADOW_DISABLED NTStatus = 0xC00A0031 + STATUS_RDP_PROTOCOL_ERROR NTStatus = 0xC00A0032 + STATUS_CTX_CLIENT_LICENSE_NOT_SET NTStatus = 0xC00A0033 + STATUS_CTX_CLIENT_LICENSE_IN_USE NTStatus = 0xC00A0034 + STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE NTStatus = 0xC00A0035 + STATUS_CTX_SHADOW_NOT_RUNNING NTStatus = 0xC00A0036 + STATUS_CTX_LOGON_DISABLED NTStatus = 0xC00A0037 + STATUS_CTX_SECURITY_LAYER_ERROR NTStatus = 0xC00A0038 + STATUS_TS_INCOMPATIBLE_SESSIONS NTStatus = 0xC00A0039 + STATUS_TS_VIDEO_SUBSYSTEM_ERROR NTStatus = 0xC00A003A + STATUS_PNP_BAD_MPS_TABLE NTStatus = 0xC0040035 + STATUS_PNP_TRANSLATION_FAILED NTStatus = 0xC0040036 + STATUS_PNP_IRQ_TRANSLATION_FAILED NTStatus = 0xC0040037 + STATUS_PNP_INVALID_ID NTStatus = 0xC0040038 + STATUS_IO_REISSUE_AS_CACHED NTStatus = 0xC0040039 + STATUS_MUI_FILE_NOT_FOUND NTStatus = 0xC00B0001 + STATUS_MUI_INVALID_FILE NTStatus = 0xC00B0002 + STATUS_MUI_INVALID_RC_CONFIG NTStatus = 0xC00B0003 + STATUS_MUI_INVALID_LOCALE_NAME NTStatus = 0xC00B0004 + STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME NTStatus = 0xC00B0005 + STATUS_MUI_FILE_NOT_LOADED NTStatus = 0xC00B0006 + STATUS_RESOURCE_ENUM_USER_STOP NTStatus = 0xC00B0007 + STATUS_FLT_NO_HANDLER_DEFINED NTStatus = 0xC01C0001 + STATUS_FLT_CONTEXT_ALREADY_DEFINED NTStatus = 0xC01C0002 + STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST NTStatus = 0xC01C0003 + STATUS_FLT_DISALLOW_FAST_IO NTStatus = 0xC01C0004 + STATUS_FLT_INVALID_NAME_REQUEST NTStatus = 0xC01C0005 + STATUS_FLT_NOT_SAFE_TO_POST_OPERATION NTStatus = 0xC01C0006 + STATUS_FLT_NOT_INITIALIZED NTStatus = 0xC01C0007 + STATUS_FLT_FILTER_NOT_READY NTStatus = 0xC01C0008 + STATUS_FLT_POST_OPERATION_CLEANUP NTStatus = 0xC01C0009 + STATUS_FLT_INTERNAL_ERROR NTStatus = 0xC01C000A + STATUS_FLT_DELETING_OBJECT NTStatus = 0xC01C000B + 
STATUS_FLT_MUST_BE_NONPAGED_POOL NTStatus = 0xC01C000C + STATUS_FLT_DUPLICATE_ENTRY NTStatus = 0xC01C000D + STATUS_FLT_CBDQ_DISABLED NTStatus = 0xC01C000E + STATUS_FLT_DO_NOT_ATTACH NTStatus = 0xC01C000F + STATUS_FLT_DO_NOT_DETACH NTStatus = 0xC01C0010 + STATUS_FLT_INSTANCE_ALTITUDE_COLLISION NTStatus = 0xC01C0011 + STATUS_FLT_INSTANCE_NAME_COLLISION NTStatus = 0xC01C0012 + STATUS_FLT_FILTER_NOT_FOUND NTStatus = 0xC01C0013 + STATUS_FLT_VOLUME_NOT_FOUND NTStatus = 0xC01C0014 + STATUS_FLT_INSTANCE_NOT_FOUND NTStatus = 0xC01C0015 + STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND NTStatus = 0xC01C0016 + STATUS_FLT_INVALID_CONTEXT_REGISTRATION NTStatus = 0xC01C0017 + STATUS_FLT_NAME_CACHE_MISS NTStatus = 0xC01C0018 + STATUS_FLT_NO_DEVICE_OBJECT NTStatus = 0xC01C0019 + STATUS_FLT_VOLUME_ALREADY_MOUNTED NTStatus = 0xC01C001A + STATUS_FLT_ALREADY_ENLISTED NTStatus = 0xC01C001B + STATUS_FLT_CONTEXT_ALREADY_LINKED NTStatus = 0xC01C001C + STATUS_FLT_NO_WAITER_FOR_REPLY NTStatus = 0xC01C0020 + STATUS_FLT_REGISTRATION_BUSY NTStatus = 0xC01C0023 + STATUS_SXS_SECTION_NOT_FOUND NTStatus = 0xC0150001 + STATUS_SXS_CANT_GEN_ACTCTX NTStatus = 0xC0150002 + STATUS_SXS_INVALID_ACTCTXDATA_FORMAT NTStatus = 0xC0150003 + STATUS_SXS_ASSEMBLY_NOT_FOUND NTStatus = 0xC0150004 + STATUS_SXS_MANIFEST_FORMAT_ERROR NTStatus = 0xC0150005 + STATUS_SXS_MANIFEST_PARSE_ERROR NTStatus = 0xC0150006 + STATUS_SXS_ACTIVATION_CONTEXT_DISABLED NTStatus = 0xC0150007 + STATUS_SXS_KEY_NOT_FOUND NTStatus = 0xC0150008 + STATUS_SXS_VERSION_CONFLICT NTStatus = 0xC0150009 + STATUS_SXS_WRONG_SECTION_TYPE NTStatus = 0xC015000A + STATUS_SXS_THREAD_QUERIES_DISABLED NTStatus = 0xC015000B + STATUS_SXS_ASSEMBLY_MISSING NTStatus = 0xC015000C + STATUS_SXS_RELEASE_ACTIVATION_CONTEXT NTStatus = 0x4015000D + STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET NTStatus = 0xC015000E + STATUS_SXS_EARLY_DEACTIVATION NTStatus = 0xC015000F + STATUS_SXS_INVALID_DEACTIVATION NTStatus = 0xC0150010 + STATUS_SXS_MULTIPLE_DEACTIVATION NTStatus = 0xC0150011 + STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY NTStatus = 0xC0150012 + STATUS_SXS_PROCESS_TERMINATION_REQUESTED NTStatus = 0xC0150013 + STATUS_SXS_CORRUPT_ACTIVATION_STACK NTStatus = 0xC0150014 + STATUS_SXS_CORRUPTION NTStatus = 0xC0150015 + STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE NTStatus = 0xC0150016 + STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME NTStatus = 0xC0150017 + STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE NTStatus = 0xC0150018 + STATUS_SXS_IDENTITY_PARSE_ERROR NTStatus = 0xC0150019 + STATUS_SXS_COMPONENT_STORE_CORRUPT NTStatus = 0xC015001A + STATUS_SXS_FILE_HASH_MISMATCH NTStatus = 0xC015001B + STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT NTStatus = 0xC015001C + STATUS_SXS_IDENTITIES_DIFFERENT NTStatus = 0xC015001D + STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT NTStatus = 0xC015001E + STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY NTStatus = 0xC015001F + STATUS_ADVANCED_INSTALLER_FAILED NTStatus = 0xC0150020 + STATUS_XML_ENCODING_MISMATCH NTStatus = 0xC0150021 + STATUS_SXS_MANIFEST_TOO_BIG NTStatus = 0xC0150022 + STATUS_SXS_SETTING_NOT_REGISTERED NTStatus = 0xC0150023 + STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE NTStatus = 0xC0150024 + STATUS_SMI_PRIMITIVE_INSTALLER_FAILED NTStatus = 0xC0150025 + STATUS_GENERIC_COMMAND_FAILED NTStatus = 0xC0150026 + STATUS_SXS_FILE_HASH_MISSING NTStatus = 0xC0150027 + STATUS_CLUSTER_INVALID_NODE NTStatus = 0xC0130001 + STATUS_CLUSTER_NODE_EXISTS NTStatus = 0xC0130002 + STATUS_CLUSTER_JOIN_IN_PROGRESS NTStatus = 0xC0130003 + STATUS_CLUSTER_NODE_NOT_FOUND NTStatus = 0xC0130004 + 
STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND NTStatus = 0xC0130005 + STATUS_CLUSTER_NETWORK_EXISTS NTStatus = 0xC0130006 + STATUS_CLUSTER_NETWORK_NOT_FOUND NTStatus = 0xC0130007 + STATUS_CLUSTER_NETINTERFACE_EXISTS NTStatus = 0xC0130008 + STATUS_CLUSTER_NETINTERFACE_NOT_FOUND NTStatus = 0xC0130009 + STATUS_CLUSTER_INVALID_REQUEST NTStatus = 0xC013000A + STATUS_CLUSTER_INVALID_NETWORK_PROVIDER NTStatus = 0xC013000B + STATUS_CLUSTER_NODE_DOWN NTStatus = 0xC013000C + STATUS_CLUSTER_NODE_UNREACHABLE NTStatus = 0xC013000D + STATUS_CLUSTER_NODE_NOT_MEMBER NTStatus = 0xC013000E + STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS NTStatus = 0xC013000F + STATUS_CLUSTER_INVALID_NETWORK NTStatus = 0xC0130010 + STATUS_CLUSTER_NO_NET_ADAPTERS NTStatus = 0xC0130011 + STATUS_CLUSTER_NODE_UP NTStatus = 0xC0130012 + STATUS_CLUSTER_NODE_PAUSED NTStatus = 0xC0130013 + STATUS_CLUSTER_NODE_NOT_PAUSED NTStatus = 0xC0130014 + STATUS_CLUSTER_NO_SECURITY_CONTEXT NTStatus = 0xC0130015 + STATUS_CLUSTER_NETWORK_NOT_INTERNAL NTStatus = 0xC0130016 + STATUS_CLUSTER_POISONED NTStatus = 0xC0130017 + STATUS_CLUSTER_NON_CSV_PATH NTStatus = 0xC0130018 + STATUS_CLUSTER_CSV_VOLUME_NOT_LOCAL NTStatus = 0xC0130019 + STATUS_CLUSTER_CSV_READ_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0xC0130020 + STATUS_CLUSTER_CSV_AUTO_PAUSE_ERROR NTStatus = 0xC0130021 + STATUS_CLUSTER_CSV_REDIRECTED NTStatus = 0xC0130022 + STATUS_CLUSTER_CSV_NOT_REDIRECTED NTStatus = 0xC0130023 + STATUS_CLUSTER_CSV_VOLUME_DRAINING NTStatus = 0xC0130024 + STATUS_CLUSTER_CSV_SNAPSHOT_CREATION_IN_PROGRESS NTStatus = 0xC0130025 + STATUS_CLUSTER_CSV_VOLUME_DRAINING_SUCCEEDED_DOWNLEVEL NTStatus = 0xC0130026 + STATUS_CLUSTER_CSV_NO_SNAPSHOTS NTStatus = 0xC0130027 + STATUS_CSV_IO_PAUSE_TIMEOUT NTStatus = 0xC0130028 + STATUS_CLUSTER_CSV_INVALID_HANDLE NTStatus = 0xC0130029 + STATUS_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR NTStatus = 0xC0130030 + STATUS_CLUSTER_CAM_TICKET_REPLAY_DETECTED NTStatus = 0xC0130031 + STATUS_TRANSACTIONAL_CONFLICT NTStatus = 0xC0190001 + STATUS_INVALID_TRANSACTION NTStatus = 0xC0190002 + STATUS_TRANSACTION_NOT_ACTIVE NTStatus = 0xC0190003 + STATUS_TM_INITIALIZATION_FAILED NTStatus = 0xC0190004 + STATUS_RM_NOT_ACTIVE NTStatus = 0xC0190005 + STATUS_RM_METADATA_CORRUPT NTStatus = 0xC0190006 + STATUS_TRANSACTION_NOT_JOINED NTStatus = 0xC0190007 + STATUS_DIRECTORY_NOT_RM NTStatus = 0xC0190008 + STATUS_COULD_NOT_RESIZE_LOG NTStatus = 0x80190009 + STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE NTStatus = 0xC019000A + STATUS_LOG_RESIZE_INVALID_SIZE NTStatus = 0xC019000B + STATUS_REMOTE_FILE_VERSION_MISMATCH NTStatus = 0xC019000C + STATUS_CRM_PROTOCOL_ALREADY_EXISTS NTStatus = 0xC019000F + STATUS_TRANSACTION_PROPAGATION_FAILED NTStatus = 0xC0190010 + STATUS_CRM_PROTOCOL_NOT_FOUND NTStatus = 0xC0190011 + STATUS_TRANSACTION_SUPERIOR_EXISTS NTStatus = 0xC0190012 + STATUS_TRANSACTION_REQUEST_NOT_VALID NTStatus = 0xC0190013 + STATUS_TRANSACTION_NOT_REQUESTED NTStatus = 0xC0190014 + STATUS_TRANSACTION_ALREADY_ABORTED NTStatus = 0xC0190015 + STATUS_TRANSACTION_ALREADY_COMMITTED NTStatus = 0xC0190016 + STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER NTStatus = 0xC0190017 + STATUS_CURRENT_TRANSACTION_NOT_VALID NTStatus = 0xC0190018 + STATUS_LOG_GROWTH_FAILED NTStatus = 0xC0190019 + STATUS_OBJECT_NO_LONGER_EXISTS NTStatus = 0xC0190021 + STATUS_STREAM_MINIVERSION_NOT_FOUND NTStatus = 0xC0190022 + STATUS_STREAM_MINIVERSION_NOT_VALID NTStatus = 0xC0190023 + STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION NTStatus = 0xC0190024 + STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT NTStatus = 0xC0190025 
+ STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS NTStatus = 0xC0190026 + STATUS_HANDLE_NO_LONGER_VALID NTStatus = 0xC0190028 + STATUS_NO_TXF_METADATA NTStatus = 0x80190029 + STATUS_LOG_CORRUPTION_DETECTED NTStatus = 0xC0190030 + STATUS_CANT_RECOVER_WITH_HANDLE_OPEN NTStatus = 0x80190031 + STATUS_RM_DISCONNECTED NTStatus = 0xC0190032 + STATUS_ENLISTMENT_NOT_SUPERIOR NTStatus = 0xC0190033 + STATUS_RECOVERY_NOT_NEEDED NTStatus = 0x40190034 + STATUS_RM_ALREADY_STARTED NTStatus = 0x40190035 + STATUS_FILE_IDENTITY_NOT_PERSISTENT NTStatus = 0xC0190036 + STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY NTStatus = 0xC0190037 + STATUS_CANT_CROSS_RM_BOUNDARY NTStatus = 0xC0190038 + STATUS_TXF_DIR_NOT_EMPTY NTStatus = 0xC0190039 + STATUS_INDOUBT_TRANSACTIONS_EXIST NTStatus = 0xC019003A + STATUS_TM_VOLATILE NTStatus = 0xC019003B + STATUS_ROLLBACK_TIMER_EXPIRED NTStatus = 0xC019003C + STATUS_TXF_ATTRIBUTE_CORRUPT NTStatus = 0xC019003D + STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC019003E + STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED NTStatus = 0xC019003F + STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE NTStatus = 0xC0190040 + STATUS_TXF_METADATA_ALREADY_PRESENT NTStatus = 0x80190041 + STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET NTStatus = 0x80190042 + STATUS_TRANSACTION_REQUIRED_PROMOTION NTStatus = 0xC0190043 + STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION NTStatus = 0xC0190044 + STATUS_TRANSACTIONS_NOT_FROZEN NTStatus = 0xC0190045 + STATUS_TRANSACTION_FREEZE_IN_PROGRESS NTStatus = 0xC0190046 + STATUS_NOT_SNAPSHOT_VOLUME NTStatus = 0xC0190047 + STATUS_NO_SAVEPOINT_WITH_OPEN_FILES NTStatus = 0xC0190048 + STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190049 + STATUS_TM_IDENTITY_MISMATCH NTStatus = 0xC019004A + STATUS_FLOATED_SECTION NTStatus = 0xC019004B + STATUS_CANNOT_ACCEPT_TRANSACTED_WORK NTStatus = 0xC019004C + STATUS_CANNOT_ABORT_TRANSACTIONS NTStatus = 0xC019004D + STATUS_TRANSACTION_NOT_FOUND NTStatus = 0xC019004E + STATUS_RESOURCEMANAGER_NOT_FOUND NTStatus = 0xC019004F + STATUS_ENLISTMENT_NOT_FOUND NTStatus = 0xC0190050 + STATUS_TRANSACTIONMANAGER_NOT_FOUND NTStatus = 0xC0190051 + STATUS_TRANSACTIONMANAGER_NOT_ONLINE NTStatus = 0xC0190052 + STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION NTStatus = 0xC0190053 + STATUS_TRANSACTION_NOT_ROOT NTStatus = 0xC0190054 + STATUS_TRANSACTION_OBJECT_EXPIRED NTStatus = 0xC0190055 + STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190056 + STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED NTStatus = 0xC0190057 + STATUS_TRANSACTION_RECORD_TOO_LONG NTStatus = 0xC0190058 + STATUS_NO_LINK_TRACKING_IN_TRANSACTION NTStatus = 0xC0190059 + STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION NTStatus = 0xC019005A + STATUS_TRANSACTION_INTEGRITY_VIOLATED NTStatus = 0xC019005B + STATUS_TRANSACTIONMANAGER_IDENTITY_MISMATCH NTStatus = 0xC019005C + STATUS_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT NTStatus = 0xC019005D + STATUS_TRANSACTION_MUST_WRITETHROUGH NTStatus = 0xC019005E + STATUS_TRANSACTION_NO_SUPERIOR NTStatus = 0xC019005F + STATUS_EXPIRED_HANDLE NTStatus = 0xC0190060 + STATUS_TRANSACTION_NOT_ENLISTED NTStatus = 0xC0190061 + STATUS_LOG_SECTOR_INVALID NTStatus = 0xC01A0001 + STATUS_LOG_SECTOR_PARITY_INVALID NTStatus = 0xC01A0002 + STATUS_LOG_SECTOR_REMAPPED NTStatus = 0xC01A0003 + STATUS_LOG_BLOCK_INCOMPLETE NTStatus = 0xC01A0004 + STATUS_LOG_INVALID_RANGE NTStatus = 0xC01A0005 + STATUS_LOG_BLOCKS_EXHAUSTED NTStatus = 0xC01A0006 + STATUS_LOG_READ_CONTEXT_INVALID NTStatus = 0xC01A0007 + STATUS_LOG_RESTART_INVALID NTStatus = 0xC01A0008 + STATUS_LOG_BLOCK_VERSION NTStatus = 
0xC01A0009 + STATUS_LOG_BLOCK_INVALID NTStatus = 0xC01A000A + STATUS_LOG_READ_MODE_INVALID NTStatus = 0xC01A000B + STATUS_LOG_NO_RESTART NTStatus = 0x401A000C + STATUS_LOG_METADATA_CORRUPT NTStatus = 0xC01A000D + STATUS_LOG_METADATA_INVALID NTStatus = 0xC01A000E + STATUS_LOG_METADATA_INCONSISTENT NTStatus = 0xC01A000F + STATUS_LOG_RESERVATION_INVALID NTStatus = 0xC01A0010 + STATUS_LOG_CANT_DELETE NTStatus = 0xC01A0011 + STATUS_LOG_CONTAINER_LIMIT_EXCEEDED NTStatus = 0xC01A0012 + STATUS_LOG_START_OF_LOG NTStatus = 0xC01A0013 + STATUS_LOG_POLICY_ALREADY_INSTALLED NTStatus = 0xC01A0014 + STATUS_LOG_POLICY_NOT_INSTALLED NTStatus = 0xC01A0015 + STATUS_LOG_POLICY_INVALID NTStatus = 0xC01A0016 + STATUS_LOG_POLICY_CONFLICT NTStatus = 0xC01A0017 + STATUS_LOG_PINNED_ARCHIVE_TAIL NTStatus = 0xC01A0018 + STATUS_LOG_RECORD_NONEXISTENT NTStatus = 0xC01A0019 + STATUS_LOG_RECORDS_RESERVED_INVALID NTStatus = 0xC01A001A + STATUS_LOG_SPACE_RESERVED_INVALID NTStatus = 0xC01A001B + STATUS_LOG_TAIL_INVALID NTStatus = 0xC01A001C + STATUS_LOG_FULL NTStatus = 0xC01A001D + STATUS_LOG_MULTIPLEXED NTStatus = 0xC01A001E + STATUS_LOG_DEDICATED NTStatus = 0xC01A001F + STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS NTStatus = 0xC01A0020 + STATUS_LOG_ARCHIVE_IN_PROGRESS NTStatus = 0xC01A0021 + STATUS_LOG_EPHEMERAL NTStatus = 0xC01A0022 + STATUS_LOG_NOT_ENOUGH_CONTAINERS NTStatus = 0xC01A0023 + STATUS_LOG_CLIENT_ALREADY_REGISTERED NTStatus = 0xC01A0024 + STATUS_LOG_CLIENT_NOT_REGISTERED NTStatus = 0xC01A0025 + STATUS_LOG_FULL_HANDLER_IN_PROGRESS NTStatus = 0xC01A0026 + STATUS_LOG_CONTAINER_READ_FAILED NTStatus = 0xC01A0027 + STATUS_LOG_CONTAINER_WRITE_FAILED NTStatus = 0xC01A0028 + STATUS_LOG_CONTAINER_OPEN_FAILED NTStatus = 0xC01A0029 + STATUS_LOG_CONTAINER_STATE_INVALID NTStatus = 0xC01A002A + STATUS_LOG_STATE_INVALID NTStatus = 0xC01A002B + STATUS_LOG_PINNED NTStatus = 0xC01A002C + STATUS_LOG_METADATA_FLUSH_FAILED NTStatus = 0xC01A002D + STATUS_LOG_INCONSISTENT_SECURITY NTStatus = 0xC01A002E + STATUS_LOG_APPENDED_FLUSH_FAILED NTStatus = 0xC01A002F + STATUS_LOG_PINNED_RESERVATION NTStatus = 0xC01A0030 + STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC01B00EA + STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED NTStatus = 0x801B00EB + STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST NTStatus = 0x401B00EC + STATUS_MONITOR_NO_DESCRIPTOR NTStatus = 0xC01D0001 + STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT NTStatus = 0xC01D0002 + STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM NTStatus = 0xC01D0003 + STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK NTStatus = 0xC01D0004 + STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED NTStatus = 0xC01D0005 + STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK NTStatus = 0xC01D0006 + STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK NTStatus = 0xC01D0007 + STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA NTStatus = 0xC01D0008 + STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK NTStatus = 0xC01D0009 + STATUS_MONITOR_INVALID_MANUFACTURE_DATE NTStatus = 0xC01D000A + STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER NTStatus = 0xC01E0000 + STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER NTStatus = 0xC01E0001 + STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER NTStatus = 0xC01E0002 + STATUS_GRAPHICS_ADAPTER_WAS_RESET NTStatus = 0xC01E0003 + STATUS_GRAPHICS_INVALID_DRIVER_MODEL NTStatus = 0xC01E0004 + STATUS_GRAPHICS_PRESENT_MODE_CHANGED NTStatus = 0xC01E0005 + STATUS_GRAPHICS_PRESENT_OCCLUDED NTStatus = 0xC01E0006 + STATUS_GRAPHICS_PRESENT_DENIED NTStatus = 0xC01E0007 + STATUS_GRAPHICS_CANNOTCOLORCONVERT NTStatus = 0xC01E0008 + STATUS_GRAPHICS_DRIVER_MISMATCH 
NTStatus = 0xC01E0009 + STATUS_GRAPHICS_PARTIAL_DATA_POPULATED NTStatus = 0x401E000A + STATUS_GRAPHICS_PRESENT_REDIRECTION_DISABLED NTStatus = 0xC01E000B + STATUS_GRAPHICS_PRESENT_UNOCCLUDED NTStatus = 0xC01E000C + STATUS_GRAPHICS_WINDOWDC_NOT_AVAILABLE NTStatus = 0xC01E000D + STATUS_GRAPHICS_WINDOWLESS_PRESENT_DISABLED NTStatus = 0xC01E000E + STATUS_GRAPHICS_PRESENT_INVALID_WINDOW NTStatus = 0xC01E000F + STATUS_GRAPHICS_PRESENT_BUFFER_NOT_BOUND NTStatus = 0xC01E0010 + STATUS_GRAPHICS_VAIL_STATE_CHANGED NTStatus = 0xC01E0011 + STATUS_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN NTStatus = 0xC01E0012 + STATUS_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED NTStatus = 0xC01E0013 + STATUS_GRAPHICS_NO_VIDEO_MEMORY NTStatus = 0xC01E0100 + STATUS_GRAPHICS_CANT_LOCK_MEMORY NTStatus = 0xC01E0101 + STATUS_GRAPHICS_ALLOCATION_BUSY NTStatus = 0xC01E0102 + STATUS_GRAPHICS_TOO_MANY_REFERENCES NTStatus = 0xC01E0103 + STATUS_GRAPHICS_TRY_AGAIN_LATER NTStatus = 0xC01E0104 + STATUS_GRAPHICS_TRY_AGAIN_NOW NTStatus = 0xC01E0105 + STATUS_GRAPHICS_ALLOCATION_INVALID NTStatus = 0xC01E0106 + STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE NTStatus = 0xC01E0107 + STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED NTStatus = 0xC01E0108 + STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION NTStatus = 0xC01E0109 + STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE NTStatus = 0xC01E0110 + STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION NTStatus = 0xC01E0111 + STATUS_GRAPHICS_ALLOCATION_CLOSED NTStatus = 0xC01E0112 + STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE NTStatus = 0xC01E0113 + STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE NTStatus = 0xC01E0114 + STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE NTStatus = 0xC01E0115 + STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST NTStatus = 0xC01E0116 + STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE NTStatus = 0xC01E0200 + STATUS_GRAPHICS_SKIP_ALLOCATION_PREPARATION NTStatus = 0x401E0201 + STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY NTStatus = 0xC01E0300 + STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED NTStatus = 0xC01E0301 + STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED NTStatus = 0xC01E0302 + STATUS_GRAPHICS_INVALID_VIDPN NTStatus = 0xC01E0303 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE NTStatus = 0xC01E0304 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET NTStatus = 0xC01E0305 + STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED NTStatus = 0xC01E0306 + STATUS_GRAPHICS_MODE_NOT_PINNED NTStatus = 0x401E0307 + STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET NTStatus = 0xC01E0308 + STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET NTStatus = 0xC01E0309 + STATUS_GRAPHICS_INVALID_FREQUENCY NTStatus = 0xC01E030A + STATUS_GRAPHICS_INVALID_ACTIVE_REGION NTStatus = 0xC01E030B + STATUS_GRAPHICS_INVALID_TOTAL_REGION NTStatus = 0xC01E030C + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE NTStatus = 0xC01E0310 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE NTStatus = 0xC01E0311 + STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET NTStatus = 0xC01E0312 + STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY NTStatus = 0xC01E0313 + STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET NTStatus = 0xC01E0314 + STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET NTStatus = 0xC01E0315 + STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET NTStatus = 0xC01E0316 + STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET NTStatus = 0xC01E0317 + STATUS_GRAPHICS_TARGET_ALREADY_IN_SET NTStatus = 0xC01E0318 + STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH NTStatus = 0xC01E0319 + STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY NTStatus = 0xC01E031A + STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET NTStatus = 0xC01E031B + 
STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE NTStatus = 0xC01E031C + STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET NTStatus = 0xC01E031D + STATUS_GRAPHICS_NO_PREFERRED_MODE NTStatus = 0x401E031E + STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET NTStatus = 0xC01E031F + STATUS_GRAPHICS_STALE_MODESET NTStatus = 0xC01E0320 + STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET NTStatus = 0xC01E0321 + STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE NTStatus = 0xC01E0322 + STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN NTStatus = 0xC01E0323 + STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0324 + STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION NTStatus = 0xC01E0325 + STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES NTStatus = 0xC01E0326 + STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY NTStatus = 0xC01E0327 + STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE NTStatus = 0xC01E0328 + STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET NTStatus = 0xC01E0329 + STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET NTStatus = 0xC01E032A + STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR NTStatus = 0xC01E032B + STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET NTStatus = 0xC01E032C + STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET NTStatus = 0xC01E032D + STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE NTStatus = 0xC01E032E + STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE NTStatus = 0xC01E032F + STATUS_GRAPHICS_RESOURCES_NOT_RELATED NTStatus = 0xC01E0330 + STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0331 + STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0332 + STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET NTStatus = 0xC01E0333 + STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER NTStatus = 0xC01E0334 + STATUS_GRAPHICS_NO_VIDPNMGR NTStatus = 0xC01E0335 + STATUS_GRAPHICS_NO_ACTIVE_VIDPN NTStatus = 0xC01E0336 + STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY NTStatus = 0xC01E0337 + STATUS_GRAPHICS_MONITOR_NOT_CONNECTED NTStatus = 0xC01E0338 + STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY NTStatus = 0xC01E0339 + STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE NTStatus = 0xC01E033A + STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE NTStatus = 0xC01E033B + STATUS_GRAPHICS_INVALID_STRIDE NTStatus = 0xC01E033C + STATUS_GRAPHICS_INVALID_PIXELFORMAT NTStatus = 0xC01E033D + STATUS_GRAPHICS_INVALID_COLORBASIS NTStatus = 0xC01E033E + STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE NTStatus = 0xC01E033F + STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY NTStatus = 0xC01E0340 + STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT NTStatus = 0xC01E0341 + STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE NTStatus = 0xC01E0342 + STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN NTStatus = 0xC01E0343 + STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL NTStatus = 0xC01E0344 + STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION NTStatus = 0xC01E0345 + STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED NTStatus = 0xC01E0346 + STATUS_GRAPHICS_INVALID_GAMMA_RAMP NTStatus = 0xC01E0347 + STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED NTStatus = 0xC01E0348 + STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED NTStatus = 0xC01E0349 + STATUS_GRAPHICS_MODE_NOT_IN_MODESET NTStatus = 0xC01E034A + STATUS_GRAPHICS_DATASET_IS_EMPTY NTStatus = 0x401E034B + STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET NTStatus = 0x401E034C + STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON NTStatus = 0xC01E034D + STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE NTStatus = 0xC01E034E + STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE NTStatus = 0xC01E034F + 
STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS NTStatus = 0xC01E0350 + STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED NTStatus = 0x401E0351 + STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING NTStatus = 0xC01E0352 + STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED NTStatus = 0xC01E0353 + STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS NTStatus = 0xC01E0354 + STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT NTStatus = 0xC01E0355 + STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM NTStatus = 0xC01E0356 + STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN NTStatus = 0xC01E0357 + STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT NTStatus = 0xC01E0358 + STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED NTStatus = 0xC01E0359 + STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION NTStatus = 0xC01E035A + STATUS_GRAPHICS_INVALID_CLIENT_TYPE NTStatus = 0xC01E035B + STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET NTStatus = 0xC01E035C + STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED NTStatus = 0xC01E0400 + STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED NTStatus = 0xC01E0401 + STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS NTStatus = 0x401E042F + STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER NTStatus = 0xC01E0430 + STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED NTStatus = 0xC01E0431 + STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED NTStatus = 0xC01E0432 + STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY NTStatus = 0xC01E0433 + STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED NTStatus = 0xC01E0434 + STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON NTStatus = 0xC01E0435 + STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE NTStatus = 0xC01E0436 + STATUS_GRAPHICS_LEADLINK_START_DEFERRED NTStatus = 0x401E0437 + STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER NTStatus = 0xC01E0438 + STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY NTStatus = 0x401E0439 + STATUS_GRAPHICS_START_DEFERRED NTStatus = 0x401E043A + STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED NTStatus = 0xC01E043B + STATUS_GRAPHICS_DEPENDABLE_CHILD_STATUS NTStatus = 0x401E043C + STATUS_GRAPHICS_OPM_NOT_SUPPORTED NTStatus = 0xC01E0500 + STATUS_GRAPHICS_COPP_NOT_SUPPORTED NTStatus = 0xC01E0501 + STATUS_GRAPHICS_UAB_NOT_SUPPORTED NTStatus = 0xC01E0502 + STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS NTStatus = 0xC01E0503 + STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST NTStatus = 0xC01E0505 + STATUS_GRAPHICS_OPM_INTERNAL_ERROR NTStatus = 0xC01E050B + STATUS_GRAPHICS_OPM_INVALID_HANDLE NTStatus = 0xC01E050C + STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH NTStatus = 0xC01E050E + STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED NTStatus = 0xC01E050F + STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED NTStatus = 0xC01E0510 + STATUS_GRAPHICS_PVP_HFS_FAILED NTStatus = 0xC01E0511 + STATUS_GRAPHICS_OPM_INVALID_SRM NTStatus = 0xC01E0512 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP NTStatus = 0xC01E0513 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP NTStatus = 0xC01E0514 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA NTStatus = 0xC01E0515 + STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET NTStatus = 0xC01E0516 + STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH NTStatus = 0xC01E0517 + STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE NTStatus = 0xC01E0518 + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS NTStatus = 0xC01E051A + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS NTStatus = 0xC01E051C + STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST NTStatus = 0xC01E051D + STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR NTStatus = 0xC01E051E + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS NTStatus = 0xC01E051F + 
STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED NTStatus = 0xC01E0520 + STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST NTStatus = 0xC01E0521 + STATUS_GRAPHICS_I2C_NOT_SUPPORTED NTStatus = 0xC01E0580 + STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST NTStatus = 0xC01E0581 + STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA NTStatus = 0xC01E0582 + STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA NTStatus = 0xC01E0583 + STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED NTStatus = 0xC01E0584 + STATUS_GRAPHICS_DDCCI_INVALID_DATA NTStatus = 0xC01E0585 + STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE NTStatus = 0xC01E0586 + STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING NTStatus = 0xC01E0587 + STATUS_GRAPHICS_MCA_INTERNAL_ERROR NTStatus = 0xC01E0588 + STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND NTStatus = 0xC01E0589 + STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH NTStatus = 0xC01E058A + STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM NTStatus = 0xC01E058B + STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE NTStatus = 0xC01E058C + STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS NTStatus = 0xC01E058D + STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED NTStatus = 0xC01E05E0 + STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME NTStatus = 0xC01E05E1 + STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP NTStatus = 0xC01E05E2 + STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED NTStatus = 0xC01E05E3 + STATUS_GRAPHICS_INVALID_POINTER NTStatus = 0xC01E05E4 + STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE NTStatus = 0xC01E05E5 + STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL NTStatus = 0xC01E05E6 + STATUS_GRAPHICS_INTERNAL_ERROR NTStatus = 0xC01E05E7 + STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS NTStatus = 0xC01E05E8 + STATUS_FVE_LOCKED_VOLUME NTStatus = 0xC0210000 + STATUS_FVE_NOT_ENCRYPTED NTStatus = 0xC0210001 + STATUS_FVE_BAD_INFORMATION NTStatus = 0xC0210002 + STATUS_FVE_TOO_SMALL NTStatus = 0xC0210003 + STATUS_FVE_FAILED_WRONG_FS NTStatus = 0xC0210004 + STATUS_FVE_BAD_PARTITION_SIZE NTStatus = 0xC0210005 + STATUS_FVE_FS_NOT_EXTENDED NTStatus = 0xC0210006 + STATUS_FVE_FS_MOUNTED NTStatus = 0xC0210007 + STATUS_FVE_NO_LICENSE NTStatus = 0xC0210008 + STATUS_FVE_ACTION_NOT_ALLOWED NTStatus = 0xC0210009 + STATUS_FVE_BAD_DATA NTStatus = 0xC021000A + STATUS_FVE_VOLUME_NOT_BOUND NTStatus = 0xC021000B + STATUS_FVE_NOT_DATA_VOLUME NTStatus = 0xC021000C + STATUS_FVE_CONV_READ_ERROR NTStatus = 0xC021000D + STATUS_FVE_CONV_WRITE_ERROR NTStatus = 0xC021000E + STATUS_FVE_OVERLAPPED_UPDATE NTStatus = 0xC021000F + STATUS_FVE_FAILED_SECTOR_SIZE NTStatus = 0xC0210010 + STATUS_FVE_FAILED_AUTHENTICATION NTStatus = 0xC0210011 + STATUS_FVE_NOT_OS_VOLUME NTStatus = 0xC0210012 + STATUS_FVE_KEYFILE_NOT_FOUND NTStatus = 0xC0210013 + STATUS_FVE_KEYFILE_INVALID NTStatus = 0xC0210014 + STATUS_FVE_KEYFILE_NO_VMK NTStatus = 0xC0210015 + STATUS_FVE_TPM_DISABLED NTStatus = 0xC0210016 + STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO NTStatus = 0xC0210017 + STATUS_FVE_TPM_INVALID_PCR NTStatus = 0xC0210018 + STATUS_FVE_TPM_NO_VMK NTStatus = 0xC0210019 + STATUS_FVE_PIN_INVALID NTStatus = 0xC021001A + STATUS_FVE_AUTH_INVALID_APPLICATION NTStatus = 0xC021001B + STATUS_FVE_AUTH_INVALID_CONFIG NTStatus = 0xC021001C + STATUS_FVE_DEBUGGER_ENABLED NTStatus = 0xC021001D + STATUS_FVE_DRY_RUN_FAILED NTStatus = 0xC021001E + STATUS_FVE_BAD_METADATA_POINTER NTStatus = 0xC021001F + STATUS_FVE_OLD_METADATA_COPY NTStatus = 0xC0210020 + STATUS_FVE_REBOOT_REQUIRED NTStatus = 0xC0210021 + STATUS_FVE_RAW_ACCESS NTStatus = 0xC0210022 + STATUS_FVE_RAW_BLOCKED NTStatus = 0xC0210023 + 
STATUS_FVE_NO_AUTOUNLOCK_MASTER_KEY NTStatus = 0xC0210024 + STATUS_FVE_MOR_FAILED NTStatus = 0xC0210025 + STATUS_FVE_NO_FEATURE_LICENSE NTStatus = 0xC0210026 + STATUS_FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED NTStatus = 0xC0210027 + STATUS_FVE_CONV_RECOVERY_FAILED NTStatus = 0xC0210028 + STATUS_FVE_VIRTUALIZED_SPACE_TOO_BIG NTStatus = 0xC0210029 + STATUS_FVE_INVALID_DATUM_TYPE NTStatus = 0xC021002A + STATUS_FVE_VOLUME_TOO_SMALL NTStatus = 0xC0210030 + STATUS_FVE_ENH_PIN_INVALID NTStatus = 0xC0210031 + STATUS_FVE_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210032 + STATUS_FVE_WIPE_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210033 + STATUS_FVE_NOT_ALLOWED_ON_CSV_STACK NTStatus = 0xC0210034 + STATUS_FVE_NOT_ALLOWED_ON_CLUSTER NTStatus = 0xC0210035 + STATUS_FVE_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING NTStatus = 0xC0210036 + STATUS_FVE_WIPE_CANCEL_NOT_APPLICABLE NTStatus = 0xC0210037 + STATUS_FVE_EDRIVE_DRY_RUN_FAILED NTStatus = 0xC0210038 + STATUS_FVE_SECUREBOOT_DISABLED NTStatus = 0xC0210039 + STATUS_FVE_SECUREBOOT_CONFIG_CHANGE NTStatus = 0xC021003A + STATUS_FVE_DEVICE_LOCKEDOUT NTStatus = 0xC021003B + STATUS_FVE_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT NTStatus = 0xC021003C + STATUS_FVE_NOT_DE_VOLUME NTStatus = 0xC021003D + STATUS_FVE_PROTECTION_DISABLED NTStatus = 0xC021003E + STATUS_FVE_PROTECTION_CANNOT_BE_DISABLED NTStatus = 0xC021003F + STATUS_FVE_OSV_KSR_NOT_ALLOWED NTStatus = 0xC0210040 + STATUS_FWP_CALLOUT_NOT_FOUND NTStatus = 0xC0220001 + STATUS_FWP_CONDITION_NOT_FOUND NTStatus = 0xC0220002 + STATUS_FWP_FILTER_NOT_FOUND NTStatus = 0xC0220003 + STATUS_FWP_LAYER_NOT_FOUND NTStatus = 0xC0220004 + STATUS_FWP_PROVIDER_NOT_FOUND NTStatus = 0xC0220005 + STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND NTStatus = 0xC0220006 + STATUS_FWP_SUBLAYER_NOT_FOUND NTStatus = 0xC0220007 + STATUS_FWP_NOT_FOUND NTStatus = 0xC0220008 + STATUS_FWP_ALREADY_EXISTS NTStatus = 0xC0220009 + STATUS_FWP_IN_USE NTStatus = 0xC022000A + STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS NTStatus = 0xC022000B + STATUS_FWP_WRONG_SESSION NTStatus = 0xC022000C + STATUS_FWP_NO_TXN_IN_PROGRESS NTStatus = 0xC022000D + STATUS_FWP_TXN_IN_PROGRESS NTStatus = 0xC022000E + STATUS_FWP_TXN_ABORTED NTStatus = 0xC022000F + STATUS_FWP_SESSION_ABORTED NTStatus = 0xC0220010 + STATUS_FWP_INCOMPATIBLE_TXN NTStatus = 0xC0220011 + STATUS_FWP_TIMEOUT NTStatus = 0xC0220012 + STATUS_FWP_NET_EVENTS_DISABLED NTStatus = 0xC0220013 + STATUS_FWP_INCOMPATIBLE_LAYER NTStatus = 0xC0220014 + STATUS_FWP_KM_CLIENTS_ONLY NTStatus = 0xC0220015 + STATUS_FWP_LIFETIME_MISMATCH NTStatus = 0xC0220016 + STATUS_FWP_BUILTIN_OBJECT NTStatus = 0xC0220017 + STATUS_FWP_TOO_MANY_CALLOUTS NTStatus = 0xC0220018 + STATUS_FWP_NOTIFICATION_DROPPED NTStatus = 0xC0220019 + STATUS_FWP_TRAFFIC_MISMATCH NTStatus = 0xC022001A + STATUS_FWP_INCOMPATIBLE_SA_STATE NTStatus = 0xC022001B + STATUS_FWP_NULL_POINTER NTStatus = 0xC022001C + STATUS_FWP_INVALID_ENUMERATOR NTStatus = 0xC022001D + STATUS_FWP_INVALID_FLAGS NTStatus = 0xC022001E + STATUS_FWP_INVALID_NET_MASK NTStatus = 0xC022001F + STATUS_FWP_INVALID_RANGE NTStatus = 0xC0220020 + STATUS_FWP_INVALID_INTERVAL NTStatus = 0xC0220021 + STATUS_FWP_ZERO_LENGTH_ARRAY NTStatus = 0xC0220022 + STATUS_FWP_NULL_DISPLAY_NAME NTStatus = 0xC0220023 + STATUS_FWP_INVALID_ACTION_TYPE NTStatus = 0xC0220024 + STATUS_FWP_INVALID_WEIGHT NTStatus = 0xC0220025 + STATUS_FWP_MATCH_TYPE_MISMATCH NTStatus = 0xC0220026 + STATUS_FWP_TYPE_MISMATCH NTStatus = 0xC0220027 + STATUS_FWP_OUT_OF_BOUNDS NTStatus = 0xC0220028 + STATUS_FWP_RESERVED NTStatus = 0xC0220029 + 
STATUS_FWP_DUPLICATE_CONDITION NTStatus = 0xC022002A + STATUS_FWP_DUPLICATE_KEYMOD NTStatus = 0xC022002B + STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002C + STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER NTStatus = 0xC022002D + STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002E + STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT NTStatus = 0xC022002F + STATUS_FWP_INCOMPATIBLE_AUTH_METHOD NTStatus = 0xC0220030 + STATUS_FWP_INCOMPATIBLE_DH_GROUP NTStatus = 0xC0220031 + STATUS_FWP_EM_NOT_SUPPORTED NTStatus = 0xC0220032 + STATUS_FWP_NEVER_MATCH NTStatus = 0xC0220033 + STATUS_FWP_PROVIDER_CONTEXT_MISMATCH NTStatus = 0xC0220034 + STATUS_FWP_INVALID_PARAMETER NTStatus = 0xC0220035 + STATUS_FWP_TOO_MANY_SUBLAYERS NTStatus = 0xC0220036 + STATUS_FWP_CALLOUT_NOTIFICATION_FAILED NTStatus = 0xC0220037 + STATUS_FWP_INVALID_AUTH_TRANSFORM NTStatus = 0xC0220038 + STATUS_FWP_INVALID_CIPHER_TRANSFORM NTStatus = 0xC0220039 + STATUS_FWP_INCOMPATIBLE_CIPHER_TRANSFORM NTStatus = 0xC022003A + STATUS_FWP_INVALID_TRANSFORM_COMBINATION NTStatus = 0xC022003B + STATUS_FWP_DUPLICATE_AUTH_METHOD NTStatus = 0xC022003C + STATUS_FWP_INVALID_TUNNEL_ENDPOINT NTStatus = 0xC022003D + STATUS_FWP_L2_DRIVER_NOT_READY NTStatus = 0xC022003E + STATUS_FWP_KEY_DICTATOR_ALREADY_REGISTERED NTStatus = 0xC022003F + STATUS_FWP_KEY_DICTATION_INVALID_KEYING_MATERIAL NTStatus = 0xC0220040 + STATUS_FWP_CONNECTIONS_DISABLED NTStatus = 0xC0220041 + STATUS_FWP_INVALID_DNS_NAME NTStatus = 0xC0220042 + STATUS_FWP_STILL_ON NTStatus = 0xC0220043 + STATUS_FWP_IKEEXT_NOT_RUNNING NTStatus = 0xC0220044 + STATUS_FWP_TCPIP_NOT_READY NTStatus = 0xC0220100 + STATUS_FWP_INJECT_HANDLE_CLOSING NTStatus = 0xC0220101 + STATUS_FWP_INJECT_HANDLE_STALE NTStatus = 0xC0220102 + STATUS_FWP_CANNOT_PEND NTStatus = 0xC0220103 + STATUS_FWP_DROP_NOICMP NTStatus = 0xC0220104 + STATUS_NDIS_CLOSING NTStatus = 0xC0230002 + STATUS_NDIS_BAD_VERSION NTStatus = 0xC0230004 + STATUS_NDIS_BAD_CHARACTERISTICS NTStatus = 0xC0230005 + STATUS_NDIS_ADAPTER_NOT_FOUND NTStatus = 0xC0230006 + STATUS_NDIS_OPEN_FAILED NTStatus = 0xC0230007 + STATUS_NDIS_DEVICE_FAILED NTStatus = 0xC0230008 + STATUS_NDIS_MULTICAST_FULL NTStatus = 0xC0230009 + STATUS_NDIS_MULTICAST_EXISTS NTStatus = 0xC023000A + STATUS_NDIS_MULTICAST_NOT_FOUND NTStatus = 0xC023000B + STATUS_NDIS_REQUEST_ABORTED NTStatus = 0xC023000C + STATUS_NDIS_RESET_IN_PROGRESS NTStatus = 0xC023000D + STATUS_NDIS_NOT_SUPPORTED NTStatus = 0xC02300BB + STATUS_NDIS_INVALID_PACKET NTStatus = 0xC023000F + STATUS_NDIS_ADAPTER_NOT_READY NTStatus = 0xC0230011 + STATUS_NDIS_INVALID_LENGTH NTStatus = 0xC0230014 + STATUS_NDIS_INVALID_DATA NTStatus = 0xC0230015 + STATUS_NDIS_BUFFER_TOO_SHORT NTStatus = 0xC0230016 + STATUS_NDIS_INVALID_OID NTStatus = 0xC0230017 + STATUS_NDIS_ADAPTER_REMOVED NTStatus = 0xC0230018 + STATUS_NDIS_UNSUPPORTED_MEDIA NTStatus = 0xC0230019 + STATUS_NDIS_GROUP_ADDRESS_IN_USE NTStatus = 0xC023001A + STATUS_NDIS_FILE_NOT_FOUND NTStatus = 0xC023001B + STATUS_NDIS_ERROR_READING_FILE NTStatus = 0xC023001C + STATUS_NDIS_ALREADY_MAPPED NTStatus = 0xC023001D + STATUS_NDIS_RESOURCE_CONFLICT NTStatus = 0xC023001E + STATUS_NDIS_MEDIA_DISCONNECTED NTStatus = 0xC023001F + STATUS_NDIS_INVALID_ADDRESS NTStatus = 0xC0230022 + STATUS_NDIS_INVALID_DEVICE_REQUEST NTStatus = 0xC0230010 + STATUS_NDIS_PAUSED NTStatus = 0xC023002A + STATUS_NDIS_INTERFACE_NOT_FOUND NTStatus = 0xC023002B + STATUS_NDIS_UNSUPPORTED_REVISION NTStatus = 0xC023002C + STATUS_NDIS_INVALID_PORT NTStatus = 0xC023002D + STATUS_NDIS_INVALID_PORT_STATE NTStatus 
= 0xC023002E + STATUS_NDIS_LOW_POWER_STATE NTStatus = 0xC023002F + STATUS_NDIS_REINIT_REQUIRED NTStatus = 0xC0230030 + STATUS_NDIS_NO_QUEUES NTStatus = 0xC0230031 + STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED NTStatus = 0xC0232000 + STATUS_NDIS_DOT11_MEDIA_IN_USE NTStatus = 0xC0232001 + STATUS_NDIS_DOT11_POWER_STATE_INVALID NTStatus = 0xC0232002 + STATUS_NDIS_PM_WOL_PATTERN_LIST_FULL NTStatus = 0xC0232003 + STATUS_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL NTStatus = 0xC0232004 + STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232005 + STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232006 + STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED NTStatus = 0xC0232007 + STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED NTStatus = 0xC0232008 + STATUS_NDIS_INDICATION_REQUIRED NTStatus = 0x40230001 + STATUS_NDIS_OFFLOAD_POLICY NTStatus = 0xC023100F + STATUS_NDIS_OFFLOAD_CONNECTION_REJECTED NTStatus = 0xC0231012 + STATUS_NDIS_OFFLOAD_PATH_REJECTED NTStatus = 0xC0231013 + STATUS_TPM_ERROR_MASK NTStatus = 0xC0290000 + STATUS_TPM_AUTHFAIL NTStatus = 0xC0290001 + STATUS_TPM_BADINDEX NTStatus = 0xC0290002 + STATUS_TPM_BAD_PARAMETER NTStatus = 0xC0290003 + STATUS_TPM_AUDITFAILURE NTStatus = 0xC0290004 + STATUS_TPM_CLEAR_DISABLED NTStatus = 0xC0290005 + STATUS_TPM_DEACTIVATED NTStatus = 0xC0290006 + STATUS_TPM_DISABLED NTStatus = 0xC0290007 + STATUS_TPM_DISABLED_CMD NTStatus = 0xC0290008 + STATUS_TPM_FAIL NTStatus = 0xC0290009 + STATUS_TPM_BAD_ORDINAL NTStatus = 0xC029000A + STATUS_TPM_INSTALL_DISABLED NTStatus = 0xC029000B + STATUS_TPM_INVALID_KEYHANDLE NTStatus = 0xC029000C + STATUS_TPM_KEYNOTFOUND NTStatus = 0xC029000D + STATUS_TPM_INAPPROPRIATE_ENC NTStatus = 0xC029000E + STATUS_TPM_MIGRATEFAIL NTStatus = 0xC029000F + STATUS_TPM_INVALID_PCR_INFO NTStatus = 0xC0290010 + STATUS_TPM_NOSPACE NTStatus = 0xC0290011 + STATUS_TPM_NOSRK NTStatus = 0xC0290012 + STATUS_TPM_NOTSEALED_BLOB NTStatus = 0xC0290013 + STATUS_TPM_OWNER_SET NTStatus = 0xC0290014 + STATUS_TPM_RESOURCES NTStatus = 0xC0290015 + STATUS_TPM_SHORTRANDOM NTStatus = 0xC0290016 + STATUS_TPM_SIZE NTStatus = 0xC0290017 + STATUS_TPM_WRONGPCRVAL NTStatus = 0xC0290018 + STATUS_TPM_BAD_PARAM_SIZE NTStatus = 0xC0290019 + STATUS_TPM_SHA_THREAD NTStatus = 0xC029001A + STATUS_TPM_SHA_ERROR NTStatus = 0xC029001B + STATUS_TPM_FAILEDSELFTEST NTStatus = 0xC029001C + STATUS_TPM_AUTH2FAIL NTStatus = 0xC029001D + STATUS_TPM_BADTAG NTStatus = 0xC029001E + STATUS_TPM_IOERROR NTStatus = 0xC029001F + STATUS_TPM_ENCRYPT_ERROR NTStatus = 0xC0290020 + STATUS_TPM_DECRYPT_ERROR NTStatus = 0xC0290021 + STATUS_TPM_INVALID_AUTHHANDLE NTStatus = 0xC0290022 + STATUS_TPM_NO_ENDORSEMENT NTStatus = 0xC0290023 + STATUS_TPM_INVALID_KEYUSAGE NTStatus = 0xC0290024 + STATUS_TPM_WRONG_ENTITYTYPE NTStatus = 0xC0290025 + STATUS_TPM_INVALID_POSTINIT NTStatus = 0xC0290026 + STATUS_TPM_INAPPROPRIATE_SIG NTStatus = 0xC0290027 + STATUS_TPM_BAD_KEY_PROPERTY NTStatus = 0xC0290028 + STATUS_TPM_BAD_MIGRATION NTStatus = 0xC0290029 + STATUS_TPM_BAD_SCHEME NTStatus = 0xC029002A + STATUS_TPM_BAD_DATASIZE NTStatus = 0xC029002B + STATUS_TPM_BAD_MODE NTStatus = 0xC029002C + STATUS_TPM_BAD_PRESENCE NTStatus = 0xC029002D + STATUS_TPM_BAD_VERSION NTStatus = 0xC029002E + STATUS_TPM_NO_WRAP_TRANSPORT NTStatus = 0xC029002F + STATUS_TPM_AUDITFAIL_UNSUCCESSFUL NTStatus = 0xC0290030 + STATUS_TPM_AUDITFAIL_SUCCESSFUL NTStatus = 0xC0290031 + STATUS_TPM_NOTRESETABLE NTStatus = 0xC0290032 + STATUS_TPM_NOTLOCAL NTStatus = 0xC0290033 + STATUS_TPM_BAD_TYPE NTStatus = 0xC0290034 + STATUS_TPM_INVALID_RESOURCE NTStatus = 
0xC0290035 + STATUS_TPM_NOTFIPS NTStatus = 0xC0290036 + STATUS_TPM_INVALID_FAMILY NTStatus = 0xC0290037 + STATUS_TPM_NO_NV_PERMISSION NTStatus = 0xC0290038 + STATUS_TPM_REQUIRES_SIGN NTStatus = 0xC0290039 + STATUS_TPM_KEY_NOTSUPPORTED NTStatus = 0xC029003A + STATUS_TPM_AUTH_CONFLICT NTStatus = 0xC029003B + STATUS_TPM_AREA_LOCKED NTStatus = 0xC029003C + STATUS_TPM_BAD_LOCALITY NTStatus = 0xC029003D + STATUS_TPM_READ_ONLY NTStatus = 0xC029003E + STATUS_TPM_PER_NOWRITE NTStatus = 0xC029003F + STATUS_TPM_FAMILYCOUNT NTStatus = 0xC0290040 + STATUS_TPM_WRITE_LOCKED NTStatus = 0xC0290041 + STATUS_TPM_BAD_ATTRIBUTES NTStatus = 0xC0290042 + STATUS_TPM_INVALID_STRUCTURE NTStatus = 0xC0290043 + STATUS_TPM_KEY_OWNER_CONTROL NTStatus = 0xC0290044 + STATUS_TPM_BAD_COUNTER NTStatus = 0xC0290045 + STATUS_TPM_NOT_FULLWRITE NTStatus = 0xC0290046 + STATUS_TPM_CONTEXT_GAP NTStatus = 0xC0290047 + STATUS_TPM_MAXNVWRITES NTStatus = 0xC0290048 + STATUS_TPM_NOOPERATOR NTStatus = 0xC0290049 + STATUS_TPM_RESOURCEMISSING NTStatus = 0xC029004A + STATUS_TPM_DELEGATE_LOCK NTStatus = 0xC029004B + STATUS_TPM_DELEGATE_FAMILY NTStatus = 0xC029004C + STATUS_TPM_DELEGATE_ADMIN NTStatus = 0xC029004D + STATUS_TPM_TRANSPORT_NOTEXCLUSIVE NTStatus = 0xC029004E + STATUS_TPM_OWNER_CONTROL NTStatus = 0xC029004F + STATUS_TPM_DAA_RESOURCES NTStatus = 0xC0290050 + STATUS_TPM_DAA_INPUT_DATA0 NTStatus = 0xC0290051 + STATUS_TPM_DAA_INPUT_DATA1 NTStatus = 0xC0290052 + STATUS_TPM_DAA_ISSUER_SETTINGS NTStatus = 0xC0290053 + STATUS_TPM_DAA_TPM_SETTINGS NTStatus = 0xC0290054 + STATUS_TPM_DAA_STAGE NTStatus = 0xC0290055 + STATUS_TPM_DAA_ISSUER_VALIDITY NTStatus = 0xC0290056 + STATUS_TPM_DAA_WRONG_W NTStatus = 0xC0290057 + STATUS_TPM_BAD_HANDLE NTStatus = 0xC0290058 + STATUS_TPM_BAD_DELEGATE NTStatus = 0xC0290059 + STATUS_TPM_BADCONTEXT NTStatus = 0xC029005A + STATUS_TPM_TOOMANYCONTEXTS NTStatus = 0xC029005B + STATUS_TPM_MA_TICKET_SIGNATURE NTStatus = 0xC029005C + STATUS_TPM_MA_DESTINATION NTStatus = 0xC029005D + STATUS_TPM_MA_SOURCE NTStatus = 0xC029005E + STATUS_TPM_MA_AUTHORITY NTStatus = 0xC029005F + STATUS_TPM_PERMANENTEK NTStatus = 0xC0290061 + STATUS_TPM_BAD_SIGNATURE NTStatus = 0xC0290062 + STATUS_TPM_NOCONTEXTSPACE NTStatus = 0xC0290063 + STATUS_TPM_20_E_ASYMMETRIC NTStatus = 0xC0290081 + STATUS_TPM_20_E_ATTRIBUTES NTStatus = 0xC0290082 + STATUS_TPM_20_E_HASH NTStatus = 0xC0290083 + STATUS_TPM_20_E_VALUE NTStatus = 0xC0290084 + STATUS_TPM_20_E_HIERARCHY NTStatus = 0xC0290085 + STATUS_TPM_20_E_KEY_SIZE NTStatus = 0xC0290087 + STATUS_TPM_20_E_MGF NTStatus = 0xC0290088 + STATUS_TPM_20_E_MODE NTStatus = 0xC0290089 + STATUS_TPM_20_E_TYPE NTStatus = 0xC029008A + STATUS_TPM_20_E_HANDLE NTStatus = 0xC029008B + STATUS_TPM_20_E_KDF NTStatus = 0xC029008C + STATUS_TPM_20_E_RANGE NTStatus = 0xC029008D + STATUS_TPM_20_E_AUTH_FAIL NTStatus = 0xC029008E + STATUS_TPM_20_E_NONCE NTStatus = 0xC029008F + STATUS_TPM_20_E_PP NTStatus = 0xC0290090 + STATUS_TPM_20_E_SCHEME NTStatus = 0xC0290092 + STATUS_TPM_20_E_SIZE NTStatus = 0xC0290095 + STATUS_TPM_20_E_SYMMETRIC NTStatus = 0xC0290096 + STATUS_TPM_20_E_TAG NTStatus = 0xC0290097 + STATUS_TPM_20_E_SELECTOR NTStatus = 0xC0290098 + STATUS_TPM_20_E_INSUFFICIENT NTStatus = 0xC029009A + STATUS_TPM_20_E_SIGNATURE NTStatus = 0xC029009B + STATUS_TPM_20_E_KEY NTStatus = 0xC029009C + STATUS_TPM_20_E_POLICY_FAIL NTStatus = 0xC029009D + STATUS_TPM_20_E_INTEGRITY NTStatus = 0xC029009F + STATUS_TPM_20_E_TICKET NTStatus = 0xC02900A0 + STATUS_TPM_20_E_RESERVED_BITS NTStatus = 0xC02900A1 + STATUS_TPM_20_E_BAD_AUTH NTStatus = 
0xC02900A2 + STATUS_TPM_20_E_EXPIRED NTStatus = 0xC02900A3 + STATUS_TPM_20_E_POLICY_CC NTStatus = 0xC02900A4 + STATUS_TPM_20_E_BINDING NTStatus = 0xC02900A5 + STATUS_TPM_20_E_CURVE NTStatus = 0xC02900A6 + STATUS_TPM_20_E_ECC_POINT NTStatus = 0xC02900A7 + STATUS_TPM_20_E_INITIALIZE NTStatus = 0xC0290100 + STATUS_TPM_20_E_FAILURE NTStatus = 0xC0290101 + STATUS_TPM_20_E_SEQUENCE NTStatus = 0xC0290103 + STATUS_TPM_20_E_PRIVATE NTStatus = 0xC029010B + STATUS_TPM_20_E_HMAC NTStatus = 0xC0290119 + STATUS_TPM_20_E_DISABLED NTStatus = 0xC0290120 + STATUS_TPM_20_E_EXCLUSIVE NTStatus = 0xC0290121 + STATUS_TPM_20_E_ECC_CURVE NTStatus = 0xC0290123 + STATUS_TPM_20_E_AUTH_TYPE NTStatus = 0xC0290124 + STATUS_TPM_20_E_AUTH_MISSING NTStatus = 0xC0290125 + STATUS_TPM_20_E_POLICY NTStatus = 0xC0290126 + STATUS_TPM_20_E_PCR NTStatus = 0xC0290127 + STATUS_TPM_20_E_PCR_CHANGED NTStatus = 0xC0290128 + STATUS_TPM_20_E_UPGRADE NTStatus = 0xC029012D + STATUS_TPM_20_E_TOO_MANY_CONTEXTS NTStatus = 0xC029012E + STATUS_TPM_20_E_AUTH_UNAVAILABLE NTStatus = 0xC029012F + STATUS_TPM_20_E_REBOOT NTStatus = 0xC0290130 + STATUS_TPM_20_E_UNBALANCED NTStatus = 0xC0290131 + STATUS_TPM_20_E_COMMAND_SIZE NTStatus = 0xC0290142 + STATUS_TPM_20_E_COMMAND_CODE NTStatus = 0xC0290143 + STATUS_TPM_20_E_AUTHSIZE NTStatus = 0xC0290144 + STATUS_TPM_20_E_AUTH_CONTEXT NTStatus = 0xC0290145 + STATUS_TPM_20_E_NV_RANGE NTStatus = 0xC0290146 + STATUS_TPM_20_E_NV_SIZE NTStatus = 0xC0290147 + STATUS_TPM_20_E_NV_LOCKED NTStatus = 0xC0290148 + STATUS_TPM_20_E_NV_AUTHORIZATION NTStatus = 0xC0290149 + STATUS_TPM_20_E_NV_UNINITIALIZED NTStatus = 0xC029014A + STATUS_TPM_20_E_NV_SPACE NTStatus = 0xC029014B + STATUS_TPM_20_E_NV_DEFINED NTStatus = 0xC029014C + STATUS_TPM_20_E_BAD_CONTEXT NTStatus = 0xC0290150 + STATUS_TPM_20_E_CPHASH NTStatus = 0xC0290151 + STATUS_TPM_20_E_PARENT NTStatus = 0xC0290152 + STATUS_TPM_20_E_NEEDS_TEST NTStatus = 0xC0290153 + STATUS_TPM_20_E_NO_RESULT NTStatus = 0xC0290154 + STATUS_TPM_20_E_SENSITIVE NTStatus = 0xC0290155 + STATUS_TPM_COMMAND_BLOCKED NTStatus = 0xC0290400 + STATUS_TPM_INVALID_HANDLE NTStatus = 0xC0290401 + STATUS_TPM_DUPLICATE_VHANDLE NTStatus = 0xC0290402 + STATUS_TPM_EMBEDDED_COMMAND_BLOCKED NTStatus = 0xC0290403 + STATUS_TPM_EMBEDDED_COMMAND_UNSUPPORTED NTStatus = 0xC0290404 + STATUS_TPM_RETRY NTStatus = 0xC0290800 + STATUS_TPM_NEEDS_SELFTEST NTStatus = 0xC0290801 + STATUS_TPM_DOING_SELFTEST NTStatus = 0xC0290802 + STATUS_TPM_DEFEND_LOCK_RUNNING NTStatus = 0xC0290803 + STATUS_TPM_COMMAND_CANCELED NTStatus = 0xC0291001 + STATUS_TPM_TOO_MANY_CONTEXTS NTStatus = 0xC0291002 + STATUS_TPM_NOT_FOUND NTStatus = 0xC0291003 + STATUS_TPM_ACCESS_DENIED NTStatus = 0xC0291004 + STATUS_TPM_INSUFFICIENT_BUFFER NTStatus = 0xC0291005 + STATUS_TPM_PPI_FUNCTION_UNSUPPORTED NTStatus = 0xC0291006 + STATUS_PCP_ERROR_MASK NTStatus = 0xC0292000 + STATUS_PCP_DEVICE_NOT_READY NTStatus = 0xC0292001 + STATUS_PCP_INVALID_HANDLE NTStatus = 0xC0292002 + STATUS_PCP_INVALID_PARAMETER NTStatus = 0xC0292003 + STATUS_PCP_FLAG_NOT_SUPPORTED NTStatus = 0xC0292004 + STATUS_PCP_NOT_SUPPORTED NTStatus = 0xC0292005 + STATUS_PCP_BUFFER_TOO_SMALL NTStatus = 0xC0292006 + STATUS_PCP_INTERNAL_ERROR NTStatus = 0xC0292007 + STATUS_PCP_AUTHENTICATION_FAILED NTStatus = 0xC0292008 + STATUS_PCP_AUTHENTICATION_IGNORED NTStatus = 0xC0292009 + STATUS_PCP_POLICY_NOT_FOUND NTStatus = 0xC029200A + STATUS_PCP_PROFILE_NOT_FOUND NTStatus = 0xC029200B + STATUS_PCP_VALIDATION_FAILED NTStatus = 0xC029200C + STATUS_PCP_DEVICE_NOT_FOUND NTStatus = 0xC029200D + 
STATUS_PCP_WRONG_PARENT NTStatus = 0xC029200E + STATUS_PCP_KEY_NOT_LOADED NTStatus = 0xC029200F + STATUS_PCP_NO_KEY_CERTIFICATION NTStatus = 0xC0292010 + STATUS_PCP_KEY_NOT_FINALIZED NTStatus = 0xC0292011 + STATUS_PCP_ATTESTATION_CHALLENGE_NOT_SET NTStatus = 0xC0292012 + STATUS_PCP_NOT_PCR_BOUND NTStatus = 0xC0292013 + STATUS_PCP_KEY_ALREADY_FINALIZED NTStatus = 0xC0292014 + STATUS_PCP_KEY_USAGE_POLICY_NOT_SUPPORTED NTStatus = 0xC0292015 + STATUS_PCP_KEY_USAGE_POLICY_INVALID NTStatus = 0xC0292016 + STATUS_PCP_SOFT_KEY_ERROR NTStatus = 0xC0292017 + STATUS_PCP_KEY_NOT_AUTHENTICATED NTStatus = 0xC0292018 + STATUS_PCP_KEY_NOT_AIK NTStatus = 0xC0292019 + STATUS_PCP_KEY_NOT_SIGNING_KEY NTStatus = 0xC029201A + STATUS_PCP_LOCKED_OUT NTStatus = 0xC029201B + STATUS_PCP_CLAIM_TYPE_NOT_SUPPORTED NTStatus = 0xC029201C + STATUS_PCP_TPM_VERSION_NOT_SUPPORTED NTStatus = 0xC029201D + STATUS_PCP_BUFFER_LENGTH_MISMATCH NTStatus = 0xC029201E + STATUS_PCP_IFX_RSA_KEY_CREATION_BLOCKED NTStatus = 0xC029201F + STATUS_PCP_TICKET_MISSING NTStatus = 0xC0292020 + STATUS_PCP_RAW_POLICY_NOT_SUPPORTED NTStatus = 0xC0292021 + STATUS_PCP_KEY_HANDLE_INVALIDATED NTStatus = 0xC0292022 + STATUS_PCP_UNSUPPORTED_PSS_SALT NTStatus = 0x40292023 + STATUS_RTPM_CONTEXT_CONTINUE NTStatus = 0x00293000 + STATUS_RTPM_CONTEXT_COMPLETE NTStatus = 0x00293001 + STATUS_RTPM_NO_RESULT NTStatus = 0xC0293002 + STATUS_RTPM_PCR_READ_INCOMPLETE NTStatus = 0xC0293003 + STATUS_RTPM_INVALID_CONTEXT NTStatus = 0xC0293004 + STATUS_RTPM_UNSUPPORTED_CMD NTStatus = 0xC0293005 + STATUS_TPM_ZERO_EXHAUST_ENABLED NTStatus = 0xC0294000 + STATUS_HV_INVALID_HYPERCALL_CODE NTStatus = 0xC0350002 + STATUS_HV_INVALID_HYPERCALL_INPUT NTStatus = 0xC0350003 + STATUS_HV_INVALID_ALIGNMENT NTStatus = 0xC0350004 + STATUS_HV_INVALID_PARAMETER NTStatus = 0xC0350005 + STATUS_HV_ACCESS_DENIED NTStatus = 0xC0350006 + STATUS_HV_INVALID_PARTITION_STATE NTStatus = 0xC0350007 + STATUS_HV_OPERATION_DENIED NTStatus = 0xC0350008 + STATUS_HV_UNKNOWN_PROPERTY NTStatus = 0xC0350009 + STATUS_HV_PROPERTY_VALUE_OUT_OF_RANGE NTStatus = 0xC035000A + STATUS_HV_INSUFFICIENT_MEMORY NTStatus = 0xC035000B + STATUS_HV_PARTITION_TOO_DEEP NTStatus = 0xC035000C + STATUS_HV_INVALID_PARTITION_ID NTStatus = 0xC035000D + STATUS_HV_INVALID_VP_INDEX NTStatus = 0xC035000E + STATUS_HV_INVALID_PORT_ID NTStatus = 0xC0350011 + STATUS_HV_INVALID_CONNECTION_ID NTStatus = 0xC0350012 + STATUS_HV_INSUFFICIENT_BUFFERS NTStatus = 0xC0350013 + STATUS_HV_NOT_ACKNOWLEDGED NTStatus = 0xC0350014 + STATUS_HV_INVALID_VP_STATE NTStatus = 0xC0350015 + STATUS_HV_ACKNOWLEDGED NTStatus = 0xC0350016 + STATUS_HV_INVALID_SAVE_RESTORE_STATE NTStatus = 0xC0350017 + STATUS_HV_INVALID_SYNIC_STATE NTStatus = 0xC0350018 + STATUS_HV_OBJECT_IN_USE NTStatus = 0xC0350019 + STATUS_HV_INVALID_PROXIMITY_DOMAIN_INFO NTStatus = 0xC035001A + STATUS_HV_NO_DATA NTStatus = 0xC035001B + STATUS_HV_INACTIVE NTStatus = 0xC035001C + STATUS_HV_NO_RESOURCES NTStatus = 0xC035001D + STATUS_HV_FEATURE_UNAVAILABLE NTStatus = 0xC035001E + STATUS_HV_INSUFFICIENT_BUFFER NTStatus = 0xC0350033 + STATUS_HV_INSUFFICIENT_DEVICE_DOMAINS NTStatus = 0xC0350038 + STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003C + STATUS_HV_CPUID_XSAVE_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003D + STATUS_HV_PROCESSOR_STARTUP_TIMEOUT NTStatus = 0xC035003E + STATUS_HV_SMX_ENABLED NTStatus = 0xC035003F + STATUS_HV_INVALID_LP_INDEX NTStatus = 0xC0350041 + STATUS_HV_INVALID_REGISTER_VALUE NTStatus = 0xC0350050 + STATUS_HV_INVALID_VTL_STATE NTStatus = 0xC0350051 + 
STATUS_HV_NX_NOT_DETECTED NTStatus = 0xC0350055 + STATUS_HV_INVALID_DEVICE_ID NTStatus = 0xC0350057 + STATUS_HV_INVALID_DEVICE_STATE NTStatus = 0xC0350058 + STATUS_HV_PENDING_PAGE_REQUESTS NTStatus = 0x00350059 + STATUS_HV_PAGE_REQUEST_INVALID NTStatus = 0xC0350060 + STATUS_HV_INVALID_CPU_GROUP_ID NTStatus = 0xC035006F + STATUS_HV_INVALID_CPU_GROUP_STATE NTStatus = 0xC0350070 + STATUS_HV_OPERATION_FAILED NTStatus = 0xC0350071 + STATUS_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE NTStatus = 0xC0350072 + STATUS_HV_INSUFFICIENT_ROOT_MEMORY NTStatus = 0xC0350073 + STATUS_HV_NOT_PRESENT NTStatus = 0xC0351000 + STATUS_VID_DUPLICATE_HANDLER NTStatus = 0xC0370001 + STATUS_VID_TOO_MANY_HANDLERS NTStatus = 0xC0370002 + STATUS_VID_QUEUE_FULL NTStatus = 0xC0370003 + STATUS_VID_HANDLER_NOT_PRESENT NTStatus = 0xC0370004 + STATUS_VID_INVALID_OBJECT_NAME NTStatus = 0xC0370005 + STATUS_VID_PARTITION_NAME_TOO_LONG NTStatus = 0xC0370006 + STATUS_VID_MESSAGE_QUEUE_NAME_TOO_LONG NTStatus = 0xC0370007 + STATUS_VID_PARTITION_ALREADY_EXISTS NTStatus = 0xC0370008 + STATUS_VID_PARTITION_DOES_NOT_EXIST NTStatus = 0xC0370009 + STATUS_VID_PARTITION_NAME_NOT_FOUND NTStatus = 0xC037000A + STATUS_VID_MESSAGE_QUEUE_ALREADY_EXISTS NTStatus = 0xC037000B + STATUS_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT NTStatus = 0xC037000C + STATUS_VID_MB_STILL_REFERENCED NTStatus = 0xC037000D + STATUS_VID_CHILD_GPA_PAGE_SET_CORRUPTED NTStatus = 0xC037000E + STATUS_VID_INVALID_NUMA_SETTINGS NTStatus = 0xC037000F + STATUS_VID_INVALID_NUMA_NODE_INDEX NTStatus = 0xC0370010 + STATUS_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED NTStatus = 0xC0370011 + STATUS_VID_INVALID_MEMORY_BLOCK_HANDLE NTStatus = 0xC0370012 + STATUS_VID_PAGE_RANGE_OVERFLOW NTStatus = 0xC0370013 + STATUS_VID_INVALID_MESSAGE_QUEUE_HANDLE NTStatus = 0xC0370014 + STATUS_VID_INVALID_GPA_RANGE_HANDLE NTStatus = 0xC0370015 + STATUS_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE NTStatus = 0xC0370016 + STATUS_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED NTStatus = 0xC0370017 + STATUS_VID_INVALID_PPM_HANDLE NTStatus = 0xC0370018 + STATUS_VID_MBPS_ARE_LOCKED NTStatus = 0xC0370019 + STATUS_VID_MESSAGE_QUEUE_CLOSED NTStatus = 0xC037001A + STATUS_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED NTStatus = 0xC037001B + STATUS_VID_STOP_PENDING NTStatus = 0xC037001C + STATUS_VID_INVALID_PROCESSOR_STATE NTStatus = 0xC037001D + STATUS_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT NTStatus = 0xC037001E + STATUS_VID_KM_INTERFACE_ALREADY_INITIALIZED NTStatus = 0xC037001F + STATUS_VID_MB_PROPERTY_ALREADY_SET_RESET NTStatus = 0xC0370020 + STATUS_VID_MMIO_RANGE_DESTROYED NTStatus = 0xC0370021 + STATUS_VID_INVALID_CHILD_GPA_PAGE_SET NTStatus = 0xC0370022 + STATUS_VID_RESERVE_PAGE_SET_IS_BEING_USED NTStatus = 0xC0370023 + STATUS_VID_RESERVE_PAGE_SET_TOO_SMALL NTStatus = 0xC0370024 + STATUS_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE NTStatus = 0xC0370025 + STATUS_VID_MBP_COUNT_EXCEEDED_LIMIT NTStatus = 0xC0370026 + STATUS_VID_SAVED_STATE_CORRUPT NTStatus = 0xC0370027 + STATUS_VID_SAVED_STATE_UNRECOGNIZED_ITEM NTStatus = 0xC0370028 + STATUS_VID_SAVED_STATE_INCOMPATIBLE NTStatus = 0xC0370029 + STATUS_VID_VTL_ACCESS_DENIED NTStatus = 0xC037002A + STATUS_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED NTStatus = 0x80370001 + STATUS_IPSEC_BAD_SPI NTStatus = 0xC0360001 + STATUS_IPSEC_SA_LIFETIME_EXPIRED NTStatus = 0xC0360002 + STATUS_IPSEC_WRONG_SA NTStatus = 0xC0360003 + STATUS_IPSEC_REPLAY_CHECK_FAILED NTStatus = 0xC0360004 + STATUS_IPSEC_INVALID_PACKET NTStatus = 0xC0360005 + STATUS_IPSEC_INTEGRITY_CHECK_FAILED NTStatus = 0xC0360006 + STATUS_IPSEC_CLEAR_TEXT_DROP 
NTStatus = 0xC0360007 + STATUS_IPSEC_AUTH_FIREWALL_DROP NTStatus = 0xC0360008 + STATUS_IPSEC_THROTTLE_DROP NTStatus = 0xC0360009 + STATUS_IPSEC_DOSP_BLOCK NTStatus = 0xC0368000 + STATUS_IPSEC_DOSP_RECEIVED_MULTICAST NTStatus = 0xC0368001 + STATUS_IPSEC_DOSP_INVALID_PACKET NTStatus = 0xC0368002 + STATUS_IPSEC_DOSP_STATE_LOOKUP_FAILED NTStatus = 0xC0368003 + STATUS_IPSEC_DOSP_MAX_ENTRIES NTStatus = 0xC0368004 + STATUS_IPSEC_DOSP_KEYMOD_NOT_ALLOWED NTStatus = 0xC0368005 + STATUS_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES NTStatus = 0xC0368006 + STATUS_VOLMGR_INCOMPLETE_REGENERATION NTStatus = 0x80380001 + STATUS_VOLMGR_INCOMPLETE_DISK_MIGRATION NTStatus = 0x80380002 + STATUS_VOLMGR_DATABASE_FULL NTStatus = 0xC0380001 + STATUS_VOLMGR_DISK_CONFIGURATION_CORRUPTED NTStatus = 0xC0380002 + STATUS_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC NTStatus = 0xC0380003 + STATUS_VOLMGR_PACK_CONFIG_UPDATE_FAILED NTStatus = 0xC0380004 + STATUS_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME NTStatus = 0xC0380005 + STATUS_VOLMGR_DISK_DUPLICATE NTStatus = 0xC0380006 + STATUS_VOLMGR_DISK_DYNAMIC NTStatus = 0xC0380007 + STATUS_VOLMGR_DISK_ID_INVALID NTStatus = 0xC0380008 + STATUS_VOLMGR_DISK_INVALID NTStatus = 0xC0380009 + STATUS_VOLMGR_DISK_LAST_VOTER NTStatus = 0xC038000A + STATUS_VOLMGR_DISK_LAYOUT_INVALID NTStatus = 0xC038000B + STATUS_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS NTStatus = 0xC038000C + STATUS_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED NTStatus = 0xC038000D + STATUS_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL NTStatus = 0xC038000E + STATUS_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS NTStatus = 0xC038000F + STATUS_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS NTStatus = 0xC0380010 + STATUS_VOLMGR_DISK_MISSING NTStatus = 0xC0380011 + STATUS_VOLMGR_DISK_NOT_EMPTY NTStatus = 0xC0380012 + STATUS_VOLMGR_DISK_NOT_ENOUGH_SPACE NTStatus = 0xC0380013 + STATUS_VOLMGR_DISK_REVECTORING_FAILED NTStatus = 0xC0380014 + STATUS_VOLMGR_DISK_SECTOR_SIZE_INVALID NTStatus = 0xC0380015 + STATUS_VOLMGR_DISK_SET_NOT_CONTAINED NTStatus = 0xC0380016 + STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS NTStatus = 0xC0380017 + STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES NTStatus = 0xC0380018 + STATUS_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED NTStatus = 0xC0380019 + STATUS_VOLMGR_EXTENT_ALREADY_USED NTStatus = 0xC038001A + STATUS_VOLMGR_EXTENT_NOT_CONTIGUOUS NTStatus = 0xC038001B + STATUS_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION NTStatus = 0xC038001C + STATUS_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED NTStatus = 0xC038001D + STATUS_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION NTStatus = 0xC038001E + STATUS_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH NTStatus = 0xC038001F + STATUS_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED NTStatus = 0xC0380020 + STATUS_VOLMGR_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0380021 + STATUS_VOLMGR_MAXIMUM_REGISTERED_USERS NTStatus = 0xC0380022 + STATUS_VOLMGR_MEMBER_IN_SYNC NTStatus = 0xC0380023 + STATUS_VOLMGR_MEMBER_INDEX_DUPLICATE NTStatus = 0xC0380024 + STATUS_VOLMGR_MEMBER_INDEX_INVALID NTStatus = 0xC0380025 + STATUS_VOLMGR_MEMBER_MISSING NTStatus = 0xC0380026 + STATUS_VOLMGR_MEMBER_NOT_DETACHED NTStatus = 0xC0380027 + STATUS_VOLMGR_MEMBER_REGENERATING NTStatus = 0xC0380028 + STATUS_VOLMGR_ALL_DISKS_FAILED NTStatus = 0xC0380029 + STATUS_VOLMGR_NO_REGISTERED_USERS NTStatus = 0xC038002A + STATUS_VOLMGR_NO_SUCH_USER NTStatus = 0xC038002B + STATUS_VOLMGR_NOTIFICATION_RESET NTStatus = 0xC038002C + STATUS_VOLMGR_NUMBER_OF_MEMBERS_INVALID NTStatus = 0xC038002D + STATUS_VOLMGR_NUMBER_OF_PLEXES_INVALID NTStatus = 0xC038002E + STATUS_VOLMGR_PACK_DUPLICATE 
NTStatus = 0xC038002F + STATUS_VOLMGR_PACK_ID_INVALID NTStatus = 0xC0380030 + STATUS_VOLMGR_PACK_INVALID NTStatus = 0xC0380031 + STATUS_VOLMGR_PACK_NAME_INVALID NTStatus = 0xC0380032 + STATUS_VOLMGR_PACK_OFFLINE NTStatus = 0xC0380033 + STATUS_VOLMGR_PACK_HAS_QUORUM NTStatus = 0xC0380034 + STATUS_VOLMGR_PACK_WITHOUT_QUORUM NTStatus = 0xC0380035 + STATUS_VOLMGR_PARTITION_STYLE_INVALID NTStatus = 0xC0380036 + STATUS_VOLMGR_PARTITION_UPDATE_FAILED NTStatus = 0xC0380037 + STATUS_VOLMGR_PLEX_IN_SYNC NTStatus = 0xC0380038 + STATUS_VOLMGR_PLEX_INDEX_DUPLICATE NTStatus = 0xC0380039 + STATUS_VOLMGR_PLEX_INDEX_INVALID NTStatus = 0xC038003A + STATUS_VOLMGR_PLEX_LAST_ACTIVE NTStatus = 0xC038003B + STATUS_VOLMGR_PLEX_MISSING NTStatus = 0xC038003C + STATUS_VOLMGR_PLEX_REGENERATING NTStatus = 0xC038003D + STATUS_VOLMGR_PLEX_TYPE_INVALID NTStatus = 0xC038003E + STATUS_VOLMGR_PLEX_NOT_RAID5 NTStatus = 0xC038003F + STATUS_VOLMGR_PLEX_NOT_SIMPLE NTStatus = 0xC0380040 + STATUS_VOLMGR_STRUCTURE_SIZE_INVALID NTStatus = 0xC0380041 + STATUS_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS NTStatus = 0xC0380042 + STATUS_VOLMGR_TRANSACTION_IN_PROGRESS NTStatus = 0xC0380043 + STATUS_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE NTStatus = 0xC0380044 + STATUS_VOLMGR_VOLUME_CONTAINS_MISSING_DISK NTStatus = 0xC0380045 + STATUS_VOLMGR_VOLUME_ID_INVALID NTStatus = 0xC0380046 + STATUS_VOLMGR_VOLUME_LENGTH_INVALID NTStatus = 0xC0380047 + STATUS_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE NTStatus = 0xC0380048 + STATUS_VOLMGR_VOLUME_NOT_MIRRORED NTStatus = 0xC0380049 + STATUS_VOLMGR_VOLUME_NOT_RETAINED NTStatus = 0xC038004A + STATUS_VOLMGR_VOLUME_OFFLINE NTStatus = 0xC038004B + STATUS_VOLMGR_VOLUME_RETAINED NTStatus = 0xC038004C + STATUS_VOLMGR_NUMBER_OF_EXTENTS_INVALID NTStatus = 0xC038004D + STATUS_VOLMGR_DIFFERENT_SECTOR_SIZE NTStatus = 0xC038004E + STATUS_VOLMGR_BAD_BOOT_DISK NTStatus = 0xC038004F + STATUS_VOLMGR_PACK_CONFIG_OFFLINE NTStatus = 0xC0380050 + STATUS_VOLMGR_PACK_CONFIG_ONLINE NTStatus = 0xC0380051 + STATUS_VOLMGR_NOT_PRIMARY_PACK NTStatus = 0xC0380052 + STATUS_VOLMGR_PACK_LOG_UPDATE_FAILED NTStatus = 0xC0380053 + STATUS_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID NTStatus = 0xC0380054 + STATUS_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID NTStatus = 0xC0380055 + STATUS_VOLMGR_VOLUME_MIRRORED NTStatus = 0xC0380056 + STATUS_VOLMGR_PLEX_NOT_SIMPLE_SPANNED NTStatus = 0xC0380057 + STATUS_VOLMGR_NO_VALID_LOG_COPIES NTStatus = 0xC0380058 + STATUS_VOLMGR_PRIMARY_PACK_PRESENT NTStatus = 0xC0380059 + STATUS_VOLMGR_NUMBER_OF_DISKS_INVALID NTStatus = 0xC038005A + STATUS_VOLMGR_MIRROR_NOT_SUPPORTED NTStatus = 0xC038005B + STATUS_VOLMGR_RAID5_NOT_SUPPORTED NTStatus = 0xC038005C + STATUS_BCD_NOT_ALL_ENTRIES_IMPORTED NTStatus = 0x80390001 + STATUS_BCD_TOO_MANY_ELEMENTS NTStatus = 0xC0390002 + STATUS_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED NTStatus = 0x80390003 + STATUS_VHD_DRIVE_FOOTER_MISSING NTStatus = 0xC03A0001 + STATUS_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH NTStatus = 0xC03A0002 + STATUS_VHD_DRIVE_FOOTER_CORRUPT NTStatus = 0xC03A0003 + STATUS_VHD_FORMAT_UNKNOWN NTStatus = 0xC03A0004 + STATUS_VHD_FORMAT_UNSUPPORTED_VERSION NTStatus = 0xC03A0005 + STATUS_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH NTStatus = 0xC03A0006 + STATUS_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION NTStatus = 0xC03A0007 + STATUS_VHD_SPARSE_HEADER_CORRUPT NTStatus = 0xC03A0008 + STATUS_VHD_BLOCK_ALLOCATION_FAILURE NTStatus = 0xC03A0009 + STATUS_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT NTStatus = 0xC03A000A + STATUS_VHD_INVALID_BLOCK_SIZE NTStatus = 0xC03A000B + STATUS_VHD_BITMAP_MISMATCH NTStatus = 
0xC03A000C + STATUS_VHD_PARENT_VHD_NOT_FOUND NTStatus = 0xC03A000D + STATUS_VHD_CHILD_PARENT_ID_MISMATCH NTStatus = 0xC03A000E + STATUS_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH NTStatus = 0xC03A000F + STATUS_VHD_METADATA_READ_FAILURE NTStatus = 0xC03A0010 + STATUS_VHD_METADATA_WRITE_FAILURE NTStatus = 0xC03A0011 + STATUS_VHD_INVALID_SIZE NTStatus = 0xC03A0012 + STATUS_VHD_INVALID_FILE_SIZE NTStatus = 0xC03A0013 + STATUS_VIRTDISK_PROVIDER_NOT_FOUND NTStatus = 0xC03A0014 + STATUS_VIRTDISK_NOT_VIRTUAL_DISK NTStatus = 0xC03A0015 + STATUS_VHD_PARENT_VHD_ACCESS_DENIED NTStatus = 0xC03A0016 + STATUS_VHD_CHILD_PARENT_SIZE_MISMATCH NTStatus = 0xC03A0017 + STATUS_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED NTStatus = 0xC03A0018 + STATUS_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT NTStatus = 0xC03A0019 + STATUS_VIRTUAL_DISK_LIMITATION NTStatus = 0xC03A001A + STATUS_VHD_INVALID_TYPE NTStatus = 0xC03A001B + STATUS_VHD_INVALID_STATE NTStatus = 0xC03A001C + STATUS_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE NTStatus = 0xC03A001D + STATUS_VIRTDISK_DISK_ALREADY_OWNED NTStatus = 0xC03A001E + STATUS_VIRTDISK_DISK_ONLINE_AND_WRITABLE NTStatus = 0xC03A001F + STATUS_CTLOG_TRACKING_NOT_INITIALIZED NTStatus = 0xC03A0020 + STATUS_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE NTStatus = 0xC03A0021 + STATUS_CTLOG_VHD_CHANGED_OFFLINE NTStatus = 0xC03A0022 + STATUS_CTLOG_INVALID_TRACKING_STATE NTStatus = 0xC03A0023 + STATUS_CTLOG_INCONSISTENT_TRACKING_FILE NTStatus = 0xC03A0024 + STATUS_VHD_METADATA_FULL NTStatus = 0xC03A0028 + STATUS_VHD_INVALID_CHANGE_TRACKING_ID NTStatus = 0xC03A0029 + STATUS_VHD_CHANGE_TRACKING_DISABLED NTStatus = 0xC03A002A + STATUS_VHD_MISSING_CHANGE_TRACKING_INFORMATION NTStatus = 0xC03A0030 + STATUS_VHD_RESIZE_WOULD_TRUNCATE_DATA NTStatus = 0xC03A0031 + STATUS_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0032 + STATUS_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0033 + STATUS_QUERY_STORAGE_ERROR NTStatus = 0x803A0001 + STATUS_GDI_HANDLE_LEAK NTStatus = 0x803F0001 + STATUS_RKF_KEY_NOT_FOUND NTStatus = 0xC0400001 + STATUS_RKF_DUPLICATE_KEY NTStatus = 0xC0400002 + STATUS_RKF_BLOB_FULL NTStatus = 0xC0400003 + STATUS_RKF_STORE_FULL NTStatus = 0xC0400004 + STATUS_RKF_FILE_BLOCKED NTStatus = 0xC0400005 + STATUS_RKF_ACTIVE_KEY NTStatus = 0xC0400006 + STATUS_RDBSS_RESTART_OPERATION NTStatus = 0xC0410001 + STATUS_RDBSS_CONTINUE_OPERATION NTStatus = 0xC0410002 + STATUS_RDBSS_POST_OPERATION NTStatus = 0xC0410003 + STATUS_RDBSS_RETRY_LOOKUP NTStatus = 0xC0410004 + STATUS_BTH_ATT_INVALID_HANDLE NTStatus = 0xC0420001 + STATUS_BTH_ATT_READ_NOT_PERMITTED NTStatus = 0xC0420002 + STATUS_BTH_ATT_WRITE_NOT_PERMITTED NTStatus = 0xC0420003 + STATUS_BTH_ATT_INVALID_PDU NTStatus = 0xC0420004 + STATUS_BTH_ATT_INSUFFICIENT_AUTHENTICATION NTStatus = 0xC0420005 + STATUS_BTH_ATT_REQUEST_NOT_SUPPORTED NTStatus = 0xC0420006 + STATUS_BTH_ATT_INVALID_OFFSET NTStatus = 0xC0420007 + STATUS_BTH_ATT_INSUFFICIENT_AUTHORIZATION NTStatus = 0xC0420008 + STATUS_BTH_ATT_PREPARE_QUEUE_FULL NTStatus = 0xC0420009 + STATUS_BTH_ATT_ATTRIBUTE_NOT_FOUND NTStatus = 0xC042000A + STATUS_BTH_ATT_ATTRIBUTE_NOT_LONG NTStatus = 0xC042000B + STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE NTStatus = 0xC042000C + STATUS_BTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH NTStatus = 0xC042000D + STATUS_BTH_ATT_UNLIKELY NTStatus = 0xC042000E + STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION NTStatus = 0xC042000F + STATUS_BTH_ATT_UNSUPPORTED_GROUP_TYPE NTStatus = 0xC0420010 + STATUS_BTH_ATT_INSUFFICIENT_RESOURCES NTStatus = 0xC0420011 + STATUS_BTH_ATT_UNKNOWN_ERROR NTStatus = 
0xC0421000 + STATUS_SECUREBOOT_ROLLBACK_DETECTED NTStatus = 0xC0430001 + STATUS_SECUREBOOT_POLICY_VIOLATION NTStatus = 0xC0430002 + STATUS_SECUREBOOT_INVALID_POLICY NTStatus = 0xC0430003 + STATUS_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND NTStatus = 0xC0430004 + STATUS_SECUREBOOT_POLICY_NOT_SIGNED NTStatus = 0xC0430005 + STATUS_SECUREBOOT_NOT_ENABLED NTStatus = 0x80430006 + STATUS_SECUREBOOT_FILE_REPLACED NTStatus = 0xC0430007 + STATUS_SECUREBOOT_POLICY_NOT_AUTHORIZED NTStatus = 0xC0430008 + STATUS_SECUREBOOT_POLICY_UNKNOWN NTStatus = 0xC0430009 + STATUS_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION NTStatus = 0xC043000A + STATUS_SECUREBOOT_PLATFORM_ID_MISMATCH NTStatus = 0xC043000B + STATUS_SECUREBOOT_POLICY_ROLLBACK_DETECTED NTStatus = 0xC043000C + STATUS_SECUREBOOT_POLICY_UPGRADE_MISMATCH NTStatus = 0xC043000D + STATUS_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING NTStatus = 0xC043000E + STATUS_SECUREBOOT_NOT_BASE_POLICY NTStatus = 0xC043000F + STATUS_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY NTStatus = 0xC0430010 + STATUS_PLATFORM_MANIFEST_NOT_AUTHORIZED NTStatus = 0xC0EB0001 + STATUS_PLATFORM_MANIFEST_INVALID NTStatus = 0xC0EB0002 + STATUS_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED NTStatus = 0xC0EB0003 + STATUS_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED NTStatus = 0xC0EB0004 + STATUS_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND NTStatus = 0xC0EB0005 + STATUS_PLATFORM_MANIFEST_NOT_ACTIVE NTStatus = 0xC0EB0006 + STATUS_PLATFORM_MANIFEST_NOT_SIGNED NTStatus = 0xC0EB0007 + STATUS_SYSTEM_INTEGRITY_ROLLBACK_DETECTED NTStatus = 0xC0E90001 + STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION NTStatus = 0xC0E90002 + STATUS_SYSTEM_INTEGRITY_INVALID_POLICY NTStatus = 0xC0E90003 + STATUS_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED NTStatus = 0xC0E90004 + STATUS_SYSTEM_INTEGRITY_TOO_MANY_POLICIES NTStatus = 0xC0E90005 + STATUS_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED NTStatus = 0xC0E90006 + STATUS_NO_APPLICABLE_APP_LICENSES_FOUND NTStatus = 0xC0EA0001 + STATUS_CLIP_LICENSE_NOT_FOUND NTStatus = 0xC0EA0002 + STATUS_CLIP_DEVICE_LICENSE_MISSING NTStatus = 0xC0EA0003 + STATUS_CLIP_LICENSE_INVALID_SIGNATURE NTStatus = 0xC0EA0004 + STATUS_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID NTStatus = 0xC0EA0005 + STATUS_CLIP_LICENSE_EXPIRED NTStatus = 0xC0EA0006 + STATUS_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE NTStatus = 0xC0EA0007 + STATUS_CLIP_LICENSE_NOT_SIGNED NTStatus = 0xC0EA0008 + STATUS_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE NTStatus = 0xC0EA0009 + STATUS_CLIP_LICENSE_DEVICE_ID_MISMATCH NTStatus = 0xC0EA000A + STATUS_AUDIO_ENGINE_NODE_NOT_FOUND NTStatus = 0xC0440001 + STATUS_HDAUDIO_EMPTY_CONNECTION_LIST NTStatus = 0xC0440002 + STATUS_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED NTStatus = 0xC0440003 + STATUS_HDAUDIO_NO_LOGICAL_DEVICES_CREATED NTStatus = 0xC0440004 + STATUS_HDAUDIO_NULL_LINKED_LIST_ENTRY NTStatus = 0xC0440005 + STATUS_SPACES_REPAIRED NTStatus = 0x00E70000 + STATUS_SPACES_PAUSE NTStatus = 0x00E70001 + STATUS_SPACES_COMPLETE NTStatus = 0x00E70002 + STATUS_SPACES_REDIRECT NTStatus = 0x00E70003 + STATUS_SPACES_FAULT_DOMAIN_TYPE_INVALID NTStatus = 0xC0E70001 + STATUS_SPACES_RESILIENCY_TYPE_INVALID NTStatus = 0xC0E70003 + STATUS_SPACES_DRIVE_SECTOR_SIZE_INVALID NTStatus = 0xC0E70004 + STATUS_SPACES_DRIVE_REDUNDANCY_INVALID NTStatus = 0xC0E70006 + STATUS_SPACES_NUMBER_OF_DATA_COPIES_INVALID NTStatus = 0xC0E70007 + STATUS_SPACES_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0E70009 + STATUS_SPACES_NUMBER_OF_COLUMNS_INVALID NTStatus = 0xC0E7000A + STATUS_SPACES_NOT_ENOUGH_DRIVES NTStatus = 0xC0E7000B + STATUS_SPACES_EXTENDED_ERROR NTStatus = 
0xC0E7000C + STATUS_SPACES_PROVISIONING_TYPE_INVALID NTStatus = 0xC0E7000D + STATUS_SPACES_ALLOCATION_SIZE_INVALID NTStatus = 0xC0E7000E + STATUS_SPACES_ENCLOSURE_AWARE_INVALID NTStatus = 0xC0E7000F + STATUS_SPACES_WRITE_CACHE_SIZE_INVALID NTStatus = 0xC0E70010 + STATUS_SPACES_NUMBER_OF_GROUPS_INVALID NTStatus = 0xC0E70011 + STATUS_SPACES_DRIVE_OPERATIONAL_STATE_INVALID NTStatus = 0xC0E70012 + STATUS_SPACES_UPDATE_COLUMN_STATE NTStatus = 0xC0E70013 + STATUS_SPACES_MAP_REQUIRED NTStatus = 0xC0E70014 + STATUS_SPACES_UNSUPPORTED_VERSION NTStatus = 0xC0E70015 + STATUS_SPACES_CORRUPT_METADATA NTStatus = 0xC0E70016 + STATUS_SPACES_DRT_FULL NTStatus = 0xC0E70017 + STATUS_SPACES_INCONSISTENCY NTStatus = 0xC0E70018 + STATUS_SPACES_LOG_NOT_READY NTStatus = 0xC0E70019 + STATUS_SPACES_NO_REDUNDANCY NTStatus = 0xC0E7001A + STATUS_SPACES_DRIVE_NOT_READY NTStatus = 0xC0E7001B + STATUS_SPACES_DRIVE_SPLIT NTStatus = 0xC0E7001C + STATUS_SPACES_DRIVE_LOST_DATA NTStatus = 0xC0E7001D + STATUS_SPACES_ENTRY_INCOMPLETE NTStatus = 0xC0E7001E + STATUS_SPACES_ENTRY_INVALID NTStatus = 0xC0E7001F + STATUS_SPACES_MARK_DIRTY NTStatus = 0xC0E70020 + STATUS_VOLSNAP_BOOTFILE_NOT_VALID NTStatus = 0xC0500003 + STATUS_VOLSNAP_ACTIVATION_TIMEOUT NTStatus = 0xC0500004 + STATUS_IO_PREEMPTED NTStatus = 0xC0510001 + STATUS_SVHDX_ERROR_STORED NTStatus = 0xC05C0000 + STATUS_SVHDX_ERROR_NOT_AVAILABLE NTStatus = 0xC05CFF00 + STATUS_SVHDX_UNIT_ATTENTION_AVAILABLE NTStatus = 0xC05CFF01 + STATUS_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED NTStatus = 0xC05CFF02 + STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED NTStatus = 0xC05CFF03 + STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED NTStatus = 0xC05CFF04 + STATUS_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED NTStatus = 0xC05CFF05 + STATUS_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED NTStatus = 0xC05CFF06 + STATUS_SVHDX_RESERVATION_CONFLICT NTStatus = 0xC05CFF07 + STATUS_SVHDX_WRONG_FILE_TYPE NTStatus = 0xC05CFF08 + STATUS_SVHDX_VERSION_MISMATCH NTStatus = 0xC05CFF09 + STATUS_VHD_SHARED NTStatus = 0xC05CFF0A + STATUS_SVHDX_NO_INITIATOR NTStatus = 0xC05CFF0B + STATUS_VHDSET_BACKING_STORAGE_NOT_FOUND NTStatus = 0xC05CFF0C + STATUS_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP NTStatus = 0xC05D0000 + STATUS_SMB_BAD_CLUSTER_DIALECT NTStatus = 0xC05D0001 + STATUS_SMB_GUEST_LOGON_BLOCKED NTStatus = 0xC05D0002 + STATUS_SECCORE_INVALID_COMMAND NTStatus = 0xC0E80000 + STATUS_VSM_NOT_INITIALIZED NTStatus = 0xC0450000 + STATUS_VSM_DMA_PROTECTION_NOT_IN_USE NTStatus = 0xC0450001 + STATUS_APPEXEC_CONDITION_NOT_SATISFIED NTStatus = 0xC0EC0000 + STATUS_APPEXEC_HANDLE_INVALIDATED NTStatus = 0xC0EC0001 + STATUS_APPEXEC_INVALID_HOST_GENERATION NTStatus = 0xC0EC0002 + STATUS_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION NTStatus = 0xC0EC0003 + STATUS_APPEXEC_INVALID_HOST_STATE NTStatus = 0xC0EC0004 + STATUS_APPEXEC_NO_DONOR NTStatus = 0xC0EC0005 + STATUS_APPEXEC_HOST_ID_MISMATCH NTStatus = 0xC0EC0006 + STATUS_APPEXEC_UNKNOWN_USER NTStatus = 0xC0EC0007 +) diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 000000000..6048ac679 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. 
+ +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 
0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 
0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + 
FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 
0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 
0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 000000000..148de0ffb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,3652 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + modntdll = NewLazySystemDLL("ntdll.dll") + modole32 = NewLazySystemDLL("ole32.dll") + modpsapi = NewLazySystemDLL("psapi.dll") + modsechost = NewLazySystemDLL("sechost.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modwintrust = NewLazySystemDLL("wintrust.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procControlService = modadvapi32.NewProc("ControlService") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procCopySid = modadvapi32.NewProc("CopySid") + procCreateProcessAsUserW = modadvapi32.NewProc("CreateProcessAsUserW") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procCryptReleaseContext = 
modadvapi32.NewProc("CryptReleaseContext") + procDeleteService = modadvapi32.NewProc("DeleteService") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procEqualSid = modadvapi32.NewProc("EqualSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsTokenRestricted = modadvapi32.NewProc("IsTokenRestricted") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegNotifyChangeKeyValue = modadvapi32.NewProc("RegNotifyChangeKeyValue") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procReportEventW = 
modadvapi32.NewProc("ReportEventW") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetKernelObjectSecurity = modadvapi32.NewProc("SetKernelObjectSecurity") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") + procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFindCertificateInStore = modcrypt32.NewProc("CertFindCertificateInStore") + procCertFindChainInStore = modcrypt32.NewProc("CertFindChainInStore") + procCertFindExtension = modcrypt32.NewProc("CertFindExtension") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertGetNameStringW = modcrypt32.NewProc("CertGetNameStringW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procCryptAcquireCertificatePrivateKey = modcrypt32.NewProc("CryptAcquireCertificatePrivateKey") + procCryptDecodeObject = modcrypt32.NewProc("CryptDecodeObject") + procCryptProtectData = modcrypt32.NewProc("CryptProtectData") + procCryptQueryObject = modcrypt32.NewProc("CryptQueryObject") + procCryptUnprotectData = modcrypt32.NewProc("CryptUnprotectData") + procPFXImportCertStore = modcrypt32.NewProc("PFXImportCertStore") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCloseHandle = modkernel32.NewProc("CloseHandle") + 
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procExitProcess = modkernel32.NewProc("ExitProcess") + procFindClose = modkernel32.NewProc("FindClose") + procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") + procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextChangeNotification = modkernel32.NewProc("FindNextChangeNotification") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindResourceW = modkernel32.NewProc("FindResourceW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetACP = modkernel32.NewProc("GetACP") + procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procGetDiskFreeSpaceExW = 
modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLastError = modkernel32.NewProc("GetLastError") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") + procGetVersion = modkernel32.NewProc("GetVersion") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procGetWindowsDirectoryW = 
modkernel32.NewProc("GetWindowsDirectoryW") + procInitializeProcThreadAttributeList = modkernel32.NewProc("InitializeProcThreadAttributeList") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadResource = modkernel32.NewProc("LoadResource") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procLockResource = modkernel32.NewProc("LockResource") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procOpenThread = modkernel32.NewProc("OpenThread") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procReadFile = modkernel32.NewProc("ReadFile") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") + procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procSetProcessShutdownParameters = 
modkernel32.NewProc("SetProcessShutdownParameters") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSizeofResource = modkernel32.NewProc("SizeofResource") + procSleepEx = modkernel32.NewProc("SleepEx") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procUpdateProcThreadAttribute = modkernel32.NewProc("UpdateProcThreadAttribute") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procWriteFile = modkernel32.NewProc("WriteFile") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus") + procRtlDosPathNameToRelativeNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus") + procRtlGetCurrentPeb = modntdll.NewProc("RtlGetCurrentPeb") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlInitString = modntdll.NewProc("RtlInitString") + procRtlInitUnicodeString = modntdll.NewProc("RtlInitUnicodeString") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoGetObject = modole32.NewProc("CoGetObject") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procEnumProcesses = modpsapi.NewProc("EnumProcesses") + procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") + procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procTranslateNameW = 
modsecur32.NewProc("TranslateNameW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetShellWindow = moduser32.NewProc("GetShellWindow") + procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASend = modws2_32.NewProc("WSASend") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procWSASocketW = modws2_32.NewProc("WSASocketW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procbind = modws2_32.NewProc("bind") + procclosesocket = modws2_32.NewProc("closesocket") + procconnect = modws2_32.NewProc("connect") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetpeername = modws2_32.NewProc("getpeername") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procgetsockname = modws2_32.NewProc("getsockname") + procgetsockopt = modws2_32.NewProc("getsockopt") + proclisten = modws2_32.NewProc("listen") + procntohs = modws2_32.NewProc("ntohs") + procrecvfrom = modws2_32.NewProc("recvfrom") + procsendto = modws2_32.NewProc("sendto") + procsetsockopt = modws2_32.NewProc("setsockopt") + procshutdown = modws2_32.NewProc("shutdown") + procsocket = modws2_32.NewProc("socket") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") +) + +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, 
subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) +} + +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd 
**SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 + } + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { + var _p0 uint32 + if *groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) + return +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { + var _p0 uint32 + if *saclPresent { + _p0 = 1 + } + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), 
uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) + return +} + +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func isTokenRestricted(tokenHandle Token) (ret bool, err error) { + r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + ret = r0 != 0 + if !ret { + err = errnoErr(e1) + } + return +} + +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 + return +} + +func isValidSid(sid *SID) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 + return +} + +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), 
uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := 
syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) { + var _p0 uint32 + if watchSubtree { + _p0 = 1 + } + var _p1 uint32 + if asynchronous { + _p1 = 1 + } + r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved 
*uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { + r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), 
uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { + var _p0 uint32 + if saclPresent { + _p0 = 1 + } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 
:= syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) + } + return +} + +func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { + r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + dupContext = (*CertContext)(unsafe.Pointer(r0)) + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) + } + return +} + +func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { + r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + cert = (*CertContext)(unsafe.Pointer(r0)) + if cert == nil { + err = errnoErr(e1) + } + return +} + +func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { + r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) 
+ certchain = (*CertChainContext)(unsafe.Pointer(r0)) + if certchain == nil { + err = errnoErr(e1) + } + return +} + +func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { + r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + ret = (*CertExtension)(unsafe.Pointer(r0)) + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { + r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + chars = uint32(r0) + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) { + var _p0 uint32 + if *callerFreeProvOrNCryptKey { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + *callerFreeProvOrNCryptKey = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, 
lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) + } + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + 
syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 || e1 == 
ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } + 
r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { + syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindCloseChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, 
uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter) +} + +func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p1 uint32 + if watchSubtree { + _p1 = 1 + } + r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindNextChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + resInfo = Handle(r0) + if resInfo == 0 { + err = errnoErr(e1) + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + 
err = errnoErr(e1) + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func 
GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + err = errnoErr(e1) + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + err = errnoErr(e1) + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), 
uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + err = errnoErr(e1) + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) 
(proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + if id == 0 { + err = errnoErr(e1) + } + return +} + +func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { + syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), 
uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), 
uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) { + err = procIsWow64Process2.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle 
= Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + resData = Handle(r0) + if resData == 0 { + err = errnoErr(e1) + } + return +} + +func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { + r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + ptr = uintptr(r0) + if ptr == 0 { + err = errnoErr(e1) + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + err = errnoErr(e1) + } + return +} + +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LockResource(resData Handle) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := 
syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 
8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setConsoleCursorPosition(console Handle, position uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetDefaultDllDirectories(directoryFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetDllDirectory(path string) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _SetDllDirectory(_p0) +} + +func _SetDllDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + 
return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + 
return +} + +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { + r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + size = uint32(r0) + if size == 0 { + err = errnoErr(e1) + } + return +} + +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 + } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) + return +} + +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), 
uintptr(unsafe.Pointer(returnedsize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, 
uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { + r0, _, _ := 
syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlGetCurrentPeb() (peb *PEB) { + r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + peb = (*PEB)(unsafe.Pointer(r0)) + return +} + +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + +func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlInitString(destinationString *NTString, sourceString *byte) { + syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + return +} + +func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { + syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + return +} + +func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + ret = syscall.Errno(r0) + return +} + +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { + r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { + r0, _, _ := 
syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + +func CoUninitialize() { + syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { + var _p0 *uint32 + if len(processIds) > 0 { + _p0 = &processIds[0] + } + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { + ret = procSubscribeServiceChangeNotifications.Find() + if ret != nil { + return + } + r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { + err = procUnsubscribeServiceChangeNotifications.Find() + if err != nil { + return + } + syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + err = errnoErr(e1) + } + return +} + +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + err = errnoErr(e1) + } + return +} + +func 
ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetShellWindow() (shellWindow HWND) { + r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + shellWindow = HWND(r0) + return +} + +func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + tid = uint32(r0) + if tid == 0 { + err = errnoErr(e1) + } + return +} + +func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { + r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } + return +} + +func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func connect(s 
Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + err = errnoErr(e1) + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + err = errnoErr(e1) + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + err = errnoErr(e1) + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } + return +} + +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := 
syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index e4c62289f..8a7392c4a 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.10 // +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index 02b9e1e9d..bb0a92001 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.10 // +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go index e8edc54cc..fd057601b 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -12,15 +12,14 @@ // and without notice. package bidi // import "golang.org/x/text/unicode/bidi" -// TODO: -// The following functionality would not be hard to implement, but hinges on -// the definition of a Segmenter interface. For now this is up to the user. -// - Iterate over paragraphs -// - Segmenter to iterate over runs directly from a given text. -// Also: +// TODO // - Transformer for reordering? // - Transformer (validator, really) for Bidi Rule. +import ( + "bytes" +) + // This API tries to avoid dealing with embedding levels for now. 
Under the hood // these will be computed, but the question is to which extent the user should // know they exist. We should at some point allow the user to specify an @@ -49,7 +48,9 @@ const ( Neutral ) -type options struct{} +type options struct { + defaultDirection Direction +} // An Option is an option for Bidi processing. type Option func(*options) @@ -66,12 +67,62 @@ type Option func(*options) // DefaultDirection sets the default direction for a Paragraph. The direction is // overridden if the text contains directional characters. func DefaultDirection(d Direction) Option { - panic("unimplemented") + return func(opts *options) { + opts.defaultDirection = d + } } // A Paragraph holds a single Paragraph for Bidi processing. type Paragraph struct { - // buffers + p []byte + o Ordering + opts []Option + types []Class + pairTypes []bracketType + pairValues []rune + runes []rune + options options +} + +// Initialize the p.pairTypes, p.pairValues and p.types from the input previously +// set by p.SetBytes() or p.SetString(). Also limit the input up to (and including) a paragraph +// separator (bidi class B). +// +// The function p.Order() needs these values to be set, so this preparation could be postponed. +// But since the SetBytes and SetStrings functions return the length of the input up to the paragraph +// separator, the whole input needs to be processed anyway and should not be done twice. +// +// The function has the same return values as SetBytes() / SetString() +func (p *Paragraph) prepareInput() (n int, err error) { + p.runes = bytes.Runes(p.p) + bytecount := 0 + // clear slices from previous SetString or SetBytes + p.pairTypes = nil + p.pairValues = nil + p.types = nil + + for _, r := range p.runes { + props, i := LookupRune(r) + bytecount += i + cls := props.Class() + if cls == B { + return bytecount, nil + } + p.types = append(p.types, cls) + if props.IsOpeningBracket() { + p.pairTypes = append(p.pairTypes, bpOpen) + p.pairValues = append(p.pairValues, r) + } else if props.IsBracket() { + // this must be a closing bracket, + // since IsOpeningBracket is not true + p.pairTypes = append(p.pairTypes, bpClose) + p.pairValues = append(p.pairValues, r) + } else { + p.pairTypes = append(p.pairTypes, bpNone) + p.pairValues = append(p.pairValues, 0) + } + } + return bytecount, nil } // SetBytes configures p for the given paragraph text. It replaces text @@ -80,70 +131,150 @@ type Paragraph struct { // consumed from b including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = b + p.opts = opts + return p.prepareInput() } -// SetString configures p for the given paragraph text. It replaces text -// previously set by SetBytes or SetString. If b contains a paragraph separator +// SetString configures s for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If s contains a paragraph separator // it will only process the first paragraph and report the number of bytes -// consumed from b including this separator. Error may be non-nil if options are +// consumed from s including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { - panic("unimplemented") + p.p = []byte(s) + p.opts = opts + return p.prepareInput() } // IsLeftToRight reports whether the principle direction of rendering for this // paragraphs is left-to-right. 
If this returns false, the principle direction // of rendering is right-to-left. func (p *Paragraph) IsLeftToRight() bool { - panic("unimplemented") + return p.Direction() == LeftToRight } // Direction returns the direction of the text of this paragraph. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (p *Paragraph) Direction() Direction { - panic("unimplemented") + return p.o.Direction() } +// TODO: what happens if the position is > len(input)? This should return an error. + // RunAt reports the Run at the given position of the input text. // // This method can be used for computing line breaks on paragraphs. func (p *Paragraph) RunAt(pos int) Run { - panic("unimplemented") + c := 0 + runNumber := 0 + for i, r := range p.o.runes { + c += len(r) + if pos < c { + runNumber = i + } + } + return p.o.Run(runNumber) +} + +func calculateOrdering(levels []level, runes []rune) Ordering { + var curDir Direction + + prevDir := Neutral + prevI := 0 + + o := Ordering{} + // lvl = 0,2,4,...: left to right + // lvl = 1,3,5,...: right to left + for i, lvl := range levels { + if lvl%2 == 0 { + curDir = LeftToRight + } else { + curDir = RightToLeft + } + if curDir != prevDir { + if i > 0 { + o.runes = append(o.runes, runes[prevI:i]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + } + prevI = i + prevDir = curDir + } + } + o.runes = append(o.runes, runes[prevI:]) + o.directions = append(o.directions, prevDir) + o.startpos = append(o.startpos, prevI) + return o } // Order computes the visual ordering of all the runs in a Paragraph. func (p *Paragraph) Order() (Ordering, error) { - panic("unimplemented") + if len(p.types) == 0 { + return Ordering{}, nil + } + + for _, fn := range p.opts { + fn(&p.options) + } + lvl := level(-1) + if p.options.defaultDirection == RightToLeft { + lvl = 1 + } + para, err := newParagraph(p.types, p.pairTypes, p.pairValues, lvl) + if err != nil { + return Ordering{}, err + } + + levels := para.getLevels([]int{len(p.types)}) + + p.o = calculateOrdering(levels, p.runes) + return p.o, nil } // Line computes the visual ordering of runs for a single line starting and // ending at the given positions in the original text. func (p *Paragraph) Line(start, end int) (Ordering, error) { - panic("unimplemented") + lineTypes := p.types[start:end] + para, err := newParagraph(lineTypes, p.pairTypes[start:end], p.pairValues[start:end], -1) + if err != nil { + return Ordering{}, err + } + levels := para.getLevels([]int{len(lineTypes)}) + o := calculateOrdering(levels, p.runes[start:end]) + return o, nil } // An Ordering holds the computed visual order of runs of a Paragraph. Calling // SetBytes or SetString on the originating Paragraph invalidates an Ordering. // The methods of an Ordering should only be called by one goroutine at a time. -type Ordering struct{} +type Ordering struct { + runes [][]rune + directions []Direction + startpos []int +} // Direction reports the directionality of the runs. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (o *Ordering) Direction() Direction { - panic("unimplemented") + return o.directions[0] } // NumRuns returns the number of runs. func (o *Ordering) NumRuns() int { - panic("unimplemented") + return len(o.runes) } // Run returns the ith run within the ordering. func (o *Ordering) Run(i int) Run { - panic("unimplemented") + r := Run{ + runes: o.runes[i], + direction: o.directions[i], + startpos: o.startpos[i], + } + return r } // TODO: perhaps with options. 
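// Illustrative usage sketch (not part of the patch): the vendored bidi changes in the hunks
// above replace the old panic("unimplemented") stubs with working Paragraph, Ordering and Run
// implementations (SetString/SetBytes -> Order -> iterate runs). A minimal program along these
// lines, assuming the golang.org/x/text/unicode/bidi version vendored by this patch, would
// exercise that API; the package name "main" and the sample string are placeholders.
package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	var p bidi.Paragraph
	// SetString consumes input up to (and including) a paragraph separator and
	// prepares the bidi classes and bracket pairs that Order needs.
	if _, err := p.SetString("abc \u05d0\u05d1\u05d2 def"); err != nil {
		panic(err)
	}
	// Order computes the visual ordering; each Run is a maximal sequence of
	// characters sharing a single direction.
	o, err := p.Order()
	if err != nil {
		panic(err)
	}
	for i := 0; i < o.NumRuns(); i++ {
		r := o.Run(i)
		fmt.Printf("run %d: direction=%v text=%q\n", i, r.Direction(), r.String())
	}
}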
@@ -155,16 +286,19 @@ func (o *Ordering) Run(i int) Run { // A Run is a continuous sequence of characters of a single direction. type Run struct { + runes []rune + direction Direction + startpos int } // String returns the text of the run in its original order. func (r *Run) String() string { - panic("unimplemented") + return string(r.runes) } // Bytes returns the text of the run in its original order. func (r *Run) Bytes() []byte { - panic("unimplemented") + return []byte(r.String()) } // TODO: methods for @@ -174,25 +308,52 @@ func (r *Run) Bytes() []byte { // Direction reports the direction of the run. func (r *Run) Direction() Direction { - panic("unimplemented") + return r.direction } -// Position of the Run within the text passed to SetBytes or SetString of the +// Pos returns the position of the Run within the text passed to SetBytes or SetString of the // originating Paragraph value. func (r *Run) Pos() (start, end int) { - panic("unimplemented") + return r.startpos, r.startpos + len(r.runes) - 1 } // AppendReverse reverses the order of characters of in, appends them to out, // and returns the result. Modifiers will still follow the runes they modify. // Brackets are replaced with their counterparts. func AppendReverse(out, in []byte) []byte { - panic("unimplemented") + ret := make([]byte, len(in)+len(out)) + copy(ret, out) + inRunes := bytes.Runes(in) + + for i, r := range inRunes { + prop, _ := LookupRune(r) + if prop.IsBracket() { + inRunes[i] = prop.reverseBracket(r) + } + } + + for i, j := 0, len(inRunes)-1; i < j; i, j = i+1, j-1 { + inRunes[i], inRunes[j] = inRunes[j], inRunes[i] + } + copy(ret[len(out):], string(inRunes)) + + return ret } // ReverseString reverses the order of characters in s and returns a new string. // Modifiers will still follow the runes they modify. Brackets are replaced with // their counterparts. func ReverseString(s string) string { - panic("unimplemented") + input := []rune(s) + li := len(input) + ret := make([]rune, li) + for i, r := range input { + prop, _ := LookupRune(r) + if prop.IsBracket() { + ret[li-i-1] = prop.reverseBracket(r) + } else { + ret[li-i-1] = r + } + } + return string(ret) } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 50deb6600..e4c081101 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -4,7 +4,10 @@ package bidi -import "log" +import ( + "fmt" + "log" +) // This implementation is a port based on the reference implementation found at: // https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ @@ -97,13 +100,20 @@ type paragraph struct { // rune (suggested is the rune of the open bracket for opening and matching // close brackets, after normalization). The embedding levels are optional, but // may be supplied to encode embedding levels of styled text. -// -// TODO: return an error. 
-func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { - validateTypes(types) - validatePbTypes(pairTypes) - validatePbValues(pairValues, pairTypes) - validateParagraphEmbeddingLevel(levels) +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) (*paragraph, error) { + var err error + if err = validateTypes(types); err != nil { + return nil, err + } + if err = validatePbTypes(pairTypes); err != nil { + return nil, err + } + if err = validatePbValues(pairValues, pairTypes); err != nil { + return nil, err + } + if err = validateParagraphEmbeddingLevel(levels); err != nil { + return nil, err + } p := ¶graph{ initialTypes: append([]Class(nil), types...), @@ -115,7 +125,7 @@ func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, lev resultTypes: append([]Class(nil), types...), } p.run() - return p + return p, nil } func (p *paragraph) Len() int { return len(p.initialTypes) } @@ -1001,58 +1011,61 @@ func typeForLevel(level level) Class { return R } -// TODO: change validation to not panic - -func validateTypes(types []Class) { +func validateTypes(types []Class) error { if len(types) == 0 { - log.Panic("types is null") + return fmt.Errorf("types is null") } for i, t := range types[:len(types)-1] { if t == B { - log.Panicf("B type before end of paragraph at index: %d", i) + return fmt.Errorf("B type before end of paragraph at index: %d", i) } } + return nil } -func validateParagraphEmbeddingLevel(embeddingLevel level) { +func validateParagraphEmbeddingLevel(embeddingLevel level) error { if embeddingLevel != implicitLevel && embeddingLevel != 0 && embeddingLevel != 1 { - log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + return fmt.Errorf("illegal paragraph embedding level: %d", embeddingLevel) } + return nil } -func validateLineBreaks(linebreaks []int, textLength int) { +func validateLineBreaks(linebreaks []int, textLength int) error { prev := 0 for i, next := range linebreaks { if next <= prev { - log.Panicf("bad linebreak: %d at index: %d", next, i) + return fmt.Errorf("bad linebreak: %d at index: %d", next, i) } prev = next } if prev != textLength { - log.Panicf("last linebreak was %d, want %d", prev, textLength) + return fmt.Errorf("last linebreak was %d, want %d", prev, textLength) } + return nil } -func validatePbTypes(pairTypes []bracketType) { +func validatePbTypes(pairTypes []bracketType) error { if len(pairTypes) == 0 { - log.Panic("pairTypes is null") + return fmt.Errorf("pairTypes is null") } for i, pt := range pairTypes { switch pt { case bpNone, bpOpen, bpClose: default: - log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + return fmt.Errorf("illegal pairType value at %d: %v", i, pairTypes[i]) } } + return nil } -func validatePbValues(pairValues []rune, pairTypes []bracketType) { +func validatePbValues(pairValues []rune, pairTypes []bracketType) error { if pairValues == nil { - log.Panic("pairValues is null") + return fmt.Errorf("pairValues is null") } if len(pairTypes) != len(pairValues) { - log.Panic("pairTypes is different length from pairValues") + return fmt.Errorf("pairTypes is different length from pairValues") } + return nil } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index d8c94e1bd..42fa8d72c 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by 
running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 16b11db53..56a0e1ea2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index 7ffa36512..baacf32b4 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +//go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go new file mode 100644 index 000000000..f248effae --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -0,0 +1,1956 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 +// +build go1.16 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 17408 bytes (17.00 KiB). Checksum: df85fcbfe9b8377f. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 248 blocks, 15872 entries, 15872 bytes +// The third block is the zero block. 
+var bidiValues = [15872]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x895: 0x000c, 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, 
+ 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa81: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaca: 0x000c, + 0xad2: 0x000c, 0xad3: 0x000c, 0xad4: 0x000c, 0xad6: 0x000c, + // Block 0x2c, offset 0xb00 + 0xb31: 0x000c, 0xb34: 0x000c, 0xb35: 0x000c, + 0xb36: 0x000c, 0xb37: 0x000c, 0xb38: 0x000c, 0xb39: 0x000c, 0xb3a: 0x000c, + 0xb3f: 0x0004, + // Block 0x2d, offset 0xb40 + 0xb47: 0x000c, 0xb48: 0x000c, 0xb49: 0x000c, 0xb4a: 0x000c, 0xb4b: 0x000c, + 0xb4c: 0x000c, 0xb4d: 0x000c, 0xb4e: 0x000c, + // Block 0x2e, offset 0xb80 + 0xbb1: 0x000c, 0xbb4: 0x000c, 0xbb5: 0x000c, + 0xbb6: 0x000c, 0xbb7: 0x000c, 0xbb8: 0x000c, 0xbb9: 0x000c, 0xbba: 0x000c, 0xbbb: 0x000c, + 0xbbc: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbc8: 0x000c, 0xbc9: 0x000c, 0xbca: 0x000c, 0xbcb: 0x000c, + 0xbcc: 0x000c, 0xbcd: 0x000c, + // Block 0x30, offset 0xc00 + 0xc18: 0x000c, 0xc19: 0x000c, + 0xc35: 0x000c, + 0xc37: 0x000c, 0xc39: 0x000c, 0xc3a: 0x003a, 0xc3b: 0x002a, + 0xc3c: 0x003a, 0xc3d: 0x002a, + // Block 0x31, offset 0xc40 + 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, 0xc7d: 0x000c, 0xc7e: 0x000c, + // Block 0x32, offset 0xc80 + 0xc80: 0x000c, 0xc81: 0x000c, 0xc82: 0x000c, 0xc83: 0x000c, 0xc84: 0x000c, + 0xc86: 0x000c, 0xc87: 0x000c, + 0xc8d: 0x000c, 0xc8e: 0x000c, 0xc8f: 0x000c, 0xc90: 0x000c, 0xc91: 0x000c, + 0xc92: 0x000c, 0xc93: 0x000c, 0xc94: 0x000c, 0xc95: 0x000c, 0xc96: 0x000c, 0xc97: 0x000c, + 0xc99: 0x000c, 0xc9a: 0x000c, 0xc9b: 0x000c, 0xc9c: 0x000c, 0xc9d: 0x000c, + 0xc9e: 0x000c, 0xc9f: 0x000c, 0xca0: 0x000c, 0xca1: 0x000c, 0xca2: 0x000c, 0xca3: 0x000c, + 0xca4: 0x000c, 0xca5: 0x000c, 0xca6: 0x000c, 0xca7: 0x000c, 0xca8: 0x000c, 0xca9: 0x000c, + 0xcaa: 0x000c, 0xcab: 0x000c, 0xcac: 0x000c, 0xcad: 0x000c, 0xcae: 0x000c, 0xcaf: 0x000c, + 0xcb0: 0x000c, 0xcb1: 0x000c, 0xcb2: 0x000c, 0xcb3: 0x000c, 0xcb4: 0x000c, 0xcb5: 0x000c, + 0xcb6: 0x000c, 0xcb7: 0x000c, 0xcb8: 0x000c, 0xcb9: 0x000c, 0xcba: 0x000c, 0xcbb: 0x000c, + 0xcbc: 0x000c, + // Block 0x33, offset 0xcc0 + 0xcc6: 0x000c, + // Block 0x34, offset 0xd00 + 0xd2d: 0x000c, 0xd2e: 0x000c, 0xd2f: 0x000c, + 0xd30: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, 0xd35: 0x000c, + 0xd36: 0x000c, 0xd37: 0x000c, 0xd39: 0x000c, 0xd3a: 0x000c, + 0xd3d: 0x000c, 0xd3e: 0x000c, + // Block 0x35, offset 0xd40 + 0xd58: 0x000c, 0xd59: 0x000c, + 0xd5e: 0x000c, 0xd5f: 0x000c, 0xd60: 0x000c, + 0xd71: 0x000c, 0xd72: 0x000c, 0xd73: 0x000c, 0xd74: 0x000c, + // Block 0x36, offset 0xd80 + 0xd82: 0x000c, 0xd85: 0x000c, + 0xd86: 0x000c, + 0xd8d: 0x000c, + 0xd9d: 0x000c, + // Block 0x37, offset 0xdc0 + 0xddd: 0x000c, + 0xdde: 0x000c, 0xddf: 0x000c, + // Block 0x38, offset 0xe00 + 0xe10: 0x000a, 0xe11: 0x000a, + 0xe12: 0x000a, 0xe13: 0x000a, 0xe14: 0x000a, 0xe15: 0x000a, 0xe16: 0x000a, 0xe17: 0x000a, + 0xe18: 0x000a, 0xe19: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, + // Block 0x3a, offset 0xe80 + 0xe80: 0x0009, + 0xe9b: 0x007a, 0xe9c: 0x006a, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, 0xed4: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, 0xef4: 0x000c, + // Block 
0x3c, offset 0xf00 + 0xf12: 0x000c, 0xf13: 0x000c, + 0xf32: 0x000c, 0xf33: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf74: 0x000c, 0xf75: 0x000c, + 0xf77: 0x000c, 0xf78: 0x000c, 0xf79: 0x000c, 0xf7a: 0x000c, 0xf7b: 0x000c, + 0xf7c: 0x000c, 0xf7d: 0x000c, + // Block 0x3e, offset 0xf80 + 0xf86: 0x000c, 0xf89: 0x000c, 0xf8a: 0x000c, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000c, 0xf8f: 0x000c, 0xf90: 0x000c, 0xf91: 0x000c, + 0xf92: 0x000c, 0xf93: 0x000c, + 0xf9b: 0x0004, 0xf9d: 0x000c, + 0xfb0: 0x000a, 0xfb1: 0x000a, 0xfb2: 0x000a, 0xfb3: 0x000a, 0xfb4: 0x000a, 0xfb5: 0x000a, + 0xfb6: 0x000a, 0xfb7: 0x000a, 0xfb8: 0x000a, 0xfb9: 0x000a, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x000a, 0xfc1: 0x000a, 0xfc2: 0x000a, 0xfc3: 0x000a, 0xfc4: 0x000a, 0xfc5: 0x000a, + 0xfc6: 0x000a, 0xfc7: 0x000a, 0xfc8: 0x000a, 0xfc9: 0x000a, 0xfca: 0x000a, 0xfcb: 0x000c, + 0xfcc: 0x000c, 0xfcd: 0x000c, 0xfce: 0x000b, + // Block 0x40, offset 0x1000 + 0x1005: 0x000c, + 0x1006: 0x000c, + 0x1029: 0x000c, + // Block 0x41, offset 0x1040 + 0x1060: 0x000c, 0x1061: 0x000c, 0x1062: 0x000c, + 0x1067: 0x000c, 0x1068: 0x000c, + 0x1072: 0x000c, + 0x1079: 0x000c, 0x107a: 0x000c, 0x107b: 0x000c, + // Block 0x42, offset 0x1080 + 0x1080: 0x000a, 0x1084: 0x000a, 0x1085: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10de: 0x000a, 0x10df: 0x000a, 0x10e0: 0x000a, 0x10e1: 0x000a, 0x10e2: 0x000a, 0x10e3: 0x000a, + 0x10e4: 0x000a, 0x10e5: 0x000a, 0x10e6: 0x000a, 0x10e7: 0x000a, 0x10e8: 0x000a, 0x10e9: 0x000a, + 0x10ea: 0x000a, 0x10eb: 0x000a, 0x10ec: 0x000a, 0x10ed: 0x000a, 0x10ee: 0x000a, 0x10ef: 0x000a, + 0x10f0: 0x000a, 0x10f1: 0x000a, 0x10f2: 0x000a, 0x10f3: 0x000a, 0x10f4: 0x000a, 0x10f5: 0x000a, + 0x10f6: 0x000a, 0x10f7: 0x000a, 0x10f8: 0x000a, 0x10f9: 0x000a, 0x10fa: 0x000a, 0x10fb: 0x000a, + 0x10fc: 0x000a, 0x10fd: 0x000a, 0x10fe: 0x000a, 0x10ff: 0x000a, + // Block 0x44, offset 0x1100 + 0x1117: 0x000c, + 0x1118: 0x000c, 0x111b: 0x000c, + // Block 0x45, offset 0x1140 + 0x1156: 0x000c, + 0x1158: 0x000c, 0x1159: 0x000c, 0x115a: 0x000c, 0x115b: 0x000c, 0x115c: 0x000c, 0x115d: 0x000c, + 0x115e: 0x000c, 0x1160: 0x000c, 0x1162: 0x000c, + 0x1165: 0x000c, 0x1166: 0x000c, 0x1167: 0x000c, 0x1168: 0x000c, 0x1169: 0x000c, + 0x116a: 0x000c, 0x116b: 0x000c, 0x116c: 0x000c, + 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117f: 0x000c, + // Block 0x46, offset 0x1180 + 0x11b0: 0x000c, 0x11b1: 0x000c, 0x11b2: 0x000c, 0x11b3: 0x000c, 0x11b4: 0x000c, 0x11b5: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bb: 0x000c, + 0x11bc: 0x000c, 0x11bd: 0x000c, 0x11be: 0x000c, 0x11bf: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, 0x1202: 0x000c, 0x1203: 0x000c, + 0x1234: 0x000c, + 0x1236: 0x000c, 0x1237: 0x000c, 0x1238: 0x000c, 0x1239: 0x000c, 0x123a: 0x000c, + 0x123c: 0x000c, + // Block 0x49, offset 0x1240 + 0x1242: 0x000c, + 0x126b: 0x000c, 0x126c: 0x000c, 0x126d: 0x000c, 0x126e: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, 0x1272: 0x000c, 0x1273: 0x000c, + // Block 0x4a, offset 0x1280 + 0x1280: 0x000c, 0x1281: 0x000c, + 0x12a2: 0x000c, 0x12a3: 0x000c, + 0x12a4: 0x000c, 0x12a5: 0x000c, 0x12a8: 0x000c, 0x12a9: 0x000c, + 0x12ab: 0x000c, 0x12ac: 0x000c, 0x12ad: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12e6: 0x000c, 0x12e8: 0x000c, 0x12e9: 0x000c, + 0x12ed: 0x000c, 0x12ef: 0x000c, + 0x12f0: 0x000c, 
0x12f1: 0x000c, + // Block 0x4c, offset 0x1300 + 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, + // Block 0x4d, offset 0x1340 + 0x1350: 0x000c, 0x1351: 0x000c, + 0x1352: 0x000c, 0x1354: 0x000c, 0x1355: 0x000c, 0x1356: 0x000c, 0x1357: 0x000c, + 0x1358: 0x000c, 0x1359: 0x000c, 0x135a: 0x000c, 0x135b: 0x000c, 0x135c: 0x000c, 0x135d: 0x000c, + 0x135e: 0x000c, 0x135f: 0x000c, 0x1360: 0x000c, 0x1362: 0x000c, 0x1363: 0x000c, + 0x1364: 0x000c, 0x1365: 0x000c, 0x1366: 0x000c, 0x1367: 0x000c, 0x1368: 0x000c, + 0x136d: 0x000c, + 0x1374: 0x000c, + 0x1378: 0x000c, 0x1379: 0x000c, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000c, 0x1381: 0x000c, 0x1382: 0x000c, 0x1383: 0x000c, 0x1384: 0x000c, 0x1385: 0x000c, + 0x1386: 0x000c, 0x1387: 0x000c, 0x1388: 0x000c, 0x1389: 0x000c, 0x138a: 0x000c, 0x138b: 0x000c, + 0x138c: 0x000c, 0x138d: 0x000c, 0x138e: 0x000c, 0x138f: 0x000c, 0x1390: 0x000c, 0x1391: 0x000c, + 0x1392: 0x000c, 0x1393: 0x000c, 0x1394: 0x000c, 0x1395: 0x000c, 0x1396: 0x000c, 0x1397: 0x000c, + 0x1398: 0x000c, 0x1399: 0x000c, 0x139a: 0x000c, 0x139b: 0x000c, 0x139c: 0x000c, 0x139d: 0x000c, + 0x139e: 0x000c, 0x139f: 0x000c, 0x13a0: 0x000c, 0x13a1: 0x000c, 0x13a2: 0x000c, 0x13a3: 0x000c, + 0x13a4: 0x000c, 0x13a5: 0x000c, 0x13a6: 0x000c, 0x13a7: 0x000c, 0x13a8: 0x000c, 0x13a9: 0x000c, + 0x13aa: 0x000c, 0x13ab: 0x000c, 0x13ac: 0x000c, 0x13ad: 0x000c, 0x13ae: 0x000c, 0x13af: 0x000c, + 0x13b0: 0x000c, 0x13b1: 0x000c, 0x13b2: 0x000c, 0x13b3: 0x000c, 0x13b4: 0x000c, 0x13b5: 0x000c, + 0x13b6: 0x000c, 0x13b7: 0x000c, 0x13b8: 0x000c, 0x13b9: 0x000c, 0x13bb: 0x000c, + 0x13bc: 0x000c, 0x13bd: 0x000c, 0x13be: 0x000c, 0x13bf: 0x000c, + // Block 0x4f, offset 0x13c0 + 0x13fd: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, + 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, + 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x000a, + 0x142d: 0x000a, 0x142e: 0x000a, 0x142f: 0x000a, + 0x143d: 0x000a, 0x143e: 0x000a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0009, 0x1441: 0x0009, 0x1442: 0x0009, 0x1443: 0x0009, 0x1444: 0x0009, 0x1445: 0x0009, + 0x1446: 0x0009, 0x1447: 0x0009, 0x1448: 0x0009, 0x1449: 0x0009, 0x144a: 0x0009, 0x144b: 0x000b, + 0x144c: 0x000b, 0x144d: 0x000b, 0x144f: 0x0001, 0x1450: 0x000a, 0x1451: 0x000a, + 0x1452: 0x000a, 0x1453: 0x000a, 0x1454: 0x000a, 0x1455: 0x000a, 0x1456: 0x000a, 0x1457: 0x000a, + 0x1458: 0x000a, 0x1459: 0x000a, 0x145a: 0x000a, 0x145b: 0x000a, 0x145c: 0x000a, 0x145d: 0x000a, + 0x145e: 0x000a, 0x145f: 0x000a, 0x1460: 0x000a, 0x1461: 0x000a, 0x1462: 0x000a, 0x1463: 0x000a, + 0x1464: 0x000a, 0x1465: 0x000a, 0x1466: 0x000a, 0x1467: 0x000a, 0x1468: 0x0009, 0x1469: 0x0007, + 0x146a: 0x000e, 0x146b: 0x000e, 0x146c: 0x000e, 0x146d: 0x000e, 0x146e: 0x000e, 0x146f: 0x0006, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x000a, + 0x1476: 0x000a, 0x1477: 0x000a, 0x1478: 0x000a, 0x1479: 0x000a, 0x147a: 0x000a, 0x147b: 0x000a, + 0x147c: 0x000a, 0x147d: 0x000a, 0x147e: 0x000a, 0x147f: 0x000a, + // Block 0x52, offset 0x1480 + 0x1480: 0x000a, 0x1481: 0x000a, 0x1482: 0x000a, 0x1483: 0x000a, 0x1484: 0x0006, 0x1485: 0x009a, + 0x1486: 0x008a, 0x1487: 0x000a, 0x1488: 0x000a, 0x1489: 0x000a, 0x148a: 0x000a, 0x148b: 0x000a, + 0x148c: 0x000a, 0x148d: 0x000a, 0x148e: 0x000a, 0x148f: 0x000a, 0x1490: 0x000a, 0x1491: 0x000a, + 0x1492: 0x000a, 0x1493: 0x000a, 0x1494: 0x000a, 0x1495: 0x000a, 0x1496: 0x000a, 0x1497: 0x000a, + 
0x1498: 0x000a, 0x1499: 0x000a, 0x149a: 0x000a, 0x149b: 0x000a, 0x149c: 0x000a, 0x149d: 0x000a, + 0x149e: 0x000a, 0x149f: 0x0009, 0x14a0: 0x000b, 0x14a1: 0x000b, 0x14a2: 0x000b, 0x14a3: 0x000b, + 0x14a4: 0x000b, 0x14a5: 0x000b, 0x14a6: 0x000e, 0x14a7: 0x000e, 0x14a8: 0x000e, 0x14a9: 0x000e, + 0x14aa: 0x000b, 0x14ab: 0x000b, 0x14ac: 0x000b, 0x14ad: 0x000b, 0x14ae: 0x000b, 0x14af: 0x000b, + 0x14b0: 0x0002, 0x14b4: 0x0002, 0x14b5: 0x0002, + 0x14b6: 0x0002, 0x14b7: 0x0002, 0x14b8: 0x0002, 0x14b9: 0x0002, 0x14ba: 0x0003, 0x14bb: 0x0003, + 0x14bc: 0x000a, 0x14bd: 0x009a, 0x14be: 0x008a, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0002, 0x14c1: 0x0002, 0x14c2: 0x0002, 0x14c3: 0x0002, 0x14c4: 0x0002, 0x14c5: 0x0002, + 0x14c6: 0x0002, 0x14c7: 0x0002, 0x14c8: 0x0002, 0x14c9: 0x0002, 0x14ca: 0x0003, 0x14cb: 0x0003, + 0x14cc: 0x000a, 0x14cd: 0x009a, 0x14ce: 0x008a, + 0x14e0: 0x0004, 0x14e1: 0x0004, 0x14e2: 0x0004, 0x14e3: 0x0004, + 0x14e4: 0x0004, 0x14e5: 0x0004, 0x14e6: 0x0004, 0x14e7: 0x0004, 0x14e8: 0x0004, 0x14e9: 0x0004, + 0x14ea: 0x0004, 0x14eb: 0x0004, 0x14ec: 0x0004, 0x14ed: 0x0004, 0x14ee: 0x0004, 0x14ef: 0x0004, + 0x14f0: 0x0004, 0x14f1: 0x0004, 0x14f2: 0x0004, 0x14f3: 0x0004, 0x14f4: 0x0004, 0x14f5: 0x0004, + 0x14f6: 0x0004, 0x14f7: 0x0004, 0x14f8: 0x0004, 0x14f9: 0x0004, 0x14fa: 0x0004, 0x14fb: 0x0004, + 0x14fc: 0x0004, 0x14fd: 0x0004, 0x14fe: 0x0004, 0x14ff: 0x0004, + // Block 0x54, offset 0x1500 + 0x1500: 0x0004, 0x1501: 0x0004, 0x1502: 0x0004, 0x1503: 0x0004, 0x1504: 0x0004, 0x1505: 0x0004, + 0x1506: 0x0004, 0x1507: 0x0004, 0x1508: 0x0004, 0x1509: 0x0004, 0x150a: 0x0004, 0x150b: 0x0004, + 0x150c: 0x0004, 0x150d: 0x0004, 0x150e: 0x0004, 0x150f: 0x0004, 0x1510: 0x000c, 0x1511: 0x000c, + 0x1512: 0x000c, 0x1513: 0x000c, 0x1514: 0x000c, 0x1515: 0x000c, 0x1516: 0x000c, 0x1517: 0x000c, + 0x1518: 0x000c, 0x1519: 0x000c, 0x151a: 0x000c, 0x151b: 0x000c, 0x151c: 0x000c, 0x151d: 0x000c, + 0x151e: 0x000c, 0x151f: 0x000c, 0x1520: 0x000c, 0x1521: 0x000c, 0x1522: 0x000c, 0x1523: 0x000c, + 0x1524: 0x000c, 0x1525: 0x000c, 0x1526: 0x000c, 0x1527: 0x000c, 0x1528: 0x000c, 0x1529: 0x000c, + 0x152a: 0x000c, 0x152b: 0x000c, 0x152c: 0x000c, 0x152d: 0x000c, 0x152e: 0x000c, 0x152f: 0x000c, + 0x1530: 0x000c, + // Block 0x55, offset 0x1540 + 0x1540: 0x000a, 0x1541: 0x000a, 0x1543: 0x000a, 0x1544: 0x000a, 0x1545: 0x000a, + 0x1546: 0x000a, 0x1548: 0x000a, 0x1549: 0x000a, + 0x1554: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1565: 0x000a, 0x1567: 0x000a, 0x1569: 0x000a, + 0x156e: 0x0004, + 0x157a: 0x000a, 0x157b: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, + 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x000a, 0x15d3: 0x000a, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 
0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x000a, 0x1609: 0x000a, 0x160a: 0x000a, 0x160b: 0x000a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x000a, + 0x162a: 0x000a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + 0x1636: 0x000a, 0x1637: 0x000a, 0x1638: 0x000a, 0x1639: 0x000a, 0x163a: 0x000a, 0x163b: 0x000a, + 0x163c: 0x000a, 0x163d: 0x000a, 0x163e: 0x000a, 0x163f: 0x000a, + // Block 0x59, offset 0x1640 + 0x1640: 0x000a, 0x1641: 0x000a, 0x1642: 0x000a, 0x1643: 0x000a, 0x1644: 0x000a, 0x1645: 0x000a, + 0x1646: 0x000a, 0x1647: 0x000a, 0x1648: 0x000a, 0x1649: 0x000a, 0x164a: 0x000a, 0x164b: 0x000a, + 0x164c: 0x000a, 0x164d: 0x000a, 0x164e: 0x000a, 0x164f: 0x000a, 0x1650: 0x000a, 0x1651: 0x000a, + 0x1652: 0x0003, 0x1653: 0x0004, 0x1654: 0x000a, 0x1655: 0x000a, 0x1656: 0x000a, 0x1657: 0x000a, + 0x1658: 0x000a, 0x1659: 0x000a, 0x165a: 0x000a, 0x165b: 0x000a, 0x165c: 0x000a, 0x165d: 0x000a, + 0x165e: 0x000a, 0x165f: 0x000a, 0x1660: 0x000a, 0x1661: 0x000a, 0x1662: 0x000a, 0x1663: 0x000a, + 0x1664: 0x000a, 0x1665: 0x000a, 0x1666: 0x000a, 0x1667: 0x000a, 0x1668: 0x000a, 0x1669: 0x000a, + 0x166a: 0x000a, 0x166b: 0x000a, 0x166c: 0x000a, 0x166d: 0x000a, 0x166e: 0x000a, 0x166f: 0x000a, + 0x1670: 0x000a, 0x1671: 0x000a, 0x1672: 0x000a, 0x1673: 0x000a, 0x1674: 0x000a, 0x1675: 0x000a, + 0x1676: 0x000a, 0x1677: 0x000a, 0x1678: 0x000a, 0x1679: 0x000a, 0x167a: 0x000a, 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x003a, 0x1689: 0x002a, 0x168a: 0x003a, 0x168b: 0x002a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1695: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x009a, + 0x16aa: 0x008a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 
0x000a, 0x16fe: 0x000a, 0x16ff: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, 0x174b: 0x000a, + 0x174c: 0x000a, 0x174d: 0x000a, 0x174e: 0x000a, 0x174f: 0x000a, 0x1750: 0x000a, 0x1751: 0x000a, + 0x1752: 0x000a, 0x1753: 0x000a, 0x1754: 0x000a, 0x1755: 0x000a, 0x1756: 0x000a, 0x1757: 0x000a, + 0x1758: 0x000a, 0x1759: 0x000a, 0x175a: 0x000a, 0x175b: 0x000a, 0x175c: 0x000a, 0x175d: 0x000a, + 0x175e: 0x000a, 0x175f: 0x000a, 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x000a, 0x1789: 0x000a, 0x178a: 0x000a, + 0x17a0: 0x000a, 0x17a1: 0x000a, 0x17a2: 0x000a, 0x17a3: 0x000a, + 0x17a4: 0x000a, 0x17a5: 0x000a, 0x17a6: 0x000a, 0x17a7: 0x000a, 0x17a8: 0x000a, 0x17a9: 0x000a, + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x0002, 0x17c9: 0x0002, 0x17ca: 0x0002, 0x17cb: 0x0002, + 0x17cc: 0x0002, 0x17cd: 0x0002, 0x17ce: 0x0002, 0x17cf: 0x0002, 0x17d0: 0x0002, 0x17d1: 0x0002, + 0x17d2: 0x0002, 0x17d3: 0x0002, 0x17d4: 0x0002, 0x17d5: 0x0002, 0x17d6: 0x0002, 0x17d7: 0x0002, + 0x17d8: 0x0002, 0x17d9: 0x0002, 0x17da: 0x0002, 0x17db: 0x0002, + // Block 0x60, offset 0x1800 + 0x182a: 0x000a, 0x182b: 0x000a, 0x182c: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 
0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x000a, 0x1869: 0x000a, + 0x186a: 0x000a, 0x186b: 0x000a, 0x186d: 0x000a, 0x186e: 0x000a, 0x186f: 0x000a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x000a, + 0x1886: 0x000a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x003a, 0x18b1: 0x002a, 0x18b2: 0x003a, 0x18b3: 0x002a, 0x18b4: 0x003a, 0x18b5: 0x002a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x000a, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x003a, 0x18e7: 0x002a, 0x18e8: 0x003a, 0x18e9: 0x002a, + 0x18ea: 0x003a, 0x18eb: 0x002a, 0x18ec: 0x003a, 0x18ed: 0x002a, 0x18ee: 0x003a, 0x18ef: 0x002a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x007a, 0x1904: 0x006a, 0x1905: 0x009a, + 0x1906: 0x008a, 0x1907: 0x00ba, 0x1908: 0x00aa, 0x1909: 0x009a, 0x190a: 0x008a, 0x190b: 0x007a, + 0x190c: 0x006a, 0x190d: 0x00da, 0x190e: 0x002a, 0x190f: 0x003a, 0x1910: 0x00ca, 0x1911: 0x009a, + 0x1912: 0x008a, 0x1913: 0x007a, 0x1914: 0x006a, 0x1915: 0x009a, 0x1916: 0x008a, 0x1917: 0x00ba, + 0x1918: 0x00aa, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 
0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x003a, 0x1959: 0x002a, 0x195a: 0x003a, 0x195b: 0x002a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x003a, 0x197d: 0x002a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19c9: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19d2: 0x000a, 0x19d3: 0x000a, 0x19d4: 0x000a, 0x19d5: 0x000a, 0x19d7: 0x000a, + 0x19d8: 0x000a, 0x19d9: 0x000a, 0x19da: 0x000a, 0x19db: 0x000a, 0x19dc: 0x000a, 0x19dd: 0x000a, + 0x19de: 0x000a, 0x19df: 0x000a, 0x19e0: 0x000a, 0x19e1: 0x000a, 0x19e2: 0x000a, 0x19e3: 0x000a, + 0x19e4: 0x000a, 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19eb: 0x000a, 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + 0x19f0: 0x000a, 0x19f1: 0x000a, 0x19f2: 0x000a, 
0x19f3: 0x000a, 0x19f4: 0x000a, 0x19f5: 0x000a, + 0x19f6: 0x000a, 0x19f7: 0x000a, 0x19f8: 0x000a, 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 
0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 
0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, 0x20ac: 0x000c, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + 0x213f: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 
0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, 0x21fd: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x236a: 0x000a, 0x236b: 0x000a, + // Block 0x8e, offset 0x2380 + 0x23a5: 0x000c, 0x23a8: 0x000c, + 0x23ad: 0x000c, + // Block 0x8f, offset 0x23c0 + 0x23dd: 0x0001, + 0x23de: 0x000c, 0x23df: 0x0001, 0x23e0: 0x0001, 0x23e1: 0x0001, 0x23e2: 0x0001, 0x23e3: 0x0001, + 0x23e4: 0x0001, 0x23e5: 0x0001, 0x23e6: 0x0001, 0x23e7: 0x0001, 0x23e8: 0x0001, 0x23e9: 0x0003, + 0x23ea: 0x0001, 0x23eb: 0x0001, 0x23ec: 0x0001, 0x23ed: 0x0001, 0x23ee: 0x0001, 0x23ef: 0x0001, + 0x23f0: 0x0001, 0x23f1: 0x0001, 0x23f2: 0x0001, 0x23f3: 0x0001, 0x23f4: 0x0001, 0x23f5: 0x0001, + 0x23f6: 0x0001, 0x23f7: 0x0001, 0x23f8: 0x0001, 0x23f9: 0x0001, 0x23fa: 0x0001, 0x23fb: 0x0001, + 0x23fc: 0x0001, 0x23fd: 0x0001, 0x23fe: 0x0001, 0x23ff: 0x0001, + // Block 0x90, offset 0x2400 + 0x2400: 0x0001, 0x2401: 0x0001, 0x2402: 0x0001, 0x2403: 0x0001, 0x2404: 0x0001, 0x2405: 0x0001, + 0x2406: 0x0001, 0x2407: 0x0001, 0x2408: 0x0001, 0x2409: 0x0001, 0x240a: 0x0001, 0x240b: 0x0001, + 0x240c: 0x0001, 0x240d: 0x0001, 0x240e: 0x0001, 0x240f: 0x0001, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000d, 0x2451: 0x000d, + 0x2452: 0x000d, 0x2453: 0x000d, 0x2454: 0x000d, 0x2455: 0x000d, 0x2456: 0x000d, 0x2457: 0x000d, + 0x2458: 0x000d, 0x2459: 0x000d, 0x245a: 0x000d, 0x245b: 0x000d, 0x245c: 0x000d, 0x245d: 0x000d, + 0x245e: 0x000d, 0x245f: 0x000d, 0x2460: 0x000d, 0x2461: 0x000d, 0x2462: 0x000d, 0x2463: 0x000d, + 0x2464: 0x000d, 0x2465: 0x000d, 0x2466: 0x000d, 0x2467: 0x000d, 0x2468: 0x000d, 0x2469: 0x000d, + 0x246a: 0x000d, 0x246b: 0x000d, 0x246c: 0x000d, 0x246d: 0x000d, 0x246e: 0x000d, 0x246f: 0x000d, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 
0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000b, 0x2491: 0x000b, + 0x2492: 0x000b, 0x2493: 0x000b, 0x2494: 0x000b, 0x2495: 0x000b, 0x2496: 0x000b, 0x2497: 0x000b, + 0x2498: 0x000b, 0x2499: 0x000b, 0x249a: 0x000b, 0x249b: 0x000b, 0x249c: 0x000b, 0x249d: 0x000b, + 0x249e: 0x000b, 0x249f: 0x000b, 0x24a0: 0x000b, 0x24a1: 0x000b, 0x24a2: 0x000b, 0x24a3: 0x000b, + 0x24a4: 0x000b, 0x24a5: 0x000b, 0x24a6: 0x000b, 0x24a7: 0x000b, 0x24a8: 0x000b, 0x24a9: 0x000b, + 0x24aa: 0x000b, 0x24ab: 0x000b, 0x24ac: 0x000b, 0x24ad: 0x000b, 0x24ae: 0x000b, 0x24af: 0x000b, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000a, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000c, 0x24c1: 0x000c, 0x24c2: 0x000c, 0x24c3: 0x000c, 0x24c4: 0x000c, 0x24c5: 0x000c, + 0x24c6: 0x000c, 0x24c7: 0x000c, 0x24c8: 0x000c, 0x24c9: 0x000c, 0x24ca: 0x000c, 0x24cb: 0x000c, + 0x24cc: 0x000c, 0x24cd: 0x000c, 0x24ce: 0x000c, 0x24cf: 0x000c, 0x24d0: 0x000a, 0x24d1: 0x000a, + 0x24d2: 0x000a, 0x24d3: 0x000a, 0x24d4: 0x000a, 0x24d5: 0x000a, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x000a, + 0x24e0: 0x000c, 0x24e1: 0x000c, 0x24e2: 0x000c, 0x24e3: 0x000c, + 0x24e4: 0x000c, 0x24e5: 0x000c, 0x24e6: 0x000c, 0x24e7: 0x000c, 0x24e8: 0x000c, 0x24e9: 0x000c, + 0x24ea: 0x000c, 0x24eb: 0x000c, 0x24ec: 0x000c, 0x24ed: 0x000c, 0x24ee: 0x000c, 0x24ef: 0x000c, + 0x24f0: 0x000a, 0x24f1: 0x000a, 0x24f2: 0x000a, 0x24f3: 0x000a, 0x24f4: 0x000a, 0x24f5: 0x000a, + 0x24f6: 0x000a, 0x24f7: 0x000a, 0x24f8: 0x000a, 0x24f9: 0x000a, 0x24fa: 0x000a, 0x24fb: 0x000a, + 0x24fc: 0x000a, 0x24fd: 0x000a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x000a, 0x2504: 0x000a, 0x2505: 0x000a, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x000a, 0x2509: 0x000a, 0x250a: 0x000a, 0x250b: 0x000a, + 0x250c: 0x000a, 0x250d: 0x000a, 0x250e: 0x000a, 0x250f: 0x000a, 0x2510: 0x0006, 0x2511: 0x000a, + 0x2512: 0x0006, 0x2514: 0x000a, 0x2515: 0x0006, 0x2516: 0x000a, 0x2517: 0x000a, + 0x2518: 0x000a, 0x2519: 0x009a, 0x251a: 0x008a, 0x251b: 0x007a, 0x251c: 0x006a, 0x251d: 0x009a, + 0x251e: 0x008a, 0x251f: 0x0004, 0x2520: 0x000a, 0x2521: 0x000a, 0x2522: 0x0003, 0x2523: 0x0003, + 0x2524: 0x000a, 0x2525: 0x000a, 0x2526: 0x000a, 0x2528: 0x000a, 0x2529: 0x0004, + 0x252a: 0x0004, 0x252b: 0x000a, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000d, + // Block 0x95, offset 0x2540 + 0x2540: 0x000d, 0x2541: 0x000d, 0x2542: 0x000d, 0x2543: 0x000d, 0x2544: 0x000d, 0x2545: 0x000d, + 0x2546: 0x000d, 0x2547: 0x000d, 0x2548: 0x000d, 0x2549: 0x000d, 0x254a: 0x000d, 0x254b: 0x000d, + 0x254c: 0x000d, 0x254d: 0x000d, 0x254e: 0x000d, 0x254f: 0x000d, 0x2550: 
0x000d, 0x2551: 0x000d, + 0x2552: 0x000d, 0x2553: 0x000d, 0x2554: 0x000d, 0x2555: 0x000d, 0x2556: 0x000d, 0x2557: 0x000d, + 0x2558: 0x000d, 0x2559: 0x000d, 0x255a: 0x000d, 0x255b: 0x000d, 0x255c: 0x000d, 0x255d: 0x000d, + 0x255e: 0x000d, 0x255f: 0x000d, 0x2560: 0x000d, 0x2561: 0x000d, 0x2562: 0x000d, 0x2563: 0x000d, + 0x2564: 0x000d, 0x2565: 0x000d, 0x2566: 0x000d, 0x2567: 0x000d, 0x2568: 0x000d, 0x2569: 0x000d, + 0x256a: 0x000d, 0x256b: 0x000d, 0x256c: 0x000d, 0x256d: 0x000d, 0x256e: 0x000d, 0x256f: 0x000d, + 0x2570: 0x000d, 0x2571: 0x000d, 0x2572: 0x000d, 0x2573: 0x000d, 0x2574: 0x000d, 0x2575: 0x000d, + 0x2576: 0x000d, 0x2577: 0x000d, 0x2578: 0x000d, 0x2579: 0x000d, 0x257a: 0x000d, 0x257b: 0x000d, + 0x257c: 0x000d, 0x257d: 0x000d, 0x257e: 0x000d, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, 0x2582: 0x000a, 0x2583: 0x0004, 0x2584: 0x0004, 0x2585: 0x0004, + 0x2586: 0x000a, 0x2587: 0x000a, 0x2588: 0x003a, 0x2589: 0x002a, 0x258a: 0x000a, 0x258b: 0x0003, + 0x258c: 0x0006, 0x258d: 0x0003, 0x258e: 0x0006, 0x258f: 0x0006, 0x2590: 0x0002, 0x2591: 0x0002, + 0x2592: 0x0002, 0x2593: 0x0002, 0x2594: 0x0002, 0x2595: 0x0002, 0x2596: 0x0002, 0x2597: 0x0002, + 0x2598: 0x0002, 0x2599: 0x0002, 0x259a: 0x0006, 0x259b: 0x000a, 0x259c: 0x000a, 0x259d: 0x000a, + 0x259e: 0x000a, 0x259f: 0x000a, 0x25a0: 0x000a, + 0x25bb: 0x005a, + 0x25bc: 0x000a, 0x25bd: 0x004a, 0x25be: 0x000a, 0x25bf: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, + 0x25db: 0x005a, 0x25dc: 0x000a, 0x25dd: 0x004a, + 0x25de: 0x000a, 0x25df: 0x00fa, 0x25e0: 0x00ea, 0x25e1: 0x000a, 0x25e2: 0x003a, 0x25e3: 0x002a, + 0x25e4: 0x000a, 0x25e5: 0x000a, + // Block 0x98, offset 0x2600 + 0x2620: 0x0004, 0x2621: 0x0004, 0x2622: 0x000a, 0x2623: 0x000a, + 0x2624: 0x000a, 0x2625: 0x0004, 0x2626: 0x0004, 0x2628: 0x000a, 0x2629: 0x000a, + 0x262a: 0x000a, 0x262b: 0x000a, 0x262c: 0x000a, 0x262d: 0x000a, 0x262e: 0x000a, + 0x2630: 0x000b, 0x2631: 0x000b, 0x2632: 0x000b, 0x2633: 0x000b, 0x2634: 0x000b, 0x2635: 0x000b, + 0x2636: 0x000b, 0x2637: 0x000b, 0x2638: 0x000b, 0x2639: 0x000a, 0x263a: 0x000a, 0x263b: 0x000a, + 0x263c: 0x000a, 0x263d: 0x000a, 0x263e: 0x000b, 0x263f: 0x000b, + // Block 0x99, offset 0x2640 + 0x2641: 0x000a, + // Block 0x9a, offset 0x2680 + 0x2680: 0x000a, 0x2681: 0x000a, 0x2682: 0x000a, 0x2683: 0x000a, 0x2684: 0x000a, 0x2685: 0x000a, + 0x2686: 0x000a, 0x2687: 0x000a, 0x2688: 0x000a, 0x2689: 0x000a, 0x268a: 0x000a, 0x268b: 0x000a, + 0x268c: 0x000a, 0x2690: 0x000a, 0x2691: 0x000a, + 0x2692: 0x000a, 0x2693: 0x000a, 0x2694: 0x000a, 0x2695: 0x000a, 0x2696: 0x000a, 0x2697: 0x000a, + 0x2698: 0x000a, 0x2699: 0x000a, 0x269a: 0x000a, 0x269b: 0x000a, 0x269c: 0x000a, + 0x26a0: 0x000a, + // Block 0x9b, offset 0x26c0 + 0x26fd: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2720: 0x000c, 0x2721: 0x0002, 0x2722: 0x0002, 0x2723: 0x0002, + 0x2724: 0x0002, 0x2725: 0x0002, 0x2726: 0x0002, 0x2727: 0x0002, 0x2728: 0x0002, 0x2729: 0x0002, + 0x272a: 0x0002, 0x272b: 0x0002, 0x272c: 0x0002, 0x272d: 0x0002, 0x272e: 0x0002, 0x272f: 0x0002, + 0x2730: 0x0002, 0x2731: 0x0002, 0x2732: 0x0002, 0x2733: 0x0002, 0x2734: 0x0002, 0x2735: 0x0002, + 0x2736: 0x0002, 0x2737: 0x0002, 0x2738: 0x0002, 0x2739: 0x0002, 0x273a: 0x0002, 0x273b: 0x0002, + // Block 0x9d, offset 0x2740 + 0x2776: 0x000c, 0x2777: 0x000c, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 
0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x000a, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x000c, 0x2802: 0x000c, 0x2803: 0x000c, 0x2804: 0x0001, 0x2805: 0x000c, + 0x2806: 0x000c, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x000c, 0x280d: 0x000c, 0x280e: 0x000c, 0x280f: 0x000c, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x000c, 0x2839: 0x000c, 0x283a: 0x000c, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x000c, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 
0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x0001, 0x287a: 0x0001, 0x287b: 0x0001, + 0x287c: 0x0001, 0x287d: 0x0001, 0x287e: 0x0001, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x0001, 0x28b1: 0x0001, 0x28b2: 0x0001, 0x28b3: 0x0001, 0x28b4: 0x0001, 0x28b5: 0x0001, + 0x28b6: 0x0001, 0x28b7: 0x0001, 0x28b8: 0x0001, 0x28b9: 0x000a, 0x28ba: 0x000a, 0x28bb: 0x000a, + 0x28bc: 0x000a, 0x28bd: 0x000a, 0x28be: 0x000a, 0x28bf: 0x000a, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000d, 0x28c7: 0x000d, 0x28c8: 0x000d, 0x28c9: 0x000d, 0x28ca: 0x000d, 0x28cb: 0x000d, + 0x28cc: 0x000d, 0x28cd: 0x000d, 0x28ce: 0x000d, 0x28cf: 0x000d, 0x28d0: 0x000d, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000c, 0x28e5: 0x000c, 0x28e6: 0x000c, 0x28e7: 0x000c, 0x28e8: 0x000d, 0x28e9: 0x000d, + 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d, + 0x28f0: 0x0005, 0x28f1: 0x0005, 0x28f2: 0x0005, 0x28f3: 0x0005, 0x28f4: 0x0005, 0x28f5: 0x0005, + 0x28f6: 0x0005, 0x28f7: 0x0005, 0x28f8: 0x0005, 0x28f9: 0x0005, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x0001, 0x2901: 0x0001, 0x2902: 0x0001, 0x2903: 0x0001, 0x2904: 0x0001, 0x2905: 0x0001, + 0x2906: 0x0001, 0x2907: 0x0001, 0x2908: 0x0001, 0x2909: 0x0001, 0x290a: 0x0001, 0x290b: 0x0001, + 0x290c: 0x0001, 0x290d: 0x0001, 0x290e: 0x0001, 0x290f: 0x0001, 0x2910: 0x0001, 0x2911: 0x0001, + 0x2912: 0x0001, 0x2913: 0x0001, 0x2914: 0x0001, 0x2915: 0x0001, 0x2916: 0x0001, 0x2917: 0x0001, + 0x2918: 0x0001, 0x2919: 0x0001, 0x291a: 0x0001, 0x291b: 0x0001, 0x291c: 0x0001, 0x291d: 0x0001, + 0x291e: 0x0001, 0x291f: 0x0001, 0x2920: 0x0005, 0x2921: 0x0005, 0x2922: 0x0005, 0x2923: 0x0005, + 0x2924: 0x0005, 0x2925: 0x0005, 0x2926: 0x0005, 0x2927: 0x0005, 0x2928: 0x0005, 0x2929: 0x0005, + 0x292a: 0x0005, 0x292b: 0x0005, 0x292c: 0x0005, 0x292d: 0x0005, 0x292e: 0x0005, 0x292f: 0x0005, + 0x2930: 0x0005, 0x2931: 0x0005, 
0x2932: 0x0005, 0x2933: 0x0005, 0x2934: 0x0005, 0x2935: 0x0005, + 0x2936: 0x0005, 0x2937: 0x0005, 0x2938: 0x0005, 0x2939: 0x0005, 0x293a: 0x0005, 0x293b: 0x0005, + 0x293c: 0x0005, 0x293d: 0x0005, 0x293e: 0x0005, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2940: 0x0001, 0x2941: 0x0001, 0x2942: 0x0001, 0x2943: 0x0001, 0x2944: 0x0001, 0x2945: 0x0001, + 0x2946: 0x0001, 0x2947: 0x0001, 0x2948: 0x0001, 0x2949: 0x0001, 0x294a: 0x0001, 0x294b: 0x0001, + 0x294c: 0x0001, 0x294d: 0x0001, 0x294e: 0x0001, 0x294f: 0x0001, 0x2950: 0x0001, 0x2951: 0x0001, + 0x2952: 0x0001, 0x2953: 0x0001, 0x2954: 0x0001, 0x2955: 0x0001, 0x2956: 0x0001, 0x2957: 0x0001, + 0x2958: 0x0001, 0x2959: 0x0001, 0x295a: 0x0001, 0x295b: 0x0001, 0x295c: 0x0001, 0x295d: 0x0001, + 0x295e: 0x0001, 0x295f: 0x0001, 0x2960: 0x0001, 0x2961: 0x0001, 0x2962: 0x0001, 0x2963: 0x0001, + 0x2964: 0x0001, 0x2965: 0x0001, 0x2966: 0x0001, 0x2967: 0x0001, 0x2968: 0x0001, 0x2969: 0x0001, + 0x296a: 0x0001, 0x296b: 0x000c, 0x296c: 0x000c, 0x296d: 0x0001, 0x296e: 0x0001, 0x296f: 0x0001, + 0x2970: 0x0001, 0x2971: 0x0001, 0x2972: 0x0001, 0x2973: 0x0001, 0x2974: 0x0001, 0x2975: 0x0001, + 0x2976: 0x0001, 0x2977: 0x0001, 0x2978: 0x0001, 0x2979: 0x0001, 0x297a: 0x0001, 0x297b: 0x0001, + 0x297c: 0x0001, 0x297d: 0x0001, 0x297e: 0x0001, 0x297f: 0x0001, + // Block 0xa6, offset 0x2980 + 0x2980: 0x0001, 0x2981: 0x0001, 0x2982: 0x0001, 0x2983: 0x0001, 0x2984: 0x0001, 0x2985: 0x0001, + 0x2986: 0x0001, 0x2987: 0x0001, 0x2988: 0x0001, 0x2989: 0x0001, 0x298a: 0x0001, 0x298b: 0x0001, + 0x298c: 0x0001, 0x298d: 0x0001, 0x298e: 0x0001, 0x298f: 0x0001, 0x2990: 0x0001, 0x2991: 0x0001, + 0x2992: 0x0001, 0x2993: 0x0001, 0x2994: 0x0001, 0x2995: 0x0001, 0x2996: 0x0001, 0x2997: 0x0001, + 0x2998: 0x0001, 0x2999: 0x0001, 0x299a: 0x0001, 0x299b: 0x0001, 0x299c: 0x0001, 0x299d: 0x0001, + 0x299e: 0x0001, 0x299f: 0x0001, 0x29a0: 0x0001, 0x29a1: 0x0001, 0x29a2: 0x0001, 0x29a3: 0x0001, + 0x29a4: 0x0001, 0x29a5: 0x0001, 0x29a6: 0x0001, 0x29a7: 0x0001, 0x29a8: 0x0001, 0x29a9: 0x0001, + 0x29aa: 0x0001, 0x29ab: 0x0001, 0x29ac: 0x0001, 0x29ad: 0x0001, 0x29ae: 0x0001, 0x29af: 0x0001, + 0x29b0: 0x000d, 0x29b1: 0x000d, 0x29b2: 0x000d, 0x29b3: 0x000d, 0x29b4: 0x000d, 0x29b5: 0x000d, + 0x29b6: 0x000d, 0x29b7: 0x000d, 0x29b8: 0x000d, 0x29b9: 0x000d, 0x29ba: 0x000d, 0x29bb: 0x000d, + 0x29bc: 0x000d, 0x29bd: 0x000d, 0x29be: 0x000d, 0x29bf: 0x000d, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000d, 0x29c1: 0x000d, 0x29c2: 0x000d, 0x29c3: 0x000d, 0x29c4: 0x000d, 0x29c5: 0x000d, + 0x29c6: 0x000c, 0x29c7: 0x000c, 0x29c8: 0x000c, 0x29c9: 0x000c, 0x29ca: 0x000c, 0x29cb: 0x000c, + 0x29cc: 0x000c, 0x29cd: 0x000c, 0x29ce: 0x000c, 0x29cf: 0x000c, 0x29d0: 0x000c, 0x29d1: 0x000d, + 0x29d2: 0x000d, 0x29d3: 0x000d, 0x29d4: 0x000d, 0x29d5: 0x000d, 0x29d6: 0x000d, 0x29d7: 0x000d, + 0x29d8: 0x000d, 0x29d9: 0x000d, 0x29da: 0x000d, 0x29db: 0x000d, 0x29dc: 0x000d, 0x29dd: 0x000d, + 0x29de: 0x000d, 0x29df: 0x000d, 0x29e0: 0x000d, 0x29e1: 0x000d, 0x29e2: 0x000d, 0x29e3: 0x000d, + 0x29e4: 0x000d, 0x29e5: 0x000d, 0x29e6: 0x000d, 0x29e7: 0x000d, 0x29e8: 0x000d, 0x29e9: 0x000d, + 0x29ea: 0x000d, 0x29eb: 0x000d, 0x29ec: 0x000d, 0x29ed: 0x000d, 0x29ee: 0x000d, 0x29ef: 0x000d, + 0x29f0: 0x0001, 0x29f1: 0x0001, 0x29f2: 0x0001, 0x29f3: 0x0001, 0x29f4: 0x0001, 0x29f5: 0x0001, + 0x29f6: 0x0001, 0x29f7: 0x0001, 0x29f8: 0x0001, 0x29f9: 0x0001, 0x29fa: 0x0001, 0x29fb: 0x0001, + 0x29fc: 0x0001, 0x29fd: 0x0001, 0x29fe: 0x0001, 0x29ff: 0x0001, + // Block 0xa8, offset 0x2a00 + 0x2a01: 0x000c, + 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 
0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, 0x2a3f: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x000c, 0x2a41: 0x000c, 0x2a42: 0x000c, 0x2a43: 0x000c, 0x2a44: 0x000c, 0x2a45: 0x000c, + 0x2a46: 0x000c, + 0x2a52: 0x000a, 0x2a53: 0x000a, 0x2a54: 0x000a, 0x2a55: 0x000a, 0x2a56: 0x000a, 0x2a57: 0x000a, + 0x2a58: 0x000a, 0x2a59: 0x000a, 0x2a5a: 0x000a, 0x2a5b: 0x000a, 0x2a5c: 0x000a, 0x2a5d: 0x000a, + 0x2a5e: 0x000a, 0x2a5f: 0x000a, 0x2a60: 0x000a, 0x2a61: 0x000a, 0x2a62: 0x000a, 0x2a63: 0x000a, + 0x2a64: 0x000a, 0x2a65: 0x000a, + 0x2a7f: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab3: 0x000c, 0x2ab4: 0x000c, 0x2ab5: 0x000c, + 0x2ab6: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, 0x2ac2: 0x000c, + 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, 0x2aeb: 0x000c, 0x2aed: 0x000c, 0x2aee: 0x000c, 0x2aef: 0x000c, + 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af2: 0x000c, 0x2af3: 0x000c, 0x2af4: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b33: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, 0x2b41: 0x000c, + 0x2b76: 0x000c, 0x2b77: 0x000c, 0x2b78: 0x000c, 0x2b79: 0x000c, 0x2b7a: 0x000c, 0x2b7b: 0x000c, + 0x2b7c: 0x000c, 0x2b7d: 0x000c, 0x2b7e: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b89: 0x000c, 0x2b8a: 0x000c, 0x2b8b: 0x000c, + 0x2b8c: 0x000c, 0x2b8f: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bef: 0x000c, + 0x2bf0: 0x000c, 0x2bf1: 0x000c, 0x2bf4: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, + 0x2bfe: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c1f: 0x000c, 0x2c23: 0x000c, + 0x2c24: 0x000c, 0x2c25: 0x000c, 0x2c26: 0x000c, 0x2c27: 0x000c, 0x2c28: 0x000c, 0x2c29: 0x000c, + 0x2c2a: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, + 0x2c66: 0x000c, 0x2c67: 0x000c, 0x2c68: 0x000c, 0x2c69: 0x000c, + 0x2c6a: 0x000c, 0x2c6b: 0x000c, 0x2c6c: 0x000c, + 0x2c70: 0x000c, 0x2c71: 0x000c, 0x2c72: 0x000c, 0x2c73: 0x000c, 0x2c74: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb8: 0x000c, 0x2cb9: 0x000c, 0x2cba: 0x000c, 0x2cbb: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbe: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc2: 0x000c, 0x2cc3: 0x000c, 0x2cc4: 0x000c, + 0x2cc6: 0x000c, + 0x2cde: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d3a: 0x000c, + 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, 0x2d42: 0x000c, 0x2d43: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2dbc: 0x000c, 0x2dbd: 0x000c, 0x2dbf: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x000c, + 0x2ddc: 0x000c, 0x2ddd: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, + 0x2e3d: 0x000c, 0x2e3f: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x000c, + 0x2e60: 0x000a, 0x2e61: 0x000a, 0x2e62: 0x000a, 0x2e63: 0x000a, + 0x2e64: 0x000a, 0x2e65: 0x000a, 0x2e66: 0x000a, 0x2e67: 0x000a, 0x2e68: 0x000a, 0x2e69: 0x000a, + 0x2e6a: 0x000a, 0x2e6b: 0x000a, 0x2e6c: 0x000a, + // Block 0xba, offset 0x2e80 + 0x2eab: 0x000c, 0x2ead: 0x000c, + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb7: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2edd: 0x000c, + 0x2ede: 0x000c, 0x2edf: 0x000c, 0x2ee2: 0x000c, 0x2ee3: 0x000c, + 0x2ee4: 0x000c, 0x2ee5: 0x000c, 0x2ee7: 
0x000c, 0x2ee8: 0x000c, 0x2ee9: 0x000c, + 0x2eea: 0x000c, 0x2eeb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f2f: 0x000c, + 0x2f30: 0x000c, 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f36: 0x000c, 0x2f37: 0x000c, 0x2f39: 0x000c, 0x2f3a: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7e: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f83: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fd4: 0x000c, 0x2fd5: 0x000c, 0x2fd6: 0x000c, 0x2fd7: 0x000c, + 0x2fda: 0x000c, 0x2fdb: 0x000c, + 0x2fe0: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3006: 0x000c, 0x3009: 0x000c, 0x300a: 0x000c, + 0x3033: 0x000c, 0x3034: 0x000c, 0x3035: 0x000c, + 0x3036: 0x000c, 0x3037: 0x000c, 0x3038: 0x000c, 0x303b: 0x000c, + 0x303c: 0x000c, 0x303d: 0x000c, 0x303e: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3047: 0x000c, + 0x3051: 0x000c, + 0x3052: 0x000c, 0x3053: 0x000c, 0x3054: 0x000c, 0x3055: 0x000c, 0x3056: 0x000c, + 0x3059: 0x000c, 0x305a: 0x000c, 0x305b: 0x000c, + // Block 0xc2, offset 0x3080 + 0x308a: 0x000c, 0x308b: 0x000c, + 0x308c: 0x000c, 0x308d: 0x000c, 0x308e: 0x000c, 0x308f: 0x000c, 0x3090: 0x000c, 0x3091: 0x000c, + 0x3092: 0x000c, 0x3093: 0x000c, 0x3094: 0x000c, 0x3095: 0x000c, 0x3096: 0x000c, + 0x3098: 0x000c, 0x3099: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, 0x30f5: 0x000c, + 0x30f6: 0x000c, 0x30f8: 0x000c, 0x30f9: 0x000c, 0x30fa: 0x000c, 0x30fb: 0x000c, + 0x30fc: 0x000c, 0x30fd: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3112: 0x000c, 0x3113: 0x000c, 0x3114: 0x000c, 0x3115: 0x000c, 0x3116: 0x000c, 0x3117: 0x000c, + 0x3118: 0x000c, 0x3119: 0x000c, 0x311a: 0x000c, 0x311b: 0x000c, 0x311c: 0x000c, 0x311d: 0x000c, + 0x311e: 0x000c, 0x311f: 0x000c, 0x3120: 0x000c, 0x3121: 0x000c, 0x3122: 0x000c, 0x3123: 0x000c, + 0x3124: 0x000c, 0x3125: 0x000c, 0x3126: 0x000c, 0x3127: 0x000c, + 0x312a: 0x000c, 0x312b: 0x000c, 0x312c: 0x000c, 0x312d: 0x000c, 0x312e: 0x000c, 0x312f: 0x000c, + 0x3130: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, 0x317a: 0x000c, + 0x317c: 0x000c, 0x317d: 0x000c, 0x317f: 0x000c, + // Block 0xc6, offset 0x3180 + 0x3180: 0x000c, 0x3181: 0x000c, 0x3182: 0x000c, 0x3183: 0x000c, 0x3184: 0x000c, 0x3185: 0x000c, + 0x3187: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d5: 0x000c, 0x31d7: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3233: 0x000c, 0x3234: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3255: 0x000a, 0x3256: 0x000a, 0x3257: 0x000a, + 0x3258: 0x000a, 0x3259: 0x000a, 0x325a: 0x000a, 0x325b: 0x000a, 0x325c: 0x000a, 0x325d: 0x0004, + 0x325e: 0x0004, 0x325f: 0x0004, 0x3260: 0x0004, 0x3261: 0x000a, 0x3262: 0x000a, 0x3263: 0x000a, + 0x3264: 0x000a, 0x3265: 0x000a, 0x3266: 0x000a, 0x3267: 0x000a, 0x3268: 0x000a, 0x3269: 0x000a, + 0x326a: 0x000a, 0x326b: 0x000a, 0x326c: 0x000a, 0x326d: 0x000a, 0x326e: 0x000a, 0x326f: 0x000a, + 0x3270: 0x000a, 0x3271: 0x000a, + // Block 0xca, offset 0x3280 + 0x32b0: 0x000c, 0x32b1: 0x000c, 0x32b2: 0x000c, 0x32b3: 0x000c, 0x32b4: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32f0: 0x000c, 0x32f1: 0x000c, 0x32f2: 0x000c, 0x32f3: 0x000c, 0x32f4: 0x000c, 0x32f5: 0x000c, + 0x32f6: 0x000c, + // Block 0xcc, offset 0x3300 + 0x330f: 0x000c, + // Block 0xcd, offset 0x3340 + 
0x334f: 0x000c, 0x3350: 0x000c, 0x3351: 0x000c, + 0x3352: 0x000c, + // Block 0xce, offset 0x3380 + 0x33a2: 0x000a, + 0x33a4: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33dd: 0x000c, + 0x33de: 0x000c, 0x33e0: 0x000b, 0x33e1: 0x000b, 0x33e2: 0x000b, 0x33e3: 0x000b, + // Block 0xd0, offset 0x3400 + 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x3433: 0x000b, 0x3434: 0x000b, 0x3435: 0x000b, + 0x3436: 0x000b, 0x3437: 0x000b, 0x3438: 0x000b, 0x3439: 0x000b, 0x343a: 0x000b, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, 0x346d: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3480: 0x000a, 0x3481: 0x000a, 0x3482: 0x000c, 0x3483: 0x000c, 0x3484: 0x000c, 0x3485: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, 0x34d3: 0x000a, 0x34d4: 0x000a, 0x34d5: 0x000a, 0x34d6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x351b: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3555: 0x000a, + // Block 0xd6, offset 0x3580 + 0x358f: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c9: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3603: 0x000a, + 0x360e: 0x0002, 0x360f: 0x0002, 0x3610: 0x0002, 0x3611: 0x0002, + 0x3612: 0x0002, 0x3613: 0x0002, 0x3614: 0x0002, 0x3615: 0x0002, 0x3616: 0x0002, 0x3617: 0x0002, + 0x3618: 0x0002, 0x3619: 0x0002, 0x361a: 0x0002, 0x361b: 0x0002, 0x361c: 0x0002, 0x361d: 0x0002, + 0x361e: 0x0002, 0x361f: 0x0002, 0x3620: 0x0002, 0x3621: 0x0002, 0x3622: 0x0002, 0x3623: 0x0002, + 0x3624: 0x0002, 0x3625: 0x0002, 0x3626: 0x0002, 0x3627: 0x0002, 0x3628: 0x0002, 0x3629: 0x0002, + 0x362a: 0x0002, 0x362b: 0x0002, 0x362c: 0x0002, 0x362d: 0x0002, 0x362e: 0x0002, 0x362f: 0x0002, + 0x3630: 0x0002, 0x3631: 0x0002, 0x3632: 0x0002, 0x3633: 0x0002, 0x3634: 0x0002, 0x3635: 0x0002, + 0x3636: 0x0002, 0x3637: 0x0002, 0x3638: 0x0002, 0x3639: 0x0002, 0x363a: 0x0002, 0x363b: 0x0002, + 0x363c: 0x0002, 0x363d: 0x0002, 0x363e: 0x0002, 0x363f: 0x0002, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000c, 0x3641: 0x000c, 0x3642: 0x000c, 0x3643: 0x000c, 0x3644: 0x000c, 0x3645: 0x000c, + 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 0x000c, 0x364a: 0x000c, 0x364b: 0x000c, + 0x364c: 0x000c, 0x364d: 0x000c, 0x364e: 0x000c, 0x364f: 0x000c, 0x3650: 0x000c, 0x3651: 0x000c, + 0x3652: 0x000c, 0x3653: 0x000c, 0x3654: 0x000c, 0x3655: 0x000c, 0x3656: 0x000c, 0x3657: 0x000c, + 0x3658: 0x000c, 0x3659: 0x000c, 0x365a: 0x000c, 0x365b: 0x000c, 0x365c: 0x000c, 0x365d: 0x000c, + 0x365e: 0x000c, 0x365f: 0x000c, 0x3660: 0x000c, 0x3661: 0x000c, 0x3662: 0x000c, 0x3663: 0x000c, + 0x3664: 0x000c, 0x3665: 0x000c, 0x3666: 0x000c, 0x3667: 0x000c, 0x3668: 0x000c, 0x3669: 0x000c, + 0x366a: 0x000c, 0x366b: 0x000c, 0x366c: 0x000c, 0x366d: 0x000c, 0x366e: 0x000c, 0x366f: 0x000c, + 0x3670: 0x000c, 0x3671: 0x000c, 0x3672: 0x000c, 0x3673: 0x000c, 0x3674: 0x000c, 0x3675: 0x000c, + 0x3676: 0x000c, 0x367b: 0x000c, + 0x367c: 0x000c, 0x367d: 0x000c, 0x367e: 0x000c, 0x367f: 0x000c, + // Block 0xda, offset 0x3680 + 0x3680: 0x000c, 0x3681: 0x000c, 0x3682: 0x000c, 0x3683: 0x000c, 0x3684: 0x000c, 0x3685: 
0x000c, + 0x3686: 0x000c, 0x3687: 0x000c, 0x3688: 0x000c, 0x3689: 0x000c, 0x368a: 0x000c, 0x368b: 0x000c, + 0x368c: 0x000c, 0x368d: 0x000c, 0x368e: 0x000c, 0x368f: 0x000c, 0x3690: 0x000c, 0x3691: 0x000c, + 0x3692: 0x000c, 0x3693: 0x000c, 0x3694: 0x000c, 0x3695: 0x000c, 0x3696: 0x000c, 0x3697: 0x000c, + 0x3698: 0x000c, 0x3699: 0x000c, 0x369a: 0x000c, 0x369b: 0x000c, 0x369c: 0x000c, 0x369d: 0x000c, + 0x369e: 0x000c, 0x369f: 0x000c, 0x36a0: 0x000c, 0x36a1: 0x000c, 0x36a2: 0x000c, 0x36a3: 0x000c, + 0x36a4: 0x000c, 0x36a5: 0x000c, 0x36a6: 0x000c, 0x36a7: 0x000c, 0x36a8: 0x000c, 0x36a9: 0x000c, + 0x36aa: 0x000c, 0x36ab: 0x000c, 0x36ac: 0x000c, + 0x36b5: 0x000c, + // Block 0xdb, offset 0x36c0 + 0x36c4: 0x000c, + 0x36db: 0x000c, 0x36dc: 0x000c, 0x36dd: 0x000c, + 0x36de: 0x000c, 0x36df: 0x000c, 0x36e1: 0x000c, 0x36e2: 0x000c, 0x36e3: 0x000c, + 0x36e4: 0x000c, 0x36e5: 0x000c, 0x36e6: 0x000c, 0x36e7: 0x000c, 0x36e8: 0x000c, 0x36e9: 0x000c, + 0x36ea: 0x000c, 0x36eb: 0x000c, 0x36ec: 0x000c, 0x36ed: 0x000c, 0x36ee: 0x000c, 0x36ef: 0x000c, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000c, 0x3701: 0x000c, 0x3702: 0x000c, 0x3703: 0x000c, 0x3704: 0x000c, 0x3705: 0x000c, + 0x3706: 0x000c, 0x3708: 0x000c, 0x3709: 0x000c, 0x370a: 0x000c, 0x370b: 0x000c, + 0x370c: 0x000c, 0x370d: 0x000c, 0x370e: 0x000c, 0x370f: 0x000c, 0x3710: 0x000c, 0x3711: 0x000c, + 0x3712: 0x000c, 0x3713: 0x000c, 0x3714: 0x000c, 0x3715: 0x000c, 0x3716: 0x000c, 0x3717: 0x000c, + 0x3718: 0x000c, 0x371b: 0x000c, 0x371c: 0x000c, 0x371d: 0x000c, + 0x371e: 0x000c, 0x371f: 0x000c, 0x3720: 0x000c, 0x3721: 0x000c, 0x3723: 0x000c, + 0x3724: 0x000c, 0x3726: 0x000c, 0x3727: 0x000c, 0x3728: 0x000c, 0x3729: 0x000c, + 0x372a: 0x000c, + // Block 0xdd, offset 0x3740 + 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x377f: 0x0004, + // Block 0xde, offset 0x3780 + 0x3780: 0x0001, 0x3781: 0x0001, 0x3782: 0x0001, 0x3783: 0x0001, 0x3784: 0x0001, 0x3785: 0x0001, + 0x3786: 0x0001, 0x3787: 0x0001, 0x3788: 0x0001, 0x3789: 0x0001, 0x378a: 0x0001, 0x378b: 0x0001, + 0x378c: 0x0001, 0x378d: 0x0001, 0x378e: 0x0001, 0x378f: 0x0001, 0x3790: 0x000c, 0x3791: 0x000c, + 0x3792: 0x000c, 0x3793: 0x000c, 0x3794: 0x000c, 0x3795: 0x000c, 0x3796: 0x000c, 0x3797: 0x0001, + 0x3798: 0x0001, 0x3799: 0x0001, 0x379a: 0x0001, 0x379b: 0x0001, 0x379c: 0x0001, 0x379d: 0x0001, + 0x379e: 0x0001, 0x379f: 0x0001, 0x37a0: 0x0001, 0x37a1: 0x0001, 0x37a2: 0x0001, 0x37a3: 0x0001, + 0x37a4: 0x0001, 0x37a5: 0x0001, 0x37a6: 0x0001, 0x37a7: 0x0001, 0x37a8: 0x0001, 0x37a9: 0x0001, + 0x37aa: 0x0001, 0x37ab: 0x0001, 0x37ac: 0x0001, 0x37ad: 0x0001, 0x37ae: 0x0001, 0x37af: 0x0001, + 0x37b0: 0x0001, 0x37b1: 0x0001, 0x37b2: 0x0001, 0x37b3: 0x0001, 0x37b4: 0x0001, 0x37b5: 0x0001, + 0x37b6: 0x0001, 0x37b7: 0x0001, 0x37b8: 0x0001, 0x37b9: 0x0001, 0x37ba: 0x0001, 0x37bb: 0x0001, + 0x37bc: 0x0001, 0x37bd: 0x0001, 0x37be: 0x0001, 0x37bf: 0x0001, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0001, 0x37c1: 0x0001, 0x37c2: 0x0001, 0x37c3: 0x0001, 0x37c4: 0x000c, 0x37c5: 0x000c, + 0x37c6: 0x000c, 0x37c7: 0x000c, 0x37c8: 0x000c, 0x37c9: 0x000c, 0x37ca: 0x000c, 0x37cb: 0x0001, + 0x37cc: 0x0001, 0x37cd: 0x0001, 0x37ce: 0x0001, 0x37cf: 0x0001, 0x37d0: 0x0001, 0x37d1: 0x0001, + 0x37d2: 0x0001, 0x37d3: 0x0001, 0x37d4: 0x0001, 0x37d5: 0x0001, 0x37d6: 0x0001, 0x37d7: 0x0001, + 0x37d8: 0x0001, 0x37d9: 0x0001, 0x37da: 0x0001, 0x37db: 0x0001, 0x37dc: 0x0001, 0x37dd: 0x0001, + 0x37de: 0x0001, 0x37df: 0x0001, 0x37e0: 0x0001, 0x37e1: 0x0001, 0x37e2: 0x0001, 0x37e3: 0x0001, + 0x37e4: 0x0001, 0x37e5: 0x0001, 0x37e6: 
0x0001, 0x37e7: 0x0001, 0x37e8: 0x0001, 0x37e9: 0x0001, + 0x37ea: 0x0001, 0x37eb: 0x0001, 0x37ec: 0x0001, 0x37ed: 0x0001, 0x37ee: 0x0001, 0x37ef: 0x0001, + 0x37f0: 0x0001, 0x37f1: 0x0001, 0x37f2: 0x0001, 0x37f3: 0x0001, 0x37f4: 0x0001, 0x37f5: 0x0001, + 0x37f6: 0x0001, 0x37f7: 0x0001, 0x37f8: 0x0001, 0x37f9: 0x0001, 0x37fa: 0x0001, 0x37fb: 0x0001, + 0x37fc: 0x0001, 0x37fd: 0x0001, 0x37fe: 0x0001, 0x37ff: 0x0001, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000d, 0x3801: 0x000d, 0x3802: 0x000d, 0x3803: 0x000d, 0x3804: 0x000d, 0x3805: 0x000d, + 0x3806: 0x000d, 0x3807: 0x000d, 0x3808: 0x000d, 0x3809: 0x000d, 0x380a: 0x000d, 0x380b: 0x000d, + 0x380c: 0x000d, 0x380d: 0x000d, 0x380e: 0x000d, 0x380f: 0x000d, 0x3810: 0x0001, 0x3811: 0x0001, + 0x3812: 0x0001, 0x3813: 0x0001, 0x3814: 0x0001, 0x3815: 0x0001, 0x3816: 0x0001, 0x3817: 0x0001, + 0x3818: 0x0001, 0x3819: 0x0001, 0x381a: 0x0001, 0x381b: 0x0001, 0x381c: 0x0001, 0x381d: 0x0001, + 0x381e: 0x0001, 0x381f: 0x0001, 0x3820: 0x0001, 0x3821: 0x0001, 0x3822: 0x0001, 0x3823: 0x0001, + 0x3824: 0x0001, 0x3825: 0x0001, 0x3826: 0x0001, 0x3827: 0x0001, 0x3828: 0x0001, 0x3829: 0x0001, + 0x382a: 0x0001, 0x382b: 0x0001, 0x382c: 0x0001, 0x382d: 0x0001, 0x382e: 0x0001, 0x382f: 0x0001, + 0x3830: 0x0001, 0x3831: 0x0001, 0x3832: 0x0001, 0x3833: 0x0001, 0x3834: 0x0001, 0x3835: 0x0001, + 0x3836: 0x0001, 0x3837: 0x0001, 0x3838: 0x0001, 0x3839: 0x0001, 0x383a: 0x0001, 0x383b: 0x0001, + 0x383c: 0x0001, 0x383d: 0x0001, 0x383e: 0x0001, 0x383f: 0x0001, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000d, 0x3841: 0x000d, 0x3842: 0x000d, 0x3843: 0x000d, 0x3844: 0x000d, 0x3845: 0x000d, + 0x3846: 0x000d, 0x3847: 0x000d, 0x3848: 0x000d, 0x3849: 0x000d, 0x384a: 0x000d, 0x384b: 0x000d, + 0x384c: 0x000d, 0x384d: 0x000d, 0x384e: 0x000d, 0x384f: 0x000d, 0x3850: 0x000d, 0x3851: 0x000d, + 0x3852: 0x000d, 0x3853: 0x000d, 0x3854: 0x000d, 0x3855: 0x000d, 0x3856: 0x000d, 0x3857: 0x000d, + 0x3858: 0x000d, 0x3859: 0x000d, 0x385a: 0x000d, 0x385b: 0x000d, 0x385c: 0x000d, 0x385d: 0x000d, + 0x385e: 0x000d, 0x385f: 0x000d, 0x3860: 0x000d, 0x3861: 0x000d, 0x3862: 0x000d, 0x3863: 0x000d, + 0x3864: 0x000d, 0x3865: 0x000d, 0x3866: 0x000d, 0x3867: 0x000d, 0x3868: 0x000d, 0x3869: 0x000d, + 0x386a: 0x000d, 0x386b: 0x000d, 0x386c: 0x000d, 0x386d: 0x000d, 0x386e: 0x000d, 0x386f: 0x000d, + 0x3870: 0x000a, 0x3871: 0x000a, 0x3872: 0x000d, 0x3873: 0x000d, 0x3874: 0x000d, 0x3875: 0x000d, + 0x3876: 0x000d, 0x3877: 0x000d, 0x3878: 0x000d, 0x3879: 0x000d, 0x387a: 0x000d, 0x387b: 0x000d, + 0x387c: 0x000d, 0x387d: 0x000d, 0x387e: 0x000d, 0x387f: 0x000d, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 
0x38be: 0x000a, 0x38bf: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, + 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, + 0x38f1: 0x000a, 0x38f2: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38f7: 0x000a, 0x38f8: 0x000a, 0x38f9: 0x000a, 0x38fa: 0x000a, 0x38fb: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a, + 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a, + 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x0002, 0x3941: 0x0002, 0x3942: 0x0002, 0x3943: 0x0002, 0x3944: 0x0002, 0x3945: 0x0002, + 0x3946: 0x0002, 0x3947: 0x0002, 0x3948: 0x0002, 0x3949: 0x0002, 0x394a: 0x0002, 0x394b: 0x000a, + 0x394c: 0x000a, 0x394d: 0x000a, 0x394e: 0x000a, 0x394f: 0x000a, + 0x396f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, 0x39ae: 0x000a, 0x39af: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39ed: 0x000a, + // Block 0xe8, offset 0x3a00 + 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a, + 0x3a24: 0x000a, 0x3a25: 0x000a, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a, + 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 0x000a, + 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a, + 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a, + 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a, 0x3a63: 0x000a, + 0x3a64: 0x000a, 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a, + 0x3a6a: 0x000a, 0x3a6b: 0x000a, 0x3a6c: 0x000a, + 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a, + 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a, + 0x3a7c: 0x000a, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a, + 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a, 0x3a8b: 0x000a, + 0x3a8c: 0x000a, 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 
0x000a, + 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a, + 0x3a98: 0x000a, + 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a, + 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a, + 0x3aaa: 0x000a, 0x3aab: 0x000a, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a, + 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a, + 0x3ad0: 0x000a, 0x3ad1: 0x000a, + 0x3ad2: 0x000a, 0x3ad3: 0x000a, 0x3ad4: 0x000a, 0x3ad5: 0x000a, 0x3ad6: 0x000a, 0x3ad7: 0x000a, + 0x3ad8: 0x000a, 0x3ad9: 0x000a, 0x3ada: 0x000a, 0x3adb: 0x000a, 0x3adc: 0x000a, 0x3add: 0x000a, + 0x3ade: 0x000a, 0x3adf: 0x000a, 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a, + 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a, + 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a, 0x3aee: 0x000a, 0x3aef: 0x000a, + 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a, 0x3af4: 0x000a, 0x3af5: 0x000a, + 0x3af6: 0x000a, 0x3af7: 0x000a, 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a, 0x3afb: 0x000a, + 0x3afc: 0x000a, 0x3afd: 0x000a, 0x3afe: 0x000a, 0x3aff: 0x000a, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a, 0x3b03: 0x000a, 0x3b04: 0x000a, 0x3b05: 0x000a, + 0x3b06: 0x000a, 0x3b07: 0x000a, + 0x3b10: 0x000a, 0x3b11: 0x000a, + 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a, 0x3b16: 0x000a, 0x3b17: 0x000a, + 0x3b18: 0x000a, 0x3b19: 0x000a, + 0x3b20: 0x000a, 0x3b21: 0x000a, 0x3b22: 0x000a, 0x3b23: 0x000a, + 0x3b24: 0x000a, 0x3b25: 0x000a, 0x3b26: 0x000a, 0x3b27: 0x000a, 0x3b28: 0x000a, 0x3b29: 0x000a, + 0x3b2a: 0x000a, 0x3b2b: 0x000a, 0x3b2c: 0x000a, 0x3b2d: 0x000a, 0x3b2e: 0x000a, 0x3b2f: 0x000a, + 0x3b30: 0x000a, 0x3b31: 0x000a, 0x3b32: 0x000a, 0x3b33: 0x000a, 0x3b34: 0x000a, 0x3b35: 0x000a, + 0x3b36: 0x000a, 0x3b37: 0x000a, 0x3b38: 0x000a, 0x3b39: 0x000a, 0x3b3a: 0x000a, 0x3b3b: 0x000a, + 0x3b3c: 0x000a, 0x3b3d: 0x000a, 0x3b3e: 0x000a, 0x3b3f: 0x000a, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x000a, 0x3b41: 0x000a, 0x3b42: 0x000a, 0x3b43: 0x000a, 0x3b44: 0x000a, 0x3b45: 0x000a, + 0x3b46: 0x000a, 0x3b47: 0x000a, + 0x3b50: 0x000a, 0x3b51: 0x000a, + 0x3b52: 0x000a, 0x3b53: 0x000a, 0x3b54: 0x000a, 0x3b55: 0x000a, 0x3b56: 0x000a, 0x3b57: 0x000a, + 0x3b58: 0x000a, 0x3b59: 0x000a, 0x3b5a: 0x000a, 0x3b5b: 0x000a, 0x3b5c: 0x000a, 0x3b5d: 0x000a, + 0x3b5e: 0x000a, 0x3b5f: 0x000a, 0x3b60: 0x000a, 0x3b61: 0x000a, 0x3b62: 0x000a, 0x3b63: 0x000a, + 0x3b64: 0x000a, 0x3b65: 0x000a, 0x3b66: 0x000a, 0x3b67: 0x000a, 0x3b68: 0x000a, 0x3b69: 0x000a, + 0x3b6a: 0x000a, 0x3b6b: 0x000a, 0x3b6c: 0x000a, 0x3b6d: 0x000a, + 0x3b70: 0x000a, 0x3b71: 0x000a, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000a, 0x3b81: 0x000a, 0x3b82: 0x000a, 0x3b83: 0x000a, 0x3b84: 0x000a, 0x3b85: 0x000a, + 0x3b86: 0x000a, 0x3b87: 0x000a, 0x3b88: 0x000a, 0x3b89: 0x000a, 0x3b8a: 0x000a, 0x3b8b: 0x000a, + 0x3b8c: 0x000a, 0x3b8d: 0x000a, 0x3b8e: 0x000a, 0x3b8f: 0x000a, 0x3b90: 0x000a, 0x3b91: 0x000a, + 0x3b92: 0x000a, 0x3b93: 0x000a, 0x3b94: 0x000a, 0x3b95: 0x000a, 0x3b96: 0x000a, 0x3b97: 0x000a, + 0x3b98: 0x000a, 0x3b99: 0x000a, 0x3b9a: 0x000a, 0x3b9b: 0x000a, 0x3b9c: 0x000a, 0x3b9d: 0x000a, + 0x3b9e: 0x000a, 0x3b9f: 0x000a, 0x3ba0: 0x000a, 0x3ba1: 0x000a, 0x3ba2: 0x000a, 0x3ba3: 0x000a, + 0x3ba4: 0x000a, 0x3ba5: 0x000a, 0x3ba6: 0x000a, 0x3ba7: 
0x000a, 0x3ba8: 0x000a, 0x3ba9: 0x000a, + 0x3baa: 0x000a, 0x3bab: 0x000a, 0x3bac: 0x000a, 0x3bad: 0x000a, 0x3bae: 0x000a, 0x3baf: 0x000a, + 0x3bb0: 0x000a, 0x3bb1: 0x000a, 0x3bb2: 0x000a, 0x3bb3: 0x000a, 0x3bb4: 0x000a, 0x3bb5: 0x000a, + 0x3bb6: 0x000a, 0x3bb7: 0x000a, 0x3bb8: 0x000a, 0x3bba: 0x000a, 0x3bbb: 0x000a, + 0x3bbc: 0x000a, 0x3bbd: 0x000a, 0x3bbe: 0x000a, 0x3bbf: 0x000a, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000a, 0x3bc1: 0x000a, 0x3bc2: 0x000a, 0x3bc3: 0x000a, 0x3bc4: 0x000a, 0x3bc5: 0x000a, + 0x3bc6: 0x000a, 0x3bc7: 0x000a, 0x3bc8: 0x000a, 0x3bc9: 0x000a, 0x3bca: 0x000a, 0x3bcb: 0x000a, + 0x3bcd: 0x000a, 0x3bce: 0x000a, 0x3bcf: 0x000a, 0x3bd0: 0x000a, 0x3bd1: 0x000a, + 0x3bd2: 0x000a, 0x3bd3: 0x000a, 0x3bd4: 0x000a, 0x3bd5: 0x000a, 0x3bd6: 0x000a, 0x3bd7: 0x000a, + 0x3bd8: 0x000a, 0x3bd9: 0x000a, 0x3bda: 0x000a, 0x3bdb: 0x000a, 0x3bdc: 0x000a, 0x3bdd: 0x000a, + 0x3bde: 0x000a, 0x3bdf: 0x000a, 0x3be0: 0x000a, 0x3be1: 0x000a, 0x3be2: 0x000a, 0x3be3: 0x000a, + 0x3be4: 0x000a, 0x3be5: 0x000a, 0x3be6: 0x000a, 0x3be7: 0x000a, 0x3be8: 0x000a, 0x3be9: 0x000a, + 0x3bea: 0x000a, 0x3beb: 0x000a, 0x3bec: 0x000a, 0x3bed: 0x000a, 0x3bee: 0x000a, 0x3bef: 0x000a, + 0x3bf0: 0x000a, 0x3bf1: 0x000a, 0x3bf2: 0x000a, 0x3bf3: 0x000a, 0x3bf4: 0x000a, 0x3bf5: 0x000a, + 0x3bf6: 0x000a, 0x3bf7: 0x000a, 0x3bf8: 0x000a, 0x3bf9: 0x000a, 0x3bfa: 0x000a, 0x3bfb: 0x000a, + 0x3bfc: 0x000a, 0x3bfd: 0x000a, 0x3bfe: 0x000a, 0x3bff: 0x000a, + // Block 0xf0, offset 0x3c00 + 0x3c00: 0x000a, 0x3c01: 0x000a, 0x3c02: 0x000a, 0x3c03: 0x000a, 0x3c04: 0x000a, 0x3c05: 0x000a, + 0x3c06: 0x000a, 0x3c07: 0x000a, 0x3c08: 0x000a, 0x3c09: 0x000a, 0x3c0a: 0x000a, 0x3c0b: 0x000a, + 0x3c0c: 0x000a, 0x3c0d: 0x000a, 0x3c0e: 0x000a, 0x3c0f: 0x000a, 0x3c10: 0x000a, 0x3c11: 0x000a, + 0x3c12: 0x000a, 0x3c13: 0x000a, + 0x3c20: 0x000a, 0x3c21: 0x000a, 0x3c22: 0x000a, 0x3c23: 0x000a, + 0x3c24: 0x000a, 0x3c25: 0x000a, 0x3c26: 0x000a, 0x3c27: 0x000a, 0x3c28: 0x000a, 0x3c29: 0x000a, + 0x3c2a: 0x000a, 0x3c2b: 0x000a, 0x3c2c: 0x000a, 0x3c2d: 0x000a, + 0x3c30: 0x000a, 0x3c31: 0x000a, 0x3c32: 0x000a, 0x3c33: 0x000a, 0x3c34: 0x000a, + 0x3c38: 0x000a, 0x3c39: 0x000a, 0x3c3a: 0x000a, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x000a, 0x3c41: 0x000a, 0x3c42: 0x000a, 0x3c43: 0x000a, 0x3c44: 0x000a, 0x3c45: 0x000a, + 0x3c46: 0x000a, + 0x3c50: 0x000a, 0x3c51: 0x000a, + 0x3c52: 0x000a, 0x3c53: 0x000a, 0x3c54: 0x000a, 0x3c55: 0x000a, 0x3c56: 0x000a, 0x3c57: 0x000a, + 0x3c58: 0x000a, 0x3c59: 0x000a, 0x3c5a: 0x000a, 0x3c5b: 0x000a, 0x3c5c: 0x000a, 0x3c5d: 0x000a, + 0x3c5e: 0x000a, 0x3c5f: 0x000a, 0x3c60: 0x000a, 0x3c61: 0x000a, 0x3c62: 0x000a, 0x3c63: 0x000a, + 0x3c64: 0x000a, 0x3c65: 0x000a, 0x3c66: 0x000a, 0x3c67: 0x000a, 0x3c68: 0x000a, + 0x3c70: 0x000a, 0x3c71: 0x000a, 0x3c72: 0x000a, 0x3c73: 0x000a, 0x3c74: 0x000a, 0x3c75: 0x000a, + 0x3c76: 0x000a, + // Block 0xf2, offset 0x3c80 + 0x3c80: 0x000a, 0x3c81: 0x000a, 0x3c82: 0x000a, + 0x3c90: 0x000a, 0x3c91: 0x000a, + 0x3c92: 0x000a, 0x3c93: 0x000a, 0x3c94: 0x000a, 0x3c95: 0x000a, 0x3c96: 0x000a, + // Block 0xf3, offset 0x3cc0 + 0x3cc0: 0x000a, 0x3cc1: 0x000a, 0x3cc2: 0x000a, 0x3cc3: 0x000a, 0x3cc4: 0x000a, 0x3cc5: 0x000a, + 0x3cc6: 0x000a, 0x3cc7: 0x000a, 0x3cc8: 0x000a, 0x3cc9: 0x000a, 0x3cca: 0x000a, 0x3ccb: 0x000a, + 0x3ccc: 0x000a, 0x3ccd: 0x000a, 0x3cce: 0x000a, 0x3ccf: 0x000a, 0x3cd0: 0x000a, 0x3cd1: 0x000a, + 0x3cd2: 0x000a, 0x3cd4: 0x000a, 0x3cd5: 0x000a, 0x3cd6: 0x000a, 0x3cd7: 0x000a, + 0x3cd8: 0x000a, 0x3cd9: 0x000a, 0x3cda: 0x000a, 0x3cdb: 0x000a, 0x3cdc: 0x000a, 0x3cdd: 0x000a, + 0x3cde: 
0x000a, 0x3cdf: 0x000a, 0x3ce0: 0x000a, 0x3ce1: 0x000a, 0x3ce2: 0x000a, 0x3ce3: 0x000a, + 0x3ce4: 0x000a, 0x3ce5: 0x000a, 0x3ce6: 0x000a, 0x3ce7: 0x000a, 0x3ce8: 0x000a, 0x3ce9: 0x000a, + 0x3cea: 0x000a, 0x3ceb: 0x000a, 0x3cec: 0x000a, 0x3ced: 0x000a, 0x3cee: 0x000a, 0x3cef: 0x000a, + 0x3cf0: 0x000a, 0x3cf1: 0x000a, 0x3cf2: 0x000a, 0x3cf3: 0x000a, 0x3cf4: 0x000a, 0x3cf5: 0x000a, + 0x3cf6: 0x000a, 0x3cf7: 0x000a, 0x3cf8: 0x000a, 0x3cf9: 0x000a, 0x3cfa: 0x000a, 0x3cfb: 0x000a, + 0x3cfc: 0x000a, 0x3cfd: 0x000a, 0x3cfe: 0x000a, 0x3cff: 0x000a, + // Block 0xf4, offset 0x3d00 + 0x3d00: 0x000a, 0x3d01: 0x000a, 0x3d02: 0x000a, 0x3d03: 0x000a, 0x3d04: 0x000a, 0x3d05: 0x000a, + 0x3d06: 0x000a, 0x3d07: 0x000a, 0x3d08: 0x000a, 0x3d09: 0x000a, 0x3d0a: 0x000a, + 0x3d30: 0x0002, 0x3d31: 0x0002, 0x3d32: 0x0002, 0x3d33: 0x0002, 0x3d34: 0x0002, 0x3d35: 0x0002, + 0x3d36: 0x0002, 0x3d37: 0x0002, 0x3d38: 0x0002, 0x3d39: 0x0002, + // Block 0xf5, offset 0x3d40 + 0x3d7e: 0x000b, 0x3d7f: 0x000b, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x000b, 0x3d81: 0x000b, 0x3d82: 0x000b, 0x3d83: 0x000b, 0x3d84: 0x000b, 0x3d85: 0x000b, + 0x3d86: 0x000b, 0x3d87: 0x000b, 0x3d88: 0x000b, 0x3d89: 0x000b, 0x3d8a: 0x000b, 0x3d8b: 0x000b, + 0x3d8c: 0x000b, 0x3d8d: 0x000b, 0x3d8e: 0x000b, 0x3d8f: 0x000b, 0x3d90: 0x000b, 0x3d91: 0x000b, + 0x3d92: 0x000b, 0x3d93: 0x000b, 0x3d94: 0x000b, 0x3d95: 0x000b, 0x3d96: 0x000b, 0x3d97: 0x000b, + 0x3d98: 0x000b, 0x3d99: 0x000b, 0x3d9a: 0x000b, 0x3d9b: 0x000b, 0x3d9c: 0x000b, 0x3d9d: 0x000b, + 0x3d9e: 0x000b, 0x3d9f: 0x000b, 0x3da0: 0x000b, 0x3da1: 0x000b, 0x3da2: 0x000b, 0x3da3: 0x000b, + 0x3da4: 0x000b, 0x3da5: 0x000b, 0x3da6: 0x000b, 0x3da7: 0x000b, 0x3da8: 0x000b, 0x3da9: 0x000b, + 0x3daa: 0x000b, 0x3dab: 0x000b, 0x3dac: 0x000b, 0x3dad: 0x000b, 0x3dae: 0x000b, 0x3daf: 0x000b, + 0x3db0: 0x000b, 0x3db1: 0x000b, 0x3db2: 0x000b, 0x3db3: 0x000b, 0x3db4: 0x000b, 0x3db5: 0x000b, + 0x3db6: 0x000b, 0x3db7: 0x000b, 0x3db8: 0x000b, 0x3db9: 0x000b, 0x3dba: 0x000b, 0x3dbb: 0x000b, + 0x3dbc: 0x000b, 0x3dbd: 0x000b, 0x3dbe: 0x000b, 0x3dbf: 0x000b, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x000c, 0x3dc1: 0x000c, 0x3dc2: 0x000c, 0x3dc3: 0x000c, 0x3dc4: 0x000c, 0x3dc5: 0x000c, + 0x3dc6: 0x000c, 0x3dc7: 0x000c, 0x3dc8: 0x000c, 0x3dc9: 0x000c, 0x3dca: 0x000c, 0x3dcb: 0x000c, + 0x3dcc: 0x000c, 0x3dcd: 0x000c, 0x3dce: 0x000c, 0x3dcf: 0x000c, 0x3dd0: 0x000c, 0x3dd1: 0x000c, + 0x3dd2: 0x000c, 0x3dd3: 0x000c, 0x3dd4: 0x000c, 0x3dd5: 0x000c, 0x3dd6: 0x000c, 0x3dd7: 0x000c, + 0x3dd8: 0x000c, 0x3dd9: 0x000c, 0x3dda: 0x000c, 0x3ddb: 0x000c, 0x3ddc: 0x000c, 0x3ddd: 0x000c, + 0x3dde: 0x000c, 0x3ddf: 0x000c, 0x3de0: 0x000c, 0x3de1: 0x000c, 0x3de2: 0x000c, 0x3de3: 0x000c, + 0x3de4: 0x000c, 0x3de5: 0x000c, 0x3de6: 0x000c, 0x3de7: 0x000c, 0x3de8: 0x000c, 0x3de9: 0x000c, + 0x3dea: 0x000c, 0x3deb: 0x000c, 0x3dec: 0x000c, 0x3ded: 0x000c, 0x3dee: 0x000c, 0x3def: 0x000c, + 0x3df0: 0x000b, 0x3df1: 0x000b, 0x3df2: 0x000b, 0x3df3: 0x000b, 0x3df4: 0x000b, 0x3df5: 0x000b, + 0x3df6: 0x000b, 0x3df7: 0x000b, 0x3df8: 0x000b, 0x3df9: 0x000b, 0x3dfa: 0x000b, 0x3dfb: 0x000b, + 0x3dfc: 0x000b, 0x3dfd: 0x000b, 0x3dfe: 0x000b, 0x3dff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x136: 0x28, 0x137: 0x29, + 0x138: 0x2a, 0x139: 0x2b, 0x13a: 0x2c, 0x13b: 0x2d, 0x13c: 0x2e, 0x13d: 0x2f, 0x13e: 0x30, 0x13f: 0x31, + // Block 0x5, offset 0x140 + 0x140: 0x32, 0x141: 0x33, 0x142: 0x34, + 0x14d: 0x35, 0x14e: 0x36, + 0x150: 0x37, + 0x15a: 0x38, 0x15c: 0x39, 0x15d: 0x3a, 0x15e: 0x3b, 0x15f: 0x3c, + 0x160: 0x3d, 0x162: 0x3e, 0x164: 0x3f, 0x165: 0x40, 0x167: 0x41, + 0x168: 0x42, 0x169: 0x43, 0x16a: 0x44, 0x16b: 0x45, 0x16c: 0x46, 0x16d: 0x47, 0x16e: 0x48, 0x16f: 0x49, + 0x170: 0x4a, 0x173: 0x4b, 0x177: 0x4c, + 0x17e: 0x4d, 0x17f: 0x4e, + // Block 0x6, offset 0x180 + 0x180: 0x4f, 0x181: 0x50, 0x182: 0x51, 0x183: 0x52, 0x184: 0x53, 0x185: 0x54, 0x186: 0x55, 0x187: 0x56, + 0x188: 0x57, 0x189: 0x56, 0x18a: 0x56, 0x18b: 0x56, 0x18c: 0x58, 0x18d: 0x59, 0x18e: 0x5a, 0x18f: 0x56, + 0x190: 0x5b, 0x191: 0x5c, 0x192: 0x5d, 0x193: 0x5e, 0x194: 0x56, 0x195: 0x56, 0x196: 0x56, 0x197: 0x56, + 0x198: 0x56, 0x199: 0x56, 0x19a: 0x5f, 0x19b: 0x56, 0x19c: 0x56, 0x19d: 0x60, 0x19e: 0x56, 0x19f: 0x61, + 0x1a4: 0x56, 0x1a5: 0x56, 0x1a6: 0x62, 0x1a7: 0x63, + 0x1a8: 0x56, 0x1a9: 0x56, 0x1aa: 0x56, 0x1ab: 0x56, 0x1ac: 0x56, 0x1ad: 0x64, 0x1ae: 0x65, 0x1af: 0x56, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x56, 0x1bd: 0x56, 0x1be: 0x56, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x56, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26d: 0x8b, 0x26f: 0x8c, + // Block 0xa, offset 0x280 + 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8f, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x90, + 0x2b8: 0x91, 0x2b9: 0x92, 0x2ba: 0x0e, 0x2bb: 0x93, 0x2bc: 0x94, 0x2bd: 0x95, 0x2bf: 0x96, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x97, 0x2c5: 0x56, 0x2c6: 0x98, 0x2c7: 0x99, + 0x2cb: 0x9a, 0x2cd: 0x9b, + 0x2e0: 0x9c, 0x2e1: 0x9c, 0x2e2: 0x9c, 0x2e3: 0x9c, 0x2e4: 0x9d, 0x2e5: 0x9c, 0x2e6: 0x9c, 0x2e7: 0x9c, + 0x2e8: 0x9e, 0x2e9: 0x9c, 0x2ea: 0x9c, 0x2eb: 0x9f, 0x2ec: 0xa0, 0x2ed: 0x9c, 0x2ee: 0x9c, 0x2ef: 0x9c, + 0x2f0: 0x9c, 0x2f1: 0x9c, 0x2f2: 0x9c, 0x2f3: 0x9c, 0x2f4: 0xa1, 0x2f5: 0x9c, 0x2f6: 0x9c, 0x2f7: 0x9c, + 0x2f8: 0x9c, 0x2f9: 0xa2, 0x2fa: 0xa3, 0x2fb: 0x9c, 0x2fc: 0xa4, 0x2fd: 0xa5, 0x2fe: 0x9c, 0x2ff: 0x9c, + // Block 0xc, offset 0x300 + 0x300: 0xa6, 0x301: 0xa7, 
0x302: 0xa8, 0x304: 0xa9, 0x305: 0xaa, 0x306: 0xab, 0x307: 0xac, + 0x308: 0xad, 0x30b: 0xae, 0x30c: 0x26, 0x30d: 0xaf, + 0x310: 0xb0, 0x311: 0xb1, 0x312: 0xb2, 0x313: 0xb3, 0x316: 0xb4, 0x317: 0xb5, + 0x318: 0xb6, 0x319: 0xb7, 0x31a: 0xb8, 0x31c: 0xb9, + 0x320: 0xba, 0x324: 0xbb, 0x325: 0xbc, 0x327: 0xbd, + 0x328: 0xbe, 0x329: 0xbf, 0x32a: 0xc0, + 0x330: 0xc1, 0x332: 0xc2, 0x334: 0xc3, 0x335: 0xc4, 0x336: 0xc5, + 0x33b: 0xc6, 0x33f: 0xc7, + // Block 0xd, offset 0x340 + 0x36b: 0xc8, 0x36c: 0xc9, + 0x37d: 0xca, 0x37e: 0xcb, 0x37f: 0xcc, + // Block 0xe, offset 0x380 + 0x3b2: 0xcd, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xce, 0x3c6: 0xcf, + 0x3c8: 0x56, 0x3c9: 0xd0, 0x3cc: 0x56, 0x3cd: 0xd1, + 0x3db: 0xd2, 0x3dc: 0xd3, 0x3dd: 0xd4, 0x3de: 0xd5, 0x3df: 0xd6, + 0x3e8: 0xd7, 0x3e9: 0xd8, 0x3ea: 0xd9, + // Block 0x10, offset 0x400 + 0x400: 0xda, 0x404: 0xc9, + 0x40b: 0xdb, + 0x420: 0x9c, 0x421: 0x9c, 0x422: 0x9c, 0x423: 0xdc, 0x424: 0x9c, 0x425: 0xdd, 0x426: 0x9c, 0x427: 0x9c, + 0x428: 0x9c, 0x429: 0x9c, 0x42a: 0x9c, 0x42b: 0x9c, 0x42c: 0x9c, 0x42d: 0x9c, 0x42e: 0x9c, 0x42f: 0x9c, + 0x430: 0x9c, 0x431: 0xa4, 0x432: 0x0e, 0x433: 0x9c, 0x434: 0x0e, 0x435: 0xde, 0x436: 0x9c, 0x437: 0x9c, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xdf, 0x43c: 0x9c, 0x43d: 0x9c, 0x43e: 0x9c, 0x43f: 0x9c, + // Block 0x11, offset 0x440 + 0x440: 0xe0, 0x441: 0x56, 0x442: 0xe1, 0x443: 0xe2, 0x444: 0xe3, 0x445: 0xe4, 0x446: 0xe5, + 0x449: 0xe6, 0x44c: 0x56, 0x44d: 0x56, 0x44e: 0x56, 0x44f: 0x56, + 0x450: 0x56, 0x451: 0x56, 0x452: 0x56, 0x453: 0x56, 0x454: 0x56, 0x455: 0x56, 0x456: 0x56, 0x457: 0x56, + 0x458: 0x56, 0x459: 0x56, 0x45a: 0x56, 0x45b: 0xe7, 0x45c: 0x56, 0x45d: 0x6c, 0x45e: 0x56, 0x45f: 0xe8, + 0x460: 0xe9, 0x461: 0xea, 0x462: 0xeb, 0x464: 0x56, 0x465: 0xec, 0x466: 0x56, 0x467: 0xed, + 0x468: 0x56, 0x469: 0xee, 0x46a: 0xef, 0x46b: 0xf0, 0x46c: 0x56, 0x46d: 0x56, 0x46e: 0xf1, 0x46f: 0xf2, + 0x47f: 0xf3, + // Block 0x12, offset 0x480 + 0x4bf: 0xf3, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xf4, 0x541: 0xf4, 0x542: 0xf4, 0x543: 0xf4, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xf5, + 0x548: 0xf4, 0x549: 0xf4, 0x54a: 0xf4, 0x54b: 0xf4, 0x54c: 0xf4, 0x54d: 0xf4, 0x54e: 0xf4, 0x54f: 0xf4, + 0x550: 0xf4, 0x551: 0xf4, 0x552: 0xf4, 0x553: 0xf4, 0x554: 0xf4, 0x555: 0xf4, 0x556: 0xf4, 0x557: 0xf4, + 0x558: 0xf4, 0x559: 0xf4, 0x55a: 0xf4, 0x55b: 0xf4, 0x55c: 0xf4, 0x55d: 0xf4, 0x55e: 0xf4, 0x55f: 0xf4, + 0x560: 0xf4, 0x561: 0xf4, 0x562: 0xf4, 0x563: 0xf4, 0x564: 0xf4, 0x565: 0xf4, 0x566: 0xf4, 0x567: 0xf4, + 0x568: 0xf4, 0x569: 0xf4, 0x56a: 0xf4, 0x56b: 0xf4, 0x56c: 0xf4, 0x56d: 0xf4, 0x56e: 0xf4, 0x56f: 0xf4, + 0x570: 0xf4, 0x571: 0xf4, 0x572: 0xf4, 0x573: 0xf4, 0x574: 0xf4, 0x575: 0xf4, 0x576: 0xf4, 0x577: 0xf4, + 0x578: 0xf4, 0x579: 0xf4, 0x57a: 0xf4, 0x57b: 0xf4, 0x57c: 0xf4, 0x57d: 0xf4, 0x57e: 0xf4, 0x57f: 0xf4, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 17464 bytes (17KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index 0ca0193eb..f517fdb20 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ 
b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build !go1.10 // +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 26fbd55a1..f5a078827 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.10 && !go1.13 // +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 2c58f09ba..cb7239c43 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. +//go:build go1.13 && !go1.14 // +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 10f5202c6..11b273300 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,6 +1,7 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.14 +//go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go new file mode 100644 index 000000000..96a130d30 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -0,0 +1,7761 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 +// +build go1.16 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "13.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [56]uint8{ + 0, 1, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 84, 91, 103, 107, 118, 122, 129, + 130, 132, 202, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x1870 + firstCCC = 0x2CAB + endMulti = 0x2F77 + firstLeadingCCC = 0x49C5 + firstCCCZeroExcept = 0x4A8F + firstStarterWithNLead = 0x4AB6 + lastDecomp = 0x4AB8 + maxDecomp = 0x8000 +) + +// decomps: 19128 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x8D, 0x42, 0xCA, 0x90, 0x42, 0xCA, 0x91, 0x42, + 0xCA, 0x92, 0x42, 0xCA, 0x95, 0x42, 0xCA, 0x9D, + 0x42, 0xCA, 0x9F, 0x42, 0xCA, 0xB9, 0x42, 0xCE, + 0x91, 0x42, 0xCE, 0x92, 0x42, 0xCE, 0x93, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x94, 0x42, 0xCE, 0x95, 0x42, 0xCE, 0x96, + 0x42, 0xCE, 0x97, 0x42, 0xCE, 0x98, 0x42, 0xCE, + 0x99, 0x42, 0xCE, 0x9A, 0x42, 0xCE, 0x9B, 0x42, + 0xCE, 0x9C, 0x42, 0xCE, 0x9D, 0x42, 0xCE, 0x9E, + 0x42, 0xCE, 0x9F, 0x42, 0xCE, 0xA0, 0x42, 0xCE, + 0xA1, 0x42, 0xCE, 0xA3, 0x42, 0xCE, 0xA4, 0x42, + 0xCE, 0xA5, 0x42, 0xCE, 0xA6, 0x42, 0xCE, 0xA7, + 0x42, 0xCE, 0xA8, 0x42, 0xCE, 0xA9, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB1, 0x42, 0xCE, 0xB2, 0x42, 0xCE, 0xB3, 0x42, + 0xCE, 0xB4, 0x42, 0xCE, 0xB5, 0x42, 0xCE, 0xB6, + 0x42, 0xCE, 0xB7, 0x42, 0xCE, 0xB8, 0x42, 0xCE, + 0xB9, 0x42, 0xCE, 0xBA, 0x42, 0xCE, 0xBB, 0x42, + 0xCE, 0xBC, 0x42, 0xCE, 0xBD, 0x42, 0xCE, 0xBE, + 0x42, 0xCE, 0xBF, 0x42, 0xCF, 0x80, 0x42, 0xCF, + 0x81, 0x42, 0xCF, 0x82, 0x42, 0xCF, 0x83, 0x42, + 0xCF, 0x84, 0x42, 0xCF, 0x85, 0x42, 0xCF, 0x86, + // Bytes 200 - 23f + 0x42, 0xCF, 0x87, 0x42, 0xCF, 0x88, 0x42, 0xCF, + 0x89, 0x42, 0xCF, 0x9C, 0x42, 0xCF, 0x9D, 0x42, + 0xD0, 0xBD, 0x42, 0xD1, 0x8A, 0x42, 0xD1, 0x8C, + 0x42, 0xD7, 0x90, 0x42, 0xD7, 0x91, 0x42, 0xD7, + 0x92, 0x42, 0xD7, 0x93, 0x42, 0xD7, 0x94, 0x42, + 0xD7, 0x9B, 0x42, 0xD7, 0x9C, 0x42, 0xD7, 0x9D, + 0x42, 0xD7, 0xA2, 0x42, 0xD7, 0xA8, 0x42, 0xD7, + 0xAA, 0x42, 0xD8, 0xA1, 0x42, 0xD8, 0xA7, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA8, 0x42, 0xD8, 0xA9, 0x42, 0xD8, 0xAA, + 0x42, 0xD8, 0xAB, 0x42, 0xD8, 0xAC, 0x42, 0xD8, + 0xAD, 0x42, 0xD8, 0xAE, 0x42, 0xD8, 0xAF, 0x42, + 0xD8, 0xB0, 0x42, 0xD8, 0xB1, 0x42, 0xD8, 0xB2, + 0x42, 0xD8, 0xB3, 0x42, 0xD8, 0xB4, 0x42, 0xD8, + 0xB5, 0x42, 0xD8, 0xB6, 0x42, 0xD8, 0xB7, 0x42, + 0xD8, 0xB8, 0x42, 0xD8, 0xB9, 0x42, 0xD8, 0xBA, + 0x42, 0xD9, 0x81, 0x42, 0xD9, 0x82, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x83, 0x42, 0xD9, 0x84, 0x42, 0xD9, 0x85, 0x42, + 0xD9, 0x86, 0x42, 0xD9, 0x87, 0x42, 0xD9, 0x88, + 0x42, 0xD9, 0x89, 0x42, 0xD9, 0x8A, 0x42, 0xD9, + 0xAE, 0x42, 0xD9, 0xAF, 0x42, 0xD9, 0xB1, 0x42, + 0xD9, 0xB9, 0x42, 0xD9, 0xBA, 0x42, 0xD9, 0xBB, + 0x42, 0xD9, 0xBE, 0x42, 0xD9, 0xBF, 0x42, 0xDA, + 0x80, 0x42, 0xDA, 0x83, 0x42, 0xDA, 0x84, 0x42, + 0xDA, 0x86, 0x42, 0xDA, 0x87, 0x42, 0xDA, 0x88, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8C, 0x42, 0xDA, 0x8D, 0x42, 0xDA, + 0x8E, 0x42, 0xDA, 0x91, 0x42, 0xDA, 0x98, 0x42, + 0xDA, 0xA1, 0x42, 0xDA, 0xA4, 0x42, 0xDA, 0xA6, + 0x42, 0xDA, 0xA9, 0x42, 0xDA, 0xAD, 0x42, 0xDA, + 0xAF, 0x42, 0xDA, 0xB1, 0x42, 0xDA, 0xB3, 0x42, + 0xDA, 0xBA, 0x42, 0xDA, 0xBB, 0x42, 0xDA, 0xBE, + 0x42, 0xDB, 0x81, 0x42, 0xDB, 0x85, 0x42, 0xDB, + 0x86, 0x42, 0xDB, 0x87, 0x42, 0xDB, 0x88, 0x42, + // Bytes 300 - 33f + 0xDB, 0x89, 0x42, 0xDB, 0x8B, 0x42, 0xDB, 0x8C, + 0x42, 0xDB, 0x90, 0x42, 0xDB, 0x92, 0x43, 0xE0, + 0xBC, 0x8B, 0x43, 0xE1, 0x83, 0x9C, 0x43, 0xE1, + 0x84, 0x80, 0x43, 0xE1, 0x84, 0x81, 0x43, 0xE1, + 0x84, 0x82, 0x43, 0xE1, 0x84, 0x83, 0x43, 0xE1, + 0x84, 0x84, 0x43, 0xE1, 0x84, 0x85, 0x43, 0xE1, + 0x84, 0x86, 0x43, 0xE1, 0x84, 0x87, 0x43, 0xE1, + 0x84, 0x88, 0x43, 0xE1, 0x84, 0x89, 0x43, 0xE1, + // Bytes 340 - 37f + 0x84, 0x8A, 0x43, 0xE1, 0x84, 0x8B, 0x43, 0xE1, + 0x84, 0x8C, 0x43, 0xE1, 0x84, 0x8D, 0x43, 0xE1, + 0x84, 0x8E, 0x43, 0xE1, 0x84, 0x8F, 0x43, 0xE1, + 0x84, 0x90, 0x43, 0xE1, 0x84, 0x91, 0x43, 0xE1, + 0x84, 0x92, 0x43, 0xE1, 0x84, 0x94, 0x43, 0xE1, + 0x84, 0x95, 0x43, 0xE1, 0x84, 0x9A, 0x43, 0xE1, + 0x84, 0x9C, 0x43, 0xE1, 0x84, 0x9D, 0x43, 0xE1, + 0x84, 0x9E, 0x43, 0xE1, 0x84, 0xA0, 0x43, 0xE1, + // Bytes 380 - 3bf + 0x84, 0xA1, 0x43, 0xE1, 0x84, 0xA2, 0x43, 0xE1, + 0x84, 0xA3, 0x43, 0xE1, 0x84, 0xA7, 0x43, 0xE1, + 0x84, 0xA9, 0x43, 0xE1, 0x84, 0xAB, 0x43, 0xE1, + 0x84, 0xAC, 0x43, 0xE1, 0x84, 0xAD, 0x43, 0xE1, + 0x84, 0xAE, 0x43, 0xE1, 0x84, 0xAF, 0x43, 0xE1, + 0x84, 0xB2, 0x43, 0xE1, 0x84, 0xB6, 0x43, 0xE1, + 0x85, 0x80, 0x43, 0xE1, 0x85, 0x87, 0x43, 0xE1, + 0x85, 0x8C, 0x43, 0xE1, 0x85, 0x97, 0x43, 0xE1, + // Bytes 3c0 - 3ff + 0x85, 0x98, 0x43, 0xE1, 0x85, 0x99, 0x43, 0xE1, + 0x85, 0xA0, 0x43, 0xE1, 0x86, 0x84, 0x43, 0xE1, + 0x86, 0x85, 0x43, 0xE1, 0x86, 0x88, 0x43, 0xE1, + 0x86, 0x91, 0x43, 0xE1, 0x86, 0x92, 
0x43, 0xE1, + 0x86, 0x94, 0x43, 0xE1, 0x86, 0x9E, 0x43, 0xE1, + 0x86, 0xA1, 0x43, 0xE1, 0x87, 0x87, 0x43, 0xE1, + 0x87, 0x88, 0x43, 0xE1, 0x87, 0x8C, 0x43, 0xE1, + 0x87, 0x8E, 0x43, 0xE1, 0x87, 0x93, 0x43, 0xE1, + // Bytes 400 - 43f + 0x87, 0x97, 0x43, 0xE1, 0x87, 0x99, 0x43, 0xE1, + 0x87, 0x9D, 0x43, 0xE1, 0x87, 0x9F, 0x43, 0xE1, + 0x87, 0xB1, 0x43, 0xE1, 0x87, 0xB2, 0x43, 0xE1, + 0xB4, 0x82, 0x43, 0xE1, 0xB4, 0x96, 0x43, 0xE1, + 0xB4, 0x97, 0x43, 0xE1, 0xB4, 0x9C, 0x43, 0xE1, + 0xB4, 0x9D, 0x43, 0xE1, 0xB4, 0xA5, 0x43, 0xE1, + 0xB5, 0xBB, 0x43, 0xE1, 0xB6, 0x85, 0x43, 0xE2, + 0x80, 0x82, 0x43, 0xE2, 0x80, 0x83, 0x43, 0xE2, + // Bytes 440 - 47f + 0x80, 0x90, 0x43, 0xE2, 0x80, 0x93, 0x43, 0xE2, + 0x80, 0x94, 0x43, 0xE2, 0x82, 0xA9, 0x43, 0xE2, + 0x86, 0x90, 0x43, 0xE2, 0x86, 0x91, 0x43, 0xE2, + 0x86, 0x92, 0x43, 0xE2, 0x86, 0x93, 0x43, 0xE2, + 0x88, 0x82, 0x43, 0xE2, 0x88, 0x87, 0x43, 0xE2, + 0x88, 0x91, 0x43, 0xE2, 0x88, 0x92, 0x43, 0xE2, + 0x94, 0x82, 0x43, 0xE2, 0x96, 0xA0, 0x43, 0xE2, + 0x97, 0x8B, 0x43, 0xE2, 0xA6, 0x85, 0x43, 0xE2, + // Bytes 480 - 4bf + 0xA6, 0x86, 0x43, 0xE2, 0xB5, 0xA1, 0x43, 0xE3, + 0x80, 0x81, 0x43, 0xE3, 0x80, 0x82, 0x43, 0xE3, + 0x80, 0x88, 0x43, 0xE3, 0x80, 0x89, 0x43, 0xE3, + 0x80, 0x8A, 0x43, 0xE3, 0x80, 0x8B, 0x43, 0xE3, + 0x80, 0x8C, 0x43, 0xE3, 0x80, 0x8D, 0x43, 0xE3, + 0x80, 0x8E, 0x43, 0xE3, 0x80, 0x8F, 0x43, 0xE3, + 0x80, 0x90, 0x43, 0xE3, 0x80, 0x91, 0x43, 0xE3, + 0x80, 0x92, 0x43, 0xE3, 0x80, 0x94, 0x43, 0xE3, + // Bytes 4c0 - 4ff + 0x80, 0x95, 0x43, 0xE3, 0x80, 0x96, 0x43, 0xE3, + 0x80, 0x97, 0x43, 0xE3, 0x82, 0xA1, 0x43, 0xE3, + 0x82, 0xA2, 0x43, 0xE3, 0x82, 0xA3, 0x43, 0xE3, + 0x82, 0xA4, 0x43, 0xE3, 0x82, 0xA5, 0x43, 0xE3, + 0x82, 0xA6, 0x43, 0xE3, 0x82, 0xA7, 0x43, 0xE3, + 0x82, 0xA8, 0x43, 0xE3, 0x82, 0xA9, 0x43, 0xE3, + 0x82, 0xAA, 0x43, 0xE3, 0x82, 0xAB, 0x43, 0xE3, + 0x82, 0xAD, 0x43, 0xE3, 0x82, 0xAF, 0x43, 0xE3, + // Bytes 500 - 53f + 0x82, 0xB1, 0x43, 0xE3, 0x82, 0xB3, 0x43, 0xE3, + 0x82, 0xB5, 0x43, 0xE3, 0x82, 0xB7, 0x43, 0xE3, + 0x82, 0xB9, 0x43, 0xE3, 0x82, 0xBB, 0x43, 0xE3, + 0x82, 0xBD, 0x43, 0xE3, 0x82, 0xBF, 0x43, 0xE3, + 0x83, 0x81, 0x43, 0xE3, 0x83, 0x83, 0x43, 0xE3, + 0x83, 0x84, 0x43, 0xE3, 0x83, 0x86, 0x43, 0xE3, + 0x83, 0x88, 0x43, 0xE3, 0x83, 0x8A, 0x43, 0xE3, + 0x83, 0x8B, 0x43, 0xE3, 0x83, 0x8C, 0x43, 0xE3, + // Bytes 540 - 57f + 0x83, 0x8D, 0x43, 0xE3, 0x83, 0x8E, 0x43, 0xE3, + 0x83, 0x8F, 0x43, 0xE3, 0x83, 0x92, 0x43, 0xE3, + 0x83, 0x95, 0x43, 0xE3, 0x83, 0x98, 0x43, 0xE3, + 0x83, 0x9B, 0x43, 0xE3, 0x83, 0x9E, 0x43, 0xE3, + 0x83, 0x9F, 0x43, 0xE3, 0x83, 0xA0, 0x43, 0xE3, + 0x83, 0xA1, 0x43, 0xE3, 0x83, 0xA2, 0x43, 0xE3, + 0x83, 0xA3, 0x43, 0xE3, 0x83, 0xA4, 0x43, 0xE3, + 0x83, 0xA5, 0x43, 0xE3, 0x83, 0xA6, 0x43, 0xE3, + // Bytes 580 - 5bf + 0x83, 0xA7, 0x43, 0xE3, 0x83, 0xA8, 0x43, 0xE3, + 0x83, 0xA9, 0x43, 0xE3, 0x83, 0xAA, 0x43, 0xE3, + 0x83, 0xAB, 0x43, 0xE3, 0x83, 0xAC, 0x43, 0xE3, + 0x83, 0xAD, 0x43, 0xE3, 0x83, 0xAF, 0x43, 0xE3, + 0x83, 0xB0, 0x43, 0xE3, 0x83, 0xB1, 0x43, 0xE3, + 0x83, 0xB2, 0x43, 0xE3, 0x83, 0xB3, 0x43, 0xE3, + 0x83, 0xBB, 0x43, 0xE3, 0x83, 0xBC, 0x43, 0xE3, + 0x92, 0x9E, 0x43, 0xE3, 0x92, 0xB9, 0x43, 0xE3, + // Bytes 5c0 - 5ff + 0x92, 0xBB, 0x43, 0xE3, 0x93, 0x9F, 0x43, 0xE3, + 0x94, 0x95, 0x43, 0xE3, 0x9B, 0xAE, 0x43, 0xE3, + 0x9B, 0xBC, 0x43, 0xE3, 0x9E, 0x81, 0x43, 0xE3, + 0xA0, 0xAF, 0x43, 0xE3, 0xA1, 0xA2, 0x43, 0xE3, + 0xA1, 0xBC, 0x43, 0xE3, 0xA3, 0x87, 0x43, 0xE3, + 0xA3, 0xA3, 0x43, 0xE3, 0xA4, 0x9C, 0x43, 0xE3, + 0xA4, 0xBA, 0x43, 0xE3, 0xA8, 0xAE, 0x43, 0xE3, + 0xA9, 0xAC, 0x43, 
0xE3, 0xAB, 0xA4, 0x43, 0xE3, + // Bytes 600 - 63f + 0xAC, 0x88, 0x43, 0xE3, 0xAC, 0x99, 0x43, 0xE3, + 0xAD, 0x89, 0x43, 0xE3, 0xAE, 0x9D, 0x43, 0xE3, + 0xB0, 0x98, 0x43, 0xE3, 0xB1, 0x8E, 0x43, 0xE3, + 0xB4, 0xB3, 0x43, 0xE3, 0xB6, 0x96, 0x43, 0xE3, + 0xBA, 0xAC, 0x43, 0xE3, 0xBA, 0xB8, 0x43, 0xE3, + 0xBC, 0x9B, 0x43, 0xE3, 0xBF, 0xBC, 0x43, 0xE4, + 0x80, 0x88, 0x43, 0xE4, 0x80, 0x98, 0x43, 0xE4, + 0x80, 0xB9, 0x43, 0xE4, 0x81, 0x86, 0x43, 0xE4, + // Bytes 640 - 67f + 0x82, 0x96, 0x43, 0xE4, 0x83, 0xA3, 0x43, 0xE4, + 0x84, 0xAF, 0x43, 0xE4, 0x88, 0x82, 0x43, 0xE4, + 0x88, 0xA7, 0x43, 0xE4, 0x8A, 0xA0, 0x43, 0xE4, + 0x8C, 0x81, 0x43, 0xE4, 0x8C, 0xB4, 0x43, 0xE4, + 0x8D, 0x99, 0x43, 0xE4, 0x8F, 0x95, 0x43, 0xE4, + 0x8F, 0x99, 0x43, 0xE4, 0x90, 0x8B, 0x43, 0xE4, + 0x91, 0xAB, 0x43, 0xE4, 0x94, 0xAB, 0x43, 0xE4, + 0x95, 0x9D, 0x43, 0xE4, 0x95, 0xA1, 0x43, 0xE4, + // Bytes 680 - 6bf + 0x95, 0xAB, 0x43, 0xE4, 0x97, 0x97, 0x43, 0xE4, + 0x97, 0xB9, 0x43, 0xE4, 0x98, 0xB5, 0x43, 0xE4, + 0x9A, 0xBE, 0x43, 0xE4, 0x9B, 0x87, 0x43, 0xE4, + 0xA6, 0x95, 0x43, 0xE4, 0xA7, 0xA6, 0x43, 0xE4, + 0xA9, 0xAE, 0x43, 0xE4, 0xA9, 0xB6, 0x43, 0xE4, + 0xAA, 0xB2, 0x43, 0xE4, 0xAC, 0xB3, 0x43, 0xE4, + 0xAF, 0x8E, 0x43, 0xE4, 0xB3, 0x8E, 0x43, 0xE4, + 0xB3, 0xAD, 0x43, 0xE4, 0xB3, 0xB8, 0x43, 0xE4, + // Bytes 6c0 - 6ff + 0xB5, 0x96, 0x43, 0xE4, 0xB8, 0x80, 0x43, 0xE4, + 0xB8, 0x81, 0x43, 0xE4, 0xB8, 0x83, 0x43, 0xE4, + 0xB8, 0x89, 0x43, 0xE4, 0xB8, 0x8A, 0x43, 0xE4, + 0xB8, 0x8B, 0x43, 0xE4, 0xB8, 0x8D, 0x43, 0xE4, + 0xB8, 0x99, 0x43, 0xE4, 0xB8, 0xA6, 0x43, 0xE4, + 0xB8, 0xA8, 0x43, 0xE4, 0xB8, 0xAD, 0x43, 0xE4, + 0xB8, 0xB2, 0x43, 0xE4, 0xB8, 0xB6, 0x43, 0xE4, + 0xB8, 0xB8, 0x43, 0xE4, 0xB8, 0xB9, 0x43, 0xE4, + // Bytes 700 - 73f + 0xB8, 0xBD, 0x43, 0xE4, 0xB8, 0xBF, 0x43, 0xE4, + 0xB9, 0x81, 0x43, 0xE4, 0xB9, 0x99, 0x43, 0xE4, + 0xB9, 0x9D, 0x43, 0xE4, 0xBA, 0x82, 0x43, 0xE4, + 0xBA, 0x85, 0x43, 0xE4, 0xBA, 0x86, 0x43, 0xE4, + 0xBA, 0x8C, 0x43, 0xE4, 0xBA, 0x94, 0x43, 0xE4, + 0xBA, 0xA0, 0x43, 0xE4, 0xBA, 0xA4, 0x43, 0xE4, + 0xBA, 0xAE, 0x43, 0xE4, 0xBA, 0xBA, 0x43, 0xE4, + 0xBB, 0x80, 0x43, 0xE4, 0xBB, 0x8C, 0x43, 0xE4, + // Bytes 740 - 77f + 0xBB, 0xA4, 0x43, 0xE4, 0xBC, 0x81, 0x43, 0xE4, + 0xBC, 0x91, 0x43, 0xE4, 0xBD, 0xA0, 0x43, 0xE4, + 0xBE, 0x80, 0x43, 0xE4, 0xBE, 0x86, 0x43, 0xE4, + 0xBE, 0x8B, 0x43, 0xE4, 0xBE, 0xAE, 0x43, 0xE4, + 0xBE, 0xBB, 0x43, 0xE4, 0xBE, 0xBF, 0x43, 0xE5, + 0x80, 0x82, 0x43, 0xE5, 0x80, 0xAB, 0x43, 0xE5, + 0x81, 0xBA, 0x43, 0xE5, 0x82, 0x99, 0x43, 0xE5, + 0x83, 0x8F, 0x43, 0xE5, 0x83, 0x9A, 0x43, 0xE5, + // Bytes 780 - 7bf + 0x83, 0xA7, 0x43, 0xE5, 0x84, 0xAA, 0x43, 0xE5, + 0x84, 0xBF, 0x43, 0xE5, 0x85, 0x80, 0x43, 0xE5, + 0x85, 0x85, 0x43, 0xE5, 0x85, 0x8D, 0x43, 0xE5, + 0x85, 0x94, 0x43, 0xE5, 0x85, 0xA4, 0x43, 0xE5, + 0x85, 0xA5, 0x43, 0xE5, 0x85, 0xA7, 0x43, 0xE5, + 0x85, 0xA8, 0x43, 0xE5, 0x85, 0xA9, 0x43, 0xE5, + 0x85, 0xAB, 0x43, 0xE5, 0x85, 0xAD, 0x43, 0xE5, + 0x85, 0xB7, 0x43, 0xE5, 0x86, 0x80, 0x43, 0xE5, + // Bytes 7c0 - 7ff + 0x86, 0x82, 0x43, 0xE5, 0x86, 0x8D, 0x43, 0xE5, + 0x86, 0x92, 0x43, 0xE5, 0x86, 0x95, 0x43, 0xE5, + 0x86, 0x96, 0x43, 0xE5, 0x86, 0x97, 0x43, 0xE5, + 0x86, 0x99, 0x43, 0xE5, 0x86, 0xA4, 0x43, 0xE5, + 0x86, 0xAB, 0x43, 0xE5, 0x86, 0xAC, 0x43, 0xE5, + 0x86, 0xB5, 0x43, 0xE5, 0x86, 0xB7, 0x43, 0xE5, + 0x87, 0x89, 0x43, 0xE5, 0x87, 0x8C, 0x43, 0xE5, + 0x87, 0x9C, 0x43, 0xE5, 0x87, 0x9E, 0x43, 0xE5, + // Bytes 800 - 83f + 0x87, 0xA0, 0x43, 0xE5, 0x87, 0xB5, 0x43, 0xE5, + 0x88, 0x80, 0x43, 0xE5, 0x88, 0x83, 0x43, 0xE5, + 0x88, 0x87, 0x43, 0xE5, 0x88, 
0x97, 0x43, 0xE5, + 0x88, 0x9D, 0x43, 0xE5, 0x88, 0xA9, 0x43, 0xE5, + 0x88, 0xBA, 0x43, 0xE5, 0x88, 0xBB, 0x43, 0xE5, + 0x89, 0x86, 0x43, 0xE5, 0x89, 0x8D, 0x43, 0xE5, + 0x89, 0xB2, 0x43, 0xE5, 0x89, 0xB7, 0x43, 0xE5, + 0x8A, 0x89, 0x43, 0xE5, 0x8A, 0x9B, 0x43, 0xE5, + // Bytes 840 - 87f + 0x8A, 0xA3, 0x43, 0xE5, 0x8A, 0xB3, 0x43, 0xE5, + 0x8A, 0xB4, 0x43, 0xE5, 0x8B, 0x87, 0x43, 0xE5, + 0x8B, 0x89, 0x43, 0xE5, 0x8B, 0x92, 0x43, 0xE5, + 0x8B, 0x9E, 0x43, 0xE5, 0x8B, 0xA4, 0x43, 0xE5, + 0x8B, 0xB5, 0x43, 0xE5, 0x8B, 0xB9, 0x43, 0xE5, + 0x8B, 0xBA, 0x43, 0xE5, 0x8C, 0x85, 0x43, 0xE5, + 0x8C, 0x86, 0x43, 0xE5, 0x8C, 0x95, 0x43, 0xE5, + 0x8C, 0x97, 0x43, 0xE5, 0x8C, 0x9A, 0x43, 0xE5, + // Bytes 880 - 8bf + 0x8C, 0xB8, 0x43, 0xE5, 0x8C, 0xBB, 0x43, 0xE5, + 0x8C, 0xBF, 0x43, 0xE5, 0x8D, 0x81, 0x43, 0xE5, + 0x8D, 0x84, 0x43, 0xE5, 0x8D, 0x85, 0x43, 0xE5, + 0x8D, 0x89, 0x43, 0xE5, 0x8D, 0x91, 0x43, 0xE5, + 0x8D, 0x94, 0x43, 0xE5, 0x8D, 0x9A, 0x43, 0xE5, + 0x8D, 0x9C, 0x43, 0xE5, 0x8D, 0xA9, 0x43, 0xE5, + 0x8D, 0xB0, 0x43, 0xE5, 0x8D, 0xB3, 0x43, 0xE5, + 0x8D, 0xB5, 0x43, 0xE5, 0x8D, 0xBD, 0x43, 0xE5, + // Bytes 8c0 - 8ff + 0x8D, 0xBF, 0x43, 0xE5, 0x8E, 0x82, 0x43, 0xE5, + 0x8E, 0xB6, 0x43, 0xE5, 0x8F, 0x83, 0x43, 0xE5, + 0x8F, 0x88, 0x43, 0xE5, 0x8F, 0x8A, 0x43, 0xE5, + 0x8F, 0x8C, 0x43, 0xE5, 0x8F, 0x9F, 0x43, 0xE5, + 0x8F, 0xA3, 0x43, 0xE5, 0x8F, 0xA5, 0x43, 0xE5, + 0x8F, 0xAB, 0x43, 0xE5, 0x8F, 0xAF, 0x43, 0xE5, + 0x8F, 0xB1, 0x43, 0xE5, 0x8F, 0xB3, 0x43, 0xE5, + 0x90, 0x86, 0x43, 0xE5, 0x90, 0x88, 0x43, 0xE5, + // Bytes 900 - 93f + 0x90, 0x8D, 0x43, 0xE5, 0x90, 0x8F, 0x43, 0xE5, + 0x90, 0x9D, 0x43, 0xE5, 0x90, 0xB8, 0x43, 0xE5, + 0x90, 0xB9, 0x43, 0xE5, 0x91, 0x82, 0x43, 0xE5, + 0x91, 0x88, 0x43, 0xE5, 0x91, 0xA8, 0x43, 0xE5, + 0x92, 0x9E, 0x43, 0xE5, 0x92, 0xA2, 0x43, 0xE5, + 0x92, 0xBD, 0x43, 0xE5, 0x93, 0xB6, 0x43, 0xE5, + 0x94, 0x90, 0x43, 0xE5, 0x95, 0x8F, 0x43, 0xE5, + 0x95, 0x93, 0x43, 0xE5, 0x95, 0x95, 0x43, 0xE5, + // Bytes 940 - 97f + 0x95, 0xA3, 0x43, 0xE5, 0x96, 0x84, 0x43, 0xE5, + 0x96, 0x87, 0x43, 0xE5, 0x96, 0x99, 0x43, 0xE5, + 0x96, 0x9D, 0x43, 0xE5, 0x96, 0xAB, 0x43, 0xE5, + 0x96, 0xB3, 0x43, 0xE5, 0x96, 0xB6, 0x43, 0xE5, + 0x97, 0x80, 0x43, 0xE5, 0x97, 0x82, 0x43, 0xE5, + 0x97, 0xA2, 0x43, 0xE5, 0x98, 0x86, 0x43, 0xE5, + 0x99, 0x91, 0x43, 0xE5, 0x99, 0xA8, 0x43, 0xE5, + 0x99, 0xB4, 0x43, 0xE5, 0x9B, 0x97, 0x43, 0xE5, + // Bytes 980 - 9bf + 0x9B, 0x9B, 0x43, 0xE5, 0x9B, 0xB9, 0x43, 0xE5, + 0x9C, 0x96, 0x43, 0xE5, 0x9C, 0x97, 0x43, 0xE5, + 0x9C, 0x9F, 0x43, 0xE5, 0x9C, 0xB0, 0x43, 0xE5, + 0x9E, 0x8B, 0x43, 0xE5, 0x9F, 0x8E, 0x43, 0xE5, + 0x9F, 0xB4, 0x43, 0xE5, 0xA0, 0x8D, 0x43, 0xE5, + 0xA0, 0xB1, 0x43, 0xE5, 0xA0, 0xB2, 0x43, 0xE5, + 0xA1, 0x80, 0x43, 0xE5, 0xA1, 0x9A, 0x43, 0xE5, + 0xA1, 0x9E, 0x43, 0xE5, 0xA2, 0xA8, 0x43, 0xE5, + // Bytes 9c0 - 9ff + 0xA2, 0xAC, 0x43, 0xE5, 0xA2, 0xB3, 0x43, 0xE5, + 0xA3, 0x98, 0x43, 0xE5, 0xA3, 0x9F, 0x43, 0xE5, + 0xA3, 0xAB, 0x43, 0xE5, 0xA3, 0xAE, 0x43, 0xE5, + 0xA3, 0xB0, 0x43, 0xE5, 0xA3, 0xB2, 0x43, 0xE5, + 0xA3, 0xB7, 0x43, 0xE5, 0xA4, 0x82, 0x43, 0xE5, + 0xA4, 0x86, 0x43, 0xE5, 0xA4, 0x8A, 0x43, 0xE5, + 0xA4, 0x95, 0x43, 0xE5, 0xA4, 0x9A, 0x43, 0xE5, + 0xA4, 0x9C, 0x43, 0xE5, 0xA4, 0xA2, 0x43, 0xE5, + // Bytes a00 - a3f + 0xA4, 0xA7, 0x43, 0xE5, 0xA4, 0xA9, 0x43, 0xE5, + 0xA5, 0x84, 0x43, 0xE5, 0xA5, 0x88, 0x43, 0xE5, + 0xA5, 0x91, 0x43, 0xE5, 0xA5, 0x94, 0x43, 0xE5, + 0xA5, 0xA2, 0x43, 0xE5, 0xA5, 0xB3, 0x43, 0xE5, + 0xA7, 0x98, 0x43, 0xE5, 0xA7, 0xAC, 0x43, 0xE5, + 0xA8, 0x9B, 0x43, 0xE5, 0xA8, 0xA7, 0x43, 0xE5, + 0xA9, 0xA2, 
0x43, 0xE5, 0xA9, 0xA6, 0x43, 0xE5, + 0xAA, 0xB5, 0x43, 0xE5, 0xAC, 0x88, 0x43, 0xE5, + // Bytes a40 - a7f + 0xAC, 0xA8, 0x43, 0xE5, 0xAC, 0xBE, 0x43, 0xE5, + 0xAD, 0x90, 0x43, 0xE5, 0xAD, 0x97, 0x43, 0xE5, + 0xAD, 0xA6, 0x43, 0xE5, 0xAE, 0x80, 0x43, 0xE5, + 0xAE, 0x85, 0x43, 0xE5, 0xAE, 0x97, 0x43, 0xE5, + 0xAF, 0x83, 0x43, 0xE5, 0xAF, 0x98, 0x43, 0xE5, + 0xAF, 0xA7, 0x43, 0xE5, 0xAF, 0xAE, 0x43, 0xE5, + 0xAF, 0xB3, 0x43, 0xE5, 0xAF, 0xB8, 0x43, 0xE5, + 0xAF, 0xBF, 0x43, 0xE5, 0xB0, 0x86, 0x43, 0xE5, + // Bytes a80 - abf + 0xB0, 0x8F, 0x43, 0xE5, 0xB0, 0xA2, 0x43, 0xE5, + 0xB0, 0xB8, 0x43, 0xE5, 0xB0, 0xBF, 0x43, 0xE5, + 0xB1, 0xA0, 0x43, 0xE5, 0xB1, 0xA2, 0x43, 0xE5, + 0xB1, 0xA4, 0x43, 0xE5, 0xB1, 0xA5, 0x43, 0xE5, + 0xB1, 0xAE, 0x43, 0xE5, 0xB1, 0xB1, 0x43, 0xE5, + 0xB2, 0x8D, 0x43, 0xE5, 0xB3, 0x80, 0x43, 0xE5, + 0xB4, 0x99, 0x43, 0xE5, 0xB5, 0x83, 0x43, 0xE5, + 0xB5, 0x90, 0x43, 0xE5, 0xB5, 0xAB, 0x43, 0xE5, + // Bytes ac0 - aff + 0xB5, 0xAE, 0x43, 0xE5, 0xB5, 0xBC, 0x43, 0xE5, + 0xB6, 0xB2, 0x43, 0xE5, 0xB6, 0xBA, 0x43, 0xE5, + 0xB7, 0x9B, 0x43, 0xE5, 0xB7, 0xA1, 0x43, 0xE5, + 0xB7, 0xA2, 0x43, 0xE5, 0xB7, 0xA5, 0x43, 0xE5, + 0xB7, 0xA6, 0x43, 0xE5, 0xB7, 0xB1, 0x43, 0xE5, + 0xB7, 0xBD, 0x43, 0xE5, 0xB7, 0xBE, 0x43, 0xE5, + 0xB8, 0xA8, 0x43, 0xE5, 0xB8, 0xBD, 0x43, 0xE5, + 0xB9, 0xA9, 0x43, 0xE5, 0xB9, 0xB2, 0x43, 0xE5, + // Bytes b00 - b3f + 0xB9, 0xB4, 0x43, 0xE5, 0xB9, 0xBA, 0x43, 0xE5, + 0xB9, 0xBC, 0x43, 0xE5, 0xB9, 0xBF, 0x43, 0xE5, + 0xBA, 0xA6, 0x43, 0xE5, 0xBA, 0xB0, 0x43, 0xE5, + 0xBA, 0xB3, 0x43, 0xE5, 0xBA, 0xB6, 0x43, 0xE5, + 0xBB, 0x89, 0x43, 0xE5, 0xBB, 0x8A, 0x43, 0xE5, + 0xBB, 0x92, 0x43, 0xE5, 0xBB, 0x93, 0x43, 0xE5, + 0xBB, 0x99, 0x43, 0xE5, 0xBB, 0xAC, 0x43, 0xE5, + 0xBB, 0xB4, 0x43, 0xE5, 0xBB, 0xBE, 0x43, 0xE5, + // Bytes b40 - b7f + 0xBC, 0x84, 0x43, 0xE5, 0xBC, 0x8B, 0x43, 0xE5, + 0xBC, 0x93, 0x43, 0xE5, 0xBC, 0xA2, 0x43, 0xE5, + 0xBD, 0x90, 0x43, 0xE5, 0xBD, 0x93, 0x43, 0xE5, + 0xBD, 0xA1, 0x43, 0xE5, 0xBD, 0xA2, 0x43, 0xE5, + 0xBD, 0xA9, 0x43, 0xE5, 0xBD, 0xAB, 0x43, 0xE5, + 0xBD, 0xB3, 0x43, 0xE5, 0xBE, 0x8B, 0x43, 0xE5, + 0xBE, 0x8C, 0x43, 0xE5, 0xBE, 0x97, 0x43, 0xE5, + 0xBE, 0x9A, 0x43, 0xE5, 0xBE, 0xA9, 0x43, 0xE5, + // Bytes b80 - bbf + 0xBE, 0xAD, 0x43, 0xE5, 0xBF, 0x83, 0x43, 0xE5, + 0xBF, 0x8D, 0x43, 0xE5, 0xBF, 0x97, 0x43, 0xE5, + 0xBF, 0xB5, 0x43, 0xE5, 0xBF, 0xB9, 0x43, 0xE6, + 0x80, 0x92, 0x43, 0xE6, 0x80, 0x9C, 0x43, 0xE6, + 0x81, 0xB5, 0x43, 0xE6, 0x82, 0x81, 0x43, 0xE6, + 0x82, 0x94, 0x43, 0xE6, 0x83, 0x87, 0x43, 0xE6, + 0x83, 0x98, 0x43, 0xE6, 0x83, 0xA1, 0x43, 0xE6, + 0x84, 0x88, 0x43, 0xE6, 0x85, 0x84, 0x43, 0xE6, + // Bytes bc0 - bff + 0x85, 0x88, 0x43, 0xE6, 0x85, 0x8C, 0x43, 0xE6, + 0x85, 0x8E, 0x43, 0xE6, 0x85, 0xA0, 0x43, 0xE6, + 0x85, 0xA8, 0x43, 0xE6, 0x85, 0xBA, 0x43, 0xE6, + 0x86, 0x8E, 0x43, 0xE6, 0x86, 0x90, 0x43, 0xE6, + 0x86, 0xA4, 0x43, 0xE6, 0x86, 0xAF, 0x43, 0xE6, + 0x86, 0xB2, 0x43, 0xE6, 0x87, 0x9E, 0x43, 0xE6, + 0x87, 0xB2, 0x43, 0xE6, 0x87, 0xB6, 0x43, 0xE6, + 0x88, 0x80, 0x43, 0xE6, 0x88, 0x88, 0x43, 0xE6, + // Bytes c00 - c3f + 0x88, 0x90, 0x43, 0xE6, 0x88, 0x9B, 0x43, 0xE6, + 0x88, 0xAE, 0x43, 0xE6, 0x88, 0xB4, 0x43, 0xE6, + 0x88, 0xB6, 0x43, 0xE6, 0x89, 0x8B, 0x43, 0xE6, + 0x89, 0x93, 0x43, 0xE6, 0x89, 0x9D, 0x43, 0xE6, + 0x8A, 0x95, 0x43, 0xE6, 0x8A, 0xB1, 0x43, 0xE6, + 0x8B, 0x89, 0x43, 0xE6, 0x8B, 0x8F, 0x43, 0xE6, + 0x8B, 0x93, 0x43, 0xE6, 0x8B, 0x94, 0x43, 0xE6, + 0x8B, 0xBC, 0x43, 0xE6, 0x8B, 0xBE, 0x43, 0xE6, + // Bytes c40 - c7f + 0x8C, 0x87, 0x43, 0xE6, 0x8C, 0xBD, 0x43, 0xE6, + 0x8D, 0x90, 0x43, 0xE6, 
0x8D, 0x95, 0x43, 0xE6, + 0x8D, 0xA8, 0x43, 0xE6, 0x8D, 0xBB, 0x43, 0xE6, + 0x8E, 0x83, 0x43, 0xE6, 0x8E, 0xA0, 0x43, 0xE6, + 0x8E, 0xA9, 0x43, 0xE6, 0x8F, 0x84, 0x43, 0xE6, + 0x8F, 0x85, 0x43, 0xE6, 0x8F, 0xA4, 0x43, 0xE6, + 0x90, 0x9C, 0x43, 0xE6, 0x90, 0xA2, 0x43, 0xE6, + 0x91, 0x92, 0x43, 0xE6, 0x91, 0xA9, 0x43, 0xE6, + // Bytes c80 - cbf + 0x91, 0xB7, 0x43, 0xE6, 0x91, 0xBE, 0x43, 0xE6, + 0x92, 0x9A, 0x43, 0xE6, 0x92, 0x9D, 0x43, 0xE6, + 0x93, 0x84, 0x43, 0xE6, 0x94, 0xAF, 0x43, 0xE6, + 0x94, 0xB4, 0x43, 0xE6, 0x95, 0x8F, 0x43, 0xE6, + 0x95, 0x96, 0x43, 0xE6, 0x95, 0xAC, 0x43, 0xE6, + 0x95, 0xB8, 0x43, 0xE6, 0x96, 0x87, 0x43, 0xE6, + 0x96, 0x97, 0x43, 0xE6, 0x96, 0x99, 0x43, 0xE6, + 0x96, 0xA4, 0x43, 0xE6, 0x96, 0xB0, 0x43, 0xE6, + // Bytes cc0 - cff + 0x96, 0xB9, 0x43, 0xE6, 0x97, 0x85, 0x43, 0xE6, + 0x97, 0xA0, 0x43, 0xE6, 0x97, 0xA2, 0x43, 0xE6, + 0x97, 0xA3, 0x43, 0xE6, 0x97, 0xA5, 0x43, 0xE6, + 0x98, 0x93, 0x43, 0xE6, 0x98, 0xA0, 0x43, 0xE6, + 0x99, 0x89, 0x43, 0xE6, 0x99, 0xB4, 0x43, 0xE6, + 0x9A, 0x88, 0x43, 0xE6, 0x9A, 0x91, 0x43, 0xE6, + 0x9A, 0x9C, 0x43, 0xE6, 0x9A, 0xB4, 0x43, 0xE6, + 0x9B, 0x86, 0x43, 0xE6, 0x9B, 0xB0, 0x43, 0xE6, + // Bytes d00 - d3f + 0x9B, 0xB4, 0x43, 0xE6, 0x9B, 0xB8, 0x43, 0xE6, + 0x9C, 0x80, 0x43, 0xE6, 0x9C, 0x88, 0x43, 0xE6, + 0x9C, 0x89, 0x43, 0xE6, 0x9C, 0x97, 0x43, 0xE6, + 0x9C, 0x9B, 0x43, 0xE6, 0x9C, 0xA1, 0x43, 0xE6, + 0x9C, 0xA8, 0x43, 0xE6, 0x9D, 0x8E, 0x43, 0xE6, + 0x9D, 0x93, 0x43, 0xE6, 0x9D, 0x96, 0x43, 0xE6, + 0x9D, 0x9E, 0x43, 0xE6, 0x9D, 0xBB, 0x43, 0xE6, + 0x9E, 0x85, 0x43, 0xE6, 0x9E, 0x97, 0x43, 0xE6, + // Bytes d40 - d7f + 0x9F, 0xB3, 0x43, 0xE6, 0x9F, 0xBA, 0x43, 0xE6, + 0xA0, 0x97, 0x43, 0xE6, 0xA0, 0x9F, 0x43, 0xE6, + 0xA0, 0xAA, 0x43, 0xE6, 0xA1, 0x92, 0x43, 0xE6, + 0xA2, 0x81, 0x43, 0xE6, 0xA2, 0x85, 0x43, 0xE6, + 0xA2, 0x8E, 0x43, 0xE6, 0xA2, 0xA8, 0x43, 0xE6, + 0xA4, 0x94, 0x43, 0xE6, 0xA5, 0x82, 0x43, 0xE6, + 0xA6, 0xA3, 0x43, 0xE6, 0xA7, 0xAA, 0x43, 0xE6, + 0xA8, 0x82, 0x43, 0xE6, 0xA8, 0x93, 0x43, 0xE6, + // Bytes d80 - dbf + 0xAA, 0xA8, 0x43, 0xE6, 0xAB, 0x93, 0x43, 0xE6, + 0xAB, 0x9B, 0x43, 0xE6, 0xAC, 0x84, 0x43, 0xE6, + 0xAC, 0xA0, 0x43, 0xE6, 0xAC, 0xA1, 0x43, 0xE6, + 0xAD, 0x94, 0x43, 0xE6, 0xAD, 0xA2, 0x43, 0xE6, + 0xAD, 0xA3, 0x43, 0xE6, 0xAD, 0xB2, 0x43, 0xE6, + 0xAD, 0xB7, 0x43, 0xE6, 0xAD, 0xB9, 0x43, 0xE6, + 0xAE, 0x9F, 0x43, 0xE6, 0xAE, 0xAE, 0x43, 0xE6, + 0xAE, 0xB3, 0x43, 0xE6, 0xAE, 0xBA, 0x43, 0xE6, + // Bytes dc0 - dff + 0xAE, 0xBB, 0x43, 0xE6, 0xAF, 0x8B, 0x43, 0xE6, + 0xAF, 0x8D, 0x43, 0xE6, 0xAF, 0x94, 0x43, 0xE6, + 0xAF, 0x9B, 0x43, 0xE6, 0xB0, 0x8F, 0x43, 0xE6, + 0xB0, 0x94, 0x43, 0xE6, 0xB0, 0xB4, 0x43, 0xE6, + 0xB1, 0x8E, 0x43, 0xE6, 0xB1, 0xA7, 0x43, 0xE6, + 0xB2, 0x88, 0x43, 0xE6, 0xB2, 0xBF, 0x43, 0xE6, + 0xB3, 0x8C, 0x43, 0xE6, 0xB3, 0x8D, 0x43, 0xE6, + 0xB3, 0xA5, 0x43, 0xE6, 0xB3, 0xA8, 0x43, 0xE6, + // Bytes e00 - e3f + 0xB4, 0x96, 0x43, 0xE6, 0xB4, 0x9B, 0x43, 0xE6, + 0xB4, 0x9E, 0x43, 0xE6, 0xB4, 0xB4, 0x43, 0xE6, + 0xB4, 0xBE, 0x43, 0xE6, 0xB5, 0x81, 0x43, 0xE6, + 0xB5, 0xA9, 0x43, 0xE6, 0xB5, 0xAA, 0x43, 0xE6, + 0xB5, 0xB7, 0x43, 0xE6, 0xB5, 0xB8, 0x43, 0xE6, + 0xB6, 0x85, 0x43, 0xE6, 0xB7, 0x8B, 0x43, 0xE6, + 0xB7, 0x9A, 0x43, 0xE6, 0xB7, 0xAA, 0x43, 0xE6, + 0xB7, 0xB9, 0x43, 0xE6, 0xB8, 0x9A, 0x43, 0xE6, + // Bytes e40 - e7f + 0xB8, 0xAF, 0x43, 0xE6, 0xB9, 0xAE, 0x43, 0xE6, + 0xBA, 0x80, 0x43, 0xE6, 0xBA, 0x9C, 0x43, 0xE6, + 0xBA, 0xBA, 0x43, 0xE6, 0xBB, 0x87, 0x43, 0xE6, + 0xBB, 0x8B, 0x43, 0xE6, 0xBB, 0x91, 0x43, 0xE6, + 0xBB, 0x9B, 0x43, 0xE6, 0xBC, 0x8F, 0x43, 0xE6, + 0xBC, 
0x94, 0x43, 0xE6, 0xBC, 0xA2, 0x43, 0xE6, + 0xBC, 0xA3, 0x43, 0xE6, 0xBD, 0xAE, 0x43, 0xE6, + 0xBF, 0x86, 0x43, 0xE6, 0xBF, 0xAB, 0x43, 0xE6, + // Bytes e80 - ebf + 0xBF, 0xBE, 0x43, 0xE7, 0x80, 0x9B, 0x43, 0xE7, + 0x80, 0x9E, 0x43, 0xE7, 0x80, 0xB9, 0x43, 0xE7, + 0x81, 0x8A, 0x43, 0xE7, 0x81, 0xAB, 0x43, 0xE7, + 0x81, 0xB0, 0x43, 0xE7, 0x81, 0xB7, 0x43, 0xE7, + 0x81, 0xBD, 0x43, 0xE7, 0x82, 0x99, 0x43, 0xE7, + 0x82, 0xAD, 0x43, 0xE7, 0x83, 0x88, 0x43, 0xE7, + 0x83, 0x99, 0x43, 0xE7, 0x84, 0xA1, 0x43, 0xE7, + 0x85, 0x85, 0x43, 0xE7, 0x85, 0x89, 0x43, 0xE7, + // Bytes ec0 - eff + 0x85, 0xAE, 0x43, 0xE7, 0x86, 0x9C, 0x43, 0xE7, + 0x87, 0x8E, 0x43, 0xE7, 0x87, 0x90, 0x43, 0xE7, + 0x88, 0x90, 0x43, 0xE7, 0x88, 0x9B, 0x43, 0xE7, + 0x88, 0xA8, 0x43, 0xE7, 0x88, 0xAA, 0x43, 0xE7, + 0x88, 0xAB, 0x43, 0xE7, 0x88, 0xB5, 0x43, 0xE7, + 0x88, 0xB6, 0x43, 0xE7, 0x88, 0xBB, 0x43, 0xE7, + 0x88, 0xBF, 0x43, 0xE7, 0x89, 0x87, 0x43, 0xE7, + 0x89, 0x90, 0x43, 0xE7, 0x89, 0x99, 0x43, 0xE7, + // Bytes f00 - f3f + 0x89, 0x9B, 0x43, 0xE7, 0x89, 0xA2, 0x43, 0xE7, + 0x89, 0xB9, 0x43, 0xE7, 0x8A, 0x80, 0x43, 0xE7, + 0x8A, 0x95, 0x43, 0xE7, 0x8A, 0xAC, 0x43, 0xE7, + 0x8A, 0xAF, 0x43, 0xE7, 0x8B, 0x80, 0x43, 0xE7, + 0x8B, 0xBC, 0x43, 0xE7, 0x8C, 0xAA, 0x43, 0xE7, + 0x8D, 0xB5, 0x43, 0xE7, 0x8D, 0xBA, 0x43, 0xE7, + 0x8E, 0x84, 0x43, 0xE7, 0x8E, 0x87, 0x43, 0xE7, + 0x8E, 0x89, 0x43, 0xE7, 0x8E, 0x8B, 0x43, 0xE7, + // Bytes f40 - f7f + 0x8E, 0xA5, 0x43, 0xE7, 0x8E, 0xB2, 0x43, 0xE7, + 0x8F, 0x9E, 0x43, 0xE7, 0x90, 0x86, 0x43, 0xE7, + 0x90, 0x89, 0x43, 0xE7, 0x90, 0xA2, 0x43, 0xE7, + 0x91, 0x87, 0x43, 0xE7, 0x91, 0x9C, 0x43, 0xE7, + 0x91, 0xA9, 0x43, 0xE7, 0x91, 0xB1, 0x43, 0xE7, + 0x92, 0x85, 0x43, 0xE7, 0x92, 0x89, 0x43, 0xE7, + 0x92, 0x98, 0x43, 0xE7, 0x93, 0x8A, 0x43, 0xE7, + 0x93, 0x9C, 0x43, 0xE7, 0x93, 0xA6, 0x43, 0xE7, + // Bytes f80 - fbf + 0x94, 0x86, 0x43, 0xE7, 0x94, 0x98, 0x43, 0xE7, + 0x94, 0x9F, 0x43, 0xE7, 0x94, 0xA4, 0x43, 0xE7, + 0x94, 0xA8, 0x43, 0xE7, 0x94, 0xB0, 0x43, 0xE7, + 0x94, 0xB2, 0x43, 0xE7, 0x94, 0xB3, 0x43, 0xE7, + 0x94, 0xB7, 0x43, 0xE7, 0x94, 0xBB, 0x43, 0xE7, + 0x94, 0xBE, 0x43, 0xE7, 0x95, 0x99, 0x43, 0xE7, + 0x95, 0xA5, 0x43, 0xE7, 0x95, 0xB0, 0x43, 0xE7, + 0x96, 0x8B, 0x43, 0xE7, 0x96, 0x92, 0x43, 0xE7, + // Bytes fc0 - fff + 0x97, 0xA2, 0x43, 0xE7, 0x98, 0x90, 0x43, 0xE7, + 0x98, 0x9D, 0x43, 0xE7, 0x98, 0x9F, 0x43, 0xE7, + 0x99, 0x82, 0x43, 0xE7, 0x99, 0xA9, 0x43, 0xE7, + 0x99, 0xB6, 0x43, 0xE7, 0x99, 0xBD, 0x43, 0xE7, + 0x9A, 0xAE, 0x43, 0xE7, 0x9A, 0xBF, 0x43, 0xE7, + 0x9B, 0x8A, 0x43, 0xE7, 0x9B, 0x9B, 0x43, 0xE7, + 0x9B, 0xA3, 0x43, 0xE7, 0x9B, 0xA7, 0x43, 0xE7, + 0x9B, 0xAE, 0x43, 0xE7, 0x9B, 0xB4, 0x43, 0xE7, + // Bytes 1000 - 103f + 0x9C, 0x81, 0x43, 0xE7, 0x9C, 0x9E, 0x43, 0xE7, + 0x9C, 0x9F, 0x43, 0xE7, 0x9D, 0x80, 0x43, 0xE7, + 0x9D, 0x8A, 0x43, 0xE7, 0x9E, 0x8B, 0x43, 0xE7, + 0x9E, 0xA7, 0x43, 0xE7, 0x9F, 0x9B, 0x43, 0xE7, + 0x9F, 0xA2, 0x43, 0xE7, 0x9F, 0xB3, 0x43, 0xE7, + 0xA1, 0x8E, 0x43, 0xE7, 0xA1, 0xAB, 0x43, 0xE7, + 0xA2, 0x8C, 0x43, 0xE7, 0xA2, 0x91, 0x43, 0xE7, + 0xA3, 0x8A, 0x43, 0xE7, 0xA3, 0x8C, 0x43, 0xE7, + // Bytes 1040 - 107f + 0xA3, 0xBB, 0x43, 0xE7, 0xA4, 0xAA, 0x43, 0xE7, + 0xA4, 0xBA, 0x43, 0xE7, 0xA4, 0xBC, 0x43, 0xE7, + 0xA4, 0xBE, 0x43, 0xE7, 0xA5, 0x88, 0x43, 0xE7, + 0xA5, 0x89, 0x43, 0xE7, 0xA5, 0x90, 0x43, 0xE7, + 0xA5, 0x96, 0x43, 0xE7, 0xA5, 0x9D, 0x43, 0xE7, + 0xA5, 0x9E, 0x43, 0xE7, 0xA5, 0xA5, 0x43, 0xE7, + 0xA5, 0xBF, 0x43, 0xE7, 0xA6, 0x81, 0x43, 0xE7, + 0xA6, 0x8D, 0x43, 0xE7, 0xA6, 0x8E, 0x43, 0xE7, + // Bytes 1080 - 10bf + 0xA6, 0x8F, 
0x43, 0xE7, 0xA6, 0xAE, 0x43, 0xE7, + 0xA6, 0xB8, 0x43, 0xE7, 0xA6, 0xBE, 0x43, 0xE7, + 0xA7, 0x8A, 0x43, 0xE7, 0xA7, 0x98, 0x43, 0xE7, + 0xA7, 0xAB, 0x43, 0xE7, 0xA8, 0x9C, 0x43, 0xE7, + 0xA9, 0x80, 0x43, 0xE7, 0xA9, 0x8A, 0x43, 0xE7, + 0xA9, 0x8F, 0x43, 0xE7, 0xA9, 0xB4, 0x43, 0xE7, + 0xA9, 0xBA, 0x43, 0xE7, 0xAA, 0x81, 0x43, 0xE7, + 0xAA, 0xB1, 0x43, 0xE7, 0xAB, 0x8B, 0x43, 0xE7, + // Bytes 10c0 - 10ff + 0xAB, 0xAE, 0x43, 0xE7, 0xAB, 0xB9, 0x43, 0xE7, + 0xAC, 0xA0, 0x43, 0xE7, 0xAE, 0x8F, 0x43, 0xE7, + 0xAF, 0x80, 0x43, 0xE7, 0xAF, 0x86, 0x43, 0xE7, + 0xAF, 0x89, 0x43, 0xE7, 0xB0, 0xBE, 0x43, 0xE7, + 0xB1, 0xA0, 0x43, 0xE7, 0xB1, 0xB3, 0x43, 0xE7, + 0xB1, 0xBB, 0x43, 0xE7, 0xB2, 0x92, 0x43, 0xE7, + 0xB2, 0xBE, 0x43, 0xE7, 0xB3, 0x92, 0x43, 0xE7, + 0xB3, 0x96, 0x43, 0xE7, 0xB3, 0xA3, 0x43, 0xE7, + // Bytes 1100 - 113f + 0xB3, 0xA7, 0x43, 0xE7, 0xB3, 0xA8, 0x43, 0xE7, + 0xB3, 0xB8, 0x43, 0xE7, 0xB4, 0x80, 0x43, 0xE7, + 0xB4, 0x90, 0x43, 0xE7, 0xB4, 0xA2, 0x43, 0xE7, + 0xB4, 0xAF, 0x43, 0xE7, 0xB5, 0x82, 0x43, 0xE7, + 0xB5, 0x9B, 0x43, 0xE7, 0xB5, 0xA3, 0x43, 0xE7, + 0xB6, 0xA0, 0x43, 0xE7, 0xB6, 0xBE, 0x43, 0xE7, + 0xB7, 0x87, 0x43, 0xE7, 0xB7, 0xB4, 0x43, 0xE7, + 0xB8, 0x82, 0x43, 0xE7, 0xB8, 0x89, 0x43, 0xE7, + // Bytes 1140 - 117f + 0xB8, 0xB7, 0x43, 0xE7, 0xB9, 0x81, 0x43, 0xE7, + 0xB9, 0x85, 0x43, 0xE7, 0xBC, 0xB6, 0x43, 0xE7, + 0xBC, 0xBE, 0x43, 0xE7, 0xBD, 0x91, 0x43, 0xE7, + 0xBD, 0xB2, 0x43, 0xE7, 0xBD, 0xB9, 0x43, 0xE7, + 0xBD, 0xBA, 0x43, 0xE7, 0xBE, 0x85, 0x43, 0xE7, + 0xBE, 0x8A, 0x43, 0xE7, 0xBE, 0x95, 0x43, 0xE7, + 0xBE, 0x9A, 0x43, 0xE7, 0xBE, 0xBD, 0x43, 0xE7, + 0xBF, 0xBA, 0x43, 0xE8, 0x80, 0x81, 0x43, 0xE8, + // Bytes 1180 - 11bf + 0x80, 0x85, 0x43, 0xE8, 0x80, 0x8C, 0x43, 0xE8, + 0x80, 0x92, 0x43, 0xE8, 0x80, 0xB3, 0x43, 0xE8, + 0x81, 0x86, 0x43, 0xE8, 0x81, 0xA0, 0x43, 0xE8, + 0x81, 0xAF, 0x43, 0xE8, 0x81, 0xB0, 0x43, 0xE8, + 0x81, 0xBE, 0x43, 0xE8, 0x81, 0xBF, 0x43, 0xE8, + 0x82, 0x89, 0x43, 0xE8, 0x82, 0x8B, 0x43, 0xE8, + 0x82, 0xAD, 0x43, 0xE8, 0x82, 0xB2, 0x43, 0xE8, + 0x84, 0x83, 0x43, 0xE8, 0x84, 0xBE, 0x43, 0xE8, + // Bytes 11c0 - 11ff + 0x87, 0x98, 0x43, 0xE8, 0x87, 0xA3, 0x43, 0xE8, + 0x87, 0xA8, 0x43, 0xE8, 0x87, 0xAA, 0x43, 0xE8, + 0x87, 0xAD, 0x43, 0xE8, 0x87, 0xB3, 0x43, 0xE8, + 0x87, 0xBC, 0x43, 0xE8, 0x88, 0x81, 0x43, 0xE8, + 0x88, 0x84, 0x43, 0xE8, 0x88, 0x8C, 0x43, 0xE8, + 0x88, 0x98, 0x43, 0xE8, 0x88, 0x9B, 0x43, 0xE8, + 0x88, 0x9F, 0x43, 0xE8, 0x89, 0xAE, 0x43, 0xE8, + 0x89, 0xAF, 0x43, 0xE8, 0x89, 0xB2, 0x43, 0xE8, + // Bytes 1200 - 123f + 0x89, 0xB8, 0x43, 0xE8, 0x89, 0xB9, 0x43, 0xE8, + 0x8A, 0x8B, 0x43, 0xE8, 0x8A, 0x91, 0x43, 0xE8, + 0x8A, 0x9D, 0x43, 0xE8, 0x8A, 0xB1, 0x43, 0xE8, + 0x8A, 0xB3, 0x43, 0xE8, 0x8A, 0xBD, 0x43, 0xE8, + 0x8B, 0xA5, 0x43, 0xE8, 0x8B, 0xA6, 0x43, 0xE8, + 0x8C, 0x9D, 0x43, 0xE8, 0x8C, 0xA3, 0x43, 0xE8, + 0x8C, 0xB6, 0x43, 0xE8, 0x8D, 0x92, 0x43, 0xE8, + 0x8D, 0x93, 0x43, 0xE8, 0x8D, 0xA3, 0x43, 0xE8, + // Bytes 1240 - 127f + 0x8E, 0xAD, 0x43, 0xE8, 0x8E, 0xBD, 0x43, 0xE8, + 0x8F, 0x89, 0x43, 0xE8, 0x8F, 0x8A, 0x43, 0xE8, + 0x8F, 0x8C, 0x43, 0xE8, 0x8F, 0x9C, 0x43, 0xE8, + 0x8F, 0xA7, 0x43, 0xE8, 0x8F, 0xAF, 0x43, 0xE8, + 0x8F, 0xB1, 0x43, 0xE8, 0x90, 0xBD, 0x43, 0xE8, + 0x91, 0x89, 0x43, 0xE8, 0x91, 0x97, 0x43, 0xE8, + 0x93, 0xAE, 0x43, 0xE8, 0x93, 0xB1, 0x43, 0xE8, + 0x93, 0xB3, 0x43, 0xE8, 0x93, 0xBC, 0x43, 0xE8, + // Bytes 1280 - 12bf + 0x94, 0x96, 0x43, 0xE8, 0x95, 0xA4, 0x43, 0xE8, + 0x97, 0x8D, 0x43, 0xE8, 0x97, 0xBA, 0x43, 0xE8, + 0x98, 0x86, 0x43, 0xE8, 0x98, 0x92, 0x43, 0xE8, + 0x98, 0xAD, 0x43, 0xE8, 0x98, 
0xBF, 0x43, 0xE8, + 0x99, 0x8D, 0x43, 0xE8, 0x99, 0x90, 0x43, 0xE8, + 0x99, 0x9C, 0x43, 0xE8, 0x99, 0xA7, 0x43, 0xE8, + 0x99, 0xA9, 0x43, 0xE8, 0x99, 0xAB, 0x43, 0xE8, + 0x9A, 0x88, 0x43, 0xE8, 0x9A, 0xA9, 0x43, 0xE8, + // Bytes 12c0 - 12ff + 0x9B, 0xA2, 0x43, 0xE8, 0x9C, 0x8E, 0x43, 0xE8, + 0x9C, 0xA8, 0x43, 0xE8, 0x9D, 0xAB, 0x43, 0xE8, + 0x9D, 0xB9, 0x43, 0xE8, 0x9E, 0x86, 0x43, 0xE8, + 0x9E, 0xBA, 0x43, 0xE8, 0x9F, 0xA1, 0x43, 0xE8, + 0xA0, 0x81, 0x43, 0xE8, 0xA0, 0x9F, 0x43, 0xE8, + 0xA1, 0x80, 0x43, 0xE8, 0xA1, 0x8C, 0x43, 0xE8, + 0xA1, 0xA0, 0x43, 0xE8, 0xA1, 0xA3, 0x43, 0xE8, + 0xA3, 0x82, 0x43, 0xE8, 0xA3, 0x8F, 0x43, 0xE8, + // Bytes 1300 - 133f + 0xA3, 0x97, 0x43, 0xE8, 0xA3, 0x9E, 0x43, 0xE8, + 0xA3, 0xA1, 0x43, 0xE8, 0xA3, 0xB8, 0x43, 0xE8, + 0xA3, 0xBA, 0x43, 0xE8, 0xA4, 0x90, 0x43, 0xE8, + 0xA5, 0x81, 0x43, 0xE8, 0xA5, 0xA4, 0x43, 0xE8, + 0xA5, 0xBE, 0x43, 0xE8, 0xA6, 0x86, 0x43, 0xE8, + 0xA6, 0x8B, 0x43, 0xE8, 0xA6, 0x96, 0x43, 0xE8, + 0xA7, 0x92, 0x43, 0xE8, 0xA7, 0xA3, 0x43, 0xE8, + 0xA8, 0x80, 0x43, 0xE8, 0xAA, 0xA0, 0x43, 0xE8, + // Bytes 1340 - 137f + 0xAA, 0xAA, 0x43, 0xE8, 0xAA, 0xBF, 0x43, 0xE8, + 0xAB, 0x8B, 0x43, 0xE8, 0xAB, 0x92, 0x43, 0xE8, + 0xAB, 0x96, 0x43, 0xE8, 0xAB, 0xAD, 0x43, 0xE8, + 0xAB, 0xB8, 0x43, 0xE8, 0xAB, 0xBE, 0x43, 0xE8, + 0xAC, 0x81, 0x43, 0xE8, 0xAC, 0xB9, 0x43, 0xE8, + 0xAD, 0x98, 0x43, 0xE8, 0xAE, 0x80, 0x43, 0xE8, + 0xAE, 0x8A, 0x43, 0xE8, 0xB0, 0xB7, 0x43, 0xE8, + 0xB1, 0x86, 0x43, 0xE8, 0xB1, 0x88, 0x43, 0xE8, + // Bytes 1380 - 13bf + 0xB1, 0x95, 0x43, 0xE8, 0xB1, 0xB8, 0x43, 0xE8, + 0xB2, 0x9D, 0x43, 0xE8, 0xB2, 0xA1, 0x43, 0xE8, + 0xB2, 0xA9, 0x43, 0xE8, 0xB2, 0xAB, 0x43, 0xE8, + 0xB3, 0x81, 0x43, 0xE8, 0xB3, 0x82, 0x43, 0xE8, + 0xB3, 0x87, 0x43, 0xE8, 0xB3, 0x88, 0x43, 0xE8, + 0xB3, 0x93, 0x43, 0xE8, 0xB4, 0x88, 0x43, 0xE8, + 0xB4, 0x9B, 0x43, 0xE8, 0xB5, 0xA4, 0x43, 0xE8, + 0xB5, 0xB0, 0x43, 0xE8, 0xB5, 0xB7, 0x43, 0xE8, + // Bytes 13c0 - 13ff + 0xB6, 0xB3, 0x43, 0xE8, 0xB6, 0xBC, 0x43, 0xE8, + 0xB7, 0x8B, 0x43, 0xE8, 0xB7, 0xAF, 0x43, 0xE8, + 0xB7, 0xB0, 0x43, 0xE8, 0xBA, 0xAB, 0x43, 0xE8, + 0xBB, 0x8A, 0x43, 0xE8, 0xBB, 0x94, 0x43, 0xE8, + 0xBC, 0xA6, 0x43, 0xE8, 0xBC, 0xAA, 0x43, 0xE8, + 0xBC, 0xB8, 0x43, 0xE8, 0xBC, 0xBB, 0x43, 0xE8, + 0xBD, 0xA2, 0x43, 0xE8, 0xBE, 0x9B, 0x43, 0xE8, + 0xBE, 0x9E, 0x43, 0xE8, 0xBE, 0xB0, 0x43, 0xE8, + // Bytes 1400 - 143f + 0xBE, 0xB5, 0x43, 0xE8, 0xBE, 0xB6, 0x43, 0xE9, + 0x80, 0xA3, 0x43, 0xE9, 0x80, 0xB8, 0x43, 0xE9, + 0x81, 0x8A, 0x43, 0xE9, 0x81, 0xA9, 0x43, 0xE9, + 0x81, 0xB2, 0x43, 0xE9, 0x81, 0xBC, 0x43, 0xE9, + 0x82, 0x8F, 0x43, 0xE9, 0x82, 0x91, 0x43, 0xE9, + 0x82, 0x94, 0x43, 0xE9, 0x83, 0x8E, 0x43, 0xE9, + 0x83, 0x9E, 0x43, 0xE9, 0x83, 0xB1, 0x43, 0xE9, + 0x83, 0xBD, 0x43, 0xE9, 0x84, 0x91, 0x43, 0xE9, + // Bytes 1440 - 147f + 0x84, 0x9B, 0x43, 0xE9, 0x85, 0x89, 0x43, 0xE9, + 0x85, 0x8D, 0x43, 0xE9, 0x85, 0xAA, 0x43, 0xE9, + 0x86, 0x99, 0x43, 0xE9, 0x86, 0xB4, 0x43, 0xE9, + 0x87, 0x86, 0x43, 0xE9, 0x87, 0x8C, 0x43, 0xE9, + 0x87, 0x8F, 0x43, 0xE9, 0x87, 0x91, 0x43, 0xE9, + 0x88, 0xB4, 0x43, 0xE9, 0x88, 0xB8, 0x43, 0xE9, + 0x89, 0xB6, 0x43, 0xE9, 0x89, 0xBC, 0x43, 0xE9, + 0x8B, 0x97, 0x43, 0xE9, 0x8B, 0x98, 0x43, 0xE9, + // Bytes 1480 - 14bf + 0x8C, 0x84, 0x43, 0xE9, 0x8D, 0x8A, 0x43, 0xE9, + 0x8F, 0xB9, 0x43, 0xE9, 0x90, 0x95, 0x43, 0xE9, + 0x95, 0xB7, 0x43, 0xE9, 0x96, 0x80, 0x43, 0xE9, + 0x96, 0x8B, 0x43, 0xE9, 0x96, 0xAD, 0x43, 0xE9, + 0x96, 0xB7, 0x43, 0xE9, 0x98, 0x9C, 0x43, 0xE9, + 0x98, 0xAE, 0x43, 0xE9, 0x99, 0x8B, 0x43, 0xE9, + 0x99, 0x8D, 0x43, 0xE9, 0x99, 0xB5, 0x43, 0xE9, + 
0x99, 0xB8, 0x43, 0xE9, 0x99, 0xBC, 0x43, 0xE9, + // Bytes 14c0 - 14ff + 0x9A, 0x86, 0x43, 0xE9, 0x9A, 0xA3, 0x43, 0xE9, + 0x9A, 0xB6, 0x43, 0xE9, 0x9A, 0xB7, 0x43, 0xE9, + 0x9A, 0xB8, 0x43, 0xE9, 0x9A, 0xB9, 0x43, 0xE9, + 0x9B, 0x83, 0x43, 0xE9, 0x9B, 0xA2, 0x43, 0xE9, + 0x9B, 0xA3, 0x43, 0xE9, 0x9B, 0xA8, 0x43, 0xE9, + 0x9B, 0xB6, 0x43, 0xE9, 0x9B, 0xB7, 0x43, 0xE9, + 0x9C, 0xA3, 0x43, 0xE9, 0x9C, 0xB2, 0x43, 0xE9, + 0x9D, 0x88, 0x43, 0xE9, 0x9D, 0x91, 0x43, 0xE9, + // Bytes 1500 - 153f + 0x9D, 0x96, 0x43, 0xE9, 0x9D, 0x9E, 0x43, 0xE9, + 0x9D, 0xA2, 0x43, 0xE9, 0x9D, 0xA9, 0x43, 0xE9, + 0x9F, 0x8B, 0x43, 0xE9, 0x9F, 0x9B, 0x43, 0xE9, + 0x9F, 0xA0, 0x43, 0xE9, 0x9F, 0xAD, 0x43, 0xE9, + 0x9F, 0xB3, 0x43, 0xE9, 0x9F, 0xBF, 0x43, 0xE9, + 0xA0, 0x81, 0x43, 0xE9, 0xA0, 0x85, 0x43, 0xE9, + 0xA0, 0x8B, 0x43, 0xE9, 0xA0, 0x98, 0x43, 0xE9, + 0xA0, 0xA9, 0x43, 0xE9, 0xA0, 0xBB, 0x43, 0xE9, + // Bytes 1540 - 157f + 0xA1, 0x9E, 0x43, 0xE9, 0xA2, 0xA8, 0x43, 0xE9, + 0xA3, 0x9B, 0x43, 0xE9, 0xA3, 0x9F, 0x43, 0xE9, + 0xA3, 0xA2, 0x43, 0xE9, 0xA3, 0xAF, 0x43, 0xE9, + 0xA3, 0xBC, 0x43, 0xE9, 0xA4, 0xA8, 0x43, 0xE9, + 0xA4, 0xA9, 0x43, 0xE9, 0xA6, 0x96, 0x43, 0xE9, + 0xA6, 0x99, 0x43, 0xE9, 0xA6, 0xA7, 0x43, 0xE9, + 0xA6, 0xAC, 0x43, 0xE9, 0xA7, 0x82, 0x43, 0xE9, + 0xA7, 0xB1, 0x43, 0xE9, 0xA7, 0xBE, 0x43, 0xE9, + // Bytes 1580 - 15bf + 0xA9, 0xAA, 0x43, 0xE9, 0xAA, 0xA8, 0x43, 0xE9, + 0xAB, 0x98, 0x43, 0xE9, 0xAB, 0x9F, 0x43, 0xE9, + 0xAC, 0x92, 0x43, 0xE9, 0xAC, 0xA5, 0x43, 0xE9, + 0xAC, 0xAF, 0x43, 0xE9, 0xAC, 0xB2, 0x43, 0xE9, + 0xAC, 0xBC, 0x43, 0xE9, 0xAD, 0x9A, 0x43, 0xE9, + 0xAD, 0xAF, 0x43, 0xE9, 0xB1, 0x80, 0x43, 0xE9, + 0xB1, 0x97, 0x43, 0xE9, 0xB3, 0xA5, 0x43, 0xE9, + 0xB3, 0xBD, 0x43, 0xE9, 0xB5, 0xA7, 0x43, 0xE9, + // Bytes 15c0 - 15ff + 0xB6, 0xB4, 0x43, 0xE9, 0xB7, 0xBA, 0x43, 0xE9, + 0xB8, 0x9E, 0x43, 0xE9, 0xB9, 0xB5, 0x43, 0xE9, + 0xB9, 0xBF, 0x43, 0xE9, 0xBA, 0x97, 0x43, 0xE9, + 0xBA, 0x9F, 0x43, 0xE9, 0xBA, 0xA5, 0x43, 0xE9, + 0xBA, 0xBB, 0x43, 0xE9, 0xBB, 0x83, 0x43, 0xE9, + 0xBB, 0x8D, 0x43, 0xE9, 0xBB, 0x8E, 0x43, 0xE9, + 0xBB, 0x91, 0x43, 0xE9, 0xBB, 0xB9, 0x43, 0xE9, + 0xBB, 0xBD, 0x43, 0xE9, 0xBB, 0xBE, 0x43, 0xE9, + // Bytes 1600 - 163f + 0xBC, 0x85, 0x43, 0xE9, 0xBC, 0x8E, 0x43, 0xE9, + 0xBC, 0x8F, 0x43, 0xE9, 0xBC, 0x93, 0x43, 0xE9, + 0xBC, 0x96, 0x43, 0xE9, 0xBC, 0xA0, 0x43, 0xE9, + 0xBC, 0xBB, 0x43, 0xE9, 0xBD, 0x83, 0x43, 0xE9, + 0xBD, 0x8A, 0x43, 0xE9, 0xBD, 0x92, 0x43, 0xE9, + 0xBE, 0x8D, 0x43, 0xE9, 0xBE, 0x8E, 0x43, 0xE9, + 0xBE, 0x9C, 0x43, 0xE9, 0xBE, 0x9F, 0x43, 0xE9, + 0xBE, 0xA0, 0x43, 0xEA, 0x9C, 0xA7, 0x43, 0xEA, + // Bytes 1640 - 167f + 0x9D, 0xAF, 0x43, 0xEA, 0xAC, 0xB7, 0x43, 0xEA, + 0xAD, 0x92, 0x44, 0xF0, 0xA0, 0x84, 0xA2, 0x44, + 0xF0, 0xA0, 0x94, 0x9C, 0x44, 0xF0, 0xA0, 0x94, + 0xA5, 0x44, 0xF0, 0xA0, 0x95, 0x8B, 0x44, 0xF0, + 0xA0, 0x98, 0xBA, 0x44, 0xF0, 0xA0, 0xA0, 0x84, + 0x44, 0xF0, 0xA0, 0xA3, 0x9E, 0x44, 0xF0, 0xA0, + 0xA8, 0xAC, 0x44, 0xF0, 0xA0, 0xAD, 0xA3, 0x44, + 0xF0, 0xA1, 0x93, 0xA4, 0x44, 0xF0, 0xA1, 0x9A, + // Bytes 1680 - 16bf + 0xA8, 0x44, 0xF0, 0xA1, 0x9B, 0xAA, 0x44, 0xF0, + 0xA1, 0xA7, 0x88, 0x44, 0xF0, 0xA1, 0xAC, 0x98, + 0x44, 0xF0, 0xA1, 0xB4, 0x8B, 0x44, 0xF0, 0xA1, + 0xB7, 0xA4, 0x44, 0xF0, 0xA1, 0xB7, 0xA6, 0x44, + 0xF0, 0xA2, 0x86, 0x83, 0x44, 0xF0, 0xA2, 0x86, + 0x9F, 0x44, 0xF0, 0xA2, 0x8C, 0xB1, 0x44, 0xF0, + 0xA2, 0x9B, 0x94, 0x44, 0xF0, 0xA2, 0xA1, 0x84, + 0x44, 0xF0, 0xA2, 0xA1, 0x8A, 0x44, 0xF0, 0xA2, + // Bytes 16c0 - 16ff + 0xAC, 0x8C, 0x44, 0xF0, 0xA2, 0xAF, 0xB1, 0x44, + 0xF0, 0xA3, 0x80, 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 
+ 0xB8, 0x44, 0xF0, 0xA3, 0x8D, 0x9F, 0x44, 0xF0, + 0xA3, 0x8E, 0x93, 0x44, 0xF0, 0xA3, 0x8E, 0x9C, + 0x44, 0xF0, 0xA3, 0x8F, 0x83, 0x44, 0xF0, 0xA3, + 0x8F, 0x95, 0x44, 0xF0, 0xA3, 0x91, 0xAD, 0x44, + 0xF0, 0xA3, 0x9A, 0xA3, 0x44, 0xF0, 0xA3, 0xA2, + 0xA7, 0x44, 0xF0, 0xA3, 0xAA, 0x8D, 0x44, 0xF0, + // Bytes 1700 - 173f + 0xA3, 0xAB, 0xBA, 0x44, 0xF0, 0xA3, 0xB2, 0xBC, + 0x44, 0xF0, 0xA3, 0xB4, 0x9E, 0x44, 0xF0, 0xA3, + 0xBB, 0x91, 0x44, 0xF0, 0xA3, 0xBD, 0x9E, 0x44, + 0xF0, 0xA3, 0xBE, 0x8E, 0x44, 0xF0, 0xA4, 0x89, + 0xA3, 0x44, 0xF0, 0xA4, 0x8B, 0xAE, 0x44, 0xF0, + 0xA4, 0x8E, 0xAB, 0x44, 0xF0, 0xA4, 0x98, 0x88, + 0x44, 0xF0, 0xA4, 0x9C, 0xB5, 0x44, 0xF0, 0xA4, + 0xA0, 0x94, 0x44, 0xF0, 0xA4, 0xB0, 0xB6, 0x44, + // Bytes 1740 - 177f + 0xF0, 0xA4, 0xB2, 0x92, 0x44, 0xF0, 0xA4, 0xBE, + 0xA1, 0x44, 0xF0, 0xA4, 0xBE, 0xB8, 0x44, 0xF0, + 0xA5, 0x81, 0x84, 0x44, 0xF0, 0xA5, 0x83, 0xB2, + 0x44, 0xF0, 0xA5, 0x83, 0xB3, 0x44, 0xF0, 0xA5, + 0x84, 0x99, 0x44, 0xF0, 0xA5, 0x84, 0xB3, 0x44, + 0xF0, 0xA5, 0x89, 0x89, 0x44, 0xF0, 0xA5, 0x90, + 0x9D, 0x44, 0xF0, 0xA5, 0x98, 0xA6, 0x44, 0xF0, + 0xA5, 0x9A, 0x9A, 0x44, 0xF0, 0xA5, 0x9B, 0x85, + // Bytes 1780 - 17bf + 0x44, 0xF0, 0xA5, 0xA5, 0xBC, 0x44, 0xF0, 0xA5, + 0xAA, 0xA7, 0x44, 0xF0, 0xA5, 0xAE, 0xAB, 0x44, + 0xF0, 0xA5, 0xB2, 0x80, 0x44, 0xF0, 0xA5, 0xB3, + 0x90, 0x44, 0xF0, 0xA5, 0xBE, 0x86, 0x44, 0xF0, + 0xA6, 0x87, 0x9A, 0x44, 0xF0, 0xA6, 0x88, 0xA8, + 0x44, 0xF0, 0xA6, 0x89, 0x87, 0x44, 0xF0, 0xA6, + 0x8B, 0x99, 0x44, 0xF0, 0xA6, 0x8C, 0xBE, 0x44, + 0xF0, 0xA6, 0x93, 0x9A, 0x44, 0xF0, 0xA6, 0x94, + // Bytes 17c0 - 17ff + 0xA3, 0x44, 0xF0, 0xA6, 0x96, 0xA8, 0x44, 0xF0, + 0xA6, 0x9E, 0xA7, 0x44, 0xF0, 0xA6, 0x9E, 0xB5, + 0x44, 0xF0, 0xA6, 0xAC, 0xBC, 0x44, 0xF0, 0xA6, + 0xB0, 0xB6, 0x44, 0xF0, 0xA6, 0xB3, 0x95, 0x44, + 0xF0, 0xA6, 0xB5, 0xAB, 0x44, 0xF0, 0xA6, 0xBC, + 0xAC, 0x44, 0xF0, 0xA6, 0xBE, 0xB1, 0x44, 0xF0, + 0xA7, 0x83, 0x92, 0x44, 0xF0, 0xA7, 0x8F, 0x8A, + 0x44, 0xF0, 0xA7, 0x99, 0xA7, 0x44, 0xF0, 0xA7, + // Bytes 1800 - 183f + 0xA2, 0xAE, 0x44, 0xF0, 0xA7, 0xA5, 0xA6, 0x44, + 0xF0, 0xA7, 0xB2, 0xA8, 0x44, 0xF0, 0xA7, 0xBB, + 0x93, 0x44, 0xF0, 0xA7, 0xBC, 0xAF, 0x44, 0xF0, + 0xA8, 0x97, 0x92, 0x44, 0xF0, 0xA8, 0x97, 0xAD, + 0x44, 0xF0, 0xA8, 0x9C, 0xAE, 0x44, 0xF0, 0xA8, + 0xAF, 0xBA, 0x44, 0xF0, 0xA8, 0xB5, 0xB7, 0x44, + 0xF0, 0xA9, 0x85, 0x85, 0x44, 0xF0, 0xA9, 0x87, + 0x9F, 0x44, 0xF0, 0xA9, 0x88, 0x9A, 0x44, 0xF0, + // Bytes 1840 - 187f + 0xA9, 0x90, 0x8A, 0x44, 0xF0, 0xA9, 0x92, 0x96, + 0x44, 0xF0, 0xA9, 0x96, 0xB6, 0x44, 0xF0, 0xA9, + 0xAC, 0xB0, 0x44, 0xF0, 0xAA, 0x83, 0x8E, 0x44, + 0xF0, 0xAA, 0x84, 0x85, 0x44, 0xF0, 0xAA, 0x88, + 0x8E, 0x44, 0xF0, 0xAA, 0x8A, 0x91, 0x44, 0xF0, + 0xAA, 0x8E, 0x92, 0x44, 0xF0, 0xAA, 0x98, 0x80, + 0x42, 0x21, 0x21, 0x42, 0x21, 0x3F, 0x42, 0x2E, + 0x2E, 0x42, 0x30, 0x2C, 0x42, 0x30, 0x2E, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2C, 0x42, 0x31, 0x2E, 0x42, 0x31, 0x30, + 0x42, 0x31, 0x31, 0x42, 0x31, 0x32, 0x42, 0x31, + 0x33, 0x42, 0x31, 0x34, 0x42, 0x31, 0x35, 0x42, + 0x31, 0x36, 0x42, 0x31, 0x37, 0x42, 0x31, 0x38, + 0x42, 0x31, 0x39, 0x42, 0x32, 0x2C, 0x42, 0x32, + 0x2E, 0x42, 0x32, 0x30, 0x42, 0x32, 0x31, 0x42, + 0x32, 0x32, 0x42, 0x32, 0x33, 0x42, 0x32, 0x34, + 0x42, 0x32, 0x35, 0x42, 0x32, 0x36, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x37, 0x42, 0x32, 0x38, 0x42, 0x32, 0x39, 0x42, + 0x33, 0x2C, 0x42, 0x33, 0x2E, 0x42, 0x33, 0x30, + 0x42, 0x33, 0x31, 0x42, 0x33, 0x32, 0x42, 0x33, + 0x33, 0x42, 0x33, 0x34, 0x42, 0x33, 0x35, 0x42, + 0x33, 0x36, 0x42, 0x33, 0x37, 0x42, 0x33, 0x38, + 0x42, 0x33, 0x39, 
0x42, 0x34, 0x2C, 0x42, 0x34, + 0x2E, 0x42, 0x34, 0x30, 0x42, 0x34, 0x31, 0x42, + 0x34, 0x32, 0x42, 0x34, 0x33, 0x42, 0x34, 0x34, + // Bytes 1900 - 193f + 0x42, 0x34, 0x35, 0x42, 0x34, 0x36, 0x42, 0x34, + 0x37, 0x42, 0x34, 0x38, 0x42, 0x34, 0x39, 0x42, + 0x35, 0x2C, 0x42, 0x35, 0x2E, 0x42, 0x35, 0x30, + 0x42, 0x36, 0x2C, 0x42, 0x36, 0x2E, 0x42, 0x37, + 0x2C, 0x42, 0x37, 0x2E, 0x42, 0x38, 0x2C, 0x42, + 0x38, 0x2E, 0x42, 0x39, 0x2C, 0x42, 0x39, 0x2E, + 0x42, 0x3D, 0x3D, 0x42, 0x3F, 0x21, 0x42, 0x3F, + 0x3F, 0x42, 0x41, 0x55, 0x42, 0x42, 0x71, 0x42, + // Bytes 1940 - 197f + 0x43, 0x44, 0x42, 0x44, 0x4A, 0x42, 0x44, 0x5A, + 0x42, 0x44, 0x7A, 0x42, 0x47, 0x42, 0x42, 0x47, + 0x79, 0x42, 0x48, 0x50, 0x42, 0x48, 0x56, 0x42, + 0x48, 0x67, 0x42, 0x48, 0x7A, 0x42, 0x49, 0x49, + 0x42, 0x49, 0x4A, 0x42, 0x49, 0x55, 0x42, 0x49, + 0x56, 0x42, 0x49, 0x58, 0x42, 0x4B, 0x42, 0x42, + 0x4B, 0x4B, 0x42, 0x4B, 0x4D, 0x42, 0x4C, 0x4A, + 0x42, 0x4C, 0x6A, 0x42, 0x4D, 0x42, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x43, 0x42, 0x4D, 0x44, 0x42, 0x4D, 0x52, 0x42, + 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, 0x4E, 0x4A, + 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, 0x42, 0x50, + 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, 0x61, 0x42, + 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, 0x53, 0x4D, + 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, 0x42, 0x54, + 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, 0x43, 0x42, + 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, 0x58, 0x49, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, 0x42, 0x63, + 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, 0x61, 0x42, + 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, 0x64, 0x7A, + 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, 0x42, 0x66, + 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, 0x6D, 0x42, + 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, 0x69, 0x6A, + 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, 0x42, 0x69, + 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, 0x56, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, 0x6B, 0x6C, + 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, 0x42, 0x6C, + 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, 0x6E, 0x42, + 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, 0x6D, 0x33, + 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, 0x42, 0x6D, + 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, 0x67, 0x42, + 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, 0x6D, 0x73, + 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, 0x6A, 0x42, + 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, 0x6F, 0x56, + 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, 0x42, 0x70, + 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, 0x63, 0x42, + 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, 0x73, 0x74, + 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, 0x43, 0x28, + 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, 0x43, 0x28, + 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, 0x43, 0x28, + // Bytes 1a80 - 1abf + 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, 0x43, 0x28, + 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, 0x43, 0x28, + 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, 0x43, 0x28, + 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, 0x43, 0x28, + 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, 0x43, 0x28, + 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, 0x43, 0x28, + 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, 0x43, 0x28, + 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, 0x43, 0x28, + // Bytes 1ac0 - 1aff + 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, 0x43, 0x28, + 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, 0x43, 0x28, + 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, 0x43, 0x28, + 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, 0x43, 0x28, + 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, 0x43, 0x28, + 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, 0x43, 0x28, + 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, 0x43, 0x28, + 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, 0x43, 0x28, + // Bytes 1b00 - 1b3f + 0x62, 0x29, 
0x43, 0x28, 0x63, 0x29, 0x43, 0x28, + 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, 0x43, 0x28, + 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, 0x43, 0x28, + 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, 0x43, 0x28, + 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, 0x43, 0x28, + 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, 0x43, 0x28, + 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, 0x43, 0x28, + 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, 0x43, 0x28, + // Bytes 1b40 - 1b7f + 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, 0x43, 0x28, + 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, 0x43, 0x28, + 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, 0x43, 0x28, + 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, 0x43, 0x28, + 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, 0x43, 0x31, + 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, 0x43, 0x31, + 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, 0x43, 0x31, + 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, 0x43, 0x31, + // Bytes 1b80 - 1bbf + 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, 0x43, 0x31, + 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, 0x43, 0x32, + 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, 0x43, 0x3D, + 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, 0x43, 0x46, + 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, 0x43, 0x47, + 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, 0x43, 0x4C, + 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, 0x43, 0x4D, + 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, 0x43, 0x4D, + // Bytes 1bc0 - 1bff + 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, 0x43, 0x50, + 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, 0x43, 0x54, + 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, 0x43, 0x56, + 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, 0x43, 0x61, + 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, 0x43, 0x61, + 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, 0x43, 0x63, + 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, 0x43, 0x63, + 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, 0x43, 0x63, + // Bytes 1c00 - 1c3f + 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, 0x43, 0x64, + 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, 0x43, 0x66, + 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, 0x43, 0x67, + 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, 0x43, 0x69, + 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, 0x43, 0x6B, + 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, 0x43, 0x6B, + 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, 0x43, 0x6C, + 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, 0x43, 0x6D, + // Bytes 1c40 - 1c7f + 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, 0x43, 0x6D, + 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, 0x43, 0x72, + 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, 0x43, 0x78, + 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, 0x43, 0xC2, + 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, 0x43, 0xCE, + 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, 0x43, 0xCE, + 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, 0x43, 0xCE, + 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, 0x43, 0xCE, + // Bytes 1c80 - 1cbf + 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, 0x44, 0x28, + 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, 0x31, 0x29, + 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, 0x28, 0x31, + 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, 0x29, 0x44, + 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, 0x31, 0x36, + 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, 0x44, 0x28, + 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, 0x39, 0x29, + 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, 0x30, 0xE7, + // Bytes 1cc0 - 1cff + 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, 0x84, 0x44, + 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, 0xE6, 0x9C, + 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, 0x44, 0x32, + 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, 0x9C, 0x88, + 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, 0x33, 0xE6, + 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, 0x88, 0x44, + 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, 0xE6, 0x97, + 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, 0x44, 0x34, + // Bytes 1d00 - 1d3f + 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, 0x97, 0xA5, + 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, 0x35, 0xE7, + 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, 0xA5, 0x44, + 0x36, 0xE6, 0x9C, 0x88, 0x44, 
0x36, 0xE7, 0x82, + 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, 0x44, 0x37, + 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, 0x82, 0xB9, + 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, 0x38, 0xE6, + 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, 0xB9, 0x44, + // Bytes 1d40 - 1d7f + 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, 0xE6, 0x9C, + 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, 0x44, 0x56, + 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, 0x6D, 0x2E, + 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, 0x70, 0x2E, + 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, 0x69, 0x44, + 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, 0xB4, 0xD5, + 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, 0x44, 0xD5, + 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, 0xD5, 0xB6, + // Bytes 1d80 - 1dbf + 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, 0xD7, 0x90, + 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, 0xB4, 0x44, + 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, 0xA8, 0xD8, + 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, 0x44, 0xD8, + 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, 0xD8, 0xB2, + 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, 0xD8, 0xA8, + 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, 0x87, 0x44, + 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, 0xA8, 0xD9, + // Bytes 1dc0 - 1dff + 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, 0x44, 0xD8, + 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, 0xD8, 0xAE, + 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, 0xD8, 0xAA, + 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, 0x85, 0x44, + 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, 0xAA, 0xD9, + 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, 0x44, 0xD8, + 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, 0xD8, 0xAC, + 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, 0xD8, 0xAB, + // Bytes 1e00 - 1e3f + 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, 0x85, 0x44, + 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, 0xAB, 0xD9, + 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, 0x44, 0xD8, + 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, 0xD8, 0xAD, + 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, 0xD8, 0xAC, + 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, 0x8A, 0x44, + 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, 0xAD, 0xD9, + 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, 0x44, 0xD8, + // Bytes 1e40 - 1e7f + 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, 0xD8, 0xAC, + 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, 0xD8, 0xAE, + 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, 0x89, 0x44, + 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, 0xB3, 0xD8, + 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, 0x44, 0xD8, + 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, 0xD8, 0xB1, + 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, 0xD8, 0xB3, + 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, 0x89, 0x44, + // Bytes 1e80 - 1ebf + 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, 0xB4, 0xD8, + 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, 0x44, 0xD8, + 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, 0xD8, 0xB1, + 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, 0xD8, 0xB4, + 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, 0x89, 0x44, + 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, 0xB5, 0xD8, + 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, 0x44, 0xD8, + 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, 0xD9, 0x85, + // Bytes 1ec0 - 1eff + 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, 0xD8, 0xB5, + 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, 0xAC, 0x44, + 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, 0xB6, 0xD8, + 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, 0x44, 0xD8, + 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, 0xD9, 0x89, + 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, 0xD8, 0xB7, + 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, 0x85, 0x44, + 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, 0xB7, 0xD9, + // Bytes 1f00 - 1f3f + 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, 0x44, 0xD8, + 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, 0xD9, 0x85, + 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, 0xD8, 0xB9, + 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, 0xAC, 0x44, + 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, 0xBA, 0xD9, + 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, 0x44, 0xD9, + 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, 0xD8, 0xAD, + 
0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, 0xD9, 0x81, + // Bytes 1f40 - 1f7f + 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, 0x89, 0x44, + 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, 0x82, 0xD8, + 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, 0x44, 0xD9, + 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, 0xD9, 0x8A, + 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, 0xD9, 0x83, + 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, 0xAD, 0x44, + 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, 0x83, 0xD9, + 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, 0x44, 0xD9, + // Bytes 1f80 - 1fbf + 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, 0xD9, 0x8A, + 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, 0xD9, 0x84, + 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, 0xAD, 0x44, + 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, 0x84, 0xD9, + 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, 0x44, 0xD9, + 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, 0xD9, 0x8A, + 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, 0xD9, 0x85, + 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, 0xAD, 0x44, + // Bytes 1fc0 - 1fff + 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, 0x85, 0xD9, + 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, 0x44, 0xD9, + 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, 0xD8, 0xAC, + 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, 0xD9, 0x86, + 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, 0xB1, 0x44, + 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, 0x86, 0xD9, + 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, 0x44, 0xD9, + 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, 0xD9, 0x89, + // Bytes 2000 - 203f + 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, 0xD9, 0x87, + 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, 0x85, 0x44, + 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, 0x87, 0xD9, + 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, 0x44, 0xD9, + 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, 0xD8, 0xAD, + 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, 0xD9, 0x8A, + 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, 0xB2, 0x44, + 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, 0x8A, 0xD9, + // Bytes 2040 - 207f + 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, 0x44, 0xD9, + 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, 0xD9, 0x8A, + 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, 0xDB, 0x87, + 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, 0x80, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x86, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8C, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x91, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x89, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, 0xE4, 0xBA, + 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, 0xA3, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, 0x45, 0x28, + 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAD, 0x29, + 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, 0xE5, 0x8D, + // Bytes 2100 - 213f + 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, 0x8D, 0x29, + 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, 0x45, 0x28, + 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, 0xE5, 0x9C, + 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, 0xA6, 0x29, + 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0xA8, 0x29, + 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, 0xE7, 0x81, + 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, 0xB9, 0x29, 
+ 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 0x45, 0x28, + 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, 0xAD, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, 0x45, 0x28, + 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, 0xE8, 0xB2, + 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, 0x87, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, 0x45, 0x30, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, 0x9C, 0x88, + 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x31, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x32, 0xE7, + // Bytes 21c0 - 21ff + 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x34, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x36, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x38, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x38, + // Bytes 2240 - 227f + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, 0x45, 0x32, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x30, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x32, + 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x32, 0xE7, + 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, 0x45, 0x32, + 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x34, 0xE7, + // Bytes 2280 - 22bf + 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x38, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x32, + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0x30, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, 0x97, 0xA5, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x35, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x6D, + 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, 0xE2, 0x81, + 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, 0x88, 0x95, + // Bytes 2300 - 233f + 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, 0x95, 0x73, + 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8, + 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8, + 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + // Bytes 2340 - 237f + 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x89, + 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xAA, 0xD9, + 0x85, 0xD9, 0x89, 
0x46, 0xD8, 0xAA, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, + // Bytes 2380 - 23bf + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, + 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, + 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, 0xD9, 0x89, + 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + // Bytes 23c0 - 23ff + 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, + 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x85, + 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + // Bytes 2400 - 243f + 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB5, + 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, 0xB5, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, 0xD9, 0x84, + 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xDB, + 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x89, 0x46, + 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, + 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + // Bytes 2440 - 247f + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB7, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x89, 0x46, + 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xBA, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xBA, 0xD9, + // Bytes 2480 - 24bf + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, 0xD8, 0xAE, + 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, 0x85, 0xD9, + 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, 0xDB, 0x92, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x83, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x83, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + // Bytes 24c0 - 24ff + 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9, + 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD9, + 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, + // Bytes 2500 - 253f + 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAE, + 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD9, 0x85, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x8A, + // Bytes 2540 - 257f + 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x86, + 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x89, 0x46, + // Bytes 2580 - 25bf + 0xD9, 0x86, 
0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, + 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD9, 0x87, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xA7, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, 0x46, 0xD9, + // Bytes 25c0 - 25ff + 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x86, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x87, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, 0xD9, 0x8A, + // Bytes 2600 - 263f + 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x90, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x95, 0x46, + 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, 0x46, 0xE0, + 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, 0xE0, 0xBA, + 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, 0xBB, 0x8D, + // Bytes 2640 - 267f + 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, 0x80, 0xE0, + 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBE, 0x92, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x9C, 0xE0, + // Bytes 2680 - 26bf + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, 0xB7, 0x46, + 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x46, 0xE2, + 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, 0xBB, 0xE3, + 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, 0xE3, 0x82, + // Bytes 26c0 - 26ff + 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0xB3, 0x46, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, 0x46, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, 0x83, 0x9B, + 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, 0xE3, 0x83, + 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xA0, + // Bytes 2700 - 273f + 0x46, 0xE4, 0xBB, 0xA4, 0xE5, 0x92, 0x8C, 0x46, + 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, + 0xB9, 0xB3, 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, + 0x8E, 0xE6, 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, + 0xE5, 0x92, 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, + 0x88, 0x95, 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, + 0xE3, 0x80, 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + // Bytes 2740 - 277f + 0x82, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, + // Bytes 2780 - 27bf + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8C, 0xE1, 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 
0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, + // Bytes 27c0 - 27ff + 0x95, 0x73, 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, + 0xD8, 0xA8, 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, + 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, + 0xD8, 0xB3, 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, + 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, + 0xD8, 0xB5, 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, + 0x48, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, + 0x87, 0x48, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, + // Bytes 2800 - 283f + 0xD8, 0xAF, 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, + 0xB5, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0x49, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, + 0xB8, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + // Bytes 2840 - 287f + 0x94, 0xE4, 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, + 0x89, 0x93, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, + // Bytes 2880 - 28bf + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, + 0x9B, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, + 0xA2, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0x49, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, + 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, + 0xAA, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, + // Bytes 28c0 - 28ff + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAA, 0x49, 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, + 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x49, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, + // Bytes 2900 - 293f + 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x8E, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0x84, 0x49, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, + 0x95, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + // Bytes 2940 - 297f + 0xBD, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, + 0x83, 0xAB, 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, + // Bytes 2980 - 29bf + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x4C, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, + 
0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, + // Bytes 29c0 - 29ff + 0x82, 0xA1, 0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, + 0xBC, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x9E, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x82, 0xAB, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xA0, 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + // Bytes 2a40 - 2a7f + 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xB9, 0x4C, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0xBC, 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, + 0x83, 0x92, 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, + 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0x4C, 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, + 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, + 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x4C, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, + 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, + 0xA4, 0xBE, 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, + // Bytes 2b00 - 2b3f + 0x85, 0xA9, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, + 0x29, 0x4F, 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, + 0xAC, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + 0x87, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA2, 0x4F, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xA0, 0x4F, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, + // Bytes 2b80 - 2bbf + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x88, 0x4F, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, + 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, + 0xB3, 0x4F, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0x51, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, + // Bytes 2bc0 - 2bff + 0xA9, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, + 0x86, 0xAB, 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 
+ 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, + 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0xA0, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, + 0xBB, 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, + 0x83, 0xAD, 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, + // Bytes 2c40 - 2c7f + 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, + 0x82, 0xB9, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, + 0x52, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0x83, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0xAC, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + // Bytes 2c80 - 2cbf + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0x61, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, + 0x20, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, + 0x87, 0x20, 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, + 0xD9, 0x87, 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, + 0x84, 0xD9, 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA6, 0xBE, 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, + 0xA7, 0x97, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + // Bytes 2cc0 - 2cff + 0xAC, 0xBE, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x96, 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, + 0xAD, 0x97, 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, + 0xAF, 0x97, 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, + 0xAE, 0xBE, 0x01, 0x06, 0xE0, 0xB2, 0xBF, 0xE0, + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + // Bytes 2d00 - 2d3f + 0xB3, 0x95, 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x96, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, + 0xB5, 0x97, 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, + 0xB4, 0xBE, 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x9F, 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, + 0x80, 0xAE, 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, + // Bytes 2d80 - 2dbf + 0xAC, 0xB5, 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, + 0xAC, 0xB5, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, + 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, + 0x84, 0xB2, 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, + 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, + 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, + 0x8D, 0x97, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, + 0xF0, 0x91, 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, + // Bytes 2dc0 - 2dff + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, + 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, + 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, + 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, + 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, 0x91, + 0xA4, 0xB5, 0xF0, 
0x91, 0xA4, 0xB0, 0x01, 0x09, + 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, 0xB3, + 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, + // Bytes 2e00 - 2e3f + 0x8F, 0xE0, 0xB7, 0x8A, 0x16, 0x44, 0x44, 0x5A, + 0xCC, 0x8C, 0xCD, 0x44, 0x44, 0x7A, 0xCC, 0x8C, + 0xCD, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xCD, 0x46, + 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x46, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e40 - 2e7f + 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, 0x46, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, 0x46, + // Bytes 2e80 - 2ebf + 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, 0x46, + 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, 0x49, + 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0x11, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, 0x01, + // Bytes 2ec0 - 2eff + 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x4C, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0x11, 0x4C, 0xE3, 0x83, 0xA4, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x82, + 0x99, 0x11, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, 0x85, + 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, 0xE1, + 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, 0xE3, + // Bytes 2f00 - 2f3f + 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xAF, + 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, 0x82, 0xB7, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x4F, 0xE3, + 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x52, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x11, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x86, + 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, 0x86, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, 0x03, + 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, 0xB8, + // Bytes 2f80 - 2fbf + 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, 0x41, + 0xCC, 0x80, 0xCD, 0x03, 0x41, 0xCC, 0x81, 0xCD, + 0x03, 0x41, 0xCC, 0x83, 0xCD, 0x03, 0x41, 0xCC, + 0x84, 0xCD, 0x03, 0x41, 0xCC, 0x89, 0xCD, 0x03, + 0x41, 0xCC, 0x8C, 0xCD, 0x03, 0x41, 0xCC, 0x8F, + 0xCD, 0x03, 0x41, 0xCC, 0x91, 0xCD, 0x03, 0x41, + 0xCC, 0xA5, 0xB9, 0x03, 0x41, 0xCC, 0xA8, 0xA9, + 0x03, 0x42, 0xCC, 0x87, 0xCD, 0x03, 0x42, 0xCC, + // Bytes 2fc0 - 2fff + 0xA3, 0xB9, 0x03, 0x42, 0xCC, 0xB1, 0xB9, 0x03, + 0x43, 0xCC, 0x81, 0xCD, 0x03, 0x43, 0xCC, 0x82, + 0xCD, 0x03, 0x43, 0xCC, 0x87, 0xCD, 0x03, 0x43, + 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, 0x87, 0xCD, + 0x03, 0x44, 0xCC, 0x8C, 0xCD, 0x03, 0x44, 0xCC, + 0xA3, 0xB9, 0x03, 0x44, 0xCC, 0xA7, 0xA9, 0x03, + 0x44, 0xCC, 0xAD, 0xB9, 0x03, 0x44, 0xCC, 0xB1, + 0xB9, 0x03, 0x45, 0xCC, 0x80, 0xCD, 0x03, 0x45, + // Bytes 3000 - 303f + 0xCC, 0x81, 
0xCD, 0x03, 0x45, 0xCC, 0x83, 0xCD, + 0x03, 0x45, 0xCC, 0x86, 0xCD, 0x03, 0x45, 0xCC, + 0x87, 0xCD, 0x03, 0x45, 0xCC, 0x88, 0xCD, 0x03, + 0x45, 0xCC, 0x89, 0xCD, 0x03, 0x45, 0xCC, 0x8C, + 0xCD, 0x03, 0x45, 0xCC, 0x8F, 0xCD, 0x03, 0x45, + 0xCC, 0x91, 0xCD, 0x03, 0x45, 0xCC, 0xA8, 0xA9, + 0x03, 0x45, 0xCC, 0xAD, 0xB9, 0x03, 0x45, 0xCC, + 0xB0, 0xB9, 0x03, 0x46, 0xCC, 0x87, 0xCD, 0x03, + // Bytes 3040 - 307f + 0x47, 0xCC, 0x81, 0xCD, 0x03, 0x47, 0xCC, 0x82, + 0xCD, 0x03, 0x47, 0xCC, 0x84, 0xCD, 0x03, 0x47, + 0xCC, 0x86, 0xCD, 0x03, 0x47, 0xCC, 0x87, 0xCD, + 0x03, 0x47, 0xCC, 0x8C, 0xCD, 0x03, 0x47, 0xCC, + 0xA7, 0xA9, 0x03, 0x48, 0xCC, 0x82, 0xCD, 0x03, + 0x48, 0xCC, 0x87, 0xCD, 0x03, 0x48, 0xCC, 0x88, + 0xCD, 0x03, 0x48, 0xCC, 0x8C, 0xCD, 0x03, 0x48, + 0xCC, 0xA3, 0xB9, 0x03, 0x48, 0xCC, 0xA7, 0xA9, + // Bytes 3080 - 30bf + 0x03, 0x48, 0xCC, 0xAE, 0xB9, 0x03, 0x49, 0xCC, + 0x80, 0xCD, 0x03, 0x49, 0xCC, 0x81, 0xCD, 0x03, + 0x49, 0xCC, 0x82, 0xCD, 0x03, 0x49, 0xCC, 0x83, + 0xCD, 0x03, 0x49, 0xCC, 0x84, 0xCD, 0x03, 0x49, + 0xCC, 0x86, 0xCD, 0x03, 0x49, 0xCC, 0x87, 0xCD, + 0x03, 0x49, 0xCC, 0x89, 0xCD, 0x03, 0x49, 0xCC, + 0x8C, 0xCD, 0x03, 0x49, 0xCC, 0x8F, 0xCD, 0x03, + 0x49, 0xCC, 0x91, 0xCD, 0x03, 0x49, 0xCC, 0xA3, + // Bytes 30c0 - 30ff + 0xB9, 0x03, 0x49, 0xCC, 0xA8, 0xA9, 0x03, 0x49, + 0xCC, 0xB0, 0xB9, 0x03, 0x4A, 0xCC, 0x82, 0xCD, + 0x03, 0x4B, 0xCC, 0x81, 0xCD, 0x03, 0x4B, 0xCC, + 0x8C, 0xCD, 0x03, 0x4B, 0xCC, 0xA3, 0xB9, 0x03, + 0x4B, 0xCC, 0xA7, 0xA9, 0x03, 0x4B, 0xCC, 0xB1, + 0xB9, 0x03, 0x4C, 0xCC, 0x81, 0xCD, 0x03, 0x4C, + 0xCC, 0x8C, 0xCD, 0x03, 0x4C, 0xCC, 0xA7, 0xA9, + 0x03, 0x4C, 0xCC, 0xAD, 0xB9, 0x03, 0x4C, 0xCC, + // Bytes 3100 - 313f + 0xB1, 0xB9, 0x03, 0x4D, 0xCC, 0x81, 0xCD, 0x03, + 0x4D, 0xCC, 0x87, 0xCD, 0x03, 0x4D, 0xCC, 0xA3, + 0xB9, 0x03, 0x4E, 0xCC, 0x80, 0xCD, 0x03, 0x4E, + 0xCC, 0x81, 0xCD, 0x03, 0x4E, 0xCC, 0x83, 0xCD, + 0x03, 0x4E, 0xCC, 0x87, 0xCD, 0x03, 0x4E, 0xCC, + 0x8C, 0xCD, 0x03, 0x4E, 0xCC, 0xA3, 0xB9, 0x03, + 0x4E, 0xCC, 0xA7, 0xA9, 0x03, 0x4E, 0xCC, 0xAD, + 0xB9, 0x03, 0x4E, 0xCC, 0xB1, 0xB9, 0x03, 0x4F, + // Bytes 3140 - 317f + 0xCC, 0x80, 0xCD, 0x03, 0x4F, 0xCC, 0x81, 0xCD, + 0x03, 0x4F, 0xCC, 0x86, 0xCD, 0x03, 0x4F, 0xCC, + 0x89, 0xCD, 0x03, 0x4F, 0xCC, 0x8B, 0xCD, 0x03, + 0x4F, 0xCC, 0x8C, 0xCD, 0x03, 0x4F, 0xCC, 0x8F, + 0xCD, 0x03, 0x4F, 0xCC, 0x91, 0xCD, 0x03, 0x50, + 0xCC, 0x81, 0xCD, 0x03, 0x50, 0xCC, 0x87, 0xCD, + 0x03, 0x52, 0xCC, 0x81, 0xCD, 0x03, 0x52, 0xCC, + 0x87, 0xCD, 0x03, 0x52, 0xCC, 0x8C, 0xCD, 0x03, + // Bytes 3180 - 31bf + 0x52, 0xCC, 0x8F, 0xCD, 0x03, 0x52, 0xCC, 0x91, + 0xCD, 0x03, 0x52, 0xCC, 0xA7, 0xA9, 0x03, 0x52, + 0xCC, 0xB1, 0xB9, 0x03, 0x53, 0xCC, 0x82, 0xCD, + 0x03, 0x53, 0xCC, 0x87, 0xCD, 0x03, 0x53, 0xCC, + 0xA6, 0xB9, 0x03, 0x53, 0xCC, 0xA7, 0xA9, 0x03, + 0x54, 0xCC, 0x87, 0xCD, 0x03, 0x54, 0xCC, 0x8C, + 0xCD, 0x03, 0x54, 0xCC, 0xA3, 0xB9, 0x03, 0x54, + 0xCC, 0xA6, 0xB9, 0x03, 0x54, 0xCC, 0xA7, 0xA9, + // Bytes 31c0 - 31ff + 0x03, 0x54, 0xCC, 0xAD, 0xB9, 0x03, 0x54, 0xCC, + 0xB1, 0xB9, 0x03, 0x55, 0xCC, 0x80, 0xCD, 0x03, + 0x55, 0xCC, 0x81, 0xCD, 0x03, 0x55, 0xCC, 0x82, + 0xCD, 0x03, 0x55, 0xCC, 0x86, 0xCD, 0x03, 0x55, + 0xCC, 0x89, 0xCD, 0x03, 0x55, 0xCC, 0x8A, 0xCD, + 0x03, 0x55, 0xCC, 0x8B, 0xCD, 0x03, 0x55, 0xCC, + 0x8C, 0xCD, 0x03, 0x55, 0xCC, 0x8F, 0xCD, 0x03, + 0x55, 0xCC, 0x91, 0xCD, 0x03, 0x55, 0xCC, 0xA3, + // Bytes 3200 - 323f + 0xB9, 0x03, 0x55, 0xCC, 0xA4, 0xB9, 0x03, 0x55, + 0xCC, 0xA8, 0xA9, 0x03, 0x55, 0xCC, 0xAD, 0xB9, + 0x03, 0x55, 0xCC, 0xB0, 0xB9, 0x03, 0x56, 0xCC, + 0x83, 0xCD, 0x03, 0x56, 0xCC, 
0xA3, 0xB9, 0x03, + 0x57, 0xCC, 0x80, 0xCD, 0x03, 0x57, 0xCC, 0x81, + 0xCD, 0x03, 0x57, 0xCC, 0x82, 0xCD, 0x03, 0x57, + 0xCC, 0x87, 0xCD, 0x03, 0x57, 0xCC, 0x88, 0xCD, + 0x03, 0x57, 0xCC, 0xA3, 0xB9, 0x03, 0x58, 0xCC, + // Bytes 3240 - 327f + 0x87, 0xCD, 0x03, 0x58, 0xCC, 0x88, 0xCD, 0x03, + 0x59, 0xCC, 0x80, 0xCD, 0x03, 0x59, 0xCC, 0x81, + 0xCD, 0x03, 0x59, 0xCC, 0x82, 0xCD, 0x03, 0x59, + 0xCC, 0x83, 0xCD, 0x03, 0x59, 0xCC, 0x84, 0xCD, + 0x03, 0x59, 0xCC, 0x87, 0xCD, 0x03, 0x59, 0xCC, + 0x88, 0xCD, 0x03, 0x59, 0xCC, 0x89, 0xCD, 0x03, + 0x59, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, 0x81, + 0xCD, 0x03, 0x5A, 0xCC, 0x82, 0xCD, 0x03, 0x5A, + // Bytes 3280 - 32bf + 0xCC, 0x87, 0xCD, 0x03, 0x5A, 0xCC, 0x8C, 0xCD, + 0x03, 0x5A, 0xCC, 0xA3, 0xB9, 0x03, 0x5A, 0xCC, + 0xB1, 0xB9, 0x03, 0x61, 0xCC, 0x80, 0xCD, 0x03, + 0x61, 0xCC, 0x81, 0xCD, 0x03, 0x61, 0xCC, 0x83, + 0xCD, 0x03, 0x61, 0xCC, 0x84, 0xCD, 0x03, 0x61, + 0xCC, 0x89, 0xCD, 0x03, 0x61, 0xCC, 0x8C, 0xCD, + 0x03, 0x61, 0xCC, 0x8F, 0xCD, 0x03, 0x61, 0xCC, + 0x91, 0xCD, 0x03, 0x61, 0xCC, 0xA5, 0xB9, 0x03, + // Bytes 32c0 - 32ff + 0x61, 0xCC, 0xA8, 0xA9, 0x03, 0x62, 0xCC, 0x87, + 0xCD, 0x03, 0x62, 0xCC, 0xA3, 0xB9, 0x03, 0x62, + 0xCC, 0xB1, 0xB9, 0x03, 0x63, 0xCC, 0x81, 0xCD, + 0x03, 0x63, 0xCC, 0x82, 0xCD, 0x03, 0x63, 0xCC, + 0x87, 0xCD, 0x03, 0x63, 0xCC, 0x8C, 0xCD, 0x03, + 0x64, 0xCC, 0x87, 0xCD, 0x03, 0x64, 0xCC, 0x8C, + 0xCD, 0x03, 0x64, 0xCC, 0xA3, 0xB9, 0x03, 0x64, + 0xCC, 0xA7, 0xA9, 0x03, 0x64, 0xCC, 0xAD, 0xB9, + // Bytes 3300 - 333f + 0x03, 0x64, 0xCC, 0xB1, 0xB9, 0x03, 0x65, 0xCC, + 0x80, 0xCD, 0x03, 0x65, 0xCC, 0x81, 0xCD, 0x03, + 0x65, 0xCC, 0x83, 0xCD, 0x03, 0x65, 0xCC, 0x86, + 0xCD, 0x03, 0x65, 0xCC, 0x87, 0xCD, 0x03, 0x65, + 0xCC, 0x88, 0xCD, 0x03, 0x65, 0xCC, 0x89, 0xCD, + 0x03, 0x65, 0xCC, 0x8C, 0xCD, 0x03, 0x65, 0xCC, + 0x8F, 0xCD, 0x03, 0x65, 0xCC, 0x91, 0xCD, 0x03, + 0x65, 0xCC, 0xA8, 0xA9, 0x03, 0x65, 0xCC, 0xAD, + // Bytes 3340 - 337f + 0xB9, 0x03, 0x65, 0xCC, 0xB0, 0xB9, 0x03, 0x66, + 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x81, 0xCD, + 0x03, 0x67, 0xCC, 0x82, 0xCD, 0x03, 0x67, 0xCC, + 0x84, 0xCD, 0x03, 0x67, 0xCC, 0x86, 0xCD, 0x03, + 0x67, 0xCC, 0x87, 0xCD, 0x03, 0x67, 0xCC, 0x8C, + 0xCD, 0x03, 0x67, 0xCC, 0xA7, 0xA9, 0x03, 0x68, + 0xCC, 0x82, 0xCD, 0x03, 0x68, 0xCC, 0x87, 0xCD, + 0x03, 0x68, 0xCC, 0x88, 0xCD, 0x03, 0x68, 0xCC, + // Bytes 3380 - 33bf + 0x8C, 0xCD, 0x03, 0x68, 0xCC, 0xA3, 0xB9, 0x03, + 0x68, 0xCC, 0xA7, 0xA9, 0x03, 0x68, 0xCC, 0xAE, + 0xB9, 0x03, 0x68, 0xCC, 0xB1, 0xB9, 0x03, 0x69, + 0xCC, 0x80, 0xCD, 0x03, 0x69, 0xCC, 0x81, 0xCD, + 0x03, 0x69, 0xCC, 0x82, 0xCD, 0x03, 0x69, 0xCC, + 0x83, 0xCD, 0x03, 0x69, 0xCC, 0x84, 0xCD, 0x03, + 0x69, 0xCC, 0x86, 0xCD, 0x03, 0x69, 0xCC, 0x89, + 0xCD, 0x03, 0x69, 0xCC, 0x8C, 0xCD, 0x03, 0x69, + // Bytes 33c0 - 33ff + 0xCC, 0x8F, 0xCD, 0x03, 0x69, 0xCC, 0x91, 0xCD, + 0x03, 0x69, 0xCC, 0xA3, 0xB9, 0x03, 0x69, 0xCC, + 0xA8, 0xA9, 0x03, 0x69, 0xCC, 0xB0, 0xB9, 0x03, + 0x6A, 0xCC, 0x82, 0xCD, 0x03, 0x6A, 0xCC, 0x8C, + 0xCD, 0x03, 0x6B, 0xCC, 0x81, 0xCD, 0x03, 0x6B, + 0xCC, 0x8C, 0xCD, 0x03, 0x6B, 0xCC, 0xA3, 0xB9, + 0x03, 0x6B, 0xCC, 0xA7, 0xA9, 0x03, 0x6B, 0xCC, + 0xB1, 0xB9, 0x03, 0x6C, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3400 - 343f + 0x6C, 0xCC, 0x8C, 0xCD, 0x03, 0x6C, 0xCC, 0xA7, + 0xA9, 0x03, 0x6C, 0xCC, 0xAD, 0xB9, 0x03, 0x6C, + 0xCC, 0xB1, 0xB9, 0x03, 0x6D, 0xCC, 0x81, 0xCD, + 0x03, 0x6D, 0xCC, 0x87, 0xCD, 0x03, 0x6D, 0xCC, + 0xA3, 0xB9, 0x03, 0x6E, 0xCC, 0x80, 0xCD, 0x03, + 0x6E, 0xCC, 0x81, 0xCD, 0x03, 0x6E, 0xCC, 0x83, + 0xCD, 0x03, 0x6E, 0xCC, 0x87, 0xCD, 0x03, 0x6E, + 
0xCC, 0x8C, 0xCD, 0x03, 0x6E, 0xCC, 0xA3, 0xB9, + // Bytes 3440 - 347f + 0x03, 0x6E, 0xCC, 0xA7, 0xA9, 0x03, 0x6E, 0xCC, + 0xAD, 0xB9, 0x03, 0x6E, 0xCC, 0xB1, 0xB9, 0x03, + 0x6F, 0xCC, 0x80, 0xCD, 0x03, 0x6F, 0xCC, 0x81, + 0xCD, 0x03, 0x6F, 0xCC, 0x86, 0xCD, 0x03, 0x6F, + 0xCC, 0x89, 0xCD, 0x03, 0x6F, 0xCC, 0x8B, 0xCD, + 0x03, 0x6F, 0xCC, 0x8C, 0xCD, 0x03, 0x6F, 0xCC, + 0x8F, 0xCD, 0x03, 0x6F, 0xCC, 0x91, 0xCD, 0x03, + 0x70, 0xCC, 0x81, 0xCD, 0x03, 0x70, 0xCC, 0x87, + // Bytes 3480 - 34bf + 0xCD, 0x03, 0x72, 0xCC, 0x81, 0xCD, 0x03, 0x72, + 0xCC, 0x87, 0xCD, 0x03, 0x72, 0xCC, 0x8C, 0xCD, + 0x03, 0x72, 0xCC, 0x8F, 0xCD, 0x03, 0x72, 0xCC, + 0x91, 0xCD, 0x03, 0x72, 0xCC, 0xA7, 0xA9, 0x03, + 0x72, 0xCC, 0xB1, 0xB9, 0x03, 0x73, 0xCC, 0x82, + 0xCD, 0x03, 0x73, 0xCC, 0x87, 0xCD, 0x03, 0x73, + 0xCC, 0xA6, 0xB9, 0x03, 0x73, 0xCC, 0xA7, 0xA9, + 0x03, 0x74, 0xCC, 0x87, 0xCD, 0x03, 0x74, 0xCC, + // Bytes 34c0 - 34ff + 0x88, 0xCD, 0x03, 0x74, 0xCC, 0x8C, 0xCD, 0x03, + 0x74, 0xCC, 0xA3, 0xB9, 0x03, 0x74, 0xCC, 0xA6, + 0xB9, 0x03, 0x74, 0xCC, 0xA7, 0xA9, 0x03, 0x74, + 0xCC, 0xAD, 0xB9, 0x03, 0x74, 0xCC, 0xB1, 0xB9, + 0x03, 0x75, 0xCC, 0x80, 0xCD, 0x03, 0x75, 0xCC, + 0x81, 0xCD, 0x03, 0x75, 0xCC, 0x82, 0xCD, 0x03, + 0x75, 0xCC, 0x86, 0xCD, 0x03, 0x75, 0xCC, 0x89, + 0xCD, 0x03, 0x75, 0xCC, 0x8A, 0xCD, 0x03, 0x75, + // Bytes 3500 - 353f + 0xCC, 0x8B, 0xCD, 0x03, 0x75, 0xCC, 0x8C, 0xCD, + 0x03, 0x75, 0xCC, 0x8F, 0xCD, 0x03, 0x75, 0xCC, + 0x91, 0xCD, 0x03, 0x75, 0xCC, 0xA3, 0xB9, 0x03, + 0x75, 0xCC, 0xA4, 0xB9, 0x03, 0x75, 0xCC, 0xA8, + 0xA9, 0x03, 0x75, 0xCC, 0xAD, 0xB9, 0x03, 0x75, + 0xCC, 0xB0, 0xB9, 0x03, 0x76, 0xCC, 0x83, 0xCD, + 0x03, 0x76, 0xCC, 0xA3, 0xB9, 0x03, 0x77, 0xCC, + 0x80, 0xCD, 0x03, 0x77, 0xCC, 0x81, 0xCD, 0x03, + // Bytes 3540 - 357f + 0x77, 0xCC, 0x82, 0xCD, 0x03, 0x77, 0xCC, 0x87, + 0xCD, 0x03, 0x77, 0xCC, 0x88, 0xCD, 0x03, 0x77, + 0xCC, 0x8A, 0xCD, 0x03, 0x77, 0xCC, 0xA3, 0xB9, + 0x03, 0x78, 0xCC, 0x87, 0xCD, 0x03, 0x78, 0xCC, + 0x88, 0xCD, 0x03, 0x79, 0xCC, 0x80, 0xCD, 0x03, + 0x79, 0xCC, 0x81, 0xCD, 0x03, 0x79, 0xCC, 0x82, + 0xCD, 0x03, 0x79, 0xCC, 0x83, 0xCD, 0x03, 0x79, + 0xCC, 0x84, 0xCD, 0x03, 0x79, 0xCC, 0x87, 0xCD, + // Bytes 3580 - 35bf + 0x03, 0x79, 0xCC, 0x88, 0xCD, 0x03, 0x79, 0xCC, + 0x89, 0xCD, 0x03, 0x79, 0xCC, 0x8A, 0xCD, 0x03, + 0x79, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, 0x81, + 0xCD, 0x03, 0x7A, 0xCC, 0x82, 0xCD, 0x03, 0x7A, + 0xCC, 0x87, 0xCD, 0x03, 0x7A, 0xCC, 0x8C, 0xCD, + 0x03, 0x7A, 0xCC, 0xA3, 0xB9, 0x03, 0x7A, 0xCC, + 0xB1, 0xB9, 0x04, 0xC2, 0xA8, 0xCC, 0x80, 0xCE, + 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x04, 0xC2, + // Bytes 35c0 - 35ff + 0xA8, 0xCD, 0x82, 0xCE, 0x04, 0xC3, 0x86, 0xCC, + 0x81, 0xCD, 0x04, 0xC3, 0x86, 0xCC, 0x84, 0xCD, + 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xCD, 0x04, 0xC3, + 0xA6, 0xCC, 0x81, 0xCD, 0x04, 0xC3, 0xA6, 0xCC, + 0x84, 0xCD, 0x04, 0xC3, 0xB8, 0xCC, 0x81, 0xCD, + 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xCD, 0x04, 0xC6, + 0xB7, 0xCC, 0x8C, 0xCD, 0x04, 0xCA, 0x92, 0xCC, + 0x8C, 0xCD, 0x04, 0xCE, 0x91, 0xCC, 0x80, 0xCD, + // Bytes 3600 - 363f + 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0x91, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0x91, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0x91, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0x95, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0x97, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x97, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0x99, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0x99, 0xCC, + // Bytes 3640 - 367f + 0x81, 0xCD, 0x04, 0xCE, 0x99, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xCD, 0x04, 0xCE, 
+ 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0x9F, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xCD, 0x04, 0xCE, + 0xA5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, + 0x81, 0xCD, 0x04, 0xCE, 0xA5, 0xCC, 0x84, 0xCD, + 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xCD, 0x04, 0xCE, + // Bytes 3680 - 36bf + 0xA5, 0xCC, 0x88, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, + 0x80, 0xCD, 0x04, 0xCE, 0xA9, 0xCC, 0x81, 0xCD, + 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xDD, 0x04, 0xCE, + 0xB1, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB1, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB1, 0xCD, 0x85, 0xDD, + 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xB5, 0xCC, 0x81, 0xCD, 0x04, 0xCE, 0xB7, 0xCD, + 0x85, 0xDD, 0x04, 0xCE, 0xB9, 0xCC, 0x80, 0xCD, + // Bytes 36c0 - 36ff + 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x04, 0xCE, + 0xB9, 0xCC, 0x84, 0xCD, 0x04, 0xCE, 0xB9, 0xCC, + 0x86, 0xCD, 0x04, 0xCE, 0xB9, 0xCD, 0x82, 0xCD, + 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xCD, 0x04, 0xCE, + 0xBF, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x81, 0xCC, + 0x93, 0xCD, 0x04, 0xCF, 0x81, 0xCC, 0x94, 0xCD, + 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xCD, 0x04, 0xCF, + 0x85, 0xCC, 0x81, 0xCD, 0x04, 0xCF, 0x85, 0xCC, + // Bytes 3700 - 373f + 0x84, 0xCD, 0x04, 0xCF, 0x85, 0xCC, 0x86, 0xCD, + 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xCD, 0x04, 0xCF, + 0x89, 0xCD, 0x85, 0xDD, 0x04, 0xCF, 0x92, 0xCC, + 0x81, 0xCD, 0x04, 0xCF, 0x92, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x90, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x90, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x93, 0xCC, 0x81, 0xCD, + 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + // Bytes 3740 - 377f + 0x95, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0x95, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0x96, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0x97, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x98, 0xCC, + 0x80, 0xCD, 0x04, 0xD0, 0x98, 0xCC, 0x84, 0xCD, + 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xCD, 0x04, 0xD0, + 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0x9A, 0xCC, + 0x81, 0xCD, 0x04, 0xD0, 0x9E, 0xCC, 0x88, 0xCD, + // Bytes 3780 - 37bf + 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xCD, 0x04, 0xD0, + 0xA3, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, 0xCD, + 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xAB, 0xCC, 0x88, 0xCD, 0x04, 0xD0, 0xAD, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB0, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xB3, 0xCC, 0x81, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, + // Bytes 37c0 - 37ff + 0x80, 0xCD, 0x04, 0xD0, 0xB5, 0xCC, 0x86, 0xCD, + 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xCD, 0x04, 0xD0, + 0xB6, 0xCC, 0x86, 0xCD, 0x04, 0xD0, 0xB6, 0xCC, + 0x88, 0xCD, 0x04, 0xD0, 0xB7, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xCD, 0x04, 0xD0, + 0xB8, 0xCC, 0x84, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, + 0x86, 0xCD, 0x04, 0xD0, 0xB8, 0xCC, 0x88, 0xCD, + 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xCD, 0x04, 0xD0, + // Bytes 3800 - 383f + 0xBE, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0x83, 0xCC, + 0x84, 0xCD, 0x04, 0xD1, 0x83, 0xCC, 0x86, 0xCD, + 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x83, 0xCC, 0x8B, 0xCD, 0x04, 0xD1, 0x87, 0xCC, + 0x88, 0xCD, 0x04, 0xD1, 0x8B, 0xCC, 0x88, 0xCD, + 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xCD, 0x04, 0xD1, + 0x96, 0xCC, 0x88, 0xCD, 0x04, 0xD1, 0xB4, 0xCC, + 0x8F, 0xCD, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, 0xCD, + // Bytes 3840 - 387f + 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xCD, 0x04, 0xD3, + 0x99, 0xCC, 0x88, 0xCD, 0x04, 0xD3, 0xA8, 0xCC, + 0x88, 0xCD, 0x04, 0xD3, 0xA9, 0xCC, 0x88, 0xCD, + 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xCD, 0x04, 0xD8, + 0xA7, 0xD9, 0x94, 0xCD, 0x04, 0xD8, 0xA7, 0xD9, + 0x95, 0xB9, 0x04, 
0xD9, 0x88, 0xD9, 0x94, 0xCD, + 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x04, 0xDB, + 0x81, 0xD9, 0x94, 0xCD, 0x04, 0xDB, 0x92, 0xD9, + // Bytes 3880 - 38bf + 0x94, 0xCD, 0x04, 0xDB, 0x95, 0xD9, 0x94, 0xCD, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x41, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x80, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x81, 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x83, + 0xCE, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, 0xCE, + // Bytes 38c0 - 38ff + 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, + 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x41, + 0xCC, 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x41, 0xCC, 0xA3, + 0xCC, 0x86, 0xCE, 0x05, 0x43, 0xCC, 0xA7, 0xCC, + 0x81, 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x80, + 0xCE, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, 0xCE, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, + // Bytes 3900 - 393f + 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x45, 0xCC, + 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x45, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x45, 0xCC, 0xA7, 0xCC, + 0x86, 0xCE, 0x05, 0x49, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x4F, + // Bytes 3940 - 397f + 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x4F, 0xCC, + 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x4F, 0xCC, 0x83, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x84, 0xCE, 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x88, + 0xCE, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, + 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, + 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x4F, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x4F, 0xCC, + // Bytes 3980 - 39bf + 0x9B, 0xCC, 0x80, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x81, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCE, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, + 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, + 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x52, + 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x53, 0xCC, + 0x81, 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0x8C, + // Bytes 39c0 - 39ff + 0xCC, 0x87, 0xCE, 0x05, 0x53, 0xCC, 0xA3, 0xCC, + 0x87, 0xCE, 0x05, 0x55, 0xCC, 0x83, 0xCC, 0x81, + 0xCE, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, 0xCE, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x55, 0xCC, + 0x88, 0xCC, 0x8C, 0xCE, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + // Bytes 3a00 - 3a3f + 0x81, 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x61, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x80, 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x81, + // Bytes 3a40 - 3a7f + 0xCE, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, 0xCE, + 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCE, 0x05, + 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x61, + 0xCC, 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x61, 0xCC, + 0x8A, 0xCC, 0x81, 0xCE, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x82, 0xCE, 0x05, 0x61, 0xCC, 0xA3, 0xCC, + 0x86, 0xCE, 0x05, 0x63, 0xCC, 0xA7, 0xCC, 0x81, + 0xCE, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, 0xCE, + // Bytes 3a80 - 3abf + 0x05, 0x65, 
0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x65, + 0xCC, 0x82, 0xCC, 0x89, 0xCE, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x80, 0xCE, 0x05, 0x65, 0xCC, 0x84, + 0xCC, 0x81, 0xCE, 0x05, 0x65, 0xCC, 0xA3, 0xCC, + 0x82, 0xCE, 0x05, 0x65, 0xCC, 0xA7, 0xCC, 0x86, + 0xCE, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, 0xCE, + 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCE, 0x05, + // Bytes 3ac0 - 3aff + 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCE, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x81, 0xCE, 0x05, 0x6F, 0xCC, + 0x82, 0xCC, 0x83, 0xCE, 0x05, 0x6F, 0xCC, 0x82, + 0xCC, 0x89, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x84, + 0xCE, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, 0xCE, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCE, 0x05, + 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCE, 0x05, 0x6F, + // Bytes 3b00 - 3b3f + 0xCC, 0x87, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0x80, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x81, 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x83, + 0xCE, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, + 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, + 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCE, 0x05, 0x6F, + 0xCC, 0xA8, 0xCC, 0x84, 0xCE, 0x05, 0x72, 0xCC, + // Bytes 3b40 - 3b7f + 0xA3, 0xCC, 0x84, 0xCE, 0x05, 0x73, 0xCC, 0x81, + 0xCC, 0x87, 0xCE, 0x05, 0x73, 0xCC, 0x8C, 0xCC, + 0x87, 0xCE, 0x05, 0x73, 0xCC, 0xA3, 0xCC, 0x87, + 0xCE, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, 0xCE, + 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCE, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x84, 0xCE, 0x05, 0x75, 0xCC, 0x88, + // Bytes 3b80 - 3bbf + 0xCC, 0x8C, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x80, 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x81, + 0xCE, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, 0xCE, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCE, 0x05, + 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xBA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x80, 0xCE, 0x05, 0xE1, 0xBE, + 0xBF, 0xCC, 0x81, 0xCE, 0x05, 0xE1, 0xBE, 0xBF, + 0xCD, 0x82, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + // Bytes 3bc0 - 3bff + 0x80, 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, 0x81, + 0xCE, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, 0xCE, + 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x92, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, 0xB8, + // Bytes 3c00 - 3c3f + 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, + 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, 0x05, + // Bytes 3c40 - 3c7f + 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB3, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c80 - 3cbf + 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x83, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 
0x8A, 0x86, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3cc0 - 3cff + 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xAB, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, 0x06, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3d00 - 3d3f + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d40 - 3d7f + 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3d80 - 3dbf + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3dc0 - 3dff + 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + // Bytes 3e00 - 3e3f + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e40 - 3e7f + 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3e80 - 3ebf + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x06, + 
0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x06, + // Bytes 3ec0 - 3eff + 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x06, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDE, 0x06, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDE, 0x06, + 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + // Bytes 3f00 - 3f3f + 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x0D, 0x06, + 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x89, 0x06, + 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x15, 0x06, + 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f40 - 3f7f + 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3f80 - 3fbf + 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 3fc0 - 3fff + 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4000 - 403f + 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x11, 0x06, + // Bytes 4040 - 407f + 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 4080 - 40bf + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x11, 0x06, + // Bytes 40c0 - 40ff + 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x11, 0x06, 
+ 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x11, 0x06, + 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x11, 0x08, + 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x93, + // Bytes 4100 - 413f + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + // Bytes 4140 - 417f + 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xA9, + // Bytes 4180 - 41bf + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 41c0 - 41ff + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, 0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, + // Bytes 4200 - 423f + 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDF, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, + 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, 0xCC, 0x94, + // Bytes 4240 - 427f + 0xCC, 0x81, 0xCD, 0x85, 0xDF, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDF, 0x08, + 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, 0xBA, + 0x0D, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, 0x91, + 0x82, 0xBA, 0x0D, 0x08, 0xF0, 0x91, 0x82, 0xA5, + 0xF0, 0x91, 0x82, 0xBA, 0x0D, 0x42, 0xC2, 0xB4, + 0x01, 0x43, 0x20, 0xCC, 0x81, 0xCD, 0x43, 0x20, + 0xCC, 0x83, 0xCD, 0x43, 0x20, 0xCC, 0x84, 0xCD, + // Bytes 4280 - 42bf + 0x43, 0x20, 0xCC, 0x85, 0xCD, 0x43, 0x20, 0xCC, + 0x86, 0xCD, 0x43, 0x20, 0xCC, 0x87, 0xCD, 0x43, + 0x20, 0xCC, 0x88, 0xCD, 0x43, 0x20, 0xCC, 0x8A, + 0xCD, 0x43, 0x20, 0xCC, 0x8B, 0xCD, 0x43, 0x20, + 0xCC, 0x93, 0xCD, 0x43, 0x20, 0xCC, 0x94, 0xCD, + 0x43, 0x20, 0xCC, 0xA7, 0xA9, 0x43, 0x20, 0xCC, + 0xA8, 0xA9, 0x43, 0x20, 0xCC, 0xB3, 0xB9, 0x43, + 0x20, 0xCD, 0x82, 0xCD, 0x43, 0x20, 0xCD, 0x85, + // Bytes 42c0 - 42ff + 0xDD, 0x43, 0x20, 0xD9, 0x8B, 0x5D, 0x43, 0x20, + 0xD9, 0x8C, 0x61, 0x43, 0x20, 0xD9, 0x8D, 0x65, + 0x43, 0x20, 0xD9, 0x8E, 0x69, 0x43, 0x20, 0xD9, + 0x8F, 0x6D, 0x43, 0x20, 0xD9, 0x90, 0x71, 0x43, + 0x20, 0xD9, 0x91, 0x75, 0x43, 0x20, 0xD9, 0x92, + 0x79, 0x43, 0x41, 
0xCC, 0x8A, 0xCD, 0x43, 0x73, + 0xCC, 0x87, 0xCD, 0x44, 0x20, 0xE3, 0x82, 0x99, + 0x11, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x11, 0x44, + // Bytes 4300 - 433f + 0xC2, 0xA8, 0xCC, 0x81, 0xCE, 0x44, 0xCE, 0x91, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x95, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0x99, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0x9F, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x81, + 0xCD, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xCD, 0x44, + 0xCE, 0xA9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xB5, 0xCC, 0x81, + // Bytes 4340 - 437f + 0xCD, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x44, + 0xCE, 0xB9, 0xCC, 0x81, 0xCD, 0x44, 0xCE, 0xBF, + 0xCC, 0x81, 0xCD, 0x44, 0xCF, 0x85, 0xCC, 0x81, + 0xCD, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x44, + 0xD7, 0x90, 0xD6, 0xB7, 0x35, 0x44, 0xD7, 0x90, + 0xD6, 0xB8, 0x39, 0x44, 0xD7, 0x90, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x91, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x92, + // Bytes 4380 - 43bf + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x93, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x95, 0xD6, 0xB9, 0x3D, 0x44, 0xD7, 0x95, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x96, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0x99, 0xD6, 0xB4, 0x29, 0x44, 0xD7, 0x99, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9A, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x45, 0x44, + // Bytes 43c0 - 43ff + 0xD7, 0x9B, 0xD6, 0xBF, 0x4D, 0x44, 0xD7, 0x9C, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0x9E, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA1, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA3, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x4D, 0x44, + 0xD7, 0xA6, 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA7, + 0xD6, 0xBC, 0x45, 0x44, 0xD7, 0xA8, 0xD6, 0xBC, + // Bytes 4400 - 443f + 0x45, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x45, 0x44, + 0xD7, 0xA9, 0xD7, 0x81, 0x51, 0x44, 0xD7, 0xA9, + 0xD7, 0x82, 0x55, 0x44, 0xD7, 0xAA, 0xD6, 0xBC, + 0x45, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x35, 0x44, + 0xD8, 0xA7, 0xD9, 0x8B, 0x5D, 0x44, 0xD8, 0xA7, + 0xD9, 0x93, 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x94, + 0xCD, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB9, 0x44, + 0xD8, 0xB0, 0xD9, 0xB0, 0x7D, 0x44, 0xD8, 0xB1, + // Bytes 4440 - 447f + 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x80, 0xD9, 0x8B, + 0x5D, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x69, 0x44, + 0xD9, 0x80, 0xD9, 0x8F, 0x6D, 0x44, 0xD9, 0x80, + 0xD9, 0x90, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x91, + 0x75, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x79, 0x44, + 0xD9, 0x87, 0xD9, 0xB0, 0x7D, 0x44, 0xD9, 0x88, + 0xD9, 0x94, 0xCD, 0x44, 0xD9, 0x89, 0xD9, 0xB0, + 0x7D, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xCD, 0x44, + // Bytes 4480 - 44bf + 0xDB, 0x92, 0xD9, 0x94, 0xCD, 0x44, 0xDB, 0x95, + 0xD9, 0x94, 0xCD, 0x45, 0x20, 0xCC, 0x88, 0xCC, + 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCC, 0x81, + 0xCE, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, 0xCE, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x45, + 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x45, 0x20, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x80, 0xCE, 0x45, 0x20, 0xCC, 0x94, + // Bytes 44c0 - 44ff + 0xCC, 0x81, 0xCE, 0x45, 0x20, 0xCC, 0x94, 0xCD, + 0x82, 0xCE, 0x45, 0x20, 0xD9, 0x8C, 0xD9, 0x91, + 0x76, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, 0x76, + 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x76, 0x45, + 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x76, 0x45, 0x20, + 0xD9, 0x90, 0xD9, 0x91, 0x76, 0x45, 0x20, 0xD9, + 0x91, 0xD9, 0xB0, 0x7E, 0x45, 0xE2, 0xAB, 0x9D, + 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, 0x88, + // Bytes 4500 - 453f + 0xCC, 0x81, 
0xCE, 0x46, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x81, 0xCE, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x81, 0x52, 0x46, 0xD7, 0xA9, 0xD6, 0xBC, + 0xD7, 0x82, 0x56, 0x46, 0xD9, 0x80, 0xD9, 0x8E, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x8F, + 0xD9, 0x91, 0x76, 0x46, 0xD9, 0x80, 0xD9, 0x90, + 0xD9, 0x91, 0x76, 0x46, 0xE0, 0xA4, 0x95, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x96, 0xE0, + // Bytes 4540 - 457f + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x97, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0x9C, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA1, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xA2, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAB, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA4, 0xAF, 0xE0, + 0xA4, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA1, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xA2, 0xE0, + // Bytes 4580 - 45bf + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA6, 0xAF, 0xE0, + 0xA6, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x96, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x97, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0x9C, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xAB, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB2, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xA8, 0xB8, 0xE0, + 0xA8, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA1, 0xE0, + // Bytes 45c0 - 45ff + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xAC, 0xA2, 0xE0, + 0xAC, 0xBC, 0x0D, 0x46, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE0, 0xBE, 0xB3, 0xE0, + 0xBE, 0x80, 0xA1, 0x46, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0x11, 0x48, 0xF0, 0x9D, 0x85, 0x97, + 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xB1, 0x48, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + // Bytes 4600 - 463f + 0xB1, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xB1, 0x49, 0xE0, 0xBE, 0xB2, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x49, 0xE0, + 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, + 0xA2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, + // Bytes 4640 - 467f + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xB0, 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB1, + 0xB2, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xB2, 0x4C, + 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAE, 0xB2, 0x4C, 0xF0, 0x9D, + 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + // Bytes 4680 - 46bf + 0x85, 0xAF, 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xB2, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xB2, 0x83, + 0x41, 0xCC, 0x82, 0xCD, 0x83, 0x41, 0xCC, 0x86, + 0xCD, 0x83, 0x41, 0xCC, 0x87, 0xCD, 0x83, 0x41, + 0xCC, 0x88, 0xCD, 0x83, 0x41, 0xCC, 0x8A, 0xCD, + 0x83, 0x41, 0xCC, 0xA3, 0xB9, 0x83, 0x43, 0xCC, + // Bytes 46c0 - 46ff + 0xA7, 0xA9, 0x83, 0x45, 0xCC, 0x82, 0xCD, 0x83, + 0x45, 0xCC, 0x84, 0xCD, 0x83, 0x45, 0xCC, 0xA3, + 0xB9, 0x83, 0x45, 0xCC, 0xA7, 0xA9, 0x83, 0x49, + 0xCC, 0x88, 0xCD, 0x83, 0x4C, 0xCC, 0xA3, 0xB9, + 0x83, 0x4F, 0xCC, 0x82, 0xCD, 0x83, 0x4F, 0xCC, + 0x83, 0xCD, 0x83, 0x4F, 0xCC, 0x84, 0xCD, 0x83, + 0x4F, 0xCC, 0x87, 0xCD, 0x83, 0x4F, 0xCC, 0x88, + 0xCD, 0x83, 0x4F, 0xCC, 0x9B, 0xB1, 0x83, 0x4F, + // Bytes 4700 - 473f + 0xCC, 0xA3, 0xB9, 0x83, 0x4F, 0xCC, 0xA8, 0xA9, + 0x83, 0x52, 0xCC, 0xA3, 0xB9, 0x83, 0x53, 0xCC, + 0x81, 0xCD, 0x83, 0x53, 0xCC, 0x8C, 0xCD, 0x83, + 0x53, 0xCC, 0xA3, 0xB9, 0x83, 
0x55, 0xCC, 0x83, + 0xCD, 0x83, 0x55, 0xCC, 0x84, 0xCD, 0x83, 0x55, + 0xCC, 0x88, 0xCD, 0x83, 0x55, 0xCC, 0x9B, 0xB1, + 0x83, 0x61, 0xCC, 0x82, 0xCD, 0x83, 0x61, 0xCC, + 0x86, 0xCD, 0x83, 0x61, 0xCC, 0x87, 0xCD, 0x83, + // Bytes 4740 - 477f + 0x61, 0xCC, 0x88, 0xCD, 0x83, 0x61, 0xCC, 0x8A, + 0xCD, 0x83, 0x61, 0xCC, 0xA3, 0xB9, 0x83, 0x63, + 0xCC, 0xA7, 0xA9, 0x83, 0x65, 0xCC, 0x82, 0xCD, + 0x83, 0x65, 0xCC, 0x84, 0xCD, 0x83, 0x65, 0xCC, + 0xA3, 0xB9, 0x83, 0x65, 0xCC, 0xA7, 0xA9, 0x83, + 0x69, 0xCC, 0x88, 0xCD, 0x83, 0x6C, 0xCC, 0xA3, + 0xB9, 0x83, 0x6F, 0xCC, 0x82, 0xCD, 0x83, 0x6F, + 0xCC, 0x83, 0xCD, 0x83, 0x6F, 0xCC, 0x84, 0xCD, + // Bytes 4780 - 47bf + 0x83, 0x6F, 0xCC, 0x87, 0xCD, 0x83, 0x6F, 0xCC, + 0x88, 0xCD, 0x83, 0x6F, 0xCC, 0x9B, 0xB1, 0x83, + 0x6F, 0xCC, 0xA3, 0xB9, 0x83, 0x6F, 0xCC, 0xA8, + 0xA9, 0x83, 0x72, 0xCC, 0xA3, 0xB9, 0x83, 0x73, + 0xCC, 0x81, 0xCD, 0x83, 0x73, 0xCC, 0x8C, 0xCD, + 0x83, 0x73, 0xCC, 0xA3, 0xB9, 0x83, 0x75, 0xCC, + 0x83, 0xCD, 0x83, 0x75, 0xCC, 0x84, 0xCD, 0x83, + 0x75, 0xCC, 0x88, 0xCD, 0x83, 0x75, 0xCC, 0x9B, + // Bytes 47c0 - 47ff + 0xB1, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x95, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x95, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0x99, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0x99, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0x9F, 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA5, + // Bytes 4800 - 483f + 0xCC, 0x94, 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x84, 0xCE, 0xB1, + 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x84, + 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x84, 0xCE, 0xB5, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB5, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x84, + // Bytes 4840 - 487f + 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x84, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x84, + 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x84, 0xCE, 0xB9, + 0xCC, 0x93, 0xCD, 0x84, 0xCE, 0xB9, 0xCC, 0x94, + 0xCD, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xCD, 0x84, + 0xCE, 0xBF, 0xCC, 0x94, 0xCD, 0x84, 0xCF, 0x85, + 0xCC, 0x88, 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x93, + // Bytes 4880 - 48bf + 0xCD, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x84, 0xCF, 0x89, + 0xCC, 0x81, 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x84, + 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x91, + // Bytes 48c0 - 48ff + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x91, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0x97, + // Bytes 4900 - 493f + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xA9, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 
0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + // Bytes 4940 - 497f + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB1, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCE, 0xB7, + // Bytes 4980 - 49bf + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x93, 0xCD, 0x82, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x80, 0xCE, 0x86, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x81, 0xCE, 0x86, 0xCF, 0x89, + // Bytes 49c0 - 49ff + 0xCC, 0x94, 0xCD, 0x82, 0xCE, 0x42, 0xCC, 0x80, + 0xCD, 0x33, 0x42, 0xCC, 0x81, 0xCD, 0x33, 0x42, + 0xCC, 0x93, 0xCD, 0x33, 0x43, 0xE1, 0x85, 0xA1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA5, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, 0xE1, + // Bytes 4a00 - 4a3f + 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA9, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAD, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB1, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, 0x00, + // Bytes 4a40 - 4a7f + 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB5, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB0, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB4, + // Bytes 4a80 - 4abf + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, 0x00, + 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCE, 0x33, 0x43, + 0xE3, 0x82, 0x99, 0x11, 0x04, 0x43, 0xE3, 0x82, + 0x9A, 0x11, 0x04, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, + 0xBD, 0xB2, 0xA2, 0x27, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB4, 0xA6, 0x27, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBE, 0x80, 0xA2, 0x27, 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10680 bytes (10.43 KiB). Checksum: a555db76d4becdd2. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 
0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, + // Block 0x5, offset 0x140 + 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 
0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35b8, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3738, 0x2c1: 0x3744, 0x2c3: 0x3732, + 0x2c6: 0xa000, 0x2c7: 0x3720, + 0x2cc: 0x3774, 0x2cd: 0x375c, 0x2ce: 0x3786, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3768, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37ec, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x374a, 0x302: 0x37ce, + 0x310: 0x3726, 0x311: 0x37aa, + 0x312: 0x372c, 0x313: 0x37b0, 0x316: 0x373e, 0x317: 0x37c2, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3840, 0x31b: 0x3846, 0x31c: 0x3750, 0x31d: 0x37d4, + 0x31e: 0x3756, 0x31f: 0x37da, 0x322: 0x3762, 0x323: 0x37e6, + 0x324: 0x376e, 0x325: 0x37f2, 0x326: 0x377a, 0x327: 0x37fe, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x384c, 0x32b: 0x3852, 0x32c: 0x37a4, 0x32d: 0x3828, 0x32e: 0x3780, 0x32f: 0x3804, + 0x330: 0x378c, 0x331: 0x3810, 0x332: 0x3792, 0x333: 0x3816, 0x334: 0x3798, 0x335: 0x381c, + 0x338: 0x379e, 0x339: 0x3822, + // Block 0xd, offset 0x340 + 0x351: 0x812e, + 0x352: 0x8133, 0x353: 0x8133, 0x354: 0x8133, 0x355: 0x8133, 0x356: 0x812e, 0x357: 0x8133, + 0x358: 0x8133, 0x359: 0x8133, 0x35a: 0x812f, 0x35b: 0x812e, 0x35c: 0x8133, 0x35d: 0x8133, + 0x35e: 0x8133, 0x35f: 0x8133, 0x360: 0x8133, 0x361: 0x8133, 0x362: 0x812e, 0x363: 0x812e, + 0x364: 0x812e, 0x365: 0x812e, 0x366: 0x812e, 0x367: 0x812e, 0x368: 0x8133, 0x369: 0x8133, + 0x36a: 0x812e, 0x36b: 0x8133, 0x36c: 0x8133, 0x36d: 0x812f, 0x36e: 0x8132, 0x36f: 0x8133, + 0x370: 0x8106, 0x371: 0x8107, 0x372: 0x8108, 0x373: 0x8109, 0x374: 0x810a, 0x375: 0x810b, + 0x376: 0x810c, 0x377: 0x810d, 0x378: 0x810e, 0x379: 0x810f, 0x37a: 0x810f, 0x37b: 0x8110, + 0x37c: 0x8111, 0x37d: 0x8112, 0x37f: 0x8113, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8117, + 0x38c: 0x8118, 0x38d: 0x8119, 0x38e: 0x811a, 0x38f: 0x811b, 0x390: 0x811c, 0x391: 0x811d, + 0x392: 0x811e, 0x393: 0x9933, 0x394: 0x9933, 0x395: 0x992e, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x8133, 0x39b: 0x8133, 0x39c: 0x812e, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x812e, + 0x3b0: 0x811f, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812e, 0x3d4: 0x8133, 0x3d5: 0x8133, 0x3d6: 0x8133, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 
0x3db: 0x8133, 0x3dc: 0x8133, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x8133, 0x3e0: 0x8133, 0x3e1: 0x8133, 0x3e3: 0x812e, + 0x3e4: 0x8133, 0x3e5: 0x8133, 0x3e6: 0x812e, 0x3e7: 0x8133, 0x3e8: 0x8133, 0x3e9: 0x812e, + 0x3ea: 0x8133, 0x3eb: 0x8133, 0x3ec: 0x8133, 0x3ed: 0x812e, 0x3ee: 0x812e, 0x3ef: 0x812e, + 0x3f0: 0x8117, 0x3f1: 0x8118, 0x3f2: 0x8119, 0x3f3: 0x8133, 0x3f4: 0x8133, 0x3f5: 0x8133, + 0x3f6: 0x812e, 0x3f7: 0x8133, 0x3f8: 0x8133, 0x3f9: 0x812e, 0x3fa: 0x812e, 0x3fb: 0x8133, + 0x3fc: 0x8133, 0x3fd: 0x8133, 0x3fe: 0x8133, 0x3ff: 0x8133, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d33, 0x407: 0xa000, 0x408: 0x2d3b, 0x409: 0xa000, 0x40a: 0x2d43, 0x40b: 0xa000, + 0x40c: 0x2d4b, 0x40d: 0xa000, 0x40e: 0x2d53, 0x411: 0xa000, + 0x412: 0x2d5b, + 0x434: 0x8103, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d63, + 0x43c: 0xa000, 0x43d: 0x2d6b, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8133, 0x441: 0x8133, 0x442: 0x812e, 0x443: 0x8133, 0x444: 0x8133, 0x445: 0x8133, + 0x446: 0x8133, 0x447: 0x8133, 0x448: 0x8133, 0x449: 0x8133, 0x44a: 0x812e, 0x44b: 0x8133, + 0x44c: 0x8133, 0x44d: 0x8136, 0x44e: 0x812b, 0x44f: 0x812e, 0x450: 0x812a, 0x451: 0x8133, + 0x452: 0x8133, 0x453: 0x8133, 0x454: 0x8133, 0x455: 0x8133, 0x456: 0x8133, 0x457: 0x8133, + 0x458: 0x8133, 0x459: 0x8133, 0x45a: 0x8133, 0x45b: 0x8133, 0x45c: 0x8133, 0x45d: 0x8133, + 0x45e: 0x8133, 0x45f: 0x8133, 0x460: 0x8133, 0x461: 0x8133, 0x462: 0x8133, 0x463: 0x8133, + 0x464: 0x8133, 0x465: 0x8133, 0x466: 0x8133, 0x467: 0x8133, 0x468: 0x8133, 0x469: 0x8133, + 0x46a: 0x8133, 0x46b: 0x8133, 0x46c: 0x8133, 0x46d: 0x8133, 0x46e: 0x8133, 0x46f: 0x8133, + 0x470: 0x8133, 0x471: 0x8133, 0x472: 0x8133, 0x473: 0x8133, 0x474: 0x8133, 0x475: 0x8133, + 0x476: 0x8134, 0x477: 0x8132, 0x478: 0x8132, 0x479: 0x812e, 0x47b: 0x8133, + 0x47c: 0x8135, 0x47d: 0x812e, 0x47e: 0x8133, 0x47f: 0x812e, + // Block 0x12, offset 0x480 + 0x480: 0x2fae, 0x481: 0x32ba, 0x482: 0x2fb8, 0x483: 0x32c4, 0x484: 0x2fbd, 0x485: 0x32c9, + 0x486: 0x2fc2, 0x487: 0x32ce, 0x488: 0x38e3, 0x489: 0x3a72, 0x48a: 0x2fdb, 0x48b: 0x32e7, + 0x48c: 0x2fe5, 0x48d: 0x32f1, 0x48e: 0x2ff4, 0x48f: 0x3300, 0x490: 0x2fea, 0x491: 0x32f6, + 0x492: 0x2fef, 0x493: 0x32fb, 0x494: 0x3906, 0x495: 0x3a95, 0x496: 0x390d, 0x497: 0x3a9c, + 0x498: 0x3030, 0x499: 0x333c, 0x49a: 0x3035, 0x49b: 0x3341, 0x49c: 0x391b, 0x49d: 0x3aaa, + 0x49e: 0x303a, 0x49f: 0x3346, 0x4a0: 0x3049, 0x4a1: 0x3355, 0x4a2: 0x3067, 0x4a3: 0x3373, + 0x4a4: 0x3076, 0x4a5: 0x3382, 0x4a6: 0x306c, 0x4a7: 0x3378, 0x4a8: 0x307b, 0x4a9: 0x3387, + 0x4aa: 0x3080, 0x4ab: 0x338c, 0x4ac: 0x30c6, 0x4ad: 0x33d2, 0x4ae: 0x3922, 0x4af: 0x3ab1, + 0x4b0: 0x30d0, 0x4b1: 0x33e1, 0x4b2: 0x30da, 0x4b3: 0x33eb, 0x4b4: 0x30e4, 0x4b5: 0x33f5, + 0x4b6: 0x46db, 0x4b7: 0x476c, 0x4b8: 0x3929, 0x4b9: 0x3ab8, 0x4ba: 0x30fd, 0x4bb: 0x340e, + 0x4bc: 0x30f8, 0x4bd: 0x3409, 0x4be: 0x3102, 0x4bf: 0x3413, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3107, 0x4c1: 0x3418, 0x4c2: 0x310c, 0x4c3: 0x341d, 0x4c4: 0x3120, 0x4c5: 0x3431, + 0x4c6: 0x312a, 0x4c7: 0x343b, 0x4c8: 0x3139, 0x4c9: 0x344a, 0x4ca: 0x3134, 0x4cb: 0x3445, + 0x4cc: 0x394c, 0x4cd: 0x3adb, 0x4ce: 0x395a, 0x4cf: 0x3ae9, 0x4d0: 0x3961, 0x4d1: 0x3af0, + 0x4d2: 0x3968, 0x4d3: 0x3af7, 0x4d4: 0x3166, 0x4d5: 0x3477, 0x4d6: 0x316b, 0x4d7: 0x347c, + 0x4d8: 0x3175, 0x4d9: 0x3486, 0x4da: 0x4708, 0x4db: 0x4799, 0x4dc: 0x39ae, 0x4dd: 0x3b3d, + 0x4de: 0x318e, 0x4df: 0x349f, 0x4e0: 0x3198, 0x4e1: 0x34a9, 0x4e2: 0x4717, 0x4e3: 0x47a8, + 0x4e4: 0x39b5, 0x4e5: 0x3b44, 0x4e6: 0x39bc, 0x4e7: 0x3b4b, 
0x4e8: 0x39c3, 0x4e9: 0x3b52, + 0x4ea: 0x31a7, 0x4eb: 0x34b8, 0x4ec: 0x31b1, 0x4ed: 0x34c7, 0x4ee: 0x31c5, 0x4ef: 0x34db, + 0x4f0: 0x31c0, 0x4f1: 0x34d6, 0x4f2: 0x3201, 0x4f3: 0x3517, 0x4f4: 0x3210, 0x4f5: 0x3526, + 0x4f6: 0x320b, 0x4f7: 0x3521, 0x4f8: 0x39ca, 0x4f9: 0x3b59, 0x4fa: 0x39d1, 0x4fb: 0x3b60, + 0x4fc: 0x3215, 0x4fd: 0x352b, 0x4fe: 0x321a, 0x4ff: 0x3530, + // Block 0x14, offset 0x500 + 0x500: 0x321f, 0x501: 0x3535, 0x502: 0x3224, 0x503: 0x353a, 0x504: 0x3233, 0x505: 0x3549, + 0x506: 0x322e, 0x507: 0x3544, 0x508: 0x3238, 0x509: 0x3553, 0x50a: 0x323d, 0x50b: 0x3558, + 0x50c: 0x3242, 0x50d: 0x355d, 0x50e: 0x3260, 0x50f: 0x357b, 0x510: 0x3279, 0x511: 0x3599, + 0x512: 0x3288, 0x513: 0x35a8, 0x514: 0x328d, 0x515: 0x35ad, 0x516: 0x3391, 0x517: 0x34bd, + 0x518: 0x354e, 0x519: 0x358a, 0x51b: 0x35e8, + 0x520: 0x46b8, 0x521: 0x4749, 0x522: 0x2f9a, 0x523: 0x32a6, + 0x524: 0x388f, 0x525: 0x3a1e, 0x526: 0x3888, 0x527: 0x3a17, 0x528: 0x389d, 0x529: 0x3a2c, + 0x52a: 0x3896, 0x52b: 0x3a25, 0x52c: 0x38d5, 0x52d: 0x3a64, 0x52e: 0x38ab, 0x52f: 0x3a3a, + 0x530: 0x38a4, 0x531: 0x3a33, 0x532: 0x38b9, 0x533: 0x3a48, 0x534: 0x38b2, 0x535: 0x3a41, + 0x536: 0x38dc, 0x537: 0x3a6b, 0x538: 0x46cc, 0x539: 0x475d, 0x53a: 0x3017, 0x53b: 0x3323, + 0x53c: 0x3003, 0x53d: 0x330f, 0x53e: 0x38f1, 0x53f: 0x3a80, + // Block 0x15, offset 0x540 + 0x540: 0x38ea, 0x541: 0x3a79, 0x542: 0x38ff, 0x543: 0x3a8e, 0x544: 0x38f8, 0x545: 0x3a87, + 0x546: 0x3914, 0x547: 0x3aa3, 0x548: 0x30a8, 0x549: 0x33b4, 0x54a: 0x30bc, 0x54b: 0x33c8, + 0x54c: 0x46fe, 0x54d: 0x478f, 0x54e: 0x314d, 0x54f: 0x345e, 0x550: 0x3937, 0x551: 0x3ac6, + 0x552: 0x3930, 0x553: 0x3abf, 0x554: 0x3945, 0x555: 0x3ad4, 0x556: 0x393e, 0x557: 0x3acd, + 0x558: 0x39a0, 0x559: 0x3b2f, 0x55a: 0x3984, 0x55b: 0x3b13, 0x55c: 0x397d, 0x55d: 0x3b0c, + 0x55e: 0x3992, 0x55f: 0x3b21, 0x560: 0x398b, 0x561: 0x3b1a, 0x562: 0x3999, 0x563: 0x3b28, + 0x564: 0x31fc, 0x565: 0x3512, 0x566: 0x31de, 0x567: 0x34f4, 0x568: 0x39fb, 0x569: 0x3b8a, + 0x56a: 0x39f4, 0x56b: 0x3b83, 0x56c: 0x3a09, 0x56d: 0x3b98, 0x56e: 0x3a02, 0x56f: 0x3b91, + 0x570: 0x3a10, 0x571: 0x3b9f, 0x572: 0x3247, 0x573: 0x3562, 0x574: 0x326f, 0x575: 0x358f, + 0x576: 0x326a, 0x577: 0x3585, 0x578: 0x3256, 0x579: 0x3571, + // Block 0x16, offset 0x580 + 0x580: 0x481b, 0x581: 0x4821, 0x582: 0x4935, 0x583: 0x494d, 0x584: 0x493d, 0x585: 0x4955, + 0x586: 0x4945, 0x587: 0x495d, 0x588: 0x47c1, 0x589: 0x47c7, 0x58a: 0x48a5, 0x58b: 0x48bd, + 0x58c: 0x48ad, 0x58d: 0x48c5, 0x58e: 0x48b5, 0x58f: 0x48cd, 0x590: 0x482d, 0x591: 0x4833, + 0x592: 0x3dcf, 0x593: 0x3ddf, 0x594: 0x3dd7, 0x595: 0x3de7, + 0x598: 0x47cd, 0x599: 0x47d3, 0x59a: 0x3cff, 0x59b: 0x3d0f, 0x59c: 0x3d07, 0x59d: 0x3d17, + 0x5a0: 0x4845, 0x5a1: 0x484b, 0x5a2: 0x4965, 0x5a3: 0x497d, + 0x5a4: 0x496d, 0x5a5: 0x4985, 0x5a6: 0x4975, 0x5a7: 0x498d, 0x5a8: 0x47d9, 0x5a9: 0x47df, + 0x5aa: 0x48d5, 0x5ab: 0x48ed, 0x5ac: 0x48dd, 0x5ad: 0x48f5, 0x5ae: 0x48e5, 0x5af: 0x48fd, + 0x5b0: 0x485d, 0x5b1: 0x4863, 0x5b2: 0x3e2f, 0x5b3: 0x3e47, 0x5b4: 0x3e37, 0x5b5: 0x3e4f, + 0x5b6: 0x3e3f, 0x5b7: 0x3e57, 0x5b8: 0x47e5, 0x5b9: 0x47eb, 0x5ba: 0x3d2f, 0x5bb: 0x3d47, + 0x5bc: 0x3d37, 0x5bd: 0x3d4f, 0x5be: 0x3d3f, 0x5bf: 0x3d57, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4869, 0x5c1: 0x486f, 0x5c2: 0x3e5f, 0x5c3: 0x3e6f, 0x5c4: 0x3e67, 0x5c5: 0x3e77, + 0x5c8: 0x47f1, 0x5c9: 0x47f7, 0x5ca: 0x3d5f, 0x5cb: 0x3d6f, + 0x5cc: 0x3d67, 0x5cd: 0x3d77, 0x5d0: 0x487b, 0x5d1: 0x4881, + 0x5d2: 0x3e97, 0x5d3: 0x3eaf, 0x5d4: 0x3e9f, 0x5d5: 0x3eb7, 0x5d6: 0x3ea7, 0x5d7: 0x3ebf, + 0x5d9: 0x47fd, 0x5db: 0x3d7f, 0x5dd: 
0x3d87, + 0x5df: 0x3d8f, 0x5e0: 0x4893, 0x5e1: 0x4899, 0x5e2: 0x4995, 0x5e3: 0x49ad, + 0x5e4: 0x499d, 0x5e5: 0x49b5, 0x5e6: 0x49a5, 0x5e7: 0x49bd, 0x5e8: 0x4803, 0x5e9: 0x4809, + 0x5ea: 0x4905, 0x5eb: 0x491d, 0x5ec: 0x490d, 0x5ed: 0x4925, 0x5ee: 0x4915, 0x5ef: 0x492d, + 0x5f0: 0x480f, 0x5f1: 0x4335, 0x5f2: 0x36a8, 0x5f3: 0x433b, 0x5f4: 0x4839, 0x5f5: 0x4341, + 0x5f6: 0x36ba, 0x5f7: 0x4347, 0x5f8: 0x36d8, 0x5f9: 0x434d, 0x5fa: 0x36f0, 0x5fb: 0x4353, + 0x5fc: 0x4887, 0x5fd: 0x4359, + // Block 0x18, offset 0x600 + 0x600: 0x3db7, 0x601: 0x3dbf, 0x602: 0x419b, 0x603: 0x41b9, 0x604: 0x41a5, 0x605: 0x41c3, + 0x606: 0x41af, 0x607: 0x41cd, 0x608: 0x3cef, 0x609: 0x3cf7, 0x60a: 0x40e7, 0x60b: 0x4105, + 0x60c: 0x40f1, 0x60d: 0x410f, 0x60e: 0x40fb, 0x60f: 0x4119, 0x610: 0x3dff, 0x611: 0x3e07, + 0x612: 0x41d7, 0x613: 0x41f5, 0x614: 0x41e1, 0x615: 0x41ff, 0x616: 0x41eb, 0x617: 0x4209, + 0x618: 0x3d1f, 0x619: 0x3d27, 0x61a: 0x4123, 0x61b: 0x4141, 0x61c: 0x412d, 0x61d: 0x414b, + 0x61e: 0x4137, 0x61f: 0x4155, 0x620: 0x3ed7, 0x621: 0x3edf, 0x622: 0x4213, 0x623: 0x4231, + 0x624: 0x421d, 0x625: 0x423b, 0x626: 0x4227, 0x627: 0x4245, 0x628: 0x3d97, 0x629: 0x3d9f, + 0x62a: 0x415f, 0x62b: 0x417d, 0x62c: 0x4169, 0x62d: 0x4187, 0x62e: 0x4173, 0x62f: 0x4191, + 0x630: 0x369c, 0x631: 0x3696, 0x632: 0x3da7, 0x633: 0x36a2, 0x634: 0x3daf, + 0x636: 0x4827, 0x637: 0x3dc7, 0x638: 0x360c, 0x639: 0x3606, 0x63a: 0x35fa, 0x63b: 0x4305, + 0x63c: 0x3612, 0x63d: 0x8100, 0x63e: 0x01d6, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35be, 0x642: 0x3def, 0x643: 0x36b4, 0x644: 0x3df7, + 0x646: 0x4851, 0x647: 0x3e0f, 0x648: 0x3618, 0x649: 0x430b, 0x64a: 0x3624, 0x64b: 0x4311, + 0x64c: 0x3630, 0x64d: 0x3ba6, 0x64e: 0x3bad, 0x64f: 0x3bb4, 0x650: 0x36cc, 0x651: 0x36c6, + 0x652: 0x3e17, 0x653: 0x44fb, 0x656: 0x36d2, 0x657: 0x3e27, + 0x658: 0x3648, 0x659: 0x3642, 0x65a: 0x3636, 0x65b: 0x4317, 0x65d: 0x3bbb, + 0x65e: 0x3bc2, 0x65f: 0x3bc9, 0x660: 0x3702, 0x661: 0x36fc, 0x662: 0x3e7f, 0x663: 0x4503, + 0x664: 0x36e4, 0x665: 0x36ea, 0x666: 0x3708, 0x667: 0x3e8f, 0x668: 0x3678, 0x669: 0x3672, + 0x66a: 0x3666, 0x66b: 0x4323, 0x66c: 0x3660, 0x66d: 0x35b2, 0x66e: 0x42ff, 0x66f: 0x0081, + 0x672: 0x3ec7, 0x673: 0x370e, 0x674: 0x3ecf, + 0x676: 0x489f, 0x677: 0x3ee7, 0x678: 0x3654, 0x679: 0x431d, 0x67a: 0x3684, 0x67b: 0x432f, + 0x67c: 0x3690, 0x67d: 0x426d, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c1d, 0x683: 0xa000, 0x684: 0x3c24, 0x685: 0xa000, + 0x687: 0x3c2b, 0x688: 0xa000, 0x689: 0x3c32, + 0x68d: 0xa000, + 0x6a0: 0x2f7c, 0x6a1: 0xa000, 0x6a2: 0x3c40, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c39, 0x6ae: 0x2f77, 0x6af: 0x2f81, + 0x6b0: 0x3c47, 0x6b1: 0x3c4e, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c55, 0x6b5: 0x3c5c, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c63, 0x6b9: 0x3c6a, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c71, 0x6c1: 0x3c78, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c8d, 0x6c5: 0x3c94, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c9b, 0x6c9: 0x3ca2, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3cb7, 0x6ed: 0x3cbe, 0x6ee: 0x3cc5, 0x6ef: 0x3ccc, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f1f, 0x70d: 0xa000, 0x70e: 0x3f27, 0x70f: 0xa000, 0x710: 0x3f2f, 0x711: 0xa000, + 0x712: 0x3f37, 0x713: 0xa000, 0x714: 0x3f3f, 0x715: 0xa000, 0x716: 0x3f47, 0x717: 0xa000, + 0x718: 0x3f4f, 0x719: 
0xa000, 0x71a: 0x3f57, 0x71b: 0xa000, 0x71c: 0x3f5f, 0x71d: 0xa000, + 0x71e: 0x3f67, 0x71f: 0xa000, 0x720: 0x3f6f, 0x721: 0xa000, 0x722: 0x3f77, + 0x724: 0xa000, 0x725: 0x3f7f, 0x726: 0xa000, 0x727: 0x3f87, 0x728: 0xa000, 0x729: 0x3f8f, + 0x72f: 0xa000, + 0x730: 0x3f97, 0x731: 0x3f9f, 0x732: 0xa000, 0x733: 0x3fa7, 0x734: 0x3faf, 0x735: 0xa000, + 0x736: 0x3fb7, 0x737: 0x3fbf, 0x738: 0xa000, 0x739: 0x3fc7, 0x73a: 0x3fcf, 0x73b: 0xa000, + 0x73c: 0x3fd7, 0x73d: 0x3fdf, + // Block 0x1d, offset 0x740 + 0x754: 0x3f17, + 0x759: 0x9904, 0x75a: 0x9904, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fe7, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3ff7, 0x76d: 0xa000, 0x76e: 0x3fff, 0x76f: 0xa000, + 0x770: 0x4007, 0x771: 0xa000, 0x772: 0x400f, 0x773: 0xa000, 0x774: 0x4017, 0x775: 0xa000, + 0x776: 0x401f, 0x777: 0xa000, 0x778: 0x4027, 0x779: 0xa000, 0x77a: 0x402f, 0x77b: 0xa000, + 0x77c: 0x4037, 0x77d: 0xa000, 0x77e: 0x403f, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4047, 0x781: 0xa000, 0x782: 0x404f, 0x784: 0xa000, 0x785: 0x4057, + 0x786: 0xa000, 0x787: 0x405f, 0x788: 0xa000, 0x789: 0x4067, + 0x78f: 0xa000, 0x790: 0x406f, 0x791: 0x4077, + 0x792: 0xa000, 0x793: 0x407f, 0x794: 0x4087, 0x795: 0xa000, 0x796: 0x408f, 0x797: 0x4097, + 0x798: 0xa000, 0x799: 0x409f, 0x79a: 0x40a7, 0x79b: 0xa000, 0x79c: 0x40af, 0x79d: 0x40b7, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fef, + 0x7b7: 0x40bf, 0x7b8: 0x40c7, 0x7b9: 0x40cf, 0x7ba: 0x40d7, + 0x7bd: 0xa000, 0x7be: 0x40df, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x137a, 0x7c1: 0x0cfe, 0x7c2: 0x13d6, 0x7c3: 0x13a2, 0x7c4: 0x0e5a, 0x7c5: 0x06ee, + 0x7c6: 0x08e2, 0x7c7: 0x162e, 0x7c8: 0x162e, 0x7c9: 0x0a0e, 0x7ca: 0x1462, 0x7cb: 0x0946, + 0x7cc: 0x0a0a, 0x7cd: 0x0bf2, 0x7ce: 0x0fd2, 0x7cf: 0x1162, 0x7d0: 0x129a, 0x7d1: 0x12d6, + 0x7d2: 0x130a, 0x7d3: 0x141e, 0x7d4: 0x0d76, 0x7d5: 0x0e02, 0x7d6: 0x0eae, 0x7d7: 0x0f46, + 0x7d8: 0x1262, 0x7d9: 0x144a, 0x7da: 0x1576, 0x7db: 0x0712, 0x7dc: 0x08b6, 0x7dd: 0x0d8a, + 0x7de: 0x0ed2, 0x7df: 0x1296, 0x7e0: 0x15c6, 0x7e1: 0x0ab6, 0x7e2: 0x0e7a, 0x7e3: 0x1286, + 0x7e4: 0x131a, 0x7e5: 0x0c26, 0x7e6: 0x11be, 0x7e7: 0x12e2, 0x7e8: 0x0b22, 0x7e9: 0x0d12, + 0x7ea: 0x0e1a, 0x7eb: 0x0f1e, 0x7ec: 0x142a, 0x7ed: 0x0752, 0x7ee: 0x07ea, 0x7ef: 0x0856, + 0x7f0: 0x0c8e, 0x7f1: 0x0d82, 0x7f2: 0x0ece, 0x7f3: 0x0ff2, 0x7f4: 0x117a, 0x7f5: 0x128e, + 0x7f6: 0x12a6, 0x7f7: 0x13ca, 0x7f8: 0x14f2, 0x7f9: 0x15a6, 0x7fa: 0x15c2, 0x7fb: 0x102e, + 0x7fc: 0x106e, 0x7fd: 0x1126, 0x7fe: 0x1246, 0x7ff: 0x147e, + // Block 0x20, offset 0x800 + 0x800: 0x15ce, 0x801: 0x134e, 0x802: 0x09ca, 0x803: 0x0b3e, 0x804: 0x10de, 0x805: 0x119e, + 0x806: 0x0f02, 0x807: 0x1036, 0x808: 0x139a, 0x809: 0x14ea, 0x80a: 0x09c6, 0x80b: 0x0a92, + 0x80c: 0x0d7a, 0x80d: 0x0e2e, 0x80e: 0x0e62, 0x80f: 0x1116, 0x810: 0x113e, 0x811: 0x14aa, + 0x812: 0x0852, 0x813: 0x11aa, 0x814: 0x07f6, 0x815: 0x07f2, 0x816: 0x109a, 0x817: 0x112a, + 0x818: 0x125e, 0x819: 0x14b2, 0x81a: 0x136a, 0x81b: 0x0c2a, 0x81c: 0x0d76, 0x81d: 0x135a, + 0x81e: 0x06fa, 0x81f: 0x0a66, 0x820: 0x0b96, 0x821: 0x0f32, 0x822: 0x0fb2, 0x823: 0x0876, + 0x824: 0x103e, 0x825: 0x0762, 0x826: 0x0b7a, 0x827: 0x06da, 0x828: 0x0dee, 0x829: 0x0ca6, + 0x82a: 0x1112, 0x82b: 0x08ca, 0x82c: 0x09b6, 0x82d: 0x0ffe, 0x82e: 0x1266, 0x82f: 0x133e, + 0x830: 0x0dba, 0x831: 0x13fa, 0x832: 0x0de6, 0x833: 0x0c3a, 0x834: 0x121e, 0x835: 0x0c5a, + 0x836: 0x0fae, 0x837: 0x072e, 0x838: 0x07aa, 0x839: 0x07ee, 0x83a: 0x0d56, 0x83b: 0x10fe, + 0x83c: 0x11f6, 0x83d: 0x134a, 0x83e: 0x145e, 0x83f: 0x085e, + // 
Block 0x21, offset 0x840 + 0x840: 0x0912, 0x841: 0x0a1a, 0x842: 0x0b32, 0x843: 0x0cc2, 0x844: 0x0e7e, 0x845: 0x1042, + 0x846: 0x149a, 0x847: 0x157e, 0x848: 0x15d2, 0x849: 0x15ea, 0x84a: 0x083a, 0x84b: 0x0cf6, + 0x84c: 0x0da6, 0x84d: 0x13ee, 0x84e: 0x0afe, 0x84f: 0x0bda, 0x850: 0x0bf6, 0x851: 0x0c86, + 0x852: 0x0e6e, 0x853: 0x0eba, 0x854: 0x0f6a, 0x855: 0x108e, 0x856: 0x1132, 0x857: 0x1196, + 0x858: 0x13de, 0x859: 0x126e, 0x85a: 0x1406, 0x85b: 0x1482, 0x85c: 0x0812, 0x85d: 0x083e, + 0x85e: 0x0926, 0x85f: 0x0eaa, 0x860: 0x12f6, 0x861: 0x133e, 0x862: 0x0b1e, 0x863: 0x0b8e, + 0x864: 0x0c52, 0x865: 0x0db2, 0x866: 0x10da, 0x867: 0x0f26, 0x868: 0x073e, 0x869: 0x0982, + 0x86a: 0x0a66, 0x86b: 0x0aca, 0x86c: 0x0b9a, 0x86d: 0x0f42, 0x86e: 0x0f5e, 0x86f: 0x116e, + 0x870: 0x118e, 0x871: 0x1466, 0x872: 0x14e6, 0x873: 0x14f6, 0x874: 0x1532, 0x875: 0x0756, + 0x876: 0x1082, 0x877: 0x1452, 0x878: 0x14ce, 0x879: 0x0bb2, 0x87a: 0x071a, 0x87b: 0x077a, + 0x87c: 0x0a6a, 0x87d: 0x0a8a, 0x87e: 0x0cb2, 0x87f: 0x0d76, + // Block 0x22, offset 0x880 + 0x880: 0x0ec6, 0x881: 0x0fce, 0x882: 0x127a, 0x883: 0x141a, 0x884: 0x1626, 0x885: 0x0ce6, + 0x886: 0x14a6, 0x887: 0x0836, 0x888: 0x0d32, 0x889: 0x0d3e, 0x88a: 0x0e12, 0x88b: 0x0e4a, + 0x88c: 0x0f4e, 0x88d: 0x0faa, 0x88e: 0x102a, 0x88f: 0x110e, 0x890: 0x153e, 0x891: 0x07b2, + 0x892: 0x0c06, 0x893: 0x14b6, 0x894: 0x076a, 0x895: 0x0aae, 0x896: 0x0e32, 0x897: 0x13e2, + 0x898: 0x0b6a, 0x899: 0x0bba, 0x89a: 0x0d46, 0x89b: 0x0f32, 0x89c: 0x14be, 0x89d: 0x081a, + 0x89e: 0x0902, 0x89f: 0x0a9a, 0x8a0: 0x0cd6, 0x8a1: 0x0d22, 0x8a2: 0x0d62, 0x8a3: 0x0df6, + 0x8a4: 0x0f4a, 0x8a5: 0x0fbe, 0x8a6: 0x115a, 0x8a7: 0x12fa, 0x8a8: 0x1306, 0x8a9: 0x145a, + 0x8aa: 0x14da, 0x8ab: 0x0886, 0x8ac: 0x0e4e, 0x8ad: 0x0906, 0x8ae: 0x0eca, 0x8af: 0x0f6e, + 0x8b0: 0x128a, 0x8b1: 0x14c2, 0x8b2: 0x15ae, 0x8b3: 0x15d6, 0x8b4: 0x0d3a, 0x8b5: 0x0e2a, + 0x8b6: 0x11c6, 0x8b7: 0x10ba, 0x8b8: 0x10c6, 0x8b9: 0x10ea, 0x8ba: 0x0f1a, 0x8bb: 0x0ea2, + 0x8bc: 0x1366, 0x8bd: 0x0736, 0x8be: 0x122e, 0x8bf: 0x081e, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080e, 0x8c1: 0x0b0e, 0x8c2: 0x0c2e, 0x8c3: 0x10f6, 0x8c4: 0x0a56, 0x8c5: 0x0e06, + 0x8c6: 0x0cf2, 0x8c7: 0x13ea, 0x8c8: 0x12ea, 0x8c9: 0x14ae, 0x8ca: 0x1326, 0x8cb: 0x0b2a, + 0x8cc: 0x078a, 0x8cd: 0x095e, 0x8d0: 0x09b2, + 0x8d2: 0x0ce2, 0x8d5: 0x07fa, 0x8d6: 0x0f22, 0x8d7: 0x0fe6, + 0x8d8: 0x104a, 0x8d9: 0x1066, 0x8da: 0x106a, 0x8db: 0x107e, 0x8dc: 0x14fe, 0x8dd: 0x10ee, + 0x8de: 0x1172, 0x8e0: 0x1292, 0x8e2: 0x1356, + 0x8e5: 0x140a, 0x8e6: 0x1436, + 0x8ea: 0x1552, 0x8eb: 0x1556, 0x8ec: 0x155a, 0x8ed: 0x15be, 0x8ee: 0x142e, 0x8ef: 0x14ca, + 0x8f0: 0x075a, 0x8f1: 0x077e, 0x8f2: 0x0792, 0x8f3: 0x084e, 0x8f4: 0x085a, 0x8f5: 0x089a, + 0x8f6: 0x094e, 0x8f7: 0x096a, 0x8f8: 0x0972, 0x8f9: 0x09ae, 0x8fa: 0x09ba, 0x8fb: 0x0a96, + 0x8fc: 0x0a9e, 0x8fd: 0x0ba6, 0x8fe: 0x0bce, 0x8ff: 0x0bd6, + // Block 0x24, offset 0x900 + 0x900: 0x0bee, 0x901: 0x0c9a, 0x902: 0x0cca, 0x903: 0x0cea, 0x904: 0x0d5a, 0x905: 0x0e1e, + 0x906: 0x0e3a, 0x907: 0x0e6a, 0x908: 0x0ebe, 0x909: 0x0ede, 0x90a: 0x0f52, 0x90b: 0x1032, + 0x90c: 0x104e, 0x90d: 0x1056, 0x90e: 0x1052, 0x90f: 0x105a, 0x910: 0x105e, 0x911: 0x1062, + 0x912: 0x1076, 0x913: 0x107a, 0x914: 0x109e, 0x915: 0x10b2, 0x916: 0x10ce, 0x917: 0x1132, + 0x918: 0x113a, 0x919: 0x1142, 0x91a: 0x1156, 0x91b: 0x117e, 0x91c: 0x11ce, 0x91d: 0x1202, + 0x91e: 0x1202, 0x91f: 0x126a, 0x920: 0x1312, 0x921: 0x132a, 0x922: 0x135e, 0x923: 0x1362, + 0x924: 0x13a6, 0x925: 0x13aa, 0x926: 0x1402, 0x927: 0x140a, 0x928: 0x14de, 0x929: 0x1522, + 0x92a: 0x153a, 0x92b: 
0x0b9e, 0x92c: 0x1721, 0x92d: 0x11e6, + 0x930: 0x06e2, 0x931: 0x07e6, 0x932: 0x07a6, 0x933: 0x074e, 0x934: 0x078e, 0x935: 0x07ba, + 0x936: 0x084a, 0x937: 0x0866, 0x938: 0x094e, 0x939: 0x093a, 0x93a: 0x094a, 0x93b: 0x0966, + 0x93c: 0x09b2, 0x93d: 0x09c2, 0x93e: 0x0a06, 0x93f: 0x0a12, + // Block 0x25, offset 0x940 + 0x940: 0x0a2e, 0x941: 0x0a3e, 0x942: 0x0b26, 0x943: 0x0b2e, 0x944: 0x0b5e, 0x945: 0x0b7e, + 0x946: 0x0bae, 0x947: 0x0bc6, 0x948: 0x0bb6, 0x949: 0x0bd6, 0x94a: 0x0bca, 0x94b: 0x0bee, + 0x94c: 0x0c0a, 0x94d: 0x0c62, 0x94e: 0x0c6e, 0x94f: 0x0c76, 0x950: 0x0c9e, 0x951: 0x0ce2, + 0x952: 0x0d12, 0x953: 0x0d16, 0x954: 0x0d2a, 0x955: 0x0daa, 0x956: 0x0dba, 0x957: 0x0e12, + 0x958: 0x0e5e, 0x959: 0x0e56, 0x95a: 0x0e6a, 0x95b: 0x0e86, 0x95c: 0x0ebe, 0x95d: 0x1016, + 0x95e: 0x0ee2, 0x95f: 0x0f16, 0x960: 0x0f22, 0x961: 0x0f62, 0x962: 0x0f7e, 0x963: 0x0fa2, + 0x964: 0x0fc6, 0x965: 0x0fca, 0x966: 0x0fe6, 0x967: 0x0fea, 0x968: 0x0ffa, 0x969: 0x100e, + 0x96a: 0x100a, 0x96b: 0x103a, 0x96c: 0x10b6, 0x96d: 0x10ce, 0x96e: 0x10e6, 0x96f: 0x111e, + 0x970: 0x1132, 0x971: 0x114e, 0x972: 0x117e, 0x973: 0x1232, 0x974: 0x125a, 0x975: 0x12ce, + 0x976: 0x1316, 0x977: 0x1322, 0x978: 0x132a, 0x979: 0x1342, 0x97a: 0x1356, 0x97b: 0x1346, + 0x97c: 0x135e, 0x97d: 0x135a, 0x97e: 0x1352, 0x97f: 0x1362, + // Block 0x26, offset 0x980 + 0x980: 0x136e, 0x981: 0x13aa, 0x982: 0x13e6, 0x983: 0x1416, 0x984: 0x144e, 0x985: 0x146e, + 0x986: 0x14ba, 0x987: 0x14de, 0x988: 0x14fe, 0x989: 0x1512, 0x98a: 0x1522, 0x98b: 0x152e, + 0x98c: 0x153a, 0x98d: 0x158e, 0x98e: 0x162e, 0x98f: 0x16b8, 0x990: 0x16b3, 0x991: 0x16e5, + 0x992: 0x060a, 0x993: 0x0632, 0x994: 0x0636, 0x995: 0x1767, 0x996: 0x1794, 0x997: 0x180c, + 0x998: 0x161a, 0x999: 0x162a, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fe, 0x9c1: 0x06f6, 0x9c2: 0x0706, 0x9c3: 0x164a, 0x9c4: 0x074a, 0x9c5: 0x075a, + 0x9c6: 0x075e, 0x9c7: 0x0766, 0x9c8: 0x076e, 0x9c9: 0x0772, 0x9ca: 0x077e, 0x9cb: 0x0776, + 0x9cc: 0x05b6, 0x9cd: 0x165e, 0x9ce: 0x0792, 0x9cf: 0x0796, 0x9d0: 0x079a, 0x9d1: 0x07b6, + 0x9d2: 0x164f, 0x9d3: 0x05ba, 0x9d4: 0x07a2, 0x9d5: 0x07c2, 0x9d6: 0x1659, 0x9d7: 0x07d2, + 0x9d8: 0x07da, 0x9d9: 0x073a, 0x9da: 0x07e2, 0x9db: 0x07e6, 0x9dc: 0x1834, 0x9dd: 0x0802, + 0x9de: 0x080a, 0x9df: 0x05c2, 0x9e0: 0x0822, 0x9e1: 0x0826, 0x9e2: 0x082e, 0x9e3: 0x0832, + 0x9e4: 0x05c6, 0x9e5: 0x084a, 0x9e6: 0x084e, 0x9e7: 0x085a, 0x9e8: 0x0866, 0x9e9: 0x086a, + 0x9ea: 0x086e, 0x9eb: 0x0876, 0x9ec: 0x0896, 0x9ed: 0x089a, 0x9ee: 0x08a2, 0x9ef: 0x08b2, + 0x9f0: 0x08ba, 0x9f1: 0x08be, 0x9f2: 0x08be, 0x9f3: 0x08be, 0x9f4: 0x166d, 0x9f5: 0x0e96, + 0x9f6: 0x08d2, 0x9f7: 0x08da, 0x9f8: 0x1672, 0x9f9: 0x08e6, 0x9fa: 0x08ee, 0x9fb: 0x08f6, + 0x9fc: 0x091e, 0x9fd: 0x090a, 0x9fe: 0x0916, 0x9ff: 0x091a, + // Block 0x28, offset 0xa00 + 0xa00: 0x0922, 0xa01: 0x092a, 0xa02: 0x092e, 0xa03: 0x0936, 0xa04: 0x093e, 0xa05: 0x0942, + 0xa06: 0x0942, 0xa07: 0x094a, 0xa08: 0x0952, 0xa09: 0x0956, 0xa0a: 0x0962, 0xa0b: 0x0986, + 0xa0c: 0x096a, 0xa0d: 0x098a, 0xa0e: 0x096e, 0xa0f: 0x0976, 0xa10: 0x080e, 0xa11: 0x09d2, + 0xa12: 0x099a, 0xa13: 0x099e, 0xa14: 0x09a2, 0xa15: 0x0996, 0xa16: 0x09aa, 0xa17: 0x09a6, + 0xa18: 0x09be, 0xa19: 0x1677, 0xa1a: 0x09da, 0xa1b: 0x09de, 0xa1c: 0x09e6, 0xa1d: 0x09f2, + 0xa1e: 0x09fa, 0xa1f: 0x0a16, 0xa20: 0x167c, 0xa21: 0x1681, 0xa22: 0x0a22, 0xa23: 0x0a26, + 0xa24: 0x0a2a, 0xa25: 0x0a1e, 0xa26: 0x0a32, 0xa27: 0x05ca, 0xa28: 0x05ce, 0xa29: 0x0a3a, + 0xa2a: 0x0a42, 0xa2b: 0x0a42, 0xa2c: 0x1686, 0xa2d: 0x0a5e, 0xa2e: 0x0a62, 0xa2f: 0x0a66, + 0xa30: 0x0a6e, 0xa31: 0x168b, 0xa32: 0x0a76, 
0xa33: 0x0a7a, 0xa34: 0x0b52, 0xa35: 0x0a82, + 0xa36: 0x05d2, 0xa37: 0x0a8e, 0xa38: 0x0a9e, 0xa39: 0x0aaa, 0xa3a: 0x0aa6, 0xa3b: 0x1695, + 0xa3c: 0x0ab2, 0xa3d: 0x169a, 0xa3e: 0x0abe, 0xa3f: 0x0aba, + // Block 0x29, offset 0xa40 + 0xa40: 0x0ac2, 0xa41: 0x0ad2, 0xa42: 0x0ad6, 0xa43: 0x05d6, 0xa44: 0x0ae6, 0xa45: 0x0aee, + 0xa46: 0x0af2, 0xa47: 0x0af6, 0xa48: 0x05da, 0xa49: 0x169f, 0xa4a: 0x05de, 0xa4b: 0x0b12, + 0xa4c: 0x0b16, 0xa4d: 0x0b1a, 0xa4e: 0x0b22, 0xa4f: 0x1866, 0xa50: 0x0b3a, 0xa51: 0x16a9, + 0xa52: 0x16a9, 0xa53: 0x11da, 0xa54: 0x0b4a, 0xa55: 0x0b4a, 0xa56: 0x05e2, 0xa57: 0x16cc, + 0xa58: 0x179e, 0xa59: 0x0b5a, 0xa5a: 0x0b62, 0xa5b: 0x05e6, 0xa5c: 0x0b76, 0xa5d: 0x0b86, + 0xa5e: 0x0b8a, 0xa5f: 0x0b92, 0xa60: 0x0ba2, 0xa61: 0x05ee, 0xa62: 0x05ea, 0xa63: 0x0ba6, + 0xa64: 0x16ae, 0xa65: 0x0baa, 0xa66: 0x0bbe, 0xa67: 0x0bc2, 0xa68: 0x0bc6, 0xa69: 0x0bc2, + 0xa6a: 0x0bd2, 0xa6b: 0x0bd6, 0xa6c: 0x0be6, 0xa6d: 0x0bde, 0xa6e: 0x0be2, 0xa6f: 0x0bea, + 0xa70: 0x0bee, 0xa71: 0x0bf2, 0xa72: 0x0bfe, 0xa73: 0x0c02, 0xa74: 0x0c1a, 0xa75: 0x0c22, + 0xa76: 0x0c32, 0xa77: 0x0c46, 0xa78: 0x16bd, 0xa79: 0x0c42, 0xa7a: 0x0c36, 0xa7b: 0x0c4e, + 0xa7c: 0x0c56, 0xa7d: 0x0c6a, 0xa7e: 0x16c2, 0xa7f: 0x0c72, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c66, 0xa81: 0x0c5e, 0xa82: 0x05f2, 0xa83: 0x0c7a, 0xa84: 0x0c82, 0xa85: 0x0c8a, + 0xa86: 0x0c7e, 0xa87: 0x05f6, 0xa88: 0x0c9a, 0xa89: 0x0ca2, 0xa8a: 0x16c7, 0xa8b: 0x0cce, + 0xa8c: 0x0d02, 0xa8d: 0x0cde, 0xa8e: 0x0602, 0xa8f: 0x0cea, 0xa90: 0x05fe, 0xa91: 0x05fa, + 0xa92: 0x07c6, 0xa93: 0x07ca, 0xa94: 0x0d06, 0xa95: 0x0cee, 0xa96: 0x11ae, 0xa97: 0x0666, + 0xa98: 0x0d12, 0xa99: 0x0d16, 0xa9a: 0x0d1a, 0xa9b: 0x0d2e, 0xa9c: 0x0d26, 0xa9d: 0x16e0, + 0xa9e: 0x0606, 0xa9f: 0x0d42, 0xaa0: 0x0d36, 0xaa1: 0x0d52, 0xaa2: 0x0d5a, 0xaa3: 0x16ea, + 0xaa4: 0x0d5e, 0xaa5: 0x0d4a, 0xaa6: 0x0d66, 0xaa7: 0x060a, 0xaa8: 0x0d6a, 0xaa9: 0x0d6e, + 0xaaa: 0x0d72, 0xaab: 0x0d7e, 0xaac: 0x16ef, 0xaad: 0x0d86, 0xaae: 0x060e, 0xaaf: 0x0d92, + 0xab0: 0x16f4, 0xab1: 0x0d96, 0xab2: 0x0612, 0xab3: 0x0da2, 0xab4: 0x0dae, 0xab5: 0x0dba, + 0xab6: 0x0dbe, 0xab7: 0x16f9, 0xab8: 0x1690, 0xab9: 0x16fe, 0xaba: 0x0dde, 0xabb: 0x1703, + 0xabc: 0x0dea, 0xabd: 0x0df2, 0xabe: 0x0de2, 0xabf: 0x0dfe, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0e, 0xac1: 0x0e1e, 0xac2: 0x0e12, 0xac3: 0x0e16, 0xac4: 0x0e22, 0xac5: 0x0e26, + 0xac6: 0x1708, 0xac7: 0x0e0a, 0xac8: 0x0e3e, 0xac9: 0x0e42, 0xaca: 0x0616, 0xacb: 0x0e56, + 0xacc: 0x0e52, 0xacd: 0x170d, 0xace: 0x0e36, 0xacf: 0x0e72, 0xad0: 0x1712, 0xad1: 0x1717, + 0xad2: 0x0e76, 0xad3: 0x0e8a, 0xad4: 0x0e86, 0xad5: 0x0e82, 0xad6: 0x061a, 0xad7: 0x0e8e, + 0xad8: 0x0e9e, 0xad9: 0x0e9a, 0xada: 0x0ea6, 0xadb: 0x1654, 0xadc: 0x0eb6, 0xadd: 0x171c, + 0xade: 0x0ec2, 0xadf: 0x1726, 0xae0: 0x0ed6, 0xae1: 0x0ee2, 0xae2: 0x0ef6, 0xae3: 0x172b, + 0xae4: 0x0f0a, 0xae5: 0x0f0e, 0xae6: 0x1730, 0xae7: 0x1735, 0xae8: 0x0f2a, 0xae9: 0x0f3a, + 0xaea: 0x061e, 0xaeb: 0x0f3e, 0xaec: 0x0622, 0xaed: 0x0622, 0xaee: 0x0f56, 0xaef: 0x0f5a, + 0xaf0: 0x0f62, 0xaf1: 0x0f66, 0xaf2: 0x0f72, 0xaf3: 0x0626, 0xaf4: 0x0f8a, 0xaf5: 0x173a, + 0xaf6: 0x0fa6, 0xaf7: 0x173f, 0xaf8: 0x0fb2, 0xaf9: 0x16a4, 0xafa: 0x0fc2, 0xafb: 0x1744, + 0xafc: 0x1749, 0xafd: 0x174e, 0xafe: 0x062a, 0xaff: 0x062e, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ffa, 0xb01: 0x1758, 0xb02: 0x1753, 0xb03: 0x175d, 0xb04: 0x1762, 0xb05: 0x1002, + 0xb06: 0x1006, 0xb07: 0x1006, 0xb08: 0x100e, 0xb09: 0x0636, 0xb0a: 0x1012, 0xb0b: 0x063a, + 0xb0c: 0x063e, 0xb0d: 0x176c, 0xb0e: 0x1026, 0xb0f: 0x102e, 0xb10: 0x103a, 0xb11: 0x0642, + 0xb12: 
0x1771, 0xb13: 0x105e, 0xb14: 0x1776, 0xb15: 0x177b, 0xb16: 0x107e, 0xb17: 0x1096, + 0xb18: 0x0646, 0xb19: 0x109e, 0xb1a: 0x10a2, 0xb1b: 0x10a6, 0xb1c: 0x1780, 0xb1d: 0x1785, + 0xb1e: 0x1785, 0xb1f: 0x10be, 0xb20: 0x064a, 0xb21: 0x178a, 0xb22: 0x10d2, 0xb23: 0x10d6, + 0xb24: 0x064e, 0xb25: 0x178f, 0xb26: 0x10f2, 0xb27: 0x0652, 0xb28: 0x1102, 0xb29: 0x10fa, + 0xb2a: 0x110a, 0xb2b: 0x1799, 0xb2c: 0x1122, 0xb2d: 0x0656, 0xb2e: 0x112e, 0xb2f: 0x1136, + 0xb30: 0x1146, 0xb31: 0x065a, 0xb32: 0x17a3, 0xb33: 0x17a8, 0xb34: 0x065e, 0xb35: 0x17ad, + 0xb36: 0x115e, 0xb37: 0x17b2, 0xb38: 0x116a, 0xb39: 0x1176, 0xb3a: 0x117e, 0xb3b: 0x17b7, + 0xb3c: 0x17bc, 0xb3d: 0x1192, 0xb3e: 0x17c1, 0xb3f: 0x119a, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16d1, 0xb41: 0x0662, 0xb42: 0x11b2, 0xb43: 0x11b6, 0xb44: 0x066a, 0xb45: 0x11ba, + 0xb46: 0x0a36, 0xb47: 0x17c6, 0xb48: 0x17cb, 0xb49: 0x16d6, 0xb4a: 0x16db, 0xb4b: 0x11da, + 0xb4c: 0x11de, 0xb4d: 0x13f6, 0xb4e: 0x066e, 0xb4f: 0x120a, 0xb50: 0x1206, 0xb51: 0x120e, + 0xb52: 0x0842, 0xb53: 0x1212, 0xb54: 0x1216, 0xb55: 0x121a, 0xb56: 0x1222, 0xb57: 0x17d0, + 0xb58: 0x121e, 0xb59: 0x1226, 0xb5a: 0x123a, 0xb5b: 0x123e, 0xb5c: 0x122a, 0xb5d: 0x1242, + 0xb5e: 0x1256, 0xb5f: 0x126a, 0xb60: 0x1236, 0xb61: 0x124a, 0xb62: 0x124e, 0xb63: 0x1252, + 0xb64: 0x17d5, 0xb65: 0x17df, 0xb66: 0x17da, 0xb67: 0x0672, 0xb68: 0x1272, 0xb69: 0x1276, + 0xb6a: 0x127e, 0xb6b: 0x17f3, 0xb6c: 0x1282, 0xb6d: 0x17e4, 0xb6e: 0x0676, 0xb6f: 0x067a, + 0xb70: 0x17e9, 0xb71: 0x17ee, 0xb72: 0x067e, 0xb73: 0x12a2, 0xb74: 0x12a6, 0xb75: 0x12aa, + 0xb76: 0x12ae, 0xb77: 0x12ba, 0xb78: 0x12b6, 0xb79: 0x12c2, 0xb7a: 0x12be, 0xb7b: 0x12ce, + 0xb7c: 0x12c6, 0xb7d: 0x12ca, 0xb7e: 0x12d2, 0xb7f: 0x0682, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12da, 0xb81: 0x12de, 0xb82: 0x0686, 0xb83: 0x12ee, 0xb84: 0x12f2, 0xb85: 0x17f8, + 0xb86: 0x12fe, 0xb87: 0x1302, 0xb88: 0x068a, 0xb89: 0x130e, 0xb8a: 0x05be, 0xb8b: 0x17fd, + 0xb8c: 0x1802, 0xb8d: 0x068e, 0xb8e: 0x0692, 0xb8f: 0x133a, 0xb90: 0x1352, 0xb91: 0x136e, + 0xb92: 0x137e, 0xb93: 0x1807, 0xb94: 0x1392, 0xb95: 0x1396, 0xb96: 0x13ae, 0xb97: 0x13ba, + 0xb98: 0x1811, 0xb99: 0x1663, 0xb9a: 0x13c6, 0xb9b: 0x13c2, 0xb9c: 0x13ce, 0xb9d: 0x1668, + 0xb9e: 0x13da, 0xb9f: 0x13e6, 0xba0: 0x1816, 0xba1: 0x181b, 0xba2: 0x1426, 0xba3: 0x1432, + 0xba4: 0x143a, 0xba5: 0x1820, 0xba6: 0x143e, 0xba7: 0x146a, 0xba8: 0x1476, 0xba9: 0x147a, + 0xbaa: 0x1472, 0xbab: 0x1486, 0xbac: 0x148a, 0xbad: 0x1825, 0xbae: 0x1496, 0xbaf: 0x0696, + 0xbb0: 0x149e, 0xbb1: 0x182a, 0xbb2: 0x069a, 0xbb3: 0x14d6, 0xbb4: 0x0ac6, 0xbb5: 0x14ee, + 0xbb6: 0x182f, 0xbb7: 0x1839, 0xbb8: 0x069e, 0xbb9: 0x06a2, 0xbba: 0x1516, 0xbbb: 0x183e, + 0xbbc: 0x06a6, 0xbbd: 0x1843, 0xbbe: 0x152e, 0xbbf: 0x152e, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1536, 0xbc1: 0x1848, 0xbc2: 0x154e, 0xbc3: 0x06aa, 0xbc4: 0x155e, 0xbc5: 0x156a, + 0xbc6: 0x1572, 0xbc7: 0x157a, 0xbc8: 0x06ae, 0xbc9: 0x184d, 0xbca: 0x158e, 0xbcb: 0x15aa, + 0xbcc: 0x15b6, 0xbcd: 0x06b2, 0xbce: 0x06b6, 0xbcf: 0x15ba, 0xbd0: 0x1852, 0xbd1: 0x06ba, + 0xbd2: 0x1857, 0xbd3: 0x185c, 0xbd4: 0x1861, 0xbd5: 0x15de, 0xbd6: 0x06be, 0xbd7: 0x15f2, + 0xbd8: 0x15fa, 0xbd9: 0x15fe, 0xbda: 0x1606, 0xbdb: 0x160e, 0xbdc: 0x1616, 0xbdd: 0x186b, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16b: 0x64, 0x16c: 0x0e, 0x16d: 0x65, 0x16e: 0x66, 0x16f: 0x67, + 0x170: 0x68, 0x173: 0x69, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x6a, 0x183: 0x6b, 0x184: 0x6c, 0x186: 0x6d, 0x187: 0x6e, + 0x188: 0x6f, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x70, 0x18c: 0x71, + 0x1ab: 0x72, + 0x1b3: 0x73, 0x1b5: 0x74, 0x1b7: 0x75, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x76, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x77, 0x1c5: 0x78, + 0x1c9: 0x79, 0x1cc: 0x7a, 0x1cd: 0x7b, + // Block 0x8, offset 0x200 + 0x219: 0x7c, 0x21a: 0x7d, 0x21b: 0x7e, + 0x220: 0x7f, 0x223: 0x80, 0x224: 0x81, 0x225: 0x82, 0x226: 0x83, 0x227: 0x84, + 0x22a: 0x85, 0x22b: 0x86, 0x22f: 0x87, + 0x230: 0x88, 0x231: 0x89, 0x232: 0x8a, 0x233: 0x8b, 0x234: 0x8c, 0x235: 0x8d, 0x236: 0x8e, 0x237: 0x88, + 0x238: 0x89, 0x239: 0x8a, 0x23a: 0x8b, 0x23b: 0x8c, 0x23c: 0x8d, 0x23d: 0x8e, 0x23e: 0x88, 0x23f: 0x89, + // Block 0x9, offset 0x240 + 0x240: 0x8a, 0x241: 0x8b, 0x242: 0x8c, 0x243: 0x8d, 0x244: 0x8e, 0x245: 0x88, 0x246: 0x89, 0x247: 0x8a, + 0x248: 0x8b, 0x249: 0x8c, 0x24a: 0x8d, 0x24b: 0x8e, 0x24c: 0x88, 0x24d: 0x89, 0x24e: 0x8a, 0x24f: 0x8b, + 0x250: 0x8c, 0x251: 0x8d, 0x252: 0x8e, 0x253: 0x88, 0x254: 0x89, 0x255: 0x8a, 0x256: 0x8b, 0x257: 0x8c, + 0x258: 0x8d, 0x259: 0x8e, 0x25a: 0x88, 0x25b: 0x89, 0x25c: 0x8a, 0x25d: 0x8b, 0x25e: 0x8c, 0x25f: 0x8d, + 0x260: 0x8e, 0x261: 0x88, 0x262: 0x89, 0x263: 0x8a, 0x264: 0x8b, 0x265: 0x8c, 0x266: 0x8d, 0x267: 0x8e, + 0x268: 0x88, 0x269: 0x89, 0x26a: 0x8a, 0x26b: 0x8b, 0x26c: 0x8c, 0x26d: 0x8d, 0x26e: 0x8e, 0x26f: 0x88, + 0x270: 0x89, 0x271: 0x8a, 0x272: 0x8b, 0x273: 0x8c, 0x274: 0x8d, 0x275: 0x8e, 0x276: 0x88, 0x277: 0x89, + 0x278: 0x8a, 0x279: 0x8b, 0x27a: 0x8c, 0x27b: 0x8d, 0x27c: 0x8e, 0x27d: 0x88, 0x27e: 0x89, 0x27f: 0x8a, + // Block 0xa, offset 0x280 + 0x280: 0x8b, 0x281: 0x8c, 0x282: 0x8d, 0x283: 0x8e, 0x284: 0x88, 0x285: 0x89, 0x286: 0x8a, 0x287: 0x8b, + 0x288: 0x8c, 0x289: 0x8d, 0x28a: 0x8e, 0x28b: 0x88, 0x28c: 0x89, 0x28d: 0x8a, 0x28e: 0x8b, 0x28f: 0x8c, + 0x290: 0x8d, 0x291: 0x8e, 0x292: 0x88, 0x293: 0x89, 0x294: 0x8a, 0x295: 0x8b, 0x296: 0x8c, 0x297: 0x8d, + 0x298: 0x8e, 0x299: 0x88, 0x29a: 0x89, 0x29b: 0x8a, 0x29c: 0x8b, 0x29d: 0x8c, 0x29e: 0x8d, 0x29f: 0x8e, + 0x2a0: 0x88, 0x2a1: 0x89, 
0x2a2: 0x8a, 0x2a3: 0x8b, 0x2a4: 0x8c, 0x2a5: 0x8d, 0x2a6: 0x8e, 0x2a7: 0x88, + 0x2a8: 0x89, 0x2a9: 0x8a, 0x2aa: 0x8b, 0x2ab: 0x8c, 0x2ac: 0x8d, 0x2ad: 0x8e, 0x2ae: 0x88, 0x2af: 0x89, + 0x2b0: 0x8a, 0x2b1: 0x8b, 0x2b2: 0x8c, 0x2b3: 0x8d, 0x2b4: 0x8e, 0x2b5: 0x88, 0x2b6: 0x89, 0x2b7: 0x8a, + 0x2b8: 0x8b, 0x2b9: 0x8c, 0x2ba: 0x8d, 0x2bb: 0x8e, 0x2bc: 0x88, 0x2bd: 0x89, 0x2be: 0x8a, 0x2bf: 0x8b, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8c, 0x2c1: 0x8d, 0x2c2: 0x8e, 0x2c3: 0x88, 0x2c4: 0x89, 0x2c5: 0x8a, 0x2c6: 0x8b, 0x2c7: 0x8c, + 0x2c8: 0x8d, 0x2c9: 0x8e, 0x2ca: 0x88, 0x2cb: 0x89, 0x2cc: 0x8a, 0x2cd: 0x8b, 0x2ce: 0x8c, 0x2cf: 0x8d, + 0x2d0: 0x8e, 0x2d1: 0x88, 0x2d2: 0x89, 0x2d3: 0x8a, 0x2d4: 0x8b, 0x2d5: 0x8c, 0x2d6: 0x8d, 0x2d7: 0x8e, + 0x2d8: 0x88, 0x2d9: 0x89, 0x2da: 0x8a, 0x2db: 0x8b, 0x2dc: 0x8c, 0x2dd: 0x8d, 0x2de: 0x8f, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x90, 0x32d: 0x91, 0x32e: 0x92, + 0x331: 0x93, 0x332: 0x94, 0x333: 0x95, 0x334: 0x96, + 0x338: 0x97, 0x339: 0x98, 0x33a: 0x99, 0x33b: 0x9a, 0x33e: 0x9b, 0x33f: 0x9c, + // Block 0xd, offset 0x340 + 0x347: 0x9d, + 0x34b: 0x9e, 0x34d: 0x9f, + 0x368: 0xa0, 0x36b: 0xa1, + 0x374: 0xa2, + 0x37a: 0xa3, 0x37d: 0xa4, + // Block 0xe, offset 0x380 + 0x381: 0xa5, 0x382: 0xa6, 0x384: 0xa7, 0x385: 0x83, 0x387: 0xa8, + 0x388: 0xa9, 0x38b: 0xaa, 0x38c: 0xab, 0x38d: 0xac, + 0x391: 0xad, 0x392: 0xae, 0x393: 0xaf, 0x396: 0xb0, 0x397: 0xb1, + 0x398: 0x74, 0x39a: 0xb2, 0x39c: 0xb3, + 0x3a0: 0xb4, 0x3a4: 0xb5, 0x3a5: 0xb6, 0x3a7: 0xb7, + 0x3a8: 0xb8, 0x3a9: 0xb9, 0x3aa: 0xba, + 0x3b0: 0x74, 0x3b5: 0xbb, 0x3b6: 0xbc, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xbd, 0x3ec: 0xbe, + 0x3ff: 0xbf, + // Block 0x10, offset 0x400 + 0x432: 0xc0, + // Block 0x11, offset 0x440 + 0x445: 0xc1, 0x446: 0xc2, 0x447: 0xc3, + 0x449: 0xc4, + // Block 0x12, offset 0x480 + 0x480: 0xc5, 0x484: 0xbe, + 0x48b: 0xc6, + 0x4a3: 0xc7, 0x4a5: 0xc8, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc9, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 156 entries, 312 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x120, 0x122, 0x12b, 0x12d, 0x130, 0x132, 0x13d, 0x141, 0x14f, 0x152, 0x158, 0x15e, 0x169, 0x16d, 0x16f, 0x171, 0x173, 0x175, 0x177, 0x17d, 0x181, 0x183, 0x185, 0x18d, 0x191, 0x194, 0x196, 0x198, 0x19b, 0x19e, 0x1a0, 0x1a2, 0x1a4, 0x1a6, 0x1ac, 0x1af, 0x1b1, 0x1b8, 0x1be, 0x1c4, 0x1cc, 0x1d2, 0x1d8, 0x1de, 0x1e2, 0x1f0, 0x1f9, 0x1fc, 0x1ff, 0x201, 0x204, 0x206, 0x20a, 0x20f, 0x211, 0x213, 0x218, 0x21e, 0x220, 0x222, 0x224, 0x22a, 0x22d, 0x22f, 0x231, 0x237, 0x23a, 0x242, 0x249, 0x24c, 0x24f, 0x251, 0x254, 0x25c, 0x260, 0x267, 0x26a, 0x270, 0x272, 0x275, 0x277, 0x27a, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x289, 0x28c, 0x28e, 0x290, 0x292, 0x294, 0x296, 0x2a3, 0x2ad, 0x2af, 0x2b1, 0x2b7, 0x2b9, 0x2bb, 0x2be} + +// nfcSparseValues: 704 entries, 2816 bytes +var nfcSparseValues = [704]valueRange{ + // Block 0x0, 
offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4857, lo: 0x8a, hi: 0x8a}, + {value: 0x4875, lo: 0x8b, hi: 0x8b}, + {value: 0x36de, lo: 0x8c, hi: 0x8c}, + {value: 0x36f6, lo: 0x8d, hi: 0x8d}, + {value: 0x488d, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3714, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 
0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 
0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x02}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xd0 + {value: 0x0000, lo: 0x01}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd2 + {value: 0x0000, lo: 0x04}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd7 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe8 + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf4 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf6 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfc + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 
0x8d}, + // Block 0x2a, offset 0xfe + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x108 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x110 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x113 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x116 + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x120 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x122 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x12b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x12d + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x130 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x132 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x13d + {value: 0x0004, lo: 0x03}, + {value: 0x0436, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x141 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, 
hi: 0xb0}, + // Block 0x3e, offset 0x14f + {value: 0x4292, lo: 0x02}, + {value: 0x01bb, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3f, offset 0x152 + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x40, offset 0x158 + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x41, offset 0x15e + {value: 0x63f1, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x42, offset 0x169 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x43, offset 0x16d + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x44, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x45, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x46, offset 0x173 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x47, offset 0x175 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xaf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x03}, + {value: 0x4ab6, lo: 0xb3, hi: 0xb3}, + {value: 0x4ab6, lo: 0xb5, hi: 0xb6}, + {value: 0x4ab6, lo: 0xba, hi: 0xbf}, + // Block 0x4a, offset 0x181 + {value: 0x0000, lo: 0x01}, + {value: 0x4ab6, lo: 0x8f, hi: 0xa3}, + // Block 0x4b, offset 0x183 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4c, offset 0x185 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4e, offset 0x191 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x4f, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x50, offset 0x196 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x51, offset 0x198 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x52, offset 0x19b + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x53, offset 0x19e + {value: 
0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x54, offset 0x1a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x55, offset 0x1a2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x56, offset 0x1a4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x57, offset 0x1a6 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x58, offset 0x1ac + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x59, offset 0x1af + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x5a, offset 0x1b1 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5b, offset 0x1b8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5c, offset 0x1be + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5d, offset 0x1c4 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5e, offset 0x1cc + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5f, offset 0x1d2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x60, offset 0x1d8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x61, offset 0x1de + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x62, offset 0x1e2 + {value: 0x0006, lo: 0x0d}, + {value: 0x43a7, lo: 0x9d, hi: 0x9d}, + {value: 0x8116, lo: 0x9e, hi: 0x9e}, + {value: 0x4419, lo: 0x9f, hi: 0x9f}, + {value: 0x4407, lo: 0xaa, hi: 0xab}, + {value: 0x450b, lo: 0xac, hi: 0xac}, + {value: 0x4513, lo: 0xad, hi: 0xad}, + {value: 0x435f, lo: 0xae, hi: 0xb1}, + {value: 0x437d, lo: 0xb2, hi: 0xb4}, + {value: 0x4395, lo: 0xb5, hi: 0xb6}, + {value: 0x43a1, lo: 0xb8, hi: 0xb8}, + {value: 0x43ad, lo: 0xb9, hi: 0xbb}, + {value: 0x43c5, lo: 0xbc, hi: 0xbc}, + {value: 0x43cb, lo: 0xbe, hi: 0xbe}, + // Block 0x63, offset 0x1f0 + {value: 0x0006, lo: 0x08}, + {value: 0x43d1, 
lo: 0x80, hi: 0x81}, + {value: 0x43dd, lo: 0x83, hi: 0x84}, + {value: 0x43ef, lo: 0x86, hi: 0x89}, + {value: 0x4413, lo: 0x8a, hi: 0x8a}, + {value: 0x438f, lo: 0x8b, hi: 0x8b}, + {value: 0x4377, lo: 0x8c, hi: 0x8c}, + {value: 0x43bf, lo: 0x8d, hi: 0x8d}, + {value: 0x43e9, lo: 0x8e, hi: 0x8e}, + // Block 0x64, offset 0x1f9 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x65, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x66, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x67, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x68, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x69, offset 0x206 + {value: 0x0000, lo: 0x03}, + {value: 0x8133, lo: 0xa0, hi: 0xa6}, + {value: 0x812e, lo: 0xa7, hi: 0xad}, + {value: 0x8133, lo: 0xae, hi: 0xaf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6b, offset 0x20f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6c, offset 0x211 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x04}, + {value: 0x4ab6, lo: 0x9e, hi: 0x9f}, + {value: 0x4ab6, lo: 0xa3, hi: 0xa3}, + {value: 0x4ab6, lo: 0xa5, hi: 0xa6}, + {value: 0x4ab6, lo: 0xaa, hi: 0xaf}, + // Block 0x6e, offset 0x218 + {value: 0x0000, lo: 0x05}, + {value: 0x4ab6, lo: 0x82, hi: 0x87}, + {value: 0x4ab6, lo: 0x8a, hi: 0x8f}, + {value: 0x4ab6, lo: 0x92, hi: 0x97}, + {value: 0x4ab6, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6f, offset 0x21e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x70, offset 0x220 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x71, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x72, offset 0x224 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x22a + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x74, offset 0x22d + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x75, offset 0x22f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x76, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x77, offset 0x237 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x78, offset 0x23a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 
0xba}, + // Block 0x79, offset 0x242 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x7a, offset 0x249 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x7b, offset 0x24c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x7c, offset 0x24f + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x7d, offset 0x251 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7e, offset 0x254 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7f, offset 0x25c + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x80, offset 0x260 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x81, offset 0x267 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x82, offset 0x26a + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x83, offset 0x270 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x84, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x85, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x86, offset 0x277 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x87, offset 0x27a + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x88, offset 0x27f + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x89, offset 0x281 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x8a, offset 0x283 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x8b, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x8c, offset 0x287 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8d, offset 0x289 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8e, offset 0x28c + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8f, offset 0x28e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x90, offset 0x290 + {value: 
0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x91, offset 0x292 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x92, offset 0x294 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x93, offset 0x296 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x94, offset 0x2a3 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x95, offset 0x2ad + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x96, offset 0x2af + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x97, offset 0x2b1 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0x98, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0x99, offset 0x2b9 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0x9a, offset 0x2bb + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x9b, offset 0x2be + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 18768 bytes (18.33 KiB). Checksum: c51186dd2412943d. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. +var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f86, 0xc1: 0x2f8b, 0xc2: 0x469f, 0xc3: 0x2f90, 0xc4: 0x46ae, 0xc5: 0x46b3, + 0xc6: 0xa000, 0xc7: 0x46bd, 0xc8: 0x2ff9, 0xc9: 0x2ffe, 0xca: 0x46c2, 0xcb: 0x3012, + 0xcc: 0x3085, 0xcd: 0x308a, 0xce: 0x308f, 0xcf: 0x46d6, 0xd1: 0x311b, + 0xd2: 0x313e, 0xd3: 0x3143, 0xd4: 0x46e0, 0xd5: 0x46e5, 0xd6: 0x46f4, + 0xd8: 0xa000, 0xd9: 0x31ca, 0xda: 0x31cf, 0xdb: 0x31d4, 0xdc: 0x4726, 0xdd: 0x324c, + 0xe0: 0x3292, 0xe1: 0x3297, 0xe2: 0x4730, 0xe3: 0x329c, + 0xe4: 0x473f, 0xe5: 0x4744, 0xe6: 0xa000, 0xe7: 0x474e, 0xe8: 0x3305, 0xe9: 0x330a, + 0xea: 0x4753, 0xeb: 0x331e, 0xec: 0x3396, 0xed: 0x339b, 0xee: 0x33a0, 0xef: 0x4767, + 0xf1: 0x342c, 0xf2: 0x344f, 0xf3: 0x3454, 0xf4: 0x4771, 0xf5: 0x4776, + 0xf6: 0x4785, 0xf8: 0xa000, 0xf9: 0x34e0, 0xfa: 0x34e5, 0xfb: 0x34ea, + 0xfc: 0x47b7, 0xfd: 0x3567, 0xff: 0x3580, + // Block 0x4, offset 0x100 + 0x100: 0x2f95, 0x101: 0x32a1, 0x102: 0x46a4, 0x103: 0x4735, 0x104: 0x2fb3, 0x105: 0x32bf, + 0x106: 0x2fc7, 0x107: 0x32d3, 0x108: 0x2fcc, 0x109: 0x32d8, 0x10a: 0x2fd1, 0x10b: 0x32dd, + 0x10c: 0x2fd6, 0x10d: 0x32e2, 0x10e: 0x2fe0, 0x10f: 0x32ec, + 0x112: 0x46c7, 0x113: 0x4758, 0x114: 0x3008, 0x115: 0x3314, 0x116: 0x300d, 0x117: 0x3319, + 0x118: 0x302b, 0x119: 0x3337, 0x11a: 0x301c, 0x11b: 0x3328, 0x11c: 0x3044, 0x11d: 0x3350, + 0x11e: 0x304e, 0x11f: 0x335a, 0x120: 0x3053, 0x121: 0x335f, 0x122: 0x305d, 0x123: 0x3369, + 0x124: 0x3062, 0x125: 0x336e, 0x128: 0x3094, 0x129: 0x33a5, + 0x12a: 0x3099, 0x12b: 0x33aa, 0x12c: 0x309e, 0x12d: 0x33af, 0x12e: 0x30c1, 0x12f: 0x33cd, + 0x130: 0x30a3, 0x132: 0x1960, 0x133: 0x19ed, 0x134: 0x30cb, 0x135: 0x33d7, + 0x136: 0x30df, 0x137: 0x33f0, 0x139: 0x30e9, 0x13a: 0x33fa, 0x13b: 0x30f3, + 0x13c: 0x3404, 0x13d: 0x30ee, 0x13e: 0x33ff, 0x13f: 0x1bb2, + // Block 0x5, offset 0x140 + 0x140: 0x1c3a, 0x143: 0x3116, 0x144: 0x3427, 0x145: 0x312f, + 0x146: 0x3440, 0x147: 0x3125, 0x148: 0x3436, 0x149: 0x1c62, + 0x14c: 0x46ea, 0x14d: 0x477b, 0x14e: 0x3148, 0x14f: 0x3459, 0x150: 0x3152, 0x151: 0x3463, + 0x154: 0x3170, 0x155: 0x3481, 0x156: 0x3189, 0x157: 0x349a, + 0x158: 0x317a, 0x159: 0x348b, 0x15a: 0x470d, 0x15b: 0x479e, 0x15c: 0x3193, 0x15d: 0x34a4, + 0x15e: 0x31a2, 0x15f: 0x34b3, 0x160: 0x4712, 0x161: 0x47a3, 0x162: 0x31bb, 0x163: 0x34d1, + 0x164: 0x31ac, 0x165: 0x34c2, 0x168: 0x471c, 0x169: 0x47ad, + 0x16a: 0x4721, 0x16b: 0x47b2, 0x16c: 0x31d9, 0x16d: 0x34ef, 
0x16e: 0x31e3, 0x16f: 0x34f9, + 0x170: 0x31e8, 0x171: 0x34fe, 0x172: 0x3206, 0x173: 0x351c, 0x174: 0x3229, 0x175: 0x353f, + 0x176: 0x3251, 0x177: 0x356c, 0x178: 0x3265, 0x179: 0x3274, 0x17a: 0x3594, 0x17b: 0x327e, + 0x17c: 0x359e, 0x17d: 0x3283, 0x17e: 0x35a3, 0x17f: 0x00a7, + // Block 0x6, offset 0x180 + 0x184: 0x2e05, 0x185: 0x2e0b, + 0x186: 0x2e11, 0x187: 0x1975, 0x188: 0x1978, 0x189: 0x1a0e, 0x18a: 0x198d, 0x18b: 0x1990, + 0x18c: 0x1a44, 0x18d: 0x2f9f, 0x18e: 0x32ab, 0x18f: 0x30ad, 0x190: 0x33b9, 0x191: 0x3157, + 0x192: 0x3468, 0x193: 0x31ed, 0x194: 0x3503, 0x195: 0x39e6, 0x196: 0x3b75, 0x197: 0x39df, + 0x198: 0x3b6e, 0x199: 0x39ed, 0x19a: 0x3b7c, 0x19b: 0x39d8, 0x19c: 0x3b67, + 0x19e: 0x38c7, 0x19f: 0x3a56, 0x1a0: 0x38c0, 0x1a1: 0x3a4f, 0x1a2: 0x35ca, 0x1a3: 0x35dc, + 0x1a6: 0x3058, 0x1a7: 0x3364, 0x1a8: 0x30d5, 0x1a9: 0x33e6, + 0x1aa: 0x4703, 0x1ab: 0x4794, 0x1ac: 0x39a7, 0x1ad: 0x3b36, 0x1ae: 0x35ee, 0x1af: 0x35f4, + 0x1b0: 0x33dc, 0x1b1: 0x1945, 0x1b2: 0x1948, 0x1b3: 0x19d5, 0x1b4: 0x303f, 0x1b5: 0x334b, + 0x1b8: 0x3111, 0x1b9: 0x3422, 0x1ba: 0x38ce, 0x1bb: 0x3a5d, + 0x1bc: 0x35c4, 0x1bd: 0x35d6, 0x1be: 0x35d0, 0x1bf: 0x35e2, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2fa4, 0x1c1: 0x32b0, 0x1c2: 0x2fa9, 0x1c3: 0x32b5, 0x1c4: 0x3021, 0x1c5: 0x332d, + 0x1c6: 0x3026, 0x1c7: 0x3332, 0x1c8: 0x30b2, 0x1c9: 0x33be, 0x1ca: 0x30b7, 0x1cb: 0x33c3, + 0x1cc: 0x315c, 0x1cd: 0x346d, 0x1ce: 0x3161, 0x1cf: 0x3472, 0x1d0: 0x317f, 0x1d1: 0x3490, + 0x1d2: 0x3184, 0x1d3: 0x3495, 0x1d4: 0x31f2, 0x1d5: 0x3508, 0x1d6: 0x31f7, 0x1d7: 0x350d, + 0x1d8: 0x319d, 0x1d9: 0x34ae, 0x1da: 0x31b6, 0x1db: 0x34cc, + 0x1de: 0x3071, 0x1df: 0x337d, + 0x1e6: 0x46a9, 0x1e7: 0x473a, 0x1e8: 0x46d1, 0x1e9: 0x4762, + 0x1ea: 0x3976, 0x1eb: 0x3b05, 0x1ec: 0x3953, 0x1ed: 0x3ae2, 0x1ee: 0x46ef, 0x1ef: 0x4780, + 0x1f0: 0x396f, 0x1f1: 0x3afe, 0x1f2: 0x325b, 0x1f3: 0x3576, + // Block 0x8, offset 0x200 + 0x200: 0x9933, 0x201: 0x9933, 0x202: 0x9933, 0x203: 0x9933, 0x204: 0x9933, 0x205: 0x8133, + 0x206: 0x9933, 0x207: 0x9933, 0x208: 0x9933, 0x209: 0x9933, 0x20a: 0x9933, 0x20b: 0x9933, + 0x20c: 0x9933, 0x20d: 0x8133, 0x20e: 0x8133, 0x20f: 0x9933, 0x210: 0x8133, 0x211: 0x9933, + 0x212: 0x8133, 0x213: 0x9933, 0x214: 0x9933, 0x215: 0x8134, 0x216: 0x812e, 0x217: 0x812e, + 0x218: 0x812e, 0x219: 0x812e, 0x21a: 0x8134, 0x21b: 0x992c, 0x21c: 0x812e, 0x21d: 0x812e, + 0x21e: 0x812e, 0x21f: 0x812e, 0x220: 0x812e, 0x221: 0x812a, 0x222: 0x812a, 0x223: 0x992e, + 0x224: 0x992e, 0x225: 0x992e, 0x226: 0x992e, 0x227: 0x992a, 0x228: 0x992a, 0x229: 0x812e, + 0x22a: 0x812e, 0x22b: 0x812e, 0x22c: 0x812e, 0x22d: 0x992e, 0x22e: 0x992e, 0x22f: 0x812e, + 0x230: 0x992e, 0x231: 0x992e, 0x232: 0x812e, 0x233: 0x812e, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812e, 0x23a: 0x812e, 0x23b: 0x812e, + 0x23c: 0x812e, 0x23d: 0x8133, 0x23e: 0x8133, 0x23f: 0x8133, + // Block 0x9, offset 0x240 + 0x240: 0x49c5, 0x241: 0x49ca, 0x242: 0x9933, 0x243: 0x49cf, 0x244: 0x4a88, 0x245: 0x9937, + 0x246: 0x8133, 0x247: 0x812e, 0x248: 0x812e, 0x249: 0x812e, 0x24a: 0x8133, 0x24b: 0x8133, + 0x24c: 0x8133, 0x24d: 0x812e, 0x24e: 0x812e, 0x250: 0x8133, 0x251: 0x8133, + 0x252: 0x8133, 0x253: 0x812e, 0x254: 0x812e, 0x255: 0x812e, 0x256: 0x812e, 0x257: 0x8133, + 0x258: 0x8134, 0x259: 0x812e, 0x25a: 0x812e, 0x25b: 0x8133, 0x25c: 0x8135, 0x25d: 0x8136, + 0x25e: 0x8136, 0x25f: 0x8135, 0x260: 0x8136, 0x261: 0x8136, 0x262: 0x8135, 0x263: 0x8133, + 0x264: 0x8133, 0x265: 0x8133, 0x266: 0x8133, 0x267: 0x8133, 0x268: 0x8133, 0x269: 0x8133, + 0x26a: 0x8133, 0x26b: 
0x8133, 0x26c: 0x8133, 0x26d: 0x8133, 0x26e: 0x8133, 0x26f: 0x8133, + 0x274: 0x0173, + 0x27a: 0x42bc, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x4271, 0x285: 0x4492, + 0x286: 0x3600, 0x287: 0x00ce, 0x288: 0x361e, 0x289: 0x362a, 0x28a: 0x363c, + 0x28c: 0x365a, 0x28e: 0x366c, 0x28f: 0x368a, 0x290: 0x3e1f, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x364e, 0x2ab: 0x367e, 0x2ac: 0x4815, 0x2ad: 0x36ae, 0x2ae: 0x483f, 0x2af: 0x36c0, + 0x2b0: 0x3e87, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4857, 0x2cb: 0x4875, + 0x2cc: 0x36de, 0x2cd: 0x36f6, 0x2ce: 0x488d, 0x2d0: 0x01c1, 0x2d1: 0x01d3, + 0x2d2: 0x01af, 0x2d3: 0x4323, 0x2d4: 0x4329, 0x2d5: 0x01fd, 0x2d6: 0x01eb, + 0x2f0: 0x01d9, 0x2f1: 0x01ee, 0x2f2: 0x01f1, 0x2f4: 0x018b, 0x2f5: 0x01ca, + 0x2f9: 0x01a9, + // Block 0xc, offset 0x300 + 0x300: 0x3738, 0x301: 0x3744, 0x303: 0x3732, + 0x306: 0xa000, 0x307: 0x3720, + 0x30c: 0x3774, 0x30d: 0x375c, 0x30e: 0x3786, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3768, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37ec, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x374a, 0x342: 0x37ce, + 0x350: 0x3726, 0x351: 0x37aa, + 0x352: 0x372c, 0x353: 0x37b0, 0x356: 0x373e, 0x357: 0x37c2, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3840, 0x35b: 0x3846, 0x35c: 0x3750, 0x35d: 0x37d4, + 0x35e: 0x3756, 0x35f: 0x37da, 0x362: 0x3762, 0x363: 0x37e6, + 0x364: 0x376e, 0x365: 0x37f2, 0x366: 0x377a, 0x367: 0x37fe, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x384c, 0x36b: 0x3852, 0x36c: 0x37a4, 0x36d: 0x3828, 0x36e: 0x3780, 0x36f: 0x3804, + 0x370: 0x378c, 0x371: 0x3810, 0x372: 0x3792, 0x373: 0x3816, 0x374: 0x3798, 0x375: 0x381c, + 0x378: 0x379e, 0x379: 0x3822, + // Block 0xe, offset 0x380 + 0x387: 0x1d67, + 0x391: 0x812e, + 0x392: 0x8133, 0x393: 0x8133, 0x394: 0x8133, 0x395: 0x8133, 0x396: 0x812e, 0x397: 0x8133, + 0x398: 0x8133, 0x399: 0x8133, 0x39a: 0x812f, 0x39b: 0x812e, 0x39c: 0x8133, 0x39d: 0x8133, + 0x39e: 0x8133, 0x39f: 0x8133, 0x3a0: 0x8133, 0x3a1: 0x8133, 0x3a2: 0x812e, 0x3a3: 0x812e, + 0x3a4: 0x812e, 0x3a5: 0x812e, 0x3a6: 0x812e, 0x3a7: 0x812e, 0x3a8: 0x8133, 0x3a9: 0x8133, + 0x3aa: 0x812e, 0x3ab: 0x8133, 0x3ac: 0x8133, 0x3ad: 0x812f, 0x3ae: 0x8132, 0x3af: 0x8133, + 0x3b0: 0x8106, 0x3b1: 0x8107, 0x3b2: 0x8108, 0x3b3: 0x8109, 0x3b4: 0x810a, 0x3b5: 0x810b, + 0x3b6: 0x810c, 0x3b7: 0x810d, 0x3b8: 0x810e, 0x3b9: 0x810f, 0x3ba: 0x810f, 0x3bb: 0x8110, + 0x3bc: 0x8111, 0x3bd: 0x8112, 0x3bf: 0x8113, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8117, + 0x3cc: 0x8118, 0x3cd: 0x8119, 0x3ce: 0x811a, 0x3cf: 0x811b, 0x3d0: 0x811c, 0x3d1: 0x811d, + 0x3d2: 0x811e, 0x3d3: 0x9933, 0x3d4: 0x9933, 0x3d5: 0x992e, 0x3d6: 0x812e, 0x3d7: 0x8133, + 0x3d8: 0x8133, 0x3d9: 0x8133, 0x3da: 0x8133, 0x3db: 0x8133, 0x3dc: 0x812e, 0x3dd: 0x8133, + 0x3de: 0x8133, 0x3df: 0x812e, + 0x3f0: 0x811f, 0x3f5: 0x1d8a, + 0x3f6: 0x2019, 0x3f7: 0x2055, 0x3f8: 0x2050, + // Block 0x10, offset 0x400 + 0x413: 0x812e, 0x414: 0x8133, 0x415: 0x8133, 0x416: 0x8133, 0x417: 0x8133, + 0x418: 0x8133, 0x419: 0x8133, 0x41a: 0x8133, 0x41b: 0x8133, 0x41c: 0x8133, 0x41d: 0x8133, + 0x41e: 0x8133, 0x41f: 
0x8133, 0x420: 0x8133, 0x421: 0x8133, 0x423: 0x812e, + 0x424: 0x8133, 0x425: 0x8133, 0x426: 0x812e, 0x427: 0x8133, 0x428: 0x8133, 0x429: 0x812e, + 0x42a: 0x8133, 0x42b: 0x8133, 0x42c: 0x8133, 0x42d: 0x812e, 0x42e: 0x812e, 0x42f: 0x812e, + 0x430: 0x8117, 0x431: 0x8118, 0x432: 0x8119, 0x433: 0x8133, 0x434: 0x8133, 0x435: 0x8133, + 0x436: 0x812e, 0x437: 0x8133, 0x438: 0x8133, 0x439: 0x812e, 0x43a: 0x812e, 0x43b: 0x8133, + 0x43c: 0x8133, 0x43d: 0x8133, 0x43e: 0x8133, 0x43f: 0x8133, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d33, 0x447: 0xa000, 0x448: 0x2d3b, 0x449: 0xa000, 0x44a: 0x2d43, 0x44b: 0xa000, + 0x44c: 0x2d4b, 0x44d: 0xa000, 0x44e: 0x2d53, 0x451: 0xa000, + 0x452: 0x2d5b, + 0x474: 0x8103, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d63, + 0x47c: 0xa000, 0x47d: 0x2d6b, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0416, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x041a, 0x495: 0x041e, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0426, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x042a, 0x49d: 0x01c1, + 0x49e: 0x01c4, 0x49f: 0x01c7, 0x4a0: 0x01fd, 0x4a1: 0x0200, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01c1, 0x4a7: 0x01c4, 0x4a8: 0x01ee, 0x4a9: 0x01fd, + 0x4aa: 0x0200, + 0x4b8: 0x020f, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042e, 0x4e8: 0x016d, 0x4e9: 0x0128, + 0x4ea: 0x0432, 0x4eb: 0x0170, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x0422, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x0161, 0x4fd: 0x0164, 0x4fe: 0x0167, 0x4ff: 0x01d3, + // Block 0x14, offset 0x500 + 0x500: 0x8133, 0x501: 0x8133, 0x502: 0x812e, 0x503: 0x8133, 0x504: 0x8133, 0x505: 0x8133, + 0x506: 0x8133, 0x507: 0x8133, 0x508: 0x8133, 0x509: 0x8133, 0x50a: 0x812e, 0x50b: 0x8133, + 0x50c: 0x8133, 0x50d: 0x8136, 0x50e: 0x812b, 0x50f: 0x812e, 0x510: 0x812a, 0x511: 0x8133, + 0x512: 0x8133, 0x513: 0x8133, 0x514: 0x8133, 0x515: 0x8133, 0x516: 0x8133, 0x517: 0x8133, + 0x518: 0x8133, 0x519: 0x8133, 0x51a: 0x8133, 0x51b: 0x8133, 0x51c: 0x8133, 0x51d: 0x8133, + 0x51e: 0x8133, 0x51f: 0x8133, 0x520: 0x8133, 0x521: 0x8133, 0x522: 0x8133, 0x523: 0x8133, + 0x524: 0x8133, 0x525: 0x8133, 0x526: 0x8133, 0x527: 0x8133, 0x528: 0x8133, 0x529: 0x8133, + 0x52a: 0x8133, 0x52b: 0x8133, 0x52c: 0x8133, 0x52d: 0x8133, 0x52e: 0x8133, 0x52f: 0x8133, + 0x530: 0x8133, 0x531: 0x8133, 0x532: 0x8133, 0x533: 0x8133, 0x534: 0x8133, 0x535: 0x8133, + 0x536: 0x8134, 0x537: 0x8132, 0x538: 0x8132, 0x539: 0x812e, 0x53b: 0x8133, + 0x53c: 0x8135, 0x53d: 0x812e, 0x53e: 0x8133, 0x53f: 0x812e, + // Block 0x15, offset 0x540 + 0x540: 0x2fae, 0x541: 0x32ba, 0x542: 0x2fb8, 0x543: 0x32c4, 0x544: 0x2fbd, 0x545: 0x32c9, + 0x546: 0x2fc2, 0x547: 0x32ce, 0x548: 0x38e3, 0x549: 0x3a72, 0x54a: 0x2fdb, 0x54b: 0x32e7, + 0x54c: 0x2fe5, 0x54d: 0x32f1, 0x54e: 0x2ff4, 0x54f: 0x3300, 0x550: 0x2fea, 0x551: 0x32f6, + 0x552: 0x2fef, 0x553: 0x32fb, 0x554: 0x3906, 0x555: 0x3a95, 0x556: 0x390d, 0x557: 0x3a9c, + 0x558: 0x3030, 0x559: 0x333c, 0x55a: 
0x3035, 0x55b: 0x3341, 0x55c: 0x391b, 0x55d: 0x3aaa, + 0x55e: 0x303a, 0x55f: 0x3346, 0x560: 0x3049, 0x561: 0x3355, 0x562: 0x3067, 0x563: 0x3373, + 0x564: 0x3076, 0x565: 0x3382, 0x566: 0x306c, 0x567: 0x3378, 0x568: 0x307b, 0x569: 0x3387, + 0x56a: 0x3080, 0x56b: 0x338c, 0x56c: 0x30c6, 0x56d: 0x33d2, 0x56e: 0x3922, 0x56f: 0x3ab1, + 0x570: 0x30d0, 0x571: 0x33e1, 0x572: 0x30da, 0x573: 0x33eb, 0x574: 0x30e4, 0x575: 0x33f5, + 0x576: 0x46db, 0x577: 0x476c, 0x578: 0x3929, 0x579: 0x3ab8, 0x57a: 0x30fd, 0x57b: 0x340e, + 0x57c: 0x30f8, 0x57d: 0x3409, 0x57e: 0x3102, 0x57f: 0x3413, + // Block 0x16, offset 0x580 + 0x580: 0x3107, 0x581: 0x3418, 0x582: 0x310c, 0x583: 0x341d, 0x584: 0x3120, 0x585: 0x3431, + 0x586: 0x312a, 0x587: 0x343b, 0x588: 0x3139, 0x589: 0x344a, 0x58a: 0x3134, 0x58b: 0x3445, + 0x58c: 0x394c, 0x58d: 0x3adb, 0x58e: 0x395a, 0x58f: 0x3ae9, 0x590: 0x3961, 0x591: 0x3af0, + 0x592: 0x3968, 0x593: 0x3af7, 0x594: 0x3166, 0x595: 0x3477, 0x596: 0x316b, 0x597: 0x347c, + 0x598: 0x3175, 0x599: 0x3486, 0x59a: 0x4708, 0x59b: 0x4799, 0x59c: 0x39ae, 0x59d: 0x3b3d, + 0x59e: 0x318e, 0x59f: 0x349f, 0x5a0: 0x3198, 0x5a1: 0x34a9, 0x5a2: 0x4717, 0x5a3: 0x47a8, + 0x5a4: 0x39b5, 0x5a5: 0x3b44, 0x5a6: 0x39bc, 0x5a7: 0x3b4b, 0x5a8: 0x39c3, 0x5a9: 0x3b52, + 0x5aa: 0x31a7, 0x5ab: 0x34b8, 0x5ac: 0x31b1, 0x5ad: 0x34c7, 0x5ae: 0x31c5, 0x5af: 0x34db, + 0x5b0: 0x31c0, 0x5b1: 0x34d6, 0x5b2: 0x3201, 0x5b3: 0x3517, 0x5b4: 0x3210, 0x5b5: 0x3526, + 0x5b6: 0x320b, 0x5b7: 0x3521, 0x5b8: 0x39ca, 0x5b9: 0x3b59, 0x5ba: 0x39d1, 0x5bb: 0x3b60, + 0x5bc: 0x3215, 0x5bd: 0x352b, 0x5be: 0x321a, 0x5bf: 0x3530, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x321f, 0x5c1: 0x3535, 0x5c2: 0x3224, 0x5c3: 0x353a, 0x5c4: 0x3233, 0x5c5: 0x3549, + 0x5c6: 0x322e, 0x5c7: 0x3544, 0x5c8: 0x3238, 0x5c9: 0x3553, 0x5ca: 0x323d, 0x5cb: 0x3558, + 0x5cc: 0x3242, 0x5cd: 0x355d, 0x5ce: 0x3260, 0x5cf: 0x357b, 0x5d0: 0x3279, 0x5d1: 0x3599, + 0x5d2: 0x3288, 0x5d3: 0x35a8, 0x5d4: 0x328d, 0x5d5: 0x35ad, 0x5d6: 0x3391, 0x5d7: 0x34bd, + 0x5d8: 0x354e, 0x5d9: 0x358a, 0x5da: 0x1be6, 0x5db: 0x42ee, + 0x5e0: 0x46b8, 0x5e1: 0x4749, 0x5e2: 0x2f9a, 0x5e3: 0x32a6, + 0x5e4: 0x388f, 0x5e5: 0x3a1e, 0x5e6: 0x3888, 0x5e7: 0x3a17, 0x5e8: 0x389d, 0x5e9: 0x3a2c, + 0x5ea: 0x3896, 0x5eb: 0x3a25, 0x5ec: 0x38d5, 0x5ed: 0x3a64, 0x5ee: 0x38ab, 0x5ef: 0x3a3a, + 0x5f0: 0x38a4, 0x5f1: 0x3a33, 0x5f2: 0x38b9, 0x5f3: 0x3a48, 0x5f4: 0x38b2, 0x5f5: 0x3a41, + 0x5f6: 0x38dc, 0x5f7: 0x3a6b, 0x5f8: 0x46cc, 0x5f9: 0x475d, 0x5fa: 0x3017, 0x5fb: 0x3323, + 0x5fc: 0x3003, 0x5fd: 0x330f, 0x5fe: 0x38f1, 0x5ff: 0x3a80, + // Block 0x18, offset 0x600 + 0x600: 0x38ea, 0x601: 0x3a79, 0x602: 0x38ff, 0x603: 0x3a8e, 0x604: 0x38f8, 0x605: 0x3a87, + 0x606: 0x3914, 0x607: 0x3aa3, 0x608: 0x30a8, 0x609: 0x33b4, 0x60a: 0x30bc, 0x60b: 0x33c8, + 0x60c: 0x46fe, 0x60d: 0x478f, 0x60e: 0x314d, 0x60f: 0x345e, 0x610: 0x3937, 0x611: 0x3ac6, + 0x612: 0x3930, 0x613: 0x3abf, 0x614: 0x3945, 0x615: 0x3ad4, 0x616: 0x393e, 0x617: 0x3acd, + 0x618: 0x39a0, 0x619: 0x3b2f, 0x61a: 0x3984, 0x61b: 0x3b13, 0x61c: 0x397d, 0x61d: 0x3b0c, + 0x61e: 0x3992, 0x61f: 0x3b21, 0x620: 0x398b, 0x621: 0x3b1a, 0x622: 0x3999, 0x623: 0x3b28, + 0x624: 0x31fc, 0x625: 0x3512, 0x626: 0x31de, 0x627: 0x34f4, 0x628: 0x39fb, 0x629: 0x3b8a, + 0x62a: 0x39f4, 0x62b: 0x3b83, 0x62c: 0x3a09, 0x62d: 0x3b98, 0x62e: 0x3a02, 0x62f: 0x3b91, + 0x630: 0x3a10, 0x631: 0x3b9f, 0x632: 0x3247, 0x633: 0x3562, 0x634: 0x326f, 0x635: 0x358f, + 0x636: 0x326a, 0x637: 0x3585, 0x638: 0x3256, 0x639: 0x3571, + // Block 0x19, offset 0x640 + 0x640: 0x481b, 0x641: 0x4821, 0x642: 0x4935, 0x643: 0x494d, 
0x644: 0x493d, 0x645: 0x4955, + 0x646: 0x4945, 0x647: 0x495d, 0x648: 0x47c1, 0x649: 0x47c7, 0x64a: 0x48a5, 0x64b: 0x48bd, + 0x64c: 0x48ad, 0x64d: 0x48c5, 0x64e: 0x48b5, 0x64f: 0x48cd, 0x650: 0x482d, 0x651: 0x4833, + 0x652: 0x3dcf, 0x653: 0x3ddf, 0x654: 0x3dd7, 0x655: 0x3de7, + 0x658: 0x47cd, 0x659: 0x47d3, 0x65a: 0x3cff, 0x65b: 0x3d0f, 0x65c: 0x3d07, 0x65d: 0x3d17, + 0x660: 0x4845, 0x661: 0x484b, 0x662: 0x4965, 0x663: 0x497d, + 0x664: 0x496d, 0x665: 0x4985, 0x666: 0x4975, 0x667: 0x498d, 0x668: 0x47d9, 0x669: 0x47df, + 0x66a: 0x48d5, 0x66b: 0x48ed, 0x66c: 0x48dd, 0x66d: 0x48f5, 0x66e: 0x48e5, 0x66f: 0x48fd, + 0x670: 0x485d, 0x671: 0x4863, 0x672: 0x3e2f, 0x673: 0x3e47, 0x674: 0x3e37, 0x675: 0x3e4f, + 0x676: 0x3e3f, 0x677: 0x3e57, 0x678: 0x47e5, 0x679: 0x47eb, 0x67a: 0x3d2f, 0x67b: 0x3d47, + 0x67c: 0x3d37, 0x67d: 0x3d4f, 0x67e: 0x3d3f, 0x67f: 0x3d57, + // Block 0x1a, offset 0x680 + 0x680: 0x4869, 0x681: 0x486f, 0x682: 0x3e5f, 0x683: 0x3e6f, 0x684: 0x3e67, 0x685: 0x3e77, + 0x688: 0x47f1, 0x689: 0x47f7, 0x68a: 0x3d5f, 0x68b: 0x3d6f, + 0x68c: 0x3d67, 0x68d: 0x3d77, 0x690: 0x487b, 0x691: 0x4881, + 0x692: 0x3e97, 0x693: 0x3eaf, 0x694: 0x3e9f, 0x695: 0x3eb7, 0x696: 0x3ea7, 0x697: 0x3ebf, + 0x699: 0x47fd, 0x69b: 0x3d7f, 0x69d: 0x3d87, + 0x69f: 0x3d8f, 0x6a0: 0x4893, 0x6a1: 0x4899, 0x6a2: 0x4995, 0x6a3: 0x49ad, + 0x6a4: 0x499d, 0x6a5: 0x49b5, 0x6a6: 0x49a5, 0x6a7: 0x49bd, 0x6a8: 0x4803, 0x6a9: 0x4809, + 0x6aa: 0x4905, 0x6ab: 0x491d, 0x6ac: 0x490d, 0x6ad: 0x4925, 0x6ae: 0x4915, 0x6af: 0x492d, + 0x6b0: 0x480f, 0x6b1: 0x4335, 0x6b2: 0x36a8, 0x6b3: 0x433b, 0x6b4: 0x4839, 0x6b5: 0x4341, + 0x6b6: 0x36ba, 0x6b7: 0x4347, 0x6b8: 0x36d8, 0x6b9: 0x434d, 0x6ba: 0x36f0, 0x6bb: 0x4353, + 0x6bc: 0x4887, 0x6bd: 0x4359, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3db7, 0x6c1: 0x3dbf, 0x6c2: 0x419b, 0x6c3: 0x41b9, 0x6c4: 0x41a5, 0x6c5: 0x41c3, + 0x6c6: 0x41af, 0x6c7: 0x41cd, 0x6c8: 0x3cef, 0x6c9: 0x3cf7, 0x6ca: 0x40e7, 0x6cb: 0x4105, + 0x6cc: 0x40f1, 0x6cd: 0x410f, 0x6ce: 0x40fb, 0x6cf: 0x4119, 0x6d0: 0x3dff, 0x6d1: 0x3e07, + 0x6d2: 0x41d7, 0x6d3: 0x41f5, 0x6d4: 0x41e1, 0x6d5: 0x41ff, 0x6d6: 0x41eb, 0x6d7: 0x4209, + 0x6d8: 0x3d1f, 0x6d9: 0x3d27, 0x6da: 0x4123, 0x6db: 0x4141, 0x6dc: 0x412d, 0x6dd: 0x414b, + 0x6de: 0x4137, 0x6df: 0x4155, 0x6e0: 0x3ed7, 0x6e1: 0x3edf, 0x6e2: 0x4213, 0x6e3: 0x4231, + 0x6e4: 0x421d, 0x6e5: 0x423b, 0x6e6: 0x4227, 0x6e7: 0x4245, 0x6e8: 0x3d97, 0x6e9: 0x3d9f, + 0x6ea: 0x415f, 0x6eb: 0x417d, 0x6ec: 0x4169, 0x6ed: 0x4187, 0x6ee: 0x4173, 0x6ef: 0x4191, + 0x6f0: 0x369c, 0x6f1: 0x3696, 0x6f2: 0x3da7, 0x6f3: 0x36a2, 0x6f4: 0x3daf, + 0x6f6: 0x4827, 0x6f7: 0x3dc7, 0x6f8: 0x360c, 0x6f9: 0x3606, 0x6fa: 0x35fa, 0x6fb: 0x4305, + 0x6fc: 0x3612, 0x6fd: 0x429e, 0x6fe: 0x01d6, 0x6ff: 0x429e, + // Block 0x1c, offset 0x700 + 0x700: 0x42b7, 0x701: 0x4499, 0x702: 0x3def, 0x703: 0x36b4, 0x704: 0x3df7, + 0x706: 0x4851, 0x707: 0x3e0f, 0x708: 0x3618, 0x709: 0x430b, 0x70a: 0x3624, 0x70b: 0x4311, + 0x70c: 0x3630, 0x70d: 0x44a0, 0x70e: 0x44a7, 0x70f: 0x44ae, 0x710: 0x36cc, 0x711: 0x36c6, + 0x712: 0x3e17, 0x713: 0x44fb, 0x716: 0x36d2, 0x717: 0x3e27, + 0x718: 0x3648, 0x719: 0x3642, 0x71a: 0x3636, 0x71b: 0x4317, 0x71d: 0x44b5, + 0x71e: 0x44bc, 0x71f: 0x44c3, 0x720: 0x3702, 0x721: 0x36fc, 0x722: 0x3e7f, 0x723: 0x4503, + 0x724: 0x36e4, 0x725: 0x36ea, 0x726: 0x3708, 0x727: 0x3e8f, 0x728: 0x3678, 0x729: 0x3672, + 0x72a: 0x3666, 0x72b: 0x4323, 0x72c: 0x3660, 0x72d: 0x448b, 0x72e: 0x4492, 0x72f: 0x0081, + 0x732: 0x3ec7, 0x733: 0x370e, 0x734: 0x3ecf, + 0x736: 0x489f, 0x737: 0x3ee7, 0x738: 0x3654, 0x739: 0x431d, 0x73a: 0x3684, 
0x73b: 0x432f, + 0x73c: 0x3690, 0x73d: 0x4271, 0x73e: 0x42a3, + // Block 0x1d, offset 0x740 + 0x740: 0x1bde, 0x741: 0x1be2, 0x742: 0x0047, 0x743: 0x1c5a, 0x745: 0x1bee, + 0x746: 0x1bf2, 0x747: 0x00e9, 0x749: 0x1c5e, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1993, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x19a5, 0x761: 0x1bce, 0x762: 0x19ae, + 0x764: 0x0075, 0x766: 0x01bb, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42e9, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0218, + 0x776: 0x021b, 0x777: 0x021e, 0x778: 0x0221, 0x779: 0x0093, 0x77b: 0x1b9e, + 0x77c: 0x01eb, 0x77d: 0x01c4, 0x77e: 0x017c, 0x77f: 0x01a3, + // Block 0x1e, offset 0x780 + 0x780: 0x0466, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x2234, 0x791: 0x2240, + 0x792: 0x22f4, 0x793: 0x221c, 0x794: 0x22a0, 0x795: 0x2228, 0x796: 0x22a6, 0x797: 0x22be, + 0x798: 0x22ca, 0x799: 0x222e, 0x79a: 0x22d0, 0x79b: 0x223a, 0x79c: 0x22c4, 0x79d: 0x22d6, + 0x79e: 0x22dc, 0x79f: 0x1cc2, 0x7a0: 0x0053, 0x7a1: 0x195d, 0x7a2: 0x1baa, 0x7a3: 0x1966, + 0x7a4: 0x006d, 0x7a5: 0x19b1, 0x7a6: 0x1bd6, 0x7a7: 0x1d4e, 0x7a8: 0x1969, 0x7a9: 0x0071, + 0x7aa: 0x19bd, 0x7ab: 0x1bda, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19ea, 0x7b2: 0x1c1e, 0x7b3: 0x19f3, 0x7b4: 0x00ad, 0x7b5: 0x1a68, + 0x7b6: 0x1c52, 0x7b7: 0x1d62, 0x7b8: 0x19f6, 0x7b9: 0x00b1, 0x7ba: 0x1a6b, 0x7bb: 0x1c56, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c1d, 0x7c3: 0xa000, 0x7c4: 0x3c24, 0x7c5: 0xa000, + 0x7c7: 0x3c2b, 0x7c8: 0xa000, 0x7c9: 0x3c32, + 0x7cd: 0xa000, + 0x7e0: 0x2f7c, 0x7e1: 0xa000, 0x7e2: 0x3c40, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c39, 0x7ee: 0x2f77, 0x7ef: 0x2f81, + 0x7f0: 0x3c47, 0x7f1: 0x3c4e, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c55, 0x7f5: 0x3c5c, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c63, 0x7f9: 0x3c6a, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c71, 0x801: 0x3c78, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c8d, 0x805: 0x3c94, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c9b, 0x809: 0x3ca2, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3cb7, 0x82d: 0x3cbe, 0x82e: 0x3cc5, 0x82f: 0x3ccc, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1885, + 0x86a: 0x1888, 0x86b: 0x188b, 0x86c: 0x188e, 0x86d: 0x1891, 0x86e: 0x1894, 0x86f: 0x1897, + 0x870: 0x189a, 0x871: 0x189d, 0x872: 0x18a0, 0x873: 0x18a9, 0x874: 0x1a6e, 0x875: 0x1a72, + 0x876: 0x1a76, 0x877: 0x1a7a, 0x878: 0x1a7e, 0x879: 0x1a82, 0x87a: 0x1a86, 0x87b: 0x1a8a, + 0x87c: 0x1a8e, 0x87d: 0x1c86, 0x87e: 0x1c8b, 0x87f: 0x1c90, + // Block 0x22, offset 0x880 + 0x880: 0x1c95, 0x881: 0x1c9a, 0x882: 0x1c9f, 0x883: 0x1ca4, 0x884: 0x1ca9, 0x885: 0x1cae, + 0x886: 0x1cb3, 0x887: 0x1cb8, 0x888: 0x1882, 0x889: 0x18a6, 0x88a: 0x18ca, 0x88b: 0x18ee, + 0x88c: 0x1912, 0x88d: 0x191b, 0x88e: 0x1921, 0x88f: 0x1927, 0x890: 0x192d, 0x891: 0x1b66, + 0x892: 0x1b6a, 0x893: 0x1b6e, 0x894: 0x1b72, 0x895: 0x1b76, 0x896: 0x1b7a, 0x897: 0x1b7e, + 
0x898: 0x1b82, 0x899: 0x1b86, 0x89a: 0x1b8a, 0x89b: 0x1b8e, 0x89c: 0x1afa, 0x89d: 0x1afe, + 0x89e: 0x1b02, 0x89f: 0x1b06, 0x8a0: 0x1b0a, 0x8a1: 0x1b0e, 0x8a2: 0x1b12, 0x8a3: 0x1b16, + 0x8a4: 0x1b1a, 0x8a5: 0x1b1e, 0x8a6: 0x1b22, 0x8a7: 0x1b26, 0x8a8: 0x1b2a, 0x8a9: 0x1b2e, + 0x8aa: 0x1b32, 0x8ab: 0x1b36, 0x8ac: 0x1b3a, 0x8ad: 0x1b3e, 0x8ae: 0x1b42, 0x8af: 0x1b46, + 0x8b0: 0x1b4a, 0x8b1: 0x1b4e, 0x8b2: 0x1b52, 0x8b3: 0x1b56, 0x8b4: 0x1b5a, 0x8b5: 0x1b5e, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06c2, 0x8c1: 0x06e6, 0x8c2: 0x06f2, 0x8c3: 0x0702, 0x8c4: 0x070a, 0x8c5: 0x0716, + 0x8c6: 0x071e, 0x8c7: 0x0726, 0x8c8: 0x0732, 0x8c9: 0x0786, 0x8ca: 0x079e, 0x8cb: 0x07ae, + 0x8cc: 0x07be, 0x8cd: 0x07ce, 0x8ce: 0x07de, 0x8cf: 0x07fe, 0x8d0: 0x0802, 0x8d1: 0x0806, + 0x8d2: 0x083a, 0x8d3: 0x0862, 0x8d4: 0x0872, 0x8d5: 0x087a, 0x8d6: 0x087e, 0x8d7: 0x088a, + 0x8d8: 0x08a6, 0x8d9: 0x08aa, 0x8da: 0x08c2, 0x8db: 0x08c6, 0x8dc: 0x08ce, 0x8dd: 0x08de, + 0x8de: 0x097a, 0x8df: 0x098e, 0x8e0: 0x09ce, 0x8e1: 0x09e2, 0x8e2: 0x09ea, 0x8e3: 0x09ee, + 0x8e4: 0x09fe, 0x8e5: 0x0a1a, 0x8e6: 0x0a46, 0x8e7: 0x0a52, 0x8e8: 0x0a72, 0x8e9: 0x0a7e, + 0x8ea: 0x0a82, 0x8eb: 0x0a86, 0x8ec: 0x0a9e, 0x8ed: 0x0aa2, 0x8ee: 0x0ace, 0x8ef: 0x0ada, + 0x8f0: 0x0ae2, 0x8f1: 0x0aea, 0x8f2: 0x0afa, 0x8f3: 0x0b02, 0x8f4: 0x0b0a, 0x8f5: 0x0b36, + 0x8f6: 0x0b3a, 0x8f7: 0x0b42, 0x8f8: 0x0b46, 0x8f9: 0x0b4e, 0x8fa: 0x0b56, 0x8fb: 0x0b66, + 0x8fc: 0x0b82, 0x8fd: 0x0bfa, 0x8fe: 0x0c0e, 0x8ff: 0x0c12, + // Block 0x24, offset 0x900 + 0x900: 0x0c92, 0x901: 0x0c96, 0x902: 0x0caa, 0x903: 0x0cae, 0x904: 0x0cb6, 0x905: 0x0cbe, + 0x906: 0x0cc6, 0x907: 0x0cd2, 0x908: 0x0cfa, 0x909: 0x0d0a, 0x90a: 0x0d1e, 0x90b: 0x0d8e, + 0x90c: 0x0d9a, 0x90d: 0x0daa, 0x90e: 0x0db6, 0x90f: 0x0dc2, 0x910: 0x0dca, 0x911: 0x0dce, + 0x912: 0x0dd2, 0x913: 0x0dd6, 0x914: 0x0dda, 0x915: 0x0e92, 0x916: 0x0eda, 0x917: 0x0ee6, + 0x918: 0x0eea, 0x919: 0x0eee, 0x91a: 0x0ef2, 0x91b: 0x0efa, 0x91c: 0x0efe, 0x91d: 0x0f12, + 0x91e: 0x0f2e, 0x91f: 0x0f36, 0x920: 0x0f76, 0x921: 0x0f7a, 0x922: 0x0f82, 0x923: 0x0f86, + 0x924: 0x0f8e, 0x925: 0x0f92, 0x926: 0x0fb6, 0x927: 0x0fba, 0x928: 0x0fd6, 0x929: 0x0fda, + 0x92a: 0x0fde, 0x92b: 0x0fe2, 0x92c: 0x0ff6, 0x92d: 0x101a, 0x92e: 0x101e, 0x92f: 0x1022, + 0x930: 0x1046, 0x931: 0x1086, 0x932: 0x108a, 0x933: 0x10aa, 0x934: 0x10ba, 0x935: 0x10c2, + 0x936: 0x10e2, 0x937: 0x1106, 0x938: 0x114a, 0x939: 0x1152, 0x93a: 0x1166, 0x93b: 0x1172, + 0x93c: 0x117a, 0x93d: 0x1182, 0x93e: 0x1186, 0x93f: 0x118a, + // Block 0x25, offset 0x940 + 0x940: 0x11a2, 0x941: 0x11a6, 0x942: 0x11c2, 0x943: 0x11ca, 0x944: 0x11d2, 0x945: 0x11d6, + 0x946: 0x11e2, 0x947: 0x11ea, 0x948: 0x11ee, 0x949: 0x11f2, 0x94a: 0x11fa, 0x94b: 0x11fe, + 0x94c: 0x129e, 0x94d: 0x12b2, 0x94e: 0x12e6, 0x94f: 0x12ea, 0x950: 0x12f2, 0x951: 0x131e, + 0x952: 0x1326, 0x953: 0x132e, 0x954: 0x1336, 0x955: 0x1372, 0x956: 0x1376, 0x957: 0x137e, + 0x958: 0x1382, 0x959: 0x1386, 0x95a: 0x13b2, 0x95b: 0x13b6, 0x95c: 0x13be, 0x95d: 0x13d2, + 0x95e: 0x13d6, 0x95f: 0x13f2, 0x960: 0x13fa, 0x961: 0x13fe, 0x962: 0x1422, 0x963: 0x1442, + 0x964: 0x1456, 0x965: 0x145a, 0x966: 0x1462, 0x967: 0x148e, 0x968: 0x1492, 0x969: 0x14a2, + 0x96a: 0x14c6, 0x96b: 0x14d2, 0x96c: 0x14e2, 0x96d: 0x14fa, 0x96e: 0x1502, 0x96f: 0x1506, + 0x970: 0x150a, 0x971: 0x150e, 0x972: 0x151a, 0x973: 0x151e, 0x974: 0x1526, 0x975: 0x1542, + 0x976: 0x1546, 0x977: 0x154a, 0x978: 0x1562, 0x979: 
0x1566, 0x97a: 0x156e, 0x97b: 0x1582, + 0x97c: 0x1586, 0x97d: 0x158a, 0x97e: 0x1592, 0x97f: 0x1596, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f1f, 0x98d: 0xa000, 0x98e: 0x3f27, 0x98f: 0xa000, 0x990: 0x3f2f, 0x991: 0xa000, + 0x992: 0x3f37, 0x993: 0xa000, 0x994: 0x3f3f, 0x995: 0xa000, 0x996: 0x3f47, 0x997: 0xa000, + 0x998: 0x3f4f, 0x999: 0xa000, 0x99a: 0x3f57, 0x99b: 0xa000, 0x99c: 0x3f5f, 0x99d: 0xa000, + 0x99e: 0x3f67, 0x99f: 0xa000, 0x9a0: 0x3f6f, 0x9a1: 0xa000, 0x9a2: 0x3f77, + 0x9a4: 0xa000, 0x9a5: 0x3f7f, 0x9a6: 0xa000, 0x9a7: 0x3f87, 0x9a8: 0xa000, 0x9a9: 0x3f8f, + 0x9af: 0xa000, + 0x9b0: 0x3f97, 0x9b1: 0x3f9f, 0x9b2: 0xa000, 0x9b3: 0x3fa7, 0x9b4: 0x3faf, 0x9b5: 0xa000, + 0x9b6: 0x3fb7, 0x9b7: 0x3fbf, 0x9b8: 0xa000, 0x9b9: 0x3fc7, 0x9ba: 0x3fcf, 0x9bb: 0xa000, + 0x9bc: 0x3fd7, 0x9bd: 0x3fdf, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f17, + 0x9d9: 0x9904, 0x9da: 0x9904, 0x9db: 0x42f3, 0x9dc: 0x42f9, 0x9dd: 0xa000, + 0x9de: 0x3fe7, 0x9df: 0x26ba, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3ff7, 0x9ed: 0xa000, 0x9ee: 0x3fff, 0x9ef: 0xa000, + 0x9f0: 0x4007, 0x9f1: 0xa000, 0x9f2: 0x400f, 0x9f3: 0xa000, 0x9f4: 0x4017, 0x9f5: 0xa000, + 0x9f6: 0x401f, 0x9f7: 0xa000, 0x9f8: 0x4027, 0x9f9: 0xa000, 0x9fa: 0x402f, 0x9fb: 0xa000, + 0x9fc: 0x4037, 0x9fd: 0xa000, 0x9fe: 0x403f, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4047, 0xa01: 0xa000, 0xa02: 0x404f, 0xa04: 0xa000, 0xa05: 0x4057, + 0xa06: 0xa000, 0xa07: 0x405f, 0xa08: 0xa000, 0xa09: 0x4067, + 0xa0f: 0xa000, 0xa10: 0x406f, 0xa11: 0x4077, + 0xa12: 0xa000, 0xa13: 0x407f, 0xa14: 0x4087, 0xa15: 0xa000, 0xa16: 0x408f, 0xa17: 0x4097, + 0xa18: 0xa000, 0xa19: 0x409f, 0xa1a: 0x40a7, 0xa1b: 0xa000, 0xa1c: 0x40af, 0xa1d: 0x40b7, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fef, + 0xa37: 0x40bf, 0xa38: 0x40c7, 0xa39: 0x40cf, 0xa3a: 0x40d7, + 0xa3d: 0xa000, 0xa3e: 0x40df, 0xa3f: 0x26cf, + // Block 0x29, offset 0xa40 + 0xa40: 0x036a, 0xa41: 0x032e, 0xa42: 0x0332, 0xa43: 0x0336, 0xa44: 0x037e, 0xa45: 0x033a, + 0xa46: 0x033e, 0xa47: 0x0342, 0xa48: 0x0346, 0xa49: 0x034a, 0xa4a: 0x034e, 0xa4b: 0x0352, + 0xa4c: 0x0356, 0xa4d: 0x035a, 0xa4e: 0x035e, 0xa4f: 0x49d4, 0xa50: 0x49da, 0xa51: 0x49e0, + 0xa52: 0x49e6, 0xa53: 0x49ec, 0xa54: 0x49f2, 0xa55: 0x49f8, 0xa56: 0x49fe, 0xa57: 0x4a04, + 0xa58: 0x4a0a, 0xa59: 0x4a10, 0xa5a: 0x4a16, 0xa5b: 0x4a1c, 0xa5c: 0x4a22, 0xa5d: 0x4a28, + 0xa5e: 0x4a2e, 0xa5f: 0x4a34, 0xa60: 0x4a3a, 0xa61: 0x4a40, 0xa62: 0x4a46, 0xa63: 0x4a4c, + 0xa64: 0x03c6, 0xa65: 0x0362, 0xa66: 0x0366, 0xa67: 0x03ea, 0xa68: 0x03ee, 0xa69: 0x03f2, + 0xa6a: 0x03f6, 0xa6b: 0x03fa, 0xa6c: 0x03fe, 0xa6d: 0x0402, 0xa6e: 0x036e, 0xa6f: 0x0406, + 0xa70: 0x040a, 0xa71: 0x0372, 0xa72: 0x0376, 0xa73: 0x037a, 0xa74: 0x0382, 0xa75: 0x0386, + 0xa76: 0x038a, 0xa77: 0x038e, 0xa78: 0x0392, 0xa79: 0x0396, 0xa7a: 0x039a, 0xa7b: 0x039e, + 0xa7c: 0x03a2, 0xa7d: 0x03a6, 0xa7e: 0x03aa, 0xa7f: 0x03ae, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03b2, 0xa81: 0x03b6, 0xa82: 0x040e, 0xa83: 0x0412, 0xa84: 0x03ba, 0xa85: 0x03be, + 0xa86: 0x03c2, 0xa87: 0x03ca, 0xa88: 0x03ce, 0xa89: 0x03d2, 0xa8a: 0x03d6, 0xa8b: 0x03da, + 0xa8c: 0x03de, 0xa8d: 0x03e2, 0xa8e: 0x03e6, + 0xa92: 0x06c2, 0xa93: 0x071e, 0xa94: 0x06ce, 0xa95: 0x097e, 0xa96: 0x06d2, 0xa97: 0x06ea, + 0xa98: 0x06d6, 0xa99: 0x0f96, 0xa9a: 0x070a, 0xa9b: 0x06de, 0xa9c: 0x06c6, 0xa9d: 0x0a02, + 0xa9e: 0x0992, 0xa9f: 0x0732, + // Block 0x2b, offset 0xac0 + 0xac0: 0x205a, 0xac1: 0x2060, 0xac2: 0x2066, 0xac3: 0x206c, 0xac4: 0x2072, 0xac5: 0x2078, + 0xac6: 0x207e, 
0xac7: 0x2084, 0xac8: 0x208a, 0xac9: 0x2090, 0xaca: 0x2096, 0xacb: 0x209c, + 0xacc: 0x20a2, 0xacd: 0x20a8, 0xace: 0x2733, 0xacf: 0x273c, 0xad0: 0x2745, 0xad1: 0x274e, + 0xad2: 0x2757, 0xad3: 0x2760, 0xad4: 0x2769, 0xad5: 0x2772, 0xad6: 0x277b, 0xad7: 0x278d, + 0xad8: 0x2796, 0xad9: 0x279f, 0xada: 0x27a8, 0xadb: 0x27b1, 0xadc: 0x2784, 0xadd: 0x2bb9, + 0xade: 0x2afa, 0xae0: 0x20ae, 0xae1: 0x20c6, 0xae2: 0x20ba, 0xae3: 0x210e, + 0xae4: 0x20cc, 0xae5: 0x20ea, 0xae6: 0x20b4, 0xae7: 0x20e4, 0xae8: 0x20c0, 0xae9: 0x20f6, + 0xaea: 0x2126, 0xaeb: 0x2144, 0xaec: 0x213e, 0xaed: 0x2132, 0xaee: 0x2180, 0xaef: 0x2114, + 0xaf0: 0x2120, 0xaf1: 0x2138, 0xaf2: 0x212c, 0xaf3: 0x2156, 0xaf4: 0x2102, 0xaf5: 0x214a, + 0xaf6: 0x2174, 0xaf7: 0x215c, 0xaf8: 0x20f0, 0xaf9: 0x20d2, 0xafa: 0x2108, 0xafb: 0x211a, + 0xafc: 0x2150, 0xafd: 0x20d8, 0xafe: 0x217a, 0xaff: 0x20fc, + // Block 0x2c, offset 0xb00 + 0xb00: 0x2162, 0xb01: 0x20de, 0xb02: 0x2168, 0xb03: 0x216e, 0xb04: 0x0932, 0xb05: 0x0b06, + 0xb06: 0x0caa, 0xb07: 0x10ca, + 0xb10: 0x1bca, 0xb11: 0x18ac, + 0xb12: 0x18af, 0xb13: 0x18b2, 0xb14: 0x18b5, 0xb15: 0x18b8, 0xb16: 0x18bb, 0xb17: 0x18be, + 0xb18: 0x18c1, 0xb19: 0x18c4, 0xb1a: 0x18cd, 0xb1b: 0x18d0, 0xb1c: 0x18d3, 0xb1d: 0x18d6, + 0xb1e: 0x18d9, 0xb1f: 0x18dc, 0xb20: 0x0316, 0xb21: 0x031e, 0xb22: 0x0322, 0xb23: 0x032a, + 0xb24: 0x032e, 0xb25: 0x0332, 0xb26: 0x033a, 0xb27: 0x0342, 0xb28: 0x0346, 0xb29: 0x034e, + 0xb2a: 0x0352, 0xb2b: 0x0356, 0xb2c: 0x035a, 0xb2d: 0x035e, 0xb2e: 0x2e2f, 0xb2f: 0x2e37, + 0xb30: 0x2e3f, 0xb31: 0x2e47, 0xb32: 0x2e4f, 0xb33: 0x2e57, 0xb34: 0x2e5f, 0xb35: 0x2e67, + 0xb36: 0x2e77, 0xb37: 0x2e7f, 0xb38: 0x2e87, 0xb39: 0x2e8f, 0xb3a: 0x2e97, 0xb3b: 0x2e9f, + 0xb3c: 0x2eea, 0xb3d: 0x2eb2, 0xb3e: 0x2e6f, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06c2, 0xb41: 0x071e, 0xb42: 0x06ce, 0xb43: 0x097e, 0xb44: 0x0722, 0xb45: 0x07b2, + 0xb46: 0x06ca, 0xb47: 0x07ae, 0xb48: 0x070e, 0xb49: 0x088a, 0xb4a: 0x0d0a, 0xb4b: 0x0e92, + 0xb4c: 0x0dda, 0xb4d: 0x0d1e, 0xb4e: 0x1462, 0xb4f: 0x098e, 0xb50: 0x0cd2, 0xb51: 0x0d4e, + 0xb52: 0x0d0e, 0xb53: 0x104e, 0xb54: 0x08fe, 0xb55: 0x0f06, 0xb56: 0x138a, 0xb57: 0x1062, + 0xb58: 0x0846, 0xb59: 0x1092, 0xb5a: 0x0f9e, 0xb5b: 0x0a1a, 0xb5c: 0x1412, 0xb5d: 0x0782, + 0xb5e: 0x08ae, 0xb5f: 0x0dfa, 0xb60: 0x152a, 0xb61: 0x0746, 0xb62: 0x07d6, 0xb63: 0x0d9e, + 0xb64: 0x06d2, 0xb65: 0x06ea, 0xb66: 0x06d6, 0xb67: 0x0ade, 0xb68: 0x08f2, 0xb69: 0x0882, + 0xb6a: 0x0a5a, 0xb6b: 0x0a4e, 0xb6c: 0x0fee, 0xb6d: 0x0742, 0xb6e: 0x139e, 0xb6f: 0x089e, + 0xb70: 0x09f6, 0xb71: 0x18df, 0xb72: 0x18e2, 0xb73: 0x18e5, 0xb74: 0x18e8, 0xb75: 0x18f1, + 0xb76: 0x18f4, 0xb77: 0x18f7, 0xb78: 0x18fa, 0xb79: 0x18fd, 0xb7a: 0x1900, 0xb7b: 0x1903, + 0xb7c: 0x1906, 0xb7d: 0x1909, 0xb7e: 0x190c, 0xb7f: 0x1915, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1ccc, 0xb81: 0x1cdb, 0xb82: 0x1cea, 0xb83: 0x1cf9, 0xb84: 0x1d08, 0xb85: 0x1d17, + 0xb86: 0x1d26, 0xb87: 0x1d35, 0xb88: 0x1d44, 0xb89: 0x2192, 0xb8a: 0x21a4, 0xb8b: 0x21b6, + 0xb8c: 0x1957, 0xb8d: 0x1c0a, 0xb8e: 0x19d8, 0xb8f: 0x1bae, 0xb90: 0x04ce, 0xb91: 0x04d6, + 0xb92: 0x04de, 0xb93: 0x04e6, 0xb94: 0x04ee, 0xb95: 0x04f2, 0xb96: 0x04f6, 0xb97: 0x04fa, + 0xb98: 0x04fe, 0xb99: 0x0502, 0xb9a: 0x0506, 0xb9b: 0x050a, 0xb9c: 0x050e, 0xb9d: 0x0512, + 0xb9e: 0x0516, 0xb9f: 0x051a, 0xba0: 0x051e, 0xba1: 0x0526, 0xba2: 0x052a, 0xba3: 0x052e, + 0xba4: 0x0532, 0xba5: 0x0536, 0xba6: 0x053a, 0xba7: 0x053e, 0xba8: 0x0542, 0xba9: 0x0546, + 0xbaa: 0x054a, 0xbab: 0x054e, 0xbac: 0x0552, 0xbad: 0x0556, 0xbae: 0x055a, 0xbaf: 0x055e, + 0xbb0: 0x0562, 0xbb1: 0x0566, 0xbb2: 
0x056a, 0xbb3: 0x0572, 0xbb4: 0x057a, 0xbb5: 0x0582, + 0xbb6: 0x0586, 0xbb7: 0x058a, 0xbb8: 0x058e, 0xbb9: 0x0592, 0xbba: 0x0596, 0xbbb: 0x059a, + 0xbbc: 0x059e, 0xbbd: 0x05a2, 0xbbe: 0x05a6, 0xbbf: 0x2700, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b19, 0xbc1: 0x29b5, 0xbc2: 0x2b29, 0xbc3: 0x288d, 0xbc4: 0x2efb, 0xbc5: 0x2897, + 0xbc6: 0x28a1, 0xbc7: 0x2f3f, 0xbc8: 0x29c2, 0xbc9: 0x28ab, 0xbca: 0x28b5, 0xbcb: 0x28bf, + 0xbcc: 0x29e9, 0xbcd: 0x29f6, 0xbce: 0x29cf, 0xbcf: 0x29dc, 0xbd0: 0x2ec0, 0xbd1: 0x2a03, + 0xbd2: 0x2a10, 0xbd3: 0x2bcb, 0xbd4: 0x26c1, 0xbd5: 0x2bde, 0xbd6: 0x2bf1, 0xbd7: 0x2b39, + 0xbd8: 0x2a1d, 0xbd9: 0x2c04, 0xbda: 0x2c17, 0xbdb: 0x2a2a, 0xbdc: 0x28c9, 0xbdd: 0x28d3, + 0xbde: 0x2ece, 0xbdf: 0x2a37, 0xbe0: 0x2b49, 0xbe1: 0x2f0c, 0xbe2: 0x28dd, 0xbe3: 0x28e7, + 0xbe4: 0x2a44, 0xbe5: 0x28f1, 0xbe6: 0x28fb, 0xbe7: 0x26d6, 0xbe8: 0x26dd, 0xbe9: 0x2905, + 0xbea: 0x290f, 0xbeb: 0x2c2a, 0xbec: 0x2a51, 0xbed: 0x2b59, 0xbee: 0x2c3d, 0xbef: 0x2a5e, + 0xbf0: 0x2923, 0xbf1: 0x2919, 0xbf2: 0x2f53, 0xbf3: 0x2a6b, 0xbf4: 0x2c50, 0xbf5: 0x292d, + 0xbf6: 0x2b69, 0xbf7: 0x2937, 0xbf8: 0x2a85, 0xbf9: 0x2941, 0xbfa: 0x2a92, 0xbfb: 0x2f1d, + 0xbfc: 0x2a78, 0xbfd: 0x2b79, 0xbfe: 0x2a9f, 0xbff: 0x26e4, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f2e, 0xc01: 0x294b, 0xc02: 0x2955, 0xc03: 0x2aac, 0xc04: 0x295f, 0xc05: 0x2969, + 0xc06: 0x2973, 0xc07: 0x2b89, 0xc08: 0x2ab9, 0xc09: 0x26eb, 0xc0a: 0x2c63, 0xc0b: 0x2ea7, + 0xc0c: 0x2b99, 0xc0d: 0x2ac6, 0xc0e: 0x2edc, 0xc0f: 0x297d, 0xc10: 0x2987, 0xc11: 0x2ad3, + 0xc12: 0x26f2, 0xc13: 0x2ae0, 0xc14: 0x2ba9, 0xc15: 0x26f9, 0xc16: 0x2c76, 0xc17: 0x2991, + 0xc18: 0x1cbd, 0xc19: 0x1cd1, 0xc1a: 0x1ce0, 0xc1b: 0x1cef, 0xc1c: 0x1cfe, 0xc1d: 0x1d0d, + 0xc1e: 0x1d1c, 0xc1f: 0x1d2b, 0xc20: 0x1d3a, 0xc21: 0x1d49, 0xc22: 0x2198, 0xc23: 0x21aa, + 0xc24: 0x21bc, 0xc25: 0x21c8, 0xc26: 0x21d4, 0xc27: 0x21e0, 0xc28: 0x21ec, 0xc29: 0x21f8, + 0xc2a: 0x2204, 0xc2b: 0x2210, 0xc2c: 0x224c, 0xc2d: 0x2258, 0xc2e: 0x2264, 0xc2f: 0x2270, + 0xc30: 0x227c, 0xc31: 0x1c1a, 0xc32: 0x19cc, 0xc33: 0x1939, 0xc34: 0x1bea, 0xc35: 0x1a4d, + 0xc36: 0x1a5c, 0xc37: 0x19d2, 0xc38: 0x1c02, 0xc39: 0x1c06, 0xc3a: 0x1963, 0xc3b: 0x270e, + 0xc3c: 0x271c, 0xc3d: 0x2707, 0xc3e: 0x2715, 0xc3f: 0x2aed, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a50, 0xc41: 0x1a38, 0xc42: 0x1c66, 0xc43: 0x1a20, 0xc44: 0x19f9, 0xc45: 0x196c, + 0xc46: 0x197b, 0xc47: 0x194b, 0xc48: 0x1bf6, 0xc49: 0x1d58, 0xc4a: 0x1a53, 0xc4b: 0x1a3b, + 0xc4c: 0x1c6a, 0xc4d: 0x1c76, 0xc4e: 0x1a2c, 0xc4f: 0x1a02, 0xc50: 0x195a, 0xc51: 0x1c22, + 0xc52: 0x1bb6, 0xc53: 0x1ba2, 0xc54: 0x1bd2, 0xc55: 0x1c7a, 0xc56: 0x1a2f, 0xc57: 0x19cf, + 0xc58: 0x1a05, 0xc59: 0x19e4, 0xc5a: 0x1a47, 0xc5b: 0x1c7e, 0xc5c: 0x1a32, 0xc5d: 0x19c6, + 0xc5e: 0x1a08, 0xc5f: 0x1c42, 0xc60: 0x1bfa, 0xc61: 0x1a1a, 0xc62: 0x1c2a, 0xc63: 0x1c46, + 0xc64: 0x1bfe, 0xc65: 0x1a1d, 0xc66: 0x1c2e, 0xc67: 0x22ee, 0xc68: 0x2302, 0xc69: 0x199c, + 0xc6a: 0x1c26, 0xc6b: 0x1bba, 0xc6c: 0x1ba6, 0xc6d: 0x1c4e, 0xc6e: 0x2723, 0xc6f: 0x27ba, + 0xc70: 0x1a5f, 0xc71: 0x1a4a, 0xc72: 0x1c82, 0xc73: 0x1a35, 0xc74: 0x1a56, 0xc75: 0x1a3e, + 0xc76: 0x1c6e, 0xc77: 0x1a23, 0xc78: 0x19fc, 0xc79: 0x1987, 0xc7a: 0x1a59, 0xc7b: 0x1a41, + 0xc7c: 0x1c72, 0xc7d: 0x1a26, 0xc7e: 0x19ff, 0xc7f: 0x198a, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c32, 0xc81: 0x1bbe, 0xc82: 0x1d53, 0xc83: 0x193c, 0xc84: 0x19c0, 0xc85: 0x19c3, + 0xc86: 0x22fb, 0xc87: 0x1b9a, 0xc88: 0x19c9, 0xc89: 0x194e, 0xc8a: 0x19e7, 0xc8b: 0x1951, + 0xc8c: 0x19f0, 0xc8d: 0x196f, 0xc8e: 0x1972, 0xc8f: 0x1a0b, 0xc90: 0x1a11, 0xc91: 0x1a14, + 
0xc92: 0x1c36, 0xc93: 0x1a17, 0xc94: 0x1a29, 0xc95: 0x1c3e, 0xc96: 0x1c4a, 0xc97: 0x1996, + 0xc98: 0x1d5d, 0xc99: 0x1bc2, 0xc9a: 0x1999, 0xc9b: 0x1a62, 0xc9c: 0x19ab, 0xc9d: 0x19ba, + 0xc9e: 0x22e8, 0xc9f: 0x22e2, 0xca0: 0x1cc7, 0xca1: 0x1cd6, 0xca2: 0x1ce5, 0xca3: 0x1cf4, + 0xca4: 0x1d03, 0xca5: 0x1d12, 0xca6: 0x1d21, 0xca7: 0x1d30, 0xca8: 0x1d3f, 0xca9: 0x218c, + 0xcaa: 0x219e, 0xcab: 0x21b0, 0xcac: 0x21c2, 0xcad: 0x21ce, 0xcae: 0x21da, 0xcaf: 0x21e6, + 0xcb0: 0x21f2, 0xcb1: 0x21fe, 0xcb2: 0x220a, 0xcb3: 0x2246, 0xcb4: 0x2252, 0xcb5: 0x225e, + 0xcb6: 0x226a, 0xcb7: 0x2276, 0xcb8: 0x2282, 0xcb9: 0x2288, 0xcba: 0x228e, 0xcbb: 0x2294, + 0xcbc: 0x229a, 0xcbd: 0x22ac, 0xcbe: 0x22b2, 0xcbf: 0x1c16, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x137a, 0xcc1: 0x0cfe, 0xcc2: 0x13d6, 0xcc3: 0x13a2, 0xcc4: 0x0e5a, 0xcc5: 0x06ee, + 0xcc6: 0x08e2, 0xcc7: 0x162e, 0xcc8: 0x162e, 0xcc9: 0x0a0e, 0xcca: 0x1462, 0xccb: 0x0946, + 0xccc: 0x0a0a, 0xccd: 0x0bf2, 0xcce: 0x0fd2, 0xccf: 0x1162, 0xcd0: 0x129a, 0xcd1: 0x12d6, + 0xcd2: 0x130a, 0xcd3: 0x141e, 0xcd4: 0x0d76, 0xcd5: 0x0e02, 0xcd6: 0x0eae, 0xcd7: 0x0f46, + 0xcd8: 0x1262, 0xcd9: 0x144a, 0xcda: 0x1576, 0xcdb: 0x0712, 0xcdc: 0x08b6, 0xcdd: 0x0d8a, + 0xcde: 0x0ed2, 0xcdf: 0x1296, 0xce0: 0x15c6, 0xce1: 0x0ab6, 0xce2: 0x0e7a, 0xce3: 0x1286, + 0xce4: 0x131a, 0xce5: 0x0c26, 0xce6: 0x11be, 0xce7: 0x12e2, 0xce8: 0x0b22, 0xce9: 0x0d12, + 0xcea: 0x0e1a, 0xceb: 0x0f1e, 0xcec: 0x142a, 0xced: 0x0752, 0xcee: 0x07ea, 0xcef: 0x0856, + 0xcf0: 0x0c8e, 0xcf1: 0x0d82, 0xcf2: 0x0ece, 0xcf3: 0x0ff2, 0xcf4: 0x117a, 0xcf5: 0x128e, + 0xcf6: 0x12a6, 0xcf7: 0x13ca, 0xcf8: 0x14f2, 0xcf9: 0x15a6, 0xcfa: 0x15c2, 0xcfb: 0x102e, + 0xcfc: 0x106e, 0xcfd: 0x1126, 0xcfe: 0x1246, 0xcff: 0x147e, + // Block 0x34, offset 0xd00 + 0xd00: 0x15ce, 0xd01: 0x134e, 0xd02: 0x09ca, 0xd03: 0x0b3e, 0xd04: 0x10de, 0xd05: 0x119e, + 0xd06: 0x0f02, 0xd07: 0x1036, 0xd08: 0x139a, 0xd09: 0x14ea, 0xd0a: 0x09c6, 0xd0b: 0x0a92, + 0xd0c: 0x0d7a, 0xd0d: 0x0e2e, 0xd0e: 0x0e62, 0xd0f: 0x1116, 0xd10: 0x113e, 0xd11: 0x14aa, + 0xd12: 0x0852, 0xd13: 0x11aa, 0xd14: 0x07f6, 0xd15: 0x07f2, 0xd16: 0x109a, 0xd17: 0x112a, + 0xd18: 0x125e, 0xd19: 0x14b2, 0xd1a: 0x136a, 0xd1b: 0x0c2a, 0xd1c: 0x0d76, 0xd1d: 0x135a, + 0xd1e: 0x06fa, 0xd1f: 0x0a66, 0xd20: 0x0b96, 0xd21: 0x0f32, 0xd22: 0x0fb2, 0xd23: 0x0876, + 0xd24: 0x103e, 0xd25: 0x0762, 0xd26: 0x0b7a, 0xd27: 0x06da, 0xd28: 0x0dee, 0xd29: 0x0ca6, + 0xd2a: 0x1112, 0xd2b: 0x08ca, 0xd2c: 0x09b6, 0xd2d: 0x0ffe, 0xd2e: 0x1266, 0xd2f: 0x133e, + 0xd30: 0x0dba, 0xd31: 0x13fa, 0xd32: 0x0de6, 0xd33: 0x0c3a, 0xd34: 0x121e, 0xd35: 0x0c5a, + 0xd36: 0x0fae, 0xd37: 0x072e, 0xd38: 0x07aa, 0xd39: 0x07ee, 0xd3a: 0x0d56, 0xd3b: 0x10fe, + 0xd3c: 0x11f6, 0xd3d: 0x134a, 0xd3e: 0x145e, 0xd3f: 0x085e, + // Block 0x35, offset 0xd40 + 0xd40: 0x0912, 0xd41: 0x0a1a, 0xd42: 0x0b32, 0xd43: 0x0cc2, 0xd44: 0x0e7e, 0xd45: 0x1042, + 0xd46: 0x149a, 0xd47: 0x157e, 0xd48: 0x15d2, 0xd49: 0x15ea, 0xd4a: 0x083a, 0xd4b: 0x0cf6, + 0xd4c: 0x0da6, 0xd4d: 0x13ee, 0xd4e: 0x0afe, 0xd4f: 0x0bda, 0xd50: 0x0bf6, 0xd51: 0x0c86, + 0xd52: 0x0e6e, 0xd53: 0x0eba, 0xd54: 0x0f6a, 0xd55: 0x108e, 0xd56: 0x1132, 0xd57: 0x1196, + 0xd58: 0x13de, 0xd59: 0x126e, 0xd5a: 0x1406, 0xd5b: 0x1482, 0xd5c: 0x0812, 0xd5d: 0x083e, + 0xd5e: 0x0926, 0xd5f: 0x0eaa, 0xd60: 0x12f6, 0xd61: 0x133e, 0xd62: 0x0b1e, 0xd63: 0x0b8e, + 0xd64: 0x0c52, 0xd65: 0x0db2, 0xd66: 0x10da, 0xd67: 0x0f26, 0xd68: 0x073e, 0xd69: 0x0982, + 0xd6a: 0x0a66, 0xd6b: 0x0aca, 0xd6c: 0x0b9a, 0xd6d: 0x0f42, 0xd6e: 0x0f5e, 0xd6f: 0x116e, + 0xd70: 0x118e, 0xd71: 0x1466, 0xd72: 0x14e6, 0xd73: 
0x14f6, 0xd74: 0x1532, 0xd75: 0x0756, + 0xd76: 0x1082, 0xd77: 0x1452, 0xd78: 0x14ce, 0xd79: 0x0bb2, 0xd7a: 0x071a, 0xd7b: 0x077a, + 0xd7c: 0x0a6a, 0xd7d: 0x0a8a, 0xd7e: 0x0cb2, 0xd7f: 0x0d76, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec6, 0xd81: 0x0fce, 0xd82: 0x127a, 0xd83: 0x141a, 0xd84: 0x1626, 0xd85: 0x0ce6, + 0xd86: 0x14a6, 0xd87: 0x0836, 0xd88: 0x0d32, 0xd89: 0x0d3e, 0xd8a: 0x0e12, 0xd8b: 0x0e4a, + 0xd8c: 0x0f4e, 0xd8d: 0x0faa, 0xd8e: 0x102a, 0xd8f: 0x110e, 0xd90: 0x153e, 0xd91: 0x07b2, + 0xd92: 0x0c06, 0xd93: 0x14b6, 0xd94: 0x076a, 0xd95: 0x0aae, 0xd96: 0x0e32, 0xd97: 0x13e2, + 0xd98: 0x0b6a, 0xd99: 0x0bba, 0xd9a: 0x0d46, 0xd9b: 0x0f32, 0xd9c: 0x14be, 0xd9d: 0x081a, + 0xd9e: 0x0902, 0xd9f: 0x0a9a, 0xda0: 0x0cd6, 0xda1: 0x0d22, 0xda2: 0x0d62, 0xda3: 0x0df6, + 0xda4: 0x0f4a, 0xda5: 0x0fbe, 0xda6: 0x115a, 0xda7: 0x12fa, 0xda8: 0x1306, 0xda9: 0x145a, + 0xdaa: 0x14da, 0xdab: 0x0886, 0xdac: 0x0e4e, 0xdad: 0x0906, 0xdae: 0x0eca, 0xdaf: 0x0f6e, + 0xdb0: 0x128a, 0xdb1: 0x14c2, 0xdb2: 0x15ae, 0xdb3: 0x15d6, 0xdb4: 0x0d3a, 0xdb5: 0x0e2a, + 0xdb6: 0x11c6, 0xdb7: 0x10ba, 0xdb8: 0x10c6, 0xdb9: 0x10ea, 0xdba: 0x0f1a, 0xdbb: 0x0ea2, + 0xdbc: 0x1366, 0xdbd: 0x0736, 0xdbe: 0x122e, 0xdbf: 0x081e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080e, 0xdc1: 0x0b0e, 0xdc2: 0x0c2e, 0xdc3: 0x10f6, 0xdc4: 0x0a56, 0xdc5: 0x0e06, + 0xdc6: 0x0cf2, 0xdc7: 0x13ea, 0xdc8: 0x12ea, 0xdc9: 0x14ae, 0xdca: 0x1326, 0xdcb: 0x0b2a, + 0xdcc: 0x078a, 0xdcd: 0x095e, 0xdd0: 0x09b2, + 0xdd2: 0x0ce2, 0xdd5: 0x07fa, 0xdd6: 0x0f22, 0xdd7: 0x0fe6, + 0xdd8: 0x104a, 0xdd9: 0x1066, 0xdda: 0x106a, 0xddb: 0x107e, 0xddc: 0x14fe, 0xddd: 0x10ee, + 0xdde: 0x1172, 0xde0: 0x1292, 0xde2: 0x1356, + 0xde5: 0x140a, 0xde6: 0x1436, + 0xdea: 0x1552, 0xdeb: 0x1556, 0xdec: 0x155a, 0xded: 0x15be, 0xdee: 0x142e, 0xdef: 0x14ca, + 0xdf0: 0x075a, 0xdf1: 0x077e, 0xdf2: 0x0792, 0xdf3: 0x084e, 0xdf4: 0x085a, 0xdf5: 0x089a, + 0xdf6: 0x094e, 0xdf7: 0x096a, 0xdf8: 0x0972, 0xdf9: 0x09ae, 0xdfa: 0x09ba, 0xdfb: 0x0a96, + 0xdfc: 0x0a9e, 0xdfd: 0x0ba6, 0xdfe: 0x0bce, 0xdff: 0x0bd6, + // Block 0x38, offset 0xe00 + 0xe00: 0x0bee, 0xe01: 0x0c9a, 0xe02: 0x0cca, 0xe03: 0x0cea, 0xe04: 0x0d5a, 0xe05: 0x0e1e, + 0xe06: 0x0e3a, 0xe07: 0x0e6a, 0xe08: 0x0ebe, 0xe09: 0x0ede, 0xe0a: 0x0f52, 0xe0b: 0x1032, + 0xe0c: 0x104e, 0xe0d: 0x1056, 0xe0e: 0x1052, 0xe0f: 0x105a, 0xe10: 0x105e, 0xe11: 0x1062, + 0xe12: 0x1076, 0xe13: 0x107a, 0xe14: 0x109e, 0xe15: 0x10b2, 0xe16: 0x10ce, 0xe17: 0x1132, + 0xe18: 0x113a, 0xe19: 0x1142, 0xe1a: 0x1156, 0xe1b: 0x117e, 0xe1c: 0x11ce, 0xe1d: 0x1202, + 0xe1e: 0x1202, 0xe1f: 0x126a, 0xe20: 0x1312, 0xe21: 0x132a, 0xe22: 0x135e, 0xe23: 0x1362, + 0xe24: 0x13a6, 0xe25: 0x13aa, 0xe26: 0x1402, 0xe27: 0x140a, 0xe28: 0x14de, 0xe29: 0x1522, + 0xe2a: 0x153a, 0xe2b: 0x0b9e, 0xe2c: 0x1721, 0xe2d: 0x11e6, + 0xe30: 0x06e2, 0xe31: 0x07e6, 0xe32: 0x07a6, 0xe33: 0x074e, 0xe34: 0x078e, 0xe35: 0x07ba, + 0xe36: 0x084a, 0xe37: 0x0866, 0xe38: 0x094e, 0xe39: 0x093a, 0xe3a: 0x094a, 0xe3b: 0x0966, + 0xe3c: 0x09b2, 0xe3d: 0x09c2, 0xe3e: 0x0a06, 0xe3f: 0x0a12, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2e, 0xe41: 0x0a3e, 0xe42: 0x0b26, 0xe43: 0x0b2e, 0xe44: 0x0b5e, 0xe45: 0x0b7e, + 0xe46: 0x0bae, 0xe47: 0x0bc6, 0xe48: 0x0bb6, 0xe49: 0x0bd6, 0xe4a: 0x0bca, 0xe4b: 0x0bee, + 0xe4c: 0x0c0a, 0xe4d: 0x0c62, 0xe4e: 0x0c6e, 0xe4f: 0x0c76, 0xe50: 0x0c9e, 0xe51: 0x0ce2, + 0xe52: 0x0d12, 0xe53: 0x0d16, 0xe54: 0x0d2a, 0xe55: 0x0daa, 0xe56: 0x0dba, 0xe57: 0x0e12, + 0xe58: 0x0e5e, 0xe59: 0x0e56, 0xe5a: 0x0e6a, 0xe5b: 0x0e86, 0xe5c: 0x0ebe, 0xe5d: 0x1016, + 0xe5e: 0x0ee2, 0xe5f: 0x0f16, 0xe60: 0x0f22, 
0xe61: 0x0f62, 0xe62: 0x0f7e, 0xe63: 0x0fa2, + 0xe64: 0x0fc6, 0xe65: 0x0fca, 0xe66: 0x0fe6, 0xe67: 0x0fea, 0xe68: 0x0ffa, 0xe69: 0x100e, + 0xe6a: 0x100a, 0xe6b: 0x103a, 0xe6c: 0x10b6, 0xe6d: 0x10ce, 0xe6e: 0x10e6, 0xe6f: 0x111e, + 0xe70: 0x1132, 0xe71: 0x114e, 0xe72: 0x117e, 0xe73: 0x1232, 0xe74: 0x125a, 0xe75: 0x12ce, + 0xe76: 0x1316, 0xe77: 0x1322, 0xe78: 0x132a, 0xe79: 0x1342, 0xe7a: 0x1356, 0xe7b: 0x1346, + 0xe7c: 0x135e, 0xe7d: 0x135a, 0xe7e: 0x1352, 0xe7f: 0x1362, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136e, 0xe81: 0x13aa, 0xe82: 0x13e6, 0xe83: 0x1416, 0xe84: 0x144e, 0xe85: 0x146e, + 0xe86: 0x14ba, 0xe87: 0x14de, 0xe88: 0x14fe, 0xe89: 0x1512, 0xe8a: 0x1522, 0xe8b: 0x152e, + 0xe8c: 0x153a, 0xe8d: 0x158e, 0xe8e: 0x162e, 0xe8f: 0x16b8, 0xe90: 0x16b3, 0xe91: 0x16e5, + 0xe92: 0x060a, 0xe93: 0x0632, 0xe94: 0x0636, 0xe95: 0x1767, 0xe96: 0x1794, 0xe97: 0x180c, + 0xe98: 0x161a, 0xe99: 0x162a, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19db, 0xec1: 0x19de, 0xec2: 0x19e1, 0xec3: 0x1c0e, 0xec4: 0x1c12, 0xec5: 0x1a65, + 0xec6: 0x1a65, + 0xed3: 0x1d7b, 0xed4: 0x1d6c, 0xed5: 0x1d71, 0xed6: 0x1d80, 0xed7: 0x1d76, + 0xedd: 0x43a7, + 0xede: 0x8116, 0xedf: 0x4419, 0xee0: 0x0230, 0xee1: 0x0218, 0xee2: 0x0221, 0xee3: 0x0224, + 0xee4: 0x0227, 0xee5: 0x022a, 0xee6: 0x022d, 0xee7: 0x0233, 0xee8: 0x0236, 0xee9: 0x0017, + 0xeea: 0x4407, 0xeeb: 0x440d, 0xeec: 0x450b, 0xeed: 0x4513, 0xeee: 0x435f, 0xeef: 0x4365, + 0xef0: 0x436b, 0xef1: 0x4371, 0xef2: 0x437d, 0xef3: 0x4383, 0xef4: 0x4389, 0xef5: 0x4395, + 0xef6: 0x439b, 0xef8: 0x43a1, 0xef9: 0x43ad, 0xefa: 0x43b3, 0xefb: 0x43b9, + 0xefc: 0x43c5, 0xefe: 0x43cb, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43d1, 0xf01: 0x43d7, 0xf03: 0x43dd, 0xf04: 0x43e3, + 0xf06: 0x43ef, 0xf07: 0x43f5, 0xf08: 0x43fb, 0xf09: 0x4401, 0xf0a: 0x4413, 0xf0b: 0x438f, + 0xf0c: 0x4377, 0xf0d: 0x43bf, 0xf0e: 0x43e9, 0xf0f: 0x1d85, 0xf10: 0x029c, 0xf11: 0x029c, + 0xf12: 0x02a5, 0xf13: 0x02a5, 0xf14: 0x02a5, 0xf15: 0x02a5, 0xf16: 0x02a8, 0xf17: 0x02a8, + 0xf18: 0x02a8, 0xf19: 0x02a8, 0xf1a: 0x02ae, 0xf1b: 0x02ae, 0xf1c: 0x02ae, 0xf1d: 0x02ae, + 0xf1e: 0x02a2, 0xf1f: 0x02a2, 0xf20: 0x02a2, 0xf21: 0x02a2, 0xf22: 0x02ab, 0xf23: 0x02ab, + 0xf24: 0x02ab, 0xf25: 0x02ab, 0xf26: 0x029f, 0xf27: 0x029f, 0xf28: 0x029f, 0xf29: 0x029f, + 0xf2a: 0x02d2, 0xf2b: 0x02d2, 0xf2c: 0x02d2, 0xf2d: 0x02d2, 0xf2e: 0x02d5, 0xf2f: 0x02d5, + 0xf30: 0x02d5, 0xf31: 0x02d5, 0xf32: 0x02b4, 0xf33: 0x02b4, 0xf34: 0x02b4, 0xf35: 0x02b4, + 0xf36: 0x02b1, 0xf37: 0x02b1, 0xf38: 0x02b1, 0xf39: 0x02b1, 0xf3a: 0x02b7, 0xf3b: 0x02b7, + 0xf3c: 0x02b7, 0xf3d: 0x02b7, 0xf3e: 0x02ba, 0xf3f: 0x02ba, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02ba, 0xf41: 0x02ba, 0xf42: 0x02c3, 0xf43: 0x02c3, 0xf44: 0x02c0, 0xf45: 0x02c0, + 0xf46: 0x02c6, 0xf47: 0x02c6, 0xf48: 0x02bd, 0xf49: 0x02bd, 0xf4a: 0x02cc, 0xf4b: 0x02cc, + 0xf4c: 0x02c9, 0xf4d: 0x02c9, 0xf4e: 0x02d8, 0xf4f: 0x02d8, 0xf50: 0x02d8, 0xf51: 0x02d8, + 0xf52: 0x02de, 0xf53: 0x02de, 0xf54: 0x02de, 0xf55: 0x02de, 0xf56: 0x02e4, 0xf57: 0x02e4, + 0xf58: 0x02e4, 0xf59: 0x02e4, 0xf5a: 0x02e1, 0xf5b: 0x02e1, 0xf5c: 0x02e1, 0xf5d: 0x02e1, + 0xf5e: 0x02e7, 0xf5f: 0x02e7, 0xf60: 0x02ea, 0xf61: 0x02ea, 0xf62: 0x02ea, 0xf63: 0x02ea, + 0xf64: 0x4485, 0xf65: 0x4485, 0xf66: 0x02f0, 0xf67: 0x02f0, 0xf68: 0x02f0, 0xf69: 0x02f0, + 0xf6a: 0x02ed, 0xf6b: 0x02ed, 0xf6c: 0x02ed, 0xf6d: 0x02ed, 0xf6e: 0x030b, 0xf6f: 0x030b, + 0xf70: 0x447f, 0xf71: 0x447f, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02db, 0xf94: 0x02db, 0xf95: 0x02db, 0xf96: 0x02db, 0xf97: 0x02f9, + 0xf98: 0x02f9, 0xf99: 0x02f6, 0xf9a: 0x02f6, 
0xf9b: 0x02fc, 0xf9c: 0x02fc, 0xf9d: 0x2055, + 0xf9e: 0x0302, 0xf9f: 0x0302, 0xfa0: 0x02f3, 0xfa1: 0x02f3, 0xfa2: 0x02ff, 0xfa3: 0x02ff, + 0xfa4: 0x0308, 0xfa5: 0x0308, 0xfa6: 0x0308, 0xfa7: 0x0308, 0xfa8: 0x0290, 0xfa9: 0x0290, + 0xfaa: 0x25b0, 0xfab: 0x25b0, 0xfac: 0x2620, 0xfad: 0x2620, 0xfae: 0x25ef, 0xfaf: 0x25ef, + 0xfb0: 0x260b, 0xfb1: 0x260b, 0xfb2: 0x2604, 0xfb3: 0x2604, 0xfb4: 0x2612, 0xfb5: 0x2612, + 0xfb6: 0x2619, 0xfb7: 0x2619, 0xfb8: 0x2619, 0xfb9: 0x25f6, 0xfba: 0x25f6, 0xfbb: 0x25f6, + 0xfbc: 0x0305, 0xfbd: 0x0305, 0xfbe: 0x0305, 0xfbf: 0x0305, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b7, 0xfc1: 0x25be, 0xfc2: 0x25da, 0xfc3: 0x25f6, 0xfc4: 0x25fd, 0xfc5: 0x1d8f, + 0xfc6: 0x1d94, 0xfc7: 0x1d99, 0xfc8: 0x1da8, 0xfc9: 0x1db7, 0xfca: 0x1dbc, 0xfcb: 0x1dc1, + 0xfcc: 0x1dc6, 0xfcd: 0x1dcb, 0xfce: 0x1dda, 0xfcf: 0x1de9, 0xfd0: 0x1dee, 0xfd1: 0x1df3, + 0xfd2: 0x1e02, 0xfd3: 0x1e11, 0xfd4: 0x1e16, 0xfd5: 0x1e1b, 0xfd6: 0x1e20, 0xfd7: 0x1e2f, + 0xfd8: 0x1e34, 0xfd9: 0x1e43, 0xfda: 0x1e48, 0xfdb: 0x1e4d, 0xfdc: 0x1e5c, 0xfdd: 0x1e61, + 0xfde: 0x1e66, 0xfdf: 0x1e70, 0xfe0: 0x1eac, 0xfe1: 0x1ebb, 0xfe2: 0x1eca, 0xfe3: 0x1ecf, + 0xfe4: 0x1ed4, 0xfe5: 0x1ede, 0xfe6: 0x1eed, 0xfe7: 0x1ef2, 0xfe8: 0x1f01, 0xfe9: 0x1f06, + 0xfea: 0x1f0b, 0xfeb: 0x1f1a, 0xfec: 0x1f1f, 0xfed: 0x1f2e, 0xfee: 0x1f33, 0xfef: 0x1f38, + 0xff0: 0x1f3d, 0xff1: 0x1f42, 0xff2: 0x1f47, 0xff3: 0x1f4c, 0xff4: 0x1f51, 0xff5: 0x1f56, + 0xff6: 0x1f5b, 0xff7: 0x1f60, 0xff8: 0x1f65, 0xff9: 0x1f6a, 0xffa: 0x1f6f, 0xffb: 0x1f74, + 0xffc: 0x1f79, 0xffd: 0x1f7e, 0xffe: 0x1f83, 0xfff: 0x1f8d, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f92, 0x1001: 0x1f97, 0x1002: 0x1f9c, 0x1003: 0x1fa6, 0x1004: 0x1fab, 0x1005: 0x1fb5, + 0x1006: 0x1fba, 0x1007: 0x1fbf, 0x1008: 0x1fc4, 0x1009: 0x1fc9, 0x100a: 0x1fce, 0x100b: 0x1fd3, + 0x100c: 0x1fd8, 0x100d: 0x1fdd, 0x100e: 0x1fec, 0x100f: 0x1ffb, 0x1010: 0x2000, 0x1011: 0x2005, + 0x1012: 0x200a, 0x1013: 0x200f, 0x1014: 0x2014, 0x1015: 0x201e, 0x1016: 0x2023, 0x1017: 0x2028, + 0x1018: 0x2037, 0x1019: 0x2046, 0x101a: 0x204b, 0x101b: 0x4437, 0x101c: 0x443d, 0x101d: 0x4473, + 0x101e: 0x44ca, 0x101f: 0x44d1, 0x1020: 0x44d8, 0x1021: 0x44df, 0x1022: 0x44e6, 0x1023: 0x44ed, + 0x1024: 0x25cc, 0x1025: 0x25d3, 0x1026: 0x25da, 0x1027: 0x25e1, 0x1028: 0x25f6, 0x1029: 0x25fd, + 0x102a: 0x1d9e, 0x102b: 0x1da3, 0x102c: 0x1da8, 0x102d: 0x1dad, 0x102e: 0x1db7, 0x102f: 0x1dbc, + 0x1030: 0x1dd0, 0x1031: 0x1dd5, 0x1032: 0x1dda, 0x1033: 0x1ddf, 0x1034: 0x1de9, 0x1035: 0x1dee, + 0x1036: 0x1df8, 0x1037: 0x1dfd, 0x1038: 0x1e02, 0x1039: 0x1e07, 0x103a: 0x1e11, 0x103b: 0x1e16, + 0x103c: 0x1f42, 0x103d: 0x1f47, 0x103e: 0x1f56, 0x103f: 0x1f5b, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f60, 0x1041: 0x1f74, 0x1042: 0x1f79, 0x1043: 0x1f7e, 0x1044: 0x1f83, 0x1045: 0x1f9c, + 0x1046: 0x1fa6, 0x1047: 0x1fab, 0x1048: 0x1fb0, 0x1049: 0x1fc4, 0x104a: 0x1fe2, 0x104b: 0x1fe7, + 0x104c: 0x1fec, 0x104d: 0x1ff1, 0x104e: 0x1ffb, 0x104f: 0x2000, 0x1050: 0x4473, 0x1051: 0x202d, + 0x1052: 0x2032, 0x1053: 0x2037, 0x1054: 0x203c, 0x1055: 0x2046, 0x1056: 0x204b, 0x1057: 0x25b7, + 0x1058: 0x25be, 0x1059: 0x25c5, 0x105a: 0x25da, 0x105b: 0x25e8, 0x105c: 0x1d8f, 0x105d: 0x1d94, + 0x105e: 0x1d99, 0x105f: 0x1da8, 0x1060: 0x1db2, 0x1061: 0x1dc1, 0x1062: 0x1dc6, 0x1063: 0x1dcb, + 0x1064: 0x1dda, 0x1065: 0x1de4, 0x1066: 0x1e02, 0x1067: 0x1e1b, 0x1068: 0x1e20, 0x1069: 0x1e2f, + 0x106a: 0x1e34, 0x106b: 0x1e43, 0x106c: 0x1e4d, 0x106d: 0x1e5c, 0x106e: 0x1e61, 0x106f: 0x1e66, + 0x1070: 0x1e70, 0x1071: 0x1eac, 0x1072: 0x1eb1, 0x1073: 0x1ebb, 0x1074: 0x1eca, 
0x1075: 0x1ecf, + 0x1076: 0x1ed4, 0x1077: 0x1ede, 0x1078: 0x1eed, 0x1079: 0x1f01, 0x107a: 0x1f06, 0x107b: 0x1f0b, + 0x107c: 0x1f1a, 0x107d: 0x1f1f, 0x107e: 0x1f2e, 0x107f: 0x1f33, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f38, 0x1081: 0x1f3d, 0x1082: 0x1f4c, 0x1083: 0x1f51, 0x1084: 0x1f65, 0x1085: 0x1f6a, + 0x1086: 0x1f6f, 0x1087: 0x1f74, 0x1088: 0x1f79, 0x1089: 0x1f8d, 0x108a: 0x1f92, 0x108b: 0x1f97, + 0x108c: 0x1f9c, 0x108d: 0x1fa1, 0x108e: 0x1fb5, 0x108f: 0x1fba, 0x1090: 0x1fbf, 0x1091: 0x1fc4, + 0x1092: 0x1fd3, 0x1093: 0x1fd8, 0x1094: 0x1fdd, 0x1095: 0x1fec, 0x1096: 0x1ff6, 0x1097: 0x2005, + 0x1098: 0x200a, 0x1099: 0x4467, 0x109a: 0x201e, 0x109b: 0x2023, 0x109c: 0x2028, 0x109d: 0x2037, + 0x109e: 0x2041, 0x109f: 0x25da, 0x10a0: 0x25e8, 0x10a1: 0x1da8, 0x10a2: 0x1db2, 0x10a3: 0x1dda, + 0x10a4: 0x1de4, 0x10a5: 0x1e02, 0x10a6: 0x1e0c, 0x10a7: 0x1e70, 0x10a8: 0x1e75, 0x10a9: 0x1e98, + 0x10aa: 0x1e9d, 0x10ab: 0x1f74, 0x10ac: 0x1f79, 0x10ad: 0x1f9c, 0x10ae: 0x1fec, 0x10af: 0x1ff6, + 0x10b0: 0x2037, 0x10b1: 0x2041, 0x10b2: 0x451b, 0x10b3: 0x4523, 0x10b4: 0x452b, 0x10b5: 0x1ef7, + 0x10b6: 0x1efc, 0x10b7: 0x1f10, 0x10b8: 0x1f15, 0x10b9: 0x1f24, 0x10ba: 0x1f29, 0x10bb: 0x1e7a, + 0x10bc: 0x1e7f, 0x10bd: 0x1ea2, 0x10be: 0x1ea7, 0x10bf: 0x1e39, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e3e, 0x10c1: 0x1e25, 0x10c2: 0x1e2a, 0x10c3: 0x1e52, 0x10c4: 0x1e57, 0x10c5: 0x1ec0, + 0x10c6: 0x1ec5, 0x10c7: 0x1ee3, 0x10c8: 0x1ee8, 0x10c9: 0x1e84, 0x10ca: 0x1e89, 0x10cb: 0x1e8e, + 0x10cc: 0x1e98, 0x10cd: 0x1e93, 0x10ce: 0x1e6b, 0x10cf: 0x1eb6, 0x10d0: 0x1ed9, 0x10d1: 0x1ef7, + 0x10d2: 0x1efc, 0x10d3: 0x1f10, 0x10d4: 0x1f15, 0x10d5: 0x1f24, 0x10d6: 0x1f29, 0x10d7: 0x1e7a, + 0x10d8: 0x1e7f, 0x10d9: 0x1ea2, 0x10da: 0x1ea7, 0x10db: 0x1e39, 0x10dc: 0x1e3e, 0x10dd: 0x1e25, + 0x10de: 0x1e2a, 0x10df: 0x1e52, 0x10e0: 0x1e57, 0x10e1: 0x1ec0, 0x10e2: 0x1ec5, 0x10e3: 0x1ee3, + 0x10e4: 0x1ee8, 0x10e5: 0x1e84, 0x10e6: 0x1e89, 0x10e7: 0x1e8e, 0x10e8: 0x1e98, 0x10e9: 0x1e93, + 0x10ea: 0x1e6b, 0x10eb: 0x1eb6, 0x10ec: 0x1ed9, 0x10ed: 0x1e84, 0x10ee: 0x1e89, 0x10ef: 0x1e8e, + 0x10f0: 0x1e98, 0x10f1: 0x1e75, 0x10f2: 0x1e9d, 0x10f3: 0x1ef2, 0x10f4: 0x1e5c, 0x10f5: 0x1e61, + 0x10f6: 0x1e66, 0x10f7: 0x1e84, 0x10f8: 0x1e89, 0x10f9: 0x1e8e, 0x10fa: 0x1ef2, 0x10fb: 0x1f01, + 0x10fc: 0x441f, 0x10fd: 0x441f, + // Block 0x44, offset 0x1100 + 0x1110: 0x2317, 0x1111: 0x232c, + 0x1112: 0x232c, 0x1113: 0x2333, 0x1114: 0x233a, 0x1115: 0x234f, 0x1116: 0x2356, 0x1117: 0x235d, + 0x1118: 0x2380, 0x1119: 0x2380, 0x111a: 0x23a3, 0x111b: 0x239c, 0x111c: 0x23b8, 0x111d: 0x23aa, + 0x111e: 0x23b1, 0x111f: 0x23d4, 0x1120: 0x23d4, 0x1121: 0x23cd, 0x1122: 0x23db, 0x1123: 0x23db, + 0x1124: 0x2405, 0x1125: 0x2405, 0x1126: 0x2421, 0x1127: 0x23e9, 0x1128: 0x23e9, 0x1129: 0x23e2, + 0x112a: 0x23f7, 0x112b: 0x23f7, 0x112c: 0x23fe, 0x112d: 0x23fe, 0x112e: 0x2428, 0x112f: 0x2436, + 0x1130: 0x2436, 0x1131: 0x243d, 0x1132: 0x243d, 0x1133: 0x2444, 0x1134: 0x244b, 0x1135: 0x2452, + 0x1136: 0x2459, 0x1137: 0x2459, 0x1138: 0x2460, 0x1139: 0x246e, 0x113a: 0x247c, 0x113b: 0x2475, + 0x113c: 0x2483, 0x113d: 0x2483, 0x113e: 0x2498, 0x113f: 0x249f, + // Block 0x45, offset 0x1140 + 0x1140: 0x24d0, 0x1141: 0x24de, 0x1142: 0x24d7, 0x1143: 0x24bb, 0x1144: 0x24bb, 0x1145: 0x24e5, + 0x1146: 0x24e5, 0x1147: 0x24ec, 0x1148: 0x24ec, 0x1149: 0x2516, 0x114a: 0x251d, 0x114b: 0x2524, + 0x114c: 0x24fa, 0x114d: 0x2508, 0x114e: 0x252b, 0x114f: 0x2532, + 0x1152: 0x2501, 0x1153: 0x2586, 0x1154: 0x258d, 0x1155: 0x2563, 0x1156: 0x256a, 0x1157: 0x254e, + 0x1158: 0x254e, 0x1159: 0x2555, 0x115a: 
0x257f, 0x115b: 0x2578, 0x115c: 0x25a2, 0x115d: 0x25a2, + 0x115e: 0x2310, 0x115f: 0x2325, 0x1160: 0x231e, 0x1161: 0x2348, 0x1162: 0x2341, 0x1163: 0x236b, + 0x1164: 0x2364, 0x1165: 0x238e, 0x1166: 0x2372, 0x1167: 0x2387, 0x1168: 0x23bf, 0x1169: 0x240c, + 0x116a: 0x23f0, 0x116b: 0x242f, 0x116c: 0x24c9, 0x116d: 0x24f3, 0x116e: 0x259b, 0x116f: 0x2594, + 0x1170: 0x25a9, 0x1171: 0x2540, 0x1172: 0x24a6, 0x1173: 0x2571, 0x1174: 0x2498, 0x1175: 0x24d0, + 0x1176: 0x2467, 0x1177: 0x24b4, 0x1178: 0x2547, 0x1179: 0x2539, 0x117a: 0x24c2, 0x117b: 0x24ad, + 0x117c: 0x24c2, 0x117d: 0x2547, 0x117e: 0x2379, 0x117f: 0x2395, + // Block 0x46, offset 0x1180 + 0x1180: 0x250f, 0x1181: 0x248a, 0x1182: 0x2309, 0x1183: 0x24ad, 0x1184: 0x2452, 0x1185: 0x2421, + 0x1186: 0x23c6, 0x1187: 0x255c, + 0x11b0: 0x241a, 0x11b1: 0x2491, 0x11b2: 0x27cc, 0x11b3: 0x27c3, 0x11b4: 0x27f9, 0x11b5: 0x27e7, + 0x11b6: 0x27d5, 0x11b7: 0x27f0, 0x11b8: 0x2802, 0x11b9: 0x2413, 0x11ba: 0x2c89, 0x11bb: 0x2b09, + 0x11bc: 0x27de, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0486, + 0x11d2: 0x048a, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04c2, + 0x11d8: 0x04c6, 0x11d9: 0x1b62, + 0x11e0: 0x8133, 0x11e1: 0x8133, 0x11e2: 0x8133, 0x11e3: 0x8133, + 0x11e4: 0x8133, 0x11e5: 0x8133, 0x11e6: 0x8133, 0x11e7: 0x812e, 0x11e8: 0x812e, 0x11e9: 0x812e, + 0x11ea: 0x812e, 0x11eb: 0x812e, 0x11ec: 0x812e, 0x11ed: 0x812e, 0x11ee: 0x8133, 0x11ef: 0x8133, + 0x11f0: 0x1876, 0x11f1: 0x0446, 0x11f2: 0x0442, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04ba, 0x11fa: 0x04be, 0x11fb: 0x04ae, + 0x11fc: 0x04b2, 0x11fd: 0x0496, 0x11fe: 0x049a, 0x11ff: 0x048e, + // Block 0x48, offset 0x1200 + 0x1200: 0x0492, 0x1201: 0x049e, 0x1202: 0x04a2, 0x1203: 0x04a6, 0x1204: 0x04aa, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x4280, 0x120a: 0x4280, 0x120b: 0x4280, + 0x120c: 0x4280, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0486, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0446, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04ba, + 0x121e: 0x04be, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42c1, 0x1231: 0x4443, 0x1232: 0x42c6, 0x1234: 0x42cb, + 0x1236: 0x42d0, 0x1237: 0x4449, 0x1238: 0x42d5, 0x1239: 0x444f, 0x123a: 0x42da, 0x123b: 0x4455, + 0x123c: 0x42df, 0x123d: 0x445b, 0x123e: 0x42e4, 0x123f: 0x4461, + // Block 0x49, offset 0x1240 + 0x1240: 0x0239, 0x1241: 0x4425, 0x1242: 0x4425, 0x1243: 0x442b, 0x1244: 0x442b, 0x1245: 0x446d, + 0x1246: 0x446d, 0x1247: 0x4431, 0x1248: 0x4431, 0x1249: 0x4479, 0x124a: 0x4479, 0x124b: 0x4479, + 0x124c: 0x4479, 0x124d: 0x023c, 0x124e: 0x023c, 0x124f: 0x023f, 0x1250: 0x023f, 0x1251: 0x023f, + 0x1252: 0x023f, 0x1253: 0x0242, 0x1254: 0x0242, 0x1255: 0x0245, 0x1256: 0x0245, 0x1257: 0x0245, + 0x1258: 0x0245, 0x1259: 0x0248, 0x125a: 0x0248, 0x125b: 0x0248, 0x125c: 0x0248, 0x125d: 0x024b, + 0x125e: 0x024b, 0x125f: 0x024b, 0x1260: 0x024b, 0x1261: 0x024e, 0x1262: 0x024e, 0x1263: 0x024e, + 0x1264: 0x024e, 0x1265: 0x0251, 0x1266: 0x0251, 0x1267: 0x0251, 0x1268: 0x0251, 0x1269: 0x0254, + 0x126a: 0x0254, 0x126b: 0x0257, 0x126c: 0x0257, 0x126d: 0x025a, 0x126e: 0x025a, 0x126f: 0x025d, + 0x1270: 0x025d, 0x1271: 0x0260, 0x1272: 0x0260, 0x1273: 0x0260, 0x1274: 0x0260, 0x1275: 0x0263, + 0x1276: 
0x0263, 0x1277: 0x0263, 0x1278: 0x0263, 0x1279: 0x0266, 0x127a: 0x0266, 0x127b: 0x0266, + 0x127c: 0x0266, 0x127d: 0x0269, 0x127e: 0x0269, 0x127f: 0x0269, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0269, 0x1281: 0x026c, 0x1282: 0x026c, 0x1283: 0x026c, 0x1284: 0x026c, 0x1285: 0x026f, + 0x1286: 0x026f, 0x1287: 0x026f, 0x1288: 0x026f, 0x1289: 0x0272, 0x128a: 0x0272, 0x128b: 0x0272, + 0x128c: 0x0272, 0x128d: 0x0275, 0x128e: 0x0275, 0x128f: 0x0275, 0x1290: 0x0275, 0x1291: 0x0278, + 0x1292: 0x0278, 0x1293: 0x0278, 0x1294: 0x0278, 0x1295: 0x027b, 0x1296: 0x027b, 0x1297: 0x027b, + 0x1298: 0x027b, 0x1299: 0x027e, 0x129a: 0x027e, 0x129b: 0x027e, 0x129c: 0x027e, 0x129d: 0x0281, + 0x129e: 0x0281, 0x129f: 0x0281, 0x12a0: 0x0281, 0x12a1: 0x0284, 0x12a2: 0x0284, 0x12a3: 0x0284, + 0x12a4: 0x0284, 0x12a5: 0x0287, 0x12a6: 0x0287, 0x12a7: 0x0287, 0x12a8: 0x0287, 0x12a9: 0x028a, + 0x12aa: 0x028a, 0x12ab: 0x028a, 0x12ac: 0x028a, 0x12ad: 0x028d, 0x12ae: 0x028d, 0x12af: 0x0290, + 0x12b0: 0x0290, 0x12b1: 0x0293, 0x12b2: 0x0293, 0x12b3: 0x0293, 0x12b4: 0x0293, 0x12b5: 0x2e17, + 0x12b6: 0x2e17, 0x12b7: 0x2e1f, 0x12b8: 0x2e1f, 0x12b9: 0x2e27, 0x12ba: 0x2e27, 0x12bb: 0x1f88, + 0x12bc: 0x1f88, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x047a, 0x12e0: 0x047e, 0x12e1: 0x048a, 0x12e2: 0x049e, 0x12e3: 0x04a2, + 0x12e4: 0x0486, 0x12e5: 0x05ae, 0x12e6: 0x05a6, 0x12e7: 0x04ca, 0x12e8: 0x04d2, 0x12e9: 0x04da, + 0x12ea: 0x04e2, 0x12eb: 0x04ea, 0x12ec: 0x056e, 0x12ed: 0x0576, 0x12ee: 0x057e, 0x12ef: 0x0522, + 0x12f0: 0x05b2, 0x12f1: 0x04ce, 0x12f2: 0x04d6, 0x12f3: 0x04de, 0x12f4: 0x04e6, 0x12f5: 0x04ee, + 0x12f6: 0x04f2, 0x12f7: 0x04f6, 0x12f8: 0x04fa, 0x12f9: 0x04fe, 0x12fa: 0x0502, 0x12fb: 0x0506, + 0x12fc: 0x050a, 0x12fd: 0x050e, 0x12fe: 0x0512, 0x12ff: 0x0516, + // Block 0x4c, offset 0x1300 + 0x1300: 0x051a, 0x1301: 0x051e, 0x1302: 0x0526, 0x1303: 0x052a, 0x1304: 0x052e, 0x1305: 0x0532, + 0x1306: 0x0536, 0x1307: 0x053a, 0x1308: 0x053e, 0x1309: 0x0542, 0x130a: 0x0546, 0x130b: 0x054a, + 0x130c: 0x054e, 0x130d: 0x0552, 0x130e: 0x0556, 0x130f: 0x055a, 0x1310: 0x055e, 0x1311: 0x0562, + 0x1312: 0x0566, 0x1313: 0x056a, 0x1314: 0x0572, 0x1315: 0x057a, 0x1316: 0x0582, 0x1317: 0x0586, + 0x1318: 0x058a, 0x1319: 0x058e, 0x131a: 0x0592, 0x131b: 0x0596, 0x131c: 0x059a, 0x131d: 0x05aa, + 0x131e: 0x4a8f, 0x131f: 0x4a95, 0x1320: 0x03c6, 0x1321: 0x0316, 0x1322: 0x031a, 0x1323: 0x4a52, + 0x1324: 0x031e, 0x1325: 0x4a58, 0x1326: 0x4a5e, 0x1327: 0x0322, 0x1328: 0x0326, 0x1329: 0x032a, + 0x132a: 0x4a64, 0x132b: 0x4a6a, 0x132c: 0x4a70, 0x132d: 0x4a76, 0x132e: 0x4a7c, 0x132f: 0x4a82, + 0x1330: 0x036a, 0x1331: 0x032e, 0x1332: 0x0332, 0x1333: 0x0336, 0x1334: 0x037e, 0x1335: 0x033a, + 0x1336: 0x033e, 0x1337: 0x0342, 0x1338: 0x0346, 0x1339: 0x034a, 0x133a: 0x034e, 0x133b: 0x0352, + 0x133c: 0x0356, 0x133d: 0x035a, 0x133e: 0x035e, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49d4, 0x1343: 0x49da, 0x1344: 0x49e0, 0x1345: 0x49e6, + 0x1346: 0x49ec, 0x1347: 0x49f2, 0x134a: 0x49f8, 0x134b: 0x49fe, + 0x134c: 0x4a04, 0x134d: 0x4a0a, 0x134e: 0x4a10, 0x134f: 0x4a16, + 
0x1352: 0x4a1c, 0x1353: 0x4a22, 0x1354: 0x4a28, 0x1355: 0x4a2e, 0x1356: 0x4a34, 0x1357: 0x4a3a, + 0x135a: 0x4a40, 0x135b: 0x4a46, 0x135c: 0x4a4c, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x427b, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x044a, 0x1368: 0x046e, 0x1369: 0x044e, + 0x136a: 0x0452, 0x136b: 0x0456, 0x136c: 0x045a, 0x136d: 0x0472, 0x136e: 0x0476, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0176, 0x13a9: 0x0179, + 0x13aa: 0x017c, 0x13ab: 0x017f, 0x13ac: 0x0182, 0x13ad: 0x0185, 0x13ae: 0x0188, 0x13af: 0x018b, + 0x13b0: 0x018e, 0x13b1: 0x0191, 0x13b2: 0x0194, 0x13b3: 0x0197, 0x13b4: 0x019a, 0x13b5: 0x019d, + 0x13b6: 0x01a0, 0x13b7: 0x01a3, 0x13b8: 0x01a6, 0x13b9: 0x018b, 0x13ba: 0x01a9, 0x13bb: 0x01ac, + 0x13bc: 0x01af, 0x13bd: 0x01b2, 0x13be: 0x01b5, 0x13bf: 0x01b8, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0200, 0x13c1: 0x0203, 0x13c2: 0x0206, 0x13c3: 0x045e, 0x13c4: 0x01ca, 0x13c5: 0x01d3, + 0x13c6: 0x01d9, 0x13c7: 0x01fd, 0x13c8: 0x01ee, 0x13c9: 0x01eb, 0x13ca: 0x0209, 0x13cb: 0x020c, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x023c, 0x1401: 0x023f, 0x1402: 0x024b, 0x1403: 0x0254, 0x1405: 0x028d, + 0x1406: 0x025d, 0x1407: 0x024e, 0x1408: 0x026c, 0x1409: 0x0293, 0x140a: 0x027e, 0x140b: 0x0281, + 0x140c: 0x0284, 0x140d: 0x0287, 0x140e: 0x0260, 0x140f: 0x0272, 0x1410: 0x0278, 0x1411: 0x0266, + 0x1412: 0x027b, 0x1413: 0x025a, 0x1414: 0x0263, 0x1415: 0x0245, 0x1416: 0x0248, 0x1417: 0x0251, + 0x1418: 0x0257, 0x1419: 0x0269, 0x141a: 0x026f, 0x141b: 0x0275, 0x141c: 0x0296, 0x141d: 0x02e7, + 0x141e: 0x02cf, 0x141f: 0x0299, 0x1421: 0x023f, 0x1422: 0x024b, + 0x1424: 0x028a, 0x1427: 0x024e, 0x1429: 0x0293, + 0x142a: 0x027e, 0x142b: 0x0281, 0x142c: 0x0284, 0x142d: 0x0287, 0x142e: 0x0260, 0x142f: 0x0272, + 0x1430: 0x0278, 0x1431: 0x0266, 0x1432: 0x027b, 0x1434: 0x0263, 0x1435: 0x0245, + 0x1436: 0x0248, 0x1437: 0x0251, 0x1439: 0x0269, 0x143b: 0x0275, + // Block 0x51, offset 0x1440 + 0x1442: 0x024b, + 0x1447: 0x024e, 0x1449: 0x0293, 0x144b: 0x0281, + 0x144d: 0x0287, 0x144e: 0x0260, 0x144f: 0x0272, 0x1451: 0x0266, + 0x1452: 0x027b, 0x1454: 0x0263, 0x1457: 0x0251, + 
0x1459: 0x0269, 0x145b: 0x0275, 0x145d: 0x02e7, + 0x145f: 0x0299, 0x1461: 0x023f, 0x1462: 0x024b, + 0x1464: 0x028a, 0x1467: 0x024e, 0x1468: 0x026c, 0x1469: 0x0293, + 0x146a: 0x027e, 0x146c: 0x0284, 0x146d: 0x0287, 0x146e: 0x0260, 0x146f: 0x0272, + 0x1470: 0x0278, 0x1471: 0x0266, 0x1472: 0x027b, 0x1474: 0x0263, 0x1475: 0x0245, + 0x1476: 0x0248, 0x1477: 0x0251, 0x1479: 0x0269, 0x147a: 0x026f, 0x147b: 0x0275, + 0x147c: 0x0296, 0x147e: 0x02cf, + // Block 0x52, offset 0x1480 + 0x1480: 0x023c, 0x1481: 0x023f, 0x1482: 0x024b, 0x1483: 0x0254, 0x1484: 0x028a, 0x1485: 0x028d, + 0x1486: 0x025d, 0x1487: 0x024e, 0x1488: 0x026c, 0x1489: 0x0293, 0x148b: 0x0281, + 0x148c: 0x0284, 0x148d: 0x0287, 0x148e: 0x0260, 0x148f: 0x0272, 0x1490: 0x0278, 0x1491: 0x0266, + 0x1492: 0x027b, 0x1493: 0x025a, 0x1494: 0x0263, 0x1495: 0x0245, 0x1496: 0x0248, 0x1497: 0x0251, + 0x1498: 0x0257, 0x1499: 0x0269, 0x149a: 0x026f, 0x149b: 0x0275, + 0x14a1: 0x023f, 0x14a2: 0x024b, 0x14a3: 0x0254, + 0x14a5: 0x028d, 0x14a6: 0x025d, 0x14a7: 0x024e, 0x14a8: 0x026c, 0x14a9: 0x0293, + 0x14ab: 0x0281, 0x14ac: 0x0284, 0x14ad: 0x0287, 0x14ae: 0x0260, 0x14af: 0x0272, + 0x14b0: 0x0278, 0x14b1: 0x0266, 0x14b2: 0x027b, 0x14b3: 0x025a, 0x14b4: 0x0263, 0x14b5: 0x0245, + 0x14b6: 0x0248, 0x14b7: 0x0251, 0x14b8: 0x0257, 0x14b9: 0x0269, 0x14ba: 0x026f, 0x14bb: 0x0275, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x187c, 0x14c1: 0x1879, 0x14c2: 0x187f, 0x14c3: 0x18a3, 0x14c4: 0x18c7, 0x14c5: 0x18eb, + 0x14c6: 0x190f, 0x14c7: 0x1918, 0x14c8: 0x191e, 0x14c9: 0x1924, 0x14ca: 0x192a, + 0x14d0: 0x1a92, 0x14d1: 0x1a96, + 0x14d2: 0x1a9a, 0x14d3: 0x1a9e, 0x14d4: 0x1aa2, 0x14d5: 0x1aa6, 0x14d6: 0x1aaa, 0x14d7: 0x1aae, + 0x14d8: 0x1ab2, 0x14d9: 0x1ab6, 0x14da: 0x1aba, 0x14db: 0x1abe, 0x14dc: 0x1ac2, 0x14dd: 0x1ac6, + 0x14de: 0x1aca, 0x14df: 0x1ace, 0x14e0: 0x1ad2, 0x14e1: 0x1ad6, 0x14e2: 0x1ada, 0x14e3: 0x1ade, + 0x14e4: 0x1ae2, 0x14e5: 0x1ae6, 0x14e6: 0x1aea, 0x14e7: 0x1aee, 0x14e8: 0x1af2, 0x14e9: 0x1af6, + 0x14ea: 0x272b, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193f, 0x14ee: 0x19b7, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26b3, 0x1501: 0x26c8, 0x1502: 0x0506, + 0x1510: 0x0c12, 0x1511: 0x0a4a, + 0x1512: 0x08d6, 0x1513: 0x45db, 0x1514: 0x071e, 0x1515: 0x09f2, 0x1516: 0x1332, 0x1517: 0x0a02, + 0x1518: 0x072a, 0x1519: 0x0cda, 0x151a: 0x0eb2, 0x151b: 0x0cb2, 0x151c: 0x082a, 0x151d: 0x0b6e, + 0x151e: 0x07c2, 0x151f: 0x0cba, 0x1520: 0x0816, 0x1521: 0x111a, 0x1522: 0x0f86, 0x1523: 0x138e, + 0x1524: 0x09d6, 0x1525: 0x090e, 0x1526: 0x0e66, 0x1527: 0x0c1e, 0x1528: 0x0c4a, 0x1529: 0x06c2, + 0x152a: 0x06ce, 0x152b: 0x140e, 0x152c: 0x0ade, 0x152d: 0x06ea, 0x152e: 0x08f2, 0x152f: 0x0c3e, + 0x1530: 0x13b6, 0x1531: 0x0c16, 0x1532: 0x1072, 0x1533: 0x10ae, 0x1534: 0x08fa, 0x1535: 0x0e46, + 0x1536: 0x0d0e, 0x1537: 0x0d0a, 0x1538: 0x0f9a, 0x1539: 0x082e, 0x153a: 0x095a, 0x153b: 0x1446, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fe, 0x1541: 0x06f6, 0x1542: 0x0706, 0x1543: 0x164a, 0x1544: 0x074a, 0x1545: 0x075a, + 0x1546: 0x075e, 0x1547: 0x0766, 0x1548: 0x076e, 0x1549: 0x0772, 0x154a: 0x077e, 0x154b: 0x0776, + 0x154c: 0x05b6, 0x154d: 0x165e, 0x154e: 0x0792, 0x154f: 0x0796, 0x1550: 0x079a, 0x1551: 0x07b6, + 0x1552: 0x164f, 0x1553: 0x05ba, 0x1554: 0x07a2, 0x1555: 0x07c2, 0x1556: 0x1659, 0x1557: 0x07d2, + 0x1558: 0x07da, 
0x1559: 0x073a, 0x155a: 0x07e2, 0x155b: 0x07e6, 0x155c: 0x1834, 0x155d: 0x0802, + 0x155e: 0x080a, 0x155f: 0x05c2, 0x1560: 0x0822, 0x1561: 0x0826, 0x1562: 0x082e, 0x1563: 0x0832, + 0x1564: 0x05c6, 0x1565: 0x084a, 0x1566: 0x084e, 0x1567: 0x085a, 0x1568: 0x0866, 0x1569: 0x086a, + 0x156a: 0x086e, 0x156b: 0x0876, 0x156c: 0x0896, 0x156d: 0x089a, 0x156e: 0x08a2, 0x156f: 0x08b2, + 0x1570: 0x08ba, 0x1571: 0x08be, 0x1572: 0x08be, 0x1573: 0x08be, 0x1574: 0x166d, 0x1575: 0x0e96, + 0x1576: 0x08d2, 0x1577: 0x08da, 0x1578: 0x1672, 0x1579: 0x08e6, 0x157a: 0x08ee, 0x157b: 0x08f6, + 0x157c: 0x091e, 0x157d: 0x090a, 0x157e: 0x0916, 0x157f: 0x091a, + // Block 0x56, offset 0x1580 + 0x1580: 0x0922, 0x1581: 0x092a, 0x1582: 0x092e, 0x1583: 0x0936, 0x1584: 0x093e, 0x1585: 0x0942, + 0x1586: 0x0942, 0x1587: 0x094a, 0x1588: 0x0952, 0x1589: 0x0956, 0x158a: 0x0962, 0x158b: 0x0986, + 0x158c: 0x096a, 0x158d: 0x098a, 0x158e: 0x096e, 0x158f: 0x0976, 0x1590: 0x080e, 0x1591: 0x09d2, + 0x1592: 0x099a, 0x1593: 0x099e, 0x1594: 0x09a2, 0x1595: 0x0996, 0x1596: 0x09aa, 0x1597: 0x09a6, + 0x1598: 0x09be, 0x1599: 0x1677, 0x159a: 0x09da, 0x159b: 0x09de, 0x159c: 0x09e6, 0x159d: 0x09f2, + 0x159e: 0x09fa, 0x159f: 0x0a16, 0x15a0: 0x167c, 0x15a1: 0x1681, 0x15a2: 0x0a22, 0x15a3: 0x0a26, + 0x15a4: 0x0a2a, 0x15a5: 0x0a1e, 0x15a6: 0x0a32, 0x15a7: 0x05ca, 0x15a8: 0x05ce, 0x15a9: 0x0a3a, + 0x15aa: 0x0a42, 0x15ab: 0x0a42, 0x15ac: 0x1686, 0x15ad: 0x0a5e, 0x15ae: 0x0a62, 0x15af: 0x0a66, + 0x15b0: 0x0a6e, 0x15b1: 0x168b, 0x15b2: 0x0a76, 0x15b3: 0x0a7a, 0x15b4: 0x0b52, 0x15b5: 0x0a82, + 0x15b6: 0x05d2, 0x15b7: 0x0a8e, 0x15b8: 0x0a9e, 0x15b9: 0x0aaa, 0x15ba: 0x0aa6, 0x15bb: 0x1695, + 0x15bc: 0x0ab2, 0x15bd: 0x169a, 0x15be: 0x0abe, 0x15bf: 0x0aba, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0ac2, 0x15c1: 0x0ad2, 0x15c2: 0x0ad6, 0x15c3: 0x05d6, 0x15c4: 0x0ae6, 0x15c5: 0x0aee, + 0x15c6: 0x0af2, 0x15c7: 0x0af6, 0x15c8: 0x05da, 0x15c9: 0x169f, 0x15ca: 0x05de, 0x15cb: 0x0b12, + 0x15cc: 0x0b16, 0x15cd: 0x0b1a, 0x15ce: 0x0b22, 0x15cf: 0x1866, 0x15d0: 0x0b3a, 0x15d1: 0x16a9, + 0x15d2: 0x16a9, 0x15d3: 0x11da, 0x15d4: 0x0b4a, 0x15d5: 0x0b4a, 0x15d6: 0x05e2, 0x15d7: 0x16cc, + 0x15d8: 0x179e, 0x15d9: 0x0b5a, 0x15da: 0x0b62, 0x15db: 0x05e6, 0x15dc: 0x0b76, 0x15dd: 0x0b86, + 0x15de: 0x0b8a, 0x15df: 0x0b92, 0x15e0: 0x0ba2, 0x15e1: 0x05ee, 0x15e2: 0x05ea, 0x15e3: 0x0ba6, + 0x15e4: 0x16ae, 0x15e5: 0x0baa, 0x15e6: 0x0bbe, 0x15e7: 0x0bc2, 0x15e8: 0x0bc6, 0x15e9: 0x0bc2, + 0x15ea: 0x0bd2, 0x15eb: 0x0bd6, 0x15ec: 0x0be6, 0x15ed: 0x0bde, 0x15ee: 0x0be2, 0x15ef: 0x0bea, + 0x15f0: 0x0bee, 0x15f1: 0x0bf2, 0x15f2: 0x0bfe, 0x15f3: 0x0c02, 0x15f4: 0x0c1a, 0x15f5: 0x0c22, + 0x15f6: 0x0c32, 0x15f7: 0x0c46, 0x15f8: 0x16bd, 0x15f9: 0x0c42, 0x15fa: 0x0c36, 0x15fb: 0x0c4e, + 0x15fc: 0x0c56, 0x15fd: 0x0c6a, 0x15fe: 0x16c2, 0x15ff: 0x0c72, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c66, 0x1601: 0x0c5e, 0x1602: 0x05f2, 0x1603: 0x0c7a, 0x1604: 0x0c82, 0x1605: 0x0c8a, + 0x1606: 0x0c7e, 0x1607: 0x05f6, 0x1608: 0x0c9a, 0x1609: 0x0ca2, 0x160a: 0x16c7, 0x160b: 0x0cce, + 0x160c: 0x0d02, 0x160d: 0x0cde, 0x160e: 0x0602, 0x160f: 0x0cea, 0x1610: 0x05fe, 0x1611: 0x05fa, + 0x1612: 0x07c6, 0x1613: 0x07ca, 0x1614: 0x0d06, 0x1615: 0x0cee, 0x1616: 0x11ae, 0x1617: 0x0666, + 0x1618: 0x0d12, 0x1619: 0x0d16, 0x161a: 0x0d1a, 0x161b: 0x0d2e, 0x161c: 0x0d26, 0x161d: 0x16e0, + 0x161e: 0x0606, 0x161f: 0x0d42, 0x1620: 0x0d36, 0x1621: 0x0d52, 0x1622: 0x0d5a, 0x1623: 0x16ea, + 0x1624: 0x0d5e, 0x1625: 0x0d4a, 0x1626: 0x0d66, 0x1627: 0x060a, 0x1628: 0x0d6a, 0x1629: 0x0d6e, + 0x162a: 0x0d72, 0x162b: 0x0d7e, 0x162c: 
0x16ef, 0x162d: 0x0d86, 0x162e: 0x060e, 0x162f: 0x0d92, + 0x1630: 0x16f4, 0x1631: 0x0d96, 0x1632: 0x0612, 0x1633: 0x0da2, 0x1634: 0x0dae, 0x1635: 0x0dba, + 0x1636: 0x0dbe, 0x1637: 0x16f9, 0x1638: 0x1690, 0x1639: 0x16fe, 0x163a: 0x0dde, 0x163b: 0x1703, + 0x163c: 0x0dea, 0x163d: 0x0df2, 0x163e: 0x0de2, 0x163f: 0x0dfe, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0e, 0x1641: 0x0e1e, 0x1642: 0x0e12, 0x1643: 0x0e16, 0x1644: 0x0e22, 0x1645: 0x0e26, + 0x1646: 0x1708, 0x1647: 0x0e0a, 0x1648: 0x0e3e, 0x1649: 0x0e42, 0x164a: 0x0616, 0x164b: 0x0e56, + 0x164c: 0x0e52, 0x164d: 0x170d, 0x164e: 0x0e36, 0x164f: 0x0e72, 0x1650: 0x1712, 0x1651: 0x1717, + 0x1652: 0x0e76, 0x1653: 0x0e8a, 0x1654: 0x0e86, 0x1655: 0x0e82, 0x1656: 0x061a, 0x1657: 0x0e8e, + 0x1658: 0x0e9e, 0x1659: 0x0e9a, 0x165a: 0x0ea6, 0x165b: 0x1654, 0x165c: 0x0eb6, 0x165d: 0x171c, + 0x165e: 0x0ec2, 0x165f: 0x1726, 0x1660: 0x0ed6, 0x1661: 0x0ee2, 0x1662: 0x0ef6, 0x1663: 0x172b, + 0x1664: 0x0f0a, 0x1665: 0x0f0e, 0x1666: 0x1730, 0x1667: 0x1735, 0x1668: 0x0f2a, 0x1669: 0x0f3a, + 0x166a: 0x061e, 0x166b: 0x0f3e, 0x166c: 0x0622, 0x166d: 0x0622, 0x166e: 0x0f56, 0x166f: 0x0f5a, + 0x1670: 0x0f62, 0x1671: 0x0f66, 0x1672: 0x0f72, 0x1673: 0x0626, 0x1674: 0x0f8a, 0x1675: 0x173a, + 0x1676: 0x0fa6, 0x1677: 0x173f, 0x1678: 0x0fb2, 0x1679: 0x16a4, 0x167a: 0x0fc2, 0x167b: 0x1744, + 0x167c: 0x1749, 0x167d: 0x174e, 0x167e: 0x062a, 0x167f: 0x062e, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ffa, 0x1681: 0x1758, 0x1682: 0x1753, 0x1683: 0x175d, 0x1684: 0x1762, 0x1685: 0x1002, + 0x1686: 0x1006, 0x1687: 0x1006, 0x1688: 0x100e, 0x1689: 0x0636, 0x168a: 0x1012, 0x168b: 0x063a, + 0x168c: 0x063e, 0x168d: 0x176c, 0x168e: 0x1026, 0x168f: 0x102e, 0x1690: 0x103a, 0x1691: 0x0642, + 0x1692: 0x1771, 0x1693: 0x105e, 0x1694: 0x1776, 0x1695: 0x177b, 0x1696: 0x107e, 0x1697: 0x1096, + 0x1698: 0x0646, 0x1699: 0x109e, 0x169a: 0x10a2, 0x169b: 0x10a6, 0x169c: 0x1780, 0x169d: 0x1785, + 0x169e: 0x1785, 0x169f: 0x10be, 0x16a0: 0x064a, 0x16a1: 0x178a, 0x16a2: 0x10d2, 0x16a3: 0x10d6, + 0x16a4: 0x064e, 0x16a5: 0x178f, 0x16a6: 0x10f2, 0x16a7: 0x0652, 0x16a8: 0x1102, 0x16a9: 0x10fa, + 0x16aa: 0x110a, 0x16ab: 0x1799, 0x16ac: 0x1122, 0x16ad: 0x0656, 0x16ae: 0x112e, 0x16af: 0x1136, + 0x16b0: 0x1146, 0x16b1: 0x065a, 0x16b2: 0x17a3, 0x16b3: 0x17a8, 0x16b4: 0x065e, 0x16b5: 0x17ad, + 0x16b6: 0x115e, 0x16b7: 0x17b2, 0x16b8: 0x116a, 0x16b9: 0x1176, 0x16ba: 0x117e, 0x16bb: 0x17b7, + 0x16bc: 0x17bc, 0x16bd: 0x1192, 0x16be: 0x17c1, 0x16bf: 0x119a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16d1, 0x16c1: 0x0662, 0x16c2: 0x11b2, 0x16c3: 0x11b6, 0x16c4: 0x066a, 0x16c5: 0x11ba, + 0x16c6: 0x0a36, 0x16c7: 0x17c6, 0x16c8: 0x17cb, 0x16c9: 0x16d6, 0x16ca: 0x16db, 0x16cb: 0x11da, + 0x16cc: 0x11de, 0x16cd: 0x13f6, 0x16ce: 0x066e, 0x16cf: 0x120a, 0x16d0: 0x1206, 0x16d1: 0x120e, + 0x16d2: 0x0842, 0x16d3: 0x1212, 0x16d4: 0x1216, 0x16d5: 0x121a, 0x16d6: 0x1222, 0x16d7: 0x17d0, + 0x16d8: 0x121e, 0x16d9: 0x1226, 0x16da: 0x123a, 0x16db: 0x123e, 0x16dc: 0x122a, 0x16dd: 0x1242, + 0x16de: 0x1256, 0x16df: 0x126a, 0x16e0: 0x1236, 0x16e1: 0x124a, 0x16e2: 0x124e, 0x16e3: 0x1252, + 0x16e4: 0x17d5, 0x16e5: 0x17df, 0x16e6: 0x17da, 0x16e7: 0x0672, 0x16e8: 0x1272, 0x16e9: 0x1276, + 0x16ea: 0x127e, 0x16eb: 0x17f3, 0x16ec: 0x1282, 0x16ed: 0x17e4, 0x16ee: 0x0676, 0x16ef: 0x067a, + 0x16f0: 0x17e9, 0x16f1: 0x17ee, 0x16f2: 0x067e, 0x16f3: 0x12a2, 0x16f4: 0x12a6, 0x16f5: 0x12aa, + 0x16f6: 0x12ae, 0x16f7: 0x12ba, 0x16f8: 0x12b6, 0x16f9: 0x12c2, 0x16fa: 0x12be, 0x16fb: 0x12ce, + 0x16fc: 0x12c6, 0x16fd: 0x12ca, 0x16fe: 0x12d2, 0x16ff: 0x0682, + // 
Block 0x5c, offset 0x1700 + 0x1700: 0x12da, 0x1701: 0x12de, 0x1702: 0x0686, 0x1703: 0x12ee, 0x1704: 0x12f2, 0x1705: 0x17f8, + 0x1706: 0x12fe, 0x1707: 0x1302, 0x1708: 0x068a, 0x1709: 0x130e, 0x170a: 0x05be, 0x170b: 0x17fd, + 0x170c: 0x1802, 0x170d: 0x068e, 0x170e: 0x0692, 0x170f: 0x133a, 0x1710: 0x1352, 0x1711: 0x136e, + 0x1712: 0x137e, 0x1713: 0x1807, 0x1714: 0x1392, 0x1715: 0x1396, 0x1716: 0x13ae, 0x1717: 0x13ba, + 0x1718: 0x1811, 0x1719: 0x1663, 0x171a: 0x13c6, 0x171b: 0x13c2, 0x171c: 0x13ce, 0x171d: 0x1668, + 0x171e: 0x13da, 0x171f: 0x13e6, 0x1720: 0x1816, 0x1721: 0x181b, 0x1722: 0x1426, 0x1723: 0x1432, + 0x1724: 0x143a, 0x1725: 0x1820, 0x1726: 0x143e, 0x1727: 0x146a, 0x1728: 0x1476, 0x1729: 0x147a, + 0x172a: 0x1472, 0x172b: 0x1486, 0x172c: 0x148a, 0x172d: 0x1825, 0x172e: 0x1496, 0x172f: 0x0696, + 0x1730: 0x149e, 0x1731: 0x182a, 0x1732: 0x069a, 0x1733: 0x14d6, 0x1734: 0x0ac6, 0x1735: 0x14ee, + 0x1736: 0x182f, 0x1737: 0x1839, 0x1738: 0x069e, 0x1739: 0x06a2, 0x173a: 0x1516, 0x173b: 0x183e, + 0x173c: 0x06a6, 0x173d: 0x1843, 0x173e: 0x152e, 0x173f: 0x152e, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1536, 0x1741: 0x1848, 0x1742: 0x154e, 0x1743: 0x06aa, 0x1744: 0x155e, 0x1745: 0x156a, + 0x1746: 0x1572, 0x1747: 0x157a, 0x1748: 0x06ae, 0x1749: 0x184d, 0x174a: 0x158e, 0x174b: 0x15aa, + 0x174c: 0x15b6, 0x174d: 0x06b2, 0x174e: 0x06b6, 0x174f: 0x15ba, 0x1750: 0x1852, 0x1751: 0x06ba, + 0x1752: 0x1857, 0x1753: 0x185c, 0x1754: 0x1861, 0x1755: 0x15de, 0x1756: 0x06be, 0x1757: 0x15f2, + 0x1758: 0x15fa, 0x1759: 0x15fe, 0x175a: 0x1606, 0x175b: 0x160e, 0x175c: 0x1616, 0x175d: 0x186b, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16b: 0x92, 0x16c: 0x0f, 0x16d: 0x93, 0x16e: 0x94, 0x16f: 0x95, + 0x170: 0x96, 0x173: 0x97, 0x174: 0x98, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x99, 0x181: 0x9a, 0x182: 0x9b, 0x183: 0x9c, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9d, 0x187: 0x9e, + 0x188: 0x9f, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0xa0, 0x18c: 0xa1, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa2, + 0x1a8: 0xa3, 0x1a9: 0xa4, 0x1ab: 0xa5, + 0x1b1: 0xa6, 0x1b3: 0xa7, 0x1b5: 0xa8, 0x1b7: 0xa9, + 0x1ba: 0xaa, 0x1bb: 0xab, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 
0x23, 0x1bf: 0xac, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xad, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xae, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xaf, 0x21a: 0xb0, 0x21b: 0xb1, 0x21d: 0xb2, 0x21f: 0xb3, + 0x220: 0xb4, 0x223: 0xb5, 0x224: 0xb6, 0x225: 0xb7, 0x226: 0xb8, 0x227: 0xb9, + 0x22a: 0xba, 0x22b: 0xbb, 0x22d: 0xbc, 0x22f: 0xbd, + 0x230: 0xbe, 0x231: 0xbf, 0x232: 0xc0, 0x233: 0xc1, 0x234: 0xc2, 0x235: 0xc3, 0x236: 0xc4, 0x237: 0xbe, + 0x238: 0xbf, 0x239: 0xc0, 0x23a: 0xc1, 0x23b: 0xc2, 0x23c: 0xc3, 0x23d: 0xc4, 0x23e: 0xbe, 0x23f: 0xbf, + // Block 0x9, offset 0x240 + 0x240: 0xc0, 0x241: 0xc1, 0x242: 0xc2, 0x243: 0xc3, 0x244: 0xc4, 0x245: 0xbe, 0x246: 0xbf, 0x247: 0xc0, + 0x248: 0xc1, 0x249: 0xc2, 0x24a: 0xc3, 0x24b: 0xc4, 0x24c: 0xbe, 0x24d: 0xbf, 0x24e: 0xc0, 0x24f: 0xc1, + 0x250: 0xc2, 0x251: 0xc3, 0x252: 0xc4, 0x253: 0xbe, 0x254: 0xbf, 0x255: 0xc0, 0x256: 0xc1, 0x257: 0xc2, + 0x258: 0xc3, 0x259: 0xc4, 0x25a: 0xbe, 0x25b: 0xbf, 0x25c: 0xc0, 0x25d: 0xc1, 0x25e: 0xc2, 0x25f: 0xc3, + 0x260: 0xc4, 0x261: 0xbe, 0x262: 0xbf, 0x263: 0xc0, 0x264: 0xc1, 0x265: 0xc2, 0x266: 0xc3, 0x267: 0xc4, + 0x268: 0xbe, 0x269: 0xbf, 0x26a: 0xc0, 0x26b: 0xc1, 0x26c: 0xc2, 0x26d: 0xc3, 0x26e: 0xc4, 0x26f: 0xbe, + 0x270: 0xbf, 0x271: 0xc0, 0x272: 0xc1, 0x273: 0xc2, 0x274: 0xc3, 0x275: 0xc4, 0x276: 0xbe, 0x277: 0xbf, + 0x278: 0xc0, 0x279: 0xc1, 0x27a: 0xc2, 0x27b: 0xc3, 0x27c: 0xc4, 0x27d: 0xbe, 0x27e: 0xbf, 0x27f: 0xc0, + // Block 0xa, offset 0x280 + 0x280: 0xc1, 0x281: 0xc2, 0x282: 0xc3, 0x283: 0xc4, 0x284: 0xbe, 0x285: 0xbf, 0x286: 0xc0, 0x287: 0xc1, + 0x288: 0xc2, 0x289: 0xc3, 0x28a: 0xc4, 0x28b: 0xbe, 0x28c: 0xbf, 0x28d: 0xc0, 0x28e: 0xc1, 0x28f: 0xc2, + 0x290: 0xc3, 0x291: 0xc4, 0x292: 0xbe, 0x293: 0xbf, 0x294: 0xc0, 0x295: 0xc1, 0x296: 0xc2, 0x297: 0xc3, + 0x298: 0xc4, 0x299: 0xbe, 0x29a: 0xbf, 0x29b: 0xc0, 0x29c: 0xc1, 0x29d: 0xc2, 0x29e: 0xc3, 0x29f: 0xc4, + 0x2a0: 0xbe, 0x2a1: 0xbf, 0x2a2: 0xc0, 0x2a3: 0xc1, 0x2a4: 0xc2, 0x2a5: 0xc3, 0x2a6: 0xc4, 0x2a7: 0xbe, + 0x2a8: 0xbf, 0x2a9: 0xc0, 0x2aa: 0xc1, 0x2ab: 0xc2, 0x2ac: 0xc3, 0x2ad: 0xc4, 0x2ae: 0xbe, 0x2af: 0xbf, + 0x2b0: 0xc0, 0x2b1: 0xc1, 0x2b2: 0xc2, 0x2b3: 0xc3, 0x2b4: 0xc4, 0x2b5: 0xbe, 0x2b6: 0xbf, 0x2b7: 0xc0, + 0x2b8: 0xc1, 0x2b9: 0xc2, 0x2ba: 0xc3, 0x2bb: 0xc4, 0x2bc: 0xbe, 0x2bd: 0xbf, 0x2be: 0xc0, 0x2bf: 0xc1, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc2, 0x2c1: 0xc3, 0x2c2: 0xc4, 0x2c3: 0xbe, 0x2c4: 0xbf, 0x2c5: 0xc0, 0x2c6: 0xc1, 0x2c7: 0xc2, + 0x2c8: 0xc3, 0x2c9: 0xc4, 0x2ca: 0xbe, 0x2cb: 0xbf, 0x2cc: 0xc0, 0x2cd: 0xc1, 0x2ce: 0xc2, 0x2cf: 0xc3, + 0x2d0: 0xc4, 0x2d1: 0xbe, 0x2d2: 0xbf, 0x2d3: 0xc0, 0x2d4: 0xc1, 0x2d5: 0xc2, 0x2d6: 0xc3, 0x2d7: 0xc4, + 0x2d8: 0xbe, 0x2d9: 0xbf, 0x2da: 0xc0, 0x2db: 0xc1, 0x2dc: 0xc2, 0x2dd: 0xc3, 0x2de: 0xc5, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc6, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc7, + 0x34b: 0xc8, 0x34d: 0xc9, + 0x368: 0xca, 0x36b: 0xcb, + 0x374: 0xcc, + 0x37a: 0xcd, 0x37d: 0xce, + // Block 0xe, offset 0x380 + 0x381: 0xcf, 0x382: 0xd0, 0x384: 0xd1, 0x385: 0xb8, 0x387: 0xd2, + 0x388: 0xd3, 0x38b: 0xd4, 
0x38c: 0xd5, 0x38d: 0xd6, + 0x391: 0xd7, 0x392: 0xd8, 0x393: 0xd9, 0x396: 0xda, 0x397: 0xdb, + 0x398: 0xdc, 0x39a: 0xdd, 0x39c: 0xde, + 0x3a0: 0xdf, 0x3a4: 0xe0, 0x3a5: 0xe1, 0x3a7: 0xe2, + 0x3a8: 0xe3, 0x3a9: 0xe4, 0x3aa: 0xe5, + 0x3b0: 0xdc, 0x3b5: 0xe6, 0x3b6: 0xe7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe8, 0x3ec: 0xe9, + 0x3ff: 0xea, + // Block 0x10, offset 0x400 + 0x432: 0xeb, + // Block 0x11, offset 0x440 + 0x445: 0xec, 0x446: 0xed, 0x447: 0xee, + 0x449: 0xef, + 0x450: 0xf0, 0x451: 0xf1, 0x452: 0xf2, 0x453: 0xf3, 0x454: 0xf4, 0x455: 0xf5, 0x456: 0xf6, 0x457: 0xf7, + 0x458: 0xf8, 0x459: 0xf9, 0x45a: 0x4c, 0x45b: 0xfa, 0x45c: 0xfb, 0x45d: 0xfc, 0x45e: 0xfd, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xfe, 0x484: 0xe9, + 0x48b: 0xff, + 0x4a3: 0x100, 0x4a5: 0x101, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0x102, 0x4c6: 0x103, + 0x4c8: 0x52, 0x4c9: 0x104, + 0x4ef: 0x105, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 170 entries, 340 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x134, 0x136, 0x13f, 0x141, 0x144, 0x146, 0x151, 0x15c, 0x16a, 0x178, 0x188, 0x196, 0x19d, 0x1a3, 0x1b2, 0x1b6, 0x1b8, 0x1bc, 0x1be, 0x1c1, 0x1c3, 0x1c6, 0x1c8, 0x1cb, 0x1cd, 0x1cf, 0x1d1, 0x1dd, 0x1e7, 0x1f1, 0x1f4, 0x1f8, 0x1fa, 0x1fc, 0x1fe, 0x201, 0x204, 0x206, 0x208, 0x20a, 0x20c, 0x212, 0x215, 0x21a, 0x21c, 0x223, 0x229, 0x22f, 0x237, 0x23d, 0x243, 0x249, 0x24d, 0x24f, 0x251, 0x253, 0x255, 0x25b, 0x25e, 0x260, 0x262, 0x268, 0x26b, 0x273, 0x27a, 0x27d, 0x280, 0x282, 0x285, 0x28d, 0x291, 0x298, 0x29b, 0x2a1, 0x2a3, 0x2a5, 0x2a8, 0x2aa, 0x2ad, 0x2b2, 0x2b4, 0x2b6, 0x2b8, 0x2ba, 0x2bc, 0x2bf, 0x2c1, 0x2c3, 0x2c5, 0x2c7, 0x2c9, 0x2d6, 0x2e0, 0x2e2, 0x2e4, 0x2e8, 0x2ed, 0x2f9, 0x2fe, 0x307, 0x30d, 0x312, 0x316, 0x31b, 0x31f, 0x32f, 0x33d, 0x34b, 0x359, 0x35f, 0x361, 0x363, 0x366, 0x371, 0x373, 0x37d} + +// nfkcSparseValues: 895 entries, 3580 bytes +var nfkcSparseValues = [895]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x428f, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x427b, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x4271, lo: 0xb4, hi: 0xb4}, + {value: 0x01df, lo: 0xb5, hi: 0xb5}, + {value: 0x42a8, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x2222, lo: 0xbc, hi: 0xbc}, + {value: 0x2216, lo: 0xbd, hi: 0xbd}, + {value: 0x22b8, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46f9, lo: 0xa0, hi: 0xa1}, + {value: 0x472b, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + 
{value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x4285, lo: 0x98, hi: 0x98}, + {value: 0x428a, lo: 0x99, hi: 0x9a}, + {value: 0x42ad, lo: 0x9b, hi: 0x9b}, + {value: 0x4276, lo: 0x9c, hi: 0x9c}, + {value: 0x4299, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x016a, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37bc, lo: 0x90, hi: 0x90}, + {value: 0x37c8, lo: 0x91, hi: 0x91}, + {value: 0x37b6, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x382e, lo: 0x97, hi: 0x97}, + {value: 0x37f8, lo: 0x9c, hi: 0x9c}, + {value: 0x37e0, lo: 0x9d, hi: 0x9d}, + {value: 0x380a, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3834, lo: 0xb6, hi: 0xb6}, + {value: 0x383a, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8114, lo: 0x81, hi: 0x82}, + {value: 0x8133, lo: 0x84, hi: 0x84}, + {value: 0x812e, lo: 0x85, hi: 0x85}, + {value: 0x810e, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x97}, + {value: 0x811a, lo: 0x98, hi: 0x98}, + {value: 0x811b, lo: 0x99, hi: 0x99}, + {value: 0x811c, lo: 0x9a, hi: 0x9a}, + {value: 0x3858, lo: 0xa2, hi: 0xa2}, + {value: 0x385e, lo: 0xa3, hi: 0xa3}, + {value: 0x386a, lo: 0xa4, hi: 0xa4}, + {value: 0x3864, lo: 0xa5, hi: 0xa5}, + {value: 0x3870, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x3882, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3876, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x387c, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8133, lo: 0x96, hi: 0x9c}, + {value: 0x8133, lo: 0x9f, hi: 0xa2}, + {value: 0x812e, lo: 0xa3, hi: 0xa3}, + {value: 0x8133, lo: 0xa4, hi: 0xa4}, + {value: 0x8133, lo: 0xa7, hi: 0xa8}, + {value: 0x812e, lo: 0xaa, hi: 0xaa}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x8120, lo: 0x91, hi: 0x91}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x812e, lo: 0xb1, hi: 0xb1}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb5, hi: 0xb6}, + {value: 0x812e, lo: 0xb7, hi: 0xb9}, + {value: 0x8133, lo: 0xba, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbc}, + {value: 0x8133, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbe, hi: 0xbe}, + {value: 0x8133, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8133, lo: 0x80, hi: 0x80}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x812e, lo: 0x82, hi: 0x83}, + {value: 0x812e, lo: 0x84, hi: 0x85}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x812e, lo: 0x88, hi: 0x89}, + {value: 0x8133, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0xab, hi: 0xb1}, + {value: 0x812e, lo: 0xb2, hi: 0xb2}, + {value: 0x8133, lo: 0xb3, hi: 0xb3}, + 
{value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8133, lo: 0x96, hi: 0x99}, + {value: 0x8133, lo: 0x9b, hi: 0xa3}, + {value: 0x8133, lo: 0xa5, hi: 0xa7}, + {value: 0x8133, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3eef, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ef7, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eff, lo: 0xb4, hi: 0xb4}, + {value: 0x9903, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x91, hi: 0x91}, + {value: 0x812e, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x93, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x94}, + {value: 0x4533, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cab, lo: 0x8b, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x4573, lo: 0x9c, hi: 0x9d}, + {value: 0x4583, lo: 0x9f, hi: 0x9f}, + {value: 0x8133, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x45ab, lo: 0xb3, hi: 0xb3}, + {value: 0x45b3, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x458b, lo: 0x99, hi: 0x9b}, + {value: 0x45a3, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cc3, lo: 0x88, hi: 0x88}, + {value: 0x2cbb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ccb, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45bb, lo: 0x9c, hi: 0x9c}, + {value: 0x45c3, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cd3, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cdb, lo: 0x8a, hi: 0x8a}, + {value: 0x2ceb, lo: 0x8b, hi: 0x8b}, + {value: 0x2ce3, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3f07, lo: 0x88, hi: 0x88}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x8121, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2cf3, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cfb, lo: 0x87, hi: 0x87}, + {value: 0x2d03, lo: 0x88, hi: 0x88}, + {value: 0x2f67, lo: 0x8a, hi: 0x8a}, + {value: 0x2def, lo: 0x8b, hi: 0x8b}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, 
lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d0b, lo: 0x8a, hi: 0x8a}, + {value: 0x2d1b, lo: 0x8b, hi: 0x8b}, + {value: 0x2d13, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + {value: 0x6bdd, lo: 0x07}, + {value: 0x9905, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3f0f, lo: 0x9a, hi: 0x9a}, + {value: 0x2f6f, lo: 0x9c, hi: 0x9c}, + {value: 0x2dfa, lo: 0x9d, hi: 0x9d}, + {value: 0x2d23, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2627, lo: 0xb3, hi: 0xb3}, + {value: 0x8123, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x03}, + {value: 0x263c, lo: 0xb3, hi: 0xb3}, + {value: 0x8125, lo: 0xb8, hi: 0xb9}, + {value: 0x8105, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xdf + {value: 0x0000, lo: 0x03}, + {value: 0x8126, lo: 0x88, hi: 0x8b}, + {value: 0x262e, lo: 0x9c, hi: 0x9c}, + {value: 0x2635, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe3 + {value: 0x0000, lo: 0x05}, + {value: 0x030e, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x98, hi: 0x99}, + {value: 0x812e, lo: 0xb5, hi: 0xb5}, + {value: 0x812e, lo: 0xb7, hi: 0xb7}, + {value: 0x812c, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe9 + {value: 0x0000, lo: 0x10}, + {value: 0x264a, lo: 0x83, hi: 0x83}, + {value: 0x2651, lo: 0x8d, hi: 0x8d}, + {value: 0x2658, lo: 0x92, hi: 0x92}, + {value: 0x265f, lo: 0x97, hi: 0x97}, + {value: 0x2666, lo: 0x9c, hi: 0x9c}, + {value: 0x2643, lo: 0xa9, hi: 0xa9}, + {value: 0x8127, lo: 0xb1, hi: 0xb1}, + {value: 0x8128, lo: 0xb2, hi: 0xb2}, + {value: 0x4a9b, lo: 0xb3, hi: 0xb3}, + {value: 0x8129, lo: 0xb4, hi: 0xb4}, + {value: 0x4aa4, lo: 0xb5, hi: 0xb5}, + {value: 0x45cb, lo: 0xb6, hi: 0xb6}, + {value: 0x460b, lo: 0xb7, hi: 0xb7}, + {value: 0x45d3, lo: 0xb8, hi: 0xb8}, + {value: 0x4616, lo: 0xb9, hi: 0xb9}, + {value: 0x8128, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xfa + {value: 0x0000, lo: 0x0b}, + {value: 0x8128, lo: 0x80, hi: 0x80}, + {value: 0x4aad, lo: 0x81, hi: 0x81}, + {value: 0x8133, lo: 0x82, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0x86, hi: 0x87}, + {value: 0x2674, lo: 0x93, hi: 0x93}, + {value: 0x267b, lo: 0x9d, hi: 0x9d}, + {value: 0x2682, lo: 0xa2, hi: 0xa2}, + {value: 0x2689, lo: 0xa7, hi: 0xa7}, + {value: 0x2690, lo: 0xac, hi: 0xac}, + {value: 0x266d, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x108 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d2b, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x0312, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 
0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x116 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x118 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x11a + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11c + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x94, hi: 0x94}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11f + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x92, hi: 0x92}, + {value: 0x8133, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x124 + {value: 0x0004, lo: 0x02}, + {value: 0x812f, lo: 0xb9, hi: 0xba}, + {value: 0x812e, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x127 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x97, hi: 0x97}, + {value: 0x812e, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x12a + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + {value: 0x8133, lo: 0xb5, hi: 0xbc}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12e + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + {value: 0x812e, lo: 0xb5, hi: 0xba}, + {value: 0x8133, lo: 0xbb, hi: 0xbc}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + {value: 0x812e, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x134 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x80, hi: 0x80}, + // Block 0x37, offset 0x136 + {value: 0x0000, lo: 0x08}, + {value: 0x2d73, lo: 0x80, hi: 0x80}, + {value: 0x2d7b, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d83, lo: 0x83, hi: 0x83}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xab, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xac}, + {value: 0x8133, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x13f + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x141 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xa6, hi: 0xa6}, + {value: 0x8105, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x144 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x146 + {value: 0x0000, lo: 0x0a}, + {value: 0x8133, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812e, lo: 0x95, hi: 0x99}, + {value: 0x8133, lo: 0x9a, hi: 0x9b}, + {value: 0x812e, lo: 0x9c, hi: 0x9f}, + {value: 0x8133, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x8133, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x151 + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x15c + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043e, lo: 0x91, hi: 0x91}, + {value: 0x42b2, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1876, lo: 0xa5, hi: 0xa5}, + {value: 0x1b62, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2697, lo: 0xb3, hi: 0xb3}, + {value: 0x280b, lo: 0xb4, hi: 0xb4}, + {value: 0x269e, lo: 0xb6, hi: 0xb6}, + {value: 0x2815, 
lo: 0xb7, hi: 0xb7}, + {value: 0x1870, lo: 0xbc, hi: 0xbc}, + {value: 0x4280, lo: 0xbe, hi: 0xbe}, + // Block 0x3e, offset 0x16a + {value: 0x0002, lo: 0x0d}, + {value: 0x1936, lo: 0x87, hi: 0x87}, + {value: 0x1933, lo: 0x88, hi: 0x88}, + {value: 0x1873, lo: 0x89, hi: 0x89}, + {value: 0x299b, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x046a, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3f, offset 0x178 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x046a, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x199f, lo: 0xa8, hi: 0xa8}, + // Block 0x40, offset 0x188 + {value: 0x0000, lo: 0x0d}, + {value: 0x8133, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8133, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8133, lo: 0x9b, hi: 0x9c}, + {value: 0x8133, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8133, lo: 0xa7, hi: 0xa7}, + {value: 0x812e, lo: 0xa8, hi: 0xa8}, + {value: 0x8133, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812e, lo: 0xac, hi: 0xaf}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + // Block 0x41, offset 0x196 + {value: 0x0007, lo: 0x06}, + {value: 0x2186, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bd0, lo: 0x9a, hi: 0x9b}, + {value: 0x3bde, lo: 0xae, hi: 0xae}, + // Block 0x42, offset 0x19d + {value: 0x000e, lo: 0x05}, + {value: 0x3be5, lo: 0x8d, hi: 0x8e}, + {value: 0x3bec, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x43, offset 0x1a3 + {value: 0x017a, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3bfa, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3c01, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3c08, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3c0f, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3c16, lo: 0xa6, hi: 0xa6}, + {value: 0x26a5, lo: 0xac, hi: 0xad}, + {value: 0x26ac, lo: 0xaf, hi: 0xaf}, + {value: 0x2829, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x44, offset 0x1b2 + {value: 0x0007, lo: 0x03}, + {value: 0x3c7f, lo: 0xa0, hi: 0xa1}, + {value: 0x3ca9, lo: 0xa2, hi: 0xa3}, + {value: 0x3cd3, lo: 0xaa, hi: 0xad}, + // Block 0x45, offset 0x1b6 + {value: 0x0004, lo: 0x01}, + {value: 0x048e, lo: 0xa9, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x47, offset 0x1bc + {value: 0x0000, lo: 0x01}, + {value: 0x29a8, 
lo: 0x8c, hi: 0x8c}, + // Block 0x48, offset 0x1be + {value: 0x0266, lo: 0x02}, + {value: 0x1b92, lo: 0xb4, hi: 0xb4}, + {value: 0x1930, lo: 0xb5, hi: 0xb6}, + // Block 0x49, offset 0x1c1 + {value: 0x0000, lo: 0x01}, + {value: 0x44f4, lo: 0x9c, hi: 0x9c}, + // Block 0x4a, offset 0x1c3 + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4b, offset 0x1c6 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xaf, hi: 0xb1}, + // Block 0x4c, offset 0x1c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0482, lo: 0xaf, hi: 0xaf}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x4d, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xa0, hi: 0xbf}, + // Block 0x4e, offset 0x1cd + {value: 0x0000, lo: 0x01}, + {value: 0x0dc6, lo: 0x9f, hi: 0x9f}, + // Block 0x4f, offset 0x1cf + {value: 0x0000, lo: 0x01}, + {value: 0x1632, lo: 0xb3, hi: 0xb3}, + // Block 0x50, offset 0x1d1 + {value: 0x0004, lo: 0x0b}, + {value: 0x159a, lo: 0x80, hi: 0x82}, + {value: 0x15b2, lo: 0x83, hi: 0x83}, + {value: 0x15ca, lo: 0x84, hi: 0x85}, + {value: 0x15da, lo: 0x86, hi: 0x89}, + {value: 0x15ee, lo: 0x8a, hi: 0x8c}, + {value: 0x1602, lo: 0x8d, hi: 0x8d}, + {value: 0x160a, lo: 0x8e, hi: 0x8e}, + {value: 0x1612, lo: 0x8f, hi: 0x90}, + {value: 0x161e, lo: 0x91, hi: 0x93}, + {value: 0x162e, lo: 0x94, hi: 0x94}, + {value: 0x1636, lo: 0x95, hi: 0x95}, + // Block 0x51, offset 0x1dd + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x8134, lo: 0xac, hi: 0xac}, + {value: 0x812f, lo: 0xad, hi: 0xad}, + {value: 0x8130, lo: 0xae, hi: 0xae}, + {value: 0x8130, lo: 0xaf, hi: 0xaf}, + {value: 0x04b6, lo: 0xb6, hi: 0xb6}, + {value: 0x088a, lo: 0xb8, hi: 0xba}, + // Block 0x52, offset 0x1e7 + {value: 0x0006, lo: 0x09}, + {value: 0x0316, lo: 0xb1, hi: 0xb1}, + {value: 0x031a, lo: 0xb2, hi: 0xb2}, + {value: 0x4a52, lo: 0xb3, hi: 0xb3}, + {value: 0x031e, lo: 0xb4, hi: 0xb4}, + {value: 0x4a58, lo: 0xb5, hi: 0xb6}, + {value: 0x0322, lo: 0xb7, hi: 0xb7}, + {value: 0x0326, lo: 0xb8, hi: 0xb8}, + {value: 0x032a, lo: 0xb9, hi: 0xb9}, + {value: 0x4a64, lo: 0xba, hi: 0xbf}, + // Block 0x53, offset 0x1f1 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xaf, hi: 0xaf}, + {value: 0x8133, lo: 0xb4, hi: 0xbd}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0212, lo: 0x9c, hi: 0x9c}, + {value: 0x0215, lo: 0x9d, hi: 0x9d}, + {value: 0x8133, lo: 0x9e, hi: 0x9f}, + // Block 0x55, offset 0x1f8 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb1}, + // Block 0x56, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x163e, lo: 0xb0, hi: 0xb0}, + // Block 0x57, offset 0x1fc + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x58, offset 0x1fe + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xac, hi: 0xac}, + // Block 0x59, offset 0x201 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x84, hi: 0x84}, + {value: 0x8133, lo: 0xa0, hi: 0xb1}, + // Block 0x5a, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xab, hi: 0xad}, + // Block 0x5b, offset 0x206 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x93, hi: 0x93}, + // Block 0x5c, offset 0x208 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0xb3, hi: 0xb3}, + // Block 0x5d, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + // Block 0x5e, offset 0x20c + {value: 0x0000, lo: 
0x05}, + {value: 0x8133, lo: 0xb0, hi: 0xb0}, + {value: 0x8133, lo: 0xb2, hi: 0xb3}, + {value: 0x812e, lo: 0xb4, hi: 0xb4}, + {value: 0x8133, lo: 0xb7, hi: 0xb8}, + {value: 0x8133, lo: 0xbe, hi: 0xbf}, + // Block 0x5f, offset 0x212 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x81, hi: 0x81}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + // Block 0x60, offset 0x215 + {value: 0x0008, lo: 0x04}, + {value: 0x163a, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1646, lo: 0x9f, hi: 0x9f}, + {value: 0x015e, lo: 0xa9, hi: 0xa9}, + // Block 0x61, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xad, hi: 0xad}, + // Block 0x62, offset 0x21c + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x65, offset 0x22f + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x69, offset 0x249 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x6a, offset 0x24d + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6b, offset 0x24f + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xbd, hi: 0xbd}, + // Block 0x6c, offset 0x251 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0xa0, hi: 0xa0}, + // Block 0x6d, offset 0x253 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb6, hi: 0xba}, + // Block 0x6e, offset 0x255 + {value: 0x002d, lo: 0x05}, + {value: 0x812e, lo: 0x8d, hi: 0x8d}, + {value: 0x8133, lo: 0x8f, hi: 0x8f}, + {value: 0x8133, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x6f, offset 0x25b + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0xa5, hi: 0xa5}, + {value: 0x812e, lo: 0xa6, hi: 0xa6}, + // Block 0x70, offset 0x25e + {value: 0x0000, 
lo: 0x01}, + {value: 0x8133, lo: 0xa4, hi: 0xa7}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xab, hi: 0xac}, + // Block 0x72, offset 0x262 + {value: 0x0000, lo: 0x05}, + {value: 0x812e, lo: 0x86, hi: 0x87}, + {value: 0x8133, lo: 0x88, hi: 0x8a}, + {value: 0x812e, lo: 0x8b, hi: 0x8b}, + {value: 0x8133, lo: 0x8c, hi: 0x8c}, + {value: 0x812e, lo: 0x8d, hi: 0x90}, + // Block 0x73, offset 0x268 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x86, hi: 0x86}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x26b + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x424f, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4259, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x4263, lo: 0xab, hi: 0xab}, + {value: 0x8105, lo: 0xb9, hi: 0xba}, + // Block 0x75, offset 0x273 + {value: 0x0000, lo: 0x06}, + {value: 0x8133, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d8b, lo: 0xae, hi: 0xae}, + {value: 0x2d95, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8105, lo: 0xb3, hi: 0xb4}, + // Block 0x76, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x80, hi: 0x80}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0x77, offset 0x27d + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb5, hi: 0xb5}, + {value: 0x8103, lo: 0xb6, hi: 0xb6}, + // Block 0x78, offset 0x280 + {value: 0x0002, lo: 0x01}, + {value: 0x8103, lo: 0xa9, hi: 0xaa}, + // Block 0x79, offset 0x282 + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7a, offset 0x285 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d9f, lo: 0x8b, hi: 0x8b}, + {value: 0x2da9, lo: 0x8c, hi: 0x8c}, + {value: 0x8105, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8133, lo: 0xa6, hi: 0xac}, + {value: 0x8133, lo: 0xb0, hi: 0xb4}, + // Block 0x7b, offset 0x28d + {value: 0x0000, lo: 0x03}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x86, hi: 0x86}, + {value: 0x8133, lo: 0x9e, hi: 0x9e}, + // Block 0x7c, offset 0x291 + {value: 0x6b4d, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2dbd, lo: 0xbb, hi: 0xbb}, + {value: 0x2db3, lo: 0xbc, hi: 0xbd}, + {value: 0x2dc7, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x298 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0x82, hi: 0x82}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dd1, lo: 0xba, hi: 0xba}, + {value: 0x2ddb, lo: 0xbb, hi: 0xbb}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x80, hi: 0x80}, + // Block 0x80, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x2a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb6, hi: 0xb6}, + {value: 0x8103, lo: 0xb7, hi: 0xb7}, + // Block 0x82, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xab, hi: 0xab}, + // Block 0x83, offset 0x2aa + {value: 0x0000, lo: 0x02}, + {value: 0x8105, lo: 0xb9, hi: 0xb9}, + {value: 0x8103, lo: 0xba, hi: 0xba}, + // Block 0x84, offset 0x2ad + {value: 0x0000, lo: 0x04}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 
0xb5, hi: 0xb5}, + {value: 0x2de5, lo: 0xb8, hi: 0xb8}, + {value: 0x8105, lo: 0xbd, hi: 0xbe}, + // Block 0x85, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8103, lo: 0x83, hi: 0x83}, + // Block 0x86, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xa0, hi: 0xa0}, + // Block 0x87, offset 0x2b6 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0xb4, hi: 0xb4}, + // Block 0x88, offset 0x2b8 + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x87, hi: 0x87}, + // Block 0x89, offset 0x2ba + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x99, hi: 0x99}, + // Block 0x8a, offset 0x2bc + {value: 0x0000, lo: 0x02}, + {value: 0x8103, lo: 0x82, hi: 0x82}, + {value: 0x8105, lo: 0x84, hi: 0x85}, + // Block 0x8b, offset 0x2bf + {value: 0x0000, lo: 0x01}, + {value: 0x8105, lo: 0x97, hi: 0x97}, + // Block 0x8c, offset 0x2c1 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8d, offset 0x2c3 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xb0, hi: 0xb6}, + // Block 0x8e, offset 0x2c5 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb0, hi: 0xb1}, + // Block 0x8f, offset 0x2c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x90, offset 0x2c9 + {value: 0x0000, lo: 0x0c}, + {value: 0x45e3, lo: 0x9e, hi: 0x9e}, + {value: 0x45ed, lo: 0x9f, hi: 0x9f}, + {value: 0x4621, lo: 0xa0, hi: 0xa0}, + {value: 0x462f, lo: 0xa1, hi: 0xa1}, + {value: 0x463d, lo: 0xa2, hi: 0xa2}, + {value: 0x464b, lo: 0xa3, hi: 0xa3}, + {value: 0x4659, lo: 0xa4, hi: 0xa4}, + {value: 0x812c, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8131, lo: 0xad, hi: 0xad}, + {value: 0x812c, lo: 0xae, hi: 0xb2}, + {value: 0x812e, lo: 0xbb, hi: 0xbf}, + // Block 0x91, offset 0x2d6 + {value: 0x0000, lo: 0x09}, + {value: 0x812e, lo: 0x80, hi: 0x82}, + {value: 0x8133, lo: 0x85, hi: 0x89}, + {value: 0x812e, lo: 0x8a, hi: 0x8b}, + {value: 0x8133, lo: 0xaa, hi: 0xad}, + {value: 0x45f7, lo: 0xbb, hi: 0xbb}, + {value: 0x4601, lo: 0xbc, hi: 0xbc}, + {value: 0x4667, lo: 0xbd, hi: 0xbd}, + {value: 0x4683, lo: 0xbe, hi: 0xbe}, + {value: 0x4675, lo: 0xbf, hi: 0xbf}, + // Block 0x92, offset 0x2e0 + {value: 0x0000, lo: 0x01}, + {value: 0x4691, lo: 0x80, hi: 0x80}, + // Block 0x93, offset 0x2e2 + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0x82, hi: 0x84}, + // Block 0x94, offset 0x2e4 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x95, offset 0x2e8 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x96, offset 0x2ed + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x97, offset 0x2f9 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x98, offset 0x2fe + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, 
lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x99, offset 0x307 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x9a, offset 0x30d + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x9b, offset 0x312 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x9c, offset 0x316 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x9d, offset 0x31b + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x31f + {value: 0x0003, lo: 0x0f}, + {value: 0x01bb, lo: 0x80, hi: 0x80}, + {value: 0x0462, lo: 0x81, hi: 0x81}, + {value: 0x01be, lo: 0x82, hi: 0x9a}, + {value: 0x045e, lo: 0x9b, hi: 0x9b}, + {value: 0x01ca, lo: 0x9c, hi: 0x9c}, + {value: 0x01d3, lo: 0x9d, hi: 0x9d}, + {value: 0x01d9, lo: 0x9e, hi: 0x9e}, + {value: 0x01fd, lo: 0x9f, hi: 0x9f}, + {value: 0x01ee, lo: 0xa0, hi: 0xa0}, + {value: 0x01eb, lo: 0xa1, hi: 0xa1}, + {value: 0x0176, lo: 0xa2, hi: 0xb2}, + {value: 0x018b, lo: 0xb3, hi: 0xb3}, + {value: 0x01a9, lo: 0xb4, hi: 0xba}, + {value: 0x0462, lo: 0xbb, hi: 0xbb}, + {value: 0x01be, lo: 0xbc, hi: 0xbf}, + // Block 0x9f, offset 0x32f + {value: 0x0003, lo: 0x0d}, + {value: 0x01ca, lo: 0x80, hi: 0x94}, + {value: 0x045e, lo: 0x95, hi: 0x95}, + {value: 0x01ca, lo: 0x96, hi: 0x96}, + {value: 0x01d3, lo: 0x97, hi: 0x97}, + {value: 0x01d9, lo: 0x98, hi: 0x98}, + {value: 0x01fd, lo: 0x99, hi: 0x99}, + {value: 0x01ee, lo: 0x9a, hi: 0x9a}, + {value: 0x01eb, lo: 0x9b, hi: 0x9b}, + {value: 0x0176, lo: 0x9c, hi: 0xac}, + {value: 0x018b, lo: 0xad, hi: 0xad}, + {value: 0x01a9, lo: 0xae, hi: 0xb4}, + {value: 0x0462, lo: 0xb5, hi: 0xb5}, + {value: 0x01be, lo: 0xb6, hi: 0xbf}, + // Block 0xa0, offset 0x33d + {value: 0x0003, lo: 0x0d}, + {value: 0x01dc, lo: 0x80, hi: 0x8e}, + {value: 0x045e, lo: 0x8f, hi: 0x8f}, + {value: 0x01ca, lo: 0x90, hi: 0x90}, + {value: 0x01d3, lo: 0x91, hi: 0x91}, + {value: 0x01d9, lo: 0x92, hi: 0x92}, + {value: 0x01fd, lo: 0x93, hi: 0x93}, + {value: 0x01ee, lo: 0x94, hi: 0x94}, + {value: 0x01eb, lo: 0x95, hi: 0x95}, + {value: 0x0176, lo: 0x96, hi: 0xa6}, + {value: 0x018b, lo: 0xa7, hi: 0xa7}, + {value: 0x01a9, lo: 0xa8, hi: 0xae}, + {value: 0x0462, lo: 0xaf, hi: 0xaf}, + {value: 0x01be, lo: 0xb0, hi: 0xbf}, + // Block 0xa1, offset 0x34b + {value: 0x0003, lo: 0x0d}, + {value: 0x01ee, lo: 0x80, hi: 0x88}, + {value: 0x045e, lo: 0x89, hi: 0x89}, + {value: 0x01ca, lo: 0x8a, hi: 0x8a}, + {value: 0x01d3, lo: 0x8b, hi: 0x8b}, + {value: 0x01d9, lo: 0x8c, hi: 0x8c}, + {value: 0x01fd, lo: 0x8d, hi: 0x8d}, + {value: 0x01ee, lo: 0x8e, hi: 0x8e}, + {value: 0x01eb, lo: 0x8f, hi: 0x8f}, + {value: 0x0176, lo: 0x90, hi: 0xa0}, + {value: 0x018b, lo: 0xa1, hi: 0xa1}, + {value: 0x01a9, lo: 0xa2, hi: 0xa8}, 
+ {value: 0x0462, lo: 0xa9, hi: 0xa9}, + {value: 0x01be, lo: 0xaa, hi: 0xbf}, + // Block 0xa2, offset 0x359 + {value: 0x0000, lo: 0x05}, + {value: 0x8133, lo: 0x80, hi: 0x86}, + {value: 0x8133, lo: 0x88, hi: 0x98}, + {value: 0x8133, lo: 0x9b, hi: 0xa1}, + {value: 0x8133, lo: 0xa3, hi: 0xa4}, + {value: 0x8133, lo: 0xa6, hi: 0xaa}, + // Block 0xa3, offset 0x35f + {value: 0x0000, lo: 0x01}, + {value: 0x8133, lo: 0xac, hi: 0xaf}, + // Block 0xa4, offset 0x361 + {value: 0x0000, lo: 0x01}, + {value: 0x812e, lo: 0x90, hi: 0x96}, + // Block 0xa5, offset 0x363 + {value: 0x0000, lo: 0x02}, + {value: 0x8133, lo: 0x84, hi: 0x89}, + {value: 0x8103, lo: 0x8a, hi: 0x8a}, + // Block 0xa6, offset 0x366 + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1954, lo: 0x8a, hi: 0x8a}, + {value: 0x1987, lo: 0x8b, hi: 0x8b}, + {value: 0x19a2, lo: 0x8c, hi: 0x8c}, + {value: 0x19a8, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc6, lo: 0x8e, hi: 0x8e}, + {value: 0x19b4, lo: 0x8f, hi: 0x8f}, + {value: 0x197e, lo: 0xaa, hi: 0xaa}, + {value: 0x1981, lo: 0xab, hi: 0xab}, + {value: 0x1984, lo: 0xac, hi: 0xac}, + // Block 0xa7, offset 0x371 + {value: 0x0000, lo: 0x01}, + {value: 0x1942, lo: 0x90, hi: 0x90}, + // Block 0xa8, offset 0x373 + {value: 0x0028, lo: 0x09}, + {value: 0x286f, lo: 0x80, hi: 0x80}, + {value: 0x2833, lo: 0x81, hi: 0x81}, + {value: 0x283d, lo: 0x82, hi: 0x82}, + {value: 0x2851, lo: 0x83, hi: 0x84}, + {value: 0x285b, lo: 0x85, hi: 0x86}, + {value: 0x2847, lo: 0x87, hi: 0x87}, + {value: 0x2865, lo: 0x88, hi: 0x88}, + {value: 0x0b72, lo: 0x90, hi: 0x90}, + {value: 0x08ea, lo: 0x91, hi: 0x91}, + // Block 0xa9, offset 0x37d + {value: 0x0002, lo: 0x01}, + {value: 0x0021, lo: 0xb0, hi: 0xb9}, +} + +// recompMap: 7528 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + 
// 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" 
+ // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + 
"\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + 
"\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "\x195\x190\x00\x01\x198" + // 0x19351930: 0x00011938 + "" + // Total size of tables: 55KB (56160 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 942906929..0175eae50 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,5 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+//go:build !go1.10 // +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go new file mode 100644 index 000000000..dd3febd43 --- /dev/null +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=Kind"; DO NOT EDIT. + +package width + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Neutral-0] + _ = x[EastAsianAmbiguous-1] + _ = x[EastAsianWide-2] + _ = x[EastAsianNarrow-3] + _ = x[EastAsianFullwidth-4] + _ = x[EastAsianHalfwidth-5] +} + +const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" + +var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go new file mode 100644 index 000000000..186b1d4ef --- /dev/null +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -0,0 +1,1319 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.10 && !go1.13 +// +build go1.10,!go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, + 0xdb1: 
0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 
0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 
0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 
0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 
0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 
0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 
0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 
0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 
0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 
0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 
0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 
0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <length> <modified UTF-8-encoded rune> <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// Ａ -> A (U+FF21 -> U+0041) +// Ｂ -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + 
{0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go new file mode 100644 index 000000000..990f7622f --- /dev/null +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -0,0 +1,1331 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.13 && !go1.14 +// +build go1.13,!go1.14 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 
0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 
0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 
0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 
0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 
0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x187a: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 
0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 
0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 
0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 
0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 
0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go new file mode 100644 index 000000000..85296297e --- /dev/null +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -0,0 +1,1351 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.14 && !go1.16 +// +build go1.14,!go1.16 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 104 blocks, 6656 entries, 13312 bytes +// The third block is the zero block. +var widthValues = [6656]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, + 0x1236: 0x4000, 0x1237: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 
0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 
0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 
0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 
0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d5: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, + // Block 0x60, offset 0x1800 + 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, + // Block 0x61, offset 0x1840 + 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 
0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, + 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, + 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, + 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, + 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, + 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, + 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, + 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, + 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 
0x19ee: 0x2000, 0x19ef: 0x2000, + 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, + 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, + 0x19fc: 0x2000, 0x19fd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 
0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 
0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, + 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. 
+// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + 
{0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15320 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go new file mode 100644 index 000000000..bac3f1aee --- /dev/null +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -0,0 +1,1352 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 +// +build go1.16 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 105 blocks, 6720 entries, 13440 bytes +// The third block is the zero block. 
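For reference, a minimal sketch of how these generated trie lookups are exercised through the width package's public API rather than the internal widthTrie (assuming golang.org/x/text is available as a dependency; the exact Kind printed for U+FF21 reflects an assumption about the table contents, while width.LookupString, Properties.Kind, Properties.Narrow and the width.Narrow transformer are the package's documented entry points):

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Look up the first rune of the string; sz is the length of its UTF-8
	// encoding (3 bytes for U+FF21 FULLWIDTH LATIN CAPITAL LETTER A).
	p, sz := width.LookupString("Ａ")
	fmt.Println(p.Kind(), sz) // expected: EastAsianFullwidth 3

	// Narrow reports the narrow counterpart recorded in the generated
	// inverse tables, here 'A' (U+0041).
	fmt.Printf("%c\n", p.Narrow())

	// The Narrow transformer folds a whole string to its narrow variants.
	fmt.Println(width.Narrow.String("Ｇｏ")) // expected: Go
}
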
+var widthValues = [6720]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, + 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, + // Block 0x3e, offset 0xf80 + 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, + 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, + 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, + 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, + 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, + 0xfbc: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, + 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, + 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, + 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, + 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, + 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, + 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, + 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, + 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, + 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, + 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, + 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, + 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, + 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, + 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, + 0x106a: 0x4000, 0x106b: 0x4000, + // Block 0x42, offset 0x1080 + 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, + 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, + 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, + 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, + 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, + 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, + 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, + 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, + 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, + 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, + 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, + 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, + 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, + 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, + 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, + 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, + 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, + 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, + 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, + 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, + 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, + // Block 0x44, offset 0x1100 + 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, + 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, + 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, + 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, + 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, + 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, + 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, + 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, + 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, + 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, + 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, + // Block 0x45, offset 0x1140 + 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, + 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, + 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, + 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, + 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, + 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, + 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, + 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, + 0x117d: 0x2000, + // Block 0x46, offset 0x1180 + 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, + 0x11a4: 0x4000, + 0x11b0: 0x4000, 0x11b1: 0x4000, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, + 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, + 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, + 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, + 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, + 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, + 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, + 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, + 0x11f6: 0x4000, 0x11f7: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, + // Block 0x4a, offset 0x1280 
+ 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bf: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, + 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, + 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, + 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, + 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, + // Block 0x5c, offset 0x1700 + 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, + 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, + 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, + 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, + 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, + 0x173a: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1755: 0x4000, 0x1756: 0x4000, + 0x1764: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x182b: 0x4000, 0x182c: 0x4000, + 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, + // Block 0x61, offset 0x1840 + 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, + 0x1986: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, + 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, + 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, + 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, + 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, + 0x19b6: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, + 0x19d0: 0x4000, 0x19d1: 0x4000, + 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, + 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, + 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, + 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, + 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, + 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, + 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, + 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, + 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, + 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, + 0x1a3c: 0x2000, 0x1a3d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, + 0x265: 0x3c, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, + // Block 0xd, offset 0x340 + 0x37f: 0x44, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, + 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, + 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, + 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go new file mode 100644 index 000000000..b3db84f6f --- /dev/null +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -0,0 +1,1287 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build !go1.10 +// +build !go1.10 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 99 blocks, 6336 entries, 12672 bytes +// The third block is the zero block. 
+var widthValues = [6336]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdb1: 0x403e, 0xdb2: 
0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 
0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 
0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 
0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 
0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c4: 0x4000, + // Block 0x4c, offset 0x1300 + 0x130f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, + 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, + 0x1350: 0x2000, 0x1351: 0x2000, + 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, + 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, + 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, + 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, + 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, + 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, + 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, + 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, + // Block 0x4e, offset 0x1380 + 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, + 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, + 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, + 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, + 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, + 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, + 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, + 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, + 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, + 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, + 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, + 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, + 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 
0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, + 0x1410: 0x4000, 0x1411: 0x4000, + 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, + 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, + 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, + 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, + 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, + 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, + 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + // Block 0x51, offset 0x1440 + 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, + 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, + 0x1450: 0x4000, 0x1451: 0x4000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, + 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, + 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, + 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, + 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, + 0x1520: 0x4000, 0x1521: 
0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, + 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, + 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, + 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1574: 0x4000, + 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, + 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, + 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, 
+ 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, + // Block 0x59, offset 0x1640 + 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, + 0x167a: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1695: 0x4000, 0x1696: 0x4000, + 0x16a4: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, + 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, + 0x176b: 0x4000, 0x176c: 0x4000, + 0x1774: 0x4000, 0x1775: 0x4000, + 0x1776: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1790: 0x4000, 0x1791: 0x4000, + 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, + 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, + 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, + 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, + 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, + 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, + 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, + 0x17de: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + // Block 0x61, offset 0x1840 
+ 0x1840: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, + 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, + 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, + 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, + 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, + 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, + 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, + 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, + 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, + 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, + 0x18bc: 0x2000, 0x18bd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 
0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 
0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, + 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, + 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, + 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, + 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding 
of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 
0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go new file mode 100644 index 000000000..0049f700a --- /dev/null +++ b/vendor/golang.org/x/text/width/transform.go @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type foldTransform struct { + transform.NopResetter +} + +func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if elem(v)&tagNeedsFold != 0 { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. 
+ start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if elem(v)&tagNeedsFold == 0 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type narrowTransform struct { + transform.NopResetter +} + +func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type wideTransform struct { + transform.NopResetter +} + +func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. 
+ v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go new file mode 100644 index 000000000..ca8e45fd1 --- /dev/null +++ b/vendor/golang.org/x/text/width/trieval.go @@ -0,0 +1,30 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package width + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go new file mode 100644 index 000000000..29c7509be --- /dev/null +++ b/vendor/golang.org/x/text/width/width.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type=Kind +//go:generate go run gen.go gen_common.go gen_trieval.go + +// Package width provides functionality for handling different widths in text. +// +// Wide characters behave like ideographs; they tend to allow line breaks after +// each character and remain upright in vertical text layout. Narrow characters +// are kept together in words or runs that are rotated sideways in vertical text +// layout. +// +// For more information, see https://unicode.org/reports/tr11/. +package width // import "golang.org/x/text/width" + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// TODO +// 1) Reduce table size by compressing blocks. +// 2) API proposition for computing display length +// (approximation, fixed pitch only). +// 3) Implement display length. 
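
The fold/narrow/wide transforms above, together with the xor-based inverseData scheme described in tables.go, surface through the exported Fold, Widen and Narrow transformers and the Properties methods declared further down in this file. A minimal usage sketch follows (illustrative only, not part of the vendored file; the literal outputs are what the standard width tables should produce):

package main

import (
	"fmt"
	"unicode/utf8"

	"golang.org/x/text/width"
)

func main() {
	// The exported transformers declared further down in width.go wrap the
	// foldTransform/narrowTransform/wideTransform implementations above.
	fmt.Println(width.Fold.String("ＡＢＣ"))  // ABC   (canonical width)
	fmt.Println(width.Narrow.String("Ａ１")) // A1    (narrow variants)
	fmt.Println(width.Widen.String("AB!"))   // ＡＢ！ (wide variants)

	// Kind of a single rune, as classified by the width trie.
	fmt.Println(width.LookupRune('Ａ').Kind()) // EastAsianFullwidth

	// Manual decode of the inverseData scheme described in tables.go: the
	// entry {0x01, 0xE0, 0x00, 0x00} maps U+FF21 (UTF-8 EF BC A1) to 'A',
	// because the stored byte 0xE0 xor-ed with the last source byte 0xA1
	// yields 0x41.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}
	n := int(entry[0]) // number of bytes in the target encoding
	buf := append([]byte{}, entry[1:1+n]...)
	buf[n-1] ^= 0xA1 // last byte of the UTF-8 encoding of U+FF21
	r, _ := utf8.DecodeRune(buf)
	fmt.Printf("%c\n", r) // A
}
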
+ +// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. +type Kind int + +const ( + // Neutral characters do not occur in legacy East Asian character sets. + Neutral Kind = iota + + // EastAsianAmbiguous characters that can be sometimes wide and sometimes + // narrow and require additional information not contained in the character + // code to further resolve their width. + EastAsianAmbiguous + + // EastAsianWide characters are wide in its usual form. They occur only in + // the context of East Asian typography. These runes may have explicit + // halfwidth counterparts. + EastAsianWide + + // EastAsianNarrow characters are narrow in its usual form. They often have + // fullwidth counterparts. + EastAsianNarrow + + // Note: there exist Narrow runes that do not have fullwidth or wide + // counterparts, despite what the definition says (e.g. U+27E6). + + // EastAsianFullwidth characters have a compatibility decompositions of type + // wide that map to a narrow counterpart. + EastAsianFullwidth + + // EastAsianHalfwidth characters have a compatibility decomposition of type + // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON + // SIGN. + EastAsianHalfwidth + + // Note: there exist runes that have a halfwidth counterparts but that are + // classified as Ambiguous, rather than wide (e.g. U+2190). +) + +// TODO: the generated tries need to return size 1 for invalid runes for the +// width to be computed correctly (each byte should render width 1) + +var trie = newWidthTrie(0) + +// Lookup reports the Properties of the first rune in b and the number of bytes +// of its UTF-8 encoding. +func Lookup(b []byte) (p Properties, size int) { + v, sz := trie.lookup(b) + return Properties{elem(v), b[sz-1]}, sz +} + +// LookupString reports the Properties of the first rune in s and the number of +// bytes of its UTF-8 encoding. +func LookupString(s string) (p Properties, size int) { + v, sz := trie.lookupString(s) + return Properties{elem(v), s[sz-1]}, sz +} + +// LookupRune reports the Properties of rune r. +func LookupRune(r rune) Properties { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + v, _ := trie.lookup(buf[:n]) + last := byte(r) + if r >= utf8.RuneSelf { + last = 0x80 + byte(r&0x3f) + } + return Properties{elem(v), last} +} + +// Properties provides access to width properties of a rune. +type Properties struct { + elem elem + last byte +} + +func (e elem) kind() Kind { + return Kind(e >> typeShift) +} + +// Kind returns the Kind of a rune as defined in Unicode TR #11. +// See https://unicode.org/reports/tr11/ for more details. +func (p Properties) Kind() Kind { + return p.elem.kind() +} + +// Folded returns the folded variant of a rune or 0 if the rune is canonical. +func (p Properties) Folded() rune { + if p.elem&tagNeedsFold != 0 { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Narrow returns the narrow variant of a rune or 0 if the rune is already +// narrow or doesn't have a narrow variant. +func (p Properties) Narrow() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Wide returns the wide variant of a rune or 0 if the rune is already +// wide or doesn't have a wide variant. 
+func (p Properties) Wide() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// TODO for Properties: +// - Add Fullwidth/Halfwidth or Inverted methods for computing variants +// mapping. +// - Add width information (including information on non-spacing runes). + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +// Reset implements the transform.Transformer interface. +func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { + return t.t.Span(src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +var ( + // Fold is a transform that maps all runes to their canonical width. + // + // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm + // provide a more generic folding mechanism. + Fold Transformer = Transformer{foldTransform{}} + + // Widen is a transform that maps runes to their wide variant, if + // available. + Widen Transformer = Transformer{wideTransform{}} + + // Narrow is a transform that maps runes to their narrow variant, if + // available. + Narrow Transformer = Transformer{narrowTransform{}} +) + +// TODO: Consider the following options: +// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some +// generalized variant of this. +// - Consider a wide Won character to be the default width (or some generalized +// variant of this). +// - Filter the set of characters that gets converted (the preferred approach is +// to allow applying filters to transforms). diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go new file mode 100644 index 000000000..d11505a16 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -0,0 +1,242 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + + "golang.org/x/tools/internal/analysisinternal" +) + +// An Analyzer describes an analysis function and its options. +type Analyzer struct { + // The Name of the analyzer must be a valid Go identifier + // as it may appear in command-line flags, URLs, and so on. + Name string + + // Doc is the documentation for the analyzer. + // The part before the first "\n\n" is the title + // (no capital or period, max ~60 letters). + Doc string + + // Flags defines any flags accepted by the analyzer. + // The manner in which these flags are exposed to the user + // depends on the driver which runs the analyzer. + Flags flag.FlagSet + + // Run applies the analyzer to a package. 
+ // It returns an error if the analyzer failed. + // + // On success, the Run function may return a result + // computed by the Analyzer; its type must match ResultType. + // The driver makes this result available as an input to + // another Analyzer that depends directly on this one (see + // Requires) when it analyzes the same package. + // + // To pass analysis results between packages (and thus + // potentially between address spaces), use Facts, which are + // serializable. + Run func(*Pass) (interface{}, error) + + // RunDespiteErrors allows the driver to invoke + // the Run method of this analyzer even on a + // package that contains parse or type errors. + RunDespiteErrors bool + + // Requires is a set of analyzers that must run successfully + // before this one on a given package. This analyzer may inspect + // the outputs produced by each analyzer in Requires. + // The graph over analyzers implied by Requires edges must be acyclic. + // + // Requires establishes a "horizontal" dependency between + // analysis passes (different analyzers, same package). + Requires []*Analyzer + + // ResultType is the type of the optional result of the Run function. + ResultType reflect.Type + + // FactTypes indicates that this analyzer imports and exports + // Facts of the specified concrete types. + // An analyzer that uses facts may assume that its import + // dependencies have been similarly analyzed before it runs. + // Facts must be pointers. + // + // FactTypes establishes a "vertical" dependency between + // analysis passes (same analyzer, different packages). + FactTypes []Fact +} + +func (a *Analyzer) String() string { return a.Name } + +func init() { + // Set the analysisinternal functions to be able to pass type errors + // to the Pass type without modifying the go/analysis API. + analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) { + p.(*Pass).typeErrors = errors + } + analysisinternal.GetTypeErrors = func(p interface{}) []types.Error { + return p.(*Pass).typeErrors + } +} + +// A Pass provides information to the Run function that +// applies a specific analyzer to a single Go package. +// +// It forms the interface between the analysis logic and the driver +// program, and has both input and an output components. +// +// As in a compiler, one pass may depend on the result computed by another. +// +// The Run function should not call any of the Pass functions concurrently. +type Pass struct { + Analyzer *Analyzer // the identity of the current analyzer + + // syntax and type information + Fset *token.FileSet // file position information + Files []*ast.File // the abstract syntax tree of each file + OtherFiles []string // names of non-Go files of this package + IgnoredFiles []string // names of ignored source files in this package + Pkg *types.Package // type information about the package + TypesInfo *types.Info // type information about the syntax trees + TypesSizes types.Sizes // function for computing sizes of types + + // Report reports a Diagnostic, a finding about a specific location + // in the analyzed source code such as a potential mistake. + // It may be called by the Run function. + Report func(Diagnostic) + + // ResultOf provides the inputs to this analysis pass, which are + // the corresponding results of its prerequisite analyzers. + // The map keys are the elements of Analysis.Required, + // and the type of each corresponding value is the required + // analysis's ResultType. 
+ ResultOf map[*Analyzer]interface{} + + // -- facts -- + + // ImportObjectFact retrieves a fact associated with obj. + // Given a value ptr of type *T, where *T satisfies Fact, + // ImportObjectFact copies the value to *ptr. + // + // ImportObjectFact panics if called after the pass is complete. + // ImportObjectFact is not concurrency-safe. + ImportObjectFact func(obj types.Object, fact Fact) bool + + // ImportPackageFact retrieves a fact associated with package pkg, + // which must be this package or one of its dependencies. + // See comments for ImportObjectFact. + ImportPackageFact func(pkg *types.Package, fact Fact) bool + + // ExportObjectFact associates a fact of type *T with the obj, + // replacing any previous fact of that type. + // + // ExportObjectFact panics if it is called after the pass is + // complete, or if obj does not belong to the package being analyzed. + // ExportObjectFact is not concurrency-safe. + ExportObjectFact func(obj types.Object, fact Fact) + + // ExportPackageFact associates a fact with the current package. + // See comments for ExportObjectFact. + ExportPackageFact func(fact Fact) + + // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes + // in unspecified order. + // WARNING: This is an experimental API and may change in the future. + AllPackageFacts func() []PackageFact + + // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes + // in unspecified order. + // WARNING: This is an experimental API and may change in the future. + AllObjectFacts func() []ObjectFact + + // typeErrors contains types.Errors that are associated with the pkg. + typeErrors []types.Error + + /* Further fields may be added in future. */ + // For example, suggested or applied refactorings. +} + +// PackageFact is a package together with an associated fact. +// WARNING: This is an experimental API and may change in the future. +type PackageFact struct { + Package *types.Package + Fact Fact +} + +// ObjectFact is an object together with an associated fact. +// WARNING: This is an experimental API and may change in the future. +type ObjectFact struct { + Object types.Object + Fact Fact +} + +// Reportf is a helper function that reports a Diagnostic using the +// specified position and formatted error message. +func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: pos, Message: msg}) +} + +// The Range interface provides a range. It's equivalent to and satisfied by +// ast.Node. +type Range interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node +} + +// ReportRangef is a helper function that reports a Diagnostic using the +// range provided. ast.Node values can be passed in as the range because +// they satisfy the Range interface. +func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) +} + +func (pass *Pass) String() string { + return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) +} + +// A Fact is an intermediate fact produced during analysis. +// +// Each fact is associated with a named declaration (a types.Object) or +// with a package as a whole. A single object or package may have +// multiple associated facts, but only one of any particular fact type. 
+// +// A Fact represents a predicate such as "never returns", but does not +// represent the subject of the predicate such as "function F" or "package P". +// +// Facts may be produced in one analysis pass and consumed by another +// analysis pass even if these are in different address spaces. +// If package P imports Q, all facts about Q produced during +// analysis of that package will be available during later analysis of P. +// Facts are analogous to type export data in a build system: +// just as export data enables separate compilation of several passes, +// facts enable "separate analysis". +// +// Each pass (a, p) starts with the set of facts produced by the +// same analyzer a applied to the packages directly imported by p. +// The analysis may add facts to the set, and they may be exported in turn. +// An analysis's Run function may retrieve facts by calling +// Pass.Import{Object,Package}Fact and update them using +// Pass.Export{Object,Package}Fact. +// +// A fact is logically private to its Analysis. To pass values +// between different analyzers, use the results mechanism; +// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf. +// +// A Fact type must be a pointer. +// Facts are encoded and decoded using encoding/gob. +// A Fact may implement the GobEncoder/GobDecoder interfaces +// to customize its encoding. Fact encoding should not fail. +// +// A Fact should not be modified once exported. +type Fact interface { + AFact() // dummy method to avoid type errors +} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go new file mode 100644 index 000000000..cd462a0cb --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import "go/token" + +// A Diagnostic is a message associated with a source location or range. +// +// An Analyzer may return a variety of diagnostics; the optional Category, +// which should be a constant, may be used to classify them. +// It is primarily intended to make it easy to look up documentation. +// +// If End is provided, the diagnostic is specified to apply to the range between +// Pos and End. +type Diagnostic struct { + Pos token.Pos + End token.Pos // optional + Category string // optional + Message string + + // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform + // edits to a file that address the diagnostic. + // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? + // Diagnostics should not contain SuggestedFixes that overlap. + // Experimental: This API is experimental and may change in the future. + SuggestedFixes []SuggestedFix // optional + + // Experimental: This API is experimental and may change in the future. + Related []RelatedInformation // optional +} + +// RelatedInformation contains information related to a diagnostic. +// For example, a diagnostic that flags duplicated declarations of a +// variable may include one RelatedInformation per existing +// declaration. +type RelatedInformation struct { + Pos token.Pos + End token.Pos + Message string +} + +// A SuggestedFix is a code change associated with a Diagnostic that a user can choose +// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged +// by the diagnostic. 
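A sketch of declaring and exchanging a fact, assuming the owning Analyzer also lists new(isNoisy) in its FactTypes; the fact name and its meaning are hypothetical.

    package example

    import (
        "go/types"

        "golang.org/x/tools/go/analysis"
    )

    // isNoisy is a hypothetical object fact: "this function logs verbosely".
    type isNoisy struct{}

    func (*isNoisy) AFact() {} // marker method required by analysis.Fact

    func markAndQuery(pass *analysis.Pass, fn *types.Func) {
        // Associate the fact with a function declared in the package being analyzed.
        pass.ExportObjectFact(fn, &isNoisy{})

        // Retrieve it again (here, or later from a package that imports fn's package).
        var fact isNoisy
        if pass.ImportObjectFact(fn, &fact) {
            pass.Reportf(fn.Pos(), "%s is noisy", fn.Name())
        }
    }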
+// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix +// should not contain edits for other packages. +// Experimental: This API is experimental and may change in the future. +type SuggestedFix struct { + // A description for this suggested fix to be shown to a user deciding + // whether to accept it. + Message string + TextEdits []TextEdit +} + +// A TextEdit represents the replacement of the code between Pos and End with the new text. +// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. +// Experimental: This API is experimental and may change in the future. +type TextEdit struct { + // For a pure insertion, End can either be set to Pos or token.NoPos. + Pos token.Pos + End token.Pos + NewText []byte +} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go new file mode 100644 index 000000000..94a3bd5d0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -0,0 +1,321 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package analysis defines the interface between a modular static +analysis and an analysis driver program. + + +Background + +A static analysis is a function that inspects a package of Go code and +reports a set of diagnostics (typically mistakes in the code), and +perhaps produces other results as well, such as suggested refactorings +or other facts. An analysis that reports mistakes is informally called a +"checker". For example, the printf checker reports mistakes in +fmt.Printf format strings. + +A "modular" analysis is one that inspects one package at a time but can +save information from a lower-level package and use it when inspecting a +higher-level package, analogous to separate compilation in a toolchain. +The printf checker is modular: when it discovers that a function such as +log.Fatalf delegates to fmt.Printf, it records this fact, and checks +calls to that function too, including calls made from another package. + +By implementing a common interface, checkers from a variety of sources +can be easily selected, incorporated, and reused in a wide range of +driver programs including command-line tools (such as vet), text editors and +IDEs, build and test systems (such as go build, Bazel, or Buck), test +frameworks, code review tools, code-base indexers (such as SourceGraph), +documentation viewers (such as godoc), batch pipelines for large code +bases, and so on. + + +Analyzer + +The primary type in the API is Analyzer. An Analyzer statically +describes an analysis function: its name, documentation, flags, +relationship to other analyzers, and of course, its logic. + +To define an analysis, a user declares a (logically constant) variable +of type Analyzer. Here is a typical example from one of the analyzers in +the go/analysis/passes/ subdirectory: + + package unusedresult + + var Analyzer = &analysis.Analyzer{ + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, + ... + } + + func run(pass *analysis.Pass) (interface{}, error) { + ... + } + +An analysis driver is a program such as vet that runs a set of +analyses and prints the diagnostics that they report. +The driver program must import the list of Analyzers it needs. +Typically each Analyzer resides in a separate package. 
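A sketch of attaching a SuggestedFix to a Diagnostic using the types defined above; the statement being flagged and the messages are hypothetical.

    package example

    import (
        "go/ast"

        "golang.org/x/tools/go/analysis"
    )

    // reportWithFix flags stmt and offers an automated fix that deletes it.
    func reportWithFix(pass *analysis.Pass, stmt ast.Stmt) {
        pass.Report(analysis.Diagnostic{
            Pos:     stmt.Pos(),
            End:     stmt.End(),
            Message: "redundant statement",
            SuggestedFixes: []analysis.SuggestedFix{{
                Message: "Remove the redundant statement",
                TextEdits: []analysis.TextEdit{{
                    Pos:     stmt.Pos(),
                    End:     stmt.End(),
                    NewText: []byte{}, // empty replacement, i.e. a pure deletion
                }},
            }},
        })
    }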
+To add a new Analyzer to an existing driver, add another item to the list: + + import ( "unusedresult"; "nilness"; "printf" ) + + var analyses = []*analysis.Analyzer{ + unusedresult.Analyzer, + nilness.Analyzer, + printf.Analyzer, + } + +A driver may use the name, flags, and documentation to provide on-line +help that describes the analyses it performs. +The doc comment contains a brief one-line summary, +optionally followed by paragraphs of explanation. + +The Analyzer type has more fields besides those shown above: + + type Analyzer struct { + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact + } + +The Flags field declares a set of named (global) flag variables that +control analysis behavior. Unlike vet, analysis flags are not declared +directly in the command line FlagSet; it is up to the driver to set the +flag variables. A driver for a single analysis, a, might expose its flag +f directly on the command line as -f, whereas a driver for multiple +analyses might prefix the flag name by the analysis name (-a.f) to avoid +ambiguity. An IDE might expose the flags through a graphical interface, +and a batch pipeline might configure them from a config file. +See the "findcall" analyzer for an example of flags in action. + +The RunDespiteErrors flag indicates whether the analysis is equipped to +handle ill-typed code. If not, the driver will skip the analysis if +there were parse or type errors. +The optional ResultType field specifies the type of the result value +computed by this analysis and made available to other analyses. +The Requires field specifies a list of analyses upon which +this one depends and whose results it may access, and it constrains the +order in which a driver may run analyses. +The FactTypes field is discussed in the section on Modularity. +The analysis package provides a Validate function to perform basic +sanity checks on an Analyzer, such as that its Requires graph is +acyclic, its fact and result types are unique, and so on. + +Finally, the Run field contains a function to be called by the driver to +execute the analysis on a single package. The driver passes it an +instance of the Pass type. + + +Pass + +A Pass describes a single unit of work: the application of a particular +Analyzer to a particular package of Go code. +The Pass provides information to the Analyzer's Run function about the +package being analyzed, and provides operations to the Run function for +reporting diagnostics and other information back to the driver. + + type Pass struct { + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + IgnoredFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) + ... + } + +The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, +type information, and source positions for a single package of Go code. + +The OtherFiles field provides the names, but not the contents, of non-Go +files such as assembly that are part of this package. See the "asmdecl" +or "buildtags" analyzers for examples of loading non-Go files and reporting +diagnostics against them. + +The IgnoredFiles field provides the names, but not the contents, +of ignored Go and non-Go source files that are not part of this package +with the current build configuration but may be part of other build +configurations. 
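Returning briefly to the Flags field discussed above: a sketch of an analyzer registering one flag on its own FlagSet and leaving exposure to the driver. The flag and its meaning are hypothetical (compare the real "findcall" analyzer).

    package example

    import (
        "golang.org/x/tools/go/analysis"
    )

    // maxDepth is set through the analyzer's flag set; a multi-analyzer
    // driver would typically surface it as -example.maxdepth.
    var maxDepth int

    var Analyzer = &analysis.Analyzer{
        Name: "example",
        Doc:  "hypothetical analyzer with a single flag",
        Run:  run,
    }

    func init() {
        // Register the flag on the analyzer's own FlagSet; whether and how it
        // appears on a command line is up to the driver.
        Analyzer.Flags.IntVar(&maxDepth, "maxdepth", 4, "maximum nesting depth to allow")
    }

    func run(pass *analysis.Pass) (interface{}, error) {
        _ = maxDepth // the analysis logic would consult the flag here
        return nil, nil
    }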
See the "buildtags" analyzer for an example of loading +and checking IgnoredFiles. + +The ResultOf field provides the results computed by the analyzers +required by this one, as expressed in its Analyzer.Requires field. The +driver runs the required analyzers first and makes their results +available in this map. Each Analyzer must return a value of the type +described in its Analyzer.ResultType field. +For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which +provides a control-flow graph for each function in the package (see +golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that +enables other Analyzers to traverse the syntax trees of the package more +efficiently; and the "buildssa" analyzer constructs an SSA-form +intermediate representation. +Each of these Analyzers extends the capabilities of later Analyzers +without adding a dependency to the core API, so an analysis tool pays +only for the extensions it needs. + +The Report function emits a diagnostic, a message associated with a +source position. For most analyses, diagnostics are their primary +result. +For convenience, Pass provides a helper method, Reportf, to report a new +diagnostic by formatting a string. +Diagnostic is defined as: + + type Diagnostic struct { + Pos token.Pos + Category string // optional + Message string + } + +The optional Category field is a short identifier that classifies the +kind of message when an analysis produces several kinds of diagnostic. + +Many analyses want to associate diagnostics with a severity level. +Because Diagnostic does not have a severity level field, an Analyzer's +diagnostics effectively all have the same severity level. To separate which +diagnostics are high severity and which are low severity, expose multiple +Analyzers instead. Analyzers should also be separated when their +diagnostics belong in different groups, or could be tagged differently +before being shown to the end user. Analyzers should document their severity +level to help downstream tools surface diagnostics properly. + +Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl +and buildtag, inspect the raw text of Go source files or even non-Go +files such as assembly. To report a diagnostic against a line of a +raw text file, use the following sequence: + + content, err := ioutil.ReadFile(filename) + if err != nil { ... } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + ... + pass.Reportf(tf.LineStart(line), "oops") + + +Modular analysis with Facts + +To improve efficiency and scalability, large programs are routinely +built using separate compilation: units of the program are compiled +separately, and recompiled only when one of their dependencies changes; +independent modules may be compiled in parallel. The same technique may +be applied to static analyses, for the same benefits. Such analyses are +described as "modular". + +A compiler’s type checker is an example of a modular static analysis. +Many other checkers we would like to apply to Go programs can be +understood as alternative or non-standard type systems. For example, +vet's printf checker infers whether a function has the "printf wrapper" +type, and it applies stricter checks to calls of such functions. In +addition, it records which functions are printf wrappers for use by +later analysis passes to identify other printf wrappers by induction. 
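To make the ResultOf plumbing concrete, a sketch of a Run function built on the "inspect" analyzer; the analyzer name and the diagnostic are hypothetical, but the type assertion matches inspect.Analyzer's documented result type.

    package example

    import (
        "go/ast"

        "golang.org/x/tools/go/analysis"
        "golang.org/x/tools/go/analysis/passes/inspect"
        "golang.org/x/tools/go/ast/inspector"
    )

    var Analyzer = &analysis.Analyzer{
        Name:     "example",
        Doc:      "hypothetical analyzer built on the inspect pass",
        Requires: []*analysis.Analyzer{inspect.Analyzer},
        Run:      run,
    }

    func run(pass *analysis.Pass) (interface{}, error) {
        ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

        // Visit only the node kinds of interest instead of walking every file.
        ins.Preorder([]ast.Node{(*ast.GoStmt)(nil)}, func(n ast.Node) {
            pass.Reportf(n.Pos(), "go statement found")
        })
        return nil, nil
    }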
+A result such as “f is a printf wrapper” that is not interesting by +itself but serves as a stepping stone to an interesting result (such as +a diagnostic) is called a "fact". + +The analysis API allows an analysis to define new types of facts, to +associate facts of these types with objects (named entities) declared +within the current package, or with the package as a whole, and to query +for an existing fact of a given type associated with an object or +package. + +An Analyzer that uses facts must declare their types: + + var Analyzer = &analysis.Analyzer{ + Name: "printf", + FactTypes: []analysis.Fact{new(isWrapper)}, + ... + } + + type isWrapper struct{} // => *types.Func f “is a printf wrapper” + +The driver program ensures that facts for a pass’s dependencies are +generated before analyzing the package and is responsible for propagating +facts from one package to another, possibly across address spaces. +Consequently, Facts must be serializable. The API requires that drivers +use the gob encoding, an efficient, robust, self-describing binary +protocol. A fact type may implement the GobEncoder/GobDecoder interfaces +if the default encoding is unsuitable. Facts should be stateless. + +The Pass type has functions to import and export facts, +associated either with an object or with a package: + + type Pass struct { + ... + ExportObjectFact func(types.Object, Fact) + ImportObjectFact func(types.Object, Fact) bool + + ExportPackageFact func(fact Fact) + ImportPackageFact func(*types.Package, Fact) bool + } + +An Analyzer may only export facts associated with the current package or +its objects, though it may import facts from any package or object that +is an import dependency of the current package. + +Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by +the pair (obj, TypeOf(fact)), and the ImportObjectFact function +retrieves the entry from this map and copies its value into the variable +pointed to by fact. This scheme assumes that the concrete type of fact +is a pointer; this assumption is checked by the Validate function. +See the "printf" analyzer for an example of object facts in action. + +Some driver implementations (such as those based on Bazel and Blaze) do +not currently apply analyzers to packages of the standard library. +Therefore, for best results, analyzer authors should not rely on +analysis facts being available for standard packages. +For example, although the printf checker is capable of deducing during +analysis of the log package that log.Printf is a printf wrapper, +this fact is built in to the analyzer so that it correctly checks +calls to log.Printf even when run in a driver that does not apply +it to standard packages. We would like to remove this limitation in future. + + +Testing an Analyzer + +The analysistest subpackage provides utilities for testing an Analyzer. +In a few lines of code, it is possible to run an analyzer on a package +of testdata files and check that it reported all the expected +diagnostics and facts (and no more). Expectations are expressed using +"// want ..." comments in the input code. + + +Standalone commands + +Analyzers are provided in the form of packages that a driver program is +expected to import. The vet command imports a set of several analyzers, +but users may wish to define their own analysis commands that perform +additional checks. 
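A sketch of the test pattern described under "Testing an Analyzer", assuming a hypothetical analyzer package with a testdata/src/a directory whose files carry "// want" comments.

    package example_test

    import (
        "testing"

        "golang.org/x/tools/go/analysis/analysistest"

        "example.com/analyzers/example" // hypothetical import path of the analyzer under test
    )

    func TestExample(t *testing.T) {
        // Run the analyzer on testdata/src/a and compare its diagnostics and
        // facts against the "// want" expectations in those files.
        analysistest.Run(t, analysistest.TestData(), example.Analyzer, "a")
    }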
To simplify the task of creating an analysis command, +either for a single analyzer or for a whole suite, we provide the +singlechecker and multichecker subpackages. + +The singlechecker package provides the main function for a command that +runs one analyzer. By convention, each analyzer such as +go/passes/findcall should be accompanied by a singlechecker-based +command such as go/analysis/passes/findcall/cmd/findcall, defined in its +entirety as: + + package main + + import ( + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/singlechecker" + ) + + func main() { singlechecker.Main(findcall.Analyzer) } + +A tool that provides multiple analyzers can use multichecker in a +similar way, giving it the list of Analyzers. + +*/ +package analysis diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go new file mode 100644 index 000000000..eb0016b18 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -0,0 +1,802 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asmdecl defines an Analyzer that reports mismatches between +// assembly files and Go declarations. +package asmdecl + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/token" + "go/types" + "log" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = "report mismatches between assembly files and Go declarations" + +var Analyzer = &analysis.Analyzer{ + Name: "asmdecl", + Doc: Doc, + Run: run, +} + +// 'kind' is a kind of assembly variable. +// The kinds 1, 2, 4, 8 stand for values of that size. +type asmKind int + +// These special kinds are not valid sizes. +const ( + asmString asmKind = 100 + iota + asmSlice + asmArray + asmInterface + asmEmptyInterface + asmStruct + asmComplex +) + +// An asmArch describes assembly parameters for an architecture +type asmArch struct { + name string + bigEndian bool + stack string + lr bool + // calculated during initialization + sizes types.Sizes + intSize int + ptrSize int + maxAlign int +} + +// An asmFunc describes the expected variables for a function on a given architecture. +type asmFunc struct { + arch *asmArch + size int // size of all arguments + vars map[string]*asmVar + varByOffset map[int]*asmVar +} + +// An asmVar describes a single assembly variable. 
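A sketch of the multichecker variant mentioned in the package documentation above; the particular analyzer list is illustrative.

    package main

    import (
        "golang.org/x/tools/go/analysis/multichecker"
        "golang.org/x/tools/go/analysis/passes/assign"
        "golang.org/x/tools/go/analysis/passes/atomic"
        "golang.org/x/tools/go/analysis/passes/bools"
    )

    func main() {
        // The driver exposes each analyzer's flags prefixed with its name
        // (-assign.x, -atomic.x, ...), as described earlier.
        multichecker.Main(
            assign.Analyzer,
            atomic.Analyzer,
            bools.Analyzer,
        )
    }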
+type asmVar struct { + name string + kind asmKind + typ string + off int + size int + inner []*asmVar +} + +var ( + asmArch386 = asmArch{name: "386", bigEndian: false, stack: "SP", lr: false} + asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true} + asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true} + asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false} + asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true} + asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true} + asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true} + asmArchMips64LE = asmArch{name: "mips64le", bigEndian: false, stack: "R29", lr: true} + asmArchPpc64 = asmArch{name: "ppc64", bigEndian: true, stack: "R1", lr: true} + asmArchPpc64LE = asmArch{name: "ppc64le", bigEndian: false, stack: "R1", lr: true} + asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true} + asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true} + asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false} + + arches = []*asmArch{ + &asmArch386, + &asmArchArm, + &asmArchArm64, + &asmArchAmd64, + &asmArchMips, + &asmArchMipsLE, + &asmArchMips64, + &asmArchMips64LE, + &asmArchPpc64, + &asmArchPpc64LE, + &asmArchRISCV64, + &asmArchS390X, + &asmArchWasm, + } +) + +func init() { + for _, arch := range arches { + arch.sizes = types.SizesFor("gc", arch.name) + if arch.sizes == nil { + // TODO(adonovan): fix: now that asmdecl is not in the standard + // library we cannot assume types.SizesFor is consistent with arches. + // For now, assume 64-bit norms and print a warning. + // But this warning should really be deferred until we attempt to use + // arch, which is very unlikely. Better would be + // to defer size computation until we have Pass.TypesSizes. + arch.sizes = types.SizesFor("gc", "amd64") + log.Printf("unknown architecture %s", arch.name) + } + arch.intSize = int(arch.sizes.Sizeof(types.Typ[types.Int])) + arch.ptrSize = int(arch.sizes.Sizeof(types.Typ[types.UnsafePointer])) + arch.maxAlign = int(arch.sizes.Alignof(types.Typ[types.Int64])) + } +} + +var ( + re = regexp.MustCompile + asmPlusBuild = re(`//\s+\+build\s+([^\n]+)`) + asmTEXT = re(`\bTEXT\b(.*)·([^\(]+)\(SB\)(?:\s*,\s*([0-9A-Z|+()]+))?(?:\s*,\s*\$(-?[0-9]+)(?:-([0-9]+))?)?`) + asmDATA = re(`\b(DATA|GLOBL)\b`) + asmNamedFP = re(`\$?([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`) + asmUnnamedFP = re(`[^+\-0-9](([0-9]+)\(FP\))`) + asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`) + asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`) + ppc64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`) + abiSuff = re(`^(.+)$`) +) + +func run(pass *analysis.Pass) (interface{}, error) { + // No work if no assembly files. + var sfiles []string + for _, fname := range pass.OtherFiles { + if strings.HasSuffix(fname, ".s") { + sfiles = append(sfiles, fname) + } + } + if sfiles == nil { + return nil, nil + } + + // Gather declarations. knownFunc[name][arch] is func description. 
+ knownFunc := make(map[string]map[string]*asmFunc) + + for _, f := range pass.Files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok && decl.Body == nil { + knownFunc[decl.Name.Name] = asmParseDecl(pass, decl) + } + } + } + +Files: + for _, fname := range sfiles { + content, tf, err := analysisutil.ReadFile(pass.Fset, fname) + if err != nil { + return nil, err + } + + // Determine architecture from file name if possible. + var arch string + var archDef *asmArch + for _, a := range arches { + if strings.HasSuffix(fname, "_"+a.name+".s") { + arch = a.name + archDef = a + break + } + } + + lines := strings.SplitAfter(string(content), "\n") + var ( + fn *asmFunc + fnName string + localSize, argSize int + wroteSP bool + noframe bool + haveRetArg bool + retLine []int + ) + + flushRet := func() { + if fn != nil && fn.vars["ret"] != nil && !haveRetArg && len(retLine) > 0 { + v := fn.vars["ret"] + for _, line := range retLine { + pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %d-byte ret+%d(FP)", arch, fnName, v.size, v.off) + } + } + retLine = nil + } + trimABI := func(fnName string) string { + m := abiSuff.FindStringSubmatch(fnName) + if m != nil { + return m[1] + } + return fnName + } + for lineno, line := range lines { + lineno++ + + badf := func(format string, args ...interface{}) { + pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) + } + + if arch == "" { + // Determine architecture from +build line if possible. + if m := asmPlusBuild.FindStringSubmatch(line); m != nil { + // There can be multiple architectures in a single +build line, + // so accumulate them all and then prefer the one that + // matches build.Default.GOARCH. + var archCandidates []*asmArch + for _, fld := range strings.Fields(m[1]) { + for _, a := range arches { + if a.name == fld { + archCandidates = append(archCandidates, a) + } + } + } + for _, a := range archCandidates { + if a.name == build.Default.GOARCH { + archCandidates = []*asmArch{a} + break + } + } + if len(archCandidates) > 0 { + arch = archCandidates[0].name + archDef = archCandidates[0] + } + } + } + + // Ignore comments and commented-out code. + if i := strings.Index(line, "//"); i >= 0 { + line = line[:i] + } + + if m := asmTEXT.FindStringSubmatch(line); m != nil { + flushRet() + if arch == "" { + // Arch not specified by filename or build tags. + // Fall back to build.Default.GOARCH. + for _, a := range arches { + if a.name == build.Default.GOARCH { + arch = a.name + archDef = a + break + } + } + if arch == "" { + log.Printf("%s: cannot determine architecture for assembly file", fname) + continue Files + } + } + fnName = m[2] + if pkgPath := strings.TrimSpace(m[1]); pkgPath != "" { + // The assembler uses Unicode division slash within + // identifiers to represent the directory separator. + pkgPath = strings.Replace(pkgPath, "∕", "/", -1) + if pkgPath != pass.Pkg.Path() { + // log.Printf("%s:%d: [%s] cannot check cross-package assembly function: %s is in package %s", fname, lineno, arch, fnName, pkgPath) + fn = nil + fnName = "" + continue + } + } + // Trim off optional ABI selector. 
+ fnName := trimABI(fnName) + flag := m[3] + fn = knownFunc[fnName][arch] + if fn != nil { + size, _ := strconv.Atoi(m[5]) + if size != fn.size && (flag != "7" && !strings.Contains(flag, "NOSPLIT") || size != 0) { + badf("wrong argument size %d; expected $...-%d", size, fn.size) + } + } + localSize, _ = strconv.Atoi(m[4]) + localSize += archDef.intSize + if archDef.lr && !strings.Contains(flag, "NOFRAME") { + // Account for caller's saved LR + localSize += archDef.intSize + } + argSize, _ = strconv.Atoi(m[5]) + noframe = strings.Contains(flag, "NOFRAME") + if fn == nil && !strings.Contains(fnName, "<>") && !noframe { + badf("function %s missing Go declaration", fnName) + } + wroteSP = false + haveRetArg = false + continue + } else if strings.Contains(line, "TEXT") && strings.Contains(line, "SB") { + // function, but not visible from Go (didn't match asmTEXT), so stop checking + flushRet() + fn = nil + fnName = "" + continue + } + + if strings.Contains(line, "RET") && !strings.Contains(line, "(SB)") { + // RET f(SB) is a tail call. It is okay to not write the results. + retLine = append(retLine, lineno) + } + + if fnName == "" { + continue + } + + if asmDATA.FindStringSubmatch(line) != nil { + fn = nil + } + + if archDef == nil { + continue + } + + if strings.Contains(line, ", "+archDef.stack) || strings.Contains(line, ",\t"+archDef.stack) || strings.Contains(line, "NOP "+archDef.stack) || strings.Contains(line, "NOP\t"+archDef.stack) { + wroteSP = true + continue + } + + if arch == "wasm" && strings.Contains(line, "CallImport") { + // CallImport is a call out to magic that can write the result. + haveRetArg = true + } + + for _, m := range asmSP.FindAllStringSubmatch(line, -1) { + if m[3] != archDef.stack || wroteSP || noframe { + continue + } + off := 0 + if m[1] != "" { + off, _ = strconv.Atoi(m[2]) + } + if off >= localSize { + if fn != nil { + v := fn.varByOffset[off-localSize] + if v != nil { + badf("%s should be %s+%d(FP)", m[1], v.name, off-localSize) + continue + } + } + if off >= localSize+argSize { + badf("use of %s points beyond argument frame", m[1]) + continue + } + badf("use of %s to access argument frame", m[1]) + } + } + + if fn == nil { + continue + } + + for _, m := range asmUnnamedFP.FindAllStringSubmatch(line, -1) { + off, _ := strconv.Atoi(m[2]) + v := fn.varByOffset[off] + if v != nil { + badf("use of unnamed argument %s; offset %d is %s+%d(FP)", m[1], off, v.name, v.off) + } else { + badf("use of unnamed argument %s", m[1]) + } + } + + for _, m := range asmNamedFP.FindAllStringSubmatch(line, -1) { + name := m[1] + off := 0 + if m[2] != "" { + off, _ = strconv.Atoi(m[2]) + } + if name == "ret" || strings.HasPrefix(name, "ret_") { + haveRetArg = true + } + v := fn.vars[name] + if v == nil { + // Allow argframe+0(FP). 
+ if name == "argframe" && off == 0 { + continue + } + v = fn.varByOffset[off] + if v != nil { + badf("unknown variable %s; offset %d is %s+%d(FP)", name, off, v.name, v.off) + } else { + badf("unknown variable %s", name) + } + continue + } + asmCheckVar(badf, fn, line, m[0], off, v, archDef) + } + } + flushRet() + } + return nil, nil +} + +func asmKindForType(t types.Type, size int) asmKind { + switch t := t.Underlying().(type) { + case *types.Basic: + switch t.Kind() { + case types.String: + return asmString + case types.Complex64, types.Complex128: + return asmComplex + } + return asmKind(size) + case *types.Pointer, *types.Chan, *types.Map, *types.Signature: + return asmKind(size) + case *types.Struct: + return asmStruct + case *types.Interface: + if t.Empty() { + return asmEmptyInterface + } + return asmInterface + case *types.Array: + return asmArray + case *types.Slice: + return asmSlice + } + panic("unreachable") +} + +// A component is an assembly-addressable component of a composite type, +// or a composite type itself. +type component struct { + size int + offset int + kind asmKind + typ string + suffix string // Such as _base for string base, _0_lo for lo half of first element of [1]uint64 on 32 bit machine. + outer string // The suffix for immediately containing composite type. +} + +func newComponent(suffix string, kind asmKind, typ string, offset, size int, outer string) component { + return component{suffix: suffix, kind: kind, typ: typ, offset: offset, size: size, outer: outer} +} + +// componentsOfType generates a list of components of type t. +// For example, given string, the components are the string itself, the base, and the length. +func componentsOfType(arch *asmArch, t types.Type) []component { + return appendComponentsRecursive(arch, t, nil, "", 0) +} + +// appendComponentsRecursive implements componentsOfType. +// Recursion is required to correct handle structs and arrays, +// which can contain arbitrary other types. 
+func appendComponentsRecursive(arch *asmArch, t types.Type, cc []component, suffix string, off int) []component { + s := t.String() + size := int(arch.sizes.Sizeof(t)) + kind := asmKindForType(t, size) + cc = append(cc, newComponent(suffix, kind, s, off, size, suffix)) + + switch kind { + case 8: + if arch.ptrSize == 4 { + w1, w2 := "lo", "hi" + if arch.bigEndian { + w1, w2 = w2, w1 + } + cc = append(cc, newComponent(suffix+"_"+w1, 4, "half "+s, off, 4, suffix)) + cc = append(cc, newComponent(suffix+"_"+w2, 4, "half "+s, off+4, 4, suffix)) + } + + case asmEmptyInterface: + cc = append(cc, newComponent(suffix+"_type", asmKind(arch.ptrSize), "interface type", off, arch.ptrSize, suffix)) + cc = append(cc, newComponent(suffix+"_data", asmKind(arch.ptrSize), "interface data", off+arch.ptrSize, arch.ptrSize, suffix)) + + case asmInterface: + cc = append(cc, newComponent(suffix+"_itable", asmKind(arch.ptrSize), "interface itable", off, arch.ptrSize, suffix)) + cc = append(cc, newComponent(suffix+"_data", asmKind(arch.ptrSize), "interface data", off+arch.ptrSize, arch.ptrSize, suffix)) + + case asmSlice: + cc = append(cc, newComponent(suffix+"_base", asmKind(arch.ptrSize), "slice base", off, arch.ptrSize, suffix)) + cc = append(cc, newComponent(suffix+"_len", asmKind(arch.intSize), "slice len", off+arch.ptrSize, arch.intSize, suffix)) + cc = append(cc, newComponent(suffix+"_cap", asmKind(arch.intSize), "slice cap", off+arch.ptrSize+arch.intSize, arch.intSize, suffix)) + + case asmString: + cc = append(cc, newComponent(suffix+"_base", asmKind(arch.ptrSize), "string base", off, arch.ptrSize, suffix)) + cc = append(cc, newComponent(suffix+"_len", asmKind(arch.intSize), "string len", off+arch.ptrSize, arch.intSize, suffix)) + + case asmComplex: + fsize := size / 2 + cc = append(cc, newComponent(suffix+"_real", asmKind(fsize), fmt.Sprintf("real(complex%d)", size*8), off, fsize, suffix)) + cc = append(cc, newComponent(suffix+"_imag", asmKind(fsize), fmt.Sprintf("imag(complex%d)", size*8), off+fsize, fsize, suffix)) + + case asmStruct: + tu := t.Underlying().(*types.Struct) + fields := make([]*types.Var, tu.NumFields()) + for i := 0; i < tu.NumFields(); i++ { + fields[i] = tu.Field(i) + } + offsets := arch.sizes.Offsetsof(fields) + for i, f := range fields { + cc = appendComponentsRecursive(arch, f.Type(), cc, suffix+"_"+f.Name(), off+int(offsets[i])) + } + + case asmArray: + tu := t.Underlying().(*types.Array) + elem := tu.Elem() + // Calculate offset of each element array. + fields := []*types.Var{ + types.NewVar(token.NoPos, nil, "fake0", elem), + types.NewVar(token.NoPos, nil, "fake1", elem), + } + offsets := arch.sizes.Offsetsof(fields) + elemoff := int(offsets[1]) + for i := 0; i < int(tu.Len()); i++ { + cc = appendComponentsRecursive(arch, elem, cc, suffix+"_"+strconv.Itoa(i), off+i*elemoff) + } + } + + return cc +} + +// asmParseDecl parses a function decl for expected assembly variables. +func asmParseDecl(pass *analysis.Pass, decl *ast.FuncDecl) map[string]*asmFunc { + var ( + arch *asmArch + fn *asmFunc + offset int + ) + + // addParams adds asmVars for each of the parameters in list. + // isret indicates whether the list are the arguments or the return values. + // TODO(adonovan): simplify by passing (*types.Signature).{Params,Results} + // instead of list. + addParams := func(list []*ast.Field, isret bool) { + argnum := 0 + for _, fld := range list { + t := pass.TypesInfo.Types[fld.Type].Type + + // Work around https://golang.org/issue/28277. 
+ if t == nil { + if ell, ok := fld.Type.(*ast.Ellipsis); ok { + t = types.NewSlice(pass.TypesInfo.Types[ell.Elt].Type) + } + } + + align := int(arch.sizes.Alignof(t)) + size := int(arch.sizes.Sizeof(t)) + offset += -offset & (align - 1) + cc := componentsOfType(arch, t) + + // names is the list of names with this type. + names := fld.Names + if len(names) == 0 { + // Anonymous args will be called arg, arg1, arg2, ... + // Similarly so for return values: ret, ret1, ret2, ... + name := "arg" + if isret { + name = "ret" + } + if argnum > 0 { + name += strconv.Itoa(argnum) + } + names = []*ast.Ident{ast.NewIdent(name)} + } + argnum += len(names) + + // Create variable for each name. + for _, id := range names { + name := id.Name + for _, c := range cc { + outer := name + c.outer + v := asmVar{ + name: name + c.suffix, + kind: c.kind, + typ: c.typ, + off: offset + c.offset, + size: c.size, + } + if vo := fn.vars[outer]; vo != nil { + vo.inner = append(vo.inner, &v) + } + fn.vars[v.name] = &v + for i := 0; i < v.size; i++ { + fn.varByOffset[v.off+i] = &v + } + } + offset += size + } + } + } + + m := make(map[string]*asmFunc) + for _, arch = range arches { + fn = &asmFunc{ + arch: arch, + vars: make(map[string]*asmVar), + varByOffset: make(map[int]*asmVar), + } + offset = 0 + addParams(decl.Type.Params.List, false) + if decl.Type.Results != nil && len(decl.Type.Results.List) > 0 { + offset += -offset & (arch.maxAlign - 1) + addParams(decl.Type.Results.List, true) + } + fn.size = offset + m[arch.name] = fn + } + + return m +} + +// asmCheckVar checks a single variable reference. +func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar, archDef *asmArch) { + m := asmOpcode.FindStringSubmatch(line) + if m == nil { + if !strings.HasPrefix(strings.TrimSpace(line), "//") { + badf("cannot find assembly opcode") + } + return + } + + addr := strings.HasPrefix(expr, "$") + + // Determine operand sizes from instruction. + // Typically the suffix suffices, but there are exceptions. + var src, dst, kind asmKind + op := m[1] + switch fn.arch.name + "." + op { + case "386.FMOVLP": + src, dst = 8, 4 + case "arm.MOVD": + src = 8 + case "arm.MOVW": + src = 4 + case "arm.MOVH", "arm.MOVHU": + src = 2 + case "arm.MOVB", "arm.MOVBU": + src = 1 + // LEA* opcodes don't really read the second arg. + // They just take the address of it. + case "386.LEAL": + dst = 4 + addr = true + case "amd64.LEAQ": + dst = 8 + addr = true + default: + switch fn.arch.name { + case "386", "amd64": + if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "D") || strings.HasSuffix(op, "DP")) { + // FMOVDP, FXCHD, etc + src = 8 + break + } + if strings.HasPrefix(op, "P") && strings.HasSuffix(op, "RD") { + // PINSRD, PEXTRD, etc + src = 4 + break + } + if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "F") || strings.HasSuffix(op, "FP")) { + // FMOVFP, FXCHF, etc + src = 4 + break + } + if strings.HasSuffix(op, "SD") { + // MOVSD, SQRTSD, etc + src = 8 + break + } + if strings.HasSuffix(op, "SS") { + // MOVSS, SQRTSS, etc + src = 4 + break + } + if op == "MOVO" || op == "MOVOU" { + src = 16 + break + } + if strings.HasPrefix(op, "SET") { + // SETEQ, etc + src = 1 + break + } + switch op[len(op)-1] { + case 'B': + src = 1 + case 'W': + src = 2 + case 'L': + src = 4 + case 'D', 'Q': + src = 8 + } + case "ppc64", "ppc64le": + // Strip standard suffixes to reveal size letter. 
+ m := ppc64Suff.FindStringSubmatch(op) + if m != nil { + switch m[1][0] { + case 'B': + src = 1 + case 'H': + src = 2 + case 'W': + src = 4 + case 'D': + src = 8 + } + } + case "mips", "mipsle", "mips64", "mips64le": + switch op { + case "MOVB", "MOVBU": + src = 1 + case "MOVH", "MOVHU": + src = 2 + case "MOVW", "MOVWU", "MOVF": + src = 4 + case "MOVV", "MOVD": + src = 8 + } + case "s390x": + switch op { + case "MOVB", "MOVBZ": + src = 1 + case "MOVH", "MOVHZ": + src = 2 + case "MOVW", "MOVWZ", "FMOVS": + src = 4 + case "MOVD", "FMOVD": + src = 8 + } + } + } + if dst == 0 { + dst = src + } + + // Determine whether the match we're holding + // is the first or second argument. + if strings.Index(line, expr) > strings.Index(line, ",") { + kind = dst + } else { + kind = src + } + + vk := v.kind + vs := v.size + vt := v.typ + switch vk { + case asmInterface, asmEmptyInterface, asmString, asmSlice: + // allow reference to first word (pointer) + vk = v.inner[0].kind + vs = v.inner[0].size + vt = v.inner[0].typ + case asmComplex: + // Allow a single instruction to load both parts of a complex. + if int(kind) == vs { + kind = asmComplex + } + } + if addr { + vk = asmKind(archDef.ptrSize) + vs = archDef.ptrSize + vt = "address" + } + + if off != v.off { + var inner bytes.Buffer + for i, vi := range v.inner { + if len(v.inner) > 1 { + fmt.Fprintf(&inner, ",") + } + fmt.Fprintf(&inner, " ") + if i == len(v.inner)-1 { + fmt.Fprintf(&inner, "or ") + } + fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off) + } + badf("invalid offset %s; expected %s+%d(FP)%s", expr, v.name, v.off, inner.String()) + return + } + if kind != 0 && kind != vk { + var inner bytes.Buffer + if len(v.inner) > 0 { + fmt.Fprintf(&inner, " containing") + for i, vi := range v.inner { + if i > 0 && len(v.inner) > 2 { + fmt.Fprintf(&inner, ",") + } + fmt.Fprintf(&inner, " ") + if i > 0 && i == len(v.inner)-1 { + fmt.Fprintf(&inner, "and ") + } + fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off) + } + } + badf("invalid %s of %s; %s is %d-byte value%s", op, expr, vt, vs, inner.String()) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go new file mode 100644 index 000000000..3586638ef --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -0,0 +1,76 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package assign defines an Analyzer that detects useless assignments. +package assign + +// TODO(adonovan): check also for assignments to struct fields inside +// methods that are on T instead of *T. + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for useless assignments + +This checker reports assignments of the form x = x or a[i] = a[i]. 
+These are almost always useless, and even when they aren't they are +usually a mistake.` + +var Analyzer = &analysis.Analyzer{ + Name: "assign", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.AssignStmt)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + stmt := n.(*ast.AssignStmt) + if stmt.Tok != token.ASSIGN { + return // ignore := + } + if len(stmt.Lhs) != len(stmt.Rhs) { + // If LHS and RHS have different cardinality, they can't be the same. + return + } + for i, lhs := range stmt.Lhs { + rhs := stmt.Rhs[i] + if analysisutil.HasSideEffects(pass.TypesInfo, lhs) || + analysisutil.HasSideEffects(pass.TypesInfo, rhs) { + continue // expressions may not be equal + } + if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) { + continue // short-circuit the heavy-weight gofmt check + } + le := analysisutil.Format(pass.Fset, lhs) + re := analysisutil.Format(pass.Fset, rhs) + if le == re { + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), Message: fmt.Sprintf("self-assignment of %s to %s", re, le), + SuggestedFixes: []analysis.SuggestedFix{ + {Message: "Remove", TextEdits: []analysis.TextEdit{ + {Pos: stmt.Pos(), End: stmt.End(), NewText: []byte{}}, + }}, + }, + }) + } + } + }) + + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go new file mode 100644 index 000000000..9261db7e4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atomic defines an Analyzer that checks for common mistakes +// using the sync/atomic package. 
+package atomic + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for common mistakes using the sync/atomic package + +The atomic checker looks for assignment statements of the form: + + x = atomic.AddUint64(&x, 1) + +which are not atomic.` + +var Analyzer = &analysis.Analyzer{ + Name: "atomic", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.AssignStmt)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + n := node.(*ast.AssignStmt) + if len(n.Lhs) != len(n.Rhs) { + return + } + if len(n.Lhs) == 1 && n.Tok == token.DEFINE { + return + } + + for i, right := range n.Rhs { + call, ok := right.(*ast.CallExpr) + if !ok { + continue + } + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + continue + } + pkgIdent, _ := sel.X.(*ast.Ident) + pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) + if !ok || pkgName.Imported().Path() != "sync/atomic" { + continue + } + + switch sel.Sel.Name { + case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr": + checkAtomicAddAssignment(pass, n.Lhs[i], call) + } + } + }) + return nil, nil +} + +// checkAtomicAddAssignment walks the atomic.Add* method calls checking +// for assigning the return value to the same variable being used in the +// operation +func checkAtomicAddAssignment(pass *analysis.Pass, left ast.Expr, call *ast.CallExpr) { + if len(call.Args) != 2 { + return + } + arg := call.Args[0] + broken := false + + gofmt := func(e ast.Expr) string { return analysisutil.Format(pass.Fset, e) } + + if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND { + broken = gofmt(left) == gofmt(uarg.X) + } else if star, ok := left.(*ast.StarExpr); ok { + broken = gofmt(star.X) == gofmt(arg) + } + + if broken { + pass.ReportRangef(left, "direct assignment to atomic value") + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go new file mode 100644 index 000000000..e2e1a4f67 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go @@ -0,0 +1,117 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atomicalign defines an Analyzer that checks for non-64-bit-aligned +// arguments to sync/atomic functions. On non-32-bit platforms, those functions +// panic if their argument variables are not 64-bit aligned. It is therefore +// the caller's responsibility to arrange for 64-bit alignment of such variables. 
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG +package atomicalign + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" + +var Analyzer = &analysis.Analyzer{ + Name: "atomicalign", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + if 8*pass.TypesSizes.Sizeof(types.Typ[types.Uintptr]) == 64 { + return nil, nil // 64-bit platform + } + if !analysisutil.Imports(pass.Pkg, "sync/atomic") { + return nil, nil // doesn't directly import sync/atomic + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspect.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkgIdent, ok := sel.X.(*ast.Ident) + if !ok { + return + } + pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) + if !ok || pkgName.Imported().Path() != "sync/atomic" { + return + } + + switch sel.Sel.Name { + case "AddInt64", "AddUint64", + "LoadInt64", "LoadUint64", + "StoreInt64", "StoreUint64", + "SwapInt64", "SwapUint64", + "CompareAndSwapInt64", "CompareAndSwapUint64": + + // For all the listed functions, the expression to check is always the first function argument. + check64BitAlignment(pass, sel.Sel.Name, call.Args[0]) + } + }) + + return nil, nil +} + +func check64BitAlignment(pass *analysis.Pass, funcName string, arg ast.Expr) { + // Checks the argument is made of the address operator (&) applied to + // to a struct field (as opposed to a variable as the first word of + // uint64 and int64 variables can be relied upon to be 64-bit aligned. + unary, ok := arg.(*ast.UnaryExpr) + if !ok || unary.Op != token.AND { + return + } + + // Retrieve the types.Struct in order to get the offset of the + // atomically accessed field. + sel, ok := unary.X.(*ast.SelectorExpr) + if !ok { + return + } + tvar, ok := pass.TypesInfo.Selections[sel].Obj().(*types.Var) + if !ok || !tvar.IsField() { + return + } + + stype, ok := pass.TypesInfo.Types[sel.X].Type.Underlying().(*types.Struct) + if !ok { + return + } + + var offset int64 + var fields []*types.Var + for i := 0; i < stype.NumFields(); i++ { + f := stype.Field(i) + fields = append(fields, f) + if f == tvar { + // We're done, this is the field we were looking for, + // no need to fill the fields slice further. + offset = pass.TypesSizes.Offsetsof(fields)[i] + break + } + } + if offset&7 == 0 { + return // 64-bit aligned + } + + pass.ReportRangef(arg, "address of non 64-bit aligned field .%s passed to atomic.%s", tvar.Name(), funcName) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go new file mode 100644 index 000000000..5ae47d894 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -0,0 +1,221 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bools defines an Analyzer that detects common mistakes +// involving boolean operators. 
+package bools + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = "check for common mistakes involving boolean operators" + +var Analyzer = &analysis.Analyzer{ + Name: "bools", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.BinaryExpr)(nil), + } + seen := make(map[*ast.BinaryExpr]bool) + inspect.Preorder(nodeFilter, func(n ast.Node) { + e := n.(*ast.BinaryExpr) + if seen[e] { + // Already processed as a subexpression of an earlier node. + return + } + + var op boolOp + switch e.Op { + case token.LOR: + op = or + case token.LAND: + op = and + default: + return + } + + comm := op.commutativeSets(pass.TypesInfo, e, seen) + for _, exprs := range comm { + op.checkRedundant(pass, exprs) + op.checkSuspect(pass, exprs) + } + }) + return nil, nil +} + +type boolOp struct { + name string + tok token.Token // token corresponding to this operator + badEq token.Token // token corresponding to the equality test that should not be used with this operator +} + +var ( + or = boolOp{"or", token.LOR, token.NEQ} + and = boolOp{"and", token.LAND, token.EQL} +) + +// commutativeSets returns all side effect free sets of +// expressions in e that are connected by op. +// For example, given 'a || b || f() || c || d' with the or op, +// commutativeSets returns {{b, a}, {d, c}}. +// commutativeSets adds any expanded BinaryExprs to seen. +func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[*ast.BinaryExpr]bool) [][]ast.Expr { + exprs := op.split(e, seen) + + // Partition the slice of expressions into commutative sets. + i := 0 + var sets [][]ast.Expr + for j := 0; j <= len(exprs); j++ { + if j == len(exprs) || hasSideEffects(info, exprs[j]) { + if i < j { + sets = append(sets, exprs[i:j]) + } + i = j + 1 + } + } + + return sets +} + +// checkRedundant checks for expressions of the form +// e && e +// e || e +// Exprs must contain only side effect free expressions. +func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) { + seen := make(map[string]bool) + for _, e := range exprs { + efmt := analysisutil.Format(pass.Fset, e) + if seen[efmt] { + pass.ReportRangef(e, "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt) + } else { + seen[efmt] = true + } + } +} + +// checkSuspect checks for expressions of the form +// x != c1 || x != c2 +// x == c1 && x == c2 +// where c1 and c2 are constant expressions. +// If c1 and c2 are the same then it's redundant; +// if c1 and c2 are different then it's always true or always false. +// Exprs must contain only side effect free expressions. +func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { + // seen maps from expressions 'x' to equality expressions 'x != c'. + seen := make(map[string]string) + + for _, e := range exprs { + bin, ok := e.(*ast.BinaryExpr) + if !ok || bin.Op != op.badEq { + continue + } + + // In order to avoid false positives, restrict to cases + // in which one of the operands is constant. We're then + // interested in the other operand. + // In the rare case in which both operands are constant + // (e.g. 
runtime.GOOS and "windows"), we'll only catch + // mistakes if the LHS is repeated, which is how most + // code is written. + var x ast.Expr + switch { + case pass.TypesInfo.Types[bin.Y].Value != nil: + x = bin.X + case pass.TypesInfo.Types[bin.X].Value != nil: + x = bin.Y + default: + continue + } + + // e is of the form 'x != c' or 'x == c'. + xfmt := analysisutil.Format(pass.Fset, x) + efmt := analysisutil.Format(pass.Fset, e) + if prev, found := seen[xfmt]; found { + // checkRedundant handles the case in which efmt == prev. + if efmt != prev { + pass.ReportRangef(e, "suspect %s: %s %s %s", op.name, efmt, op.tok, prev) + } + } else { + seen[xfmt] = efmt + } + } +} + +// hasSideEffects reports whether evaluation of e has side effects. +func hasSideEffects(info *types.Info, e ast.Expr) bool { + safe := true + ast.Inspect(e, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.CallExpr: + typVal := info.Types[n.Fun] + switch { + case typVal.IsType(): + // Type conversion, which is safe. + case typVal.IsBuiltin(): + // Builtin func, conservatively assumed to not + // be safe for now. + safe = false + return false + default: + // A non-builtin func or method call. + // Conservatively assume that all of them have + // side effects for now. + safe = false + return false + } + case *ast.UnaryExpr: + if n.Op == token.ARROW { + safe = false + return false + } + } + return true + }) + return !safe +} + +// split returns a slice of all subexpressions in e that are connected by op. +// For example, given 'a || (b || c) || d' with the or op, +// split returns []{d, c, b, a}. +// seen[e] is already true; any newly processed exprs are added to seen. +func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.Expr) { + for { + e = unparen(e) + if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok { + seen[b] = true + exprs = append(exprs, op.split(b.Y, seen)...) + e = b.X + } else { + exprs = append(exprs, e) + break + } + } + return +} + +// unparen returns e with any enclosing parentheses stripped. +func unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go new file mode 100644 index 000000000..02b7b18b3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -0,0 +1,117 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildssa defines an Analyzer that constructs the SSA +// representation of an error-free package and returns the set of all +// functions within it. It does not report any diagnostics itself but +// may be used as an input to other analyzers. +// +// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. +package buildssa + +import ( + "go/ast" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ssa" +) + +var Analyzer = &analysis.Analyzer{ + Name: "buildssa", + Doc: "build SSA-form IR for later passes", + Run: run, + ResultType: reflect.TypeOf(new(SSA)), +} + +// SSA provides SSA-form intermediate representation for all the +// non-blank source functions in the current package. 
+type SSA struct { + Pkg *ssa.Package + SrcFuncs []*ssa.Function +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Plundered from ssautil.BuildPackage. + + // We must create a new Program for each Package because the + // analysis API provides no place to hang a Program shared by + // all Packages. Consequently, SSA Packages and Functions do not + // have a canonical representation across an analysis session of + // multiple packages. This is unlikely to be a problem in + // practice because the analysis API essentially forces all + // packages to be analysed independently, so any given call to + // Analysis.Run on a package will see only SSA objects belonging + // to a single Program. + + // Some Analyzers may need GlobalDebug, in which case we'll have + // to set it globally, but let's wait till we need it. + mode := ssa.BuilderMode(0) + + prog := ssa.NewProgram(pass.Fset, mode) + + // Create SSA packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pass.Pkg.Imports()) + + // Create and build the primary package. + ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) + ssapkg.Build() + + // Compute list of source functions, including literals, + // in source order. + var funcs []*ssa.Function + for _, f := range pass.Files { + for _, decl := range f.Decls { + if fdecl, ok := decl.(*ast.FuncDecl); ok { + + // SSA will not build a Function + // for a FuncDecl named blank. + // That's arguably too strict but + // relaxing it would break uniqueness of + // names of package members. + if fdecl.Name.Name == "_" { + continue + } + + // (init functions have distinct Func + // objects named "init" and distinct + // ssa.Functions named "init#1", ...) + + fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) + if fn == nil { + panic(fn) + } + + f := ssapkg.Prog.FuncValue(fn) + if f == nil { + panic(fn) + } + + var addAnons func(f *ssa.Function) + addAnons = func(f *ssa.Function) { + funcs = append(funcs, f) + for _, anon := range f.AnonFuncs { + addAnons(anon) + } + } + addAnons(f) + } + } + } + + return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go new file mode 100644 index 000000000..c4407ad91 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -0,0 +1,367 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.16 +// +build go1.16 + +// Package buildtag defines an Analyzer that checks build tags. 
+package buildtag + +import ( + "go/ast" + "go/build/constraint" + "go/parser" + "go/token" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = "check that +build tags are well-formed and correctly located" + +var Analyzer = &analysis.Analyzer{ + Name: "buildtag", + Doc: Doc, + Run: runBuildTag, +} + +func runBuildTag(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + checkGoFile(pass, f) + } + for _, name := range pass.OtherFiles { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + if err != nil { + // Not valid Go source code - not our job to diagnose, so ignore. + return nil, nil + } + checkGoFile(pass, f) + } else { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + } + return nil, nil +} + +func checkGoFile(pass *analysis.Pass, f *ast.File) { + var check checker + check.init(pass) + defer check.finish() + + for _, group := range f.Comments { + // A +build comment is ignored after or adjoining the package declaration. + if group.End()+1 >= f.Package { + check.plusBuildOK = false + } + // A //go:build comment is ignored after the package declaration + // (but adjoining it is OK, in contrast to +build comments). + if group.Pos() >= f.Package { + check.goBuildOK = false + } + + // Check each line of a //-comment. + for _, c := range group.List { + // "+build" is ignored within or after a /*...*/ comment. + if !strings.HasPrefix(c.Text, "//") { + check.plusBuildOK = false + } + check.comment(c.Slash, c.Text) + } + } +} + +func checkOtherFile(pass *analysis.Pass, filename string) error { + var check checker + check.init(pass) + defer check.finish() + + // We cannot use the Go parser, since this may not be a Go source file. + // Read the raw bytes instead. + content, tf, err := analysisutil.ReadFile(pass.Fset, filename) + if err != nil { + return err + } + + check.file(token.Pos(tf.Base()), string(content)) + return nil +} + +type checker struct { + pass *analysis.Pass + plusBuildOK bool // "+build" lines still OK + goBuildOK bool // "go:build" lines still OK + crossCheck bool // cross-check go:build and +build lines when done reading file + inStar bool // currently in a /* */ comment + goBuildPos token.Pos // position of first go:build line found + plusBuildPos token.Pos // position of first "+build" line found + goBuild constraint.Expr // go:build constraint found + plusBuild constraint.Expr // AND of +build constraints found +} + +func (check *checker) init(pass *analysis.Pass) { + check.pass = pass + check.goBuildOK = true + check.plusBuildOK = true + check.crossCheck = true +} + +func (check *checker) file(pos token.Pos, text string) { + // Determine cutpoint where +build comments are no longer valid. + // They are valid in leading // comments in the file followed by + // a blank line. + // + // This must be done as a separate pass because of the + // requirement that the comment be followed by a blank line. 
+ var plusBuildCutoff int + fullText := text + for text != "" { + i := strings.Index(text, "\n") + if i < 0 { + i = len(text) + } else { + i++ + } + offset := len(fullText) - len(text) + line := text[:i] + text = text[i:] + line = strings.TrimSpace(line) + if !strings.HasPrefix(line, "//") && line != "" { + break + } + if line == "" { + plusBuildCutoff = offset + } + } + + // Process each line. + // Must stop once we hit goBuildOK == false + text = fullText + check.inStar = false + for text != "" { + i := strings.Index(text, "\n") + if i < 0 { + i = len(text) + } else { + i++ + } + offset := len(fullText) - len(text) + line := text[:i] + text = text[i:] + check.plusBuildOK = offset < plusBuildCutoff + + if strings.HasPrefix(line, "//") { + check.comment(pos+token.Pos(offset), line) + continue + } + + // Keep looking for the point at which //go:build comments + // stop being allowed. Skip over, cut out any /* */ comments. + for { + line = strings.TrimSpace(line) + if check.inStar { + i := strings.Index(line, "*/") + if i < 0 { + line = "" + break + } + line = line[i+len("*/"):] + check.inStar = false + continue + } + if strings.HasPrefix(line, "/*") { + check.inStar = true + line = line[len("/*"):] + continue + } + break + } + if line != "" { + // Found non-comment non-blank line. + // Ends space for valid //go:build comments, + // but also ends the fraction of the file we can + // reliably parse. From this point on we might + // incorrectly flag "comments" inside multiline + // string constants or anything else (this might + // not even be a Go program). So stop. + break + } + } +} + +func (check *checker) comment(pos token.Pos, text string) { + if strings.HasPrefix(text, "//") { + if strings.Contains(text, "+build") { + check.plusBuildLine(pos, text) + } + if strings.Contains(text, "//go:build") { + check.goBuildLine(pos, text) + } + } + if strings.HasPrefix(text, "/*") { + if i := strings.Index(text, "\n"); i >= 0 { + // multiline /* */ comment - process interior lines + check.inStar = true + i++ + pos += token.Pos(i) + text = text[i:] + for text != "" { + i := strings.Index(text, "\n") + if i < 0 { + i = len(text) + } else { + i++ + } + line := text[:i] + if strings.HasPrefix(line, "//") { + check.comment(pos, line) + } + pos += token.Pos(i) + text = text[i:] + } + check.inStar = false + } + } +} + +func (check *checker) goBuildLine(pos token.Pos, line string) { + if !constraint.IsGoBuild(line) { + if !strings.HasPrefix(line, "//go:build") && constraint.IsGoBuild("//"+strings.TrimSpace(line[len("//"):])) { + check.pass.Reportf(pos, "malformed //go:build line (space between // and go:build)") + } + return + } + if !check.goBuildOK || check.inStar { + check.pass.Reportf(pos, "misplaced //go:build comment") + check.crossCheck = false + return + } + + if check.goBuildPos == token.NoPos { + check.goBuildPos = pos + } else { + check.pass.Reportf(pos, "unexpected extra //go:build line") + check.crossCheck = false + } + + // testing hack: stop at // ERROR + if i := strings.Index(line, " // ERROR "); i >= 0 { + line = line[:i] + } + + x, err := constraint.Parse(line) + if err != nil { + check.pass.Reportf(pos, "%v", err) + check.crossCheck = false + return + } + + if check.goBuild == nil { + check.goBuild = x + } +} + +func (check *checker) plusBuildLine(pos token.Pos, line string) { + line = strings.TrimSpace(line) + if !constraint.IsPlusBuild(line) { + // Comment with +build but not at beginning. + // Only report early in file. 
+ if check.plusBuildOK && !strings.HasPrefix(line, "// want") { + check.pass.Reportf(pos, "possible malformed +build comment") + } + return + } + if !check.plusBuildOK { // inStar implies !plusBuildOK + check.pass.Reportf(pos, "misplaced +build comment") + check.crossCheck = false + } + + if check.plusBuildPos == token.NoPos { + check.plusBuildPos = pos + } + + // testing hack: stop at // ERROR + if i := strings.Index(line, " // ERROR "); i >= 0 { + line = line[:i] + } + + fields := strings.Fields(line[len("//"):]) + // IsPlusBuildConstraint check above implies fields[0] == "+build" + for _, arg := range fields[1:] { + for _, elem := range strings.Split(arg, ",") { + if strings.HasPrefix(elem, "!!") { + check.pass.Reportf(pos, "invalid double negative in build constraint: %s", arg) + check.crossCheck = false + continue + } + elem = strings.TrimPrefix(elem, "!") + for _, c := range elem { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + check.pass.Reportf(pos, "invalid non-alphanumeric build constraint: %s", arg) + check.crossCheck = false + break + } + } + } + } + + if check.crossCheck { + y, err := constraint.Parse(line) + if err != nil { + // Should never happen - constraint.Parse never rejects a // +build line. + // Also, we just checked the syntax above. + // Even so, report. + check.pass.Reportf(pos, "%v", err) + check.crossCheck = false + return + } + if check.plusBuild == nil { + check.plusBuild = y + } else { + check.plusBuild = &constraint.AndExpr{X: check.plusBuild, Y: y} + } + } +} + +func (check *checker) finish() { + if !check.crossCheck || check.plusBuildPos == token.NoPos || check.goBuildPos == token.NoPos { + return + } + + // Have both //go:build and // +build, + // with no errors found (crossCheck still true). + // Check they match. + var want constraint.Expr + lines, err := constraint.PlusBuildLines(check.goBuild) + if err != nil { + check.pass.Reportf(check.goBuildPos, "%v", err) + return + } + for _, line := range lines { + y, err := constraint.Parse(line) + if err != nil { + // Definitely should not happen, but not the user's fault. + // Do not report. + return + } + if want == nil { + want = y + } else { + want = &constraint.AndExpr{X: want, Y: y} + } + } + if want.String() != check.plusBuild.String() { + check.pass.Reportf(check.plusBuildPos, "+build lines do not match //go:build condition") + return + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag_old.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag_old.go new file mode 100644 index 000000000..e9234925f --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag_old.go @@ -0,0 +1,174 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support. + +//go:build !go1.16 +// +build !go1.16 + +// Package buildtag defines an Analyzer that checks build tags. 
+package buildtag + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = "check that +build tags are well-formed and correctly located" + +var Analyzer = &analysis.Analyzer{ + Name: "buildtag", + Doc: Doc, + Run: runBuildTag, +} + +func runBuildTag(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + checkGoFile(pass, f) + } + for _, name := range pass.OtherFiles { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + if err != nil { + // Not valid Go source code - not our job to diagnose, so ignore. + return nil, nil + } + checkGoFile(pass, f) + } else { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + } + return nil, nil +} + +func checkGoFile(pass *analysis.Pass, f *ast.File) { + pastCutoff := false + for _, group := range f.Comments { + // A +build comment is ignored after or adjoining the package declaration. + if group.End()+1 >= f.Package { + pastCutoff = true + } + + // "+build" is ignored within or after a /*...*/ comment. + if !strings.HasPrefix(group.List[0].Text, "//") { + pastCutoff = true + continue + } + + // Check each line of a //-comment. + for _, c := range group.List { + if !strings.Contains(c.Text, "+build") { + continue + } + if err := checkLine(c.Text, pastCutoff); err != nil { + pass.Reportf(c.Pos(), "%s", err) + } + } + } +} + +func checkOtherFile(pass *analysis.Pass, filename string) error { + content, tf, err := analysisutil.ReadFile(pass.Fset, filename) + if err != nil { + return err + } + + // We must look at the raw lines, as build tags may appear in non-Go + // files such as assembly files. + lines := bytes.SplitAfter(content, nl) + + // Determine cutpoint where +build comments are no longer valid. + // They are valid in leading // comments in the file followed by + // a blank line. + // + // This must be done as a separate pass because of the + // requirement that the comment be followed by a blank line. + var cutoff int + for i, line := range lines { + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, slashSlash) { + if len(line) > 0 { + break + } + cutoff = i + } + } + + for i, line := range lines { + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, slashSlash) { + continue + } + if !bytes.Contains(line, []byte("+build")) { + continue + } + if err := checkLine(string(line), i >= cutoff); err != nil { + pass.Reportf(analysisutil.LineStart(tf, i+1), "%s", err) + continue + } + } + return nil +} + +// checkLine checks a line that starts with "//" and contains "+build". +func checkLine(line string, pastCutoff bool) error { + line = strings.TrimPrefix(line, "//") + line = strings.TrimSpace(line) + + if strings.HasPrefix(line, "+build") { + fields := strings.Fields(line) + if fields[0] != "+build" { + // Comment is something like +buildasdf not +build. + return fmt.Errorf("possible malformed +build comment") + } + if pastCutoff { + return fmt.Errorf("+build comment must appear before package clause and be followed by a blank line") + } + if err := checkArguments(fields); err != nil { + return err + } + } else { + // Comment with +build but not at beginning. 
+ if !pastCutoff { + return fmt.Errorf("possible malformed +build comment") + } + } + return nil +} + +func checkArguments(fields []string) error { + for _, arg := range fields[1:] { + for _, elem := range strings.Split(arg, ",") { + if strings.HasPrefix(elem, "!!") { + return fmt.Errorf("invalid double negative in build constraint: %s", arg) + } + elem = strings.TrimPrefix(elem, "!") + for _, c := range elem { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + return fmt.Errorf("invalid non-alphanumeric build constraint: %s", arg) + } + } + } + } + return nil +} + +var ( + nl = []byte("\n") + slashSlash = []byte("//") +) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go new file mode 100644 index 000000000..5768d0b9b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -0,0 +1,376 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cgocall defines an Analyzer that detects some violations of +// the cgo pointer passing rules. +package cgocall + +import ( + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "log" + "os" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const debug = false + +const Doc = `detect some violations of the cgo pointer passing rules + +Check for invalid cgo pointer passing. +This looks for code that uses cgo to call C code passing values +whose types are almost always invalid according to the cgo pointer +sharing rules. +Specifically, it warns about attempts to pass a Go chan, map, func, +or slice to C, either directly, or via a pointer, array, or struct.` + +var Analyzer = &analysis.Analyzer{ + Name: "cgocall", + Doc: Doc, + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "runtime/cgo") { + return nil, nil // doesn't use cgo + } + + cgofiles, info, err := typeCheckCgoSourceFiles(pass.Fset, pass.Pkg, pass.Files, pass.TypesInfo, pass.TypesSizes) + if err != nil { + return nil, err + } + for _, f := range cgofiles { + checkCgo(pass.Fset, f, info, pass.Reportf) + } + return nil, nil +} + +func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(token.Pos, string, ...interface{})) { + ast.Inspect(f, func(n ast.Node) bool { + call, ok := n.(*ast.CallExpr) + if !ok { + return true + } + + // Is this a C.f() call? + var name string + if sel, ok := analysisutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok && id.Name == "C" { + name = sel.Sel.Name + } + } + if name == "" { + return true // not a call we need to check + } + + // A call to C.CBytes passes a pointer but is always safe. + if name == "CBytes" { + return true + } + + if debug { + log.Printf("%s: call to C.%s", fset.Position(call.Lparen), name) + } + + for _, arg := range call.Args { + if !typeOKForCgoCall(cgoBaseType(info, arg), make(map[types.Type]bool)) { + reportf(arg.Pos(), "possibly passing Go type with embedded pointer to C") + break + } + + // Check for passing the address of a bad type. 
+ if conv, ok := arg.(*ast.CallExpr); ok && len(conv.Args) == 1 && + isUnsafePointer(info, conv.Fun) { + arg = conv.Args[0] + } + if u, ok := arg.(*ast.UnaryExpr); ok && u.Op == token.AND { + if !typeOKForCgoCall(cgoBaseType(info, u.X), make(map[types.Type]bool)) { + reportf(arg.Pos(), "possibly passing Go type with embedded pointer to C") + break + } + } + } + return true + }) +} + +// typeCheckCgoSourceFiles returns type-checked syntax trees for the raw +// cgo files of a package (those that import "C"). Such files are not +// Go, so there may be gaps in type information around C.f references. +// +// This checker was initially written in vet to inspect raw cgo source +// files using partial type information. However, Analyzers in the new +// analysis API are presented with the type-checked, "cooked" Go ASTs +// resulting from cgo-processing files, so we must choose between +// working with the cooked file generated by cgo (which was tried but +// proved fragile) or locating the raw cgo file (e.g. from //line +// directives) and working with that, as we now do. +// +// Specifically, we must type-check the raw cgo source files (or at +// least the subtrees needed for this analyzer) in an environment that +// simulates the rest of the already type-checked package. +// +// For example, for each raw cgo source file in the original package, +// such as this one: +// +// package p +// import "C" +// import "fmt" +// type T int +// const k = 3 +// var x, y = fmt.Println() +// func f() { ... } +// func g() { ... C.malloc(k) ... } +// func (T) f(int) string { ... } +// +// we synthesize a new ast.File, shown below, that dot-imports the +// original "cooked" package using a special name ("·this·"), so that all +// references to package members resolve correctly. (References to +// unexported names cause an "unexported" error, which we ignore.) +// +// To avoid shadowing names imported from the cooked package, +// package-level declarations in the new source file are modified so +// that they do not declare any names. +// (The cgocall analysis is concerned with uses, not declarations.) +// Specifically, type declarations are discarded; +// all names in each var and const declaration are blanked out; +// each method is turned into a regular function by turning +// the receiver into the first parameter; +// and all functions are renamed to "_". +// +// package p +// import . "·this·" // declares T, k, x, y, f, g, T.f +// import "C" +// import "fmt" +// const _ = 3 +// var _, _ = fmt.Println() +// func _() { ... } +// func _() { ... C.malloc(k) ... } +// func _(T, int) string { ... } +// +// In this way, the raw function bodies and const/var initializer +// expressions are preserved but refer to the "cooked" objects imported +// from "·this·", and none of the transformed package-level declarations +// actually declares anything. In the example above, the reference to k +// in the argument of the call to C.malloc resolves to "·this·".k, which +// has an accurate type. +// +// This approach could in principle be generalized to more complex +// analyses on raw cgo files. One could synthesize a "C" package so that +// C.f would resolve to "·this·"._C_func_f, for example. But we have +// limited ourselves here to preserving function bodies and initializer +// expressions since that is all that the cgocall analyzer needs. 
+// +func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*ast.File, info *types.Info, sizes types.Sizes) ([]*ast.File, *types.Info, error) { + const thispkg = "·this·" + + // Which files are cgo files? + var cgoFiles []*ast.File + importMap := map[string]*types.Package{thispkg: pkg} + for _, raw := range files { + // If f is a cgo-generated file, Position reports + // the original file, honoring //line directives. + filename := fset.Position(raw.Pos()).Filename + f, err := parser.ParseFile(fset, filename, nil, parser.Mode(0)) + if err != nil { + return nil, nil, fmt.Errorf("can't parse raw cgo file: %v", err) + } + found := false + for _, spec := range f.Imports { + if spec.Path.Value == `"C"` { + found = true + break + } + } + if !found { + continue // not a cgo file + } + + // Record the original import map. + for _, spec := range raw.Imports { + path, _ := strconv.Unquote(spec.Path.Value) + importMap[path] = imported(info, spec) + } + + // Add special dot-import declaration: + // import . "·this·" + var decls []ast.Decl + decls = append(decls, &ast.GenDecl{ + Tok: token.IMPORT, + Specs: []ast.Spec{ + &ast.ImportSpec{ + Name: &ast.Ident{Name: "."}, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(thispkg), + }, + }, + }, + }) + + // Transform declarations from the raw cgo file. + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + switch decl.Tok { + case token.TYPE: + // Discard type declarations. + continue + case token.IMPORT: + // Keep imports. + case token.VAR, token.CONST: + // Blank the declared var/const names. + for _, spec := range decl.Specs { + spec := spec.(*ast.ValueSpec) + for i := range spec.Names { + spec.Names[i].Name = "_" + } + } + } + case *ast.FuncDecl: + // Blank the declared func name. + decl.Name.Name = "_" + + // Turn a method receiver: func (T) f(P) R {...} + // into regular parameter: func _(T, P) R {...} + if decl.Recv != nil { + var params []*ast.Field + params = append(params, decl.Recv.List...) + params = append(params, decl.Type.Params.List...) + decl.Type.Params.List = params + decl.Recv = nil + } + } + decls = append(decls, decl) + } + f.Decls = decls + if debug { + format.Node(os.Stderr, fset, f) // debugging + } + cgoFiles = append(cgoFiles, f) + } + if cgoFiles == nil { + return nil, nil, nil // nothing to do (can't happen?) + } + + // Type-check the synthetic files. + tc := &types.Config{ + FakeImportC: true, + Importer: importerFunc(func(path string) (*types.Package, error) { + return importMap[path], nil + }), + Sizes: sizes, + Error: func(error) {}, // ignore errors (e.g. unused import) + } + + // It's tempting to record the new types in the + // existing pass.TypesInfo, but we don't own it. + altInfo := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + } + tc.Check(pkg.Path(), fset, cgoFiles, altInfo) + + return cgoFiles, altInfo, nil +} + +// cgoBaseType tries to look through type conversions involving +// unsafe.Pointer to find the real type. It converts: +// unsafe.Pointer(x) => x +// *(*unsafe.Pointer)(unsafe.Pointer(&x)) => x +func cgoBaseType(info *types.Info, arg ast.Expr) types.Type { + switch arg := arg.(type) { + case *ast.CallExpr: + if len(arg.Args) == 1 && isUnsafePointer(info, arg.Fun) { + return cgoBaseType(info, arg.Args[0]) + } + case *ast.StarExpr: + call, ok := arg.X.(*ast.CallExpr) + if !ok || len(call.Args) != 1 { + break + } + // Here arg is *f(v). 
+ t := info.Types[call.Fun].Type + if t == nil { + break + } + ptr, ok := t.Underlying().(*types.Pointer) + if !ok { + break + } + // Here arg is *(*p)(v) + elem, ok := ptr.Elem().Underlying().(*types.Basic) + if !ok || elem.Kind() != types.UnsafePointer { + break + } + // Here arg is *(*unsafe.Pointer)(v) + call, ok = call.Args[0].(*ast.CallExpr) + if !ok || len(call.Args) != 1 { + break + } + // Here arg is *(*unsafe.Pointer)(f(v)) + if !isUnsafePointer(info, call.Fun) { + break + } + // Here arg is *(*unsafe.Pointer)(unsafe.Pointer(v)) + u, ok := call.Args[0].(*ast.UnaryExpr) + if !ok || u.Op != token.AND { + break + } + // Here arg is *(*unsafe.Pointer)(unsafe.Pointer(&v)) + return cgoBaseType(info, u.X) + } + + return info.Types[arg].Type +} + +// typeOKForCgoCall reports whether the type of arg is OK to pass to a +// C function using cgo. This is not true for Go types with embedded +// pointers. m is used to avoid infinite recursion on recursive types. +func typeOKForCgoCall(t types.Type, m map[types.Type]bool) bool { + if t == nil || m[t] { + return true + } + m[t] = true + switch t := t.Underlying().(type) { + case *types.Chan, *types.Map, *types.Signature, *types.Slice: + return false + case *types.Pointer: + return typeOKForCgoCall(t.Elem(), m) + case *types.Array: + return typeOKForCgoCall(t.Elem(), m) + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + if !typeOKForCgoCall(t.Field(i).Type(), m) { + return false + } + } + } + return true +} + +func isUnsafePointer(info *types.Info, e ast.Expr) bool { + t := info.Types[e].Type + return t != nil && t.Underlying() == types.Typ[types.UnsafePointer] +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// TODO(adonovan): make this a library function or method of Info. +func imported(info *types.Info, spec *ast.ImportSpec) *types.Package { + obj, ok := info.Implicits[spec] + if !ok { + obj = info.Defs[spec.Name] // renaming import + } + return obj.(*types.PkgName).Imported() +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go new file mode 100644 index 000000000..4c3ac6647 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go @@ -0,0 +1,117 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package composite defines an Analyzer that checks for unkeyed +// composite literals. +package composite + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for unkeyed composite literals + +This analyzer reports a diagnostic for composite literals of struct +types imported from another package that do not use the field-keyed +syntax. Such literals are fragile because the addition of a new field +(even if unexported) to the struct will cause compilation to fail. 
+ +As an example, + + err = &net.DNSConfigError{err} + +should be replaced by: + + err = &net.DNSConfigError{Err: err} +` + +var Analyzer = &analysis.Analyzer{ + Name: "composites", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, + Run: run, +} + +var whitelist = true + +func init() { + Analyzer.Flags.BoolVar(&whitelist, "whitelist", whitelist, "use composite white list; for testing only") +} + +// runUnkeyedLiteral checks if a composite literal is a struct literal with +// unkeyed fields. +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CompositeLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + cl := n.(*ast.CompositeLit) + + typ := pass.TypesInfo.Types[cl].Type + if typ == nil { + // cannot determine composite literals' type, skip it + return + } + typeName := typ.String() + if whitelist && unkeyedLiteral[typeName] { + // skip whitelisted types + return + } + under := typ.Underlying() + for { + ptr, ok := under.(*types.Pointer) + if !ok { + break + } + under = ptr.Elem().Underlying() + } + if _, ok := under.(*types.Struct); !ok { + // skip non-struct composite literals + return + } + if isLocalType(pass, typ) { + // allow unkeyed locally defined composite literal + return + } + + // check if the CompositeLit contains an unkeyed field + allKeyValue := true + for _, e := range cl.Elts { + if _, ok := e.(*ast.KeyValueExpr); !ok { + allKeyValue = false + break + } + } + if allKeyValue { + // all the composite literal fields are keyed + return + } + + pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName) + }) + return nil, nil +} + +func isLocalType(pass *analysis.Pass, typ types.Type) bool { + switch x := typ.(type) { + case *types.Struct: + // struct literals are local types + return true + case *types.Pointer: + return isLocalType(pass, x.Elem()) + case *types.Named: + // names in package foo are local to foo_test too + return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test") + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/whitelist.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/whitelist.go new file mode 100644 index 000000000..1e5f5fd20 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/whitelist.go @@ -0,0 +1,34 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package composite + +// unkeyedLiteral is a white list of types in the standard packages +// that are used with unkeyed literals we deem to be acceptable. +var unkeyedLiteral = map[string]bool{ + // These image and image/color struct types are frozen. We will never add fields to them. + "image/color.Alpha16": true, + "image/color.Alpha": true, + "image/color.CMYK": true, + "image/color.Gray16": true, + "image/color.Gray": true, + "image/color.NRGBA64": true, + "image/color.NRGBA": true, + "image/color.NYCbCrA": true, + "image/color.RGBA64": true, + "image/color.RGBA": true, + "image/color.YCbCr": true, + "image.Point": true, + "image.Rectangle": true, + "image.Uniform": true, + + "unicode.Range16": true, + "unicode.Range32": true, + + // These three structs are used in generated test main files, + // but the generator can be trusted. 
+ "testing.InternalBenchmark": true, + "testing.InternalExample": true, + "testing.InternalTest": true, +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go new file mode 100644 index 000000000..c4ebf7857 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -0,0 +1,300 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package copylock defines an Analyzer that checks for locks +// erroneously passed by value. +package copylock + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for locks erroneously passed by value + +Inadvertently copying a value containing a lock, such as sync.Mutex or +sync.WaitGroup, may cause both copies to malfunction. Generally such +values should be referred to through a pointer.` + +var Analyzer = &analysis.Analyzer{ + Name: "copylocks", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.CallExpr)(nil), + (*ast.CompositeLit)(nil), + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.GenDecl)(nil), + (*ast.RangeStmt)(nil), + (*ast.ReturnStmt)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + switch node := node.(type) { + case *ast.RangeStmt: + checkCopyLocksRange(pass, node) + case *ast.FuncDecl: + checkCopyLocksFunc(pass, node.Name.Name, node.Recv, node.Type) + case *ast.FuncLit: + checkCopyLocksFunc(pass, "func", nil, node.Type) + case *ast.CallExpr: + checkCopyLocksCallExpr(pass, node) + case *ast.AssignStmt: + checkCopyLocksAssign(pass, node) + case *ast.GenDecl: + checkCopyLocksGenDecl(pass, node) + case *ast.CompositeLit: + checkCopyLocksCompositeLit(pass, node) + case *ast.ReturnStmt: + checkCopyLocksReturnStmt(pass, node) + } + }) + return nil, nil +} + +// checkCopyLocksAssign checks whether an assignment +// copies a lock. +func checkCopyLocksAssign(pass *analysis.Pass, as *ast.AssignStmt) { + for i, x := range as.Rhs { + if path := lockPathRhs(pass, x); path != nil { + pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisutil.Format(pass.Fset, as.Lhs[i]), path) + } + } +} + +// checkCopyLocksGenDecl checks whether lock is copied +// in variable declaration. 
+func checkCopyLocksGenDecl(pass *analysis.Pass, gd *ast.GenDecl) { + if gd.Tok != token.VAR { + return + } + for _, spec := range gd.Specs { + valueSpec := spec.(*ast.ValueSpec) + for i, x := range valueSpec.Values { + if path := lockPathRhs(pass, x); path != nil { + pass.ReportRangef(x, "variable declaration copies lock value to %v: %v", valueSpec.Names[i].Name, path) + } + } + } +} + +// checkCopyLocksCompositeLit detects lock copy inside a composite literal +func checkCopyLocksCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) { + for _, x := range cl.Elts { + if node, ok := x.(*ast.KeyValueExpr); ok { + x = node.Value + } + if path := lockPathRhs(pass, x); path != nil { + pass.ReportRangef(x, "literal copies lock value from %v: %v", analysisutil.Format(pass.Fset, x), path) + } + } +} + +// checkCopyLocksReturnStmt detects lock copy in return statement +func checkCopyLocksReturnStmt(pass *analysis.Pass, rs *ast.ReturnStmt) { + for _, x := range rs.Results { + if path := lockPathRhs(pass, x); path != nil { + pass.ReportRangef(x, "return copies lock value: %v", path) + } + } +} + +// checkCopyLocksCallExpr detects lock copy in the arguments to a function call +func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) { + var id *ast.Ident + switch fun := ce.Fun.(type) { + case *ast.Ident: + id = fun + case *ast.SelectorExpr: + id = fun.Sel + } + if fun, ok := pass.TypesInfo.Uses[id].(*types.Builtin); ok { + switch fun.Name() { + case "new", "len", "cap", "Sizeof": + return + } + } + for _, x := range ce.Args { + if path := lockPathRhs(pass, x); path != nil { + pass.ReportRangef(x, "call of %s copies lock value: %v", analysisutil.Format(pass.Fset, ce.Fun), path) + } + } +} + +// checkCopyLocksFunc checks whether a function might +// inadvertently copy a lock, by checking whether +// its receiver, parameters, or return values +// are locks. +func checkCopyLocksFunc(pass *analysis.Pass, name string, recv *ast.FieldList, typ *ast.FuncType) { + if recv != nil && len(recv.List) > 0 { + expr := recv.List[0].Type + if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type); path != nil { + pass.ReportRangef(expr, "%s passes lock by value: %v", name, path) + } + } + + if typ.Params != nil { + for _, field := range typ.Params.List { + expr := field.Type + if path := lockPath(pass.Pkg, pass.TypesInfo.Types[expr].Type); path != nil { + pass.ReportRangef(expr, "%s passes lock by value: %v", name, path) + } + } + } + + // Don't check typ.Results. If T has a Lock field it's OK to write + // return T{} + // because that is returning the zero value. Leave result checking + // to the return statement. +} + +// checkCopyLocksRange checks whether a range statement +// might inadvertently copy a lock by checking whether +// any of the range variables are locks. 
+func checkCopyLocksRange(pass *analysis.Pass, r *ast.RangeStmt) { + checkCopyLocksRangeVar(pass, r.Tok, r.Key) + checkCopyLocksRangeVar(pass, r.Tok, r.Value) +} + +func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) { + if e == nil { + return + } + id, isId := e.(*ast.Ident) + if isId && id.Name == "_" { + return + } + + var typ types.Type + if rtok == token.DEFINE { + if !isId { + return + } + obj := pass.TypesInfo.Defs[id] + if obj == nil { + return + } + typ = obj.Type() + } else { + typ = pass.TypesInfo.Types[e].Type + } + + if typ == nil { + return + } + if path := lockPath(pass.Pkg, typ); path != nil { + pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisutil.Format(pass.Fset, e), path) + } +} + +type typePath []types.Type + +// String pretty-prints a typePath. +func (path typePath) String() string { + n := len(path) + var buf bytes.Buffer + for i := range path { + if i > 0 { + fmt.Fprint(&buf, " contains ") + } + // The human-readable path is in reverse order, outermost to innermost. + fmt.Fprint(&buf, path[n-i-1].String()) + } + return buf.String() +} + +func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { + if _, ok := x.(*ast.CompositeLit); ok { + return nil + } + if _, ok := x.(*ast.CallExpr); ok { + // A call may return a zero value. + return nil + } + if star, ok := x.(*ast.StarExpr); ok { + if _, ok := star.X.(*ast.CallExpr); ok { + // A call may return a pointer to a zero value. + return nil + } + } + return lockPath(pass.Pkg, pass.TypesInfo.Types[x].Type) +} + +// lockPath returns a typePath describing the location of a lock value +// contained in typ. If there is no contained lock, it returns nil. +func lockPath(tpkg *types.Package, typ types.Type) typePath { + if typ == nil { + return nil + } + + for { + atyp, ok := typ.Underlying().(*types.Array) + if !ok { + break + } + typ = atyp.Elem() + } + + // We're only interested in the case in which the underlying + // type is a struct. (Interfaces and pointers are safe to copy.) + styp, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil + } + + // We're looking for cases in which a pointer to this type + // is a sync.Locker, but a value is not. This differentiates + // embedded interfaces from embedded values. + if types.Implements(types.NewPointer(typ), lockerType) && !types.Implements(typ, lockerType) { + return []types.Type{typ} + } + + // In go1.10, sync.noCopy did not implement Locker. + // (The Unlock method was added only in CL 121876.) + // TODO(adonovan): remove workaround when we drop go1.10. + if named, ok := typ.(*types.Named); ok && + named.Obj().Name() == "noCopy" && + named.Obj().Pkg().Path() == "sync" { + return []types.Type{typ} + } + + nfields := styp.NumFields() + for i := 0; i < nfields; i++ { + ftyp := styp.Field(i).Type() + subpath := lockPath(tpkg, ftyp) + if subpath != nil { + return append(subpath, typ) + } + } + + return nil +} + +var lockerType *types.Interface + +// Construct a sync.Locker interface type. 
+func init() { + nullary := types.NewSignature(nil, nil, nil, false) // func() + methods := []*types.Func{ + types.NewFunc(token.NoPos, nil, "Lock", nullary), + types.NewFunc(token.NoPos, nil, "Unlock", nullary), + } + lockerType = types.NewInterface(methods, nil).Complete() +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go new file mode 100644 index 000000000..51600ffc7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -0,0 +1,226 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctrlflow is an analysis that provides a syntactic +// control-flow graph (CFG) for the body of a function. +// It records whether a function cannot return. +// By itself, it does not report any diagnostics. +package ctrlflow + +import ( + "go/ast" + "go/types" + "log" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/cfg" + "golang.org/x/tools/go/types/typeutil" +) + +var Analyzer = &analysis.Analyzer{ + Name: "ctrlflow", + Doc: "build a control-flow graph", + Run: run, + ResultType: reflect.TypeOf(new(CFGs)), + FactTypes: []analysis.Fact{new(noReturn)}, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +// noReturn is a fact indicating that a function does not return. +type noReturn struct{} + +func (*noReturn) AFact() {} + +func (*noReturn) String() string { return "noReturn" } + +// A CFGs holds the control-flow graphs +// for all the functions of the current package. +type CFGs struct { + defs map[*ast.Ident]types.Object // from Pass.TypesInfo.Defs + funcDecls map[*types.Func]*declInfo + funcLits map[*ast.FuncLit]*litInfo + pass *analysis.Pass // transient; nil after construction +} + +// CFGs has two maps: funcDecls for named functions and funcLits for +// unnamed ones. Unlike funcLits, the funcDecls map is not keyed by its +// syntax node, *ast.FuncDecl, because callMayReturn needs to do a +// look-up by *types.Func, and you can get from an *ast.FuncDecl to a +// *types.Func but not the other way. + +type declInfo struct { + decl *ast.FuncDecl + cfg *cfg.CFG // iff decl.Body != nil + started bool // to break cycles + noReturn bool +} + +type litInfo struct { + cfg *cfg.CFG + noReturn bool +} + +// FuncDecl returns the control-flow graph for a named function. +// It returns nil if decl.Body==nil. +func (c *CFGs) FuncDecl(decl *ast.FuncDecl) *cfg.CFG { + if decl.Body == nil { + return nil + } + fn := c.defs[decl.Name].(*types.Func) + return c.funcDecls[fn].cfg +} + +// FuncLit returns the control-flow graph for a literal function. +func (c *CFGs) FuncLit(lit *ast.FuncLit) *cfg.CFG { + return c.funcLits[lit].cfg +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // Because CFG construction consumes and produces noReturn + // facts, CFGs for exported FuncDecls must be built before 'run' + // returns; we cannot construct them lazily. + // (We could build CFGs for FuncLits lazily, + // but the benefit is marginal.) + + // Pass 1. Map types.Funcs to ast.FuncDecls in this package. 
+ funcDecls := make(map[*types.Func]*declInfo) // functions and methods + funcLits := make(map[*ast.FuncLit]*litInfo) + + var decls []*types.Func // keys(funcDecls), in order + var lits []*ast.FuncLit // keys(funcLits), in order + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + // Type information may be incomplete. + if fn, ok := pass.TypesInfo.Defs[n.Name].(*types.Func); ok { + funcDecls[fn] = &declInfo{decl: n} + decls = append(decls, fn) + } + case *ast.FuncLit: + funcLits[n] = new(litInfo) + lits = append(lits, n) + } + }) + + c := &CFGs{ + defs: pass.TypesInfo.Defs, + funcDecls: funcDecls, + funcLits: funcLits, + pass: pass, + } + + // Pass 2. Build CFGs. + + // Build CFGs for named functions. + // Cycles in the static call graph are broken + // arbitrarily but deterministically. + // We create noReturn facts as discovered. + for _, fn := range decls { + c.buildDecl(fn, funcDecls[fn]) + } + + // Build CFGs for literal functions. + // These aren't relevant to facts (since they aren't named) + // but are required for the CFGs.FuncLit API. + for _, lit := range lits { + li := funcLits[lit] + if li.cfg == nil { + li.cfg = cfg.New(lit.Body, c.callMayReturn) + if !hasReachableReturn(li.cfg) { + li.noReturn = true + } + } + } + + // All CFGs are now built. + c.pass = nil + + return c, nil +} + +// di.cfg may be nil on return. +func (c *CFGs) buildDecl(fn *types.Func, di *declInfo) { + // buildDecl may call itself recursively for the same function, + // because cfg.New is passed the callMayReturn method, which + // builds the CFG of the callee, leading to recursion. + // The buildDecl call tree thus resembles the static call graph. + // We mark each node when we start working on it to break cycles. + + if !di.started { // break cycle + di.started = true + + if isIntrinsicNoReturn(fn) { + di.noReturn = true + } + if di.decl.Body != nil { + di.cfg = cfg.New(di.decl.Body, c.callMayReturn) + if !hasReachableReturn(di.cfg) { + di.noReturn = true + } + } + if di.noReturn { + c.pass.ExportObjectFact(fn, new(noReturn)) + } + + // debugging + if false { + log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), di.noReturn) + } + } +} + +// callMayReturn reports whether the called function may return. +// It is passed to the CFG builder. +func (c *CFGs) callMayReturn(call *ast.CallExpr) (r bool) { + if id, ok := call.Fun.(*ast.Ident); ok && c.pass.TypesInfo.Uses[id] == panicBuiltin { + return false // panic never returns + } + + // Is this a static call? + fn := typeutil.StaticCallee(c.pass.TypesInfo, call) + if fn == nil { + return true // callee not statically known; be conservative + } + + // Function or method declared in this package? + if di, ok := c.funcDecls[fn]; ok { + c.buildDecl(fn, di) + return !di.noReturn + } + + // Not declared in this package. + // Is there a fact from another package? + return !c.pass.ImportObjectFact(fn, new(noReturn)) +} + +var panicBuiltin = types.Universe.Lookup("panic").(*types.Builtin) + +func hasReachableReturn(g *cfg.CFG) bool { + for _, b := range g.Blocks { + if b.Live && b.Return() != nil { + return true + } + } + return false +} + +// isIntrinsicNoReturn reports whether a function intrinsically never +// returns because it stops execution of the calling thread. +// It is the base case in the recursion. 
+func isIntrinsicNoReturn(fn *types.Func) bool { + // Add functions here as the need arises, but don't allocate memory. + path, name := fn.Pkg().Path(), fn.Name() + return path == "syscall" && (name == "Exit" || name == "ExitProcess" || name == "ExitThread") || + path == "runtime" && name == "Goexit" +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go new file mode 100644 index 000000000..9ea137386 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go @@ -0,0 +1,115 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package deepequalerrors defines an Analyzer that checks for the use +// of reflect.DeepEqual with error values. +package deepequalerrors + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `check for calls of reflect.DeepEqual on error values + +The deepequalerrors checker looks for calls of the form: + + reflect.DeepEqual(err1, err2) + +where err1 and err2 are errors. Using reflect.DeepEqual to compare +errors is discouraged.` + +var Analyzer = &analysis.Analyzer{ + Name: "deepequalerrors", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if !ok { + return + } + if fn.FullName() == "reflect.DeepEqual" && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { + pass.ReportRangef(call, "avoid using reflect.DeepEqual with errors") + } + }) + return nil, nil +} + +var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +// hasError reports whether the type of e contains the type error. +// See containsError, below, for the meaning of "contains". +func hasError(pass *analysis.Pass, e ast.Expr) bool { + tv, ok := pass.TypesInfo.Types[e] + if !ok { // no type info, assume good + return false + } + return containsError(tv.Type) +} + +// Report whether any type that typ could store and that could be compared is the +// error type. This includes typ itself, as well as the types of struct field, slice +// and array elements, map keys and elements, and pointers. It does not include +// channel types (incomparable), arg and result types of a Signature (not stored), or +// methods of a named or interface type (not stored). +func containsError(typ types.Type) bool { + // Track types being processed, to avoid infinite recursion. + // Using types as keys here is OK because we are checking for the identical pointer, not + // type identity. See analysis/passes/printf/types.go. 
+ inProgress := make(map[types.Type]bool) + + var check func(t types.Type) bool + check = func(t types.Type) bool { + if t == errorType { + return true + } + if inProgress[t] { + return false + } + inProgress[t] = true + switch t := t.(type) { + case *types.Pointer: + return check(t.Elem()) + case *types.Slice: + return check(t.Elem()) + case *types.Array: + return check(t.Elem()) + case *types.Map: + return check(t.Key()) || check(t.Elem()) + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + if check(t.Field(i).Type()) { + return true + } + } + case *types.Named: + return check(t.Underlying()) + + // We list the remaining valid type kinds for completeness. + case *types.Basic: + case *types.Chan: // channels store values, but they are not comparable + case *types.Signature: + case *types.Tuple: // tuples are only part of signatures + case *types.Interface: + } + return false + } + + return check(typ) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go new file mode 100644 index 000000000..384f02557 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -0,0 +1,75 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The errorsas package defines an Analyzer that checks that the second argument to +// errors.As is a pointer to a type implementing error. +package errorsas + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `report passing non-pointer or non-error values to errors.As + +The errorsas analysis reports calls to errors.As where the type +of the second argument is not a pointer to a type implementing error.` + +var Analyzer = &analysis.Analyzer{ + Name: "errorsas", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + switch pass.Pkg.Path() { + case "errors", "errors_test": + // These packages know how to use their own APIs. + // Sometimes they are testing what happens to incorrect programs. + return nil, nil + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if fn == nil { + return // not a static call + } + if len(call.Args) < 2 { + return // not enough arguments, e.g. called with return values of another function + } + if fn.FullName() == "errors.As" && !pointerToInterfaceOrError(pass, call.Args[1]) { + pass.ReportRangef(call, "second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") + } + }) + return nil, nil +} + +var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +// pointerToInterfaceOrError reports whether the type of e is a pointer to an interface or a type implementing error, +// or is the empty interface. 
+func pointerToInterfaceOrError(pass *analysis.Pass, e ast.Expr) bool { + t := pass.TypesInfo.Types[e].Type + if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 { + return true + } + pt, ok := t.Underlying().(*types.Pointer) + if !ok { + return false + } + _, ok = pt.Elem().Underlying().(*types.Interface) + return ok || types.Implements(pt.Elem(), errorType) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go new file mode 100644 index 000000000..78afe94ab --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -0,0 +1,368 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fieldalignment defines an Analyzer that detects structs that would use less +// memory if their fields were sorted. +package fieldalignment + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "go/types" + "sort" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `find structs that would use less memory if their fields were sorted + +This analyzer find structs that can be rearranged to use less memory, and provides +a suggested edit with the optimal order. + +Note that there are two different diagnostics reported. One checks struct size, +and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the +object that the garbage collector has to potentially scan for pointers, for example: + + struct { uint32; string } + +have 16 pointer bytes because the garbage collector has to scan up through the string's +inner pointer. + + struct { string; *uint32 } + +has 24 pointer bytes because it has to scan further through the *uint32. + + struct { string; uint32 } + +has 8 because it can stop immediately after the string pointer. +` + +var Analyzer = &analysis.Analyzer{ + Name: "fieldalignment", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + var s *ast.StructType + var ok bool + if s, ok = node.(*ast.StructType); !ok { + return + } + if tv, ok := pass.TypesInfo.Types[s]; ok { + fieldalignment(pass, s, tv.Type.(*types.Struct)) + } + }) + return nil, nil +} + +var unsafePointerTyp = types.Unsafe.Scope().Lookup("Pointer").(*types.TypeName).Type() + +func fieldalignment(pass *analysis.Pass, node *ast.StructType, typ *types.Struct) { + wordSize := pass.TypesSizes.Sizeof(unsafePointerTyp) + maxAlign := pass.TypesSizes.Alignof(unsafePointerTyp) + + s := gcSizes{wordSize, maxAlign} + optimal, indexes := optimalOrder(typ, &s) + optsz, optptrs := s.Sizeof(optimal), s.ptrdata(optimal) + + var message string + if sz := s.Sizeof(typ); sz != optsz { + message = fmt.Sprintf("struct of size %d could be %d", sz, optsz) + } else if ptrs := s.ptrdata(typ); ptrs != optptrs { + message = fmt.Sprintf("struct with %d pointer bytes could be %d", ptrs, optptrs) + } else { + // Already optimal order. + return + } + + // Flatten the ast node since it could have multiple field names per list item while + // *types.Struct only have one item per field. 
+ // TODO: Preserve multi-named fields instead of flattening. + var flat []*ast.Field + for _, f := range node.Fields.List { + // TODO: Preserve comment, for now get rid of them. + // See https://github.com/golang/go/issues/20744 + f.Comment = nil + f.Doc = nil + if len(f.Names) <= 1 { + flat = append(flat, f) + continue + } + for _, name := range f.Names { + flat = append(flat, &ast.Field{ + Names: []*ast.Ident{name}, + Type: f.Type, + }) + } + } + + // Sort fields according to the optimal order. + var reordered []*ast.Field + for _, index := range indexes { + reordered = append(reordered, flat[index]) + } + + newStr := &ast.StructType{ + Fields: &ast.FieldList{ + List: reordered, + }, + } + + // Write the newly aligned struct node to get the content for suggested fixes. + var buf bytes.Buffer + if err := format.Node(&buf, token.NewFileSet(), newStr); err != nil { + return + } + + pass.Report(analysis.Diagnostic{ + Pos: node.Pos(), + End: node.Pos() + token.Pos(len("struct")), + Message: message, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Rearrange fields", + TextEdits: []analysis.TextEdit{{ + Pos: node.Pos(), + End: node.End(), + NewText: buf.Bytes(), + }}, + }}, + }) +} + +func optimalOrder(str *types.Struct, sizes *gcSizes) (*types.Struct, []int) { + nf := str.NumFields() + + type elem struct { + index int + alignof int64 + sizeof int64 + ptrdata int64 + } + + elems := make([]elem, nf) + for i := 0; i < nf; i++ { + field := str.Field(i) + ft := field.Type() + elems[i] = elem{ + i, + sizes.Alignof(ft), + sizes.Sizeof(ft), + sizes.ptrdata(ft), + } + } + + sort.Slice(elems, func(i, j int) bool { + ei := &elems[i] + ej := &elems[j] + + // Place zero sized objects before non-zero sized objects. + zeroi := ei.sizeof == 0 + zeroj := ej.sizeof == 0 + if zeroi != zeroj { + return zeroi + } + + // Next, place more tightly aligned objects before less tightly aligned objects. + if ei.alignof != ej.alignof { + return ei.alignof > ej.alignof + } + + // Place pointerful objects before pointer-free objects. + noptrsi := ei.ptrdata == 0 + noptrsj := ej.ptrdata == 0 + if noptrsi != noptrsj { + return noptrsj + } + + if !noptrsi { + // If both have pointers... + + // ... then place objects with less trailing + // non-pointer bytes earlier. That is, place + // the field with the most trailing + // non-pointer bytes at the end of the + // pointerful section. + traili := ei.sizeof - ei.ptrdata + trailj := ej.sizeof - ej.ptrdata + if traili != trailj { + return traili < trailj + } + } + + // Lastly, order by size. + if ei.sizeof != ej.sizeof { + return ei.sizeof > ej.sizeof + } + + return false + }) + + fields := make([]*types.Var, nf) + indexes := make([]int, nf) + for i, e := range elems { + fields[i] = str.Field(e.index) + indexes[i] = e.index + } + return types.NewStruct(fields, nil), indexes +} + +// Code below based on go/types.StdSizes. + +type gcSizes struct { + WordSize int64 + MaxAlign int64 +} + +func (s *gcSizes) Alignof(T types.Type) int64 { + // For arrays and structs, alignment is defined in terms + // of alignment of the elements and fields, respectively. + switch t := T.Underlying().(type) { + case *types.Array: + // spec: "For a variable x of array type: unsafe.Alignof(x) + // is the same as unsafe.Alignof(x[0]), but at least 1." + return s.Alignof(t.Elem()) + case *types.Struct: + // spec: "For a variable x of struct type: unsafe.Alignof(x) + // is the largest of the values unsafe.Alignof(x.f) for each + // field f of x, but at least 1." 
+ max := int64(1) + for i, nf := 0, t.NumFields(); i < nf; i++ { + if a := s.Alignof(t.Field(i).Type()); a > max { + max = a + } + } + return max + } + a := s.Sizeof(T) // may be 0 + // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." + if a < 1 { + return 1 + } + if a > s.MaxAlign { + return s.MaxAlign + } + return a +} + +var basicSizes = [...]byte{ + types.Bool: 1, + types.Int8: 1, + types.Int16: 2, + types.Int32: 4, + types.Int64: 8, + types.Uint8: 1, + types.Uint16: 2, + types.Uint32: 4, + types.Uint64: 8, + types.Float32: 4, + types.Float64: 8, + types.Complex64: 8, + types.Complex128: 16, +} + +func (s *gcSizes) Sizeof(T types.Type) int64 { + switch t := T.Underlying().(type) { + case *types.Basic: + k := t.Kind() + if int(k) < len(basicSizes) { + if s := basicSizes[k]; s > 0 { + return int64(s) + } + } + if k == types.String { + return s.WordSize * 2 + } + case *types.Array: + return t.Len() * s.Sizeof(t.Elem()) + case *types.Slice: + return s.WordSize * 3 + case *types.Struct: + nf := t.NumFields() + if nf == 0 { + return 0 + } + + var o int64 + max := int64(1) + for i := 0; i < nf; i++ { + ft := t.Field(i).Type() + a, sz := s.Alignof(ft), s.Sizeof(ft) + if a > max { + max = a + } + if i == nf-1 && sz == 0 && o != 0 { + sz = 1 + } + o = align(o, a) + sz + } + return align(o, max) + case *types.Interface: + return s.WordSize * 2 + } + return s.WordSize // catch-all +} + +// align returns the smallest y >= x such that y % a == 0. +func align(x, a int64) int64 { + y := x + a - 1 + return y - y%a +} + +func (s *gcSizes) ptrdata(T types.Type) int64 { + switch t := T.Underlying().(type) { + case *types.Basic: + switch t.Kind() { + case types.String, types.UnsafePointer: + return s.WordSize + } + return 0 + case *types.Chan, *types.Map, *types.Pointer, *types.Signature, *types.Slice: + return s.WordSize + case *types.Interface: + return 2 * s.WordSize + case *types.Array: + n := t.Len() + if n == 0 { + return 0 + } + a := s.ptrdata(t.Elem()) + if a == 0 { + return 0 + } + z := s.Sizeof(t.Elem()) + return (n-1)*z + a + case *types.Struct: + nf := t.NumFields() + if nf == 0 { + return 0 + } + + var o, p int64 + for i := 0; i < nf; i++ { + ft := t.Field(i).Type() + a, sz := s.Alignof(ft), s.Sizeof(ft) + fp := s.ptrdata(ft) + o = align(o, a) + if fp != 0 { + p = o + fp + } + o += sz + } + return p + } + + panic("impossible") +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go new file mode 100644 index 000000000..27b1b8400 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package findcall defines an Analyzer that serves as a trivial +// example and test of the Analysis API. It reports a diagnostic for +// every call to a function or method of the name specified by its +// -name flag. It also exports a fact for each declaration that +// matches the name, plus a package-level fact if the package contained +// one or more such declarations. 
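+//
+// Editor's note (illustrative sketch, not part of the upstream source): with
+// -name=println, findcall would flag the call below; the function name greet
+// is hypothetical.
+//
+//	func greet() {
+//		println("hello") // diagnostic: "call of println(...)"
+//	}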
+package findcall + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +const Doc = `find calls to a particular function + +The findcall analysis reports calls to functions or methods +of a particular name.` + +var Analyzer = &analysis.Analyzer{ + Name: "findcall", + Doc: Doc, + Run: run, + RunDespiteErrors: true, + FactTypes: []analysis.Fact{new(foundFact)}, +} + +var name string // -name flag + +func init() { + Analyzer.Flags.StringVar(&name, "name", name, "name of the function to find") +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + ast.Inspect(f, func(n ast.Node) bool { + if call, ok := n.(*ast.CallExpr); ok { + var id *ast.Ident + switch fun := call.Fun.(type) { + case *ast.Ident: + id = fun + case *ast.SelectorExpr: + id = fun.Sel + } + if id != nil && !pass.TypesInfo.Types[id].IsType() && id.Name == name { + pass.Report(analysis.Diagnostic{ + Pos: call.Lparen, + Message: fmt.Sprintf("call of %s(...)", id.Name), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Add '_TEST_'"), + TextEdits: []analysis.TextEdit{{ + Pos: call.Lparen, + End: call.Lparen, + NewText: []byte("_TEST_"), + }}, + }}, + }) + } + } + return true + }) + } + + // Export a fact for each matching function. + // + // These facts are produced only to test the testing + // infrastructure in the analysistest package. + // They are not consumed by the findcall Analyzer + // itself, as would happen in a more realistic example. + for _, f := range pass.Files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok && decl.Name.Name == name { + if obj, ok := pass.TypesInfo.Defs[decl.Name].(*types.Func); ok { + pass.ExportObjectFact(obj, new(foundFact)) + } + } + } + } + + if len(pass.AllObjectFacts()) > 0 { + pass.ExportPackageFact(new(foundFact)) + } + + return nil, nil +} + +// foundFact is a fact associated with functions that match -name. +// We use it to exercise the fact machinery in tests. +type foundFact struct{} + +func (*foundFact) String() string { return "found" } +func (*foundFact) AFact() {} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go new file mode 100644 index 000000000..741492e47 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -0,0 +1,91 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package framepointer defines an Analyzer that reports assembly code +// that clobbers the frame pointer before saving it. +package framepointer + +import ( + "go/build" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = "report assembly that clobbers the frame pointer before saving it" + +var Analyzer = &analysis.Analyzer{ + Name: "framepointer", + Doc: Doc, + Run: run, +} + +var ( + re = regexp.MustCompile + asmWriteBP = re(`,\s*BP$`) // TODO: can have false positive, e.g. for TESTQ BP,BP. Seems unlikely. + asmMentionBP = re(`\bBP\b`) + asmControlFlow = re(`^(J|RET)`) +) + +func run(pass *analysis.Pass) (interface{}, error) { + if build.Default.GOARCH != "amd64" { // TODO: arm64 also? + return nil, nil + } + if build.Default.GOOS != "linux" && build.Default.GOOS != "darwin" { + return nil, nil + } + + // Find assembly files to work on. 
+ var sfiles []string + for _, fname := range pass.OtherFiles { + if strings.HasSuffix(fname, ".s") && pass.Pkg.Path() != "runtime" { + sfiles = append(sfiles, fname) + } + } + + for _, fname := range sfiles { + content, tf, err := analysisutil.ReadFile(pass.Fset, fname) + if err != nil { + return nil, err + } + + lines := strings.SplitAfter(string(content), "\n") + active := false + for lineno, line := range lines { + lineno++ + + // Ignore comments and commented-out code. + if i := strings.Index(line, "//"); i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + // We start checking code at a TEXT line for a frameless function. + if strings.HasPrefix(line, "TEXT") && strings.Contains(line, "(SB)") && strings.Contains(line, "$0") { + active = true + continue + } + if !active { + continue + } + + if asmWriteBP.MatchString(line) { // clobber of BP, function is not OK + pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving") + active = false + continue + } + if asmMentionBP.MatchString(line) { // any other use of BP might be a read, so function is OK + active = false + continue + } + if asmControlFlow.MatchString(line) { // give up after any branch instruction + active = false + continue + } + } + } + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go new file mode 100644 index 000000000..fd9e2af2b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -0,0 +1,169 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpresponse defines an Analyzer that checks for mistakes +// using HTTP responses. +package httpresponse + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for mistakes using HTTP responses + +A common mistake when using the net/http package is to defer a function +call to close the http.Response Body before checking the error that +determines whether the response is valid: + + resp, err := http.Head(url) + defer resp.Body.Close() + if err != nil { + log.Fatal(err) + } + // (defer statement belongs here) + +This checker helps uncover latent nil dereference bugs by reporting a +diagnostic for such mistakes.` + +var Analyzer = &analysis.Analyzer{ + Name: "httpresponse", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // Fast path: if the package doesn't import net/http, + // skip the traversal. + if !analysisutil.Imports(pass.Pkg, "net/http") { + return nil, nil + } + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool { + if !push { + return true + } + call := n.(*ast.CallExpr) + if !isHTTPFuncOrMethodOnClient(pass.TypesInfo, call) { + return true // the function call is not related to this check. + } + + // Find the innermost containing block, and get the list + // of statements starting with the one containing call. 
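+		//
+		// Illustrative sketch (editor's addition): the shape being matched here is
+		//
+		//	resp, err := http.Head(url) // stmts[0]: assignment defining resp
+		//	defer resp.Body.Close()     // stmts[1]: defer that uses resp before err is checked
+		//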
+ stmts := restOfBlock(stack) + if len(stmts) < 2 { + return true // the call to the http function is the last statement of the block. + } + + asg, ok := stmts[0].(*ast.AssignStmt) + if !ok { + return true // the first statement is not assignment. + } + resp := rootIdent(asg.Lhs[0]) + if resp == nil { + return true // could not find the http.Response in the assignment. + } + + def, ok := stmts[1].(*ast.DeferStmt) + if !ok { + return true // the following statement is not a defer. + } + root := rootIdent(def.Call.Fun) + if root == nil { + return true // could not find the receiver of the defer call. + } + + if resp.Obj == root.Obj { + pass.ReportRangef(root, "using %s before checking for errors", resp.Name) + } + return true + }) + return nil, nil +} + +// isHTTPFuncOrMethodOnClient checks whether the given call expression is on +// either a function of the net/http package or a method of http.Client that +// returns (*http.Response, error). +func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { + fun, _ := expr.Fun.(*ast.SelectorExpr) + sig, _ := info.Types[fun].Type.(*types.Signature) + if sig == nil { + return false // the call is not of the form x.f() + } + + res := sig.Results() + if res.Len() != 2 { + return false // the function called does not return two values. + } + if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !isNamedType(ptr.Elem(), "net/http", "Response") { + return false // the first return type is not *http.Response. + } + + errorType := types.Universe.Lookup("error").Type() + if !types.Identical(res.At(1).Type(), errorType) { + return false // the second return type is not error + } + + typ := info.Types[fun.X].Type + if typ == nil { + id, ok := fun.X.(*ast.Ident) + return ok && id.Name == "http" // function in net/http package. + } + + if isNamedType(typ, "net/http", "Client") { + return true // method on http.Client. + } + ptr, ok := typ.(*types.Pointer) + return ok && isNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. +} + +// restOfBlock, given a traversal stack, finds the innermost containing +// block and returns the suffix of its statements starting with the +// current node (the last element of stack). +func restOfBlock(stack []ast.Node) []ast.Stmt { + for i := len(stack) - 1; i >= 0; i-- { + if b, ok := stack[i].(*ast.BlockStmt); ok { + for j, v := range b.List { + if v == stack[i+1] { + return b.List[j:] + } + } + break + } + } + return nil +} + +// rootIdent finds the root identifier x in a chain of selections x.y.z, or nil if not found. +func rootIdent(n ast.Node) *ast.Ident { + switch n := n.(type) { + case *ast.SelectorExpr: + return rootIdent(n.X) + case *ast.Ident: + return n + default: + return nil + } +} + +// isNamedType reports whether t is the named type path.name. +func isNamedType(t types.Type, path, name string) bool { + n, ok := t.(*types.Named) + if !ok { + return false + } + obj := n.Obj() + return obj.Name() == name && obj.Pkg() != nil && obj.Pkg().Path() == path +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go new file mode 100644 index 000000000..fd2285332 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -0,0 +1,105 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package ifaceassert defines an Analyzer that flags +// impossible interface-interface type assertions. +package ifaceassert + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `detect impossible interface-to-interface type assertions + +This checker flags type assertions v.(T) and corresponding type-switch cases +in which the static type V of v is an interface that cannot possibly implement +the target interface T. This occurs when V and T contain methods with the same +name but different signatures. Example: + + var v interface { + Read() + } + _ = v.(io.Reader) + +The Read method in v has a different signature than the Read method in +io.Reader, so this assertion cannot succeed. +` + +var Analyzer = &analysis.Analyzer{ + Name: "ifaceassert", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +// assertableTo checks whether interface v can be asserted into t. It returns +// nil on success, or the first conflicting method on failure. +func assertableTo(v, t types.Type) *types.Func { + if t == nil || v == nil { + // not assertable to, but there is no missing method + return nil + } + // ensure that v and t are interfaces + V, _ := v.Underlying().(*types.Interface) + T, _ := t.Underlying().(*types.Interface) + if V == nil || T == nil { + return nil + } + if f, wrongType := types.MissingMethod(V, T, false); wrongType { + return f + } + return nil +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.TypeAssertExpr)(nil), + (*ast.TypeSwitchStmt)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + var ( + assert *ast.TypeAssertExpr // v.(T) expression + targets []ast.Expr // interfaces T in v.(T) + ) + switch n := n.(type) { + case *ast.TypeAssertExpr: + // take care of v.(type) in *ast.TypeSwitchStmt + if n.Type == nil { + return + } + assert = n + targets = append(targets, n.Type) + case *ast.TypeSwitchStmt: + // retrieve type assertion from type switch's 'assign' field + switch t := n.Assign.(type) { + case *ast.ExprStmt: + assert = t.X.(*ast.TypeAssertExpr) + case *ast.AssignStmt: + assert = t.Rhs[0].(*ast.TypeAssertExpr) + } + // gather target types from case clauses + for _, c := range n.Body.List { + targets = append(targets, c.(*ast.CaseClause).List...) + } + } + V := pass.TypesInfo.TypeOf(assert.X) + for _, target := range targets { + T := pass.TypesInfo.TypeOf(target) + if f := assertableTo(V, T); f != nil { + pass.Reportf( + target.Pos(), + "impossible type assertion: no type can implement both %v and %v (conflicting types for %v method)", + V, T, f.Name(), + ) + } + } + }) + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go new file mode 100644 index 000000000..4bb652a72 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspect defines an Analyzer that provides an AST inspector +// (golang.org/x/tools/go/ast/inspector.Inspector) for the syntax trees +// of a package. It is only a building block for other analyzers. 
+// +// Example of use in another analysis: +// +// import ( +// "golang.org/x/tools/go/analysis" +// "golang.org/x/tools/go/analysis/passes/inspect" +// "golang.org/x/tools/go/ast/inspector" +// ) +// +// var Analyzer = &analysis.Analyzer{ +// ... +// Requires: []*analysis.Analyzer{inspect.Analyzer}, +// } +// +// func run(pass *analysis.Pass) (interface{}, error) { +// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) +// inspect.Preorder(nil, func(n ast.Node) { +// ... +// }) +// return nil +// } +// +package inspect + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "inspect", + Doc: "optimize AST traversal for later passes", + Run: run, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(new(inspector.Inspector)), +} + +func run(pass *analysis.Pass) (interface{}, error) { + return inspector.New(pass.Files), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go new file mode 100644 index 000000000..ac37e4784 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go @@ -0,0 +1,120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisutil defines various helper functions +// used by two or more packages beneath go/analysis. +package analysisutil + +import ( + "bytes" + "go/ast" + "go/printer" + "go/token" + "go/types" + "io/ioutil" +) + +// Format returns a string representation of the expression. +func Format(fset *token.FileSet, x ast.Expr) string { + var b bytes.Buffer + printer.Fprint(&b, fset, x) + return b.String() +} + +// HasSideEffects reports whether evaluation of e has side effects. +func HasSideEffects(info *types.Info, e ast.Expr) bool { + safe := true + ast.Inspect(e, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.CallExpr: + typVal := info.Types[n.Fun] + switch { + case typVal.IsType(): + // Type conversion, which is safe. + case typVal.IsBuiltin(): + // Builtin func, conservatively assumed to not + // be safe for now. + safe = false + return false + default: + // A non-builtin func or method call. + // Conservatively assume that all of them have + // side effects for now. + safe = false + return false + } + case *ast.UnaryExpr: + if n.Op == token.ARROW { + safe = false + return false + } + } + return true + }) + return !safe +} + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} + +// ReadFile reads a file and adds it to the FileSet +// so that we can report errors against it using lineStart. +func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, nil, err + } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil +} + +// LineStart returns the position of the start of the specified line +// within file f, or NoPos if there is no line of that number. +func LineStart(f *token.File, line int) token.Pos { + // Use binary search to find the start offset of this line. 
+ // + // TODO(adonovan): eventually replace this function with the + // simpler and more efficient (*go/token.File).LineStart, added + // in go1.12. + + min := 0 // inclusive + max := f.Size() // exclusive + for { + offset := (min + max) / 2 + pos := f.Pos(offset) + posn := f.Position(pos) + if posn.Line == line { + return pos - (token.Pos(posn.Column) - 1) + } + + if min+1 >= max { + return token.NoPos + } + + if posn.Line < line { + min = offset + } else { + max = offset + } + } +} + +// Imports returns true if path is imported by pkg. +func Imports(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go new file mode 100644 index 000000000..3ea91574d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go @@ -0,0 +1,165 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loopclosure defines an Analyzer that checks for references to +// enclosing loop variables from within nested functions. +package loopclosure + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `check references to loop variables from within nested functions + +This analyzer checks for references to loop variables from within a +function literal inside the loop body. It checks only instances where +the function literal is called in a defer or go statement that is the +last statement in the loop body, as otherwise we would need whole +program analysis. + +For example: + + for i, v := range s { + go func() { + println(i, v) // not what you might expect + }() + } + +See: https://golang.org/doc/go_faq.html#closures_and_goroutines` + +var Analyzer = &analysis.Analyzer{ + Name: "loopclosure", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.RangeStmt)(nil), + (*ast.ForStmt)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + // Find the variables updated by the loop statement. + var vars []*ast.Ident + addVar := func(expr ast.Expr) { + if id, ok := expr.(*ast.Ident); ok { + vars = append(vars, id) + } + } + var body *ast.BlockStmt + switch n := n.(type) { + case *ast.RangeStmt: + body = n.Body + addVar(n.Key) + addVar(n.Value) + case *ast.ForStmt: + body = n.Body + switch post := n.Post.(type) { + case *ast.AssignStmt: + // e.g. for p = head; p != nil; p = p.next + for _, lhs := range post.Lhs { + addVar(lhs) + } + case *ast.IncDecStmt: + // e.g. for i := 0; i < n; i++ + addVar(post.X) + } + } + if vars == nil { + return + } + + // Inspect a go or defer statement + // if it's the last one in the loop body. + // (We give up if there are following statements, + // because it's hard to prove go isn't followed by wait, + // or defer by return.) + if len(body.List) == 0 { + return + } + // The function invoked in the last return statement. 
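+		//
+		// Illustrative sketch (editor's addition): a matched loop looks like
+		//
+		//	for i, v := range s {
+		//		go func() { println(i, v) }() // last statement: go (or defer) calling a func literal
+		//	}
+		//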
+ var fun ast.Expr + switch s := body.List[len(body.List)-1].(type) { + case *ast.GoStmt: + fun = s.Call.Fun + case *ast.DeferStmt: + fun = s.Call.Fun + case *ast.ExprStmt: // check for errgroup.Group.Go() + if call, ok := s.X.(*ast.CallExpr); ok { + fun = goInvokes(pass.TypesInfo, call) + } + } + lit, ok := fun.(*ast.FuncLit) + if !ok { + return + } + ast.Inspect(lit.Body, func(n ast.Node) bool { + id, ok := n.(*ast.Ident) + if !ok || id.Obj == nil { + return true + } + if pass.TypesInfo.Types[id].Type == nil { + // Not referring to a variable (e.g. struct field name) + return true + } + for _, v := range vars { + if v.Obj == id.Obj { + pass.ReportRangef(id, "loop variable %s captured by func literal", + id.Name) + } + } + return true + }) + }) + return nil, nil +} + +// goInvokes returns a function expression that would be called asynchronously +// (but not awaited) in another goroutine as a consequence of the call. +// For example, given the g.Go call below, it returns the function literal expression. +// +// import "sync/errgroup" +// var g errgroup.Group +// g.Go(func() error { ... }) +// +// Currently only "golang.org/x/sync/errgroup.Group()" is considered. +func goInvokes(info *types.Info, call *ast.CallExpr) ast.Expr { + f := typeutil.StaticCallee(info, call) + // Note: Currently only supports: golang.org/x/sync/errgroup.Go. + if f == nil || f.Name() != "Go" { + return nil + } + recv := f.Type().(*types.Signature).Recv() + if recv == nil { + return nil + } + rtype, ok := recv.Type().(*types.Pointer) + if !ok { + return nil + } + named, ok := rtype.Elem().(*types.Named) + if !ok { + return nil + } + if named.Obj().Name() != "Group" { + return nil + } + pkg := f.Pkg() + if pkg == nil { + return nil + } + if pkg.Path() != "golang.org/x/sync/errgroup" { + return nil + } + return call.Args[0] +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go new file mode 100644 index 000000000..de6f840f6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -0,0 +1,330 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lostcancel defines an Analyzer that checks for failure to +// call a context cancellation function. +package lostcancel + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/ctrlflow" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/cfg" +) + +const Doc = `check cancel func returned by context.WithCancel is called + +The cancellation function returned by context.WithCancel, WithTimeout, +and WithDeadline must be called or the new context will remain live +until its parent context is cancelled. +(The background context is never cancelled.)` + +var Analyzer = &analysis.Analyzer{ + Name: "lostcancel", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + ctrlflow.Analyzer, + }, +} + +const debug = false + +var contextPackage = "context" + +// checkLostCancel reports a failure to the call the cancel function +// returned by context.WithCancel, either because the variable was +// assigned to the blank identifier, or because there exists a +// control-flow path from the call to a return statement and that path +// does not "use" the cancel function. 
Any reference to the variable +// counts as a use, even within a nested function literal. +// If the variable's scope is larger than the function +// containing the assignment, we assume that other uses exist. +// +// checkLostCancel analyzes a single named or literal function. +func run(pass *analysis.Pass) (interface{}, error) { + // Fast path: bypass check if file doesn't use context.WithCancel. + if !hasImport(pass.Pkg, contextPackage) { + return nil, nil + } + + // Call runFunc for each Func{Decl,Lit}. + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeTypes := []ast.Node{ + (*ast.FuncLit)(nil), + (*ast.FuncDecl)(nil), + } + inspect.Preorder(nodeTypes, func(n ast.Node) { + runFunc(pass, n) + }) + return nil, nil +} + +func runFunc(pass *analysis.Pass, node ast.Node) { + // Find scope of function node + var funcScope *types.Scope + switch v := node.(type) { + case *ast.FuncLit: + funcScope = pass.TypesInfo.Scopes[v.Type] + case *ast.FuncDecl: + funcScope = pass.TypesInfo.Scopes[v.Type] + } + + // Maps each cancel variable to its defining ValueSpec/AssignStmt. + cancelvars := make(map[*types.Var]ast.Node) + + // TODO(adonovan): opt: refactor to make a single pass + // over the AST using inspect.WithStack and node types + // {FuncDecl,FuncLit,CallExpr,SelectorExpr}. + + // Find the set of cancel vars to analyze. + stack := make([]ast.Node, 0, 32) + ast.Inspect(node, func(n ast.Node) bool { + switch n.(type) { + case *ast.FuncLit: + if len(stack) > 0 { + return false // don't stray into nested functions + } + case nil: + stack = stack[:len(stack)-1] // pop + return true + } + stack = append(stack, n) // push + + // Look for [{AssignStmt,ValueSpec} CallExpr SelectorExpr]: + // + // ctx, cancel := context.WithCancel(...) + // ctx, cancel = context.WithCancel(...) + // var ctx, cancel = context.WithCancel(...) + // + if !isContextWithCancel(pass.TypesInfo, n) || !isCall(stack[len(stack)-2]) { + return true + } + var id *ast.Ident // id of cancel var + stmt := stack[len(stack)-3] + switch stmt := stmt.(type) { + case *ast.ValueSpec: + if len(stmt.Names) > 1 { + id = stmt.Names[1] + } + case *ast.AssignStmt: + if len(stmt.Lhs) > 1 { + id, _ = stmt.Lhs[1].(*ast.Ident) + } + } + if id != nil { + if id.Name == "_" { + pass.ReportRangef(id, + "the cancel function returned by context.%s should be called, not discarded, to avoid a context leak", + n.(*ast.SelectorExpr).Sel.Name) + } else if v, ok := pass.TypesInfo.Uses[id].(*types.Var); ok { + // If the cancel variable is defined outside function scope, + // do not analyze it. + if funcScope.Contains(v.Pos()) { + cancelvars[v] = stmt + } + } else if v, ok := pass.TypesInfo.Defs[id].(*types.Var); ok { + cancelvars[v] = stmt + } + } + return true + }) + + if len(cancelvars) == 0 { + return // no need to inspect CFG + } + + // Obtain the CFG. + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + var g *cfg.CFG + var sig *types.Signature + switch node := node.(type) { + case *ast.FuncDecl: + sig, _ = pass.TypesInfo.Defs[node.Name].Type().(*types.Signature) + if node.Name.Name == "main" && sig.Recv() == nil && pass.Pkg.Name() == "main" { + // Returning from main.main terminates the process, + // so there's no need to cancel contexts. + return + } + g = cfgs.FuncDecl(node) + + case *ast.FuncLit: + sig, _ = pass.TypesInfo.Types[node.Type].Type.(*types.Signature) + g = cfgs.FuncLit(node) + } + if sig == nil { + return // missing type information + } + + // Print CFG. 
+ if debug { + fmt.Println(g.Format(pass.Fset)) + } + + // Examine the CFG for each variable in turn. + // (It would be more efficient to analyze all cancelvars in a + // single pass over the AST, but seldom is there more than one.) + for v, stmt := range cancelvars { + if ret := lostCancelPath(pass, g, v, stmt, sig); ret != nil { + lineno := pass.Fset.Position(stmt.Pos()).Line + pass.ReportRangef(stmt, "the %s function is not used on all paths (possible context leak)", v.Name()) + pass.ReportRangef(ret, "this return statement may be reached without using the %s var defined on line %d", v.Name(), lineno) + } + } +} + +func isCall(n ast.Node) bool { _, ok := n.(*ast.CallExpr); return ok } + +func hasImport(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} + +// isContextWithCancel reports whether n is one of the qualified identifiers +// context.With{Cancel,Timeout,Deadline}. +func isContextWithCancel(info *types.Info, n ast.Node) bool { + sel, ok := n.(*ast.SelectorExpr) + if !ok { + return false + } + switch sel.Sel.Name { + case "WithCancel", "WithTimeout", "WithDeadline": + default: + return false + } + if x, ok := sel.X.(*ast.Ident); ok { + if pkgname, ok := info.Uses[x].(*types.PkgName); ok { + return pkgname.Imported().Path() == contextPackage + } + // Import failed, so we can't check package path. + // Just check the local package name (heuristic). + return x.Name == "context" + } + return false +} + +// lostCancelPath finds a path through the CFG, from stmt (which defines +// the 'cancel' variable v) to a return statement, that doesn't "use" v. +// If it finds one, it returns the return statement (which may be synthetic). +// sig is the function's type, if known. +func lostCancelPath(pass *analysis.Pass, g *cfg.CFG, v *types.Var, stmt ast.Node, sig *types.Signature) *ast.ReturnStmt { + vIsNamedResult := sig != nil && tupleContains(sig.Results(), v) + + // uses reports whether stmts contain a "use" of variable v. + uses := func(pass *analysis.Pass, v *types.Var, stmts []ast.Node) bool { + found := false + for _, stmt := range stmts { + ast.Inspect(stmt, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.Ident: + if pass.TypesInfo.Uses[n] == v { + found = true + } + case *ast.ReturnStmt: + // A naked return statement counts as a use + // of the named result variables. + if n.Results == nil && vIsNamedResult { + found = true + } + } + return !found + }) + } + return found + } + + // blockUses computes "uses" for each block, caching the result. + memo := make(map[*cfg.Block]bool) + blockUses := func(pass *analysis.Pass, v *types.Var, b *cfg.Block) bool { + res, ok := memo[b] + if !ok { + res = uses(pass, v, b.Nodes) + memo[b] = res + } + return res + } + + // Find the var's defining block in the CFG, + // plus the rest of the statements of that block. + var defblock *cfg.Block + var rest []ast.Node +outer: + for _, b := range g.Blocks { + for i, n := range b.Nodes { + if n == stmt { + defblock = b + rest = b.Nodes[i+1:] + break outer + } + } + } + if defblock == nil { + panic("internal error: can't find defining block for cancel var") + } + + // Is v "used" in the remainder of its defining block? + if uses(pass, v, rest) { + return nil + } + + // Does the defining block return without using v? + if ret := defblock.Return(); ret != nil { + return ret + } + + // Search the CFG depth-first for a path, from defblock to a + // return block, in which v is never "used". 
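+	//
+	// Illustrative sketch (editor's addition): a path that triggers the report
+	// below looks like
+	//
+	//	ctx, cancel := context.WithCancel(ctx)
+	//	if err != nil {
+	//		return err // reachable without using cancel
+	//	}
+	//	defer cancel()
+	//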
+ seen := make(map[*cfg.Block]bool) + var search func(blocks []*cfg.Block) *ast.ReturnStmt + search = func(blocks []*cfg.Block) *ast.ReturnStmt { + for _, b := range blocks { + if seen[b] { + continue + } + seen[b] = true + + // Prune the search if the block uses v. + if blockUses(pass, v, b) { + continue + } + + // Found path to return statement? + if ret := b.Return(); ret != nil { + if debug { + fmt.Printf("found path to return in block %s\n", b) + } + return ret // found + } + + // Recur + if ret := search(b.Succs); ret != nil { + if debug { + fmt.Printf(" from block %s\n", b) + } + return ret + } + } + return nil + } + return search(defblock.Succs) +} + +func tupleContains(tuple *types.Tuple, v *types.Var) bool { + for i := 0; i < tuple.Len(); i++ { + if tuple.At(i) == v { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go new file mode 100644 index 000000000..cd42c9897 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nilfunc defines an Analyzer that checks for useless +// comparisons against nil. +package nilfunc + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for useless comparisons between functions and nil + +A useless comparison is one like f == nil as opposed to f() == nil.` + +var Analyzer = &analysis.Analyzer{ + Name: "nilfunc", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.BinaryExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + e := n.(*ast.BinaryExpr) + + // Only want == or != comparisons. + if e.Op != token.EQL && e.Op != token.NEQ { + return + } + + // Only want comparisons with a nil identifier on one side. + var e2 ast.Expr + switch { + case pass.TypesInfo.Types[e.X].IsNil(): + e2 = e.Y + case pass.TypesInfo.Types[e.Y].IsNil(): + e2 = e.X + default: + return + } + + // Only want identifiers or selector expressions. + var obj types.Object + switch v := e2.(type) { + case *ast.Ident: + obj = pass.TypesInfo.Uses[v] + case *ast.SelectorExpr: + obj = pass.TypesInfo.Uses[v.Sel] + default: + return + } + + // Only want functions. + if _, ok := obj.(*types.Func); !ok { + return + } + + pass.ReportRangef(e, "comparison of function %v %v nil is always %v", obj.Name(), e.Op, e.Op == token.NEQ) + }) + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go new file mode 100644 index 000000000..f0d2c7edf --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -0,0 +1,354 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nilness inspects the control-flow graph of an SSA function +// and reports errors such as nil pointer dereferences and degenerate +// nil pointer comparisons. 
+package nilness + +import ( + "fmt" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +const Doc = `check for redundant or impossible nil comparisons + +The nilness checker inspects the control-flow graph of each function in +a package and reports nil pointer dereferences, degenerate nil +pointers, and panics with nil values. A degenerate comparison is of the form +x==nil or x!=nil where x is statically known to be nil or non-nil. These are +often a mistake, especially in control flow related to errors. Panics with nil +values are checked because they are not detectable by + + if r := recover(); r != nil { + +This check reports conditions such as: + + if f == nil { // impossible condition (f is a function) + } + +and: + + p := &v + ... + if p != nil { // tautological condition + } + +and: + + if p == nil { + print(*p) // nil dereference + } + +and: + + if p == nil { + panic(p) + } +` + +var Analyzer = &analysis.Analyzer{ + Name: "nilness", + Doc: Doc, + Run: run, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + for _, fn := range ssainput.SrcFuncs { + runFunc(pass, fn) + } + return nil, nil +} + +func runFunc(pass *analysis.Pass, fn *ssa.Function) { + reportf := func(category string, pos token.Pos, format string, args ...interface{}) { + pass.Report(analysis.Diagnostic{ + Pos: pos, + Category: category, + Message: fmt.Sprintf(format, args...), + }) + } + + // notNil reports an error if v is provably nil. + notNil := func(stack []fact, instr ssa.Instruction, v ssa.Value, descr string) { + if nilnessOf(stack, v) == isnil { + reportf("nilderef", instr.Pos(), "nil dereference in "+descr) + } + } + + // visit visits reachable blocks of the CFG in dominance order, + // maintaining a stack of dominating nilness facts. + // + // By traversing the dom tree, we can pop facts off the stack as + // soon as we've visited a subtree. Had we traversed the CFG, + // we would need to retain the set of facts for each block. + seen := make([]bool, len(fn.Blocks)) // seen[i] means visit should ignore block i + var visit func(b *ssa.BasicBlock, stack []fact) + visit = func(b *ssa.BasicBlock, stack []fact) { + if seen[b.Index] { + return + } + seen[b.Index] = true + + // Report nil dereferences. + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case ssa.CallInstruction: + notNil(stack, instr, instr.Common().Value, + instr.Common().Description()) + case *ssa.FieldAddr: + notNil(stack, instr, instr.X, "field selection") + case *ssa.IndexAddr: + notNil(stack, instr, instr.X, "index operation") + case *ssa.MapUpdate: + notNil(stack, instr, instr.Map, "map update") + case *ssa.Slice: + // A nilcheck occurs in ptr[:] iff ptr is a pointer to an array. 
+ if _, ok := instr.X.Type().Underlying().(*types.Pointer); ok { + notNil(stack, instr, instr.X, "slice operation") + } + case *ssa.Store: + notNil(stack, instr, instr.Addr, "store") + case *ssa.TypeAssert: + if !instr.CommaOk { + notNil(stack, instr, instr.X, "type assertion") + } + case *ssa.UnOp: + if instr.Op == token.MUL { // *X + notNil(stack, instr, instr.X, "load") + } + } + } + + // Look for panics with nil value + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *ssa.Panic: + if nilnessOf(stack, instr.X) == isnil { + reportf("nilpanic", instr.Pos(), "panic with nil value") + } + } + } + + // For nil comparison blocks, report an error if the condition + // is degenerate, and push a nilness fact on the stack when + // visiting its true and false successor blocks. + if binop, tsucc, fsucc := eq(b); binop != nil { + xnil := nilnessOf(stack, binop.X) + ynil := nilnessOf(stack, binop.Y) + + if ynil != unknown && xnil != unknown && (xnil == isnil || ynil == isnil) { + // Degenerate condition: + // the nilness of both operands is known, + // and at least one of them is nil. + var adj string + if (xnil == ynil) == (binop.Op == token.EQL) { + adj = "tautological" + } else { + adj = "impossible" + } + reportf("cond", binop.Pos(), "%s condition: %s %s %s", adj, xnil, binop.Op, ynil) + + // If tsucc's or fsucc's sole incoming edge is impossible, + // it is unreachable. Prune traversal of it and + // all the blocks it dominates. + // (We could be more precise with full dataflow + // analysis of control-flow joins.) + var skip *ssa.BasicBlock + if xnil == ynil { + skip = fsucc + } else { + skip = tsucc + } + for _, d := range b.Dominees() { + if d == skip && len(d.Preds) == 1 { + continue + } + visit(d, stack) + } + return + } + + // "if x == nil" or "if nil == y" condition; x, y are unknown. + if xnil == isnil || ynil == isnil { + var newFacts facts + if xnil == isnil { + // x is nil, y is unknown: + // t successor learns y is nil. + newFacts = expandFacts(fact{binop.Y, isnil}) + } else { + // x is nil, y is unknown: + // t successor learns x is nil. + newFacts = expandFacts(fact{binop.X, isnil}) + } + + for _, d := range b.Dominees() { + // Successor blocks learn a fact + // only at non-critical edges. + // (We could do be more precise with full dataflow + // analysis of control-flow joins.) + s := stack + if len(d.Preds) == 1 { + if d == tsucc { + s = append(s, newFacts...) + } else if d == fsucc { + s = append(s, newFacts.negate()...) + } + } + visit(d, s) + } + return + } + } + + for _, d := range b.Dominees() { + visit(d, stack) + } + } + + // Visit the entry block. No need to visit fn.Recover. + if fn.Blocks != nil { + visit(fn.Blocks[0], make([]fact, 0, 20)) // 20 is plenty + } +} + +// A fact records that a block is dominated +// by the condition v == nil or v != nil. +type fact struct { + value ssa.Value + nilness nilness +} + +func (f fact) negate() fact { return fact{f.value, -f.nilness} } + +type nilness int + +const ( + isnonnil = -1 + unknown nilness = 0 + isnil = 1 +) + +var nilnessStrings = []string{"non-nil", "unknown", "nil"} + +func (n nilness) String() string { return nilnessStrings[n+1] } + +// nilnessOf reports whether v is definitely nil, definitely not nil, +// or unknown given the dominating stack of facts. +func nilnessOf(stack []fact, v ssa.Value) nilness { + switch v := v.(type) { + // unwrap ChangeInterface values recursively, to detect if underlying + // values have any facts recorded or are otherwise known with regard to nilness. 
+ // + // This work must be in addition to expanding facts about + // ChangeInterfaces during inference/fact gathering because this covers + // cases where the nilness of a value is intrinsic, rather than based + // on inferred facts, such as a zero value interface variable. That + // said, this work alone would only inform us when facts are about + // underlying values, rather than outer values, when the analysis is + // transitive in both directions. + case *ssa.ChangeInterface: + if underlying := nilnessOf(stack, v.X); underlying != unknown { + return underlying + } + } + + // Is value intrinsically nil or non-nil? + switch v := v.(type) { + case *ssa.Alloc, + *ssa.FieldAddr, + *ssa.FreeVar, + *ssa.Function, + *ssa.Global, + *ssa.IndexAddr, + *ssa.MakeChan, + *ssa.MakeClosure, + *ssa.MakeInterface, + *ssa.MakeMap, + *ssa.MakeSlice: + return isnonnil + case *ssa.Const: + if v.IsNil() { + return isnil + } else { + return isnonnil + } + } + + // Search dominating control-flow facts. + for _, f := range stack { + if f.value == v { + return f.nilness + } + } + return unknown +} + +// If b ends with an equality comparison, eq returns the operation and +// its true (equal) and false (not equal) successors. +func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) { + if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + if binop, ok := If.Cond.(*ssa.BinOp); ok { + switch binop.Op { + case token.EQL: + return binop, b.Succs[0], b.Succs[1] + case token.NEQ: + return binop, b.Succs[1], b.Succs[0] + } + } + } + return nil, nil, nil +} + +// expandFacts takes a single fact and returns the set of facts that can be +// known about it or any of its related values. Some operations, like +// ChangeInterface, have transitive nilness, such that if you know the +// underlying value is nil, you also know the value itself is nil, and vice +// versa. This operation allows callers to match on any of the related values +// in analyses, rather than just the one form of the value that happend to +// appear in a comparison. +// +// This work must be in addition to unwrapping values within nilnessOf because +// while this work helps give facts about transitively known values based on +// inferred facts, the recursive check within nilnessOf covers cases where +// nilness facts are intrinsic to the underlying value, such as a zero value +// interface variables. +// +// ChangeInterface is the only expansion currently supported, but others, like +// Slice, could be added. At this time, this tool does not check slice +// operations in a way this expansion could help. See +// https://play.golang.org/p/mGqXEp7w4fR for an example. +func expandFacts(f fact) []fact { + ff := []fact{f} + +Loop: + for { + switch v := f.value.(type) { + case *ssa.ChangeInterface: + f = fact{v.X, f.nilness} + ff = append(ff, f) + default: + break Loop + } + } + + return ff +} + +type facts []fact + +func (ff facts) negate() facts { + nn := make([]fact, len(ff)) + for i, f := range ff { + nn[i] = f.negate() + } + return nn +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go new file mode 100644 index 000000000..2262fc4f1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go @@ -0,0 +1,127 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// The pkgfact package is a demonstration and test of the package fact +// mechanism. +// +// The output of the pkgfact analysis is a set of key/values pairs +// gathered from the analyzed package and its imported dependencies. +// Each key/value pair comes from a top-level constant declaration +// whose name starts and ends with "_". For example: +// +// package p +// +// const _greeting_ = "hello" +// const _audience_ = "world" +// +// the pkgfact analysis output for package p would be: +// +// {"greeting": "hello", "audience": "world"}. +// +// In addition, the analysis reports a diagnostic at each import +// showing which key/value pairs it contributes. +package pkgfact + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "pkgfact", + Doc: "gather name/value pairs from constant declarations", + Run: run, + FactTypes: []analysis.Fact{new(pairsFact)}, + ResultType: reflect.TypeOf(map[string]string{}), +} + +// A pairsFact is a package-level fact that records +// an set of key=value strings accumulated from constant +// declarations in this package and its dependencies. +// Elements are ordered by keys, which are unique. +type pairsFact []string + +func (f *pairsFact) AFact() {} +func (f *pairsFact) String() string { return "pairs(" + strings.Join(*f, ", ") + ")" } + +func run(pass *analysis.Pass) (interface{}, error) { + result := make(map[string]string) + + // At each import, print the fact from the imported + // package and accumulate its information into the result. + // (Warning: accumulation leads to quadratic growth of work.) + doImport := func(spec *ast.ImportSpec) { + pkg := imported(pass.TypesInfo, spec) + var fact pairsFact + if pass.ImportPackageFact(pkg, &fact) { + for _, pair := range fact { + eq := strings.IndexByte(pair, '=') + result[pair[:eq]] = pair[1+eq:] + } + pass.ReportRangef(spec, "%s", strings.Join(fact, " ")) + } + } + + // At each "const _name_ = value", add a fact into env. + doConst := func(spec *ast.ValueSpec) { + if len(spec.Names) == len(spec.Values) { + for i := range spec.Names { + name := spec.Names[i].Name + if strings.HasPrefix(name, "_") && strings.HasSuffix(name, "_") { + + if key := strings.Trim(name, "_"); key != "" { + value := pass.TypesInfo.Types[spec.Values[i]].Value.String() + result[key] = value + } + } + } + } + } + + for _, f := range pass.Files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.GenDecl); ok { + for _, spec := range decl.Specs { + switch decl.Tok { + case token.IMPORT: + doImport(spec.(*ast.ImportSpec)) + case token.CONST: + doConst(spec.(*ast.ValueSpec)) + } + } + } + } + } + + // Sort/deduplicate the result and save it as a package fact. 
+ keys := make([]string, 0, len(result)) + for key := range result { + keys = append(keys, key) + } + sort.Strings(keys) + var fact pairsFact + for _, key := range keys { + fact = append(fact, fmt.Sprintf("%s=%s", key, result[key])) + } + if len(fact) > 0 { + pass.ExportPackageFact(&fact) + } + + return result, nil +} + +func imported(info *types.Info, spec *ast.ImportSpec) *types.Package { + obj, ok := info.Implicits[spec] + if !ok { + obj = info.Defs[spec.Name] // renaming import + } + return obj.(*types.PkgName).Imported() +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go new file mode 100644 index 000000000..6589478af --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -0,0 +1,1122 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package printf defines an Analyzer that checks consistency +// of Printf format strings and arguments. +package printf + +import ( + "bytes" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +func init() { + Analyzer.Flags.Var(isPrint, "funcs", "comma-separated list of print function names to check") +} + +var Analyzer = &analysis.Analyzer{ + Name: "printf", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, + ResultType: reflect.TypeOf((*Result)(nil)), + FactTypes: []analysis.Fact{new(isWrapper)}, +} + +const Doc = `check consistency of Printf format strings and arguments + +The check applies to known functions (for example, those in package fmt) +as well as any detected wrappers of known functions. + +A function that wants to avail itself of printf checking but is not +found by this analyzer's heuristics (for example, due to use of +dynamic calls) can insert a bogus call: + + if false { + _ = fmt.Sprintf(format, args...) // enable printf checking + } + +The -funcs flag specifies a comma-separated list of names of additional +known formatting functions or methods. If the name contains a period, +it must denote a specific function using one of the following forms: + + dir/pkg.Function + dir/pkg.Type.Method + (*dir/pkg.Type).Method + +Otherwise the name is interpreted as a case-insensitive unqualified +identifier such as "errorf". Either way, if a listed name ends in f, the +function is assumed to be Printf-like, taking a format string before the +argument list. Otherwise it is assumed to be Print-like, taking a list +of arguments with no format string. +` + +// Kind is a kind of fmt function behavior. +type Kind int + +const ( + KindNone Kind = iota // not a fmt wrapper function + KindPrint // function behaves like fmt.Print + KindPrintf // function behaves like fmt.Printf + KindErrorf // function behaves like fmt.Errorf +) + +func (kind Kind) String() string { + switch kind { + case KindPrint: + return "print" + case KindPrintf: + return "printf" + case KindErrorf: + return "errorf" + } + return "" +} + +// Result is the printf analyzer's result type. Clients may query the result +// to learn whether a function behaves like fmt.Print or fmt.Printf. 
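+//
+// Editor's note (illustrative sketch, not part of the upstream source): a
+// client analyzer that lists printf.Analyzer in its Requires can query it as
+//
+//	res := pass.ResultOf[printf.Analyzer].(*printf.Result)
+//	if res.Kind(fn) == printf.KindPrintf {
+//		// fn takes a format string followed by its arguments
+//	}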
+type Result struct { + funcs map[*types.Func]Kind +} + +// Kind reports whether fn behaves like fmt.Print or fmt.Printf. +func (r *Result) Kind(fn *types.Func) Kind { + _, ok := isPrint[fn.FullName()] + if !ok { + // Next look up just "printf", for use with -printf.funcs. + _, ok = isPrint[strings.ToLower(fn.Name())] + } + if ok { + if strings.HasSuffix(fn.Name(), "f") { + return KindPrintf + } else { + return KindPrint + } + } + + return r.funcs[fn] +} + +// isWrapper is a fact indicating that a function is a print or printf wrapper. +type isWrapper struct{ Kind Kind } + +func (f *isWrapper) AFact() {} + +func (f *isWrapper) String() string { + switch f.Kind { + case KindPrintf: + return "printfWrapper" + case KindPrint: + return "printWrapper" + case KindErrorf: + return "errorfWrapper" + default: + return "unknownWrapper" + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + res := &Result{ + funcs: make(map[*types.Func]Kind), + } + findPrintfLike(pass, res) + checkCall(pass) + return res, nil +} + +type printfWrapper struct { + obj *types.Func + fdecl *ast.FuncDecl + format *types.Var + args *types.Var + callers []printfCaller + failed bool // if true, not a printf wrapper +} + +type printfCaller struct { + w *printfWrapper + call *ast.CallExpr +} + +// maybePrintfWrapper decides whether decl (a declared function) may be a wrapper +// around a fmt.Printf or fmt.Print function. If so it returns a printfWrapper +// function describing the declaration. Later processing will analyze the +// graph of potential printf wrappers to pick out the ones that are true wrappers. +// A function may be a Printf or Print wrapper if its last argument is ...interface{}. +// If the next-to-last argument is a string, then this may be a Printf wrapper. +// Otherwise it may be a Print wrapper. +func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper { + // Look for functions with final argument type ...interface{}. + fdecl, ok := decl.(*ast.FuncDecl) + if !ok || fdecl.Body == nil { + return nil + } + fn, ok := info.Defs[fdecl.Name].(*types.Func) + // Type information may be incomplete. + if !ok { + return nil + } + + sig := fn.Type().(*types.Signature) + if !sig.Variadic() { + return nil // not variadic + } + + params := sig.Params() + nparams := params.Len() // variadic => nonzero + + args := params.At(nparams - 1) + iface, ok := args.Type().(*types.Slice).Elem().(*types.Interface) + if !ok || !iface.Empty() { + return nil // final (args) param is not ...interface{} + } + + // Is second last param 'format string'? + var format *types.Var + if nparams >= 2 { + if p := params.At(nparams - 2); p.Type() == types.Typ[types.String] { + format = p + } + } + + return &printfWrapper{ + obj: fn, + fdecl: fdecl, + format: format, + args: args, + } +} + +// findPrintfLike scans the entire package to find printf-like functions. +func findPrintfLike(pass *analysis.Pass, res *Result) (interface{}, error) { + // Gather potential wrappers and call graph between them. + byObj := make(map[*types.Func]*printfWrapper) + var wrappers []*printfWrapper + for _, file := range pass.Files { + for _, decl := range file.Decls { + w := maybePrintfWrapper(pass.TypesInfo, decl) + if w == nil { + continue + } + byObj[w.obj] = w + wrappers = append(wrappers, w) + } + } + + // Walk the graph to figure out which are really printf wrappers. + for _, w := range wrappers { + // Scan function for calls that could be to other printf-like functions. 
+ ast.Inspect(w.fdecl.Body, func(n ast.Node) bool { + if w.failed { + return false + } + + // TODO: Relax these checks; issue 26555. + if assign, ok := n.(*ast.AssignStmt); ok { + for _, lhs := range assign.Lhs { + if match(pass.TypesInfo, lhs, w.format) || + match(pass.TypesInfo, lhs, w.args) { + // Modifies the format + // string or args in + // some way, so not a + // simple wrapper. + w.failed = true + return false + } + } + } + if un, ok := n.(*ast.UnaryExpr); ok && un.Op == token.AND { + if match(pass.TypesInfo, un.X, w.format) || + match(pass.TypesInfo, un.X, w.args) { + // Taking the address of the + // format string or args, + // so not a simple wrapper. + w.failed = true + return false + } + } + + call, ok := n.(*ast.CallExpr) + if !ok || len(call.Args) == 0 || !match(pass.TypesInfo, call.Args[len(call.Args)-1], w.args) { + return true + } + + fn, kind := printfNameAndKind(pass, call) + if kind != 0 { + checkPrintfFwd(pass, w, call, kind, res) + return true + } + + // If the call is to another function in this package, + // maybe we will find out it is printf-like later. + // Remember this call for later checking. + if fn != nil && fn.Pkg() == pass.Pkg && byObj[fn] != nil { + callee := byObj[fn] + callee.callers = append(callee.callers, printfCaller{w, call}) + } + + return true + }) + } + return nil, nil +} + +func match(info *types.Info, arg ast.Expr, param *types.Var) bool { + id, ok := arg.(*ast.Ident) + return ok && info.ObjectOf(id) == param +} + +// checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly. +// It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...). +func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind Kind, res *Result) { + matched := kind == KindPrint || + kind != KindNone && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) + if !matched { + return + } + + if !call.Ellipsis.IsValid() { + typ, ok := pass.TypesInfo.Types[call.Fun].Type.(*types.Signature) + if !ok { + return + } + if len(call.Args) > typ.Params().Len() { + // If we're passing more arguments than what the + // print/printf function can take, adding an ellipsis + // would break the program. For example: + // + // func foo(arg1 string, arg2 ...interface{} { + // fmt.Printf("%s %v", arg1, arg2) + // } + return + } + desc := "printf" + if kind == KindPrint { + desc = "print" + } + pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", desc) + return + } + fn := w.obj + var fact isWrapper + if !pass.ImportObjectFact(fn, &fact) { + fact.Kind = kind + pass.ExportObjectFact(fn, &fact) + res.funcs[fn] = kind + for _, caller := range w.callers { + checkPrintfFwd(pass, caller.w, caller.call, kind, res) + } + } +} + +// isPrint records the print functions. +// If a key ends in 'f' then it is assumed to be a formatted print. +// +// Keys are either values returned by (*types.Func).FullName, +// or case-insensitive identifiers such as "errorf". +// +// The -funcs flag adds to this set. +// +// The set below includes facts for many important standard library +// functions, even though the analysis is capable of deducing that, for +// example, fmt.Printf forwards to fmt.Fprintf. We avoid relying on the +// driver applying analyzers to standard packages because "go vet" does +// not do so with gccgo, and nor do some other build systems. +// TODO(adonovan): eliminate the redundant facts once this restriction +// is lifted. 
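+//
+// As a worked example (the exact driver invocation is an assumption, not
+// something this package mandates), a project-specific helper can be added
+// through the flag that "go vet" exposes for this analyzer:
+//
+//	go vet -printf.funcs=example.com/project/logging.Debugf ./...
+//
+// Because the listed name ends in "f", Debugf is checked as a Printf-like
+// function: a format string followed by the arguments it describes.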
+// +var isPrint = stringSet{ + "fmt.Errorf": true, + "fmt.Fprint": true, + "fmt.Fprintf": true, + "fmt.Fprintln": true, + "fmt.Print": true, + "fmt.Printf": true, + "fmt.Println": true, + "fmt.Sprint": true, + "fmt.Sprintf": true, + "fmt.Sprintln": true, + + "runtime/trace.Logf": true, + + "log.Print": true, + "log.Printf": true, + "log.Println": true, + "log.Fatal": true, + "log.Fatalf": true, + "log.Fatalln": true, + "log.Panic": true, + "log.Panicf": true, + "log.Panicln": true, + "(*log.Logger).Fatal": true, + "(*log.Logger).Fatalf": true, + "(*log.Logger).Fatalln": true, + "(*log.Logger).Panic": true, + "(*log.Logger).Panicf": true, + "(*log.Logger).Panicln": true, + "(*log.Logger).Print": true, + "(*log.Logger).Printf": true, + "(*log.Logger).Println": true, + + "(*testing.common).Error": true, + "(*testing.common).Errorf": true, + "(*testing.common).Fatal": true, + "(*testing.common).Fatalf": true, + "(*testing.common).Log": true, + "(*testing.common).Logf": true, + "(*testing.common).Skip": true, + "(*testing.common).Skipf": true, + // *testing.T and B are detected by induction, but testing.TB is + // an interface and the inference can't follow dynamic calls. + "(testing.TB).Error": true, + "(testing.TB).Errorf": true, + "(testing.TB).Fatal": true, + "(testing.TB).Fatalf": true, + "(testing.TB).Log": true, + "(testing.TB).Logf": true, + "(testing.TB).Skip": true, + "(testing.TB).Skipf": true, +} + +// formatString returns the format string argument and its index within +// the given printf-like call expression. +// +// The last parameter before variadic arguments is assumed to be +// a format string. +// +// The first string literal or string constant is assumed to be a format string +// if the call's signature cannot be determined. +// +// If it cannot find any format string parameter, it returns ("", -1). +func formatString(pass *analysis.Pass, call *ast.CallExpr) (format string, idx int) { + typ := pass.TypesInfo.Types[call.Fun].Type + if typ != nil { + if sig, ok := typ.(*types.Signature); ok { + if !sig.Variadic() { + // Skip checking non-variadic functions. + return "", -1 + } + idx := sig.Params().Len() - 2 + if idx < 0 { + // Skip checking variadic functions without + // fixed arguments. + return "", -1 + } + s, ok := stringConstantArg(pass, call, idx) + if !ok { + // The last argument before variadic args isn't a string. + return "", -1 + } + return s, idx + } + } + + // Cannot determine call's signature. Fall back to scanning for the first + // string constant in the call. + for idx := range call.Args { + if s, ok := stringConstantArg(pass, call, idx); ok { + return s, idx + } + if pass.TypesInfo.Types[call.Args[idx]].Type == types.Typ[types.String] { + // Skip checking a call with a non-constant format + // string argument, since its contents are unavailable + // for validation. + return "", -1 + } + } + return "", -1 +} + +// stringConstantArg returns call's string constant argument at the index idx. +// +// ("", false) is returned if call's argument at the index idx isn't a string +// constant. +func stringConstantArg(pass *analysis.Pass, call *ast.CallExpr, idx int) (string, bool) { + if idx >= len(call.Args) { + return "", false + } + arg := call.Args[idx] + lit := pass.TypesInfo.Types[arg].Value + if lit != nil && lit.Kind() == constant.String { + return constant.StringVal(lit), true + } + return "", false +} + +// checkCall triggers the print-specific checks if the call invokes a print function. 
+func checkCall(pass *analysis.Pass) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, kind := printfNameAndKind(pass, call) + switch kind { + case KindPrintf, KindErrorf: + checkPrintf(pass, kind, call, fn) + case KindPrint: + checkPrint(pass, call, fn) + } + }) +} + +func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind Kind) { + fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if fn == nil { + return nil, 0 + } + + _, ok := isPrint[fn.FullName()] + if !ok { + // Next look up just "printf", for use with -printf.funcs. + _, ok = isPrint[strings.ToLower(fn.Name())] + } + if ok { + if fn.Name() == "Errorf" { + kind = KindErrorf + } else if strings.HasSuffix(fn.Name(), "f") { + kind = KindPrintf + } else { + kind = KindPrint + } + return fn, kind + } + + var fact isWrapper + if pass.ImportObjectFact(fn, &fact) { + return fn, fact.Kind + } + + return fn, KindNone +} + +// isFormatter reports whether t could satisfy fmt.Formatter. +// The only interface method to look for is "Format(State, rune)". +func isFormatter(typ types.Type) bool { + // If the type is an interface, the value it holds might satisfy fmt.Formatter. + if _, ok := typ.Underlying().(*types.Interface); ok { + return true + } + obj, _, _ := types.LookupFieldOrMethod(typ, false, nil, "Format") + fn, ok := obj.(*types.Func) + if !ok { + return false + } + sig := fn.Type().(*types.Signature) + return sig.Params().Len() == 2 && + sig.Results().Len() == 0 && + isNamed(sig.Params().At(0).Type(), "fmt", "State") && + types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune]) +} + +func isNamed(T types.Type, pkgpath, name string) bool { + named, ok := T.(*types.Named) + return ok && named.Obj().Pkg().Path() == pkgpath && named.Obj().Name() == name +} + +// formatState holds the parsed representation of a printf directive such as "%3.*[4]d". +// It is constructed by parsePrintfVerb. +type formatState struct { + verb rune // the format verb: 'd' for "%d" + format string // the full format directive from % through verb, "%.3d". + name string // Printf, Sprintf etc. + flags []byte // the list of # + etc. + argNums []int // the successive argument numbers that are consumed, adjusted to refer to actual arg in call + firstArg int // Index of first argument after the format in the Printf call. + // Used only during parse. + pass *analysis.Pass + call *ast.CallExpr + argNum int // Which argument we're expecting to format now. + hasIndex bool // Whether the argument is indexed. + indexPending bool // Whether we have an indexed argument that has not resolved. + nbytes int // number of bytes of the format string consumed. +} + +// checkPrintf checks a call to a formatted print routine such as Printf. +func checkPrintf(pass *analysis.Pass, kind Kind, call *ast.CallExpr, fn *types.Func) { + format, idx := formatString(pass, call) + if idx < 0 { + if false { + pass.Reportf(call.Lparen, "can't check non-constant format in call to %s", fn.FullName()) + } + return + } + + firstArg := idx + 1 // Arguments are immediately after format string. + if !strings.Contains(format, "%") { + if len(call.Args) > firstArg { + pass.Reportf(call.Lparen, "%s call has arguments but no formatting directives", fn.FullName()) + } + return + } + // Hard part: check formats against args. 
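+	// For intuition, the loop below is what ultimately reports calls such as
+	// (illustrative examples only):
+	//
+	//	fmt.Printf("%d", "three")        // %d given a string argument
+	//	fmt.Printf("%s %s", "only one")  // fewer arguments than directives
+	//	fmt.Errorf("%w, %w", err1, err2) // more than one error-wrapping %w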
+ argNum := firstArg + maxArgNum := firstArg + anyIndex := false + anyW := false + for i, w := 0, 0; i < len(format); i += w { + w = 1 + if format[i] != '%' { + continue + } + state := parsePrintfVerb(pass, call, fn.FullName(), format[i:], firstArg, argNum) + if state == nil { + return + } + w = len(state.format) + if !okPrintfArg(pass, call, state) { // One error per format is enough. + return + } + if state.hasIndex { + anyIndex = true + } + if state.verb == 'w' { + switch kind { + case KindNone, KindPrint: + pass.Reportf(call.Pos(), "%s does not support error-wrapping directive %%w", state.name) + return + case KindPrintf: + pass.Reportf(call.Pos(), "%s call has error-wrapping directive %%w, which is only supported for functions backed by fmt.Errorf", state.name) + return + } + if anyW { + pass.Reportf(call.Pos(), "%s call has more than one error-wrapping directive %%w", state.name) + return + } + anyW = true + } + if len(state.argNums) > 0 { + // Continue with the next sequential argument. + argNum = state.argNums[len(state.argNums)-1] + 1 + } + for _, n := range state.argNums { + if n >= maxArgNum { + maxArgNum = n + 1 + } + } + } + // Dotdotdot is hard. + if call.Ellipsis.IsValid() && maxArgNum >= len(call.Args)-1 { + return + } + // If any formats are indexed, extra arguments are ignored. + if anyIndex { + return + } + // There should be no leftover arguments. + if maxArgNum != len(call.Args) { + expect := maxArgNum - firstArg + numArgs := len(call.Args) - firstArg + pass.ReportRangef(call, "%s call needs %v but has %v", fn.FullName(), count(expect, "arg"), count(numArgs, "arg")) + } +} + +// parseFlags accepts any printf flags. +func (s *formatState) parseFlags() { + for s.nbytes < len(s.format) { + switch c := s.format[s.nbytes]; c { + case '#', '0', '+', '-', ' ': + s.flags = append(s.flags, c) + s.nbytes++ + default: + return + } + } +} + +// scanNum advances through a decimal number if present. +func (s *formatState) scanNum() { + for ; s.nbytes < len(s.format); s.nbytes++ { + c := s.format[s.nbytes] + if c < '0' || '9' < c { + return + } + } +} + +// parseIndex scans an index expression. It returns false if there is a syntax error. +func (s *formatState) parseIndex() bool { + if s.nbytes == len(s.format) || s.format[s.nbytes] != '[' { + return true + } + // Argument index present. + s.nbytes++ // skip '[' + start := s.nbytes + s.scanNum() + ok := true + if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' { + ok = false + s.nbytes = strings.Index(s.format, "]") + if s.nbytes < 0 { + s.pass.ReportRangef(s.call, "%s format %s is missing closing ]", s.name, s.format) + return false + } + } + arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32) + if err != nil || !ok || arg32 <= 0 || arg32 > int64(len(s.call.Args)-s.firstArg) { + s.pass.ReportRangef(s.call, "%s format has invalid argument index [%s]", s.name, s.format[start:s.nbytes]) + return false + } + s.nbytes++ // skip ']' + arg := int(arg32) + arg += s.firstArg - 1 // We want to zero-index the actual arguments. + s.argNum = arg + s.hasIndex = true + s.indexPending = true + return true +} + +// parseNum scans a width or precision (or *). It returns false if there's a bad index expression. +func (s *formatState) parseNum() bool { + if s.nbytes < len(s.format) && s.format[s.nbytes] == '*' { + if s.indexPending { // Absorb it. 
+ s.indexPending = false + } + s.nbytes++ + s.argNums = append(s.argNums, s.argNum) + s.argNum++ + } else { + s.scanNum() + } + return true +} + +// parsePrecision scans for a precision. It returns false if there's a bad index expression. +func (s *formatState) parsePrecision() bool { + // If there's a period, there may be a precision. + if s.nbytes < len(s.format) && s.format[s.nbytes] == '.' { + s.flags = append(s.flags, '.') // Treat precision as a flag. + s.nbytes++ + if !s.parseIndex() { + return false + } + if !s.parseNum() { + return false + } + } + return true +} + +// parsePrintfVerb looks the formatting directive that begins the format string +// and returns a formatState that encodes what the directive wants, without looking +// at the actual arguments present in the call. The result is nil if there is an error. +func parsePrintfVerb(pass *analysis.Pass, call *ast.CallExpr, name, format string, firstArg, argNum int) *formatState { + state := &formatState{ + format: format, + name: name, + flags: make([]byte, 0, 5), + argNum: argNum, + argNums: make([]int, 0, 1), + nbytes: 1, // There's guaranteed to be a percent sign. + firstArg: firstArg, + pass: pass, + call: call, + } + // There may be flags. + state.parseFlags() + // There may be an index. + if !state.parseIndex() { + return nil + } + // There may be a width. + if !state.parseNum() { + return nil + } + // There may be a precision. + if !state.parsePrecision() { + return nil + } + // Now a verb, possibly prefixed by an index (which we may already have). + if !state.indexPending && !state.parseIndex() { + return nil + } + if state.nbytes == len(state.format) { + pass.ReportRangef(call.Fun, "%s format %s is missing verb at end of string", name, state.format) + return nil + } + verb, w := utf8.DecodeRuneInString(state.format[state.nbytes:]) + state.verb = verb + state.nbytes += w + if verb != '%' { + state.argNums = append(state.argNums, state.argNum) + } + state.format = state.format[:state.nbytes] + return state +} + +// printfArgType encodes the types of expressions a printf verb accepts. It is a bitmask. +type printfArgType int + +const ( + argBool printfArgType = 1 << iota + argInt + argRune + argString + argFloat + argComplex + argPointer + argError + anyType printfArgType = ^0 +) + +type printVerb struct { + verb rune // User may provide verb through Formatter; could be a rune. + flags string // known flags are all ASCII + typ printfArgType +} + +// Common flag sets for printf verbs. +const ( + noFlag = "" + numFlag = " -+.0" + sharpNumFlag = " -+.0#" + allFlags = " -+.0#" +) + +// printVerbs identifies which flags are known to printf for each verb. +var printVerbs = []printVerb{ + // '-' is a width modifier, always valid. + // '.' is a precision for float, max width for strings. + // '+' is required sign for numbers, Go format for %v. + // '#' is alternate format for several verbs. 
+ // ' ' is spacer for numbers + {'%', noFlag, 0}, + {'b', sharpNumFlag, argInt | argFloat | argComplex | argPointer}, + {'c', "-", argRune | argInt}, + {'d', numFlag, argInt | argPointer}, + {'e', sharpNumFlag, argFloat | argComplex}, + {'E', sharpNumFlag, argFloat | argComplex}, + {'f', sharpNumFlag, argFloat | argComplex}, + {'F', sharpNumFlag, argFloat | argComplex}, + {'g', sharpNumFlag, argFloat | argComplex}, + {'G', sharpNumFlag, argFloat | argComplex}, + {'o', sharpNumFlag, argInt | argPointer}, + {'O', sharpNumFlag, argInt | argPointer}, + {'p', "-#", argPointer}, + {'q', " -+.0#", argRune | argInt | argString}, + {'s', " -+.0", argString}, + {'t', "-", argBool}, + {'T', "-", anyType}, + {'U', "-#", argRune | argInt}, + {'v', allFlags, anyType}, + {'w', allFlags, argError}, + {'x', sharpNumFlag, argRune | argInt | argString | argPointer | argFloat | argComplex}, + {'X', sharpNumFlag, argRune | argInt | argString | argPointer | argFloat | argComplex}, +} + +// okPrintfArg compares the formatState to the arguments actually present, +// reporting any discrepancies it can discern. If the final argument is ellipsissed, +// there's little it can do for that. +func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (ok bool) { + var v printVerb + found := false + // Linear scan is fast enough for a small list. + for _, v = range printVerbs { + if v.verb == state.verb { + found = true + break + } + } + + // Could current arg implement fmt.Formatter? + formatter := false + if state.argNum < len(call.Args) { + if tv, ok := pass.TypesInfo.Types[call.Args[state.argNum]]; ok { + formatter = isFormatter(tv.Type) + } + } + + if !formatter { + if !found { + pass.ReportRangef(call, "%s format %s has unknown verb %c", state.name, state.format, state.verb) + return false + } + for _, flag := range state.flags { + // TODO: Disable complaint about '0' for Go 1.10. To be fixed properly in 1.11. + // See issues 23598 and 23605. + if flag == '0' { + continue + } + if !strings.ContainsRune(v.flags, rune(flag)) { + pass.ReportRangef(call, "%s format %s has unrecognized flag %c", state.name, state.format, flag) + return false + } + } + } + // Verb is good. If len(state.argNums)>trueArgs, we have something like %.*s and all + // but the final arg must be an integer. 
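+	// Concretely (illustrative):
+	//
+	//	fmt.Printf("%.*s", 3, "abc") // ok: * consumes an int, %s a string
+	//	fmt.Printf("%.*s", "x", "y") // reported: non-int used as argument of *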
+ trueArgs := 1 + if state.verb == '%' { + trueArgs = 0 + } + nargs := len(state.argNums) + for i := 0; i < nargs-trueArgs; i++ { + argNum := state.argNums[i] + if !argCanBeChecked(pass, call, i, state) { + return + } + arg := call.Args[argNum] + if !matchArgType(pass, argInt, nil, arg) { + pass.ReportRangef(call, "%s format %s uses non-int %s as argument of *", state.name, state.format, analysisutil.Format(pass.Fset, arg)) + return false + } + } + + if state.verb == '%' || formatter { + return true + } + argNum := state.argNums[len(state.argNums)-1] + if !argCanBeChecked(pass, call, len(state.argNums)-1, state) { + return false + } + arg := call.Args[argNum] + if isFunctionValue(pass, arg) && state.verb != 'p' && state.verb != 'T' { + pass.ReportRangef(call, "%s format %s arg %s is a func value, not called", state.name, state.format, analysisutil.Format(pass.Fset, arg)) + return false + } + if !matchArgType(pass, v.typ, nil, arg) { + typeString := "" + if typ := pass.TypesInfo.Types[arg].Type; typ != nil { + typeString = typ.String() + } + pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString) + return false + } + if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) { + if methodName, ok := recursiveStringer(pass, arg); ok { + pass.ReportRangef(call, "%s format %s with arg %s causes recursive %s method call", state.name, state.format, analysisutil.Format(pass.Fset, arg), methodName) + return false + } + } + return true +} + +// recursiveStringer reports whether the argument e is a potential +// recursive call to stringer or is an error, such as t and &t in these examples: +// +// func (t *T) String() string { printf("%s", t) } +// func (t T) Error() string { printf("%s", t) } +// func (t T) String() string { printf("%s", &t) } +func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) { + typ := pass.TypesInfo.Types[e].Type + + // It's unlikely to be a recursive stringer if it has a Format method. + if isFormatter(typ) { + return "", false + } + + // Does e allow e.String() or e.Error()? + strObj, _, _ := types.LookupFieldOrMethod(typ, false, pass.Pkg, "String") + strMethod, strOk := strObj.(*types.Func) + errObj, _, _ := types.LookupFieldOrMethod(typ, false, pass.Pkg, "Error") + errMethod, errOk := errObj.(*types.Func) + if !strOk && !errOk { + return "", false + } + + // Is the expression e within the body of that String or Error method? + var method *types.Func + if strOk && strMethod.Pkg() == pass.Pkg && strMethod.Scope().Contains(e.Pos()) { + method = strMethod + } else if errOk && errMethod.Pkg() == pass.Pkg && errMethod.Scope().Contains(e.Pos()) { + method = errMethod + } else { + return "", false + } + + sig := method.Type().(*types.Signature) + if !isStringer(sig) { + return "", false + } + + // Is it the receiver r, or &r? + if u, ok := e.(*ast.UnaryExpr); ok && u.Op == token.AND { + e = u.X // strip off & from &r + } + if id, ok := e.(*ast.Ident); ok { + if pass.TypesInfo.Uses[id] == sig.Recv() { + return method.FullName(), true + } + } + return "", false +} + +// isStringer reports whether the method signature matches the String() definition in fmt.Stringer. +func isStringer(sig *types.Signature) bool { + return sig.Params().Len() == 0 && + sig.Results().Len() == 1 && + sig.Results().At(0).Type() == types.Typ[types.String] +} + +// isFunctionValue reports whether the expression is a function as opposed to a function call. 
+// It is almost always a mistake to print a function value. +func isFunctionValue(pass *analysis.Pass, e ast.Expr) bool { + if typ := pass.TypesInfo.Types[e].Type; typ != nil { + _, ok := typ.(*types.Signature) + return ok + } + return false +} + +// argCanBeChecked reports whether the specified argument is statically present; +// it may be beyond the list of arguments or in a terminal slice... argument, which +// means we can't see it. +func argCanBeChecked(pass *analysis.Pass, call *ast.CallExpr, formatArg int, state *formatState) bool { + argNum := state.argNums[formatArg] + if argNum <= 0 { + // Shouldn't happen, so catch it with prejudice. + panic("negative arg num") + } + if argNum < len(call.Args)-1 { + return true // Always OK. + } + if call.Ellipsis.IsValid() { + return false // We just can't tell; there could be many more arguments. + } + if argNum < len(call.Args) { + return true + } + // There are bad indexes in the format or there are fewer arguments than the format needs. + // This is the argument number relative to the format: Printf("%s", "hi") will give 1 for the "hi". + arg := argNum - state.firstArg + 1 // People think of arguments as 1-indexed. + pass.ReportRangef(call, "%s format %s reads arg #%d, but call has %v", state.name, state.format, arg, count(len(call.Args)-state.firstArg, "arg")) + return false +} + +// printFormatRE is the regexp we match and report as a possible format string +// in the first argument to unformatted prints like fmt.Print. +// We exclude the space flag, so that printing a string like "x % y" is not reported as a format. +var printFormatRE = regexp.MustCompile(`%` + flagsRE + numOptRE + `\.?` + numOptRE + indexOptRE + verbRE) + +const ( + flagsRE = `[+\-#]*` + indexOptRE = `(\[[0-9]+\])?` + numOptRE = `([0-9]+|` + indexOptRE + `\*)?` + verbRE = `[bcdefgopqstvxEFGTUX]` +) + +// checkPrint checks a call to an unformatted print routine such as Println. +func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { + firstArg := 0 + typ := pass.TypesInfo.Types[call.Fun].Type + if typ == nil { + // Skip checking functions with unknown type. + return + } + if sig, ok := typ.(*types.Signature); ok { + if !sig.Variadic() { + // Skip checking non-variadic functions. + return + } + params := sig.Params() + firstArg = params.Len() - 1 + + typ := params.At(firstArg).Type() + typ = typ.(*types.Slice).Elem() + it, ok := typ.(*types.Interface) + if !ok || !it.Empty() { + // Skip variadic functions accepting non-interface{} args. + return + } + } + args := call.Args + if len(args) <= firstArg { + // Skip calls without variadic args. + return + } + args = args[firstArg:] + + if firstArg == 0 { + if sel, ok := call.Args[0].(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.Ident); ok { + if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") { + pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", fn.FullName(), analysisutil.Format(pass.Fset, call.Args[0])) + } + } + } + } + + arg := args[0] + if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING { + // Ignore trailing % character in lit.Value. + // The % in "abc 0.0%" couldn't be a formatting directive. + s := strings.TrimSuffix(lit.Value, `%"`) + if strings.Contains(s, "%") { + m := printFormatRE.FindStringSubmatch(s) + if m != nil { + pass.ReportRangef(call, "%s call has possible formatting directive %s", fn.FullName(), m[0]) + } + } + } + if strings.HasSuffix(fn.Name(), "ln") { + // The last item, if a string, should not have a newline. 
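+		// For example (illustrative), fmt.Println("done\n") is reported,
+		// since Println already appends a newline.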
+ arg = args[len(args)-1] + if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING { + str, _ := strconv.Unquote(lit.Value) + if strings.HasSuffix(str, "\n") { + pass.ReportRangef(call, "%s arg list ends with redundant newline", fn.FullName()) + } + } + } + for _, arg := range args { + if isFunctionValue(pass, arg) { + pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.FullName(), analysisutil.Format(pass.Fset, arg)) + } + if methodName, ok := recursiveStringer(pass, arg); ok { + pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.FullName(), analysisutil.Format(pass.Fset, arg), methodName) + } + } +} + +// count(n, what) returns "1 what" or "N whats" +// (assuming the plural of what is whats). +func count(n int, what string) string { + if n == 1 { + return "1 " + what + } + return fmt.Sprintf("%d %ss", n, what) +} + +// stringSet is a set-of-nonempty-strings-valued flag. +// Note: elements without a '.' get lower-cased. +type stringSet map[string]bool + +func (ss stringSet) String() string { + var list []string + for name := range ss { + list = append(list, name) + } + sort.Strings(list) + return strings.Join(list, ",") +} + +func (ss stringSet) Set(flag string) error { + for _, name := range strings.Split(flag, ",") { + if len(name) == 0 { + return fmt.Errorf("empty string") + } + if !strings.Contains(name, ".") { + name = strings.ToLower(name) + } + ss[name] = true + } + return nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go new file mode 100644 index 000000000..6a5fae44f --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -0,0 +1,246 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package printf + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +// matchArgType reports an error if printf verb t is not appropriate +// for operand arg. +// +// typ is used only for recursive calls; external callers must supply nil. +// +// (Recursion arises from the compound types {map,chan,slice} which +// may be printed with %d etc. if that is appropriate for their element +// types.) +func matchArgType(pass *analysis.Pass, t printfArgType, typ types.Type, arg ast.Expr) bool { + return matchArgTypeInternal(pass, t, typ, arg, make(map[types.Type]bool)) +} + +// matchArgTypeInternal is the internal version of matchArgType. It carries a map +// remembering what types are in progress so we don't recur when faced with recursive +// types or mutually recursive types. +func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type, arg ast.Expr, inProgress map[types.Type]bool) bool { + // %v, %T accept any argument type. + if t == anyType { + return true + } + if typ == nil { + // external call + typ = pass.TypesInfo.Types[arg].Type + if typ == nil { + return true // probably a type check problem + } + } + + // %w accepts only errors. + if t == argError { + return types.ConvertibleTo(typ, errorType) + } + + // If the type implements fmt.Formatter, we have nothing to check. + if isFormatter(typ) { + return true + } + // If we can use a string, might arg (dynamically) implement the Stringer or Error interface? 
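+	// A few concrete cases the code below accepts or rejects (illustrative):
+	//
+	//	%d with []int       -> ok (element type matches %d)
+	//	%d with map[int]int -> ok (key and element both match %d)
+	//	%s with []byte      -> ok (special-cased, mirroring package fmt)
+	//	%d with chan int    -> not ok (reported as a wrong argument type)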
+ if t&argString != 0 && isConvertibleToString(pass, typ) { + return true + } + + typ = typ.Underlying() + if inProgress[typ] { + // We're already looking at this type. The call that started it will take care of it. + return true + } + inProgress[typ] = true + + switch typ := typ.(type) { + case *types.Signature: + return t == argPointer + + case *types.Map: + return t == argPointer || + // Recur: map[int]int matches %d. + (matchArgTypeInternal(pass, t, typ.Key(), arg, inProgress) && matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress)) + + case *types.Chan: + return t&argPointer != 0 + + case *types.Array: + // Same as slice. + if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 { + return true // %s matches []byte + } + // Recur: []int matches %d. + return matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress) + + case *types.Slice: + // Same as array. + if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 { + return true // %s matches []byte + } + if t == argPointer { + return true // %p prints a slice's 0th element + } + // Recur: []int matches %d. But watch out for + // type T []T + // If the element is a pointer type (type T[]*T), it's handled fine by the Pointer case below. + return matchArgTypeInternal(pass, t, typ.Elem(), arg, inProgress) + + case *types.Pointer: + // Ugly, but dealing with an edge case: a known pointer to an invalid type, + // probably something from a failed import. + if typ.Elem().String() == "invalid type" { + if false { + pass.Reportf(arg.Pos(), "printf argument %v is pointer to invalid or unknown type", analysisutil.Format(pass.Fset, arg)) + } + return true // special case + } + // If it's actually a pointer with %p, it prints as one. + if t == argPointer { + return true + } + + under := typ.Elem().Underlying() + switch under.(type) { + case *types.Struct: // see below + case *types.Array: // see below + case *types.Slice: // see below + case *types.Map: // see below + default: + // Check whether the rest can print pointers. + return t&argPointer != 0 + } + // If it's a top-level pointer to a struct, array, slice, or + // map, that's equivalent in our analysis to whether we can + // print the type being pointed to. Pointers in nested levels + // are not supported to minimize fmt running into loops. + if len(inProgress) > 1 { + return false + } + return matchArgTypeInternal(pass, t, under, arg, inProgress) + + case *types.Struct: + return matchStructArgType(pass, t, typ, arg, inProgress) + + case *types.Interface: + // There's little we can do. + // Whether any particular verb is valid depends on the argument. + // The user may have reasonable prior knowledge of the contents of the interface. 
+ return true + + case *types.Basic: + switch typ.Kind() { + case types.UntypedBool, + types.Bool: + return t&argBool != 0 + + case types.UntypedInt, + types.Int, + types.Int8, + types.Int16, + types.Int32, + types.Int64, + types.Uint, + types.Uint8, + types.Uint16, + types.Uint32, + types.Uint64, + types.Uintptr: + return t&argInt != 0 + + case types.UntypedFloat, + types.Float32, + types.Float64: + return t&argFloat != 0 + + case types.UntypedComplex, + types.Complex64, + types.Complex128: + return t&argComplex != 0 + + case types.UntypedString, + types.String: + return t&argString != 0 + + case types.UnsafePointer: + return t&(argPointer|argInt) != 0 + + case types.UntypedRune: + return t&(argInt|argRune) != 0 + + case types.UntypedNil: + return false + + case types.Invalid: + if false { + pass.Reportf(arg.Pos(), "printf argument %v has invalid or unknown type", analysisutil.Format(pass.Fset, arg)) + } + return true // Probably a type check problem. + } + panic("unreachable") + } + + return false +} + +func isConvertibleToString(pass *analysis.Pass, typ types.Type) bool { + if bt, ok := typ.(*types.Basic); ok && bt.Kind() == types.UntypedNil { + // We explicitly don't want untyped nil, which is + // convertible to both of the interfaces below, as it + // would just panic anyway. + return false + } + if types.ConvertibleTo(typ, errorType) { + return true // via .Error() + } + + // Does it implement fmt.Stringer? + if obj, _, _ := types.LookupFieldOrMethod(typ, false, nil, "String"); obj != nil { + if fn, ok := obj.(*types.Func); ok { + sig := fn.Type().(*types.Signature) + if sig.Params().Len() == 0 && + sig.Results().Len() == 1 && + sig.Results().At(0).Type() == types.Typ[types.String] { + return true + } + } + } + + return false +} + +// hasBasicType reports whether x's type is a types.Basic with the given kind. +func hasBasicType(pass *analysis.Pass, x ast.Expr, kind types.BasicKind) bool { + t := pass.TypesInfo.Types[x].Type + if t != nil { + t = t.Underlying() + } + b, ok := t.(*types.Basic) + return ok && b.Kind() == kind +} + +// matchStructArgType reports whether all the elements of the struct match the expected +// type. For instance, with "%d" all the elements must be printable with the "%d" format. +func matchStructArgType(pass *analysis.Pass, t printfArgType, typ *types.Struct, arg ast.Expr, inProgress map[types.Type]bool) bool { + for i := 0; i < typ.NumFields(); i++ { + typf := typ.Field(i) + if !matchArgTypeInternal(pass, t, typf.Type(), arg, inProgress) { + return false + } + if t&argString != 0 && !typf.Exported() && isConvertibleToString(pass, typf.Type()) { + // Issue #17798: unexported Stringer or error cannot be properly formatted. + return false + } + } + return true +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go new file mode 100644 index 000000000..ef21f0e7d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflectvaluecompare defines an Analyzer that checks for accidentally +// using == or reflect.DeepEqual to compare reflect.Value values. +// See issues 43993 and 18871. 
+package reflectvaluecompare + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `check for comparing reflect.Value values with == or reflect.DeepEqual + +The reflectvaluecompare checker looks for expressions of the form: + + v1 == v2 + v1 != v2 + reflect.DeepEqual(v1, v2) + +where v1 or v2 are reflect.Values. Comparing reflect.Values directly +is almost certainly not correct, as it compares the reflect package's +internal representation, not the underlying value. +Likely what is intended is: + + v1.Interface() == v2.Interface() + v1.Interface() != v2.Interface() + reflect.DeepEqual(v1.Interface(), v2.Interface()) +` + +var Analyzer = &analysis.Analyzer{ + Name: "reflectvaluecompare", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.BinaryExpr)(nil), + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.BinaryExpr: + if n.Op != token.EQL && n.Op != token.NEQ { + return + } + if isReflectValue(pass, n.X) || isReflectValue(pass, n.Y) { + if n.Op == token.EQL { + pass.ReportRangef(n, "avoid using == with reflect.Value") + } else { + pass.ReportRangef(n, "avoid using != with reflect.Value") + } + } + case *ast.CallExpr: + fn, ok := typeutil.Callee(pass.TypesInfo, n).(*types.Func) + if !ok { + return + } + if fn.FullName() == "reflect.DeepEqual" && (isReflectValue(pass, n.Args[0]) || isReflectValue(pass, n.Args[1])) { + pass.ReportRangef(n, "avoid using reflect.DeepEqual with reflect.Value") + } + } + }) + return nil, nil +} + +// isReflectValue reports whether the type of e is reflect.Value. +func isReflectValue(pass *analysis.Pass, e ast.Expr) bool { + tv, ok := pass.TypesInfo.Types[e] + if !ok { // no type info, something else is wrong + return false + } + // See if the type is reflect.Value + named, ok := tv.Type.(*types.Named) + if !ok { + return false + } + if obj := named.Obj(); obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != "reflect" || obj.Name() != "Value" { + return false + } + if _, ok := e.(*ast.CompositeLit); ok { + // This is reflect.Value{}. Don't treat that as an error. + // Users should probably use x.IsValid() rather than x == reflect.Value{}, but the latter isn't wrong. + return false + } + return true +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go new file mode 100644 index 000000000..b160dcf5b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go @@ -0,0 +1,290 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package shadow defines an Analyzer that checks for shadowed variables. +package shadow + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// NOTE: Experimental. Not part of the vet suite. + +const Doc = `check for possible unintended shadowing of variables + +This analyzer check for shadowed variables. 
+A shadowed variable is a variable declared in an inner scope +with the same name and type as a variable in an outer scope, +and where the outer variable is mentioned after the inner one +is declared. + +(This definition can be refined; the module generates too many +false positives and is not yet enabled by default.) + +For example: + + func BadRead(f *os.File, buf []byte) error { + var err error + for { + n, err := f.Read(buf) // shadows the function variable 'err' + if err != nil { + break // causes return of wrong value + } + foo(buf) + } + return err + } +` + +var Analyzer = &analysis.Analyzer{ + Name: "shadow", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +// flags +var strict = false + +func init() { + Analyzer.Flags.BoolVar(&strict, "strict", strict, "whether to be strict about shadowing; can be noisy") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + spans := make(map[types.Object]span) + for id, obj := range pass.TypesInfo.Defs { + // Ignore identifiers that don't denote objects + // (package names, symbolic variables such as t + // in t := x.(type) of type switch headers). + if obj != nil { + growSpan(spans, obj, id.Pos(), id.End()) + } + } + for id, obj := range pass.TypesInfo.Uses { + growSpan(spans, obj, id.Pos(), id.End()) + } + for node, obj := range pass.TypesInfo.Implicits { + // A type switch with a short variable declaration + // such as t := x.(type) doesn't declare the symbolic + // variable (t in the example) at the switch header; + // instead a new variable t (with specific type) is + // declared implicitly for each case. Such variables + // are found in the types.Info.Implicits (not Defs) + // map. Add them here, assuming they are declared at + // the type cases' colon ":". + if cc, ok := node.(*ast.CaseClause); ok { + growSpan(spans, obj, cc.Colon, cc.Colon) + } + } + + nodeFilter := []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.GenDecl)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.AssignStmt: + checkShadowAssignment(pass, spans, n) + case *ast.GenDecl: + checkShadowDecl(pass, spans, n) + } + }) + return nil, nil +} + +// A span stores the minimum range of byte positions in the file in which a +// given variable (types.Object) is mentioned. It is lexically defined: it spans +// from the beginning of its first mention to the end of its last mention. +// A variable is considered shadowed (if strict is off) only if the +// shadowing variable is declared within the span of the shadowed variable. +// In other words, if a variable is shadowed but not used after the shadowed +// variable is declared, it is inconsequential and not worth complaining about. +// This simple check dramatically reduces the nuisance rate for the shadowing +// check, at least until something cleverer comes along. +// +// One wrinkle: A "naked return" is a silent use of a variable that the Span +// will not capture, but the compilers catch naked returns of shadowed +// variables so we don't need to. +// +// Cases this gets wrong (TODO): +// - If a for loop's continuation statement mentions a variable redeclared in +// the block, we should complain about it but don't. +// - A variable declared inside a function literal can falsely be identified +// as shadowing a variable in the outer function. +// +type span struct { + min token.Pos + max token.Pos +} + +// contains reports whether the position is inside the span. 
+func (s span) contains(pos token.Pos) bool { + return s.min <= pos && pos < s.max +} + +// growSpan expands the span for the object to contain the source range [pos, end). +func growSpan(spans map[types.Object]span, obj types.Object, pos, end token.Pos) { + if strict { + return // No need + } + s, ok := spans[obj] + if ok { + if s.min > pos { + s.min = pos + } + if s.max < end { + s.max = end + } + } else { + s = span{pos, end} + } + spans[obj] = s +} + +// checkShadowAssignment checks for shadowing in a short variable declaration. +func checkShadowAssignment(pass *analysis.Pass, spans map[types.Object]span, a *ast.AssignStmt) { + if a.Tok != token.DEFINE { + return + } + if idiomaticShortRedecl(pass, a) { + return + } + for _, expr := range a.Lhs { + ident, ok := expr.(*ast.Ident) + if !ok { + pass.ReportRangef(expr, "invalid AST: short variable declaration of non-identifier") + return + } + checkShadowing(pass, spans, ident) + } +} + +// idiomaticShortRedecl reports whether this short declaration can be ignored for +// the purposes of shadowing, that is, that any redeclarations it contains are deliberate. +func idiomaticShortRedecl(pass *analysis.Pass, a *ast.AssignStmt) bool { + // Don't complain about deliberate redeclarations of the form + // i := i + // Such constructs are idiomatic in range loops to create a new variable + // for each iteration. Another example is + // switch n := n.(type) + if len(a.Rhs) != len(a.Lhs) { + return false + } + // We know it's an assignment, so the LHS must be all identifiers. (We check anyway.) + for i, expr := range a.Lhs { + lhs, ok := expr.(*ast.Ident) + if !ok { + pass.ReportRangef(expr, "invalid AST: short variable declaration of non-identifier") + return true // Don't do any more processing. + } + switch rhs := a.Rhs[i].(type) { + case *ast.Ident: + if lhs.Name != rhs.Name { + return false + } + case *ast.TypeAssertExpr: + if id, ok := rhs.X.(*ast.Ident); ok { + if lhs.Name != id.Name { + return false + } + } + default: + return false + } + } + return true +} + +// idiomaticRedecl reports whether this declaration spec can be ignored for +// the purposes of shadowing, that is, that any redeclarations it contains are deliberate. +func idiomaticRedecl(d *ast.ValueSpec) bool { + // Don't complain about deliberate redeclarations of the form + // var i, j = i, j + // Don't ignore redeclarations of the form + // var i = 3 + if len(d.Names) != len(d.Values) { + return false + } + for i, lhs := range d.Names { + rhs, ok := d.Values[i].(*ast.Ident) + if !ok || lhs.Name != rhs.Name { + return false + } + } + return true +} + +// checkShadowDecl checks for shadowing in a general variable declaration. +func checkShadowDecl(pass *analysis.Pass, spans map[types.Object]span, d *ast.GenDecl) { + if d.Tok != token.VAR { + return + } + for _, spec := range d.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + pass.ReportRangef(spec, "invalid AST: var GenDecl not ValueSpec") + return + } + // Don't complain about deliberate redeclarations of the form + // var i = i + if idiomaticRedecl(valueSpec) { + return + } + for _, ident := range valueSpec.Names { + checkShadowing(pass, spans, ident) + } + } +} + +// checkShadowing checks whether the identifier shadows an identifier in an outer scope. +func checkShadowing(pass *analysis.Pass, spans map[types.Object]span, ident *ast.Ident) { + if ident.Name == "_" { + // Can't shadow the blank identifier. 
+ return + } + obj := pass.TypesInfo.Defs[ident] + if obj == nil { + return + } + // obj.Parent.Parent is the surrounding scope. If we can find another declaration + // starting from there, we have a shadowed identifier. + _, shadowed := obj.Parent().Parent().LookupParent(obj.Name(), obj.Pos()) + if shadowed == nil { + return + } + // Don't complain if it's shadowing a universe-declared identifier; that's fine. + if shadowed.Parent() == types.Universe { + return + } + if strict { + // The shadowed identifier must appear before this one to be an instance of shadowing. + if shadowed.Pos() > ident.Pos() { + return + } + } else { + // Don't complain if the span of validity of the shadowed identifier doesn't include + // the shadowing identifier. + span, ok := spans[shadowed] + if !ok { + pass.ReportRangef(ident, "internal error: no range for %q", ident.Name) + return + } + if !span.contains(ident.Pos()) { + return + } + } + // Don't complain if the types differ: that implies the programmer really wants two different things. + if types.Identical(obj.Type(), shadowed.Type()) { + line := pass.Fset.Position(shadowed.Pos()).Line + pass.ReportRangef(ident, "declaration of %q shadows declaration at line %d", obj.Name(), line) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/dead.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/dead.go new file mode 100644 index 000000000..43415a98d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/dead.go @@ -0,0 +1,101 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package shift + +// Simplified dead code detector. +// Used for skipping shift checks on unreachable arch-specific code. + +import ( + "go/ast" + "go/constant" + "go/types" +) + +// updateDead puts unreachable "if" and "case" nodes into dead. +func updateDead(info *types.Info, dead map[ast.Node]bool, node ast.Node) { + if dead[node] { + // The node is already marked as dead. + return + } + + // setDead marks the node and all the children as dead. + setDead := func(n ast.Node) { + ast.Inspect(n, func(node ast.Node) bool { + if node != nil { + dead[node] = true + } + return true + }) + } + + switch stmt := node.(type) { + case *ast.IfStmt: + // "if" branch is dead if its condition evaluates + // to constant false. + v := info.Types[stmt.Cond].Value + if v == nil { + return + } + if !constant.BoolVal(v) { + setDead(stmt.Body) + return + } + if stmt.Else != nil { + setDead(stmt.Else) + } + case *ast.SwitchStmt: + // Case clause with empty switch tag is dead if it evaluates + // to constant false. + if stmt.Tag == nil { + BodyLoopBool: + for _, stmt := range stmt.Body.List { + cc := stmt.(*ast.CaseClause) + if cc.List == nil { + // Skip default case. + continue + } + for _, expr := range cc.List { + v := info.Types[expr].Value + if v == nil || v.Kind() != constant.Bool || constant.BoolVal(v) { + continue BodyLoopBool + } + } + setDead(cc) + } + return + } + + // Case clause is dead if its constant value doesn't match + // the constant value from the switch tag. + // TODO: This handles integer comparisons only. + v := info.Types[stmt.Tag].Value + if v == nil || v.Kind() != constant.Int { + return + } + tagN, ok := constant.Uint64Val(v) + if !ok { + return + } + BodyLoopInt: + for _, x := range stmt.Body.List { + cc := x.(*ast.CaseClause) + if cc.List == nil { + // Skip default case. 
+ continue + } + for _, expr := range cc.List { + v := info.Types[expr].Value + if v == nil { + continue BodyLoopInt + } + n, ok := constant.Uint64Val(v) + if !ok || tagN == n { + continue BodyLoopInt + } + } + setDead(cc) + } + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go new file mode 100644 index 000000000..1f3df07cc --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -0,0 +1,101 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package shift defines an Analyzer that checks for shifts that exceed +// the width of an integer. +package shift + +// TODO(adonovan): integrate with ctrflow (CFG-based) dead code analysis. May +// have impedance mismatch due to its (non-)treatment of constant +// expressions (such as runtime.GOARCH=="386"). + +import ( + "go/ast" + "go/constant" + "go/token" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = "check for shifts that equal or exceed the width of the integer" + +var Analyzer = &analysis.Analyzer{ + Name: "shift", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // Do a complete pass to compute dead nodes. + dead := make(map[ast.Node]bool) + nodeFilter := []ast.Node{ + (*ast.IfStmt)(nil), + (*ast.SwitchStmt)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + // TODO(adonovan): move updateDead into this file. + updateDead(pass.TypesInfo, dead, n) + }) + + nodeFilter = []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.BinaryExpr)(nil), + } + inspect.Preorder(nodeFilter, func(node ast.Node) { + if dead[node] { + // Skip shift checks on unreachable nodes. + return + } + + switch node := node.(type) { + case *ast.BinaryExpr: + if node.Op == token.SHL || node.Op == token.SHR { + checkLongShift(pass, node, node.X, node.Y) + } + case *ast.AssignStmt: + if len(node.Lhs) != 1 || len(node.Rhs) != 1 { + return + } + if node.Tok == token.SHL_ASSIGN || node.Tok == token.SHR_ASSIGN { + checkLongShift(pass, node, node.Lhs[0], node.Rhs[0]) + } + } + }) + return nil, nil +} + +// checkLongShift checks if shift or shift-assign operations shift by more than +// the length of the underlying variable. +func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { + if pass.TypesInfo.Types[x].Value != nil { + // Ignore shifts of constants. + // These are frequently used for bit-twiddling tricks + // like ^uint(0) >> 63 for 32/64 bit detection and compatibility. 
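+		// (Non-constant operands are still checked below; an illustrative
+		// report: for "var x int8; _ = x << 8" the analyzer prints
+		// "x (8 bits) too small for shift of 8".)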
+ return + } + + v := pass.TypesInfo.Types[y].Value + if v == nil { + return + } + amt, ok := constant.Int64Val(v) + if !ok { + return + } + t := pass.TypesInfo.Types[x].Type + if t == nil { + return + } + size := 8 * pass.TypesSizes.Sizeof(t) + if amt >= size { + ident := analysisutil.Format(pass.Fset, x) + pass.ReportRangef(node, "%s (%d bits) too small for shift of %d", ident, size, amt) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go new file mode 100644 index 000000000..0d6c8ebf1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -0,0 +1,154 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sigchanyzer defines an Analyzer that detects +// misuse of unbuffered signal as argument to signal.Notify. +package sigchanyzer + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for unbuffered channel of os.Signal + +This checker reports call expression of the form signal.Notify(c <-chan os.Signal, sig ...os.Signal), +where c is an unbuffered channel, which can be at risk of missing the signal.` + +// Analyzer describes sigchanyzer analysis function detector. +var Analyzer = &analysis.Analyzer{ + Name: "sigchanyzer", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + if !isSignalNotify(pass.TypesInfo, call) { + return + } + var chanDecl *ast.CallExpr + switch arg := call.Args[0].(type) { + case *ast.Ident: + if decl, ok := findDecl(arg).(*ast.CallExpr); ok { + chanDecl = decl + } + case *ast.CallExpr: + // Only signal.Notify(make(chan os.Signal), os.Interrupt) is safe, + // conservatively treate others as not safe, see golang/go#45043 + if isBuiltinMake(pass.TypesInfo, arg) { + return + } + chanDecl = arg + } + if chanDecl == nil || len(chanDecl.Args) != 1 { + return + } + + // Make a copy of the channel's declaration to avoid + // mutating the AST. See https://golang.org/issue/46129. + chanDeclCopy := &ast.CallExpr{} + *chanDeclCopy = *chanDecl + chanDeclCopy.Args = append([]ast.Expr(nil), chanDecl.Args...) 
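+		// The suggested fix assembled below rewrites, for example
+		// (illustrative):
+		//
+		//	c := make(chan os.Signal)
+		//
+		// into
+		//
+		//	c := make(chan os.Signal, 1)
+		//
+		// by appending a buffer size of 1 to a copy of the make call.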
+ chanDeclCopy.Args = append(chanDeclCopy.Args, &ast.BasicLit{ + Kind: token.INT, + Value: "1", + }) + + var buf bytes.Buffer + if err := format.Node(&buf, token.NewFileSet(), chanDeclCopy); err != nil { + return + } + pass.Report(analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: "misuse of unbuffered os.Signal channel as argument to signal.Notify", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Change to buffer channel", + TextEdits: []analysis.TextEdit{{ + Pos: chanDecl.Pos(), + End: chanDecl.End(), + NewText: buf.Bytes(), + }}, + }}, + }) + }) + return nil, nil +} + +func isSignalNotify(info *types.Info, call *ast.CallExpr) bool { + check := func(id *ast.Ident) bool { + obj := info.ObjectOf(id) + return obj.Name() == "Notify" && obj.Pkg().Path() == "os/signal" + } + switch fun := call.Fun.(type) { + case *ast.SelectorExpr: + return check(fun.Sel) + case *ast.Ident: + if fun, ok := findDecl(fun).(*ast.SelectorExpr); ok { + return check(fun.Sel) + } + return false + default: + return false + } +} + +func findDecl(arg *ast.Ident) ast.Node { + if arg.Obj == nil { + return nil + } + switch as := arg.Obj.Decl.(type) { + case *ast.AssignStmt: + if len(as.Lhs) != len(as.Rhs) { + return nil + } + for i, lhs := range as.Lhs { + lid, ok := lhs.(*ast.Ident) + if !ok { + continue + } + if lid.Obj == arg.Obj { + return as.Rhs[i] + } + } + case *ast.ValueSpec: + if len(as.Names) != len(as.Values) { + return nil + } + for i, name := range as.Names { + if name.Obj == arg.Obj { + return as.Values[i] + } + } + } + return nil +} + +func isBuiltinMake(info *types.Info, call *ast.CallExpr) bool { + typVal := info.Types[call.Fun] + if !typVal.IsBuiltin() { + return false + } + switch fun := call.Fun.(type) { + case *ast.Ident: + return info.ObjectOf(fun).Name() == "make" + default: + return false + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go new file mode 100644 index 000000000..69a67939d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/sortslice/analyzer.go @@ -0,0 +1,123 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sortslice defines an Analyzer that checks for calls +// to sort.Slice that do not use a slice type as first argument. +package sortslice + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `check the argument type of sort.Slice + +sort.Slice requires an argument of a slice type. 
Check that +the interface{} value passed to sort.Slice is actually a slice.` + +var Analyzer = &analysis.Analyzer{ + Name: "sortslice", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if fn == nil { + return + } + + if fn.FullName() != "sort.Slice" { + return + } + + arg := call.Args[0] + typ := pass.TypesInfo.Types[arg].Type + switch typ.Underlying().(type) { + case *types.Slice, *types.Interface: + return + } + + var fixes []analysis.SuggestedFix + switch v := typ.Underlying().(type) { + case *types.Array: + var buf bytes.Buffer + format.Node(&buf, pass.Fset, &ast.SliceExpr{ + X: arg, + Slice3: false, + Lbrack: arg.End() + 1, + Rbrack: arg.End() + 3, + }) + fixes = append(fixes, analysis.SuggestedFix{ + Message: "Get a slice of the full array", + TextEdits: []analysis.TextEdit{{ + Pos: arg.Pos(), + End: arg.End(), + NewText: buf.Bytes(), + }}, + }) + case *types.Pointer: + _, ok := v.Elem().Underlying().(*types.Slice) + if !ok { + break + } + var buf bytes.Buffer + format.Node(&buf, pass.Fset, &ast.StarExpr{ + X: arg, + }) + fixes = append(fixes, analysis.SuggestedFix{ + Message: "Dereference the pointer to the slice", + TextEdits: []analysis.TextEdit{{ + Pos: arg.Pos(), + End: arg.End(), + NewText: buf.Bytes(), + }}, + }) + case *types.Signature: + if v.Params().Len() != 0 || v.Results().Len() != 1 { + break + } + if _, ok := v.Results().At(0).Type().Underlying().(*types.Slice); !ok { + break + } + var buf bytes.Buffer + format.Node(&buf, pass.Fset, &ast.CallExpr{ + Fun: arg, + }) + fixes = append(fixes, analysis.SuggestedFix{ + Message: "Call the function", + TextEdits: []analysis.TextEdit{{ + Pos: arg.Pos(), + End: arg.End(), + NewText: buf.Bytes(), + }}, + }) + } + + pass.Report(analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fmt.Sprintf("sort.Slice's argument must be a slice; is called with %s", typ.String()), + SuggestedFixes: fixes, + }) + }) + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go new file mode 100644 index 000000000..64a28ac0b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -0,0 +1,204 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stdmethods defines an Analyzer that checks for misspellings +// in the signatures of methods similar to well-known interfaces. +package stdmethods + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check signature of methods of well-known interfaces + +Sometimes a type may be intended to satisfy an interface but may fail to +do so because of a mistake in its method signature. +For example, the result of this WriteTo method should be (int64, error), +not error, to satisfy io.WriterTo: + + type myWriterTo struct{...} + func (myWriterTo) WriteTo(w io.Writer) error { ... 
} + +This check ensures that each method whose name matches one of several +well-known interface methods from the standard library has the correct +signature for that interface. + +Checked method names include: + Format GobEncode GobDecode MarshalJSON MarshalXML + Peek ReadByte ReadFrom ReadRune Scan Seek + UnmarshalJSON UnreadByte UnreadRune WriteByte + WriteTo +` + +var Analyzer = &analysis.Analyzer{ + Name: "stdmethods", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +// canonicalMethods lists the input and output types for Go methods +// that are checked using dynamic interface checks. Because the +// checks are dynamic, such methods would not cause a compile error +// if they have the wrong signature: instead the dynamic check would +// fail, sometimes mysteriously. If a method is found with a name listed +// here but not the input/output types listed here, vet complains. +// +// A few of the canonical methods have very common names. +// For example, a type might implement a Scan method that +// has nothing to do with fmt.Scanner, but we still want to check +// the methods that are intended to implement fmt.Scanner. +// To do that, the arguments that have a = prefix are treated as +// signals that the canonical meaning is intended: if a Scan +// method doesn't have a fmt.ScanState as its first argument, +// we let it go. But if it does have a fmt.ScanState, then the +// rest has to match. +var canonicalMethods = map[string]struct{ args, results []string }{ + "As": {[]string{"interface{}"}, []string{"bool"}}, // errors.As + // "Flush": {{}, {"error"}}, // http.Flusher and jpeg.writer conflict + "Format": {[]string{"=fmt.State", "rune"}, []string{}}, // fmt.Formatter + "GobDecode": {[]string{"[]byte"}, []string{"error"}}, // gob.GobDecoder + "GobEncode": {[]string{}, []string{"[]byte", "error"}}, // gob.GobEncoder + "Is": {[]string{"error"}, []string{"bool"}}, // errors.Is + "MarshalJSON": {[]string{}, []string{"[]byte", "error"}}, // json.Marshaler + "MarshalXML": {[]string{"*xml.Encoder", "xml.StartElement"}, []string{"error"}}, // xml.Marshaler + "ReadByte": {[]string{}, []string{"byte", "error"}}, // io.ByteReader + "ReadFrom": {[]string{"=io.Reader"}, []string{"int64", "error"}}, // io.ReaderFrom + "ReadRune": {[]string{}, []string{"rune", "int", "error"}}, // io.RuneReader + "Scan": {[]string{"=fmt.ScanState", "rune"}, []string{"error"}}, // fmt.Scanner + "Seek": {[]string{"=int64", "int"}, []string{"int64", "error"}}, // io.Seeker + "UnmarshalJSON": {[]string{"[]byte"}, []string{"error"}}, // json.Unmarshaler + "UnmarshalXML": {[]string{"*xml.Decoder", "xml.StartElement"}, []string{"error"}}, // xml.Unmarshaler + "UnreadByte": {[]string{}, []string{"error"}}, + "UnreadRune": {[]string{}, []string{"error"}}, + "Unwrap": {[]string{}, []string{"error"}}, // errors.Unwrap + "WriteByte": {[]string{"byte"}, []string{"error"}}, // jpeg.writer (matching bufio.Writer) + "WriteTo": {[]string{"=io.Writer"}, []string{"int64", "error"}}, // io.WriterTo +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.InterfaceType)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + if n.Recv != nil { + canonicalMethod(pass, n.Name) + } + case *ast.InterfaceType: + for _, field := range n.Methods.List { + for _, id := range field.Names { + canonicalMethod(pass, id) + } + } + } + }) 
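+ // Editor's note, not part of the upstream golang.org/x/tools source: a
+ // hedged sketch of a signature this pass reports, since io.WriterTo
+ // requires (int64, error) results (T is a hypothetical type):
+ //
+ //	type T struct{}
+ //	func (T) WriteTo(w io.Writer) error { return nil } // reported
+ //	// expected: func (T) WriteTo(w io.Writer) (int64, error)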
+ return nil, nil +} + +func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { + // Expected input/output. + expect, ok := canonicalMethods[id.Name] + if !ok { + return + } + + // Actual input/output + sign := pass.TypesInfo.Defs[id].Type().(*types.Signature) + args := sign.Params() + results := sign.Results() + + // Special case: WriteTo with more than one argument, + // not trying at all to implement io.WriterTo, + // comes up often enough to skip. + if id.Name == "WriteTo" && args.Len() > 1 { + return + } + + // Special case: Is, As and Unwrap only apply when type + // implements error. + if id.Name == "Is" || id.Name == "As" || id.Name == "Unwrap" { + if recv := sign.Recv(); recv == nil || !implementsError(recv.Type()) { + return + } + } + + // Do the =s (if any) all match? + if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { + return + } + + // Everything must match. + if !matchParams(pass, expect.args, args, "") || !matchParams(pass, expect.results, results, "") { + expectFmt := id.Name + "(" + argjoin(expect.args) + ")" + if len(expect.results) == 1 { + expectFmt += " " + argjoin(expect.results) + } else if len(expect.results) > 1 { + expectFmt += " (" + argjoin(expect.results) + ")" + } + + actual := typeString(sign) + actual = strings.TrimPrefix(actual, "func") + actual = id.Name + actual + + pass.ReportRangef(id, "method %s should have signature %s", actual, expectFmt) + } +} + +func typeString(typ types.Type) string { + return types.TypeString(typ, (*types.Package).Name) +} + +func argjoin(x []string) string { + y := make([]string, len(x)) + for i, s := range x { + if s[0] == '=' { + s = s[1:] + } + y[i] = s + } + return strings.Join(y, ", ") +} + +// Does each type in expect with the given prefix match the corresponding type in actual? +func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, prefix string) bool { + for i, x := range expect { + if !strings.HasPrefix(x, prefix) { + continue + } + if i >= actual.Len() { + return false + } + if !matchParamType(x, actual.At(i).Type()) { + return false + } + } + if prefix == "" && actual.Len() > len(expect) { + return false + } + return true +} + +// Does this one type match? +func matchParamType(expect string, actual types.Type) bool { + expect = strings.TrimPrefix(expect, "=") + // Overkill but easy. + return typeString(actual) == expect +} + +var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +func implementsError(actual types.Type) bool { + return types.Implements(actual, errorType) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go new file mode 100644 index 000000000..7a005901e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -0,0 +1,126 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringintconv defines an Analyzer that flags type conversions +// from integers to strings. +package stringintconv + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for string(int) conversions + +This checker flags conversions of the form string(x) where x is an integer +(but not byte or rune) type. 
Such conversions are discouraged because they +return the UTF-8 representation of the Unicode code point x, and not a decimal +string representation of x as one might expect. Furthermore, if x denotes an +invalid code point, the conversion cannot be statically rejected. + +For conversions that intend on using the code point, consider replacing them +with string(rune(x)). Otherwise, strconv.Itoa and its equivalents return the +string representation of the value in the desired base. +` + +var Analyzer = &analysis.Analyzer{ + Name: "stringintconv", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func typeName(typ types.Type) string { + if v, _ := typ.(interface{ Name() string }); v != nil { + return v.Name() + } + if v, _ := typ.(interface{ Obj() *types.TypeName }); v != nil { + return v.Obj().Name() + } + return "" +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + + // Retrieve target type name. + var tname *types.TypeName + switch fun := call.Fun.(type) { + case *ast.Ident: + tname, _ = pass.TypesInfo.Uses[fun].(*types.TypeName) + case *ast.SelectorExpr: + tname, _ = pass.TypesInfo.Uses[fun.Sel].(*types.TypeName) + } + if tname == nil { + return + } + target := tname.Name() + + // Check that target type T in T(v) has an underlying type of string. + T, _ := tname.Type().Underlying().(*types.Basic) + if T == nil || T.Kind() != types.String { + return + } + if s := T.Name(); target != s { + target += " (" + s + ")" + } + + // Check that type V of v has an underlying integral type that is not byte or rune. + if len(call.Args) != 1 { + return + } + v := call.Args[0] + vtyp := pass.TypesInfo.TypeOf(v) + V, _ := vtyp.Underlying().(*types.Basic) + if V == nil || V.Info()&types.IsInteger == 0 { + return + } + switch V.Kind() { + case types.Byte, types.Rune, types.UntypedRune: + return + } + + // Retrieve source type name. + source := typeName(vtyp) + if source == "" { + return + } + if s := V.Name(); source != s { + source += " (" + s + ")" + } + diag := analysis.Diagnostic{ + Pos: n.Pos(), + Message: fmt.Sprintf("conversion from %s to %s yields a string of one rune, not a string of digits (did you mean fmt.Sprint(x)?)", source, target), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Did you mean to convert a rune to a string?", + TextEdits: []analysis.TextEdit{ + { + Pos: v.Pos(), + End: v.Pos(), + NewText: []byte("rune("), + }, + { + Pos: v.End(), + End: v.End(), + NewText: []byte(")"), + }, + }, + }, + }, + } + pass.Report(diag) + }) + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go new file mode 100644 index 000000000..f0b15051c --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -0,0 +1,313 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package structtag defines an Analyzer that checks struct field tags +// are well formed. 
+package structtag + +import ( + "errors" + "go/ast" + "go/token" + "go/types" + "path/filepath" + "reflect" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check that struct field tags conform to reflect.StructTag.Get + +Also report certain struct tags (json, xml) used with unexported fields.` + +var Analyzer = &analysis.Analyzer{ + Name: "structtag", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + styp, ok := pass.TypesInfo.Types[n.(*ast.StructType)].Type.(*types.Struct) + // Type information may be incomplete. + if !ok { + return + } + var seen namesSeen + for i := 0; i < styp.NumFields(); i++ { + field := styp.Field(i) + tag := styp.Tag(i) + checkCanonicalFieldTag(pass, field, tag, &seen) + } + }) + return nil, nil +} + +// namesSeen keeps track of encoding tags by their key, name, and nested level +// from the initial struct. The level is taken into account because equal +// encoding key names only conflict when at the same level; otherwise, the lower +// level shadows the higher level. +type namesSeen map[uniqueName]token.Pos + +type uniqueName struct { + key string // "xml" or "json" + name string // the encoding name + level int // anonymous struct nesting level +} + +func (s *namesSeen) Get(key, name string, level int) (token.Pos, bool) { + if *s == nil { + *s = make(map[uniqueName]token.Pos) + } + pos, ok := (*s)[uniqueName{key, name, level}] + return pos, ok +} + +func (s *namesSeen) Set(key, name string, level int, pos token.Pos) { + if *s == nil { + *s = make(map[uniqueName]token.Pos) + } + (*s)[uniqueName{key, name, level}] = pos +} + +var checkTagDups = []string{"json", "xml"} +var checkTagSpaces = map[string]bool{"json": true, "xml": true, "asn1": true} + +// checkCanonicalFieldTag checks a single struct field tag. +func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, seen *namesSeen) { + switch pass.Pkg.Path() { + case "encoding/json", "encoding/xml": + // These packages know how to use their own APIs. + // Sometimes they are testing what happens to incorrect programs. + return + } + + for _, key := range checkTagDups { + checkTagDuplicates(pass, tag, key, field, field, seen, 1) + } + + if err := validateStructTag(tag); err != nil { + pass.Reportf(field.Pos(), "struct field tag %#q not compatible with reflect.StructTag.Get: %s", tag, err) + } + + // Check for use of json or xml tags with unexported fields. + + // Embedded struct. Nothing to do for now, but that + // may change, depending on what happens with issue 7363. + // TODO(adonovan): investigate, now that that issue is fixed. + if field.Anonymous() { + return + } + + if field.Exported() { + return + } + + for _, enc := range [...]string{"json", "xml"} { + switch reflect.StructTag(tag).Get(enc) { + // Ignore warning if the field not exported and the tag is marked as + // ignored. + case "", "-": + default: + pass.Reportf(field.Pos(), "struct field %s has %s tag but is not exported", field.Name(), enc) + return + } + } +} + +// checkTagDuplicates checks a single struct field tag to see if any tags are +// duplicated. 
nearest is the field that's closest to the field being checked, +// while still being part of the top-level struct type. +func checkTagDuplicates(pass *analysis.Pass, tag, key string, nearest, field *types.Var, seen *namesSeen, level int) { + val := reflect.StructTag(tag).Get(key) + if val == "-" { + // Ignored, even if the field is anonymous. + return + } + if val == "" || val[0] == ',' { + if !field.Anonymous() { + // Ignored if the field isn't anonymous. + return + } + typ, ok := field.Type().Underlying().(*types.Struct) + if !ok { + return + } + for i := 0; i < typ.NumFields(); i++ { + field := typ.Field(i) + if !field.Exported() { + continue + } + tag := typ.Tag(i) + checkTagDuplicates(pass, tag, key, nearest, field, seen, level+1) + } + return + } + if key == "xml" && field.Name() == "XMLName" { + // XMLName defines the XML element name of the struct being + // checked. That name cannot collide with element or attribute + // names defined on other fields of the struct. Vet does not have a + // check for untagged fields of type struct defining their own name + // by containing a field named XMLName; see issue 18256. + return + } + if i := strings.Index(val, ","); i >= 0 { + if key == "xml" { + // Use a separate namespace for XML attributes. + for _, opt := range strings.Split(val[i:], ",") { + if opt == "attr" { + key += " attribute" // Key is part of the error message. + break + } + } + } + val = val[:i] + } + if pos, ok := seen.Get(key, val, level); ok { + alsoPos := pass.Fset.Position(pos) + alsoPos.Column = 0 + + // Make the "also at" position relative to the current position, + // to ensure that all warnings are unambiguous and correct. For + // example, via anonymous struct fields, it's possible for the + // two fields to be in different packages and directories. + thisPos := pass.Fset.Position(field.Pos()) + rel, err := filepath.Rel(filepath.Dir(thisPos.Filename), alsoPos.Filename) + if err != nil { + // Possibly because the paths are relative; leave the + // filename alone. + } else { + alsoPos.Filename = rel + } + + pass.Reportf(nearest.Pos(), "struct field %s repeats %s tag %q also at %s", field.Name(), key, val, alsoPos) + } else { + seen.Set(key, val, level, field.Pos()) + } +} + +var ( + errTagSyntax = errors.New("bad syntax for struct tag pair") + errTagKeySyntax = errors.New("bad syntax for struct tag key") + errTagValueSyntax = errors.New("bad syntax for struct tag value") + errTagValueSpace = errors.New("suspicious space in struct tag value") + errTagSpace = errors.New("key:\"value\" pairs not separated by spaces") +) + +// validateStructTag parses the struct tag and returns an error if it is not +// in the canonical format, which is a space-separated list of key:"value" +// settings. The value may contain spaces. +func validateStructTag(tag string) error { + // This code is based on the StructTag.Get code in package reflect. + + n := 0 + for ; tag != ""; n++ { + if n > 0 && tag != "" && tag[0] != ' ' { + // More restrictive than reflect, but catches likely mistakes + // like `x:"foo",y:"bar"`, which parses as `x:"foo" ,y:"bar"` with second key ",y". + return errTagSpace + } + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. 
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 { + return errTagKeySyntax + } + if i+1 >= len(tag) || tag[i] != ':' { + return errTagSyntax + } + if tag[i+1] != '"' { + return errTagValueSyntax + } + key := tag[:i] + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + return errTagValueSyntax + } + qvalue := tag[:i+1] + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return errTagValueSyntax + } + + if !checkTagSpaces[key] { + continue + } + + switch key { + case "xml": + // If the first or last character in the XML tag is a space, it is + // suspicious. + if strings.Trim(value, " ") != value { + return errTagValueSpace + } + + // If there are multiple spaces, they are suspicious. + if strings.Count(value, " ") > 1 { + return errTagValueSpace + } + + // If there is no comma, skip the rest of the checks. + comma := strings.IndexRune(value, ',') + if comma < 0 { + continue + } + + // If the character before a comma is a space, this is suspicious. + if comma > 0 && value[comma-1] == ' ' { + return errTagValueSpace + } + value = value[comma+1:] + case "json": + // JSON allows using spaces in the name, so skip it. + comma := strings.IndexRune(value, ',') + if comma < 0 { + continue + } + value = value[comma+1:] + } + + if strings.IndexByte(value, ' ') >= 0 { + return errTagValueSpace + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go new file mode 100644 index 000000000..d2b9a5640 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -0,0 +1,154 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testinggoroutine + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `report calls to (*testing.T).Fatal from goroutines started by a test. + +Functions that abruptly terminate a test, such as the Fatal, Fatalf, FailNow, and +Skip{,f,Now} methods of *testing.T, must be called from the test goroutine itself. +This checker detects calls to these functions that occur within a goroutine +started by the test. 
For example: + +func TestFoo(t *testing.T) { + go func() { + t.Fatal("oops") // error: (*T).Fatal called from non-test goroutine + }() +} +` + +var Analyzer = &analysis.Analyzer{ + Name: "testinggoroutine", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +var forbidden = map[string]bool{ + "FailNow": true, + "Fatal": true, + "Fatalf": true, + "Skip": true, + "Skipf": true, + "SkipNow": true, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + if !analysisutil.Imports(pass.Pkg, "testing") { + return nil, nil + } + + // Filter out anything that isn't a function declaration. + onlyFuncs := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspect.Nodes(onlyFuncs, func(node ast.Node, push bool) bool { + fnDecl, ok := node.(*ast.FuncDecl) + if !ok { + return false + } + + if !hasBenchmarkOrTestParams(fnDecl) { + return false + } + + // Now traverse the benchmark/test's body and check that none of the + // forbidden methods are invoked in the goroutines within the body. + ast.Inspect(fnDecl, func(n ast.Node) bool { + goStmt, ok := n.(*ast.GoStmt) + if !ok { + return true + } + + checkGoStmt(pass, goStmt) + + // No need to further traverse the GoStmt since right + // above we manually traversed it in the ast.Inspect(goStmt, ...) + return false + }) + + return false + }) + + return nil, nil +} + +func hasBenchmarkOrTestParams(fnDecl *ast.FuncDecl) bool { + // Check that the function's arguments include "*testing.T" or "*testing.B". + params := fnDecl.Type.Params.List + + for _, param := range params { + if _, ok := typeIsTestingDotTOrB(param.Type); ok { + return true + } + } + + return false +} + +func typeIsTestingDotTOrB(expr ast.Expr) (string, bool) { + starExpr, ok := expr.(*ast.StarExpr) + if !ok { + return "", false + } + selExpr, ok := starExpr.X.(*ast.SelectorExpr) + if !ok { + return "", false + } + + varPkg := selExpr.X.(*ast.Ident) + if varPkg.Name != "testing" { + return "", false + } + + varTypeName := selExpr.Sel.Name + ok = varTypeName == "B" || varTypeName == "T" + return varTypeName, ok +} + +// checkGoStmt traverses the goroutine and checks for the +// use of the forbidden *testing.(B, T) methods. +func checkGoStmt(pass *analysis.Pass, goStmt *ast.GoStmt) { + // Otherwise examine the goroutine to check for the forbidden methods. + ast.Inspect(goStmt, func(n ast.Node) bool { + selExpr, ok := n.(*ast.SelectorExpr) + if !ok { + return true + } + + _, bad := forbidden[selExpr.Sel.Name] + if !bad { + return true + } + + // Now filter out false positives by the import-path/type. + ident, ok := selExpr.X.(*ast.Ident) + if !ok { + return true + } + if ident.Obj == nil || ident.Obj.Decl == nil { + return true + } + field, ok := ident.Obj.Decl.(*ast.Field) + if !ok { + return true + } + if typeName, ok := typeIsTestingDotTOrB(field.Type); ok { + pass.ReportRangef(selExpr, "call to (*%s).%s from a non-test goroutine", typeName, selExpr.Sel) + } + return true + }) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go new file mode 100644 index 000000000..823227618 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -0,0 +1,188 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package tests defines an Analyzer that checks for common mistaken +// usages of tests and examples. +package tests + +import ( + "go/ast" + "go/types" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" +) + +const Doc = `check for common mistaken usages of tests and examples + +The tests checker walks Test, Benchmark and Example functions checking +malformed names, wrong signatures and examples documenting non-existent +identifiers. + +Please see the documentation for package testing in golang.org/pkg/testing +for the conventions that are enforced for Tests, Benchmarks, and Examples.` + +var Analyzer = &analysis.Analyzer{ + Name: "tests", + Doc: Doc, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + if !strings.HasSuffix(pass.Fset.File(f.Pos()).Name(), "_test.go") { + continue + } + for _, decl := range f.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok || fn.Recv != nil { + // Ignore non-functions or functions with receivers. + continue + } + + switch { + case strings.HasPrefix(fn.Name.Name, "Example"): + checkExample(pass, fn) + case strings.HasPrefix(fn.Name.Name, "Test"): + checkTest(pass, fn, "Test") + case strings.HasPrefix(fn.Name.Name, "Benchmark"): + checkTest(pass, fn, "Benchmark") + } + } + } + return nil, nil +} + +func isExampleSuffix(s string) bool { + r, size := utf8.DecodeRuneInString(s) + return size > 0 && unicode.IsLower(r) +} + +func isTestSuffix(name string) bool { + if len(name) == 0 { + // "Test" is ok. + return true + } + r, _ := utf8.DecodeRuneInString(name) + return !unicode.IsLower(r) +} + +func isTestParam(typ ast.Expr, wantType string) bool { + ptr, ok := typ.(*ast.StarExpr) + if !ok { + // Not a pointer. + return false + } + // No easy way of making sure it's a *testing.T or *testing.B: + // ensure the name of the type matches. + if name, ok := ptr.X.(*ast.Ident); ok { + return name.Name == wantType + } + if sel, ok := ptr.X.(*ast.SelectorExpr); ok { + return sel.Sel.Name == wantType + } + return false +} + +func lookup(pkg *types.Package, name string) []types.Object { + if o := pkg.Scope().Lookup(name); o != nil { + return []types.Object{o} + } + + var ret []types.Object + // Search through the imports to see if any of them define name. + // It's hard to tell in general which package is being tested, so + // for the purposes of the analysis, allow the object to appear + // in any of the imports. This guarantees there are no false positives + // because the example needs to use the object so it must be defined + // in the package or one if its imports. On the other hand, false + // negatives are possible, but should be rare. + for _, imp := range pkg.Imports() { + if obj := imp.Scope().Lookup(name); obj != nil { + ret = append(ret, obj) + } + } + return ret +} + +func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) { + fnName := fn.Name.Name + if params := fn.Type.Params; len(params.List) != 0 { + pass.Reportf(fn.Pos(), "%s should be niladic", fnName) + } + if results := fn.Type.Results; results != nil && len(results.List) != 0 { + pass.Reportf(fn.Pos(), "%s should return nothing", fnName) + } + + if fnName == "Example" { + // Nothing more to do. + return + } + + var ( + exName = strings.TrimPrefix(fnName, "Example") + elems = strings.SplitN(exName, "_", 3) + ident = elems[0] + objs = lookup(pass.Pkg, ident) + ) + if ident != "" && len(objs) == 0 { + // Check ExampleFoo and ExampleBadFoo. 
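+ // Editor's note, not part of the upstream golang.org/x/tools source: a
+ // sketch of the case reported here, assuming no identifier named Foo
+ // exists in the package under test or its imports:
+ //
+ //	func ExampleFoo() {} // reported: ExampleFoo refers to unknown identifier: Foo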
+ pass.Reportf(fn.Pos(), "%s refers to unknown identifier: %s", fnName, ident) + // Abort since obj is absent and no subsequent checks can be performed. + return + } + if len(elems) < 2 { + // Nothing more to do. + return + } + + if ident == "" { + // Check Example_suffix and Example_BadSuffix. + if residual := strings.TrimPrefix(exName, "_"); !isExampleSuffix(residual) { + pass.Reportf(fn.Pos(), "%s has malformed example suffix: %s", fnName, residual) + } + return + } + + mmbr := elems[1] + if !isExampleSuffix(mmbr) { + // Check ExampleFoo_Method and ExampleFoo_BadMethod. + found := false + // Check if Foo.Method exists in this package or its imports. + for _, obj := range objs { + if obj, _, _ := types.LookupFieldOrMethod(obj.Type(), true, obj.Pkg(), mmbr); obj != nil { + found = true + break + } + } + if !found { + pass.Reportf(fn.Pos(), "%s refers to unknown field or method: %s.%s", fnName, ident, mmbr) + } + } + if len(elems) == 3 && !isExampleSuffix(elems[2]) { + // Check ExampleFoo_Method_suffix and ExampleFoo_Method_Badsuffix. + pass.Reportf(fn.Pos(), "%s has malformed example suffix: %s", fnName, elems[2]) + } +} + +func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { + // Want functions with 0 results and 1 parameter. + if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || + fn.Type.Params == nil || + len(fn.Type.Params.List) != 1 || + len(fn.Type.Params.List[0].Names) > 1 { + return + } + + // The param must look like a *testing.T or *testing.B. + if !isTestParam(fn.Type.Params.List[0].Type, prefix[:1]) { + return + } + + if !isTestSuffix(fn.Name.Name[len(prefix):]) { + pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go new file mode 100644 index 000000000..92b37caff --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -0,0 +1,100 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The unmarshal package defines an Analyzer that checks for passing +// non-pointer or non-interface types to unmarshal and decode functions. +package unmarshal + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const Doc = `report passing non-pointer or non-interface values to unmarshal + +The unmarshal analysis reports calls to functions such as json.Unmarshal +in which the argument type is not a pointer or an interface.` + +var Analyzer = &analysis.Analyzer{ + Name: "unmarshal", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + switch pass.Pkg.Path() { + case "encoding/gob", "encoding/json", "encoding/xml", "encoding/asn1": + // These packages know how to use their own APIs. + // Sometimes they are testing what happens to incorrect programs. 
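+ // Editor's note, not part of the upstream golang.org/x/tools source: a
+ // hedged sketch of a call this pass reports (data and v are placeholders):
+ //
+ //	var v map[string]int
+ //	json.Unmarshal(data, v)  // reported: passes non-pointer as second argument
+ //	json.Unmarshal(data, &v) // not reported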
+ return nil, nil + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if fn == nil { + return // not a static call + } + + // Classify the callee (without allocating memory). + argidx := -1 + recv := fn.Type().(*types.Signature).Recv() + if fn.Name() == "Unmarshal" && recv == nil { + // "encoding/json".Unmarshal + // "encoding/xml".Unmarshal + // "encoding/asn1".Unmarshal + switch fn.Pkg().Path() { + case "encoding/json", "encoding/xml", "encoding/asn1": + argidx = 1 // func([]byte, interface{}) + } + } else if fn.Name() == "Decode" && recv != nil { + // (*"encoding/json".Decoder).Decode + // (* "encoding/gob".Decoder).Decode + // (* "encoding/xml".Decoder).Decode + t := recv.Type() + if ptr, ok := t.(*types.Pointer); ok { + t = ptr.Elem() + } + tname := t.(*types.Named).Obj() + if tname.Name() == "Decoder" { + switch tname.Pkg().Path() { + case "encoding/json", "encoding/xml", "encoding/gob": + argidx = 0 // func(interface{}) + } + } + } + if argidx < 0 { + return // not a function we are interested in + } + + if len(call.Args) < argidx+1 { + return // not enough arguments, e.g. called with return values of another function + } + + t := pass.TypesInfo.Types[call.Args[argidx]].Type + switch t.Underlying().(type) { + case *types.Pointer, *types.Interface: + return + } + + switch argidx { + case 0: + pass.Reportf(call.Lparen, "call of %s passes non-pointer", fn.Name()) + case 1: + pass.Reportf(call.Lparen, "call of %s passes non-pointer as second argument", fn.Name()) + } + }) + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go new file mode 100644 index 000000000..90896dd1b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -0,0 +1,325 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unreachable defines an Analyzer that checks for unreachable code. +package unreachable + +// TODO(adonovan): use the new cfg package, which is more precise. 
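+
+// Editor's note, not part of the upstream golang.org/x/tools source: a
+// hedged sketch of the report (f is a hypothetical function):
+//
+//	func f() int {
+//		return 1
+//		println("never runs") // reported: unreachable code
+//	}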
+ +import ( + "go/ast" + "go/token" + "log" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for unreachable code + +The unreachable analyzer finds statements that execution can never reach +because they are preceded by an return statement, a call to panic, an +infinite loop, or similar constructs.` + +var Analyzer = &analysis.Analyzer{ + Name: "unreachable", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + var body *ast.BlockStmt + switch n := n.(type) { + case *ast.FuncDecl: + body = n.Body + case *ast.FuncLit: + body = n.Body + } + if body == nil { + return + } + d := &deadState{ + pass: pass, + hasBreak: make(map[ast.Stmt]bool), + hasGoto: make(map[string]bool), + labels: make(map[string]ast.Stmt), + } + d.findLabels(body) + d.reachable = true + d.findDead(body) + }) + return nil, nil +} + +type deadState struct { + pass *analysis.Pass + hasBreak map[ast.Stmt]bool + hasGoto map[string]bool + labels map[string]ast.Stmt + breakTarget ast.Stmt + + reachable bool +} + +// findLabels gathers information about the labels defined and used by stmt +// and about which statements break, whether a label is involved or not. +func (d *deadState) findLabels(stmt ast.Stmt) { + switch x := stmt.(type) { + default: + log.Fatalf("%s: internal error in findLabels: unexpected statement %T", d.pass.Fset.Position(x.Pos()), x) + + case *ast.AssignStmt, + *ast.BadStmt, + *ast.DeclStmt, + *ast.DeferStmt, + *ast.EmptyStmt, + *ast.ExprStmt, + *ast.GoStmt, + *ast.IncDecStmt, + *ast.ReturnStmt, + *ast.SendStmt: + // no statements inside + + case *ast.BlockStmt: + for _, stmt := range x.List { + d.findLabels(stmt) + } + + case *ast.BranchStmt: + switch x.Tok { + case token.GOTO: + if x.Label != nil { + d.hasGoto[x.Label.Name] = true + } + + case token.BREAK: + stmt := d.breakTarget + if x.Label != nil { + stmt = d.labels[x.Label.Name] + } + if stmt != nil { + d.hasBreak[stmt] = true + } + } + + case *ast.IfStmt: + d.findLabels(x.Body) + if x.Else != nil { + d.findLabels(x.Else) + } + + case *ast.LabeledStmt: + d.labels[x.Label.Name] = x.Stmt + d.findLabels(x.Stmt) + + // These cases are all the same, but the x.Body only works + // when the specific type of x is known, so the cases cannot + // be merged. + case *ast.ForStmt: + outer := d.breakTarget + d.breakTarget = x + d.findLabels(x.Body) + d.breakTarget = outer + + case *ast.RangeStmt: + outer := d.breakTarget + d.breakTarget = x + d.findLabels(x.Body) + d.breakTarget = outer + + case *ast.SelectStmt: + outer := d.breakTarget + d.breakTarget = x + d.findLabels(x.Body) + d.breakTarget = outer + + case *ast.SwitchStmt: + outer := d.breakTarget + d.breakTarget = x + d.findLabels(x.Body) + d.breakTarget = outer + + case *ast.TypeSwitchStmt: + outer := d.breakTarget + d.breakTarget = x + d.findLabels(x.Body) + d.breakTarget = outer + + case *ast.CommClause: + for _, stmt := range x.Body { + d.findLabels(stmt) + } + + case *ast.CaseClause: + for _, stmt := range x.Body { + d.findLabels(stmt) + } + } +} + +// findDead walks the statement looking for dead code. +// If d.reachable is false on entry, stmt itself is dead. 
+// When findDead returns, d.reachable tells whether the +// statement following stmt is reachable. +func (d *deadState) findDead(stmt ast.Stmt) { + // Is this a labeled goto target? + // If so, assume it is reachable due to the goto. + // This is slightly conservative, in that we don't + // check that the goto is reachable, so + // L: goto L + // will not provoke a warning. + // But it's good enough. + if x, isLabel := stmt.(*ast.LabeledStmt); isLabel && d.hasGoto[x.Label.Name] { + d.reachable = true + } + + if !d.reachable { + switch stmt.(type) { + case *ast.EmptyStmt: + // do not warn about unreachable empty statements + default: + d.pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + End: stmt.End(), + Message: "unreachable code", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove", + TextEdits: []analysis.TextEdit{{ + Pos: stmt.Pos(), + End: stmt.End(), + }}, + }}, + }) + d.reachable = true // silence error about next statement + } + } + + switch x := stmt.(type) { + default: + log.Fatalf("%s: internal error in findDead: unexpected statement %T", d.pass.Fset.Position(x.Pos()), x) + + case *ast.AssignStmt, + *ast.BadStmt, + *ast.DeclStmt, + *ast.DeferStmt, + *ast.EmptyStmt, + *ast.GoStmt, + *ast.IncDecStmt, + *ast.SendStmt: + // no control flow + + case *ast.BlockStmt: + for _, stmt := range x.List { + d.findDead(stmt) + } + + case *ast.BranchStmt: + switch x.Tok { + case token.BREAK, token.GOTO, token.FALLTHROUGH: + d.reachable = false + case token.CONTINUE: + // NOTE: We accept "continue" statements as terminating. + // They are not necessary in the spec definition of terminating, + // because a continue statement cannot be the final statement + // before a return. But for the more general problem of syntactically + // identifying dead code, continue redirects control flow just + // like the other terminating statements. + d.reachable = false + } + + case *ast.ExprStmt: + // Call to panic? + call, ok := x.X.(*ast.CallExpr) + if ok { + name, ok := call.Fun.(*ast.Ident) + if ok && name.Name == "panic" && name.Obj == nil { + d.reachable = false + } + } + + case *ast.ForStmt: + d.findDead(x.Body) + d.reachable = x.Cond != nil || d.hasBreak[x] + + case *ast.IfStmt: + d.findDead(x.Body) + if x.Else != nil { + r := d.reachable + d.reachable = true + d.findDead(x.Else) + d.reachable = d.reachable || r + } else { + // might not have executed if statement + d.reachable = true + } + + case *ast.LabeledStmt: + d.findDead(x.Stmt) + + case *ast.RangeStmt: + d.findDead(x.Body) + d.reachable = true + + case *ast.ReturnStmt: + d.reachable = false + + case *ast.SelectStmt: + // NOTE: Unlike switch and type switch below, we don't care + // whether a select has a default, because a select without a + // default blocks until one of the cases can run. That's different + // from a switch without a default, which behaves like it has + // a default with an empty body. 
+ anyReachable := false + for _, comm := range x.Body.List { + d.reachable = true + for _, stmt := range comm.(*ast.CommClause).Body { + d.findDead(stmt) + } + anyReachable = anyReachable || d.reachable + } + d.reachable = anyReachable || d.hasBreak[x] + + case *ast.SwitchStmt: + anyReachable := false + hasDefault := false + for _, cas := range x.Body.List { + cc := cas.(*ast.CaseClause) + if cc.List == nil { + hasDefault = true + } + d.reachable = true + for _, stmt := range cc.Body { + d.findDead(stmt) + } + anyReachable = anyReachable || d.reachable + } + d.reachable = anyReachable || d.hasBreak[x] || !hasDefault + + case *ast.TypeSwitchStmt: + anyReachable := false + hasDefault := false + for _, cas := range x.Body.List { + cc := cas.(*ast.CaseClause) + if cc.List == nil { + hasDefault = true + } + d.reachable = true + for _, stmt := range cc.Body { + d.findDead(stmt) + } + anyReachable = anyReachable || d.reachable + } + d.reachable = anyReachable || d.hasBreak[x] || !hasDefault + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go new file mode 100644 index 000000000..ed86e5ebf --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -0,0 +1,168 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeptr defines an Analyzer that checks for invalid +// conversions of uintptr to unsafe.Pointer. +package unsafeptr + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for invalid conversions of uintptr to unsafe.Pointer + +The unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer +to convert integers to pointers. A conversion from uintptr to +unsafe.Pointer is invalid if it implies that there is a uintptr-typed +word in memory that holds a pointer value, because that word will be +invisible to stack copying and to the garbage collector.` + +var Analyzer = &analysis.Analyzer{ + Name: "unsafeptr", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + (*ast.StarExpr)(nil), + (*ast.UnaryExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch x := n.(type) { + case *ast.CallExpr: + if len(x.Args) == 1 && + hasBasicType(pass.TypesInfo, x.Fun, types.UnsafePointer) && + hasBasicType(pass.TypesInfo, x.Args[0], types.Uintptr) && + !isSafeUintptr(pass.TypesInfo, x.Args[0]) { + pass.ReportRangef(x, "possible misuse of unsafe.Pointer") + } + case *ast.StarExpr: + if t := pass.TypesInfo.Types[x].Type; isReflectHeader(t) { + pass.ReportRangef(x, "possible misuse of %s", t) + } + case *ast.UnaryExpr: + if x.Op != token.AND { + return + } + if t := pass.TypesInfo.Types[x.X].Type; isReflectHeader(t) { + pass.ReportRangef(x, "possible misuse of %s", t) + } + } + }) + return nil, nil +} + +// isSafeUintptr reports whether x - already known to be a uintptr - +// is safe to convert to unsafe.Pointer. 
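+//
+// Editor's note, not part of the upstream golang.org/x/tools source: a
+// hedged sketch of the distinction, where u and off are placeholder
+// uintptr values and x is any addressable variable:
+//
+//	p := unsafe.Pointer(u)                                 // reported: plain uintptr variable
+//	q := unsafe.Pointer(uintptr(unsafe.Pointer(&x)) + off) // allowed: conversion with arithmetic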
+func isSafeUintptr(info *types.Info, x ast.Expr) bool { + // Check unsafe.Pointer safety rules according to + // https://golang.org/pkg/unsafe/#Pointer. + + switch x := analysisutil.Unparen(x).(type) { + case *ast.SelectorExpr: + // "(6) Conversion of a reflect.SliceHeader or + // reflect.StringHeader Data field to or from Pointer." + if x.Sel.Name != "Data" { + break + } + // reflect.SliceHeader and reflect.StringHeader are okay, + // but only if they are pointing at a real slice or string. + // It's not okay to do: + // var x SliceHeader + // x.Data = uintptr(unsafe.Pointer(...)) + // ... use x ... + // p := unsafe.Pointer(x.Data) + // because in the middle the garbage collector doesn't + // see x.Data as a pointer and so x.Data may be dangling + // by the time we get to the conversion at the end. + // For now approximate by saying that *Header is okay + // but Header is not. + pt, ok := info.Types[x.X].Type.(*types.Pointer) + if ok && isReflectHeader(pt.Elem()) { + return true + } + + case *ast.CallExpr: + // "(5) Conversion of the result of reflect.Value.Pointer or + // reflect.Value.UnsafeAddr from uintptr to Pointer." + if len(x.Args) != 0 { + break + } + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + break + } + switch sel.Sel.Name { + case "Pointer", "UnsafeAddr": + t, ok := info.Types[sel.X].Type.(*types.Named) + if ok && t.Obj().Pkg().Path() == "reflect" && t.Obj().Name() == "Value" { + return true + } + } + } + + // "(3) Conversion of a Pointer to a uintptr and back, with arithmetic." + return isSafeArith(info, x) +} + +// isSafeArith reports whether x is a pointer arithmetic expression that is safe +// to convert to unsafe.Pointer. +func isSafeArith(info *types.Info, x ast.Expr) bool { + switch x := analysisutil.Unparen(x).(type) { + case *ast.CallExpr: + // Base case: initial conversion from unsafe.Pointer to uintptr. + return len(x.Args) == 1 && + hasBasicType(info, x.Fun, types.Uintptr) && + hasBasicType(info, x.Args[0], types.UnsafePointer) + + case *ast.BinaryExpr: + // "It is valid both to add and to subtract offsets from a + // pointer in this way. It is also valid to use &^ to round + // pointers, usually for alignment." + switch x.Op { + case token.ADD, token.SUB, token.AND_NOT: + // TODO(mdempsky): Match compiler + // semantics. ADD allows a pointer on either + // side; SUB and AND_NOT don't care about RHS. + return isSafeArith(info, x.X) && !isSafeArith(info, x.Y) + } + } + + return false +} + +// hasBasicType reports whether x's type is a types.Basic with the given kind. +func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { + t := info.Types[x].Type + if t != nil { + t = t.Underlying() + } + b, ok := t.(*types.Basic) + return ok && b.Kind() == kind +} + +// isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. +func isReflectHeader(t types.Type) bool { + if named, ok := t.(*types.Named); ok { + if obj := named.Obj(); obj.Pkg() != nil && obj.Pkg().Path() == "reflect" { + switch obj.Name() { + case "SliceHeader", "StringHeader": + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go new file mode 100644 index 000000000..bececee7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedresult defines an analyzer that checks for unused +// results of calls to certain pure functions. +package unusedresult + +import ( + "go/ast" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" +) + +// TODO(adonovan): make this analysis modular: export a mustUseResult +// fact for each function that tail-calls one of the functions that we +// check, and check those functions too. + +const Doc = `check for unused results of calls to some functions + +Some functions like fmt.Errorf return a result and have no side effects, +so it is always a mistake to discard the result. This analyzer reports +calls to certain functions in which the result of the call is ignored. + +The set of functions may be controlled using flags.` + +var Analyzer = &analysis.Analyzer{ + Name: "unusedresult", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +// flags +var funcs, stringMethods stringSetFlag + +func init() { + // TODO(adonovan): provide a comment syntax to allow users to + // add their functions to this set using facts. + funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse,context.WithValue,context.WithCancel,context.WithDeadline,context.WithTimeout") + Analyzer.Flags.Var(&funcs, "funcs", + "comma-separated list of functions whose results must be used") + + stringMethods.Set("Error,String") + Analyzer.Flags.Var(&stringMethods, "stringmethods", + "comma-separated list of names of methods of type func() string whose results must be used") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.ExprStmt)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call, ok := analysisutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) + if !ok { + return // not a call statement + } + fun := analysisutil.Unparen(call.Fun) + + if pass.TypesInfo.Types[fun].IsType() { + return // a conversion, not a call + } + + selector, ok := fun.(*ast.SelectorExpr) + if !ok { + return // neither a method call nor a qualified ident + } + + sel, ok := pass.TypesInfo.Selections[selector] + if ok && sel.Kind() == types.MethodVal { + // method (e.g. foo.String()) + obj := sel.Obj().(*types.Func) + sig := sel.Type().(*types.Signature) + if types.Identical(sig, sigNoArgsStringResult) { + if stringMethods[obj.Name()] { + pass.Reportf(call.Lparen, "result of (%s).%s call not used", + sig.Recv().Type(), obj.Name()) + } + } + } else if !ok { + // package-qualified function (e.g. fmt.Errorf) + obj := pass.TypesInfo.Uses[selector.Sel] + if obj, ok := obj.(*types.Func); ok { + qname := obj.Pkg().Path() + "." 
+ obj.Name() + if funcs[qname] { + pass.Reportf(call.Lparen, "result of %v call not used", qname) + } + } + } + }) + return nil, nil +} + +// func() string +var sigNoArgsStringResult = types.NewSignature(nil, nil, + types.NewTuple(types.NewVar(token.NoPos, nil, "", types.Typ[types.String])), + false) + +type stringSetFlag map[string]bool + +func (ss *stringSetFlag) String() string { + var items []string + for item := range *ss { + items = append(items, item) + } + sort.Strings(items) + return strings.Join(items, ",") +} + +func (ss *stringSetFlag) Set(s string) error { + m := make(map[string]bool) // clobber previous value + if s != "" { + for _, name := range strings.Split(s, ",") { + if name == "" { + continue // TODO: report error? proceed? + } + m[name] = true + } + } + *ss = m + return nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go new file mode 100644 index 000000000..37a0e784b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -0,0 +1,184 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedwrite checks for unused writes to the elements of a struct or array object. +package unusedwrite + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +// Doc is a documentation string. +const Doc = `checks for unused writes + +The analyzer reports instances of writes to struct fields and +arrays that are never read. Specifically, when a struct object +or an array is copied, its elements are copied implicitly by +the compiler, and any element write to this copy does nothing +with the original object. + +For example: + + type T struct { x int } + func f(input []T) { + for i, v := range input { // v is a copy + v.x = i // unused write to field x + } + } + +Another example is about non-pointer receiver: + + type T struct { x int } + func (t T) f() { // t is a copy + t.x = i // unused write to field x + } +` + +// Analyzer reports instances of writes to struct fields and arrays +//that are never read. +var Analyzer = &analysis.Analyzer{ + Name: "unusedwrite", + Doc: Doc, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Check the writes to struct and array objects. + checkStore := func(store *ssa.Store) { + // Consider field/index writes to an object whose elements are copied and not shared. + // MapUpdate is excluded since only the reference of the map is copied. + switch addr := store.Addr.(type) { + case *ssa.FieldAddr: + if isDeadStore(store, addr.X, addr) { + // Report the bug. + pass.Reportf(store.Pos(), + "unused write to field %s", + getFieldName(addr.X.Type(), addr.Field)) + } + case *ssa.IndexAddr: + if isDeadStore(store, addr.X, addr) { + // Report the bug. + pass.Reportf(store.Pos(), + "unused write to array index %s", addr.Index) + } + } + } + + ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + for _, fn := range ssainput.SrcFuncs { + // Visit each block. No need to visit fn.Recover. + for _, blk := range fn.Blocks { + for _, instr := range blk.Instrs { + // Identify writes. 
+ if store, ok := instr.(*ssa.Store); ok { + checkStore(store) + } + } + } + } + return nil, nil +} + +// isDeadStore determines whether a field/index write to an object is dead. +// Argument "obj" is the object, and "addr" is the instruction fetching the field/index. +func isDeadStore(store *ssa.Store, obj ssa.Value, addr ssa.Instruction) bool { + // Consider only struct or array objects. + if !hasStructOrArrayType(obj) { + return false + } + // Check liveness: if the value is used later, then don't report the write. + for _, ref := range *obj.Referrers() { + if ref == store || ref == addr { + continue + } + switch ins := ref.(type) { + case ssa.CallInstruction: + return false + case *ssa.FieldAddr: + // Check whether the same field is used. + if ins.X == obj { + if faddr, ok := addr.(*ssa.FieldAddr); ok { + if faddr.Field == ins.Field { + return false + } + } + } + // Otherwise another field is used, and this usage doesn't count. + continue + case *ssa.IndexAddr: + if ins.X == obj { + return false + } + continue // Otherwise another object is used + case *ssa.Lookup: + if ins.X == obj { + return false + } + continue // Otherwise another object is used + case *ssa.Store: + if ins.Val == obj { + return false + } + continue // Otherwise other object is stored + default: // consider live if the object is used in any other instruction + return false + } + } + return true +} + +// isStructOrArray returns whether the underlying type is struct or array. +func isStructOrArray(tp types.Type) bool { + if named, ok := tp.(*types.Named); ok { + tp = named.Underlying() + } + switch tp.(type) { + case *types.Array: + return true + case *types.Struct: + return true + } + return false +} + +// hasStructOrArrayType returns whether a value is of struct or array type. +func hasStructOrArrayType(v ssa.Value) bool { + if instr, ok := v.(ssa.Instruction); ok { + if alloc, ok := instr.(*ssa.Alloc); ok { + // Check the element type of an allocated register (which always has pointer type) + // e.g., for + // func (t T) f() { ...} + // the receiver object is of type *T: + // t0 = local T (t) *T + if tp, ok := alloc.Type().(*types.Pointer); ok { + return isStructOrArray(tp.Elem()) + } + return false + } + } + return isStructOrArray(v.Type()) +} + +// getFieldName returns the name of a field in a struct. +// It the field is not found, then it returns the string format of the index. +// +// For example, for struct T {x int, y int), getFieldName(*T, 1) returns "y". +func getFieldName(tp types.Type, index int) string { + if pt, ok := tp.(*types.Pointer); ok { + tp = pt.Elem() + } + if named, ok := tp.(*types.Named); ok { + tp = named.Underlying() + } + if stp, ok := tp.(*types.Struct); ok { + return stp.Field(index).Name() + } + return fmt.Sprintf("%d", index) +} diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go new file mode 100644 index 000000000..23e57bf02 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -0,0 +1,130 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "fmt" + "reflect" + "strings" + "unicode" +) + +// Validate reports an error if any of the analyzers are misconfigured. +// Checks include: +// that the name is a valid identifier; +// that the Requires graph is acyclic; +// that analyzer fact types are unique; +// that each fact type is a pointer. 
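+//
+// Editor's note, not part of the upstream golang.org/x/tools source: a
+// hedged sketch of caller-side use, where myAnalyzer is a hypothetical
+// *analysis.Analyzer:
+//
+//	if err := analysis.Validate([]*analysis.Analyzer{myAnalyzer}); err != nil {
+//		log.Fatal(err)
+//	}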
+func Validate(analyzers []*Analyzer) error { + // Map each fact type to its sole generating analyzer. + factTypes := make(map[reflect.Type]*Analyzer) + + // Traverse the Requires graph, depth first. + const ( + white = iota + grey + black + finished + ) + color := make(map[*Analyzer]uint8) + var visit func(a *Analyzer) error + visit = func(a *Analyzer) error { + if a == nil { + return fmt.Errorf("nil *Analyzer") + } + if color[a] == white { + color[a] = grey + + // names + if !validIdent(a.Name) { + return fmt.Errorf("invalid analyzer name %q", a) + } + + if a.Doc == "" { + return fmt.Errorf("analyzer %q is undocumented", a) + } + + // fact types + for _, f := range a.FactTypes { + if f == nil { + return fmt.Errorf("analyzer %s has nil FactType", a) + } + t := reflect.TypeOf(f) + if prev := factTypes[t]; prev != nil { + return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", + t, a, prev) + } + if t.Kind() != reflect.Ptr { + return fmt.Errorf("%s: fact type %s is not a pointer", a, t) + } + factTypes[t] = a + } + + // recursion + for _, req := range a.Requires { + if err := visit(req); err != nil { + return err + } + } + color[a] = black + } + + if color[a] == grey { + stack := []*Analyzer{a} + inCycle := map[string]bool{} + for len(stack) > 0 { + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if color[current] == grey && !inCycle[current.Name] { + inCycle[current.Name] = true + stack = append(stack, current.Requires...) + } + } + return &CycleInRequiresGraphError{AnalyzerNames: inCycle} + } + + return nil + } + for _, a := range analyzers { + if err := visit(a); err != nil { + return err + } + } + + // Reject duplicates among analyzers. + // Precondition: color[a] == black. + // Postcondition: color[a] == finished. + for _, a := range analyzers { + if color[a] == finished { + return fmt.Errorf("duplicate analyzer: %s", a.Name) + } + color[a] = finished + } + + return nil +} + +func validIdent(name string) bool { + for i, r := range name { + if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) { + return false + } + } + return name != "" +} + +type CycleInRequiresGraphError struct { + AnalyzerNames map[string]bool +} + +func (e *CycleInRequiresGraphError) Error() string { + var b strings.Builder + b.WriteString("cycle detected involving the following analyzers:") + for n := range e.AnalyzerNames { + b.WriteByte(' ') + b.WriteString(n) + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 000000000..6b7052b89 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. 
+// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) 
+ if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case 
*ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
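+//
+// Illustrative sketch (editorial addition, not upstream documentation): an
+// editor tool reporting what the cursor is over might combine this with
+// PathEnclosingInterval:
+//
+//	path, _ := astutil.PathEnclosingInterval(file, pos, pos)
+//	fmt.Println(astutil.NodeDescription(path[0]))
+//
+// where file and pos are the hypothetical *ast.File and token.Pos at hand.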
+// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case 
*ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 000000000..2087ceec9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,482 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. 
+ // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. 
+ for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. 
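+//
+// Illustrative sketch (editorial addition, not upstream documentation): a
+// migration tool could rewrite a module path and then re-print the file:
+//
+//	if astutil.RewriteImport(fset, file, "example.com/old", "example.com/new") {
+//		// file changed; format it again, e.g. with go/format.
+//	}
+//
+// The paths above are placeholders; fset and file come from go/parser.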
+func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
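+//
+// Illustrative sketch (editorial addition, not upstream documentation):
+//
+//	for _, group := range astutil.Imports(fset, file) {
+//		for _, spec := range group {
+//			fmt.Println(spec.Path.Value)
+//		}
+//		fmt.Println() // groups were separated by blank lines in the source
+//	}
+//
+// where fset and file are the hypothetical results of parsing a Go file.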
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 000000000..b949fc840 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,483 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" + + "golang.org/x/tools/internal/typeparams" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. 
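+//
+// Illustrative sketch (editorial addition, not upstream documentation):
+// renaming an identifier during traversal might look like
+//
+//	astutil.Apply(file, nil, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "oldName" {
+//			c.Replace(ast.NewIdent("newName"))
+//		}
+//		return true
+//	})
+//
+// "oldName" and "newName" are placeholders for the sake of the example.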
+type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + if typeparams.IsListExpr(n) { + a.applyList(n, "ElemList") + } else { + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 000000000..919d5305a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 000000000..af5e17fee --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,186 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. 
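+//
+// Illustrative sketch (editorial addition, not upstream documentation): a
+// typical filtered traversal over a package's files looks like
+//
+//	insp := inspector.New(files) // files is the hypothetical []*ast.File
+//	insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
+//		call := n.(*ast.CallExpr)
+//		fmt.Println("call at", call.Lparen)
+//	})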
+func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) + index int // 1 + index of corresponding pop event, or 0 if this is a pop +} + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + f(ev.node) + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.typ&mask != 0 { + if ev.index > 0 { + // push + if !f(ev.node, true) { + i = ev.index // jump to corresponding pop + 1 + continue + } + } else { + // pop + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > 0 { + // push + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = ev.index + stack = stack[:len(stack)-1] + continue + } + } + } else { + // pop + if ev.typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent. + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. 
+ capacity := extent * 33 / 100 + if capacity > 1e6 { + capacity = 1e6 // impose some reasonable maximum + } + events := make([]event, 0, capacity) + + var stack []event + for _, f := range files { + ast.Inspect(f, func(n ast.Node) bool { + if n != nil { + // push + ev := event{ + node: n, + typ: typeOf(n), + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) + } else { + // pop + ev := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + events[ev.index].index = len(events) + 1 // make push refer to pop + + ev.index = 0 // turn ev into a pop event + events = append(events, ev) + } + return true + }) + } + + return events +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 000000000..b6b00cf2e --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64. +// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import "go/ast" + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +// +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return 1<<64 - 1 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/go/cfg/builder.go b/vendor/golang.org/x/tools/go/cfg/builder.go new file mode 100644 index 000000000..7f95a2961 --- /dev/null +++ b/vendor/golang.org/x/tools/go/cfg/builder.go @@ -0,0 +1,510 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cfg + +// This file implements the CFG construction pass. 
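+//
+// Editorial note (illustrative, not part of the upstream file): the builder
+// type below is internal; callers construct a CFG from a function body, e.g.
+//
+//	g := cfg.New(fn.Body, func(call *ast.CallExpr) bool { return true })
+//	fmt.Println(g.Format(fset))
+//
+// where fn is a hypothetical *ast.FuncDecl, fset its token.FileSet, and the
+// callback conservatively assumes every call may return.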
+ +import ( + "fmt" + "go/ast" + "go/token" +) + +type builder struct { + cfg *CFG + mayReturn func(*ast.CallExpr) bool + current *Block + lblocks map[*ast.Object]*lblock // labeled blocks + targets *targets // linked stack of branch targets +} + +func (b *builder) stmt(_s ast.Stmt) { + // The label of the current statement. If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.BadStmt, + *ast.SendStmt, + *ast.IncDecStmt, + *ast.GoStmt, + *ast.DeferStmt, + *ast.EmptyStmt, + *ast.AssignStmt: + // No effect on control flow. + b.add(s) + + case *ast.ExprStmt: + b.add(s) + if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) { + // Calls to panic, os.Exit, etc, never return. + b.current = b.newBlock("unreachable.call") + } + + case *ast.DeclStmt: + // Treat each var ValueSpec as a separate statement. + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if spec, ok := spec.(*ast.ValueSpec); ok { + b.add(spec) + } + } + } + + case *ast.LabeledStmt: + label = b.labeledBlock(s.Label) + b.jump(label._goto) + b.current = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(g, s.Stmt, label) + + case *ast.ReturnStmt: + b.add(s) + b.current = b.newBlock("unreachable.return") + + case *ast.BranchStmt: + b.branchStmt(s) + + case *ast.BlockStmt: + b.stmtList(s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(s.Init) + } + then := b.newBlock("if.then") + done := b.newBlock("if.done") + _else := done + if s.Else != nil { + _else = b.newBlock("if.else") + } + b.add(s.Cond) + b.ifelse(then, _else) + b.current = then + b.stmt(s.Body) + b.jump(done) + + if s.Else != nil { + b.current = _else + b.stmt(s.Else) + b.jump(done) + } + + b.current = done + + case *ast.SwitchStmt: + b.switchStmt(s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(s, label) + + case *ast.SelectStmt: + b.selectStmt(s, label) + + case *ast.ForStmt: + b.forStmt(s, label) + + case *ast.RangeStmt: + b.rangeStmt(s, label) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +func (b *builder) stmtList(list []ast.Stmt) { + for _, s := range list { + b.stmt(s) + } +} + +func (b *builder) branchStmt(s *ast.BranchStmt) { + var block *Block + switch s.Tok { + case token.BREAK: + if s.Label != nil { + if lb := b.labeledBlock(s.Label); lb != nil { + block = lb._break + } + } else { + for t := b.targets; t != nil && block == nil; t = t.tail { + block = t._break + } + } + + case token.CONTINUE: + if s.Label != nil { + if lb := b.labeledBlock(s.Label); lb != nil { + block = lb._continue + } + } else { + for t := b.targets; t != nil && block == nil; t = t.tail { + block = t._continue + } + } + + case token.FALLTHROUGH: + for t := b.targets; t != nil && block == nil; t = t.tail { + block = t._fallthrough + } + + case token.GOTO: + if s.Label != nil { + block = b.labeledBlock(s.Label)._goto + } + } + if block == nil { + block = b.newBlock("undefined.branch") + } + b.jump(block) + b.current = b.newBlock("unreachable.branch") +} + +func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) { + if s.Init != nil { + b.stmt(s.Init) + } + if s.Tag != nil { + b.add(s.Tag) + } + done := b.newBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. 
+ // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var defaultBody *[]ast.Stmt + var defaultFallthrough *Block + var fallthru, defaultBlock *Block + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = b.newBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = b.newBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + defaultBody = &cc.Body + defaultFallthrough = fallthru + defaultBlock = body + continue + } + + var nextCond *Block + for _, cond := range cc.List { + nextCond = b.newBlock("switch.next") + b.add(cond) // one half of the tag==cond condition + b.ifelse(body, nextCond) + b.current = nextCond + } + b.current = body + b.targets = &targets{ + tail: b.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(cc.Body) + b.targets = b.targets.tail + b.jump(done) + b.current = nextCond + } + if defaultBlock != nil { + b.jump(defaultBlock) + b.current = defaultBlock + b.targets = &targets{ + tail: b.targets, + _break: done, + _fallthrough: defaultFallthrough, + } + b.stmtList(*defaultBody) + b.targets = b.targets.tail + } + b.jump(done) + b.current = done +} + +func (b *builder) typeSwitchStmt(s *ast.TypeSwitchStmt, label *lblock) { + if s.Init != nil { + b.stmt(s.Init) + } + if s.Assign != nil { + b.add(s.Assign) + } + + done := b.newBlock("typeswitch.done") + if label != nil { + label._break = done + } + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + default_ = cc + continue + } + body := b.newBlock("typeswitch.body") + var next *Block + for _, casetype := range cc.List { + next = b.newBlock("typeswitch.next") + // casetype is a type, so don't call b.add(casetype). + // This block logically contains a type assertion, + // x.(casetype), but it's unclear how to represent x. + _ = casetype + b.ifelse(body, next) + b.current = next + } + b.current = body + b.typeCaseBody(cc, done) + b.current = next + } + if default_ != nil { + b.typeCaseBody(default_, done) + } else { + b.jump(done) + } + b.current = done +} + +func (b *builder) typeCaseBody(cc *ast.CaseClause, done *Block) { + b.targets = &targets{ + tail: b.targets, + _break: done, + } + b.stmtList(cc.Body) + b.targets = b.targets.tail + b.jump(done) +} + +func (b *builder) selectStmt(s *ast.SelectStmt, label *lblock) { + // First evaluate channel expressions. + // TODO(adonovan): fix: evaluate only channel exprs here. 
+ for _, clause := range s.Body.List { + if comm := clause.(*ast.CommClause).Comm; comm != nil { + b.stmt(comm) + } + } + + done := b.newBlock("select.done") + if label != nil { + label._break = done + } + + var defaultBody *[]ast.Stmt + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + defaultBody = &clause.Body + continue + } + body := b.newBlock("select.body") + next := b.newBlock("select.next") + b.ifelse(body, next) + b.current = body + b.targets = &targets{ + tail: b.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + // nop + case *ast.AssignStmt: // x := <-states[state].Chan + b.add(comm.Lhs[0]) + } + b.stmtList(clause.Body) + b.targets = b.targets.tail + b.jump(done) + b.current = next + } + if defaultBody != nil { + b.targets = &targets{ + tail: b.targets, + _break: done, + } + b.stmtList(*defaultBody) + b.targets = b.targets.tail + b.jump(done) + } + b.current = done +} + +func (b *builder) forStmt(s *ast.ForStmt, label *lblock) { + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(s.Init) + } + body := b.newBlock("for.body") + done := b.newBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = b.newBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = b.newBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + b.jump(loop) + b.current = loop + if loop != body { + b.add(s.Cond) + b.ifelse(body, done) + b.current = body + } + b.targets = &targets{ + tail: b.targets, + _break: done, + _continue: cont, + } + b.stmt(s.Body) + b.targets = b.targets.tail + b.jump(cont) + + if s.Post != nil { + b.current = cont + b.stmt(s.Post) + b.jump(loop) // back-edge + } + b.current = done +} + +func (b *builder) rangeStmt(s *ast.RangeStmt, label *lblock) { + b.add(s.X) + + if s.Key != nil { + b.add(s.Key) + } + if s.Value != nil { + b.add(s.Value) + } + + // ... + // loop: (target of continue) + // if ... goto body else done + // body: + // ... + // jump loop + // done: (target of break) + + loop := b.newBlock("range.loop") + b.jump(loop) + b.current = loop + + body := b.newBlock("range.body") + done := b.newBlock("range.done") + b.ifelse(body, done) + b.current = body + + if label != nil { + label._break = done + label._continue = loop + } + b.targets = &targets{ + tail: b.targets, + _break: done, + _continue: loop, + } + b.stmt(s.Body) + b.targets = b.targets.tail + b.jump(loop) // back-edge + b.current = done +} + +// -------- helpers -------- + +// Destinations associated with unlabeled for/switch/select stmts. +// We push/pop one of these as we enter/leave each construct and for +// each BranchStmt we scan for the innermost target of the right type. +// +type targets struct { + tail *targets // rest of stack + _break *Block + _continue *Block + _fallthrough *Block +} + +// Destinations associated with a labeled block. +// We populate these as labels are encountered in forward gotos or +// labeled statements. +// +type lblock struct { + _goto *Block + _break *Block + _continue *Block +} + +// labeledBlock returns the branch target associated with the +// specified label, creating it if needed. 
+// +func (b *builder) labeledBlock(label *ast.Ident) *lblock { + lb := b.lblocks[label.Obj] + if lb == nil { + lb = &lblock{_goto: b.newBlock(label.Name)} + if b.lblocks == nil { + b.lblocks = make(map[*ast.Object]*lblock) + } + b.lblocks[label.Obj] = lb + } + return lb +} + +// newBlock appends a new unconnected basic block to b.cfg's block +// slice and returns it. +// It does not automatically become the current block. +// comment is an optional string for more readable debugging output. +func (b *builder) newBlock(comment string) *Block { + g := b.cfg + block := &Block{ + Index: int32(len(g.Blocks)), + comment: comment, + } + block.Succs = block.succs2[:0] + g.Blocks = append(g.Blocks, block) + return block +} + +func (b *builder) add(n ast.Node) { + b.current.Nodes = append(b.current.Nodes, n) +} + +// jump adds an edge from the current block to the target block, +// and sets b.current to nil. +func (b *builder) jump(target *Block) { + b.current.Succs = append(b.current.Succs, target) + b.current = nil +} + +// ifelse emits edges from the current block to the t and f blocks, +// and sets b.current to nil. +func (b *builder) ifelse(t, f *Block) { + b.current.Succs = append(b.current.Succs, t, f) + b.current = nil +} diff --git a/vendor/golang.org/x/tools/go/cfg/cfg.go b/vendor/golang.org/x/tools/go/cfg/cfg.go new file mode 100644 index 000000000..3ebc65f60 --- /dev/null +++ b/vendor/golang.org/x/tools/go/cfg/cfg.go @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cfg constructs a simple control-flow graph (CFG) of the +// statements and expressions within a single function. +// +// Use cfg.New to construct the CFG for a function body. +// +// The blocks of the CFG contain all the function's non-control +// statements. The CFG does not contain control statements such as If, +// Switch, Select, and Branch, but does contain their subexpressions. +// For example, this source code: +// +// if x := f(); x != nil { +// T() +// } else { +// F() +// } +// +// produces this CFG: +// +// 1: x := f() +// x != nil +// succs: 2, 3 +// 2: T() +// succs: 4 +// 3: F() +// succs: 4 +// 4: +// +// The CFG does contain Return statements; even implicit returns are +// materialized (at the position of the function's closing brace). +// +// The CFG does not record conditions associated with conditional branch +// edges, nor the short-circuit semantics of the && and || operators, +// nor abnormal control flow caused by panic. If you need this +// information, use golang.org/x/tools/go/ssa instead. +// +package cfg + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" +) + +// A CFG represents the control-flow graph of a single function. +// +// The entry point is Blocks[0]; there may be multiple return blocks. +type CFG struct { + Blocks []*Block // block[0] is entry; order otherwise undefined +} + +// A Block represents a basic block: a list of statements and +// expressions that are always evaluated sequentially. +// +// A block may have 0-2 successors: zero for a return block or a block +// that calls a function such as panic that never returns; one for a +// normal (jump) block; and two for a conditional (if) block. 
+type Block struct { + Nodes []ast.Node // statements, expressions, and ValueSpecs + Succs []*Block // successor nodes in the graph + Index int32 // index within CFG.Blocks + Live bool // block is reachable from entry + + comment string // for debugging + succs2 [2]*Block // underlying array for Succs +} + +// New returns a new control-flow graph for the specified function body, +// which must be non-nil. +// +// The CFG builder calls mayReturn to determine whether a given function +// call may return. For example, calls to panic, os.Exit, and log.Fatal +// do not return, so the builder can remove infeasible graph edges +// following such calls. The builder calls mayReturn only for a +// CallExpr beneath an ExprStmt. +func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { + b := builder{ + mayReturn: mayReturn, + cfg: new(CFG), + } + b.current = b.newBlock("entry") + b.stmt(body) + + // Compute liveness (reachability from entry point), breadth-first. + q := make([]*Block, 0, len(b.cfg.Blocks)) + q = append(q, b.cfg.Blocks[0]) // entry point + for len(q) > 0 { + b := q[len(q)-1] + q = q[:len(q)-1] + + if !b.Live { + b.Live = true + q = append(q, b.Succs...) + } + } + + // Does control fall off the end of the function's body? + // Make implicit return explicit. + if b.current != nil && b.current.Live { + b.add(&ast.ReturnStmt{ + Return: body.End() - 1, + }) + } + + return b.cfg +} + +func (b *Block) String() string { + return fmt.Sprintf("block %d (%s)", b.Index, b.comment) +} + +// Return returns the return statement at the end of this block if present, nil otherwise. +func (b *Block) Return() (ret *ast.ReturnStmt) { + if len(b.Nodes) > 0 { + ret, _ = b.Nodes[len(b.Nodes)-1].(*ast.ReturnStmt) + } + return +} + +// Format formats the control-flow graph for ease of debugging. +func (g *CFG) Format(fset *token.FileSet) string { + var buf bytes.Buffer + for _, b := range g.Blocks { + fmt.Fprintf(&buf, ".%d: # %s\n", b.Index, b.comment) + for _, n := range b.Nodes { + fmt.Fprintf(&buf, "\t%s\n", formatNode(fset, n)) + } + if len(b.Succs) > 0 { + fmt.Fprintf(&buf, "\tsuccs:") + for _, succ := range b.Succs { + fmt.Fprintf(&buf, " %d", succ.Index) + } + buf.WriteByte('\n') + } + buf.WriteByte('\n') + } + return buf.String() +} + +func formatNode(fset *token.FileSet, n ast.Node) string { + var buf bytes.Buffer + format.Node(&buf, fset, n) + // Indent secondary lines by a tab. + return string(bytes.Replace(buf.Bytes(), []byte("\n"), []byte("\n\t"), -1)) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 000000000..fc8beea5d --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,133 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. 
Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. 
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 000000000..efe221e7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. 
+ return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go new file mode 100644 index 000000000..d01fb04a6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -0,0 +1,222 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cgo handles cgo preprocessing of files containing `import "C"`. +// +// DESIGN +// +// The approach taken is to run the cgo processor on the package's +// CgoFiles and parse the output, faking the filenames of the +// resulting ASTs so that the synthetic file containing the C types is +// called "C" (e.g. "~/go/src/net/C") and the preprocessed files +// have their original names (e.g. "~/go/src/net/cgo_unix.go"), +// not the names of the actual temporary files. +// +// The advantage of this approach is its fidelity to 'go build'. The +// downside is that the token.Position.Offset for each AST node is +// incorrect, being an offset within the temporary file. Line numbers +// should still be correct because of the //line comments. +// +// The logic of this file is mostly plundered from the 'go build' +// tool, which also invokes the cgo preprocessor. +// +// +// REJECTED ALTERNATIVE +// +// An alternative approach that we explored is to extend go/types' +// Importer mechanism to provide the identity of the importing package +// so that each time `import "C"` appears it resolves to a different +// synthetic package containing just the objects needed in that case. +// The loader would invoke cgo but parse only the cgo_types.go file +// defining the package-level objects, discarding the other files +// resulting from preprocessing. +// +// The benefit of this approach would have been that source-level +// syntax information would correspond exactly to the original cgo +// file, with no preprocessing involved, making source tools like +// godoc, guru, and eg happy. However, the approach was rejected +// due to the additional complexity it would impose on go/types. (It +// made for a beautiful demo, though.) +// +// cgo files, despite their *.go extension, are not legal Go source +// files per the specification since they may refer to unexported +// members of package "C" such as C.int. Also, a function such as +// C.getpwent has in effect two types, one matching its C type and one +// which additionally returns (errno C.int). The cgo preprocessor +// uses name mangling to distinguish these two functions in the +// processed code, but go/types would need to duplicate this logic in +// its handling of function calls, analogous to the treatment of map +// lookups in which y=m[k] and y,ok=m[k] are both legal. 
+ +package cgo + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "strings" + + exec "golang.org/x/sys/execabs" +) + +// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses +// the output and returns the resulting ASTs. +// +func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { + tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpdir) + + pkgdir := bp.Dir + if DisplayPath != nil { + pkgdir = DisplayPath(pkgdir) + } + + cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) + if err != nil { + return nil, err + } + var files []*ast.File + for i := range cgoFiles { + rd, err := os.Open(cgoFiles[i]) + if err != nil { + return nil, err + } + display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) + f, err := parser.ParseFile(fset, display, rd, mode) + rd.Close() + if err != nil { + return nil, err + } + files = append(files, f) + } + return files, nil +} + +var cgoRe = regexp.MustCompile(`[/\\:]`) + +// Run invokes the cgo preprocessor on bp.CgoFiles and returns two +// lists of files: the resulting processed files (in temporary +// directory tmpdir) and the corresponding names of the unprocessed files. +// +// Run is adapted from (*builder).cgo in +// $GOROOT/src/cmd/go/build.go, but these features are unsupported: +// Objective C, CGOPKGPATH, CGO_FLAGS. +// +// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in +// to the cgo preprocessor. This in turn will set the // line comments +// referring to those files to use absolute paths. This is needed for +// go/packages using the legacy go list support so it is able to find +// the original files. +func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { + cgoCPPFLAGS, _, _, _ := cflags(bp, true) + _, cgoexeCFLAGS, _, _ := cflags(bp, false) + + if len(bp.CgoPkgConfig) > 0 { + pcCFLAGS, err := pkgConfigFlags(bp) + if err != nil { + return nil, nil, err + } + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + } + + // Allows including _cgo_export.h from .[ch] files in the package. + cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) + + // _cgo_gotypes.go (displayed "C") contains the type definitions. + files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) + displayFiles = append(displayFiles, "C") + for _, fn := range bp.CgoFiles { + // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. 
+ f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") + files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) + displayFiles = append(displayFiles, fn) + } + + var cgoflags []string + if bp.Goroot && bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_runtime_cgo=false") + } + if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_syscall=false") + } + + var cgoFiles []string = bp.CgoFiles + if useabs { + cgoFiles = make([]string, len(bp.CgoFiles)) + for i := range cgoFiles { + cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) + } + } + + args := stringList( + "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", + cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, + ) + if false { + log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) + } + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = pkgdir + cmd.Env = append(os.Environ(), "PWD="+pkgdir) + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) + } + + return files, displayFiles, nil +} + +// -- unmodified from 'go build' --------------------------------------- + +// Return the flags to use when invoking the C or C++ compilers, or cgo. +func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { + var defaults string + if def { + defaults = "-g -O2" + } + + cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) + cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) + cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) + ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) + return +} + +// envList returns the value of the given environment variable broken +// into fields, using the default value when the variable is empty. +func envList(key, def string) []string { + v := os.Getenv(key) + if v == "" { + v = def + } + return strings.Fields(v) +} + +// stringList's arguments should be a sequence of string or []string values. +// stringList flattens them into a single []string. +func stringList(args ...interface{}) []string { + var x []string + for _, arg := range args { + switch arg := arg.(type) { + case []string: + x = append(x, arg...) + case string: + x = append(x, arg) + default: + panic("stringList: invalid argument") + } + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go new file mode 100644 index 000000000..7d94bbc1e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -0,0 +1,39 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgo + +import ( + "errors" + "fmt" + "go/build" + exec "golang.org/x/sys/execabs" + "strings" +) + +// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. +func pkgConfig(mode string, pkgs []string) (flags []string, err error) { + cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) + if len(out) > 0 { + s = fmt.Sprintf("%s: %s", s, out) + } + return nil, errors.New(s) + } + if len(out) > 0 { + flags = strings.Fields(string(out)) + } + return +} + +// pkgConfigFlags calls pkg-config if needed and returns the cflags +// needed to build the package. +func pkgConfigFlags(p *build.Package) (cflags []string, err error) { + if len(p.CgoPkgConfig) == 0 { + return nil, nil + } + return pkgConfig("--cflags", p.CgoPkgConfig) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 000000000..a807d0aaa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). 
+// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + 
p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". + n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). + var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // 
float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. 
+// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 000000000..e9f73d14a --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
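+// In the decoder, a non-negative value read by tagOrIndex is an index
+// into a previously-seen list (e.g. pkgList, typList); a negative
+// value selects one of the tags declared below.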
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 000000000..f33dc5613 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. 
The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000..e8cba6b23 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
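+// For example, a package id is written as a Go string literal such as
+// "encoding/json"; the empty string "" stands for the package that is
+// currently being imported.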
+// +func (p *parser) parsePackageID() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageID() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . +// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . 
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return newInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
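+// A send-only channel is written as "chan" "<-" Type, a receive-only
+// channel as "<-" "chan" Type, and a bidirectional channel as just
+// "chan" Type.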
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageID(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
+// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . +// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. + var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 000000000..d2fc8b6fa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,781 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. 
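+//
+// A minimal usage sketch (fset and the type-checked pkg are assumed to
+// exist already; names are illustrative only):
+//
+//	var buf bytes.Buffer
+//	if err := IExportData(&buf, fset, pkg); err != nil {
+//		// handle the error
+//	}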
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + return iexportCommon(out, fset, false, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return iexportCommon(out, fset, true, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*types.Package) (err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := iexporter{ + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. + var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + io.Copy(out, &p.data0) + + return nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. 
+ if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + localpkg *types.Package + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. 
+ w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + 
w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. 
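+//
+// For instance (an illustrative reading of the scheme above, assuming an
+// unsigned type with maxBytes == 8): maxSmall is 256-8 = 248, so the values
+// 0..247 are written directly as a single byte, while 0x1234 is written as
+// the prefix byte 254 (that is, 256-2) followed by the two bytes 0x12 0x34.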
+// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. 
+ if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 000000000..8ed8bc62d --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,676 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
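+//
+// A minimal call sequence looks like this (an illustrative sketch; exportData
+// and the import path "example.com/m" are placeholders supplied by the caller):
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	_, pkg, err := IImportData(fset, imports, exportData, "example.com/m")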
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, imports, data, false, path) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, imports, data, true, "") +} + +func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { + const currentVersion = 1 + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + bundleVersion := r.uint64() + switch bundleVersion { + case bundleVersion: + default: + errorf("unknown bundle format version %d", bundleVersion) + } + } + + version = int64(r.uint64()) + switch version { + case currentVersion, 0: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + version: int(version), + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + return pkgs, nil +} + +type iimporter struct { + ipath string + version int + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
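+		// The *types.Named below is created with a nil underlying type and is
+		// declared immediately, so any recursive reference encountered while
+		// reading the underlying type resolves to this stub; SetUnderlying
+		// then completes it.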
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + // Ensure that the imported Kind is Float, else this constant may run into + // bitsize limits on overlarge integers. Eventually we can instead adopt + // the approach of CL 288632, but that CL relies on go/constant APIs that + // were introduced in go1.13. + // + // TODO(rFindley): sync the logic here with tip Go once we no longer + // support go1.12. 
+ x = constant.ToFloat(x) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. 
+ var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 000000000..8b163e3d0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 000000000..49984f40f --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 000000000..18a002f82 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "go/types" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +var debug = false + +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return nil, enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, friendlyErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go new file mode 100644 index 000000000..c5aa31c1a --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/doc.go @@ -0,0 +1,204 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loader loads a complete Go program from source code, parsing +// and type-checking the initial packages plus their transitive closure +// of dependencies. The ASTs and the derived facts are retained for +// later use. +// +// Deprecated: This is an older API and does not have support +// for modules. Use golang.org/x/tools/go/packages instead. +// +// The package defines two primary types: Config, which specifies a +// set of initial packages to load and various other options; and +// Program, which is the result of successfully loading the packages +// specified by a configuration. +// +// The configuration can be set directly, but *Config provides various +// convenience methods to simplify the common cases, each of which can +// be called any number of times. Finally, these are followed by a +// call to Load() to actually load and type-check the program. +// +// var conf loader.Config +// +// // Use the command-line arguments to specify +// // a set of initial packages to load from source. +// // See FromArgsUsage for help. +// rest, err := conf.FromArgs(os.Args[1:], wantTests) +// +// // Parse the specified files and create an ad hoc package with path "foo". +// // All files must have the same 'package' declaration. +// conf.CreateFromFilenames("foo", "foo.go", "bar.go") +// +// // Create an ad hoc package with path "foo" from +// // the specified already-parsed files. +// // All ASTs must have the same 'package' declaration. +// conf.CreateFromFiles("foo", parsedFiles) +// +// // Add "runtime" to the set of packages to be loaded. +// conf.Import("runtime") +// +// // Adds "fmt" and "fmt_test" to the set of packages +// // to be loaded. 
"fmt" will include *_test.go files. +// conf.ImportWithTests("fmt") +// +// // Finally, load all the packages specified by the configuration. +// prog, err := conf.Load() +// +// See examples_test.go for examples of API usage. +// +// +// CONCEPTS AND TERMINOLOGY +// +// The WORKSPACE is the set of packages accessible to the loader. The +// workspace is defined by Config.Build, a *build.Context. The +// default context treats subdirectories of $GOROOT and $GOPATH as +// packages, but this behavior may be overridden. +// +// An AD HOC package is one specified as a set of source files on the +// command line. In the simplest case, it may consist of a single file +// such as $GOROOT/src/net/http/triv.go. +// +// EXTERNAL TEST packages are those comprised of a set of *_test.go +// files all with the same 'package foo_test' declaration, all in the +// same directory. (go/build.Package calls these files XTestFiles.) +// +// An IMPORTABLE package is one that can be referred to by some import +// spec. Every importable package is uniquely identified by its +// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", +// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path +// typically denotes a subdirectory of the workspace. +// +// An import declaration uses an IMPORT PATH to refer to a package. +// Most import declarations use the package path as the import path. +// +// Due to VENDORING (https://golang.org/s/go15vendor), the +// interpretation of an import path may depend on the directory in which +// it appears. To resolve an import path to a package path, go/build +// must search the enclosing directories for a subdirectory named +// "vendor". +// +// ad hoc packages and external test packages are NON-IMPORTABLE. The +// path of an ad hoc package is inferred from the package +// declarations of its files and is therefore not a unique package key. +// For example, Config.CreatePkgs may specify two initial ad hoc +// packages, both with path "main". +// +// An AUGMENTED package is an importable package P plus all the +// *_test.go files with same 'package foo' declaration as P. +// (go/build.Package calls these files TestFiles.) +// +// The INITIAL packages are those specified in the configuration. A +// DEPENDENCY is a package loaded to satisfy an import in an initial +// package or another dependency. +// +package loader + +// IMPLEMENTATION NOTES +// +// 'go test', in-package test files, and import cycles +// --------------------------------------------------- +// +// An external test package may depend upon members of the augmented +// package that are not in the unaugmented package, such as functions +// that expose internals. (See bufio/export_test.go for an example.) +// So, the loader must ensure that for each external test package +// it loads, it also augments the corresponding non-test package. +// +// The import graph over n unaugmented packages must be acyclic; the +// import graph over n-1 unaugmented packages plus one augmented +// package must also be acyclic. ('go test' relies on this.) But the +// import graph over n augmented packages may contain cycles. +// +// First, all the (unaugmented) non-test packages and their +// dependencies are imported in the usual way; the loader reports an +// error if it detects an import cycle. +// +// Then, each package P for which testing is desired is augmented by +// the list P' of its in-package test files, by calling +// (*types.Checker).Files. 
This arrangement ensures that P' may +// reference definitions within P, but P may not reference definitions +// within P'. Furthermore, P' may import any other package, including +// ones that depend upon P, without an import cycle error. +// +// Consider two packages A and B, both of which have lists of +// in-package test files we'll call A' and B', and which have the +// following import graph edges: +// B imports A +// B' imports A +// A' imports B +// This last edge would be expected to create an error were it not +// for the special type-checking discipline above. +// Cycles of size greater than two are possible. For example: +// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" +// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" +// regexp/exec_test.go (package regexp) imports "compress/bzip2" +// +// +// Concurrency +// ----------- +// +// Let us define the import dependency graph as follows. Each node is a +// list of files passed to (Checker).Files at once. Many of these lists +// are the production code of an importable Go package, so those nodes +// are labelled by the package's path. The remaining nodes are +// ad hoc packages and lists of in-package *_test.go files that augment +// an importable package; those nodes have no label. +// +// The edges of the graph represent import statements appearing within a +// file. An edge connects a node (a list of files) to the node it +// imports, which is importable and thus always labelled. +// +// Loading is controlled by this dependency graph. +// +// To reduce I/O latency, we start loading a package's dependencies +// asynchronously as soon as we've parsed its files and enumerated its +// imports (scanImports). This performs a preorder traversal of the +// import dependency graph. +// +// To exploit hardware parallelism, we type-check unrelated packages in +// parallel, where "unrelated" means not ordered by the partial order of +// the import dependency graph. +// +// We use a concurrency-safe non-blocking cache (importer.imported) to +// record the results of type-checking, whether success or failure. An +// entry is created in this cache by startLoad the first time the +// package is imported. The first goroutine to request an entry becomes +// responsible for completing the task and broadcasting completion to +// subsequent requestors, which block until then. +// +// Type checking occurs in (parallel) postorder: we cannot type-check a +// set of files until we have loaded and type-checked all of their +// immediate dependencies (and thus all of their transitive +// dependencies). If the input were guaranteed free of import cycles, +// this would be trivial: we could simply wait for completion of the +// dependencies and then invoke the typechecker. +// +// But as we saw in the 'go test' section above, some cycles in the +// import graph over packages are actually legal, so long as the +// cycle-forming edge originates in the in-package test files that +// augment the package. This explains why the nodes of the import +// dependency graph are not packages, but lists of files: the unlabelled +// nodes avoid the cycles. Consider packages A and B where B imports A +// and A's in-package tests AT import B. The naively constructed import +// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but +// the graph over lists of files is AT --> B --> A, where AT is an +// unlabelled node. 
+// +// Awaiting completion of the dependencies in a cyclic graph would +// deadlock, so we must materialize the import dependency graph (as +// importer.graph) and check whether each import edge forms a cycle. If +// x imports y, and the graph already contains a path from y to x, then +// there is an import cycle, in which case the processing of x must not +// wait for the completion of processing of y. +// +// When the type-checker makes a callback (doImport) to the loader for a +// given import edge, there are two possible cases. In the normal case, +// the dependency has already been completely type-checked; doImport +// does a cache lookup and returns it. In the cyclic case, the entry in +// the cache is still necessarily incomplete, indicating a cycle. We +// perform the cycle check again to obtain the error message, and return +// the error. +// +// The result of using concurrency is about a 2.5x speedup for stdlib_test. diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go new file mode 100644 index 000000000..508a1fd01 --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -0,0 +1,1078 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +// See doc.go for package documentation and implementation notes. + +import ( + "errors" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/internal/cgo" +) + +var ignoreVendor build.ImportMode + +const trace = false // show timing info for type-checking + +// Config specifies the configuration for loading a whole program from +// Go source code. +// The zero value for Config is a ready-to-use default configuration. +type Config struct { + // Fset is the file set for the parser to use when loading the + // program. If nil, it may be lazily initialized by any + // method of Config. + Fset *token.FileSet + + // ParserMode specifies the mode to be used by the parser when + // loading source packages. + ParserMode parser.Mode + + // TypeChecker contains options relating to the type checker. + // + // The supplied IgnoreFuncBodies is not used; the effective + // value comes from the TypeCheckFuncBodies func below. + // The supplied Import function is not used either. + TypeChecker types.Config + + // TypeCheckFuncBodies is a predicate over package paths. + // A package for which the predicate is false will + // have its package-level declarations type checked, but not + // its function bodies; this can be used to quickly load + // dependencies from source. If nil, all func bodies are type + // checked. + TypeCheckFuncBodies func(path string) bool + + // If Build is non-nil, it is used to locate source packages. + // Otherwise &build.Default is used. + // + // By default, cgo is invoked to preprocess Go files that + // import the fake package "C". This behaviour can be + // disabled by setting CGO_ENABLED=0 in the environment prior + // to startup, or by setting Build.CgoEnabled=false. + Build *build.Context + + // The current directory, used for resolving relative package + // references such as "./go/loader". If empty, os.Getwd will be + // used instead. + Cwd string + + // If DisplayPath is non-nil, it is used to transform each + // file name obtained from Build.Import(). 
This can be used + // to prevent a virtualized build.Config's file names from + // leaking into the user interface. + DisplayPath func(path string) string + + // If AllowErrors is true, Load will return a Program even + // if some of the its packages contained I/O, parser or type + // errors; such errors are accessible via PackageInfo.Errors. If + // false, Load will fail if any package had an error. + AllowErrors bool + + // CreatePkgs specifies a list of non-importable initial + // packages to create. The resulting packages will appear in + // the corresponding elements of the Program.Created slice. + CreatePkgs []PkgSpec + + // ImportPkgs specifies a set of initial packages to load. + // The map keys are package paths. + // + // The map value indicates whether to load tests. If true, Load + // will add and type-check two lists of files to the package: + // non-test files followed by in-package *_test.go files. In + // addition, it will append the external test package (if any) + // to Program.Created. + ImportPkgs map[string]bool + + // FindPackage is called during Load to create the build.Package + // for a given import path from a given directory. + // If FindPackage is nil, (*build.Context).Import is used. + // A client may use this hook to adapt to a proprietary build + // system that does not follow the "go build" layout + // conventions, for example. + // + // It must be safe to call concurrently from multiple goroutines. + FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) + + // AfterTypeCheck is called immediately after a list of files + // has been type-checked and appended to info.Files. + // + // This optional hook function is the earliest opportunity for + // the client to observe the output of the type checker, + // which may be useful to reduce analysis latency when loading + // a large program. + // + // The function is permitted to modify info.Info, for instance + // to clear data structures that are no longer needed, which can + // dramatically reduce peak memory consumption. + // + // The function may be called twice for the same PackageInfo: + // once for the files of the package and again for the + // in-package test files. + // + // It must be safe to call concurrently from multiple goroutines. + AfterTypeCheck func(info *PackageInfo, files []*ast.File) +} + +// A PkgSpec specifies a non-importable package to be created by Load. +// Files are processed first, but typically only one of Files and +// Filenames is provided. The path needn't be globally unique. +// +// For vendoring purposes, the package's directory is the one that +// contains the first file. +type PkgSpec struct { + Path string // package path ("" => use package declaration) + Files []*ast.File // ASTs of already-parsed files + Filenames []string // names of files to be parsed +} + +// A Program is a Go program loaded from source as specified by a Config. +type Program struct { + Fset *token.FileSet // the file set for this program + + // Created[i] contains the initial package whose ASTs or + // filenames were supplied by Config.CreatePkgs[i], followed by + // the external test package, if any, of each package in + // Config.ImportPkgs ordered by ImportPath. + // + // NOTE: these files must not import "C". Cgo preprocessing is + // only performed on imported packages, not ad hoc packages. 
+ // + // TODO(adonovan): we need to copy and adapt the logic of + // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make + // Config.Import and Config.Create methods return the same kind + // of entity, essentially a build.Package. + // Perhaps we can even reuse that type directly. + Created []*PackageInfo + + // Imported contains the initially imported packages, + // as specified by Config.ImportPkgs. + Imported map[string]*PackageInfo + + // AllPackages contains the PackageInfo of every package + // encountered by Load: all initial packages and all + // dependencies, including incomplete ones. + AllPackages map[*types.Package]*PackageInfo + + // importMap is the canonical mapping of package paths to + // packages. It contains all Imported initial packages, but not + // Created ones, and all imported dependencies. + importMap map[string]*types.Package +} + +// PackageInfo holds the ASTs and facts derived by the type-checker +// for a single package. +// +// Not mutated once exposed via the API. +// +type PackageInfo struct { + Pkg *types.Package + Importable bool // true if 'import "Pkg.Path()"' would resolve to this + TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors + Files []*ast.File // syntax trees for the package's files + Errors []error // non-nil if the package had errors + types.Info // type-checker deductions. + dir string // package directory + + checker *types.Checker // transient type-checker state + errorFunc func(error) +} + +func (info *PackageInfo) String() string { return info.Pkg.Path() } + +func (info *PackageInfo) appendError(err error) { + if info.errorFunc != nil { + info.errorFunc(err) + } else { + fmt.Fprintln(os.Stderr, err) + } + info.Errors = append(info.Errors, err) +} + +func (conf *Config) fset() *token.FileSet { + if conf.Fset == nil { + conf.Fset = token.NewFileSet() + } + return conf.Fset +} + +// ParseFile is a convenience function (intended for testing) that invokes +// the parser using the Config's FileSet, which is initialized if nil. +// +// src specifies the parser input as a string, []byte, or io.Reader, and +// filename is its apparent name. If src is nil, the contents of +// filename are read from the file system. +// +func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { + // TODO(adonovan): use conf.build() etc like parseFiles does. + return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) +} + +// FromArgsUsage is a partial usage message that applications calling +// FromArgs may wish to include in their -help output. +const FromArgsUsage = ` + is a list of arguments denoting a set of initial packages. +It may take one of two forms: + +1. A list of *.go source files. + + All of the specified files are loaded, parsed and type-checked + as a single package. All the files must belong to the same directory. + +2. A list of import paths, each denoting a package. + + The package's directory is found relative to the $GOROOT and + $GOPATH using similar logic to 'go build', and the *.go files in + that directory are loaded, parsed and type-checked as a single + package. + + In addition, all *_test.go files in the directory are then loaded + and parsed. Those files whose package declaration equals that of + the non-*_test.go files are included in the primary package. Test + files whose package declaration ends with "_test" are type-checked + as another package, the 'external' test package, so that a single + import path may denote two packages. 
(Whether this behaviour is + enabled is tool-specific, and may depend on additional flags.) + +A '--' argument terminates the list of packages. +` + +// FromArgs interprets args as a set of initial packages to load from +// source and updates the configuration. It returns the list of +// unconsumed arguments. +// +// It is intended for use in command-line interfaces that require a +// set of initial packages to be specified; see FromArgsUsage message +// for details. +// +// Only superficial errors are reported at this stage; errors dependent +// on I/O are detected during Load. +// +func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) { + var rest []string + for i, arg := range args { + if arg == "--" { + rest = args[i+1:] + args = args[:i] + break // consume "--" and return the remaining args + } + } + + if len(args) > 0 && strings.HasSuffix(args[0], ".go") { + // Assume args is a list of a *.go files + // denoting a single ad hoc package. + for _, arg := range args { + if !strings.HasSuffix(arg, ".go") { + return nil, fmt.Errorf("named files must be .go files: %s", arg) + } + } + conf.CreateFromFilenames("", args...) + } else { + // Assume args are directories each denoting a + // package and (perhaps) an external test, iff xtest. + for _, arg := range args { + if xtest { + conf.ImportWithTests(arg) + } else { + conf.Import(arg) + } + } + } + + return rest, nil +} + +// CreateFromFilenames is a convenience function that adds +// a conf.CreatePkgs entry to create a package of the specified *.go +// files. +// +func (conf *Config) CreateFromFilenames(path string, filenames ...string) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames}) +} + +// CreateFromFiles is a convenience function that adds a conf.CreatePkgs +// entry to create package of the specified path and parsed files. +// +func (conf *Config) CreateFromFiles(path string, files ...*ast.File) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files}) +} + +// ImportWithTests is a convenience function that adds path to +// ImportPkgs, the set of initial source packages located relative to +// $GOPATH. The package will be augmented by any *_test.go files in +// its directory that contain a "package x" (not "package x_test") +// declaration. +// +// In addition, if any *_test.go files contain a "package x_test" +// declaration, an additional package comprising just those files will +// be added to CreatePkgs. +// +func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } + +// Import is a convenience function that adds path to ImportPkgs, the +// set of initial packages that will be imported from source. +// +func (conf *Config) Import(path string) { conf.addImport(path, false) } + +func (conf *Config) addImport(path string, tests bool) { + if path == "C" { + return // ignore; not a real package + } + if conf.ImportPkgs == nil { + conf.ImportPkgs = make(map[string]bool) + } + conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests +} + +// PathEnclosingInterval returns the PackageInfo and ast.Node that +// contain source interval [start, end), and all the node's ancestors +// up to the AST root. It searches all ast.Files of all packages in prog. +// exact is defined as for astutil.PathEnclosingInterval. +// +// The zero value is returned if not found. 
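+//
+// For example (an illustrative sketch; start and end are token.Pos values
+// obtained by the caller):
+//
+//	if info, path, exact := prog.PathEnclosingInterval(start, end); info != nil {
+//		innermost := path[0] // innermost enclosing node
+//		_, _ = innermost, exact
+//	}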
+// +func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { + for _, info := range prog.AllPackages { + for _, f := range info.Files { + if f.Pos() == token.NoPos { + // This can happen if the parser saw + // too many errors and bailed out. + // (Use parser.AllErrors to prevent that.) + continue + } + if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { + continue + } + if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { + return info, path, exact + } + } + } + return nil, nil, false +} + +// InitialPackages returns a new slice containing the set of initial +// packages (Created + Imported) in unspecified order. +// +func (prog *Program) InitialPackages() []*PackageInfo { + infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) + infos = append(infos, prog.Created...) + for _, info := range prog.Imported { + infos = append(infos, info) + } + return infos +} + +// Package returns the ASTs and results of type checking for the +// specified package. +func (prog *Program) Package(path string) *PackageInfo { + if info, ok := prog.AllPackages[prog.importMap[path]]; ok { + return info + } + for _, info := range prog.Created { + if path == info.Pkg.Path() { + return info + } + } + return nil +} + +// ---------- Implementation ---------- + +// importer holds the working state of the algorithm. +type importer struct { + conf *Config // the client configuration + start time.Time // for logging + + progMu sync.Mutex // guards prog + prog *Program // the resulting program + + // findpkg is a memoization of FindPackage. + findpkgMu sync.Mutex // guards findpkg + findpkg map[findpkgKey]*findpkgValue + + importedMu sync.Mutex // guards imported + imported map[string]*importInfo // all imported packages (incl. failures) by import path + + // import dependency graph: graph[x][y] => x imports y + // + // Since non-importable packages cannot be cyclic, we ignore + // their imports, thus we only need the subgraph over importable + // packages. Nodes are identified by their import paths. + graphMu sync.Mutex + graph map[string]map[string]bool +} + +type findpkgKey struct { + importPath string + fromDir string + mode build.ImportMode +} + +type findpkgValue struct { + ready chan struct{} // closed to broadcast readiness + bp *build.Package + err error +} + +// importInfo tracks the success or failure of a single import. +// +// Upon completion, exactly one of info and err is non-nil: +// info on successful creation of a package, err otherwise. +// A successful package may still contain type errors. +// +type importInfo struct { + path string // import path + info *PackageInfo // results of typechecking (including errors) + complete chan struct{} // closed to broadcast that info is set. +} + +// awaitCompletion blocks until ii is complete, +// i.e. the info field is safe to inspect. +func (ii *importInfo) awaitCompletion() { + <-ii.complete // wait for close +} + +// Complete marks ii as complete. +// Its info and err fields will not be subsequently updated. +func (ii *importInfo) Complete(info *PackageInfo) { + if info == nil { + panic("info == nil") + } + ii.info = info + close(ii.complete) +} + +type importError struct { + path string // import path + err error // reason for failure to create a package +} + +// Load creates the initial packages specified by conf.{Create,Import}Pkgs, +// loading their dependencies packages as needed. 
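+//
+// A minimal call sequence is (an illustrative sketch mirroring the package
+// documentation):
+//
+//	var conf loader.Config
+//	conf.Import("fmt")
+//	prog, err := conf.Load()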
+// +// On success, Load returns a Program containing a PackageInfo for +// each package. On failure, it returns an error. +// +// If AllowErrors is true, Load will return a Program even if some +// packages contained I/O, parser or type errors, or if dependencies +// were missing. (Such errors are accessible via PackageInfo.Errors. If +// false, Load will fail if any package had an error. +// +// It is an error if no packages were loaded. +// +func (conf *Config) Load() (*Program, error) { + // Create a simple default error handler for parse/type errors. + if conf.TypeChecker.Error == nil { + conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) } + } + + // Set default working directory for relative package references. + if conf.Cwd == "" { + var err error + conf.Cwd, err = os.Getwd() + if err != nil { + return nil, err + } + } + + // Install default FindPackage hook using go/build logic. + if conf.FindPackage == nil { + conf.FindPackage = (*build.Context).Import + } + + prog := &Program{ + Fset: conf.fset(), + Imported: make(map[string]*PackageInfo), + importMap: make(map[string]*types.Package), + AllPackages: make(map[*types.Package]*PackageInfo), + } + + imp := importer{ + conf: conf, + prog: prog, + findpkg: make(map[findpkgKey]*findpkgValue), + imported: make(map[string]*importInfo), + start: time.Now(), + graph: make(map[string]map[string]bool), + } + + // -- loading proper (concurrent phase) -------------------------------- + + var errpkgs []string // packages that contained errors + + // Load the initially imported packages and their dependencies, + // in parallel. + // No vendor check on packages imported from the command line. + infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor) + for _, ie := range importErrors { + conf.TypeChecker.Error(ie.err) // failed to create package + errpkgs = append(errpkgs, ie.path) + } + for _, info := range infos { + prog.Imported[info.Pkg.Path()] = info + } + + // Augment the designated initial packages by their tests. + // Dependencies are loaded in parallel. + var xtestPkgs []*build.Package + for importPath, augment := range conf.ImportPkgs { + if !augment { + continue + } + + // No vendor check on packages imported from command line. + bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor) + if err != nil { + // Package not found, or can't even parse package declaration. + // Already reported by previous loop; ignore it. + continue + } + + // Needs external test package? + if len(bp.XTestGoFiles) > 0 { + xtestPkgs = append(xtestPkgs, bp) + } + + // Consult the cache using the canonical package path. + path := bp.ImportPath + imp.importedMu.Lock() // (unnecessary, we're sequential here) + ii, ok := imp.imported[path] + // Paranoid checks added due to issue #11012. + if !ok { + // Unreachable. + // The previous loop called importAll and thus + // startLoad for each path in ImportPkgs, which + // populates imp.imported[path] with a non-zero value. + panic(fmt.Sprintf("imported[%q] not found", path)) + } + if ii == nil { + // Unreachable. + // The ii values in this loop are the same as in + // the previous loop, which enforced the invariant + // that at least one of ii.err and ii.info is non-nil. + panic(fmt.Sprintf("imported[%q] == nil", path)) + } + if ii.info == nil { + // Unreachable. + // awaitCompletion has the postcondition + // ii.info != nil. + panic(fmt.Sprintf("imported[%q].info = nil", path)) + } + info := ii.info + imp.importedMu.Unlock() + + // Parse the in-package test files. 
+ files, errs := imp.conf.parsePackageFiles(bp, 't') + for _, err := range errs { + info.appendError(err) + } + + // The test files augmenting package P cannot be imported, + // but may import packages that import P, + // so we must disable the cycle check. + imp.addFiles(info, files, false) + } + + createPkg := func(path, dir string, files []*ast.File, errs []error) { + info := imp.newPackageInfo(path, dir) + for _, err := range errs { + info.appendError(err) + } + + // Ad hoc packages are non-importable, + // so no cycle check is needed. + // addFiles loads dependencies in parallel. + imp.addFiles(info, files, false) + prog.Created = append(prog.Created, info) + } + + // Create packages specified by conf.CreatePkgs. + for _, cp := range conf.CreatePkgs { + files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) + files = append(files, cp.Files...) + + path := cp.Path + if path == "" { + if len(files) > 0 { + path = files[0].Name.Name + } else { + path = "(unnamed)" + } + } + + dir := conf.Cwd + if len(files) > 0 && files[0].Pos().IsValid() { + dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) + } + createPkg(path, dir, files, errs) + } + + // Create external test packages. + sort.Sort(byImportPath(xtestPkgs)) + for _, bp := range xtestPkgs { + files, errs := imp.conf.parsePackageFiles(bp, 'x') + createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) + } + + // -- finishing up (sequential) ---------------------------------------- + + if len(prog.Imported)+len(prog.Created) == 0 { + return nil, errors.New("no initial packages were loaded") + } + + // Create infos for indirectly imported packages. + // e.g. incomplete packages without syntax, loaded from export data. + for _, obj := range prog.importMap { + info := prog.AllPackages[obj] + if info == nil { + prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} + } else { + // finished + info.checker = nil + info.errorFunc = nil + } + } + + if !conf.AllowErrors { + // Report errors in indirectly imported packages. + for _, info := range prog.AllPackages { + if len(info.Errors) > 0 { + errpkgs = append(errpkgs, info.Pkg.Path()) + } + } + if errpkgs != nil { + var more string + if len(errpkgs) > 3 { + more = fmt.Sprintf(" and %d more", len(errpkgs)-3) + errpkgs = errpkgs[:3] + } + return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", + strings.Join(errpkgs, ", "), more) + } + } + + markErrorFreePackages(prog.AllPackages) + + return prog, nil +} + +type byImportPath []*build.Package + +func (b byImportPath) Len() int { return len(b) } +func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } +func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// markErrorFreePackages sets the TransitivelyErrorFree flag on all +// applicable packages. +func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { + // Build the transpose of the import graph. + importedBy := make(map[*types.Package]map[*types.Package]bool) + for P := range allPackages { + for _, Q := range P.Imports() { + clients, ok := importedBy[Q] + if !ok { + clients = make(map[*types.Package]bool) + importedBy[Q] = clients + } + clients[P] = true + } + } + + // Find all packages reachable from some error package. 
+ reachable := make(map[*types.Package]bool) + var visit func(*types.Package) + visit = func(p *types.Package) { + if !reachable[p] { + reachable[p] = true + for q := range importedBy[p] { + visit(q) + } + } + } + for _, info := range allPackages { + if len(info.Errors) > 0 { + visit(info.Pkg) + } + } + + // Mark the others as "transitively error-free". + for _, info := range allPackages { + if !reachable[info.Pkg] { + info.TransitivelyErrorFree = true + } + } +} + +// build returns the effective build context. +func (conf *Config) build() *build.Context { + if conf.Build != nil { + return conf.Build + } + return &build.Default +} + +// parsePackageFiles enumerates the files belonging to package path, +// then loads, parses and returns them, plus a list of I/O or parse +// errors that were encountered. +// +// 'which' indicates which files to include: +// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) +// 't': include in-package *_test.go source files (TestGoFiles) +// 'x': include external *_test.go source files. (XTestGoFiles) +// +func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { + if bp.ImportPath == "unsafe" { + return nil, nil + } + var filenames []string + switch which { + case 'g': + filenames = bp.GoFiles + case 't': + filenames = bp.TestGoFiles + case 'x': + filenames = bp.XTestGoFiles + default: + panic(which) + } + + files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) + + // Preprocess CgoFiles and parse the outputs (sequentially). + if which == 'g' && bp.CgoFiles != nil { + cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) + if err != nil { + errs = append(errs, err) + } else { + files = append(files, cgofiles...) + } + } + + return files, errs +} + +// doImport imports the package denoted by path. +// It implements the types.Importer signature. +// +// It returns an error if a package could not be created +// (e.g. go/build or parse error), but type errors are reported via +// the types.Config.Error callback (the first of which is also saved +// in the package's PackageInfo). +// +// Idempotent. +// +func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { + if to == "C" { + // This should be unreachable, but ad hoc packages are + // not currently subject to cgo preprocessing. + // See https://golang.org/issue/11627. + return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, + from.Pkg.Path()) + } + + bp, err := imp.findPackage(to, from.dir, 0) + if err != nil { + return nil, err + } + + // The standard unsafe package is handled specially, + // and has no PackageInfo. + if bp.ImportPath == "unsafe" { + return types.Unsafe, nil + } + + // Look for the package in the cache using its canonical path. + path := bp.ImportPath + imp.importedMu.Lock() + ii := imp.imported[path] + imp.importedMu.Unlock() + if ii == nil { + panic("internal error: unexpected import: " + path) + } + if ii.info != nil { + return ii.info.Pkg, nil + } + + // Import of incomplete package: this indicates a cycle. + fromPath := from.Pkg.Path() + if cycle := imp.findPath(path, fromPath); cycle != nil { + // Normalize cycle: start from alphabetically largest node. 
+ pos, start := -1, "" + for i, s := range cycle { + if pos < 0 || s > start { + pos, start = i, s + } + } + cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest + cycle = append(cycle, cycle[0]) // add start node to end to show cycliness + return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) + } + + panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) +} + +// findPackage locates the package denoted by the importPath in the +// specified directory. +func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { + // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) + // to avoid holding the lock around FindPackage. + key := findpkgKey{importPath, fromDir, mode} + imp.findpkgMu.Lock() + v, ok := imp.findpkg[key] + if ok { + // cache hit + imp.findpkgMu.Unlock() + + <-v.ready // wait for entry to become ready + } else { + // Cache miss: this goroutine becomes responsible for + // populating the map entry and broadcasting its readiness. + v = &findpkgValue{ready: make(chan struct{})} + imp.findpkg[key] = v + imp.findpkgMu.Unlock() + + ioLimit <- true + v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) + <-ioLimit + + if _, ok := v.err.(*build.NoGoError); ok { + v.err = nil // empty directory is not an error + } + + close(v.ready) // broadcast ready condition + } + return v.bp, v.err +} + +// importAll loads, parses, and type-checks the specified packages in +// parallel and returns their completed importInfos in unspecified order. +// +// fromPath is the package path of the importing package, if it is +// importable, "" otherwise. It is used for cycle detection. +// +// fromDir is the directory containing the import declaration that +// caused these imports. +// +func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { + if fromPath != "" { + // We're loading a set of imports. + // + // We must record graph edges from the importing package + // to its dependencies, and check for cycles. + imp.graphMu.Lock() + deps, ok := imp.graph[fromPath] + if !ok { + deps = make(map[string]bool) + imp.graph[fromPath] = deps + } + for importPath := range imports { + deps[importPath] = true + } + imp.graphMu.Unlock() + } + + var pending []*importInfo + for importPath := range imports { + if fromPath != "" { + if cycle := imp.findPath(importPath, fromPath); cycle != nil { + // Cycle-forming import: we must not check it + // since it would deadlock. + if trace { + fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) + } + continue + } + } + bp, err := imp.findPackage(importPath, fromDir, mode) + if err != nil { + errors = append(errors, importError{ + path: importPath, + err: err, + }) + continue + } + pending = append(pending, imp.startLoad(bp)) + } + + for _, ii := range pending { + ii.awaitCompletion() + infos = append(infos, ii.info) + } + + return infos, errors +} + +// findPath returns an arbitrary path from 'from' to 'to' in the import +// graph, or nil if there was none. 
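findPackage above is a non-blocking duplicate-suppressing cache (the gopl.io §9.7 pattern its comment cites): the first goroutine to request a key creates the entry, releases the lock, does the slow work, and closes a channel to broadcast readiness; later callers find the entry and simply wait on that channel. A stripped-down sketch of just that pattern — the memo type and doWork function are illustrative, not part of this package:

    package main

    import (
        "fmt"
        "sync"
    )

    type entry struct {
        ready chan struct{} // closed to broadcast readiness
        value string
    }

    type memo struct {
        mu    sync.Mutex // guards cache
        cache map[string]*entry
    }

    func (m *memo) get(key string, doWork func(string) string) string {
        m.mu.Lock()
        e, ok := m.cache[key]
        if ok {
            m.mu.Unlock()
            <-e.ready // cache hit: wait for the creator to finish
        } else {
            // Cache miss: this goroutine becomes responsible for the work.
            e = &entry{ready: make(chan struct{})}
            m.cache[key] = e
            m.mu.Unlock()
            e.value = doWork(key) // slow work happens outside the lock
            close(e.ready)        // broadcast readiness
        }
        return e.value
    }

    func main() {
        m := &memo{cache: make(map[string]*entry)}
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(m.get("k", func(k string) string { return k + "-computed" }))
            }()
        }
        wg.Wait()
    }

The key property, as in findPackage, is that the expensive call runs outside the mutex, so concurrent lookups of different keys proceed in parallel while duplicate lookups of the same key block only on the channel.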
+func (imp *importer) findPath(from, to string) []string { + imp.graphMu.Lock() + defer imp.graphMu.Unlock() + + seen := make(map[string]bool) + var search func(stack []string, importPath string) []string + search = func(stack []string, importPath string) []string { + if !seen[importPath] { + seen[importPath] = true + stack = append(stack, importPath) + if importPath == to { + return stack + } + for x := range imp.graph[importPath] { + if p := search(stack, x); p != nil { + return p + } + } + } + return nil + } + return search(make([]string, 0, 20), from) +} + +// startLoad initiates the loading, parsing and type-checking of the +// specified package and its dependencies, if it has not already begun. +// +// It returns an importInfo, not necessarily in a completed state. The +// caller must call awaitCompletion() before accessing its info field. +// +// startLoad is concurrency-safe and idempotent. +// +func (imp *importer) startLoad(bp *build.Package) *importInfo { + path := bp.ImportPath + imp.importedMu.Lock() + ii, ok := imp.imported[path] + if !ok { + ii = &importInfo{path: path, complete: make(chan struct{})} + imp.imported[path] = ii + go func() { + info := imp.load(bp) + ii.Complete(info) + }() + } + imp.importedMu.Unlock() + + return ii +} + +// load implements package loading by parsing Go source files +// located by go/build. +func (imp *importer) load(bp *build.Package) *PackageInfo { + info := imp.newPackageInfo(bp.ImportPath, bp.Dir) + info.Importable = true + files, errs := imp.conf.parsePackageFiles(bp, 'g') + for _, err := range errs { + info.appendError(err) + } + + imp.addFiles(info, files, true) + + imp.progMu.Lock() + imp.prog.importMap[bp.ImportPath] = info.Pkg + imp.progMu.Unlock() + + return info +} + +// addFiles adds and type-checks the specified files to info, loading +// their dependencies if needed. The order of files determines the +// package initialization order. It may be called multiple times on the +// same package. Errors are appended to the info.Errors field. +// +// cycleCheck determines whether the imports within files create +// dependency edges that should be checked for potential cycles. +// +func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { + // Ensure the dependencies are loaded, in parallel. + var fromPath string + if cycleCheck { + fromPath = info.Pkg.Path() + } + // TODO(adonovan): opt: make the caller do scanImports. + // Callers with a build.Package can skip it. + imp.importAll(fromPath, info.dir, scanImports(files), 0) + + if trace { + fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", + time.Since(imp.start), info.Pkg.Path(), len(files)) + } + + // Don't call checker.Files on Unsafe, even with zero files, + // because it would mutate the package, which is a global. + if info.Pkg == types.Unsafe { + if len(files) > 0 { + panic(`"unsafe" package contains unexpected files`) + } + } else { + // Ignore the returned (first) error since we + // already collect them all in the PackageInfo. + info.checker.Files(files) + info.Files = append(info.Files, files...) 
+ } + + if imp.conf.AfterTypeCheck != nil { + imp.conf.AfterTypeCheck(info, files) + } + + if trace { + fmt.Fprintf(os.Stderr, "%s: stop %q\n", + time.Since(imp.start), info.Pkg.Path()) + } +} + +func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { + var pkg *types.Package + if path == "unsafe" { + pkg = types.Unsafe + } else { + pkg = types.NewPackage(path, "") + } + info := &PackageInfo{ + Pkg: pkg, + Info: types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + }, + errorFunc: imp.conf.TypeChecker.Error, + dir: dir, + } + + // Copy the types.Config so we can vary it across PackageInfos. + tc := imp.conf.TypeChecker + tc.IgnoreFuncBodies = false + if f := imp.conf.TypeCheckFuncBodies; f != nil { + tc.IgnoreFuncBodies = !f(path) + } + tc.Importer = closure{imp, info} + tc.Error = info.appendError // appendError wraps the user's Error function + + info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) + imp.progMu.Lock() + imp.prog.AllPackages[pkg] = info + imp.progMu.Unlock() + return info +} + +type closure struct { + imp *importer + info *PackageInfo +} + +func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go new file mode 100644 index 000000000..7f38dd740 --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/util.go @@ -0,0 +1,124 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +import ( + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "os" + "strconv" + "sync" + + "golang.org/x/tools/go/buildutil" +) + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 10) + +// parseFiles parses the Go source files within directory dir and +// returns the ASTs of the ones that could be at least partially parsed, +// along with a list of I/O and parse errors encountered. +// +// I/O is done via ctxt, which may specify a virtual file system. +// displayPath is used to transform the filenames attached to the ASTs. +// +func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { + if displayPath == nil { + displayPath = func(path string) string { return path } + } + var wg sync.WaitGroup + n := len(files) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range files { + if !buildutil.IsAbsPath(ctxt, file) { + file = buildutil.JoinPath(ctxt, dir, file) + } + wg.Add(1) + go func(i int, file string) { + ioLimit <- true // wait + defer func() { + wg.Done() + <-ioLimit // signal + }() + var rd io.ReadCloser + var err error + if ctxt.OpenFile != nil { + rd, err = ctxt.OpenFile(file) + } else { + rd, err = os.Open(file) + } + if err != nil { + errors[i] = err // open failed + return + } + + // ParseFile may return both an AST and an error. + parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) + rd.Close() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. 
+ var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// scanImports returns the set of all import paths from all +// import specs in the specified files. +func scanImports(files []*ast.File) map[string]bool { + imports := make(map[string]bool) + for _, f := range files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { + for _, spec := range decl.Specs { + spec := spec.(*ast.ImportSpec) + + // NB: do not assume the program is well-formed! + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // quietly ignore the error + } + if path == "C" { + continue // skip pseudopackage + } + imports[path] = true + } + } + } + } + return imports +} + +// ---------- Internal helpers ---------- + +// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) +func tokenFileContainsPos(f *token.File, pos token.Pos) bool { + p := int(pos) + base := f.Base() + return base <= p && p < base+f.Size() +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 000000000..4bfe28a51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,221 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. 
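As a concrete counterpart to the description above, a minimal client sketch follows. It is illustrative only: the chosen LoadMode flags and the file path in the "file=" query are placeholders.

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
        }
        // Plain patterns are passed through to the underlying build tool;
        // "file=path/to/file.go" asks for the package(s) enclosing that file.
        pkgs, err := packages.Load(cfg, "bytes", "file=path/to/file.go")
        if err != nil {
            log.Fatal(err)
        }
        for _, p := range pkgs {
            fmt.Println(p.ID, p.GoFiles)
        }
    }
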
Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. 
+ +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. 
+go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 000000000..7242a0a7d --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + exec "golang.org/x/sys/execabs" + "os" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. 
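The protocol above can be satisfied by a very small driver binary. The sketch below is illustrative only: the request fields mirror the driverRequest struct defined next, and the response fields (Roots, Packages) are assumptions based on how driverResponse is used in this file; the authoritative schema is driverResponse in packages.go.

    package main

    import (
        "encoding/json"
        "os"
    )

    // request mirrors driverRequest (see below); field names follow its JSON tags.
    type request struct {
        Mode       int               `json:"mode"`
        Env        []string          `json:"env"`
        BuildFlags []string          `json:"build_flags"`
        Tests      bool              `json:"tests"`
        Overlay    map[string][]byte `json:"overlay"`
    }

    // response is an assumed subset of driverResponse (defined in packages.go).
    type response struct {
        Roots    []string
        Packages []json.RawMessage
    }

    func main() {
        var req request
        if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
            os.Exit(1) // the request arrives as JSON on stdin
        }
        patterns := os.Args[1:] // the patterns to load arrive in argv
        _ = patterns            // a real driver resolves these against its build system
        // Emit an (empty) response on stdout; a real driver fills in Roots and Packages.
        json.NewEncoder(os.Stdout).Encode(response{})
    }

Installed either as a gopackagesdriver binary on PATH or pointed to by the GOPACKAGESDRIVER environment variable, such a driver takes precedence over the go list driver, as findExternalDriver below shows.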
+ +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 000000000..0e1e7f11f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1099 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/xerrors" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. 
+type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + // types.SizesFor always returns nil or a *types.StdSizes. + response.dr.Sizes, _ = sizes.(*types.StdSizes) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. 
+ fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. 
+type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. 
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
+ } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. 
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { + response.Packages = append(response.Packages, pkg) + } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) + + return &response, nil +} + +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. +func (state *golistState) getPkgPath(dir string) (string, bool, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", false, err + } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. + if !strings.HasPrefix(absDir, rdir) { + continue + } + // TODO(matloob): This doesn't properly handle symlinks. 
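+ // Illustrative example (hypothetical paths): with roots
+ // {"/home/user/mod": "example.com/mod"}, dir "/home/user/mod/internal/foo"
+ // resolves below to the package path "example.com/mod/internal/foo".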
+ r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true, nil + } + return filepath.ToSlash(r), true, nil + } + return "", false, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, xerrors.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? 
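+ // Illustrative note: golistargs above always passes flags such as -compiled
+ // and -find, so a go command too old to know them rejects the invocation
+ // with "flag provided but not defined"; that case is surfaced as
+ // goTooOldError below.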
+ if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. 
+ // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. 
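+ // Illustrative shape of the overlay.json written below (paths hypothetical):
+ // {"replace": {"/src/pkg/a.go": "/tmp/gopackages-x/12345-srcpkga.go"}}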
+ if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. + noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. + filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 000000000..9576b472f --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. 
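+ // Illustrative note: a package and its test variant (e.g. IDs "fmt" and
+ // "fmt [fmt.test]") share the same PkgPath, so whichever is seen last
+ // overwrites the earlier entry -- one reason this mapping is approximate.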
+ havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. + if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. 
+ if pkg == nil || pkg.ID == "command-line-arguments" { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } + if !ok { + break + } + var forTest string // only set for x tests + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + forTest = pkgPath + pkgPath += "_test" + } + id := pkgPath + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + } + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. 
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. 
+ var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} + +// commonDir returns the directory that all files are in, "" if files is empty, +// or an error if they aren't in the same directory. +func commonDir(files []string) (string, error) { + seen := make(map[string]bool) + for _, f := range files { + seen[filepath.Dir(f)] = true + } + if len(seen) > 1 { + return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) + } + for k := range seen { + // seen has only one element; return it. + return k, nil + } + return "", nil // no files +} + +// It is possible that the files in the disk directory dir have a different package +// name from newName, which is deduced from the overlays. If they all have a different +// package name, and they all have the same package name, then that name becomes +// the package name. 
+// It returns true if it changes the package name, false otherwise. +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { + names := make(map[string]int) + for _, p := range pkgsOfDir { + names[p.Name]++ + } + if len(names) != 1 { + // some files are in different packages + return + } + var oldName string + for k := range names { + oldName = k + } + if newName == oldName { + return + } + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
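+ // For example (per the "go help packages" rules quoted above):
+ //   matchPattern("net/...")("net")            == true  // trailing /... may match nothing
+ //   matchPattern("net/...")("net/http")       == true
+ //   matchPattern("foo/...")("foo/vendor/bar") == false // wildcards never match a vendored path
+ //   matchPattern("foo/...")("foo/vendor")     == true  // a trailing "vendor" element is ordinary code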
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 000000000..7ea37e7ee --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportsFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportsFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 000000000..8a1a2d681 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1239 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. 
+// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportsFile adds ExportFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. 
+ // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. 
+ // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. 
+ IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. 
+type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. 
+ // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. 
+ (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + lpkg := &loaderPackage{ + Package: pkg, + needtypes: needtypes, + needsrc: needsrc, + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. 
+ if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? 
+ errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. 
+ illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. 
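The parseFile cache above combines two concurrency idioms: a per-entry ready channel, so only the first caller parses a given file while later callers block until the result is published, and a counting semaphore (ioLimit) that bounds parallel reads. A minimal stand-alone sketch of the same pattern follows; the names (entry, cache, get) are hypothetical and not part of this package.

package main

import (
	"fmt"
	"sync"
)

// entry mirrors parseValue: the first goroutine to request a key does
// the work; later goroutines block on ready until the result is set.
type entry struct {
	val   string
	err   error
	ready chan struct{}
}

type cache struct {
	mu      sync.Mutex
	entries map[string]*entry
	limit   chan struct{} // counting semaphore, like ioLimit above
}

func newCache(parallelism int) *cache {
	return &cache{
		entries: make(map[string]*entry),
		limit:   make(chan struct{}, parallelism),
	}
}

func (c *cache) get(key string, load func(string) (string, error)) (string, error) {
	c.mu.Lock()
	e, ok := c.entries[key]
	if ok {
		c.mu.Unlock()
		<-e.ready // cache hit: wait until the loader has finished
	} else {
		e = &entry{ready: make(chan struct{})}
		c.entries[key] = e
		c.mu.Unlock()

		c.limit <- struct{}{} // acquire a slot
		e.val, e.err = load(key)
		<-c.limit // release the slot
		close(e.ready)
	}
	return e.val, e.err
}

func main() {
	c := newCache(4)
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _ := c.get("a.go", func(k string) (string, error) { return "parsed " + k, nil })
			fmt.Println(v)
		}()
	}
	wg.Wait()
}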
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. 
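loadFromExportData above is essentially a wrapper around golang.org/x/tools/go/gcexportdata plus the PkgPath-keyed view. A stripped-down illustration of the underlying calls follows; it assumes export data for "fmt" can be located on disk (gcexportdata.Find only works for GOPATH/toolchain layouts and returns empty strings otherwise), so treat it as a sketch rather than a drop-in recipe.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate compiled export data for a package, playing the role that
	// Package.ExportFile plays in the loader above.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	// This map is the equivalent of the PkgPath-keyed "view": packages
	// already materialized are reused, indirect dependencies are created.
	imports := make(map[string]*types.Package)
	fset := token.NewFileSet()
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "complete:", pkg.Complete())
}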
+func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { + // If NeedTypesInfo, go/packages needs to do typechecking itself so it can + // associate type info with the AST. To do so, we need the export data + // for dependencies, which means we need to ask for the direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { + // With NeedDeps we need to load at least direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 000000000..a1dcc40b7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/go/ssa/blockopt.go b/vendor/golang.org/x/tools/go/ssa/blockopt.go new file mode 100644 index 000000000..e79260a21 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/blockopt.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Simple block optimizations to simplify the control flow graph. + +// TODO(adonovan): opt: instead of creating several "unreachable" blocks +// per function in the Builder, reuse a single one (e.g. at Blocks[1]) +// to reduce garbage. + +import ( + "fmt" + "os" +) + +// If true, perform sanity checking and show progress at each +// successive iteration of optimizeBlocks. Very verbose. +const debugBlockOpt = false + +// markReachable sets Index=-1 for all blocks reachable from b. 
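Visit and PrintErrors are the pieces of this vendored package that client code normally calls after packages.Load. An illustrative sketch of that flow (the pattern "./..." and the chosen Mode bits are arbitrary):

package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only what we need; unrequested fields are zeroed
		// out before Load returns, as refine() above shows.
		Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Report per-package errors, dependencies first.
	if n := packages.PrintErrors(pkgs); n > 0 {
		os.Exit(1)
	}
	// Preorder walk over the import graph rooted at pkgs.
	packages.Visit(pkgs, func(p *packages.Package) bool {
		fmt.Println(p.ID)
		return true // descend into imports
	}, nil)
}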
+func markReachable(b *BasicBlock) { + b.Index = -1 + for _, succ := range b.Succs { + if succ.Index == 0 { + markReachable(succ) + } + } +} + +// deleteUnreachableBlocks marks all reachable blocks of f and +// eliminates (nils) all others, including possibly cyclic subgraphs. +// +func deleteUnreachableBlocks(f *Function) { + const white, black = 0, -1 + // We borrow b.Index temporarily as the mark bit. + for _, b := range f.Blocks { + b.Index = white + } + markReachable(f.Blocks[0]) + if f.Recover != nil { + markReachable(f.Recover) + } + for i, b := range f.Blocks { + if b.Index == white { + for _, c := range b.Succs { + if c.Index == black { + c.removePred(b) // delete white->black edge + } + } + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "unreachable", b) + } + f.Blocks[i] = nil // delete b + } + } + f.removeNilBlocks() +} + +// jumpThreading attempts to apply simple jump-threading to block b, +// in which a->b->c become a->c if b is just a Jump. +// The result is true if the optimization was applied. +// +func jumpThreading(f *Function, b *BasicBlock) bool { + if b.Index == 0 { + return false // don't apply to entry block + } + if b.Instrs == nil { + return false + } + if _, ok := b.Instrs[0].(*Jump); !ok { + return false // not just a jump + } + c := b.Succs[0] + if c == b { + return false // don't apply to degenerate jump-to-self. + } + if c.hasPhi() { + return false // not sound without more effort + } + for j, a := range b.Preds { + a.replaceSucc(b, c) + + // If a now has two edges to c, replace its degenerate If by Jump. + if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { + jump := new(Jump) + jump.setBlock(a) + a.Instrs[len(a.Instrs)-1] = jump + a.Succs = a.Succs[:1] + c.removePred(b) + } else { + if j == 0 { + c.replacePred(b, a) + } else { + c.Preds = append(c.Preds, a) + } + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) + } + } + f.Blocks[b.Index] = nil // delete b + return true +} + +// fuseBlocks attempts to apply the block fusion optimization to block +// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. +// The result is true if the optimization was applied. +// +func fuseBlocks(f *Function, a *BasicBlock) bool { + if len(a.Succs) != 1 { + return false + } + b := a.Succs[0] + if len(b.Preds) != 1 { + return false + } + + // Degenerate &&/|| ops may result in a straight-line CFG + // containing φ-nodes. (Ideally we'd replace such them with + // their sole operand but that requires Referrers, built later.) + if b.hasPhi() { + return false // not sound without further effort + } + + // Eliminate jump at end of A, then copy all of B across. + a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) + for _, instr := range b.Instrs { + instr.setBlock(a) + } + + // A inherits B's successors + a.Succs = append(a.succs2[:0], b.Succs...) + + // Fix up Preds links of all successors of B. + for _, c := range b.Succs { + c.replacePred(b, a) + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) + } + + f.Blocks[b.Index] = nil // delete b + return true +} + +// optimizeBlocks() performs some simple block optimizations on a +// completed function: dead block elimination, block fusion, jump +// threading. +// +func optimizeBlocks(f *Function) { + deleteUnreachableBlocks(f) + + // Loop until no further progress. 
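The rewrites that optimizeBlocks drives are easiest to see on a toy control-flow graph. The sketch below uses a hypothetical block type (not ssa.BasicBlock) and applies only the a->b->c jump-threading idea, ignoring the phi-node and predecessor bookkeeping that jumpThreading above has to handle:

package main

import "fmt"

// block is a toy CFG node: a name, successor edges, and a flag saying
// whether the block does anything besides jump to its sole successor.
type block struct {
	name     string
	succs    []*block
	jumpOnly bool
}

// threadJumps rewrites a->b->c into a->c whenever b is a jump-only
// block with exactly one successor (and is not a self-loop).
func threadJumps(blocks []*block) {
	for _, a := range blocks {
		for i, b := range a.succs {
			for b.jumpOnly && len(b.succs) == 1 && b.succs[0] != b {
				b = b.succs[0]
			}
			a.succs[i] = b
		}
	}
}

func main() {
	c := &block{name: "c"}
	b := &block{name: "b", succs: []*block{c}, jumpOnly: true}
	a := &block{name: "a", succs: []*block{b}}
	threadJumps([]*block{a, b, c})
	fmt.Println(a.succs[0].name) // "c": the trivial block b was bypassed
}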
+ changed := true + for changed { + changed = false + + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + + for _, b := range f.Blocks { + // f.Blocks will temporarily contain nils to indicate + // deleted blocks; we remove them at the end. + if b == nil { + continue + } + + // Fuse blocks. b->c becomes bc. + if fuseBlocks(f, b) { + changed = true + } + + // a->b->c becomes a->c if b contains only a Jump. + if jumpThreading(f, b) { + changed = true + continue // (b was disconnected) + } + } + } + f.removeNilBlocks() +} diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go new file mode 100644 index 000000000..2d0fdaa4e --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -0,0 +1,2386 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the BUILD phase of SSA construction. +// +// SSA construction has two phases, CREATE and BUILD. In the CREATE phase +// (create.go), all packages are constructed and type-checked and +// definitions of all package members are created, method-sets are +// computed, and wrapper methods are synthesized. +// ssa.Packages are created in arbitrary order. +// +// In the BUILD phase (builder.go), the builder traverses the AST of +// each Go source function and generates SSA instructions for the +// function body. Initializer expressions for package-level variables +// are emitted to the package's init() function in the order specified +// by go/types.Info.InitOrder, then code for each function in the +// package is generated in lexical order. +// The BUILD phases for distinct packages are independent and are +// executed in parallel. +// +// TODO(adonovan): indeed, building functions is now embarrassingly parallel. +// Audit for concurrency then benchmark using more goroutines. +// +// The builder's and Program's indices (maps) are populated and +// mutated during the CREATE phase, but during the BUILD phase they +// remain constant. The sole exception is Prog.methodSets and its +// related maps, which are protected by a dedicated mutex. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "os" + "sync" +) + +type opaqueType struct { + types.Type + name string +} + +func (t *opaqueType) String() string { return t.name } + +var ( + varOk = newVar("ok", tBool) + varIndex = newVar("index", tInt) + + // Type constants. + tBool = types.Typ[types.Bool] + tByte = types.Typ[types.Byte] + tInt = types.Typ[types.Int] + tInvalid = types.Typ[types.Invalid] + tString = types.Typ[types.String] + tUntypedNil = types.Typ[types.UntypedNil] + tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators + tEface = types.NewInterface(nil, nil).Complete() + + // SSA Value constants. + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(constant.MakeBool(true), tBool) +) + +// builder holds state associated with the package currently being built. +// Its methods contain all the logic for AST-to-SSA conversion. +type builder struct{} + +// cond emits to fn code to evaluate boolean condition e and jump +// to t or f depending on its value, performing various simplifications. +// +// Postcondition: fn.currentBlock is nil. 
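The CREATE/BUILD split described in the file header above is usually driven through golang.org/x/tools/go/ssa/ssautil rather than by calling the builder directly. A hedged sketch of that entry point (loading "fmt" is an arbitrary choice; LoadAllSyntax is the older catch-all mode constant):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	// CREATE phase: construct SSA packages for everything we loaded.
	prog, ssaPkgs := ssautil.Packages(pkgs, ssa.SanityCheckFunctions)
	// BUILD phase: generate SSA instructions for all function bodies.
	prog.Build()
	for _, p := range ssaPkgs {
		if p != nil { // entries are nil for ill-typed packages
			fmt.Println(p.Pkg.Path())
		}
	}
}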
+// +func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { + switch e := e.(type) { + case *ast.ParenExpr: + b.cond(fn, e.X, t, f) + return + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND: + ltrue := fn.newBasicBlock("cond.true") + b.cond(fn, e.X, ltrue, f) + fn.currentBlock = ltrue + b.cond(fn, e.Y, t, f) + return + + case token.LOR: + lfalse := fn.newBasicBlock("cond.false") + b.cond(fn, e.X, t, lfalse) + fn.currentBlock = lfalse + b.cond(fn, e.Y, t, f) + return + } + + case *ast.UnaryExpr: + if e.Op == token.NOT { + b.cond(fn, e.X, f, t) + return + } + } + + // A traditional compiler would simplify "if false" (etc) here + // but we do not, for better fidelity to the source code. + // + // The value of a constant condition may be platform-specific, + // and may cause blocks that are reachable in some configuration + // to be hidden from subsequent analyses such as bug-finding tools. + emitIf(fn, b.expr(fn, e), t, f) +} + +// logicalBinop emits code to fn to evaluate e, a &&- or +// ||-expression whose reified boolean value is wanted. +// The value is returned. +// +func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { + rhs := fn.newBasicBlock("binop.rhs") + done := fn.newBasicBlock("binop.done") + + // T(e) = T(e.X) = T(e.Y) after untyped constants have been + // eliminated. + // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. + t := fn.Pkg.typeOf(e) + + var short Value // value of the short-circuit path + switch e.Op { + case token.LAND: + b.cond(fn, e.X, rhs, done) + short = NewConst(constant.MakeBool(false), t) + + case token.LOR: + b.cond(fn, e.X, done, rhs) + short = NewConst(constant.MakeBool(true), t) + } + + // Is rhs unreachable? + if rhs.Preds == nil { + // Simplify false&&y to false, true||y to true. + fn.currentBlock = done + return short + } + + // Is done unreachable? + if done.Preds == nil { + // Simplify true&&y (or false||y) to y. + fn.currentBlock = rhs + return b.expr(fn, e.Y) + } + + // All edges from e.X to done carry the short-circuit value. + var edges []Value + for range done.Preds { + edges = append(edges, short) + } + + // The edge from e.Y to done carries the value of e.Y. + fn.currentBlock = rhs + edges = append(edges, b.expr(fn, e.Y)) + emitJump(fn, done) + fn.currentBlock = done + + phi := &Phi{Edges: edges, Comment: e.Op.String()} + phi.pos = e.OpPos + phi.typ = t + return done.emit(phi) +} + +// exprN lowers a multi-result expression e to SSA form, emitting code +// to fn and returning a single Value whose type is a *types.Tuple. +// The caller must access the components via Extract. +// +// Multi-result expressions include CallExprs in a multi-value +// assignment or return statement, and "value,ok" uses of +// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op +// is token.ARROW). +// +func (b *builder) exprN(fn *Function, e ast.Expr) Value { + typ := fn.Pkg.typeOf(e).(*types.Tuple) + switch e := e.(type) { + case *ast.ParenExpr: + return b.exprN(fn, e.X) + + case *ast.CallExpr: + // Currently, no built-in function nor type conversion + // has multiple results, so we can avoid some of the + // cases for single-valued CallExpr. 
+ var c Call + b.setCall(fn, e, &c.Call) + c.typ = typ + return fn.emit(&c) + + case *ast.IndexExpr: + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + lookup := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + CommaOk: true, + } + lookup.setType(typ) + lookup.setPos(e.Lbrack) + return fn.emit(lookup) + + case *ast.TypeAssertExpr: + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) + + case *ast.UnaryExpr: // must be receive <- + unop := &UnOp{ + Op: token.ARROW, + X: b.expr(fn, e.X), + CommaOk: true, + } + unop.setType(typ) + unop.setPos(e.OpPos) + return fn.emit(unop) + } + panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) +} + +// builtin emits to fn SSA instructions to implement a call to the +// built-in function obj with the specified arguments +// and return type. It returns the value defined by the result. +// +// The result is nil if no special handling was required; in this case +// the caller should treat this like an ordinary library function +// call. +// +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { + switch obj.Name() { + case "make": + switch typ.Underlying().(type) { + case *types.Slice: + n := b.expr(fn, args[1]) + m := n + if len(args) == 3 { + m = b.expr(fn, args[2]) + } + if m, ok := m.(*Const); ok { + // treat make([]T, n, m) as new([m]T)[:n] + cap := m.Int64() + at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) + alloc := emitNew(fn, at, pos) + alloc.Comment = "makeslice" + v := &Slice{ + X: alloc, + High: n, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + v := &MakeSlice{ + Len: n, + Cap: m, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Map: + var res Value + if len(args) == 2 { + res = b.expr(fn, args[1]) + } + v := &MakeMap{Reserve: res} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Chan: + var sz Value = vZero + if len(args) == 2 { + sz = b.expr(fn, args[1]) + } + v := &MakeChan{Size: sz} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + + case "new": + alloc := emitNew(fn, deref(typ), pos) + alloc.Comment = "new" + return alloc + + case "len", "cap": + // Special case: len or cap of an array or *array is + // based on the type, not the value which may be nil. + // We must still evaluate the value, though. (If it + // was side-effect free, the whole call would have + // been constant-folded.) + t := deref(fn.Pkg.typeOf(args[0])).Underlying() + if at, ok := t.(*types.Array); ok { + b.expr(fn, args[0]) // for effects only + return intConst(at.Len()) + } + // Otherwise treat as normal. + + case "panic": + fn.emit(&Panic{ + X: emitConv(fn, b.expr(fn, args[0]), tEface), + pos: pos, + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + return vTrue // any non-nil Value will do + } + return nil // treat all others as a regular function call +} + +// addr lowers a single-result addressable expression e to SSA form, +// emitting code to fn and returning the location (an lvalue) defined +// by the expression. +// +// If escaping is true, addr marks the base variable of the +// addressable expression e as being a potentially escaping pointer +// value. For example, in this code: +// +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c +// +// the application of & causes a.b[0].c to have its address taken, +// which means that ultimately the local variable a must be +// heap-allocated. This is a simple but very conservative escape +// analysis. 
+// +// Operations forming potentially escaping pointers include: +// - &x, including when implicit in method call or composite literals. +// - a[:] iff a is an array (not *array) +// - references to variables in lexically enclosing functions. +// +func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { + switch e := e.(type) { + case *ast.Ident: + if isBlankIdent(e) { + return blank{} + } + obj := fn.Pkg.objectOf(e) + v := fn.Prog.packageLevelValue(obj) // var (address) + if v == nil { + v = fn.lookup(obj, escaping) + } + return &address{addr: v, pos: e.Pos(), expr: e} + + case *ast.CompositeLit: + t := deref(fn.Pkg.typeOf(e)) + var v *Alloc + if escaping { + v = emitNew(fn, t, e.Lbrace) + } else { + v = fn.addLocal(t, e.Lbrace) + } + v.Comment = "complit" + var sb storebuf + b.compLit(fn, v, e, true, &sb) + sb.emit(fn) + return &address{addr: v, pos: e.Lbrace, expr: e} + + case *ast.ParenExpr: + return b.addr(fn, e.X, escaping) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.addr(fn, e.Sel, escaping) + } + if sel.Kind() != types.FieldVal { + panic(sel) + } + wantAddr := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + last := len(sel.Index()) - 1 + return &address{ + addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), + pos: e.Sel.Pos(), + expr: e.Sel, + } + + case *ast.IndexExpr: + var x Value + var et types.Type + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + x = b.addr(fn, e.X, escaping).address(fn) + et = types.NewPointer(t.Elem()) + case *types.Pointer: // *array + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) + case *types.Slice: + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem()) + case *types.Map: + return &element{ + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), t.Key()), + t: t.Elem(), + pos: e.Lbrack, + } + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + v := &IndexAddr{ + X: x, + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(et) + return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} + + case *ast.StarExpr: + return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} + } + + panic(fmt.Sprintf("unexpected address expression: %T", e)) +} + +type store struct { + lhs lvalue + rhs Value +} + +type storebuf struct{ stores []store } + +func (sb *storebuf) store(lhs lvalue, rhs Value) { + sb.stores = append(sb.stores, store{lhs, rhs}) +} + +func (sb *storebuf) emit(fn *Function) { + for _, s := range sb.stores { + s.lhs.store(fn, s.rhs) + } +} + +// assign emits to fn code to initialize the lvalue loc with the value +// of expression e. If isZero is true, assign assumes that loc holds +// the zero value for its type. +// +// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate +// better code in some cases, e.g., for composite literals in an +// addressable location. +// +// If sb is not nil, assign generates code to evaluate expression e, but +// not to update loc. Instead, the necessary stores are appended to the +// storebuf sb so that they can be executed later. This allows correct +// in-place update of existing variables when the RHS is a composite +// literal that may reference parts of the LHS. +// +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { + // Can we initialize it in place? 
+ if e, ok := unparen(e).(*ast.CompositeLit); ok { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + if _, ok := loc.(blank); !ok { // avoid calling blank.typ() + if isPointer(loc.typ()) { + ptr := b.addr(fn, e, true).address(fn) + // copy address + if sb != nil { + sb.store(loc, ptr) + } else { + loc.store(fn, ptr) + } + return + } + } + + if _, ok := loc.(*address); ok { + if isInterface(loc.typ()) { + // e.g. var x interface{} = T{...} + // Can't in-place initialize an interface value. + // Fall back to copying. + } else { + // x = T{...} or x := T{...} + addr := loc.address(fn) + if sb != nil { + b.compLit(fn, addr, e, isZero, sb) + } else { + var sb storebuf + b.compLit(fn, addr, e, isZero, &sb) + sb.emit(fn) + } + + // Subtle: emit debug ref for aggregate types only; + // slice and map are handled by store ops in compLit. + switch loc.typ().Underlying().(type) { + case *types.Struct, *types.Array: + emitDebugRef(fn, e, addr, true) + } + + return + } + } + } + + // simple case: just copy + rhs := b.expr(fn, e) + if sb != nil { + sb.store(loc, rhs) + } else { + loc.store(fn, rhs) + } +} + +// expr lowers a single-result expression e to SSA form, emitting code +// to fn and returning the Value defined by the expression. +// +func (b *builder) expr(fn *Function, e ast.Expr) Value { + e = unparen(e) + + tv := fn.Pkg.info.Types[e] + + // Is expression a constant? + if tv.Value != nil { + return NewConst(tv.Value, tv.Type) + } + + var v Value + if tv.Addressable() { + // Prefer pointer arithmetic ({Index,Field}Addr) followed + // by Load over subelement extraction (e.g. Index, Field), + // to avoid large copies. + v = b.addr(fn, e, false).load(fn) + } else { + v = b.expr0(fn, e, tv) + } + if fn.debugInfo() { + emitDebugRef(fn, e, v, false) + } + return v +} + +func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { + switch e := e.(type) { + case *ast.BasicLit: + panic("non-constant BasicLit") // unreachable + + case *ast.FuncLit: + fn2 := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), + pos: e.Type.Func, + parent: fn, + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: e, + } + fn.AnonFuncs = append(fn.AnonFuncs, fn2) + b.buildFunction(fn2) + if fn2.FreeVars == nil { + return fn2 + } + v := &MakeClosure{Fn: fn2} + v.setType(tv.Type) + for _, fv := range fn2.FreeVars { + v.Bindings = append(v.Bindings, fv.outer) + fv.outer = nil + } + return fn.emit(v) + + case *ast.TypeAssertExpr: // single-result form only + return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen) + + case *ast.CallExpr: + if fn.Pkg.info.Types[e.Fun].IsType() { + // Explicit type conversion, e.g. string(x) or big.Int(x) + x := b.expr(fn, e.Args[0]) + y := emitConv(fn, x, tv.Type) + if y != x { + switch y := y.(type) { + case *Convert: + y.pos = e.Lparen + case *ChangeType: + y.pos = e.Lparen + case *MakeInterface: + y.pos = e.Lparen + } + } + return y + } + // Call to "intrinsic" built-ins, e.g. new, make, panic. + if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { + if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil { + return v + } + } + } + // Regular function call. + var v Call + b.setCall(fn, e, &v.Call) + v.setType(tv.Type) + return fn.emit(&v) + + case *ast.UnaryExpr: + switch e.Op { + case token.AND: // &X --- potentially escaping. 
+ addr := b.addr(fn, e.X, true) + if _, ok := unparen(e.X).(*ast.StarExpr); ok { + // &*p must panic if p is nil (http://golang.org/s/go12nil). + // For simplicity, we'll just (suboptimally) rely + // on the side effects of a load. + // TODO(adonovan): emit dedicated nilcheck. + addr.load(fn) + } + return addr.address(fn) + case token.ADD: + return b.expr(fn, e.X) + case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ + v := &UnOp{ + Op: e.Op, + X: b.expr(fn, e.X), + } + v.setPos(e.OpPos) + v.setType(tv.Type) + return fn.emit(v) + default: + panic(e.Op) + } + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND, token.LOR: + return b.logicalBinop(fn, e) + case token.SHL, token.SHR: + fallthrough + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos) + + case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) + // The type of x==y may be UntypedBool. + return emitConv(fn, cmp, types.Default(tv.Type)) + default: + panic("illegal op in BinaryExpr: " + e.Op.String()) + } + + case *ast.SliceExpr: + var low, high, max Value + var x Value + switch fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Potentially escaping. + x = b.addr(fn, e.X, true).address(fn) + case *types.Basic, *types.Slice, *types.Pointer: // *array + x = b.expr(fn, e.X) + default: + panic("unreachable") + } + if e.High != nil { + high = b.expr(fn, e.High) + } + if e.Low != nil { + low = b.expr(fn, e.Low) + } + if e.Slice3 { + max = b.expr(fn, e.Max) + } + v := &Slice{ + X: x, + Low: low, + High: high, + Max: max, + } + v.setPos(e.Lbrack) + v.setType(tv.Type) + return fn.emit(v) + + case *ast.Ident: + obj := fn.Pkg.info.Uses[e] + // Universal built-in or nil? + switch obj := obj.(type) { + case *types.Builtin: + return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} + case *types.Nil: + return nilConst(tv.Type) + } + // Package-level func or var? + if v := fn.Prog.packageLevelValue(obj); v != nil { + if _, ok := obj.(*types.Var); ok { + return emitLoad(fn, v) // var (address) + } + return v // (func) + } + // Local var. + return emitLoad(fn, fn.lookup(obj, false)) // var (address) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // builtin unsafe.{Add,Slice} + if obj, ok := fn.Pkg.info.Uses[e.Sel].(*types.Builtin); ok { + return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} + } + // qualified identifier + return b.expr(fn, e.Sel) + } + switch sel.Kind() { + case types.MethodExpr: + // (*T).f or T.f, the method f from the method-set of type T. + // The result is a "thunk". + return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type) + + case types.MethodVal: + // e.f where e is an expression and f is a method. + // The result is a "bound". + obj := sel.Obj().(*types.Func) + rt := recvType(obj) + wantAddr := isPointer(rt) + escaping := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + if isInterface(rt) { + // If v has interface type I, + // we must emit a check that v is non-nil. + // We use: typeassert v.(I). 
+ emitTypeAssert(fn, v, rt, token.NoPos) + } + c := &MakeClosure{ + Fn: makeBound(fn.Prog, obj), + Bindings: []Value{v}, + } + c.setPos(e.Sel.Pos()) + c.setType(tv.Type) + return fn.emit(c) + + case types.FieldVal: + indices := sel.Index() + last := len(indices) - 1 + v := b.expr(fn, e.X) + v = emitImplicitSelections(fn, v, indices[:last]) + v = emitFieldSelection(fn, v, indices[last], false, e.Sel) + return v + } + + panic("unexpected expression-relative selector") + + case *ast.IndexExpr: + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Non-addressable array (in a register). + v := &Index{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), tInt), + } + v.setPos(e.Lbrack) + v.setType(t.Elem()) + return fn.emit(v) + + case *types.Map: + // Maps are not addressable. + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + v := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + } + v.setPos(e.Lbrack) + v.setType(mapt.Elem()) + return fn.emit(v) + + case *types.Basic: // => string + // Strings are not addressable. + v := &Lookup{ + X: b.expr(fn, e.X), + Index: b.expr(fn, e.Index), + } + v.setPos(e.Lbrack) + v.setType(tByte) + return fn.emit(v) + + case *types.Slice, *types.Pointer: // *array + // Addressable slice/array; use IndexAddr and Load. + return b.addr(fn, e, false).load(fn) + + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + + case *ast.CompositeLit, *ast.StarExpr: + // Addressable types (lvalues) + return b.addr(fn, e, false).load(fn) + } + + panic(fmt.Sprintf("unexpected expr: %T", e)) +} + +// stmtList emits to fn code for all statements in list. +func (b *builder) stmtList(fn *Function, list []ast.Stmt) { + for _, s := range list { + b.stmt(fn, s) + } +} + +// receiver emits to fn code for expression e in the "receiver" +// position of selection e.f (where f may be a field or a method) and +// returns the effective receiver after applying the implicit field +// selections of sel. +// +// wantAddr requests that the result is an an address. If +// !sel.Indirect(), this may require that e be built in addr() mode; it +// must thus be addressable. +// +// escaping is defined as per builder.addr(). +// +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value { + var v Value + if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { + v = b.addr(fn, e, escaping).address(fn) + } else { + v = b.expr(fn, e) + } + + last := len(sel.Index()) - 1 + v = emitImplicitSelections(fn, v, sel.Index()[:last]) + if !wantAddr && isPointer(v.Type()) { + v = emitLoad(fn, v) + } + return v +} + +// setCallFunc populates the function parts of a CallCommon structure +// (Func, Method, Recv, Args[0]) based on the kind of invocation +// occurring in e. +// +func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { + c.pos = e.Lparen + + // Is this a method call? + if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + sel, ok := fn.Pkg.info.Selections[selector] + if ok && sel.Kind() == types.MethodVal { + obj := sel.Obj().(*types.Func) + recv := recvType(obj) + wantAddr := isPointer(recv) + escaping := true + v := b.receiver(fn, selector.X, wantAddr, escaping, sel) + if isInterface(recv) { + // Invoke-mode call. + c.Value = v + c.Method = obj + } else { + // "Call"-mode call. 
+ c.Value = fn.Prog.declaredFunc(obj) + c.Args = append(c.Args, v) + } + return + } + + // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): + // a statically dispatched call to the method f in the + // method-set of T or *T. T may be an interface. + // + // e.Fun would evaluate to a concrete method, interface + // wrapper function, or promotion wrapper. + // + // For now, we evaluate it in the usual way. + // + // TODO(adonovan): opt: inline expr() here, to make the + // call static and to avoid generation of wrappers. + // It's somewhat tricky as it may consume the first + // actual parameter if the call is "invoke" mode. + // + // Examples: + // type T struct{}; func (T) f() {} // "call" mode + // type T interface { f() } // "invoke" mode + // + // type S struct{ T } + // + // var s S + // S.f(s) + // (*S).f(&s) + // + // Suggested approach: + // - consume the first actual parameter expression + // and build it with b.expr(). + // - apply implicit field selections. + // - use MethodVal logic to populate fields of c. + } + + // Evaluate the function operand in the usual way. + c.Value = b.expr(fn, e.Fun) +} + +// emitCallArgs emits to f code for the actual parameters of call e to +// a (possibly built-in) function of effective type sig. +// The argument values are appended to args, which is then returned. +// +func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { + // f(x, y, z...): pass slice z straight through. + if e.Ellipsis != 0 { + for i, arg := range e.Args { + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) + args = append(args, v) + } + return args + } + + offset := len(args) // 1 if call has receiver, 0 otherwise + + // Evaluate actual parameter expressions. + // + // If this is a chained call of the form f(g()) where g has + // multiple return values (MRV), they are flattened out into + // args; a suffix of them may end up in a varargs slice. + for _, arg := range e.Args { + v := b.expr(fn, arg) + if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain + for i, n := 0, ttuple.Len(); i < n; i++ { + args = append(args, emitExtract(fn, v, i)) + } + } else { + args = append(args, v) + } + } + + // Actual->formal assignability conversions for normal parameters. + np := sig.Params().Len() // number of normal parameters + if sig.Variadic() { + np-- + } + for i := 0; i < np; i++ { + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) + } + + // Actual->formal assignability conversions for variadic parameter, + // and construction of slice. + if sig.Variadic() { + varargs := args[offset+np:] + st := sig.Params().At(np).Type().(*types.Slice) + vt := st.Elem() + if len(varargs) == 0 { + args = append(args, nilConst(st)) + } else { + // Replace a suffix of args with a slice containing it. + at := types.NewArray(vt, int64(len(varargs))) + a := emitNew(fn, at, token.NoPos) + a.setPos(e.Rparen) + a.Comment = "varargs" + for i, arg := range varargs { + iaddr := &IndexAddr{ + X: a, + Index: intConst(int64(i)), + } + iaddr.setType(types.NewPointer(vt)) + fn.emit(iaddr) + emitStore(fn, iaddr, arg, arg.Pos()) + } + s := &Slice{X: a} + s.setType(st) + args[offset+np] = fn.emit(s) + args = args[:offset+np+1] + } + } + return args +} + +// setCall emits to fn code to evaluate all the parameters of a function +// call e, and populates *c with those values. +// +func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { + // First deal with the f(...) part and optional receiver. 
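assignStmt and compLit above funnel writes through a storebuf so that every read of the old values happens before any store, which is exactly the observable semantics of a Go parallel assignment. A tiny plain-Go illustration of the ordering the builder has to preserve:

package main

import "fmt"

func main() {
	x, y := 1, 2
	// Both right-hand sides are evaluated with the old values of x and y
	// before either variable is updated; the builder preserves this by
	// buffering the stores (storebuf) and emitting them after the loads.
	x, y = y, x+y
	fmt.Println(x, y) // 2 3
}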
+ b.setCallFunc(fn, e, c) + + // Then append the other actual parameters. + sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) + if sig == nil { + panic(fmt.Sprintf("no signature for call of %s", e.Fun)) + } + c.Args = b.emitCallArgs(fn, sig, e, c.Args) +} + +// assignOp emits to fn code to perform loc = val. +func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, pos token.Pos) { + oldv := loc.load(fn) + loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type()), loc.typ(), pos)) +} + +// localValueSpec emits to fn code to define all of the vars in the +// function-local ValueSpec, spec. +// +func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { + switch { + case len(spec.Values) == len(spec.Names): + // e.g. var x, y = 0, 1 + // 1:1 assignment + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + } + lval := b.addr(fn, id, false) // non-escaping + b.assign(fn, lval, spec.Values[i], true, nil) + } + + case len(spec.Values) == 0: + // e.g. var x, y int + // Locals are implicitly zero-initialized. + for _, id := range spec.Names { + if !isBlankIdent(id) { + lhs := fn.addLocalForIdent(id) + if fn.debugInfo() { + emitDebugRef(fn, id, lhs, true) + } + } + } + + default: + // e.g. var x, y = pos() + tuple := b.exprN(fn, spec.Values[0]) + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + lhs := b.addr(fn, id, false) // non-escaping + lhs.store(fn, emitExtract(fn, tuple, i)) + } + } + } +} + +// assignStmt emits code to fn for a parallel assignment of rhss to lhss. +// isDef is true if this is a short variable declaration (:=). +// +// Note the similarity with localValueSpec. +// +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { + // Side effects of all LHSs and RHSs must occur in left-to-right order. + lvals := make([]lvalue, len(lhss)) + isZero := make([]bool, len(lhss)) + for i, lhs := range lhss { + var lval lvalue = blank{} + if !isBlankIdent(lhs) { + if isDef { + if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { + fn.addNamedLocal(obj) + isZero[i] = true + } + } + lval = b.addr(fn, lhs, false) // non-escaping + } + lvals[i] = lval + } + if len(lhss) == len(rhss) { + // Simple assignment: x = f() (!isDef) + // Parallel assignment: x, y = f(), g() (!isDef) + // or short var decl: x, y := f(), g() (isDef) + // + // In all cases, the RHSs may refer to the LHSs, + // so we need a storebuf. + var sb storebuf + for i := range rhss { + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) + } + sb.emit(fn) + } else { + // e.g. x, y = pos() + tuple := b.exprN(fn, rhss[0]) + emitDebugRef(fn, rhss[0], tuple, false) + for i, lval := range lvals { + lval.store(fn, emitExtract(fn, tuple, i)) + } + } +} + +// arrayLen returns the length of the array whose composite literal elements are elts. +func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { + var max int64 = -1 + var i int64 = -1 + for _, e := range elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + i = b.expr(fn, kv.Key).(*Const).Int64() + } else { + i++ + } + if i > max { + max = i + } + } + return max + 1 +} + +// compLit emits to fn code to initialize a composite literal e at +// address addr with type typ. +// +// Nested composite literals are recursively initialized in place +// where possible. If isZero is true, compLit assumes that addr +// holds the zero value for typ. 
+// +// Because the elements of a composite literal may refer to the +// variables being updated, as in the second line below, +// x := T{a: 1} +// x = T{a: x.a} +// all the reads must occur before all the writes. Thus all stores to +// loc are emitted to the storebuf sb for later execution. +// +// A CompositeLit may have pointer type only in the recursive (nested) +// case when the type name is implicit. e.g. in []*T{{}}, the inner +// literal has type *T behaves like &T{}. +// In that case, addr must hold a T, not a *T. +// +func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { + typ := deref(fn.Pkg.typeOf(e)) + switch t := typ.Underlying().(type) { + case *types.Struct: + if !isZero && len(e.Elts) != t.NumFields() { + // memclear + sb.store(&address{addr, e.Lbrace, nil}, + zeroValue(fn, deref(addr.Type()))) + isZero = true + } + for i, e := range e.Elts { + fieldIndex := i + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + pos = kv.Colon + e = kv.Value + break + } + } + } + sf := t.Field(fieldIndex) + faddr := &FieldAddr{ + X: addr, + Field: fieldIndex, + } + faddr.setType(types.NewPointer(sf.Type())) + fn.emit(faddr) + b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) + } + + case *types.Array, *types.Slice: + var at *types.Array + var array Value + switch t := t.(type) { + case *types.Slice: + at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) + alloc := emitNew(fn, at, e.Lbrace) + alloc.Comment = "slicelit" + array = alloc + case *types.Array: + at = t + array = addr + + if !isZero && int64(len(e.Elts)) != at.Len() { + // memclear + sb.store(&address{array, e.Lbrace, nil}, + zeroValue(fn, deref(array.Type()))) + } + } + + var idx *Const + for _, e := range e.Elts { + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + pos = kv.Colon + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = intConst(idxval) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) + } else { + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) + } + } + + if t != at { // slice + s := &Slice{X: array} + s.setPos(e.Lbrace) + s.setType(typ) + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) + } + + case *types.Map: + m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} + m.setPos(e.Lbrace) + m.setType(typ) + fn.emit(m) + for _, e := range e.Elts { + e := e.(*ast.KeyValueExpr) + + // If a key expression in a map literal is itself a + // composite literal, the type may be omitted. + // For example: + // map[*struct{}]bool{{}: true} + // An &-operation may be implied: + // map[*struct{}]bool{&struct{}{}: true} + var key Value + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. 
+ key = b.addr(fn, e.Key, true).address(fn) + } else { + key = b.expr(fn, e.Key) + } + + loc := element{ + m: m, + k: emitConv(fn, key, t.Key()), + t: t.Elem(), + pos: e.Colon, + } + + // We call assign() only because it takes care + // of any &-operation required in the recursive + // case, e.g., + // map[int]*struct{}{0: {}} implies &struct{}{}. + // In-place update is of course impossible, + // and no storebuf is needed. + b.assign(fn, &loc, e.Value, true, nil) + } + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) + + default: + panic("unexpected CompositeLit type: " + t.String()) + } +} + +// switchStmt emits to fn code for the switch statement s, optionally +// labelled by label. +// +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + // We treat SwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches() + // to those cases that are free of side effects. + if s.Init != nil { + b.stmt(fn, s.Init) + } + var tag Value = vTrue + if s.Tag != nil { + tag = b.expr(fn, s.Tag) + } + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. + // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var dfltBody *[]ast.Stmt + var dfltFallthrough *BasicBlock + var fallthru, dfltBlock *BasicBlock + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = fn.newBasicBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = fn.newBasicBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + dfltBody = &cc.Body + dfltFallthrough = fallthru + dfltBlock = body + continue + } + + var nextCond *BasicBlock + for _, cond := range cc.List { + nextCond = fn.newBasicBlock("switch.next") + // TODO(adonovan): opt: when tag==vTrue, we'd + // get better code if we use b.cond(cond) + // instead of BinOp(EQL, tag, b.expr(cond)) + // followed by If. Don't forget conversions + // though. + cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), token.NoPos) + emitIf(fn, cond, body, nextCond) + fn.currentBlock = nextCond + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = nextCond + } + if dfltBlock != nil { + emitJump(fn, dfltBlock) + fn.currentBlock = dfltBlock + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: dfltFallthrough, + } + b.stmtList(fn, *dfltBody) + fn.targets = fn.targets.tail + } + emitJump(fn, done) + fn.currentBlock = done +} + +// typeSwitchStmt emits to fn code for the type switch statement s, optionally +// labelled by label. +// +func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { + // We treat TypeSwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches(). + + // Typeswitch lowering: + // + // var x X + // switch y := x.(type) { + // case T1, T2: S1 // >1 (y := x) + // case nil: SN // nil (y := x) + // default: SD // 0 types (y := x) + // case T3: S3 // 1 type (y := x.(T3)) + // } + // + // ...s.Init... 
+ // x := eval x + // .caseT1: + // t1, ok1 := typeswitch,ok x + // if ok1 then goto S1 else goto .caseT2 + // .caseT2: + // t2, ok2 := typeswitch,ok x + // if ok2 then goto S1 else goto .caseNil + // .S1: + // y := x + // ...S1... + // goto done + // .caseNil: + // if t2, ok2 := typeswitch,ok x + // if x == nil then goto SN else goto .caseT3 + // .SN: + // y := x + // ...SN... + // goto done + // .caseT3: + // t3, ok3 := typeswitch,ok x + // if ok3 then goto S3 else goto default + // .S3: + // y := t3 + // ...S3... + // goto done + // .default: + // y := x + // ...SD... + // goto done + // .done: + + if s.Init != nil { + b.stmt(fn, s.Init) + } + + var x Value + switch ass := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + case *ast.AssignStmt: // y := x.(type) + x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + } + + done := fn.newBasicBlock("typeswitch.done") + if label != nil { + label._break = done + } + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + default_ = cc + continue + } + body := fn.newBasicBlock("typeswitch.body") + var next *BasicBlock + var casetype types.Type + var ti Value // ti, ok := typeassert,ok x + for _, cond := range cc.List { + next = fn.newBasicBlock("typeswitch.next") + casetype = fn.Pkg.typeOf(cond) + var condv Value + if casetype == tUntypedNil { + condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos) + ti = x + } else { + yok := emitTypeTest(fn, x, casetype, cc.Case) + ti = emitExtract(fn, yok, 0) + condv = emitExtract(fn, yok, 1) + } + emitIf(fn, condv, body, next) + fn.currentBlock = next + } + if len(cc.List) != 1 { + ti = x + } + fn.currentBlock = body + b.typeCaseBody(fn, cc, ti, done) + fn.currentBlock = next + } + if default_ != nil { + b.typeCaseBody(fn, default_, x, done) + } else { + emitJump(fn, done) + } + fn.currentBlock = done +} + +func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. + emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) +} + +// selectStmt emits to fn code for the select statement s, optionally +// labelled by label. +// +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { + // A blocking select of a single case degenerates to a + // simple send or receive. + // TODO(adonovan): opt: is this optimization worth its weight? + if len(s.Body.List) == 1 { + clause := s.Body.List[0].(*ast.CommClause) + if clause.Comm != nil { + b.stmt(fn, clause.Comm) + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = done + return + } + } + + // First evaluate all channels in all cases, and find + // the directions of each state. 
+ var states []*SelectState + blocking := true + debugInfo := fn.debugInfo() + for _, clause := range s.Body.List { + var st *SelectState + switch comm := clause.(*ast.CommClause).Comm.(type) { + case nil: // default case + blocking = false + continue + + case *ast.SendStmt: // ch<- i + ch := b.expr(fn, comm.Chan) + st = &SelectState{ + Dir: types.SendOnly, + Chan: ch, + Send: emitConv(fn, b.expr(fn, comm.Value), + ch.Type().Underlying().(*types.Chan).Elem()), + Pos: comm.Arrow, + } + if debugInfo { + st.DebugNode = comm + } + + case *ast.AssignStmt: // x := <-ch + recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + + case *ast.ExprStmt: // <-ch + recv := unparen(comm.X).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + } + states = append(states, st) + } + + // We dispatch on the (fair) result of Select using a + // sequential if-else chain, in effect: + // + // idx, recvOk, r0...r_n-1 := select(...) + // if idx == 0 { // receive on channel 0 (first receive => r0) + // x, ok := r0, recvOk + // ...state0... + // } else if v == 1 { // send on channel 1 + // ...state1... + // } else { + // ...default... + // } + sel := &Select{ + States: states, + Blocking: blocking, + } + sel.setPos(s.Select) + var vars []*types.Var + vars = append(vars, varIndex, varOk) + for _, st := range states { + if st.Dir == types.RecvOnly { + tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() + vars = append(vars, anonVar(tElem)) + } + } + sel.setType(types.NewTuple(vars...)) + + fn.emit(sel) + idx := emitExtract(fn, sel, 0) + + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + + var defaultBody *[]ast.Stmt + state := 0 + r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + defaultBody = &clause.Body + continue + } + body := fn.newBasicBlock("select.body") + next := fn.newBasicBlock("select.next") + emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + if debugInfo { + v := emitExtract(fn, sel, r) + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + r++ + + case *ast.AssignStmt: // x := <-states[state].Chan + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + } + x := b.addr(fn, comm.Lhs[0], false) // non-escaping + v := emitExtract(fn, sel, r) + if debugInfo { + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + x.store(fn, v) + + if len(comm.Lhs) == 2 { // x, ok := ... + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + } + ok := b.addr(fn, comm.Lhs[1], false) // non-escaping + ok.store(fn, emitExtract(fn, sel, 1)) + } + r++ + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = next + state++ + } + if defaultBody != nil { + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, *defaultBody) + fn.targets = fn.targets.tail + } else { + // A blocking select must match some case. + // (This should really be a runtime.errorString, not a string.) 
+ fn.emit(&Panic{ + X: emitConv(fn, stringConst("blocking select matched no case"), tEface), + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + } + emitJump(fn, done) + fn.currentBlock = done +} + +// forStmt emits to fn code for the for statement s, optionally +// labelled by label. +// +func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(fn, s.Init) + } + body := fn.newBasicBlock("for.body") + done := fn.newBasicBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = fn.newBasicBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = fn.newBasicBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + emitJump(fn, loop) + fn.currentBlock = loop + if loop != body { + b.cond(fn, s.Cond, body, done) + fn.currentBlock = body + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: cont, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, cont) + + if s.Post != nil { + fn.currentBlock = cont + b.stmt(fn, s.Post) + emitJump(fn, loop) // back-edge + } + fn.currentBlock = done +} + +// rangeIndexed emits to fn the header for an integer-indexed loop +// over array, *array or slice value x. +// The v result is defined only if tv is non-nil. +// forPos is the position of the "for" token. +// +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done + // body: + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) + + // Determine number of iterations. + var length Value + if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + // For array or *array, the number of iterations is + // known statically thanks to the type. We avoid a + // data dependence upon x, permitting later dead-code + // elimination if x is pure, static unrolling, etc. + // Ranging over a nil *array may have >0 iterations. + // We still generate code for x, in case it has effects. + length = intConst(arr.Len()) + } else { + // length = len(x). 
+ var c Call + c.Call.Value = makeLen(x.Type()) + c.Call.Args = []Value{x} + c.setType(tInt) + length = fn.emit(&c) + } + + index := fn.addLocal(tInt, token.NoPos) + emitStore(fn, index, intConst(-1), pos) + + loop = fn.newBasicBlock("rangeindex.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, index), + Y: vOne, + } + incr.setType(tInt) + emitStore(fn, index, fn.emit(incr), pos) + + body := fn.newBasicBlock("rangeindex.body") + done = fn.newBasicBlock("rangeindex.done") + emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) + fn.currentBlock = body + + k = emitLoad(fn, index) + if tv != nil { + switch t := x.Type().Underlying().(type) { + case *types.Array: + instr := &Index{ + X: x, + Index: k, + } + instr.setType(t.Elem()) + instr.setPos(x.Pos()) + v = fn.emit(instr) + + case *types.Pointer: // *array + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) + instr.setPos(x.Pos()) + v = emitLoad(fn, fn.emit(instr)) + + case *types.Slice: + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem())) + instr.setPos(x.Pos()) + v = emitLoad(fn, fn.emit(instr)) + + default: + panic("rangeIndexed x:" + t.String()) + } + } + return +} + +// rangeIter emits to fn the header for a loop using +// Range/Next/Extract to iterate over map or string value x. +// tk and tv are the types of the key/value results k and v, or nil +// if the respective component is not wanted. +// +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // it = range x + // loop: (target of continue) + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done + // body: + // k = extract okv #1 + // v = extract okv #2 + // ...body... + // jump loop + // done: (target of break) + // + + if tk == nil { + tk = tInvalid + } + if tv == nil { + tv = tInvalid + } + + rng := &Range{X: x} + rng.setPos(pos) + rng.setType(tRangeIter) + it := fn.emit(rng) + + loop = fn.newBasicBlock("rangeiter.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + _, isString := x.Type().Underlying().(*types.Basic) + + okv := &Next{ + Iter: it, + IsString: isString, + } + okv.setType(types.NewTuple( + varOk, + newVar("k", tk), + newVar("v", tv), + )) + fn.emit(okv) + + body := fn.newBasicBlock("rangeiter.body") + done = fn.newBasicBlock("rangeiter.done") + emitIf(fn, emitExtract(fn, okv, 0), body, done) + fn.currentBlock = body + + if tk != tInvalid { + k = emitExtract(fn, okv, 1) + } + if tv != tInvalid { + v = emitExtract(fn, okv, 2) + } + return +} + +// rangeChan emits to fn the header for a loop that receives from +// channel x until it fails. +// tk is the channel's element type, or nil if the k result is +// not wanted +// pos is the position of the '=' or ':=' token. +// +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // loop: (target of continue) + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done + // body: + // k = extract ko #0 + // ... 
+ // goto loop + // done: (target of break) + + loop = fn.newBasicBlock("rangechan.loop") + emitJump(fn, loop) + fn.currentBlock = loop + recv := &UnOp{ + Op: token.ARROW, + X: x, + CommaOk: true, + } + recv.setPos(pos) + recv.setType(types.NewTuple( + newVar("k", x.Type().Underlying().(*types.Chan).Elem()), + varOk, + )) + ko := fn.emit(recv) + body := fn.newBasicBlock("rangechan.body") + done = fn.newBasicBlock("rangechan.done") + emitIf(fn, emitExtract(fn, ko, 1), body, done) + fn.currentBlock = body + if tk != nil { + k = emitExtract(fn, ko, 0) + } + return +} + +// rangeStmt emits to fn code for the range statement s, optionally +// labelled by label. +// +func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { + var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.Pkg.typeOf(s.Key) + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.Pkg.typeOf(s.Value) + } + + // If iteration variables are defined (:=), this + // occurs once outside the loop. + // + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. + if s.Tok == token.DEFINE { + if tk != nil { + fn.addLocalForIdent(s.Key.(*ast.Ident)) + } + if tv != nil { + fn.addLocalForIdent(s.Value.(*ast.Ident)) + } + } + + x := b.expr(fn, s.X) + + var k, v Value + var loop, done *BasicBlock + switch rt := x.Type().Underlying().(type) { + case *types.Slice, *types.Array, *types.Pointer: // *array + k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) + + case *types.Chan: + k, loop, done = b.rangeChan(fn, x, tk, s.For) + + case *types.Map, *types.Basic: // string + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + default: + panic("Cannot range over: " + rt.String()) + } + + // Evaluate both LHS expressions before we update either. + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k) + } + if tv != nil { + vl.store(fn, v) + } + + if label != nil { + label._break = done + label._continue = loop + } + + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: loop, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, loop) // back-edge + fn.currentBlock = done +} + +// stmt lowers statement s to SSA form, emitting code to fn. +func (b *builder) stmt(fn *Function, _s ast.Stmt) { + // The label of the current statement. If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.EmptyStmt: + // ignore. (Usually removed by gofmt.) 
+ + case *ast.DeclStmt: // Con, Var or Typ + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if vs, ok := spec.(*ast.ValueSpec); ok { + b.localValueSpec(fn, vs) + } + } + } + + case *ast.LabeledStmt: + label = fn.labelledBlock(s.Label) + emitJump(fn, label._goto) + fn.currentBlock = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(fn, s.Stmt, label) + + case *ast.ExprStmt: + b.expr(fn, s.X) + + case *ast.SendStmt: + fn.emit(&Send{ + Chan: b.expr(fn, s.Chan), + X: emitConv(fn, b.expr(fn, s.Value), + fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), + pos: s.Arrow, + }) + + case *ast.IncDecStmt: + op := token.ADD + if s.Tok == token.DEC { + op = token.SUB + } + loc := b.addr(fn, s.X, false) + b.assignOp(fn, loc, NewConst(constant.MakeInt64(1), loc.typ()), op, s.Pos()) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) + + default: // +=, etc. + op := s.Tok + token.ADD - token.ADD_ASSIGN + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos()) + } + + case *ast.GoStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Go{pos: s.Go} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + case *ast.DeferStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Defer{pos: s.Defer} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + // A deferred call can cause recovery from panic, + // and control resumes at the Recover block. + createRecoverBlock(fn) + + case *ast.ReturnStmt: + var results []Value + if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { + // Return of one expression in a multi-valued function. + tuple := b.exprN(fn, s.Results[0]) + ttuple := tuple.Type().(*types.Tuple) + for i, n := 0, ttuple.Len(); i < n; i++ { + results = append(results, + emitConv(fn, emitExtract(fn, tuple, i), + fn.Signature.Results().At(i).Type())) + } + } else { + // 1:1 return, or no-arg return in non-void function. + for i, r := range s.Results { + v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type()) + results = append(results, v) + } + } + if fn.namedResults != nil { + // Function has named result parameters (NRPs). + // Perform parallel assignment of return operands to NRPs. + for i, r := range results { + emitStore(fn, fn.namedResults[i], r, s.Return) + } + } + // Run function calls deferred in this + // function when explicitly returning from it. + fn.emit(new(RunDefers)) + if fn.namedResults != nil { + // Reload NRPs to form the result tuple. 
+ results = results[:0] + for _, r := range fn.namedResults { + results = append(results, emitLoad(fn, r)) + } + } + fn.emit(&Return{Results: results, pos: s.Return}) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BranchStmt: + var block *BasicBlock + switch s.Tok { + case token.BREAK: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._break + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._break + } + } + + case token.CONTINUE: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._continue + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._continue + } + } + + case token.FALLTHROUGH: + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._fallthrough + } + + case token.GOTO: + block = fn.labelledBlock(s.Label)._goto + } + emitJump(fn, block) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BlockStmt: + b.stmtList(fn, s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(fn, s.Init) + } + then := fn.newBasicBlock("if.then") + done := fn.newBasicBlock("if.done") + els := done + if s.Else != nil { + els = fn.newBasicBlock("if.else") + } + b.cond(fn, s.Cond, then, els) + fn.currentBlock = then + b.stmt(fn, s.Body) + emitJump(fn, done) + + if s.Else != nil { + fn.currentBlock = els + b.stmt(fn, s.Else) + emitJump(fn, done) + } + + fn.currentBlock = done + + case *ast.SwitchStmt: + b.switchStmt(fn, s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(fn, s, label) + + case *ast.SelectStmt: + b.selectStmt(fn, s, label) + + case *ast.ForStmt: + b.forStmt(fn, s, label) + + case *ast.RangeStmt: + b.rangeStmt(fn, s, label) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +// buildFunction builds SSA code for the body of function fn. Idempotent. +func (b *builder) buildFunction(fn *Function) { + if fn.Blocks != nil { + return // building already started + } + + var recvField *ast.FieldList + var body *ast.BlockStmt + var functype *ast.FuncType + switch n := fn.syntax.(type) { + case nil: + return // not a Go source function. (Synthetic, or from object file.) + case *ast.FuncDecl: + functype = n.Type + recvField = n.Recv + body = n.Body + case *ast.FuncLit: + functype = n.Type + body = n.Body + default: + panic(n) + } + + if body == nil { + // External function. + if fn.Params == nil { + // This condition ensures we add a non-empty + // params list once only, but we may attempt + // the degenerate empty case repeatedly. + // TODO(adonovan): opt: don't do that. + + // We set Function.Params even though there is no body + // code to reference them. This simplifies clients. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamObj(recv) + } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamObj(params.At(i)) + } + } + return + } + if fn.Prog.mode&LogSource != 0 { + defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.startBody() + fn.createSyntacticParams(recvField, functype) + b.stmt(fn, body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // + // Block optimizations eliminate the current block, if + // unreachable. It is a builder invariant that + // if this no-arg return is ill-typed for + // fn.Signature.Results, this block must be + // unreachable. The sanity checker checks this. 
+ fn.emit(new(RunDefers)) + fn.emit(new(Return)) + } + fn.finishBody() +} + +// buildFuncDecl builds SSA code for the function or method declared +// by decl in package pkg. +// +func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { + id := decl.Name + if isBlankIdent(id) { + return // discard + } + fn := pkg.values[pkg.info.Defs[id]].(*Function) + if decl.Recv == nil && id.Name == "init" { + var v Call + v.Call.Value = fn + v.setType(types.NewTuple()) + pkg.init.emit(&v) + } + b.buildFunction(fn) +} + +// Build calls Package.Build for each package in prog. +// Building occurs in parallel unless the BuildSerially mode flag was set. +// +// Build is intended for whole-program analysis; a typical compiler +// need only build a single package. +// +// Build is idempotent and thread-safe. +// +func (prog *Program) Build() { + var wg sync.WaitGroup + for _, p := range prog.packages { + if prog.mode&BuildSerially != 0 { + p.Build() + } else { + wg.Add(1) + go func(p *Package) { + p.Build() + wg.Done() + }(p) + } + } + wg.Wait() +} + +// Build builds SSA code for all functions and vars in package p. +// +// Precondition: CreatePackage must have been called for all of p's +// direct imports (and hence its direct imports must have been +// error-free). +// +// Build is idempotent and thread-safe. +// +func (p *Package) Build() { p.buildOnce.Do(p.build) } + +func (p *Package) build() { + if p.info == nil { + return // synthetic package, e.g. "testmain" + } + + // Ensure we have runtime type info for all exported members. + // TODO(adonovan): ideally belongs in memberFromObject, but + // that would require package creation in topological order. + for name, mem := range p.Members { + if ast.IsExported(name) { + p.Prog.needMethodsOf(mem.Type()) + } + } + if p.Prog.mode&LogSource != 0 { + defer logStack("build %s", p)() + } + init := p.init + init.startBody() + + var done *BasicBlock + + if p.Prog.mode&BareInits == 0 { + // Make init() skip if package is already initialized. + initguard := p.Var("init$guard") + doinit := init.newBasicBlock("init.start") + done = init.newBasicBlock("init.done") + emitIf(init, emitLoad(init, initguard), done, doinit) + init.currentBlock = doinit + emitStore(init, initguard, vTrue, token.NoPos) + + // Call the init() function of each package we import. + for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) + } + var v Call + v.Call.Value = prereq.init + v.Call.pos = init.pos + v.setType(types.NewTuple()) + init.emit(&v) + } + } + + var b builder + + // Initialize package-level vars in correct order. 
+ for _, varinit := range p.info.InitOrder { + if init.Prog.mode&LogSource != 0 { + fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", + varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) + } + if len(varinit.Lhs) == 1 { + // 1:1 initialization: var x, y = a(), b() + var lval lvalue + if v := varinit.Lhs[0]; v.Name() != "_" { + lval = &address{addr: p.values[v].(*Global), pos: v.Pos()} + } else { + lval = blank{} + } + b.assign(init, lval, varinit.Rhs, true, nil) + } else { + // n:1 initialization: var x, y := f() + tuple := b.exprN(init, varinit.Rhs) + for i, v := range varinit.Lhs { + if v.Name() == "_" { + continue + } + emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + } + } + } + + // Build all package-level functions, init functions + // and methods, including unreachable/blank ones. + // We build them in source order, but it's not significant. + for _, file := range p.files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + b.buildFuncDecl(p, decl) + } + } + } + + // Finish up init(). + if p.Prog.mode&BareInits == 0 { + emitJump(init, done) + init.currentBlock = done + } + init.emit(new(Return)) + init.finishBody() + + p.info = nil // We no longer need ASTs or go/types deductions. + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// Like ObjectOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) objectOf(id *ast.Ident) types.Object { + if o := p.info.ObjectOf(id); o != nil { + return o + } + panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", + id.Name, p.Prog.Fset.Position(id.Pos()))) +} + +// Like TypeOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) typeOf(e ast.Expr) types.Type { + if T := p.info.TypeOf(e); T != nil { + return T + } + panic(fmt.Sprintf("no type for %T @ %s", + e, p.Prog.Fset.Position(e.Pos()))) +} diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go new file mode 100644 index 000000000..f43792e7f --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the Const SSA value type. + +import ( + "fmt" + "go/constant" + "go/token" + "go/types" + "strconv" +) + +// NewConst returns a new constant of the specified value and type. +// val must be valid according to the specification of Const.Value. +// +func NewConst(val constant.Value, typ types.Type) *Const { + return &Const{typ, val} +} + +// intConst returns an 'int' constant that evaluates to i. +// (i is an int64 in case the host is narrower than the target.) +func intConst(i int64) *Const { + return NewConst(constant.MakeInt64(i), tInt) +} + +// nilConst returns a nil constant of the specified type, which may +// be any reference type, including interfaces. +// +func nilConst(typ types.Type) *Const { + return NewConst(nil, typ) +} + +// stringConst returns a 'string' constant that evaluates to s. +func stringConst(s string) *Const { + return NewConst(constant.MakeString(s), tString) +} + +// zeroConst returns a new "zero" constant of the specified type, +// which must not be an array or struct type: the zero values of +// aggregates are well-defined but cannot be represented by Const. 
+// +func zeroConst(t types.Type) *Const { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return NewConst(constant.MakeBool(false), t) + case t.Info()&types.IsNumeric != 0: + return NewConst(constant.MakeInt64(0), t) + case t.Info()&types.IsString != 0: + return NewConst(constant.MakeString(""), t) + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return nilConst(t) + default: + panic(fmt.Sprint("zeroConst for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return nilConst(t) + case *types.Named: + return NewConst(zeroConst(t.Underlying()).Value, t) + case *types.Array, *types.Struct, *types.Tuple: + panic(fmt.Sprint("zeroConst applied to aggregate:", t)) + } + panic(fmt.Sprint("zeroConst: unexpected ", t)) +} + +func (c *Const) RelString(from *types.Package) string { + var s string + if c.Value == nil { + s = "nil" + } else if c.Value.Kind() == constant.String { + s = constant.StringVal(c.Value) + const max = 20 + // TODO(adonovan): don't cut a rune in half. + if len(s) > max { + s = s[:max-3] + "..." // abbreviate + } + s = strconv.Quote(s) + } else { + s = c.Value.String() + } + return s + ":" + relType(c.Type(), from) +} + +func (c *Const) Name() string { + return c.RelString(nil) +} + +func (c *Const) String() string { + return c.Name() +} + +func (c *Const) Type() types.Type { + return c.typ +} + +func (c *Const) Referrers() *[]Instruction { + return nil +} + +func (c *Const) Parent() *Function { return nil } + +func (c *Const) Pos() token.Pos { + return token.NoPos +} + +// IsNil returns true if this constant represents a typed or untyped nil value. +func (c *Const) IsNil() bool { + return c.Value == nil +} + +// TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp. + +// Int64 returns the numeric value of this constant truncated to fit +// a signed 64-bit integer. +// +func (c *Const) Int64() int64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if i, ok := constant.Int64Val(x); ok { + return i + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return int64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Uint64 returns the numeric value of this constant truncated to fit +// an unsigned 64-bit integer. +// +func (c *Const) Uint64() uint64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if u, ok := constant.Uint64Val(x); ok { + return u + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return uint64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Float64 returns the numeric value of this constant truncated to fit +// a float64. +// +func (c *Const) Float64() float64 { + f, _ := constant.Float64Val(c.Value) + return f +} + +// Complex128 returns the complex value of this constant truncated to +// fit a complex128. +// +func (c *Const) Complex128() complex128 { + re, _ := constant.Float64Val(constant.Real(c.Value)) + im, _ := constant.Float64Val(constant.Imag(c.Value)) + return complex(re, im) +} diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go new file mode 100644 index 000000000..85163a0c5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -0,0 +1,270 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the CREATE phase of SSA construction. +// See builder.go for explanation. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "sync" + + "golang.org/x/tools/go/types/typeutil" +) + +// NewProgram returns a new SSA Program. +// +// mode controls diagnostics and checking during SSA construction. +// +func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { + prog := &Program{ + Fset: fset, + imported: make(map[string]*Package), + packages: make(map[*types.Package]*Package), + thunks: make(map[selectionKey]*Function), + bounds: make(map[*types.Func]*Function), + mode: mode, + } + + h := typeutil.MakeHasher() // protected by methodsMu, in effect + prog.methodSets.SetHasher(h) + prog.canon.SetHasher(h) + + return prog +} + +// memberFromObject populates package pkg with a member for the +// typechecker object obj. +// +// For objects from Go source code, syntax is the associated syntax +// tree (for funcs and vars only); it will be used during the build +// phase. +// +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { + name := obj.Name() + switch obj := obj.(type) { + case *types.Builtin: + if pkg.Pkg != types.Unsafe { + panic("unexpected builtin object: " + obj.String()) + } + + case *types.TypeName: + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } + + case *types.Const: + c := &NamedConst{ + object: obj, + Value: NewConst(obj.Val(), obj.Type()), + pkg: pkg, + } + pkg.values[obj] = c.Value + pkg.Members[name] = c + + case *types.Var: + g := &Global{ + Pkg: pkg, + name: name, + object: obj, + typ: types.NewPointer(obj.Type()), // address + pos: obj.Pos(), + } + pkg.values[obj] = g + pkg.Members[name] = g + + case *types.Func: + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil && name == "init" { + pkg.ninit++ + name = fmt.Sprintf("init#%d", pkg.ninit) + } + fn := &Function{ + name: name, + object: obj, + Signature: sig, + syntax: syntax, + pos: obj.Pos(), + Pkg: pkg, + Prog: pkg.Prog, + } + if syntax == nil { + fn.Synthetic = "loaded from gc object file" + } + + pkg.values[obj] = fn + if sig.Recv() == nil { + pkg.Members[name] = fn // package-level function + } + + default: // (incl. *types.Package) + panic("unexpected Object type: " + obj.String()) + } +} + +// membersFromDecl populates package pkg with members for each +// typechecker object (var, func, const or type) associated with the +// specified decl. 
+// +func membersFromDecl(pkg *Package, decl ast.Decl) { + switch decl := decl.(type) { + case *ast.GenDecl: // import, const, type or var + switch decl.Tok { + case token.CONST: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case token.VAR: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], spec) + } + } + } + + case token.TYPE: + for _, spec := range decl.Specs { + id := spec.(*ast.TypeSpec).Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case *ast.FuncDecl: + id := decl.Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], decl) + } + } +} + +// CreatePackage constructs and returns an SSA Package from the +// specified type-checked, error-free file ASTs, and populates its +// Members mapping. +// +// importable determines whether this package should be returned by a +// subsequent call to ImportedPackage(pkg.Path()). +// +// The real work of building SSA form for each function is not done +// until a subsequent call to Package.Build(). +// +func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + p := &Package{ + Prog: prog, + Members: make(map[string]Member), + values: make(map[types.Object]Value), + Pkg: pkg, + info: info, // transient (CREATE and BUILD phases) + files: files, // transient (CREATE and BUILD phases) + } + + // Add init() function. + p.init = &Function{ + name: "init", + Signature: new(types.Signature), + Synthetic: "package initializer", + Pkg: p, + Prog: prog, + } + p.Members[p.init.name] = p.init + + // CREATE phase. + // Allocate all package members: vars, funcs, consts and types. + if len(files) > 0 { + // Go source package. + for _, file := range files { + for _, decl := range file.Decls { + membersFromDecl(p, decl) + } + } + } else { + // GC-compiled binary package (or "unsafe") + // No code. + // No position information. + scope := p.Pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + memberFromObject(p, obj, nil) + if obj, ok := obj.(*types.TypeName); ok { + if named, ok := obj.Type().(*types.Named); ok { + for i, n := 0, named.NumMethods(); i < n; i++ { + memberFromObject(p, named.Method(i), nil) + } + } + } + } + } + + if prog.mode&BareInits == 0 { + // Add initializer guard variable. + initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), + } + p.Members[initguard.Name()] = initguard + } + + if prog.mode&GlobalDebug != 0 { + p.SetDebugMode(true) + } + + if prog.mode&PrintPackages != 0 { + printMu.Lock() + p.WriteTo(os.Stdout) + printMu.Unlock() + } + + if importable { + prog.imported[p.Pkg.Path()] = p + } + prog.packages[p.Pkg] = p + + return p +} + +// printMu serializes printing of Packages/Functions to stdout. +var printMu sync.Mutex + +// AllPackages returns a new slice containing all packages in the +// program prog in unspecified order. +// +func (prog *Program) AllPackages() []*Package { + pkgs := make([]*Package, 0, len(prog.packages)) + for _, pkg := range prog.packages { + pkgs = append(pkgs, pkg) + } + return pkgs +} + +// ImportedPackage returns the importable Package whose PkgPath +// is path, or nil if no such Package has been created. 
+// +// A parameter to CreatePackage determines whether a package should be +// considered importable. For example, no import declaration can resolve +// to the ad-hoc main package created by 'go build foo.go'. +// +// TODO(adonovan): rethink this function and the "importable" concept; +// most packages are importable. This function assumes that all +// types.Package.Path values are unique within the ssa.Program, which is +// false---yet this function remains very convenient. +// Clients should use (*Program).Package instead where possible. +// SSA doesn't really need a string-keyed map of packages. +// +func (prog *Program) ImportedPackage(path string) *Package { + return prog.imported[path] +} diff --git a/vendor/golang.org/x/tools/go/ssa/doc.go b/vendor/golang.org/x/tools/go/ssa/doc.go new file mode 100644 index 000000000..1a13640f9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/doc.go @@ -0,0 +1,125 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ssa defines a representation of the elements of Go programs +// (packages, types, functions, variables and constants) using a +// static single-assignment (SSA) form intermediate representation +// (IR) for the bodies of functions. +// +// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. +// +// For an introduction to SSA form, see +// http://en.wikipedia.org/wiki/Static_single_assignment_form. +// This page provides a broader reading list: +// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. +// +// The level of abstraction of the SSA form is intentionally close to +// the source language to facilitate construction of source analysis +// tools. It is not intended for machine code generation. +// +// All looping, branching and switching constructs are replaced with +// unstructured control flow. Higher-level control flow constructs +// such as multi-way branch can be reconstructed as needed; see +// ssautil.Switches() for an example. +// +// The simplest way to create the SSA representation of a package is +// to load typed syntax trees using golang.org/x/tools/go/packages, then +// invoke the ssautil.Packages helper function. See ExampleLoadPackages +// and ExampleWholeProgram for examples. +// The resulting ssa.Program contains all the packages and their +// members, but SSA code is not created for function bodies until a +// subsequent call to (*Package).Build or (*Program).Build. +// +// The builder initially builds a naive SSA form in which all local +// variables are addresses of stack locations with explicit loads and +// stores. Registerisation of eligible locals and φ-node insertion +// using dominance and dataflow are then performed as a second pass +// called "lifting" to improve the accuracy and performance of +// subsequent analyses; this pass can be skipped by setting the +// NaiveForm builder flag. +// +// The primary interfaces of this package are: +// +// - Member: a named member of a Go package. +// - Value: an expression that yields a value. +// - Instruction: a statement that consumes values and performs computation. +// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) +// +// A computation that yields a result implements both the Value and +// Instruction interfaces. The following table shows for each +// concrete type which of these interfaces it implements. +// +// Value? Instruction? Member? 
+// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Lookup ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapUpdate ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ +// *Slice ✔ ✔ +// *Store ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// +// Other key types in this package include: Program, Package, Function +// and BasicBlock. +// +// The program representation constructed by this package is fully +// resolved internally, i.e. it does not rely on the names of Values, +// Packages, Functions, Types or BasicBlocks for the correct +// interpretation of the program. Only the identities of objects and +// the topology of the SSA and type graphs are semantically +// significant. (There is one exception: Ids, used to identify field +// and method names, contain strings.) Avoidance of name-based +// operations simplifies the implementation of subsequent passes and +// can make them very efficient. Many objects are nonetheless named +// to aid in debugging, but it is not essential that the names be +// either accurate or unambiguous. The public API exposes a number of +// name-based maps for client convenience. +// +// The ssa/ssautil package provides various utilities that depend only +// on the public API of this package. +// +// TODO(adonovan): Consider the exceptional control-flow implications +// of defer and recover(). +// +// TODO(adonovan): write a how-to document for all the various cases +// of trying to determine corresponding elements across the four +// domains of source locations, ast.Nodes, types.Objects, +// ssa.Values/Instructions. +// +package ssa // import "golang.org/x/tools/go/ssa" diff --git a/vendor/golang.org/x/tools/go/ssa/dom.go b/vendor/golang.org/x/tools/go/ssa/dom.go new file mode 100644 index 000000000..822fe9772 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/dom.go @@ -0,0 +1,341 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines algorithms related to dominance. + +// Dominator tree construction ---------------------------------------- +// +// We use the algorithm described in Lengauer & Tarjan. 1979. A fast +// algorithm for finding dominators in a flowgraph. +// http://doi.acm.org/10.1145/357062.357071 +// +// We also apply the optimizations to SLT described in Georgiadis et +// al, Finding Dominators in Practice, JGAA 2006, +// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf +// to avoid the need for buckets of size > 1. + +import ( + "bytes" + "fmt" + "math/big" + "os" + "sort" +) + +// Idom returns the block that immediately dominates b: +// its parent in the dominator tree, if any. +// Neither the entry node (b.Index==0) nor recover node +// (b==b.Parent().Recover()) have a parent. +// +func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } + +// Dominees returns the list of blocks that b immediately dominates: +// its children in the dominator tree. 
+// +func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } + +// Dominates reports whether b dominates c. +func (b *BasicBlock) Dominates(c *BasicBlock) bool { + return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post +} + +type byDomPreorder []*BasicBlock + +func (a byDomPreorder) Len() int { return len(a) } +func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre } + +// DomPreorder returns a new slice containing the blocks of f in +// dominator tree preorder. +// +func (f *Function) DomPreorder() []*BasicBlock { + n := len(f.Blocks) + order := make(byDomPreorder, n) + copy(order, f.Blocks) + sort.Sort(order) + return order +} + +// domInfo contains a BasicBlock's dominance information. +type domInfo struct { + idom *BasicBlock // immediate dominator (parent in domtree) + children []*BasicBlock // nodes immediately dominated by this one + pre, post int32 // pre- and post-order numbering within domtree +} + +// ltState holds the working state for Lengauer-Tarjan algorithm +// (during which domInfo.pre is repurposed for CFG DFS preorder number). +type ltState struct { + // Each slice is indexed by b.Index. + sdom []*BasicBlock // b's semidominator + parent []*BasicBlock // b's parent in DFS traversal of CFG + ancestor []*BasicBlock // b's ancestor with least sdom +} + +// dfs implements the depth-first search part of the LT algorithm. +func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 { + preorder[i] = v + v.dom.pre = i // For now: DFS preorder of spanning tree of CFG + i++ + lt.sdom[v.Index] = v + lt.link(nil, v) + for _, w := range v.Succs { + if lt.sdom[w.Index] == nil { + lt.parent[w.Index] = v + i = lt.dfs(w, i, preorder) + } + } + return i +} + +// eval implements the EVAL part of the LT algorithm. +func (lt *ltState) eval(v *BasicBlock) *BasicBlock { + // TODO(adonovan): opt: do path compression per simple LT. + u := v + for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] { + if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre { + u = v + } + } + return u +} + +// link implements the LINK part of the LT algorithm. +func (lt *ltState) link(v, w *BasicBlock) { + lt.ancestor[w.Index] = v +} + +// buildDomTree computes the dominator tree of f using the LT algorithm. +// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). +// +func buildDomTree(f *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range f.Blocks { + b.dom = domInfo{} + } + + n := len(f.Blocks) + // Allocate space for 5 contiguous [n]*BasicBlock arrays: + // sdom, parent, ancestor, preorder, buckets. + space := make([]*BasicBlock, 5*n) + lt := ltState{ + sdom: space[0:n], + parent: space[n : 2*n], + ancestor: space[2*n : 3*n], + } + + // Step 1. Number vertices by depth-first preorder. + preorder := space[3*n : 4*n] + root := f.Blocks[0] + prenum := lt.dfs(root, 0, preorder) + recover := f.Recover + if recover != nil { + lt.dfs(recover, prenum, preorder) + } + + buckets := space[4*n : 5*n] + copy(buckets, preorder) + + // In reverse preorder... + for i := int32(n) - 1; i > 0; i-- { + w := preorder[i] + + // Step 3. Implicitly define the immediate dominator of each node. + for v := buckets[i]; v != w; v = buckets[v.dom.pre] { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < i { + v.dom.idom = u + } else { + v.dom.idom = w + } + } + + // Step 2. 
Compute the semidominators of all nodes. + lt.sdom[w.Index] = lt.parent[w.Index] + for _, v := range w.Preds { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre { + lt.sdom[w.Index] = lt.sdom[u.Index] + } + } + + lt.link(lt.parent[w.Index], w) + + if lt.parent[w.Index] == lt.sdom[w.Index] { + w.dom.idom = lt.parent[w.Index] + } else { + buckets[i] = buckets[lt.sdom[w.Index].dom.pre] + buckets[lt.sdom[w.Index].dom.pre] = w + } + } + + // The final 'Step 3' is now outside the loop. + for v := buckets[0]; v != root; v = buckets[v.dom.pre] { + v.dom.idom = root + } + + // Step 4. Explicitly define the immediate dominator of each + // node, in preorder. + for _, w := range preorder[1:] { + if w == root || w == recover { + w.dom.idom = nil + } else { + if w.dom.idom != lt.sdom[w.Index] { + w.dom.idom = w.dom.idom.dom.idom + } + // Calculate Children relation as inverse of Idom. + w.dom.idom.dom.children = append(w.dom.idom.dom.children, w) + } + } + + pre, post := numberDomTree(root, 0, 0) + if recover != nil { + numberDomTree(recover, pre, post) + } + + // printDomTreeDot(os.Stderr, f) // debugging + // printDomTreeText(os.Stderr, root, 0) // debugging + + if f.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckDomTree(f) + } +} + +// numberDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the dominator tree rooted at v. These are used to +// answer dominance queries in constant time. +// +func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.dom.pre = pre + pre++ + for _, child := range v.dom.children { + pre, post = numberDomTree(child, pre, post) + } + v.dom.post = post + post++ + return pre, post +} + +// Testing utilities ---------------------------------------- + +// sanityCheckDomTree checks the correctness of the dominator tree +// computed by the LT algorithm by comparing against the dominance +// relation computed by a naive Kildall-style forward dataflow +// analysis (Algorithm 10.16 from the "Dragon" book). +// +func sanityCheckDomTree(f *Function) { + n := len(f.Blocks) + + // D[i] is the set of blocks that dominate f.Blocks[i], + // represented as a bit-set of block indices. + D := make([]big.Int, n) + + one := big.NewInt(1) + + // all is the set of all blocks; constant. + var all big.Int + all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) + + // Initialization. + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + // A root is dominated only by itself. + D[i].SetBit(&D[0], 0, 1) + } else { + // All other blocks are (initially) dominated + // by every block. + D[i].Set(&all) + } + } + + // Iteration until fixed point. + for changed := true; changed; { + changed = false + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + continue + } + // Compute intersection across predecessors. + var x big.Int + x.Set(&all) + for _, pred := range b.Preds { + x.And(&x, &D[pred.Index]) + } + x.SetBit(&x, i, 1) // a block always dominates itself. + if D[i].Cmp(&x) != 0 { + D[i].Set(&x) + changed = true + } + } + } + + // Check the entire relation. O(n^2). + // The Recover block (if any) must be treated specially so we skip it. 
+ ok := true + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + b, c := f.Blocks[i], f.Blocks[j] + if c == f.Recover { + continue + } + actual := b.Dominates(c) + expected := D[j].Bit(i) == 1 + if actual != expected { + fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) + ok = false + } + } + } + + preorder := f.DomPreorder() + for _, b := range f.Blocks { + if got := preorder[b.dom.pre]; got != b { + fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) + ok = false + } + } + + if !ok { + panic("sanityCheckDomTree failed for " + f.String()) + } + +} + +// Printing functions ---------------------------------------- + +// printDomTree prints the dominator tree as text, using indentation. +func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.dom.children { + printDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. +func printDomTreeDot(buf *bytes.Buffer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph domtree {") + for i, b := range f.Blocks { + v := b.dom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if i != 0 { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) + } + } + fmt.Fprintln(buf, "}") +} diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go new file mode 100644 index 000000000..df9ca4ff0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -0,0 +1,478 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Helpers for emitting SSA instructions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" +) + +// emitNew emits to f a new (heap Alloc) instruction allocating an +// object of type typ. pos is the optional source location. +// +func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { + v := &Alloc{Heap: true} + v.setType(types.NewPointer(typ)) + v.setPos(pos) + f.emit(v) + return v +} + +// emitLoad emits to f an instruction to load the address addr into a +// new temporary, and returns the value so defined. +// +func emitLoad(f *Function, addr Value) *UnOp { + v := &UnOp{Op: token.MUL, X: addr} + v.setType(deref(addr.Type())) + f.emit(v) + return v +} + +// emitDebugRef emits to f a DebugRef pseudo-instruction associating +// expression e with value v. +// +func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { + if !f.debugInfo() { + return // debugging not enabled + } + if v == nil || e == nil { + panic("nil") + } + var obj types.Object + e = unparen(e) + if id, ok := e.(*ast.Ident); ok { + if isBlankIdent(id) { + return + } + obj = f.Pkg.objectOf(id) + switch obj.(type) { + case *types.Nil, *types.Const, *types.Builtin: + return + } + } + f.emit(&DebugRef{ + X: v, + Expr: e, + IsAddr: isAddr, + object: obj, + }) +} + +// emitArith emits to f code to compute the binary operation op(x, y) +// where op is an eager shift, logical or arithmetic operation. 
+// (Use emitCompare() for comparisons and Builder.logicalBinop() for +// non-eager operations.) +// +func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value { + switch op { + case token.SHL, token.SHR: + x = emitConv(f, x, t) + // y may be signed or an 'untyped' constant. + // TODO(adonovan): whence signed values? + if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { + y = emitConv(f, y, types.Typ[types.Uint64]) + } + + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + x = emitConv(f, x, t) + y = emitConv(f, y, t) + + default: + panic("illegal op in emitArith: " + op.String()) + + } + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(t) + return f.emit(v) +} + +// emitCompare emits to f code compute the boolean result of +// comparison comparison 'x op y'. +// +func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { + xt := x.Type().Underlying() + yt := y.Type().Underlying() + + // Special case to optimise a tagless SwitchStmt so that + // these are equivalent + // switch { case e: ...} + // switch true { case e: ... } + // if e==true { ... } + // even in the case when e's type is an interface. + // TODO(adonovan): opt: generalise to x==true, false!=y, etc. + if x == vTrue && op == token.EQL { + if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { + return y + } + } + + if types.Identical(xt, yt) { + // no conversion necessary + } else if _, ok := xt.(*types.Interface); ok { + y = emitConv(f, y, x.Type()) + } else if _, ok := yt.(*types.Interface); ok { + x = emitConv(f, x, y.Type()) + } else if _, ok := x.(*Const); ok { + x = emitConv(f, x, y.Type()) + } else if _, ok := y.(*Const); ok { + y = emitConv(f, y, x.Type()) + } else { + // other cases, e.g. channels. No-op. + } + + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(tBool) + return f.emit(v) +} + +// isValuePreserving returns true if a conversion from ut_src to +// ut_dst is value-preserving, i.e. just a change of type. +// Precondition: neither argument is a named type. +// +func isValuePreserving(ut_src, ut_dst types.Type) bool { + // Identical underlying types? + if structTypesIdentical(ut_dst, ut_src) { + return true + } + + switch ut_dst.(type) { + case *types.Chan: + // Conversion between channel types? + _, ok := ut_src.(*types.Chan) + return ok + + case *types.Pointer: + // Conversion between pointers with identical base types? + _, ok := ut_src.(*types.Pointer) + return ok + } + return false +} + +// emitConv emits to f code to convert Value val to exactly type typ, +// and returns the converted value. Implicit conversions are required +// by language assignability rules in assignments, parameter passing, +// etc. +// +func emitConv(f *Function, val Value, typ types.Type) Value { + t_src := val.Type() + + // Identical types? Conversion is a no-op. + if types.Identical(t_src, typ) { + return val + } + + ut_dst := typ.Underlying() + ut_src := t_src.Underlying() + + // Just a change of type, but not value or representation? + if isValuePreserving(ut_src, ut_dst) { + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + } + + // Conversion to, or construction of a value of, an interface type? + if _, ok := ut_dst.(*types.Interface); ok { + // Assignment from one interface type to another? 
+ if _, ok := ut_src.(*types.Interface); ok { + c := &ChangeInterface{X: val} + c.setType(typ) + return f.emit(c) + } + + // Untyped nil constant? Return interface-typed nil constant. + if ut_src == tUntypedNil { + return nilConst(typ) + } + + // Convert (non-nil) "untyped" literals to their default type. + if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { + val = emitConv(f, val, types.Default(ut_src)) + } + + f.Pkg.Prog.needMethodsOf(val.Type()) + mi := &MakeInterface{X: val} + mi.setType(typ) + return f.emit(mi) + } + + // Conversion of a compile-time constant value? + if c, ok := val.(*Const); ok { + if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { + // Conversion of a compile-time constant to + // another constant type results in a new + // constant of the destination type and + // (initially) the same abstract value. + // We don't truncate the value yet. + return NewConst(c.Value, typ) + } + + // We're converting from constant to non-constant type, + // e.g. string -> []byte/[]rune. + } + + // Conversion from slice to array pointer? + if slice, ok := ut_src.(*types.Slice); ok { + if ptr, ok := ut_dst.(*types.Pointer); ok { + if arr, ok := ptr.Elem().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { + c := &Convert{X: val} + c.setType(ut_dst) + return f.emit(c) + } + } + } + // A representation-changing conversion? + // At least one of {ut_src,ut_dst} must be *Basic. + // (The other may be []byte or []rune.) + _, ok1 := ut_src.(*types.Basic) + _, ok2 := ut_dst.(*types.Basic) + if ok1 || ok2 { + c := &Convert{X: val} + c.setType(typ) + return f.emit(c) + } + + panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) +} + +// emitStore emits to f an instruction to store value val at location +// addr, applying implicit conversions as required by assignability rules. +// +func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { + s := &Store{ + Addr: addr, + Val: emitConv(f, val, deref(addr.Type())), + pos: pos, + } + f.emit(s) + return s +} + +// emitJump emits to f a jump to target, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitJump(f *Function, target *BasicBlock) { + b := f.currentBlock + b.emit(new(Jump)) + addEdge(b, target) + f.currentBlock = nil +} + +// emitIf emits to f a conditional jump to tblock or fblock based on +// cond, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) { + b := f.currentBlock + b.emit(&If{Cond: cond}) + addEdge(b, tblock) + addEdge(b, fblock) + f.currentBlock = nil +} + +// emitExtract emits to f an instruction to extract the index'th +// component of tuple. It returns the extracted value. +// +func emitExtract(f *Function, tuple Value, index int) Value { + e := &Extract{Tuple: tuple, Index: index} + e.setType(tuple.Type().(*types.Tuple).At(index).Type()) + return f.emit(e) +} + +// emitTypeAssert emits to f a type assertion value := x.(t) and +// returns the value. x.Type() must be an interface. +// +func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{X: x, AssertedType: t} + a.setPos(pos) + a.setType(t) + return f.emit(a) +} + +// emitTypeTest emits to f a type test value,ok := x.(t) and returns +// a (value, ok) tuple. x.Type() must be an interface. 
+// +func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{ + X: x, + AssertedType: t, + CommaOk: true, + } + a.setPos(pos) + a.setType(types.NewTuple( + newVar("value", t), + varOk, + )) + return f.emit(a) +} + +// emitTailCall emits to f a function call in tail position. The +// caller is responsible for all fields of 'call' except its type. +// Intended for wrapper methods. +// Precondition: f does/will not use deferred procedure calls. +// Postcondition: f.currentBlock is nil. +// +func emitTailCall(f *Function, call *Call) { + tresults := f.Signature.Results() + nr := tresults.Len() + if nr == 1 { + call.typ = tresults.At(0).Type() + } else { + call.typ = tresults + } + tuple := f.emit(call) + var ret Return + switch nr { + case 0: + // no-op + case 1: + ret.Results = []Value{tuple} + default: + for i := 0; i < nr; i++ { + v := emitExtract(f, tuple, i) + // TODO(adonovan): in principle, this is required: + // v = emitConv(f, o.Type, f.Signature.Results[i].Type) + // but in practice emitTailCall is only used when + // the types exactly match. + ret.Results = append(ret.Results, v) + } + } + f.emit(&ret) + f.currentBlock = nil +} + +// emitImplicitSelections emits to f code to apply the sequence of +// implicit field selections specified by indices to base value v, and +// returns the selected value. +// +// If v is the address of a struct, the result will be the address of +// a field; if it is the value of a struct, the result will be the +// value of a field. +// +func emitImplicitSelections(f *Function, v Value, indices []int) Value { + for _, index := range indices { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff indirectly embedded. + if isPointer(fld.Type()) { + v = emitLoad(f, v) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setType(fld.Type()) + v = f.emit(instr) + } + } + return v +} + +// emitFieldSelection emits to f code to select the index'th field of v. +// +// If wantAddr, the input must be a pointer-to-struct and the result +// will be the field's address; otherwise the result will be the +// field's value. +// Ident id is used for position and debug info. +// +func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff we don't want its address. + if !wantAddr { + v = emitLoad(f, v) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(fld.Type()) + v = f.emit(instr) + } + emitDebugRef(f, id, v, wantAddr) + return v +} + +// zeroValue emits to f code to produce a zero value of type t, +// and returns it. +// +func zeroValue(f *Function, t types.Type) Value { + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return emitLoad(f, f.addLocal(t, token.NoPos)) + default: + return zeroConst(t) + } +} + +// createRecoverBlock emits to f a block of code to return after a +// recovered panic, and sets f.Recover to it. 
+// +// If f's result parameters are named, the code loads and returns +// their current values, otherwise it returns the zero values of their +// type. +// +// Idempotent. +// +func createRecoverBlock(f *Function) { + if f.Recover != nil { + return // already created + } + saved := f.currentBlock + + f.Recover = f.newBasicBlock("recover") + f.currentBlock = f.Recover + + var results []Value + if f.namedResults != nil { + // Reload NRPs to form value tuple. + for _, r := range f.namedResults { + results = append(results, emitLoad(f, r)) + } + } else { + R := f.Signature.Results() + for i, n := 0, R.Len(); i < n; i++ { + T := R.At(i).Type() + + // Return zero value of each result type. + results = append(results, zeroValue(f, T)) + } + } + f.emit(&Return{Results: results}) + + f.currentBlock = saved +} diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go new file mode 100644 index 000000000..0b99bc9ba --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -0,0 +1,691 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the Function and BasicBlock types. + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + "strings" +) + +// addEdge adds a control-flow graph edge from from to to. +func addEdge(from, to *BasicBlock) { + from.Succs = append(from.Succs, to) + to.Preds = append(to.Preds, from) +} + +// Parent returns the function that contains block b. +func (b *BasicBlock) Parent() *Function { return b.parent } + +// String returns a human-readable label of this block. +// It is not guaranteed unique within the function. +// +func (b *BasicBlock) String() string { + return fmt.Sprintf("%d", b.Index) +} + +// emit appends an instruction to the current basic block. +// If the instruction defines a Value, it is returned. +// +func (b *BasicBlock) emit(i Instruction) Value { + i.setBlock(b) + b.Instrs = append(b.Instrs, i) + v, _ := i.(Value) + return v +} + +// predIndex returns the i such that b.Preds[i] == c or panics if +// there is none. +func (b *BasicBlock) predIndex(c *BasicBlock) int { + for i, pred := range b.Preds { + if pred == c { + return i + } + } + panic(fmt.Sprintf("no edge %s -> %s", c, b)) +} + +// hasPhi returns true if b.Instrs contains φ-nodes. +func (b *BasicBlock) hasPhi() bool { + _, ok := b.Instrs[0].(*Phi) + return ok +} + +// phis returns the prefix of b.Instrs containing all the block's φ-nodes. +func (b *BasicBlock) phis() []Instruction { + for i, instr := range b.Instrs { + if _, ok := instr.(*Phi); !ok { + return b.Instrs[:i] + } + } + return nil // unreachable in well-formed blocks +} + +// replacePred replaces all occurrences of p in b's predecessor list with q. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) replacePred(p, q *BasicBlock) { + for i, pred := range b.Preds { + if pred == p { + b.Preds[i] = q + } + } +} + +// replaceSucc replaces all occurrences of p in b's successor list with q. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { + for i, succ := range b.Succs { + if succ == p { + b.Succs[i] = q + } + } +} + +// removePred removes all occurrences of p in b's +// predecessor list and φ-nodes. +// Ordinarily there should be at most one. 
+// +func (b *BasicBlock) removePred(p *BasicBlock) { + phis := b.phis() + + // We must preserve edge order for φ-nodes. + j := 0 + for i, pred := range b.Preds { + if pred != p { + b.Preds[j] = b.Preds[i] + // Strike out φ-edge too. + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges[j] = phi.Edges[i] + } + j++ + } + } + // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. + for i := j; i < len(b.Preds); i++ { + b.Preds[i] = nil + for _, instr := range phis { + instr.(*Phi).Edges[i] = nil + } + } + b.Preds = b.Preds[:j] + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges = phi.Edges[:j] + } +} + +// Destinations associated with unlabelled for/switch/select stmts. +// We push/pop one of these as we enter/leave each construct and for +// each BranchStmt we scan for the innermost target of the right type. +// +type targets struct { + tail *targets // rest of stack + _break *BasicBlock + _continue *BasicBlock + _fallthrough *BasicBlock +} + +// Destinations associated with a labelled block. +// We populate these as labels are encountered in forward gotos or +// labelled statements. +// +type lblock struct { + _goto *BasicBlock + _break *BasicBlock + _continue *BasicBlock +} + +// labelledBlock returns the branch target associated with the +// specified label, creating it if needed. +// +func (f *Function) labelledBlock(label *ast.Ident) *lblock { + lb := f.lblocks[label.Obj] + if lb == nil { + lb = &lblock{_goto: f.newBasicBlock(label.Name)} + if f.lblocks == nil { + f.lblocks = make(map[*ast.Object]*lblock) + } + f.lblocks[label.Obj] = lb + } + return lb +} + +// addParam adds a (non-escaping) parameter to f.Params of the +// specified name, type and source position. +// +func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { + v := &Parameter{ + name: name, + typ: typ, + pos: pos, + parent: f, + } + f.Params = append(f.Params, v) + return v +} + +func (f *Function) addParamObj(obj types.Object) *Parameter { + name := obj.Name() + if name == "" { + name = fmt.Sprintf("arg%d", len(f.Params)) + } + param := f.addParam(name, obj.Type(), obj.Pos()) + param.object = obj + return param +} + +// addSpilledParam declares a parameter that is pre-spilled to the +// stack; the function body will load/store the spilled location. +// Subsequent lifting will eliminate spills where possible. +// +func (f *Function) addSpilledParam(obj types.Object) { + param := f.addParamObj(obj) + spill := &Alloc{Comment: obj.Name()} + spill.setType(types.NewPointer(obj.Type())) + spill.setPos(obj.Pos()) + f.objects[obj] = spill + f.Locals = append(f.Locals, spill) + f.emit(spill) + f.emit(&Store{Addr: spill, Val: param}) +} + +// startBody initializes the function prior to generating SSA code for its body. +// Precondition: f.Type() already set. +// +func (f *Function) startBody() { + f.currentBlock = f.newBasicBlock("entry") + f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init +} + +// createSyntacticParams populates f.Params and generates code (spills +// and named result locals) for all the parameters declared in the +// syntax. In addition it populates the f.objects mapping. +// +// Preconditions: +// f.startBody() was called. +// Postcondition: +// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) +// +func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { + // Receiver (at most one inner iteration). 
+ if recv != nil { + for _, field := range recv.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n]) + } + // Anonymous receiver? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Recv()) + } + } + } + + // Parameters. + if functype.Params != nil { + n := len(f.Params) // 1 if has recv, 0 otherwise + for _, field := range functype.Params.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n]) + } + // Anonymous parameter? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) + } + } + } + + // Named results. + if functype.Results != nil { + for _, field := range functype.Results.List { + // Implicit "var" decl of locals for named results. + for _, n := range field.Names { + f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) + } + } + } +} + +type setNumable interface { + setNum(int) +} + +// numberRegisters assigns numbers to all SSA registers +// (value-defining Instructions) in f, to aid debugging. +// (Non-Instruction Values are named at construction.) +// +func numberRegisters(f *Function) { + v := 0 + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case Value: + instr.(setNumable).setNum(v) + v++ + } + } + } +} + +// buildReferrers populates the def/use information in all non-nil +// Value.Referrers slice. +// Precondition: all such slices are initially empty. +func buildReferrers(f *Function) { + var rands []*Value + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if r := *rand; r != nil { + if ref := r.Referrers(); ref != nil { + *ref = append(*ref, instr) + } + } + } + } + } +} + +// finishBody() finalizes the function after SSA code generation of its body. +func (f *Function) finishBody() { + f.objects = nil + f.currentBlock = nil + f.lblocks = nil + + // Don't pin the AST in memory (except in debug mode). + if n := f.syntax; n != nil && !f.debugInfo() { + f.syntax = extentNode{n.Pos(), n.End()} + } + + // Remove from f.Locals any Allocs that escape to the heap. + j := 0 + for _, l := range f.Locals { + if !l.Heap { + f.Locals[j] = l + j++ + } + } + // Nil out f.Locals[j:] to aid GC. + for i := j; i < len(f.Locals); i++ { + f.Locals[i] = nil + } + f.Locals = f.Locals[:j] + + optimizeBlocks(f) + + buildReferrers(f) + + buildDomTree(f) + + if f.Prog.mode&NaiveForm == 0 { + // For debugging pre-state of lifting pass: + // numberRegisters(f) + // f.WriteTo(os.Stderr) + lift(f) + } + + f.namedResults = nil // (used by lifting) + + numberRegisters(f) + + if f.Prog.mode&PrintFunctions != 0 { + printMu.Lock() + f.WriteTo(os.Stdout) + printMu.Unlock() + } + + if f.Prog.mode&SanityCheckFunctions != 0 { + mustSanityCheck(f, nil) + } +} + +// removeNilBlocks eliminates nils from f.Blocks and updates each +// BasicBlock.Index. Use this after any pass that may delete blocks. +// +func (f *Function) removeNilBlocks() { + j := 0 + for _, b := range f.Blocks { + if b != nil { + b.Index = j + f.Blocks[j] = b + j++ + } + } + // Nil out f.Blocks[j:] to aid GC. + for i := j; i < len(f.Blocks); i++ { + f.Blocks[i] = nil + } + f.Blocks = f.Blocks[:j] +} + +// SetDebugMode sets the debug mode for package pkg. If true, all its +// functions will include full debug info. 
This greatly increases the +// size of the instruction stream, and causes Functions to depend upon +// the ASTs, potentially keeping them live in memory for longer. +// +func (pkg *Package) SetDebugMode(debug bool) { + // TODO(adonovan): do we want ast.File granularity? + pkg.debug = debug +} + +// debugInfo reports whether debug info is wanted for this function. +func (f *Function) debugInfo() bool { + return f.Pkg != nil && f.Pkg.debug +} + +// addNamedLocal creates a local variable, adds it to function f and +// returns it. Its name and type are taken from obj. Subsequent +// calls to f.lookup(obj) will return the same local. +// +func (f *Function) addNamedLocal(obj types.Object) *Alloc { + l := f.addLocal(obj.Type(), obj.Pos()) + l.Comment = obj.Name() + f.objects[obj] = l + return l +} + +func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { + return f.addNamedLocal(f.Pkg.info.Defs[id]) +} + +// addLocal creates an anonymous local variable of type typ, adds it +// to function f and returns it. pos is the optional source location. +// +func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { + v := &Alloc{} + v.setType(types.NewPointer(typ)) + v.setPos(pos) + f.Locals = append(f.Locals, v) + f.emit(v) + return v +} + +// lookup returns the address of the named variable identified by obj +// that is local to function f or one of its enclosing functions. +// If escaping, the reference comes from a potentially escaping pointer +// expression and the referent must be heap-allocated. +// +func (f *Function) lookup(obj types.Object, escaping bool) Value { + if v, ok := f.objects[obj]; ok { + if alloc, ok := v.(*Alloc); ok && escaping { + alloc.Heap = true + } + return v // function-local var (address) + } + + // Definition must be in an enclosing function; + // plumb it through intervening closures. + if f.parent == nil { + panic("no ssa.Value for " + obj.String()) + } + outer := f.parent.lookup(obj, true) // escaping + v := &FreeVar{ + name: obj.Name(), + typ: outer.Type(), + pos: outer.Pos(), + outer: outer, + parent: f, + } + f.objects[obj] = v + f.FreeVars = append(f.FreeVars, v) + return v +} + +// emit emits the specified instruction to function f. +func (f *Function) emit(instr Instruction) Value { + return f.currentBlock.emit(instr) +} + +// RelString returns the full name of this function, qualified by +// package name, receiver type, etc. +// +// The specific formatting rules are not guaranteed and may change. +// +// Examples: +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer +// +// When these functions are referred to from within the same package +// (i.e. from == f.Pkg.Object), they are rendered without the package path. +// For example: "IsNaN", "(*Buffer).Bytes", etc. +// +// All non-synthetic functions have distinct package-qualified names. +// (But two methods may have the same name "(T).f" if one is a synthetic +// wrapper promoting a non-exported method "f" from another package; in +// that case, the strings are equal but the identifiers "f" are distinct.) +// +func (f *Function) RelString(from *types.Package) string { + // Anonymous? 
+ if f.parent != nil { + // An anonymous function's Name() looks like "parentName$1", + // but its String() should include the type/package/etc. + parent := f.parent.RelString(from) + for i, anon := range f.parent.AnonFuncs { + if anon == f { + return fmt.Sprintf("%s$%d", parent, 1+i) + } + } + + return f.name // should never happen + } + + // Method (declared or wrapper)? + if recv := f.Signature.Recv(); recv != nil { + return f.relMethod(from, recv.Type()) + } + + // Thunk? + if f.method != nil { + return f.relMethod(from, f.method.Recv()) + } + + // Bound? + if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { + return f.relMethod(from, f.FreeVars[0].Type()) + } + + // Package-level function? + // Prefix with package name for cross-package references only. + if p := f.pkg(); p != nil && p != from { + return fmt.Sprintf("%s.%s", p.Path(), f.name) + } + + // Unknown. + return f.name +} + +func (f *Function) relMethod(from *types.Package, recv types.Type) string { + return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) +} + +// writeSignature writes to buf the signature sig in declaration syntax. +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { + buf.WriteString("func ") + if recv := sig.Recv(); recv != nil { + buf.WriteString("(") + if n := params[0].Name(); n != "" { + buf.WriteString(n) + buf.WriteString(" ") + } + types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + buf.WriteString(") ") + } + buf.WriteString(name) + types.WriteSignature(buf, sig, types.RelativeTo(from)) +} + +func (f *Function) pkg() *types.Package { + if f.Pkg != nil { + return f.Pkg.Pkg + } + return nil +} + +var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer + +func (f *Function) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WriteFunction(&buf, f) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WriteFunction writes to buf a human-readable "disassembly" of f. +func WriteFunction(buf *bytes.Buffer, f *Function) { + fmt.Fprintf(buf, "# Name: %s\n", f.String()) + if f.Pkg != nil { + fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) + } + if syn := f.Synthetic; syn != "" { + fmt.Fprintln(buf, "# Synthetic:", syn) + } + if pos := f.Pos(); pos.IsValid() { + fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) + } + + if f.parent != nil { + fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) + } + + if f.Recover != nil { + fmt.Fprintf(buf, "# Recover: %s\n", f.Recover) + } + + from := f.pkg() + + if f.FreeVars != nil { + buf.WriteString("# Free variables:\n") + for i, fv := range f.FreeVars { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) + } + } + + if len(f.Locals) > 0 { + buf.WriteString("# Locals:\n") + for i, l := range f.Locals { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + } + } + writeSignature(buf, from, f.Name(), f.Signature, f.Params) + buf.WriteString(":\n") + + if f.Blocks == nil { + buf.WriteString("\t(external)\n") + } + + // NB. column calculations are confused by non-ASCII + // characters and assume 8-space tabs. + const punchcard = 80 // for old time's sake. + const tabwidth = 8 + for _, b := range f.Blocks { + if b == nil { + // Corrupt CFG. 
+			fmt.Fprintf(buf, ".nil:\n")
+			continue
+		}
+		n, _ := fmt.Fprintf(buf, "%d:", b.Index)
+		bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs))
+		fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg)
+
+		if false { // CFG debugging
+			fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
+		}
+		for _, instr := range b.Instrs {
+			buf.WriteString("\t")
+			switch v := instr.(type) {
+			case Value:
+				l := punchcard - tabwidth
+				// Left-align the instruction.
+				if name := v.Name(); name != "" {
+					n, _ := fmt.Fprintf(buf, "%s = ", name)
+					l -= n
+				}
+				n, _ := buf.WriteString(instr.String())
+				l -= n
+				// Right-align the type if there's space.
+				if t := v.Type(); t != nil {
+					buf.WriteByte(' ')
+					ts := relType(t, from)
+					l -= len(ts) + len("  ") // (spaces before and after type)
+					if l > 0 {
+						fmt.Fprintf(buf, "%*s", l, "")
+					}
+					buf.WriteString(ts)
+				}
+			case nil:
+				// Be robust against bad transforms.
+				buf.WriteString("<deleted>")
+			default:
+				buf.WriteString(instr.String())
+			}
+			buf.WriteString("\n")
+		}
+	}
+	fmt.Fprintf(buf, "\n")
+}
+
+// newBasicBlock adds to f a new basic block and returns it. It does
+// not automatically become the current block for subsequent calls to emit.
+// comment is an optional string for more readable debugging output.
+//
+func (f *Function) newBasicBlock(comment string) *BasicBlock {
+	b := &BasicBlock{
+		Index:   len(f.Blocks),
+		Comment: comment,
+		parent:  f,
+	}
+	b.Succs = b.succs2[:0]
+	f.Blocks = append(f.Blocks, b)
+	return b
+}
+
+// NewFunction returns a new synthetic Function instance belonging to
+// prog, with its name and signature fields set as specified.
+//
+// The caller is responsible for initializing the remaining fields of
+// the function object, e.g. Pkg, Params, Blocks.
+//
+// It is practically impossible for clients to construct well-formed
+// SSA functions/packages/programs directly, so we assume this is the
+// job of the Builder alone. NewFunction exists to provide clients a
+// little flexibility. For example, analysis tools may wish to
+// construct fake Functions for the root of the callgraph, a fake
+// "reflect" package, etc.
+//
+// TODO(adonovan): think harder about the API here.
+//
+func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
+	return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
+}
+
+type extentNode [2]token.Pos
+
+func (n extentNode) Pos() token.Pos { return n[0] }
+func (n extentNode) End() token.Pos { return n[1] }
+
+// Syntax returns an ast.Node whose Pos/End methods provide the
+// lexical extent of the function if it was defined by Go source code
+// (f.Synthetic==""), or nil otherwise.
+//
+// If f was built with debug information (see Package.SetDebugMode),
+// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
+// function. Otherwise, it is an opaque Node providing only position
+// information; this avoids pinning the AST in memory.
+//
+func (f *Function) Syntax() ast.Node { return f.syntax }
diff --git a/vendor/golang.org/x/tools/go/ssa/identical.go b/vendor/golang.org/x/tools/go/ssa/identical.go
new file mode 100644
index 000000000..e8026967b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ssa/identical.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build go1.8 +// +build go1.8 + +package ssa + +import "go/types" + +var structTypesIdentical = types.IdenticalIgnoreTags diff --git a/vendor/golang.org/x/tools/go/ssa/identical_17.go b/vendor/golang.org/x/tools/go/ssa/identical_17.go new file mode 100644 index 000000000..575aa5dfc --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/identical_17.go @@ -0,0 +1,12 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.8 +// +build !go1.8 + +package ssa + +import "go/types" + +var structTypesIdentical = types.Identical diff --git a/vendor/golang.org/x/tools/go/ssa/lift.go b/vendor/golang.org/x/tools/go/ssa/lift.go new file mode 100644 index 000000000..048e9b032 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/lift.go @@ -0,0 +1,653 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the lifting pass which tries to "lift" Alloc +// cells (new/local variables) into SSA registers, replacing loads +// with the dominating stored value, eliminating loads and stores, and +// inserting φ-nodes as needed. + +// Cited papers and resources: +// +// Ron Cytron et al. 1991. Efficiently computing SSA form... +// http://doi.acm.org/10.1145/115372.115320 +// +// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. +// Software Practice and Experience 2001, 4:1-10. +// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf +// +// Daniel Berlin, llvmdev mailing list, 2012. +// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html +// (Be sure to expand the whole thread.) + +// TODO(adonovan): opt: there are many optimizations worth evaluating, and +// the conventional wisdom for SSA construction is that a simple +// algorithm well engineered often beats those of better asymptotic +// complexity on all but the most egregious inputs. +// +// Danny Berlin suggests that the Cooper et al. algorithm for +// computing the dominance frontier is superior to Cytron et al. +// Furthermore he recommends that rather than computing the DF for the +// whole function then renaming all alloc cells, it may be cheaper to +// compute the DF for each alloc cell separately and throw it away. +// +// Consider exploiting liveness information to avoid creating dead +// φ-nodes which we then immediately remove. +// +// Also see many other "TODO: opt" suggestions in the code. + +import ( + "fmt" + "go/token" + "go/types" + "math/big" + "os" +) + +// If true, show diagnostic information at each step of lifting. +// Very verbose. +const debugLifting = false + +// domFrontier maps each block to the set of blocks in its dominance +// frontier. The outer slice is conceptually a map keyed by +// Block.Index. The inner slice is conceptually a set, possibly +// containing duplicates. +// +// TODO(adonovan): opt: measure impact of dups; consider a packed bit +// representation, e.g. big.Int, and bitwise parallel operations for +// the union step in the Children loop. +// +// domFrontier's methods mutate the slice's elements but not its +// length, so their receivers needn't be pointers. 
+// +type domFrontier [][]*BasicBlock + +func (df domFrontier) add(u, v *BasicBlock) { + p := &df[u.Index] + *p = append(*p, v) +} + +// build builds the dominance frontier df for the dominator (sub)tree +// rooted at u, using the Cytron et al. algorithm. +// +// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA +// by pruning the entire IDF computation, rather than merely pruning +// the DF -> IDF step. +func (df domFrontier) build(u *BasicBlock) { + // Encounter each node u in postorder of dom tree. + for _, child := range u.dom.children { + df.build(child) + } + for _, vb := range u.Succs { + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + for _, w := range u.dom.children { + for _, vb := range df[w.Index] { + // TODO(adonovan): opt: use word-parallel bitwise union. + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + } +} + +func buildDomFrontier(fn *Function) domFrontier { + df := make(domFrontier, len(fn.Blocks)) + df.build(fn.Blocks[0]) + if fn.Recover != nil { + df.build(fn.Recover) + } + return df +} + +func removeInstr(refs []Instruction, instr Instruction) []Instruction { + i := 0 + for _, ref := range refs { + if ref == instr { + continue + } + refs[i] = ref + i++ + } + for j := i; j != len(refs); j++ { + refs[j] = nil // aid GC + } + return refs[:i] +} + +// lift replaces local and new Allocs accessed only with +// load/store by SSA registers, inserting φ-nodes where necessary. +// The result is a program in classical pruned SSA form. +// +// Preconditions: +// - fn has no dead blocks (blockopt has run). +// - Def/use info (Operands and Referrers) is up-to-date. +// - The dominator tree is up-to-date. +// +func lift(fn *Function) { + // TODO(adonovan): opt: lots of little optimizations may be + // worthwhile here, especially if they cause us to avoid + // buildDomFrontier. For example: + // + // - Alloc never loaded? Eliminate. + // - Alloc never stored? Replace all loads with a zero constant. + // - Alloc stored once? Replace loads with dominating store; + // don't forget that an Alloc is itself an effective store + // of zero. + // - Alloc used only within a single block? + // Use degenerate algorithm avoiding φ-nodes. + // - Consider synergy with scalar replacement of aggregates (SRA). + // e.g. *(&x.f) where x is an Alloc. + // Perhaps we'd get better results if we generated this as x.f + // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). + // Unclear. + // + // But we will start with the simplest correct code. + df := buildDomFrontier(fn) + + if debugLifting { + title := false + for i, blocks := range df { + if blocks != nil { + if !title { + fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) + title = true + } + fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) + } + } + } + + newPhis := make(newPhiMap) + + // During this pass we will replace some BasicBlock.Instrs + // (allocs, loads and stores) with nil, keeping a count in + // BasicBlock.gaps. At the end we will reset Instrs to the + // concatenation of all non-dead newPhis and non-nil Instrs + // for the block, reusing the original array if space permits. + + // While we're here, we also eliminate 'rundefers' + // instructions in functions that contain no 'defer' + // instructions. + usesDefer := false + + // A counter used to generate ~unique ids for Phi nodes, as an + // aid to debugging. We use large numbers to make them highly + // visible. All nodes are renumbered later. + fresh := 1000 + + // Determine which allocs we can lift and number them densely. 
+ // The renaming phase uses this numbering for compact maps. + numAllocs := 0 + for _, b := range fn.Blocks { + b.gaps = 0 + b.rundefers = 0 + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *Alloc: + index := -1 + if liftAlloc(df, instr, newPhis, &fresh) { + index = numAllocs + numAllocs++ + } + instr.index = index + case *Defer: + usesDefer = true + case *RunDefers: + b.rundefers++ + } + } + } + + // renaming maps an alloc (keyed by index) to its replacement + // value. Initially the renaming contains nil, signifying the + // zero constant of the appropriate type; we construct the + // Const lazily at most once on each path through the domtree. + // TODO(adonovan): opt: cache per-function not per subtree. + renaming := make([]Value, numAllocs) + + // Renaming. + rename(fn.Blocks[0], renaming, newPhis) + + // Eliminate dead φ-nodes. + removeDeadPhis(fn.Blocks, newPhis) + + // Prepend remaining live φ-nodes to each block. + for _, b := range fn.Blocks { + nps := newPhis[b] + j := len(nps) + + rundefersToKill := b.rundefers + if usesDefer { + rundefersToKill = 0 + } + + if j+b.gaps+rundefersToKill == 0 { + continue // fast path: no new phis or gaps + } + + // Compact nps + non-nil Instrs into a new slice. + // TODO(adonovan): opt: compact in situ (rightwards) + // if Instrs has sufficient space or slack. + dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill) + for i, np := range nps { + dst[i] = np.phi + } + for _, instr := range b.Instrs { + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[j] = instr + j++ + } + b.Instrs = dst + } + + // Remove any fn.Locals that were lifted. + j := 0 + for _, l := range fn.Locals { + if l.index < 0 { + fn.Locals[j] = l + j++ + } + } + // Nil out fn.Locals[j:] to aid GC. + for i := j; i < len(fn.Locals); i++ { + fn.Locals[i] = nil + } + fn.Locals = fn.Locals[:j] +} + +// removeDeadPhis removes φ-nodes not transitively needed by a +// non-Phi, non-DebugRef instruction. +func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) { + // First pass: find the set of "live" φ-nodes: those reachable + // from some non-Phi instruction. + // + // We compute reachability in reverse, starting from each φ, + // rather than forwards, starting from each live non-Phi + // instruction, because this way visits much less of the + // Value graph. + livePhis := make(map[*Phi]bool) + for _, npList := range newPhis { + for _, np := range npList { + phi := np.phi + if !livePhis[phi] && phiHasDirectReferrer(phi) { + markLivePhi(livePhis, phi) + } + } + } + + // Existing φ-nodes due to && and || operators + // are all considered live (see Go issue 19622). + for _, b := range blocks { + for _, phi := range b.phis() { + markLivePhi(livePhis, phi.(*Phi)) + } + } + + // Second pass: eliminate unused phis from newPhis. + for block, npList := range newPhis { + j := 0 + for _, np := range npList { + if livePhis[np.phi] { + npList[j] = np + j++ + } else { + // discard it, first removing it from referrers + for _, val := range np.phi.Edges { + if refs := val.Referrers(); refs != nil { + *refs = removeInstr(*refs, np.phi) + } + } + np.phi.block = nil + } + } + newPhis[block] = npList[:j] + } +} + +// markLivePhi marks phi, and all φ-nodes transitively reachable via +// its Operands, live. 
+func markLivePhi(livePhis map[*Phi]bool, phi *Phi) { + livePhis[phi] = true + for _, rand := range phi.Operands(nil) { + if q, ok := (*rand).(*Phi); ok { + if !livePhis[q] { + markLivePhi(livePhis, q) + } + } + } +} + +// phiHasDirectReferrer reports whether phi is directly referred to by +// a non-Phi instruction. Such instructions are the +// roots of the liveness traversal. +func phiHasDirectReferrer(phi *Phi) bool { + for _, instr := range *phi.Referrers() { + if _, ok := instr.(*Phi); !ok { + return true + } + } + return false +} + +type blockSet struct{ big.Int } // (inherit methods from Int) + +// add adds b to the set and returns true if the set changed. +func (s *blockSet) add(b *BasicBlock) bool { + i := b.Index + if s.Bit(i) != 0 { + return false + } + s.SetBit(&s.Int, i, 1) + return true +} + +// take removes an arbitrary element from a set s and +// returns its index, or returns -1 if empty. +func (s *blockSet) take() int { + l := s.BitLen() + for i := 0; i < l; i++ { + if s.Bit(i) == 1 { + s.SetBit(&s.Int, i, 0) + return i + } + } + return -1 +} + +// newPhi is a pair of a newly introduced φ-node and the lifted Alloc +// it replaces. +type newPhi struct { + phi *Phi + alloc *Alloc +} + +// newPhiMap records for each basic block, the set of newPhis that +// must be prepended to the block. +type newPhiMap map[*BasicBlock][]newPhi + +// liftAlloc determines whether alloc can be lifted into registers, +// and if so, it populates newPhis with all the φ-nodes it may require +// and returns true. +// +// fresh is a source of fresh ids for phi nodes. +// +func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { + // Don't lift aggregates into registers, because we don't have + // a way to express their zero-constants. + switch deref(alloc.Type()).Underlying().(type) { + case *types.Array, *types.Struct: + return false + } + + // Don't lift named return values in functions that defer + // calls that may recover from panic. + if fn := alloc.Parent(); fn.Recover != nil { + for _, nr := range fn.namedResults { + if nr == alloc { + return false + } + } + } + + // Compute defblocks, the set of blocks containing a + // definition of the alloc cell. + var defblocks blockSet + for _, instr := range *alloc.Referrers() { + // Bail out if we discover the alloc is not liftable; + // the only operations permitted to use the alloc are + // loads/stores into the cell, and DebugRef. + switch instr := instr.(type) { + case *Store: + if instr.Val == alloc { + return false // address used as value + } + if instr.Addr != alloc { + panic("Alloc.Referrers is inconsistent") + } + defblocks.add(instr.Block()) + case *UnOp: + if instr.Op != token.MUL { + return false // not a load + } + if instr.X != alloc { + panic("Alloc.Referrers is inconsistent") + } + case *DebugRef: + // ok + default: + return false // some other instruction + } + } + // The Alloc itself counts as a (zero) definition of the cell. + defblocks.add(alloc.Block()) + + if debugLifting { + fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name()) + } + + fn := alloc.Parent() + + // Φ-insertion. + // + // What follows is the body of the main loop of the insert-φ + // function described by Cytron et al, but instead of using + // counter tricks, we just reset the 'hasAlready' and 'work' + // sets each iteration. These are bitmaps so it's pretty cheap. + // + // TODO(adonovan): opt: recycle slice storage for W, + // hasAlready, defBlocks across liftAlloc calls. 
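+	// hasAlready marks the blocks that have already received a φ-node
+	// for this cell; 'work' records every block ever added to the
+	// worklist W, so no block is enqueued more than once per alloc.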
+ var hasAlready blockSet + + // Initialize W and work to defblocks. + var work blockSet = defblocks // blocks seen + var W blockSet // blocks to do + W.Set(&defblocks.Int) + + // Traverse iterated dominance frontier, inserting φ-nodes. + for i := W.take(); i != -1; i = W.take() { + u := fn.Blocks[i] + for _, v := range df[u.Index] { + if hasAlready.add(v) { + // Create φ-node. + // It will be prepended to v.Instrs later, if needed. + phi := &Phi{ + Edges: make([]Value, len(v.Preds)), + Comment: alloc.Comment, + } + // This is merely a debugging aid: + phi.setNum(*fresh) + *fresh++ + + phi.pos = alloc.Pos() + phi.setType(deref(alloc.Type())) + phi.block = v + if debugLifting { + fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) + } + newPhis[v] = append(newPhis[v], newPhi{phi, alloc}) + + if work.add(v) { + W.add(v) + } + } + } + } + + return true +} + +// replaceAll replaces all intraprocedural uses of x with y, +// updating x.Referrers and y.Referrers. +// Precondition: x.Referrers() != nil, i.e. x must be local to some function. +// +func replaceAll(x, y Value) { + var rands []*Value + pxrefs := x.Referrers() + pyrefs := y.Referrers() + for _, instr := range *pxrefs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if *rand != nil { + if *rand == x { + *rand = y + } + } + } + if pyrefs != nil { + *pyrefs = append(*pyrefs, instr) // dups ok + } + } + *pxrefs = nil // x is now unreferenced +} + +// renamed returns the value to which alloc is being renamed, +// constructing it lazily if it's the implicit zero initialization. +// +func renamed(renaming []Value, alloc *Alloc) Value { + v := renaming[alloc.index] + if v == nil { + v = zeroConst(deref(alloc.Type())) + renaming[alloc.index] = v + } + return v +} + +// rename implements the (Cytron et al) SSA renaming algorithm, a +// preorder traversal of the dominator tree replacing all loads of +// Alloc cells with the value stored to that cell by the dominating +// store instruction. For lifting, we need only consider loads, +// stores and φ-nodes. +// +// renaming is a map from *Alloc (keyed by index number) to its +// dominating stored value; newPhis[x] is the set of new φ-nodes to be +// prepended to block x. +// +func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) { + // Each φ-node becomes the new name for its associated Alloc. + for _, np := range newPhis[u] { + phi := np.phi + alloc := np.alloc + renaming[alloc.index] = phi + } + + // Rename loads and stores of allocs. + for i, instr := range u.Instrs { + switch instr := instr.(type) { + case *Alloc: + if instr.index >= 0 { // store of zero to Alloc cell + // Replace dominated loads by the zero value. + renaming[instr.index] = nil + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) + } + // Delete the Alloc. + u.Instrs[i] = nil + u.gaps++ + } + + case *Store: + if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell + // Replace dominated loads by the stored value. + renaming[alloc.index] = instr.Val + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", + instr, instr.Val.Name()) + } + // Remove the store from the referrer list of the stored value. + if refs := instr.Val.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + // Delete the Store. 
+ u.Instrs[i] = nil + u.gaps++ + } + + case *UnOp: + if instr.Op == token.MUL { + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", + instr.Name(), instr, newval.Name()) + } + // Replace all references to + // the loaded value by the + // dominating stored value. + replaceAll(instr, newval) + // Delete the Load. + u.Instrs[i] = nil + u.gaps++ + } + } + + case *DebugRef: + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell + if instr.IsAddr { + instr.X = renamed(renaming, alloc) + instr.IsAddr = false + + // Add DebugRef to instr.X's referrers. + if refs := instr.X.Referrers(); refs != nil { + *refs = append(*refs, instr) + } + } else { + // A source expression denotes the address + // of an Alloc that was optimized away. + instr.X = nil + + // Delete the DebugRef. + u.Instrs[i] = nil + u.gaps++ + } + } + } + } + + // For each φ-node in a CFG successor, rename the edge. + for _, v := range u.Succs { + phis := newPhis[v] + if len(phis) == 0 { + continue + } + i := v.predIndex(u) + for _, np := range phis { + phi := np.phi + alloc := np.alloc + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", + phi.Name(), u, v, i, alloc.Name(), newval.Name()) + } + phi.Edges[i] = newval + if prefs := newval.Referrers(); prefs != nil { + *prefs = append(*prefs, phi) + } + } + } + + // Continue depth-first recursion over domtree, pushing a + // fresh copy of the renaming map for each subtree. + for i, v := range u.dom.children { + r := renaming + if i < len(u.dom.children)-1 { + // On all but the final iteration, we must make + // a copy to avoid destructive update. + r = make([]Value, len(renaming)) + copy(r, renaming) + } + rename(v, r, newPhis) + } + +} diff --git a/vendor/golang.org/x/tools/go/ssa/lvalue.go b/vendor/golang.org/x/tools/go/ssa/lvalue.go new file mode 100644 index 000000000..4d85be3ec --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/lvalue.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// lvalues are the union of addressable expressions and map-index +// expressions. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// An lvalue represents an assignable location that may appear on the +// left-hand side of an assignment. This is a generalization of a +// pointer to permit updates to elements of maps. +// +type lvalue interface { + store(fn *Function, v Value) // stores v into the location + load(fn *Function) Value // loads the contents of the location + address(fn *Function) Value // address of the location + typ() types.Type // returns the type of the location +} + +// An address is an lvalue represented by a true pointer. +type address struct { + addr Value + pos token.Pos // source position + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (a *address) load(fn *Function) Value { + load := emitLoad(fn, a.addr) + load.pos = a.pos + return load +} + +func (a *address) store(fn *Function, v Value) { + store := emitStore(fn, a.addr, v, a.pos) + if a.expr != nil { + // store.Val is v, converted for assignability. 
+ emitDebugRef(fn, a.expr, store.Val, false) + } +} + +func (a *address) address(fn *Function) Value { + if a.expr != nil { + emitDebugRef(fn, a.expr, a.addr, true) + } + return a.addr +} + +func (a *address) typ() types.Type { + return deref(a.addr.Type()) +} + +// An element is an lvalue represented by m[k], the location of an +// element of a map or string. These locations are not addressable +// since pointers cannot be formed from them, but they do support +// load(), and in the case of maps, store(). +// +type element struct { + m, k Value // map or string + t types.Type // map element type or string byte type + pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) +} + +func (e *element) load(fn *Function) Value { + l := &Lookup{ + X: e.m, + Index: e.k, + } + l.setPos(e.pos) + l.setType(e.t) + return fn.emit(l) +} + +func (e *element) store(fn *Function, v Value) { + up := &MapUpdate{ + Map: e.m, + Key: e.k, + Value: emitConv(fn, v, e.t), + } + up.pos = e.pos + fn.emit(up) +} + +func (e *element) address(fn *Function) Value { + panic("map/string elements are not addressable") +} + +func (e *element) typ() types.Type { + return e.t +} + +// A blank is a dummy variable whose name is "_". +// It is not reified: loads are illegal and stores are ignored. +// +type blank struct{} + +func (bl blank) load(fn *Function) Value { + panic("blank.load is illegal") +} + +func (bl blank) store(fn *Function, v Value) { + // no-op +} + +func (bl blank) address(fn *Function) Value { + panic("blank var is not addressable") +} + +func (bl blank) typ() types.Type { + // This should be the type of the blank Ident; the typechecker + // doesn't provide this yet, but fortunately, we don't need it + // yet either. + panic("blank.typ is unimplemented") +} diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go new file mode 100644 index 000000000..9cf383916 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/methods.go @@ -0,0 +1,239 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for population of method sets. + +import ( + "fmt" + "go/types" +) + +// MethodValue returns the Function implementing method sel, building +// wrapper methods on demand. It returns nil if sel denotes an +// abstract (interface) method. +// +// Precondition: sel.Kind() == MethodVal. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) MethodValue(sel *types.Selection) *Function { + if sel.Kind() != types.MethodVal { + panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) + } + T := sel.Recv() + if isInterface(T) { + return nil // abstract method + } + if prog.mode&LogSource != 0 { + defer logStack("MethodValue %s %v", T, sel)() + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + return prog.addMethod(prog.createMethodSet(T), sel) +} + +// LookupMethod returns the implementation of the method of type T +// identified by (pkg, name). It returns nil if the method exists but +// is abstract, and panics if T has no such method. 
+// +func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { + sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) + if sel == nil { + panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) + } + return prog.MethodValue(sel) +} + +// methodSet contains the (concrete) methods of a non-interface type. +type methodSet struct { + mapping map[string]*Function // populated lazily + complete bool // mapping contains all methods +} + +// Precondition: !isInterface(T). +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) createMethodSet(T types.Type) *methodSet { + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + return mset +} + +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function { + if sel.Kind() == types.MethodExpr { + panic(sel) + } + id := sel.Obj().Id() + fn := mset.mapping[id] + if fn == nil { + obj := sel.Obj().(*types.Func) + + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) + if needsPromotion || needsIndirection { + fn = makeWrapper(prog, sel) + } else { + fn = prog.declaredFunc(obj) + } + if fn.Signature.Recv() == nil { + panic(fn) // missing receiver + } + mset.mapping[id] = fn + } + return fn +} + +// RuntimeTypes returns a new unordered slice containing all +// concrete types in the program for which a complete (non-empty) +// method set is required at run-time. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) RuntimeTypes() []types.Type { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + var res []types.Type + prog.methodSets.Iterate(func(T types.Type, v interface{}) { + if v.(*methodSet).complete { + res = append(res, T) + } + }) + return res +} + +// declaredFunc returns the concrete function/method denoted by obj. +// Panic ensues if there is none. +// +func (prog *Program) declaredFunc(obj *types.Func) *Function { + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Function) + } + panic("no concrete method: " + obj.String()) +} + +// needMethodsOf ensures that runtime type information (including the +// complete method set) is available for the specified type T and all +// its subcomponents. +// +// needMethodsOf must be called for at least every type that is an +// operand of some MakeInterface instruction, and for the type of +// every exported package member. +// +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// +// Thread-safe. (Called via emitConv from multiple builder goroutines.) +// +// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) needMethodsOf(T types.Type) { + prog.methodsMu.Lock() + prog.needMethods(T, false) + prog.methodsMu.Unlock() +} + +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// Recursive case: skip => don't create methods for T. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func (prog *Program) needMethods(T types.Type, skip bool) { + // Each package maintains its own set of types it has visited. 
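+	// The stored bool is the 'skip' flag from the previous visit: T only
+	// needs revisiting when an earlier skip=true call is followed by a
+	// skip=false call, which must then create T's method set.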
+ if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { + // needMethods(T) was previously called + if !prevSkip || skip { + return // already seen, with same or false 'skip' value + } + } + prog.runtimeTypes.Set(T, skip) + + tmset := prog.MethodSets.MethodSet(T) + + if !skip && !isInterface(T) && tmset.Len() > 0 { + // Create methods of T. + mset := prog.createMethodSet(T) + if !mset.complete { + mset.complete = true + n := tmset.Len() + for i := 0; i < n; i++ { + prog.addMethod(mset, tmset.At(i)) + } + } + } + + // Recursion over signatures of each method. + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + prog.needMethods(sig.Params(), false) + prog.needMethods(sig.Results(), false) + } + + switch t := T.(type) { + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + prog.needMethods(t.Elem(), false) + + case *types.Slice: + prog.needMethods(t.Elem(), false) + + case *types.Chan: + prog.needMethods(t.Elem(), false) + + case *types.Map: + prog.needMethods(t.Key(), false) + prog.needMethods(t.Elem(), false) + + case *types.Signature: + if t.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) + } + prog.needMethods(t.Params(), false) + prog.needMethods(t.Results(), false) + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + prog.needMethods(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + prog.needMethods(t.Underlying(), true) + + case *types.Array: + prog.needMethods(t.Elem(), false) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + prog.needMethods(t.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, t.Len(); i < n; i++ { + prog.needMethods(t.At(i).Type(), false) + } + + default: + panic(T) + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/mode.go b/vendor/golang.org/x/tools/go/ssa/mode.go new file mode 100644 index 000000000..298f24b91 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/mode.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the BuilderMode type and its command-line flag. + +import ( + "bytes" + "fmt" +) + +// BuilderMode is a bitmask of options for diagnostics and checking. +// +// *BuilderMode satisfies the flag.Value interface. Example: +// +// var mode = ssa.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) } +// +type BuilderMode uint + +const ( + PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout + PrintFunctions // Print function SSA code to stdout + LogSource // Log source locations as SSA builder progresses + SanityCheckFunctions // Perform sanity checking of function bodies + NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers + BuildSerially // Build packages serially, not in parallel. + GlobalDebug // Enable debug info for all packages + BareInits // Build init functions without guards or calls to dependent inits +) + +const BuilderModeDoc = `Options controlling the SSA builder. 
+The value is a sequence of zero or more of these letters: +C perform sanity [C]hecking of the SSA form. +D include [D]ebug info for every function. +P print [P]ackage inventory. +F print [F]unction SSA code. +S log [S]ource locations as SSA builder progresses. +L build distinct packages seria[L]ly instead of in parallel. +N build [N]aive SSA form: don't replace local loads/stores with registers. +I build bare [I]nit functions: no init guards or calls to dependent inits. +` + +func (m BuilderMode) String() string { + var buf bytes.Buffer + if m&GlobalDebug != 0 { + buf.WriteByte('D') + } + if m&PrintPackages != 0 { + buf.WriteByte('P') + } + if m&PrintFunctions != 0 { + buf.WriteByte('F') + } + if m&LogSource != 0 { + buf.WriteByte('S') + } + if m&SanityCheckFunctions != 0 { + buf.WriteByte('C') + } + if m&NaiveForm != 0 { + buf.WriteByte('N') + } + if m&BuildSerially != 0 { + buf.WriteByte('L') + } + if m&BareInits != 0 { + buf.WriteByte('I') + } + return buf.String() +} + +// Set parses the flag characters in s and updates *m. +func (m *BuilderMode) Set(s string) error { + var mode BuilderMode + for _, c := range s { + switch c { + case 'D': + mode |= GlobalDebug + case 'P': + mode |= PrintPackages + case 'F': + mode |= PrintFunctions + case 'S': + mode |= LogSource | BuildSerially + case 'C': + mode |= SanityCheckFunctions + case 'N': + mode |= NaiveForm + case 'L': + mode |= BuildSerially + case 'I': + mode |= BareInits + default: + return fmt.Errorf("unknown BuilderMode option: %q", c) + } + } + *m = mode + return nil +} + +// Get returns m. +func (m BuilderMode) Get() interface{} { return m } diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go new file mode 100644 index 000000000..3333ba41a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/print.go @@ -0,0 +1,431 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the String() methods for all Value and +// Instruction types. + +import ( + "bytes" + "fmt" + "go/types" + "io" + "reflect" + "sort" + + "golang.org/x/tools/go/types/typeutil" +) + +// relName returns the name of v relative to i. +// In most cases, this is identical to v.Name(), but references to +// Functions (including methods) and Globals use RelString and +// all types are displayed with relType, so that only cross-package +// references are package-qualified. +// +func relName(v Value, i Instruction) string { + var from *types.Package + if i != nil { + from = i.Parent().pkg() + } + switch v := v.(type) { + case Member: // *Function or *Global + return v.RelString(from) + case *Const: + return v.RelString(from) + } + return v.Name() +} + +func relType(t types.Type, from *types.Package) string { + return types.TypeString(t, types.RelativeTo(from)) +} + +func relString(m Member, from *types.Package) string { + // NB: not all globals have an Object (e.g. init$guard), + // so use Package().Object not Object.Package(). + if pkg := m.Package().Pkg; pkg != nil && pkg != from { + return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) + } + return m.Name() +} + +// Value.String() +// +// This method is provided only for debugging. +// It never appears in disassembly, which uses Value.Name(). 
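
For reference, the BuilderMode letters defined in mode.go above can be wired straight into the standard flag package, exactly as the type's doc comment suggests. The following is a minimal sketch; the flag name "ssabuild" and the printed diagnostic are illustrative assumptions rather than part of the vendored code. Note that Set treats 'S' as implying serial building, so the String form of a parsed mode may contain more letters than were supplied.

package main

import (
	"flag"
	"fmt"

	"golang.org/x/tools/go/ssa"
)

// mode accumulates SSA builder options from the command line;
// *ssa.BuilderMode satisfies flag.Value via its Set and String methods.
var mode = ssa.BuilderMode(0)

func init() {
	// The flag name "ssabuild" is arbitrary; BuilderModeDoc supplies the usage text.
	flag.Var(&mode, "ssabuild", ssa.BuilderModeDoc)
}

func main() {
	flag.Parse()
	// Example round trip: -ssabuild=SCN parses to
	// LogSource|BuildSerially|SanityCheckFunctions|NaiveForm,
	// which String renders as "SCNL".
	fmt.Println("SSA build mode:", mode)
}
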
+ +func (v *Parameter) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *FreeVar) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *Builtin) String() string { + return fmt.Sprintf("builtin %s", v.Name()) +} + +// Instruction.String() + +func (v *Alloc) String() string { + op := "local" + if v.Heap { + op = "new" + } + from := v.Parent().pkg() + return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) +} + +func (v *Phi) String() string { + var b bytes.Buffer + b.WriteString("phi [") + for i, edge := range v.Edges { + if i > 0 { + b.WriteString(", ") + } + // Be robust against malformed CFG. + if v.block == nil { + b.WriteString("??") + continue + } + block := -1 + if i < len(v.block.Preds) { + block = v.block.Preds[i].Index + } + fmt.Fprintf(&b, "%d: ", block) + edgeVal := "" // be robust + if edge != nil { + edgeVal = relName(edge, v) + } + b.WriteString(edgeVal) + } + b.WriteString("]") + if v.Comment != "" { + b.WriteString(" #") + b.WriteString(v.Comment) + } + return b.String() +} + +func printCall(v *CallCommon, prefix string, instr Instruction) string { + var b bytes.Buffer + b.WriteString(prefix) + if !v.IsInvoke() { + b.WriteString(relName(v.Value, instr)) + } else { + fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name()) + } + b.WriteString("(") + for i, arg := range v.Args { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(arg, instr)) + } + if v.Signature().Variadic() { + b.WriteString("...") + } + b.WriteString(")") + return b.String() +} + +func (c *CallCommon) String() string { + return printCall(c, "", nil) +} + +func (v *Call) String() string { + return printCall(&v.Call, "", v) +} + +func (v *BinOp) String() string { + return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v)) +} + +func (v *UnOp) String() string { + return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk)) +} + +func printConv(prefix string, v, x Value) string { + from := v.Parent().pkg() + return fmt.Sprintf("%s %s <- %s (%s)", + prefix, + relType(v.Type(), from), + relType(x.Type(), from), + relName(x, v.(Instruction))) +} + +func (v *ChangeType) String() string { return printConv("changetype", v, v.X) } +func (v *Convert) String() string { return printConv("convert", v, v.X) } +func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) } +func (v *MakeInterface) String() string { return printConv("make", v, v.X) } + +func (v *MakeClosure) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v)) + if v.Bindings != nil { + b.WriteString(" [") + for i, c := range v.Bindings { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(c, v)) + } + b.WriteString("]") + } + return b.String() +} + +func (v *MakeSlice) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s %s", + relType(v.Type(), from), + relName(v.Len, v), + relName(v.Cap, v)) +} + +func (v *Slice) String() string { + var b bytes.Buffer + b.WriteString("slice ") + b.WriteString(relName(v.X, v)) + b.WriteString("[") + if v.Low != nil { + b.WriteString(relName(v.Low, v)) + } + b.WriteString(":") + if v.High != nil { + b.WriteString(relName(v.High, v)) + } + if v.Max != nil { + b.WriteString(":") + b.WriteString(relName(v.Max, v)) + } + b.WriteString("]") + return b.String() +} + +func (v 
*MakeMap) String() string { + res := "" + if v.Reserve != nil { + res = relName(v.Reserve, v) + } + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s", relType(v.Type(), from), res) +} + +func (v *MakeChan) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v)) +} + +func (v *FieldAddr) String() string { + st := deref(v.X.Type()).Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *Field) String() string { + st := v.X.Type().Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *IndexAddr) String() string { + return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Index) String() string { + return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Lookup) String() string { + return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk)) +} + +func (v *Range) String() string { + return "range " + relName(v.X, v) +} + +func (v *Next) String() string { + return "next " + relName(v.Iter, v) +} + +func (v *TypeAssert) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from)) +} + +func (v *Extract) String() string { + return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index) +} + +func (s *Jump) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("jump %d", block) +} + +func (s *If) String() string { + // Be robust against malformed CFG. 
+ tblock, fblock := -1, -1 + if s.block != nil && len(s.block.Succs) == 2 { + tblock = s.block.Succs[0].Index + fblock = s.block.Succs[1].Index + } + return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock) +} + +func (s *Go) String() string { + return printCall(&s.Call, "go ", s) +} + +func (s *Panic) String() string { + return "panic " + relName(s.X, s) +} + +func (s *Return) String() string { + var b bytes.Buffer + b.WriteString("return") + for i, r := range s.Results { + if i == 0 { + b.WriteString(" ") + } else { + b.WriteString(", ") + } + b.WriteString(relName(r, s)) + } + return b.String() +} + +func (*RunDefers) String() string { + return "rundefers" +} + +func (s *Send) String() string { + return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s)) +} + +func (s *Defer) String() string { + return printCall(&s.Call, "defer ", s) +} + +func (s *Select) String() string { + var b bytes.Buffer + for i, st := range s.States { + if i > 0 { + b.WriteString(", ") + } + if st.Dir == types.RecvOnly { + b.WriteString("<-") + b.WriteString(relName(st.Chan, s)) + } else { + b.WriteString(relName(st.Chan, s)) + b.WriteString("<-") + b.WriteString(relName(st.Send, s)) + } + } + non := "" + if !s.Blocking { + non = "non" + } + return fmt.Sprintf("select %sblocking [%s]", non, b.String()) +} + +func (s *Store) String() string { + return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s)) +} + +func (s *MapUpdate) String() string { + return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) +} + +func (s *DebugRef) String() string { + p := s.Parent().Prog.Fset.Position(s.Pos()) + var descr interface{} + if s.object != nil { + descr = s.object // e.g. "var x int" + } else { + descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr" + } + var addr string + if s.IsAddr { + addr = "address of " + } + return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) +} + +func (p *Package) String() string { + return "package " + p.Pkg.Path() +} + +var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer + +func (p *Package) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WritePackage(&buf, p) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WritePackage writes to buf a human-readable summary of p. 
+func WritePackage(buf *bytes.Buffer, p *Package) { + fmt.Fprintf(buf, "%s:\n", p) + + var names []string + maxname := 0 + for name := range p.Members { + if l := len(name); l > maxname { + maxname = l + } + names = append(names, name) + } + + from := p.Pkg + sort.Strings(names) + for _, name := range names { + switch mem := p.Members[name].(type) { + case *NamedConst: + fmt.Fprintf(buf, " const %-*s %s = %s\n", + maxname, name, mem.Name(), mem.Value.RelString(from)) + + case *Function: + fmt.Fprintf(buf, " func %-*s %s\n", + maxname, name, relType(mem.Type(), from)) + + case *Type: + fmt.Fprintf(buf, " type %-*s %s\n", + maxname, name, relType(mem.Type().Underlying(), from)) + for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { + fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) + } + + case *Global: + fmt.Fprintf(buf, " var %-*s %s\n", + maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + } + } + + fmt.Fprintf(buf, "\n") +} + +func commaOk(x bool) string { + if x { + return ",ok" + } + return "" +} diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go new file mode 100644 index 000000000..16df7e4f0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -0,0 +1,539 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// An optional pass for sanity-checking invariants of the SSA representation. +// Currently it checks CFG invariants but little at the instruction level. + +import ( + "fmt" + "go/types" + "io" + "os" + "strings" +) + +type sanity struct { + reporter io.Writer + fn *Function + block *BasicBlock + instrs map[Instruction]struct{} + insane bool +} + +// sanityCheck performs integrity checking of the SSA representation +// of the function fn and returns true if it was valid. Diagnostics +// are written to reporter if non-nil, os.Stderr otherwise. Some +// diagnostics are only warnings and do not imply a negative result. +// +// Sanity-checking is intended to facilitate the debugging of code +// transformation passes. +// +func sanityCheck(fn *Function, reporter io.Writer) bool { + if reporter == nil { + reporter = os.Stderr + } + return (&sanity{reporter: reporter}).checkFunction(fn) +} + +// mustSanityCheck is like sanityCheck but panics instead of returning +// a negative result. +// +func mustSanityCheck(fn *Function, reporter io.Writer) { + if !sanityCheck(fn, reporter) { + fn.WriteTo(os.Stderr) + panic("SanityCheck failed") + } +} + +func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { + fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) + if s.block != nil { + fmt.Fprintf(s.reporter, ", block %s", s.block) + } + io.WriteString(s.reporter, ": ") + fmt.Fprintf(s.reporter, format, args...) + io.WriteString(s.reporter, "\n") +} + +func (s *sanity) errorf(format string, args ...interface{}) { + s.insane = true + s.diagnostic("Error", format, args...) +} + +func (s *sanity) warnf(format string, args ...interface{}) { + s.diagnostic("Warning", format, args...) +} + +// findDuplicate returns an arbitrary basic block that appeared more +// than once in blocks, or nil if all were unique. 
+func findDuplicate(blocks []*BasicBlock) *BasicBlock { + if len(blocks) < 2 { + return nil + } + if blocks[0] == blocks[1] { + return blocks[0] + } + // Slow path: + m := make(map[*BasicBlock]bool) + for _, b := range blocks { + if m[b] { + return b + } + m[b] = true + } + return nil +} + +func (s *sanity) checkInstr(idx int, instr Instruction) { + switch instr := instr.(type) { + case *If, *Jump, *Return, *Panic: + s.errorf("control flow instruction not at end of block") + case *Phi: + if idx == 0 { + // It suffices to apply this check to just the first phi node. + if dup := findDuplicate(s.block.Preds); dup != nil { + s.errorf("phi node in block with duplicate predecessor %s", dup) + } + } else { + prev := s.block.Instrs[idx-1] + if _, ok := prev.(*Phi); !ok { + s.errorf("Phi instruction follows a non-Phi: %T", prev) + } + } + if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { + s.errorf("phi node has %d edges but %d predecessors", ne, np) + + } else { + for i, e := range instr.Edges { + if e == nil { + s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) + } + } + } + + case *Alloc: + if !instr.Heap { + found := false + for _, l := range s.fn.Locals { + if l == instr { + found = true + break + } + } + if !found { + s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) + } + } + + case *BinOp: + case *Call: + case *ChangeInterface: + case *ChangeType: + case *Convert: + if _, ok := instr.X.Type().Underlying().(*types.Slice); ok { + if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok { + if _, ok := ptr.Elem().(*types.Array); ok { + break + } + } + } + if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { + if _, ok := instr.Type().Underlying().(*types.Basic); !ok { + s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) + } + } + + case *Defer: + case *Extract: + case *Field: + case *FieldAddr: + case *Go: + case *Index: + case *IndexAddr: + case *Lookup: + case *MakeChan: + case *MakeClosure: + numFree := len(instr.Fn.(*Function).FreeVars) + numBind := len(instr.Bindings) + if numFree != numBind { + s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", + numBind, instr.Fn, numFree) + + } + if recv := instr.Type().(*types.Signature).Recv(); recv != nil { + s.errorf("MakeClosure's type includes receiver %s", recv.Type()) + } + + case *MakeInterface: + case *MakeMap: + case *MakeSlice: + case *MapUpdate: + case *Next: + case *Range: + case *RunDefers: + case *Select: + case *Send: + case *Slice: + case *Store: + case *TypeAssert: + case *UnOp: + case *DebugRef: + // TODO(adonovan): implement checks. + default: + panic(fmt.Sprintf("Unknown instruction type: %T", instr)) + } + + if call, ok := instr.(CallInstruction); ok { + if call.Common().Signature() == nil { + s.errorf("nil signature: %s", call) + } + } + + // Check that value-defining instructions have valid types + // and a valid referrer list. + if v, ok := instr.(Value); ok { + t := v.Type() + if t == nil { + s.errorf("no type: %s = %s", v.Name(), v) + } else if t == tRangeIter { + // not a proper type; ignore. 
+ } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { + s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + } + s.checkReferrerList(v) + } + + // Untyped constants are legal as instruction Operands(), + // for example: + // _ = "foo"[0] + // or: + // if wordsize==64 {...} + + // All other non-Instruction Values can be found via their + // enclosing Function or Package. +} + +func (s *sanity) checkFinalInstr(instr Instruction) { + switch instr := instr.(type) { + case *If: + if nsuccs := len(s.block.Succs); nsuccs != 2 { + s.errorf("If-terminated block has %d successors; expected 2", nsuccs) + return + } + if s.block.Succs[0] == s.block.Succs[1] { + s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) + return + } + + case *Jump: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) + return + } + + case *Return: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Return-terminated block has %d successors; expected none", nsuccs) + return + } + if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { + s.errorf("%d-ary return in %d-ary function", na, nf) + } + + case *Panic: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Panic-terminated block has %d successors; expected none", nsuccs) + return + } + + default: + s.errorf("non-control flow instruction at end of block") + } +} + +func (s *sanity) checkBlock(b *BasicBlock, index int) { + s.block = b + + if b.Index != index { + s.errorf("block has incorrect Index %d", b.Index) + } + if b.parent != s.fn { + s.errorf("block has incorrect parent %s", b.parent) + } + + // Check all blocks are reachable. + // (The entry block is always implicitly reachable, + // as is the Recover block, if any.) + if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 { + s.warnf("unreachable block") + if b.Instrs == nil { + // Since this block is about to be pruned, + // tolerating transient problems in it + // simplifies other optimizations. + return + } + } + + // Check predecessor and successor relations are dual, + // and that all blocks in CFG belong to same function. + for _, a := range b.Preds { + found := false + for _, bb := range a.Succs { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) + } + if a.parent != s.fn { + s.errorf("predecessor %s belongs to different function %s", a, a.parent) + } + } + for _, c := range b.Succs { + found := false + for _, bb := range c.Preds { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) + } + if c.parent != s.fn { + s.errorf("successor %s belongs to different function %s", c, c.parent) + } + } + + // Check each instruction is sane. + n := len(b.Instrs) + if n == 0 { + s.errorf("basic block contains no instructions") + } + var rands [10]*Value // reuse storage + for j, instr := range b.Instrs { + if instr == nil { + s.errorf("nil instruction at index %d", j) + continue + } + if b2 := instr.Block(); b2 == nil { + s.errorf("nil Block() for instruction at index %d", j) + continue + } else if b2 != b { + s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) + continue + } + if j < n-1 { + s.checkInstr(j, instr) + } else { + s.checkFinalInstr(instr) + } + + // Check Instruction.Operands. 
+ operands: + for i, op := range instr.Operands(rands[:0]) { + if op == nil { + s.errorf("nil operand pointer %d of %s", i, instr) + continue + } + val := *op + if val == nil { + continue // a nil operand is ok + } + + // Check that "untyped" types only appear on constant operands. + if _, ok := (*op).(*Const); !ok { + if basic, ok := (*op).Type().(*types.Basic); ok { + if basic.Info()&types.IsUntyped != 0 { + s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) + } + } + } + + // Check that Operands that are also Instructions belong to same function. + // TODO(adonovan): also check their block dominates block b. + if val, ok := val.(Instruction); ok { + if val.Block() == nil { + s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) + } else if val.Parent() != s.fn { + s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) + } + } + + // Check that each function-local operand of + // instr refers back to instr. (NB: quadratic) + switch val := val.(type) { + case *Const, *Global, *Builtin: + continue // not local + case *Function: + if val.parent == nil { + continue // only anon functions are local + } + } + + // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. + + if refs := val.Referrers(); refs != nil { + for _, ref := range *refs { + if ref == instr { + continue operands + } + } + s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) + } else { + s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) + } + } + } +} + +func (s *sanity) checkReferrerList(v Value) { + refs := v.Referrers() + if refs == nil { + s.errorf("%s has missing referrer list", v.Name()) + return + } + for i, ref := range *refs { + if _, ok := s.instrs[ref]; !ok { + s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + } + } +} + +func (s *sanity) checkFunction(fn *Function) bool { + // TODO(adonovan): check Function invariants: + // - check params match signature + // - check transient fields are nil + // - warn if any fn.Locals do not appear among block instructions. + s.fn = fn + if fn.Prog == nil { + s.errorf("nil Prog") + } + + _ = fn.String() // must not crash + _ = fn.RelString(fn.pkg()) // must not crash + + // All functions have a package, except delegates (which are + // shared across packages, or duplicated as weak symbols in a + // separate-compilation model), and error.Error. + if fn.Pkg == nil { + if strings.HasPrefix(fn.Synthetic, "wrapper ") || + strings.HasPrefix(fn.Synthetic, "bound ") || + strings.HasPrefix(fn.Synthetic, "thunk ") || + strings.HasSuffix(fn.name, "Error") { + // ok + } else { + s.errorf("nil Pkg") + } + } + if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { + s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) + } + for i, l := range fn.Locals { + if l.Parent() != fn { + s.errorf("Local %s at index %d has wrong parent", l.Name(), i) + } + if l.Heap { + s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) + } + } + // Build the set of valid referrers. + s.instrs = make(map[Instruction]struct{}) + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + s.instrs[instr] = struct{}{} + } + } + for i, p := range fn.Params { + if p.Parent() != fn { + s.errorf("Param %s at index %d has wrong parent", p.Name(), i) + } + // Check common suffix of Signature and Params match type. 
+ if sig := fn.Signature; sig != nil { + j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params + if j < 0 { + continue + } + if !types.Identical(p.Type(), sig.Params().At(j).Type()) { + s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type()) + + } + } + s.checkReferrerList(p) + } + for i, fv := range fn.FreeVars { + if fv.Parent() != fn { + s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) + } + s.checkReferrerList(fv) + } + + if fn.Blocks != nil && len(fn.Blocks) == 0 { + // Function _had_ blocks (so it's not external) but + // they were "optimized" away, even the entry block. + s.errorf("Blocks slice is non-nil but empty") + } + for i, b := range fn.Blocks { + if b == nil { + s.warnf("nil *BasicBlock at f.Blocks[%d]", i) + continue + } + s.checkBlock(b, i) + } + if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover { + s.errorf("Recover block is not in Blocks slice") + } + + s.block = nil + for i, anon := range fn.AnonFuncs { + if anon.Parent() != fn { + s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) + } + } + s.fn = nil + return !s.insane +} + +// sanityCheckPackage checks invariants of packages upon creation. +// It does not require that the package is built. +// Unlike sanityCheck (for functions), it just panics at the first error. +func sanityCheckPackage(pkg *Package) { + if pkg.Pkg == nil { + panic(fmt.Sprintf("Package %s has no Object", pkg)) + } + _ = pkg.String() // must not crash + + for name, mem := range pkg.Members { + if name != mem.Name() { + panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", + pkg.Pkg.Path(), mem, mem.Name(), name)) + } + obj := mem.Object() + if obj == nil { + // This check is sound because fields + // {Global,Function}.object have type + // types.Object. (If they were declared as + // *types.{Var,Func}, we'd have a non-empty + // interface containing a nil pointer.) + + continue // not all members have typechecker objects + } + if obj.Name() != name { + if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { + // Ok. The name of a declared init function varies between + // its types.Func ("init") and its ssa.Function ("init#%d"). + } else { + panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", + pkg.Pkg.Path(), mem, obj.Name(), name)) + } + } + if obj.Pos() != mem.Pos() { + panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos())) + } + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/source.go b/vendor/golang.org/x/tools/go/ssa/source.go new file mode 100644 index 000000000..8d9cca170 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/source.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for working with source positions +// or source-level named entities ("objects"). + +// TODO(adonovan): test that {Value,Instruction}.Pos() positions match +// the originating syntax, as specified. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// EnclosingFunction returns the function that contains the syntax +// node denoted by path. +// +// Syntax associated with package-level variable specifications is +// enclosed by the package's init() function. +// +// Returns nil if not found; reasons might include: +// - the node is not enclosed by any function. 
+// - the node is within an anonymous function (FuncLit) and +// its SSA function has not been created yet +// (pkg.Build() has not yet been called). +// +func EnclosingFunction(pkg *Package, path []ast.Node) *Function { + // Start with package-level function... + fn := findEnclosingPackageLevelFunction(pkg, path) + if fn == nil { + return nil // not in any function + } + + // ...then walk down the nested anonymous functions. + n := len(path) +outer: + for i := range path { + if lit, ok := path[n-1-i].(*ast.FuncLit); ok { + for _, anon := range fn.AnonFuncs { + if anon.Pos() == lit.Type.Func { + fn = anon + continue outer + } + } + // SSA function not found: + // - package not yet built, or maybe + // - builder skipped FuncLit in dead block + // (in principle; but currently the Builder + // generates even dead FuncLits). + return nil + } + } + return fn +} + +// HasEnclosingFunction returns true if the AST node denoted by path +// is contained within the declaration of some function or +// package-level variable. +// +// Unlike EnclosingFunction, the behaviour of this function does not +// depend on whether SSA code for pkg has been built, so it can be +// used to quickly reject check inputs that will cause +// EnclosingFunction to fail, prior to SSA building. +// +func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { + return findEnclosingPackageLevelFunction(pkg, path) != nil +} + +// findEnclosingPackageLevelFunction returns the Function +// corresponding to the package-level function enclosing path. +// +func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { + if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] + switch decl := path[n-2].(type) { + case *ast.GenDecl: + if decl.Tok == token.VAR && n >= 3 { + // Package-level 'var' initializer. + return pkg.init + } + + case *ast.FuncDecl: + if decl.Recv == nil && decl.Name.Name == "init" { + // Explicit init() function. + for _, b := range pkg.init.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*Call); ok { + if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos { + return callee + } + } + } + } + // Hack: return non-nil when SSA is not yet + // built so that HasEnclosingFunction works. + return pkg.init + } + // Declared function/method. + return findNamedFunc(pkg, decl.Name.NamePos) + } + } + return nil // not in any function +} + +// findNamedFunc returns the named function whose FuncDecl.Ident is at +// position pos. +// +func findNamedFunc(pkg *Package, pos token.Pos) *Function { + // Look at all package members and method sets of named types. + // Not very efficient. + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *Function: + if mem.Pos() == pos { + return mem + } + case *Type: + mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type())) + for i, n := 0, mset.Len(); i < n; i++ { + // Don't call Program.Method: avoid creating wrappers. + obj := mset.At(i).Obj().(*types.Func) + if obj.Pos() == pos { + return pkg.values[obj].(*Function) + } + } + } + } + return nil +} + +// ValueForExpr returns the SSA Value that corresponds to non-constant +// expression e. +// +// It returns nil if no value was found, e.g. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) 
+// - e is a reference to nil or a built-in function. +// - the value was optimised away. +// +// If e is an addressable expression used in an lvalue context, +// value is the address denoted by e, and isAddr is true. +// +// The types of e (or &e, if isAddr) and the result are equal +// (modulo "untyped" bools resulting from comparisons). +// +// (Tip: to find the ssa.Value given a source position, use +// astutil.PathEnclosingInterval to locate the ast.Node, then +// EnclosingFunction to locate the Function, then ValueForExpr to find +// the ssa.Value.) +// +func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { + if f.debugInfo() { // (opt) + e = unparen(e) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if ref, ok := instr.(*DebugRef); ok { + if ref.Expr == e { + return ref.X, ref.IsAddr + } + } + } + } + } + return +} + +// --- Lookup functions for source-level named entities (types.Objects) --- + +// Package returns the SSA Package corresponding to the specified +// type-checker package object. +// It returns nil if no such SSA package has been created. +// +func (prog *Program) Package(obj *types.Package) *Package { + return prog.packages[obj] +} + +// packageLevelValue returns the package-level value corresponding to +// the specified named object, which may be a package-level const +// (*Const), var (*Global) or func (*Function) of some package in +// prog. It returns nil if the object is not found. +// +func (prog *Program) packageLevelValue(obj types.Object) Value { + if pkg, ok := prog.packages[obj.Pkg()]; ok { + return pkg.values[obj] + } + return nil +} + +// FuncValue returns the concrete Function denoted by the source-level +// named function obj, or nil if obj denotes an interface method. +// +// TODO(adonovan): check the invariant that obj.Type() matches the +// result's Signature, both in the params/results and in the receiver. +// +func (prog *Program) FuncValue(obj *types.Func) *Function { + fn, _ := prog.packageLevelValue(obj).(*Function) + return fn +} + +// ConstValue returns the SSA Value denoted by the source-level named +// constant obj. +// +func (prog *Program) ConstValue(obj *types.Const) *Const { + // TODO(adonovan): opt: share (don't reallocate) + // Consts for const objects and constant ast.Exprs. + + // Universal constant? {true,false,nil} + if obj.Parent() == types.Universe { + return NewConst(obj.Val(), obj.Type()) + } + // Package-level named constant? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Const) + } + return NewConst(obj.Val(), obj.Type()) +} + +// VarValue returns the SSA Value that corresponds to a specific +// identifier denoting the source-level named variable obj. +// +// VarValue returns nil if a local variable was not found, perhaps +// because its package was not built, the debug information was not +// requested during SSA construction, or the value was optimized away. +// +// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), +// and that ident must resolve to obj. +// +// pkg is the package enclosing the reference. (A reference to a var +// always occurs within a function, so we need to know where to find it.) +// +// If the identifier is a field selector and its base expression is +// non-addressable, then VarValue returns the value of that field. +// For example: +// func f() struct {x int} +// f().x // VarValue(x) returns a *Field instruction of type int +// +// All other identifiers denote addressable locations (variables). 
+// For them, VarValue may return either the variable's address or its +// value, even when the expression is evaluated only for its value; the +// situation is reported by isAddr, the second component of the result. +// +// If !isAddr, the returned value is the one associated with the +// specific identifier. For example, +// var x int // VarValue(x) returns Const 0 here +// x = 1 // VarValue(x) returns Const 1 here +// +// It is not specified whether the value or the address is returned in +// any particular case, as it may depend upon optimizations performed +// during SSA code generation, such as registerization, constant +// folding, avoidance of materialization of subexpressions, etc. +// +func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { + // All references to a var are local to some function, possibly init. + fn := EnclosingFunction(pkg, ref) + if fn == nil { + return // e.g. def of struct field; SSA not built? + } + + id := ref[0].(*ast.Ident) + + // Defining ident of a parameter? + if id.Pos() == obj.Pos() { + for _, param := range fn.Params { + if param.Object() == obj { + return param, false + } + } + } + + // Other ident? + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if dr, ok := instr.(*DebugRef); ok { + if dr.Pos() == id.Pos() { + return dr.X, dr.IsAddr + } + } + } + } + + // Defining ident of package-level var? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Global), true + } + + return // e.g. debug info not requested, or var optimized away +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go new file mode 100644 index 000000000..d3faf4438 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -0,0 +1,1696 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This package defines a high-level intermediate representation for +// Go programs using static single-assignment (SSA) form. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/types/typeutil" +) + +// A Program is a partial or complete Go program converted to SSA form. +type Program struct { + Fset *token.FileSet // position information for the files of this Program + imported map[string]*Package // all importable Packages, keyed by import path + packages map[*types.Package]*Package // all loaded Packages, keyed by object + mode BuilderMode // set of mode bits for SSA construction + MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets + + methodsMu sync.Mutex // guards the following maps: + methodSets typeutil.Map // maps type to its concrete methodSet + runtimeTypes typeutil.Map // types for which rtypes are needed + canon typeutil.Map // type canonicalization map + bounds map[*types.Func]*Function // bounds for curried x.Method closures + thunks map[selectionKey]*Function // thunks for T.Method expressions +} + +// A Package is a single analyzed Go package containing Members for +// all package-level functions, variables, constants and types it +// declares. These may be accessed directly via Members, or via the +// type-specific accessor methods Func, Type, Var and Const. +// +// Members also contains entries for "init" (the synthetic package +// initializer) and "init#%d", the nth declared init function, +// and unspecified other things too. 
+// +type Package struct { + Prog *Program // the owning program + Pkg *types.Package // the corresponding go/types.Package + Members map[string]Member // all package members keyed by name (incl. init and init#%d) + values map[types.Object]Value // package members (incl. types and methods), keyed by object + init *Function // Func("init"); the package's init function + debug bool // include full debug info in this package + + // The following fields are set transiently, then cleared + // after building. + buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs +} + +// A Member is a member of a Go package, implemented by *NamedConst, +// *Global, *Function, or *Type; they are created by package-level +// const, var, func and type declarations respectively. +// +type Member interface { + Name() string // declared name of the package member + String() string // package-qualified name of the package member + RelString(*types.Package) string // like String, but relative refs are unqualified + Object() types.Object // typechecker's object for this member, if any + Pos() token.Pos // position of member's declaration, if known + Type() types.Type // type of the package member + Token() token.Token // token.{VAR,FUNC,CONST,TYPE} + Package() *Package // the containing package +} + +// A Type is a Member of a Package representing a package-level named type. +type Type struct { + object *types.TypeName + pkg *Package +} + +// A NamedConst is a Member of a Package representing a package-level +// named constant. +// +// Pos() returns the position of the declaring ast.ValueSpec.Names[*] +// identifier. +// +// NB: a NamedConst is not a Value; it contains a constant Value, which +// it augments with the name and position of its 'const' declaration. +// +type NamedConst struct { + object *types.Const + Value *Const + pkg *Package +} + +// A Value is an SSA value that can be referenced by an instruction. +type Value interface { + // Name returns the name of this value, and determines how + // this Value appears when used as an operand of an + // Instruction. + // + // This is the same as the source name for Parameters, + // Builtins, Functions, FreeVars, Globals. + // For constants, it is a representation of the constant's value + // and type. For all other Values this is the name of the + // virtual register defined by the instruction. + // + // The name of an SSA Value is not semantically significant, + // and may not even be unique within a function. + Name() string + + // If this value is an Instruction, String returns its + // disassembled form; otherwise it returns unspecified + // human-readable information about the Value, such as its + // kind, name and type. + String() string + + // Type returns the type of this value. Many instructions + // (e.g. IndexAddr) change their behaviour depending on the + // types of their operands. + Type() types.Type + + // Parent returns the function to which this Value belongs. + // It returns nil for named Functions, Builtin, Const and Global. + Parent() *Function + + // Referrers returns the list of instructions that have this + // value as one of their operands; it may contain duplicates + // if an instruction has a repeated operand. + // + // Referrers actually returns a pointer through which the + // caller may perform mutations to the object's state. + // + // Referrers is currently only defined if Parent()!=nil, + // i.e. 
for the function-local values FreeVar, Parameter, + // Functions (iff anonymous) and all value-defining instructions. + // It returns nil for named Functions, Builtin, Const and Global. + // + // Instruction.Operands contains the inverse of this relation. + Referrers() *[]Instruction + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this value, + // or token.NoPos if it was not explicit in the source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Lparen + // for an *ast.CallExpr. This permits a compact but + // approximate mapping from Values to source positions for use + // in diagnostic messages, for example. + // + // (Do not use this position to determine which Value + // corresponds to an ast.Expr; use Function.ValueForExpr + // instead. NB: it requires that the function was built with + // debug information.) + Pos() token.Pos +} + +// An Instruction is an SSA instruction that computes a new Value or +// has some effect. +// +// An Instruction that defines a value (e.g. BinOp) also implements +// the Value interface; an Instruction that only has an effect (e.g. Store) +// does not. +// +type Instruction interface { + // String returns the disassembled form of this value. + // + // Examples of Instructions that are Values: + // "x + y" (BinOp) + // "len([])" (Call) + // Note that the name of the Value is not printed. + // + // Examples of Instructions that are not Values: + // "return x" (Return) + // "*y = x" (Store) + // + // (The separation Value.Name() from Value.String() is useful + // for some analyses which distinguish the operation from the + // value it defines, e.g., 'y = local int' is both an allocation + // of memory 'local int' and a definition of a pointer y.) + String() string + + // Parent returns the function to which this instruction + // belongs. + Parent() *Function + + // Block returns the basic block to which this instruction + // belongs. + Block() *BasicBlock + + // setBlock sets the basic block to which this instruction belongs. + setBlock(*BasicBlock) + + // Operands returns the operands of this instruction: the + // set of Values it references. + // + // Specifically, it appends their addresses to rands, a + // user-provided slice, and returns the resulting slice, + // permitting avoidance of memory allocation. + // + // The operands are appended in undefined order, but the order + // is consistent for a given Instruction; the addresses are + // always non-nil but may point to a nil Value. Clients may + // store through the pointers, e.g. to effect a value + // renaming. + // + // Value.Referrers is a subset of the inverse of this + // relation. (Referrers are not tracked for all types of + // Values.) + Operands(rands []*Value) []*Value + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this + // instruction, or token.NoPos if it was not explicit in the + // source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Go token + // for an *ast.GoStmt. This permits a compact but approximate + // mapping from Instructions to source positions for use in + // diagnostic messages, for example. + // + // (Do not use this position to determine which Instruction + // corresponds to an ast.Expr; see the notes for Value.Pos. 
+ // This position may be used to determine which non-Value + // Instruction corresponds to some ast.Stmts, but not all: If + // and Jump instructions have no Pos(), for example.) + Pos() token.Pos +} + +// A Node is a node in the SSA value graph. Every concrete type that +// implements Node is also either a Value, an Instruction, or both. +// +// Node contains the methods common to Value and Instruction, plus the +// Operands and Referrers methods generalized to return nil for +// non-Instructions and non-Values, respectively. +// +// Node is provided to simplify SSA graph algorithms. Clients should +// use the more specific and informative Value or Instruction +// interfaces where appropriate. +// +type Node interface { + // Common methods: + String() string + Pos() token.Pos + Parent() *Function + + // Partial methods: + Operands(rands []*Value) []*Value // nil for non-Instructions + Referrers() *[]Instruction // nil for non-Values +} + +// Function represents the parameters, results, and code of a function +// or method. +// +// If Blocks is nil, this indicates an external function for which no +// Go source code is available. In this case, FreeVars and Locals +// are nil too. Clients performing whole-program analysis must +// handle external functions specially. +// +// Blocks contains the function's control-flow graph (CFG). +// Blocks[0] is the function entry point; block order is not otherwise +// semantically significant, though it may affect the readability of +// the disassembly. +// To iterate over the blocks in dominance order, use DomPreorder(). +// +// Recover is an optional second entry point to which control resumes +// after a recovered panic. The Recover block may contain only a return +// statement, preceded by a load of the function's named return +// parameters, if any. +// +// A nested function (Parent()!=nil) that refers to one or more +// lexically enclosing local variables ("free variables") has FreeVars. +// Such functions cannot be called directly but require a +// value created by MakeClosure which, via its Bindings, supplies +// values for these parameters. +// +// If the function is a method (Signature.Recv() != nil) then the first +// element of Params is the receiver parameter. +// +// A Go package may declare many functions called "init". +// For each one, Object().Name() returns "init" but Name() returns +// "init#1", etc, in declaration order. +// +// Pos() returns the declaring ast.FuncLit.Type.Func or the position +// of the ast.FuncDecl.Name, if the function was explicit in the +// source. Synthetic wrappers, for which Synthetic != "", may share +// the same position as the function they wrap. +// Syntax.Pos() always returns the position of the declaring "func" token. +// +// Type() returns the function's Signature. 
+// +type Function struct { + name string + object types.Object // a declared *types.Func or one of its wrappers + method *types.Selection // info about provenance of synthetic methods + Signature *types.Signature + pos token.Pos + + Synthetic string // provenance of synthetic function; "" for true source functions + syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + Params []*Parameter // function parameters; for methods, includes receiver + FreeVars []*FreeVar // free variables whose values must be supplied by closure + Locals []*Alloc // local variables of this function + Blocks []*BasicBlock // basic blocks of the function; nil => external + Recover *BasicBlock // optional; control transfers here after recovered panic + AnonFuncs []*Function // anonymous functions directly beneath this one + referrers []Instruction // referring instructions (iff Parent() != nil) + + // The following fields are set transiently during building, + // then cleared. + currentBlock *BasicBlock // where to emit code + objects map[types.Object]Value // addresses of local variables + namedResults []*Alloc // tuple of named results + targets *targets // linked stack of branch targets + lblocks map[*ast.Object]*lblock // labelled blocks +} + +// BasicBlock represents an SSA basic block. +// +// The final element of Instrs is always an explicit transfer of +// control (If, Jump, Return, or Panic). +// +// A block may contain no Instructions only if it is unreachable, +// i.e., Preds is nil. Empty blocks are typically pruned. +// +// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) +// graph independent of the SSA Value graph: the control-flow graph or +// CFG. It is illegal for multiple edges to exist between the same +// pair of blocks. +// +// Each BasicBlock is also a node in the dominator tree of the CFG. +// The tree may be navigated using Idom()/Dominees() and queried using +// Dominates(). +// +// The order of Preds and Succs is significant (to Phi and If +// instructions, respectively). +// +type BasicBlock struct { + Index int // index of this block within Parent().Blocks + Comment string // optional label; no semantic significance + parent *Function // parent function + Instrs []Instruction // instructions in order + Preds, Succs []*BasicBlock // predecessors and successors + succs2 [2]*BasicBlock // initial space for Succs + dom domInfo // dominator tree info + gaps int // number of nil Instrs (transient) + rundefers int // number of rundefers (transient) +} + +// Pure values ---------------------------------------- + +// A FreeVar represents a free variable of the function to which it +// belongs. +// +// FreeVars are used to implement anonymous functions, whose free +// variables are lexically captured in a closure formed by +// MakeClosure. The value of such a free var is an Alloc or another +// FreeVar and is considered a potentially escaping heap address, with +// pointer type. +// +// FreeVars are also used to implement bound method closures. Such a +// free var represents the receiver value and may be of any type that +// has concrete methods. +// +// Pos() returns the position of the value that was captured, which +// belongs to an enclosing function. 
+// +type FreeVar struct { + name string + typ types.Type + pos token.Pos + parent *Function + referrers []Instruction + + // Transiently needed during building. + outer Value // the Value captured from the enclosing context. +} + +// A Parameter represents an input parameter of a function. +// +type Parameter struct { + name string + object types.Object // a *types.Var; nil for non-source locals + typ types.Type + pos token.Pos + parent *Function + referrers []Instruction +} + +// A Const represents the value of a constant expression. +// +// The underlying type of a constant may be any boolean, numeric, or +// string type. In addition, a Const may represent the nil value of +// any reference type---interface, map, channel, pointer, slice, or +// function---but not "untyped nil". +// +// All source-level constant expressions are represented by a Const +// of the same type and value. +// +// Value holds the value of the constant, independent of its Type(), +// using go/constant representation, or nil for a typed nil value. +// +// Pos() returns token.NoPos. +// +// Example printed form: +// 42:int +// "hello":untyped string +// 3+4i:MyComplex +// +type Const struct { + typ types.Type + Value constant.Value +} + +// A Global is a named Value holding the address of a package-level +// variable. +// +// Pos() returns the position of the ast.ValueSpec.Names[*] +// identifier. +// +type Global struct { + name string + object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard + typ types.Type + pos token.Pos + + Pkg *Package +} + +// A Builtin represents a specific use of a built-in function, e.g. len. +// +// Builtins are immutable values. Builtins do not have addresses. +// Builtins can only appear in CallCommon.Value. +// +// Name() indicates the function: one of the built-in functions from the +// Go spec (excluding "make" and "new") or one of these ssa-defined +// intrinsics: +// +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T +// +// Object() returns a *types.Builtin for built-ins defined by the spec, +// nil for others. +// +// Type() returns a *types.Signature representing the effective +// signature of the built-in for this call. +// +type Builtin struct { + name string + sig *types.Signature +} + +// Value-defining instructions ---------------------------------------- + +// The Alloc instruction reserves space for a variable of the given type, +// zero-initializes it, and yields its address. +// +// Alloc values are always addresses, and have pointer types, so the +// type of the allocated variable is actually +// Type().Underlying().(*types.Pointer).Elem(). +// +// If Heap is false, Alloc allocates space in the function's +// activation record (frame); we refer to an Alloc(Heap=false) as a +// "local" alloc. Each local Alloc returns the same address each time +// it is executed within the same activation; the space is +// re-initialized to zero. +// +// If Heap is true, Alloc allocates space in the heap; we +// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc +// returns a different address each time it is executed. +// +// When Alloc is applied to a channel, map or slice type, it returns +// the address of an uninitialized (nil) reference of that kind; store +// the result of MakeSlice, MakeMap or MakeChan in that location to +// instantiate these types. 
+// +// Pos() returns the ast.CompositeLit.Lbrace for a composite literal, +// or the ast.CallExpr.Rparen for a call to new() or for a call that +// allocates a varargs slice. +// +// Example printed form: +// t0 = local int +// t1 = new int +// +type Alloc struct { + register + Comment string + Heap bool + index int // dense numbering; for lifting +} + +// The Phi instruction represents an SSA φ-node, which combines values +// that differ across incoming control-flow edges and yields a new +// value. Within a block, all φ-nodes must appear before all non-φ +// nodes. +// +// Pos() returns the position of the && or || for short-circuit +// control-flow joins, or that of the *Alloc for φ-nodes inserted +// during SSA renaming. +// +// Example printed form: +// t2 = phi [0: t0, 1: t1] +// +type Phi struct { + register + Comment string // a hint as to its purpose + Edges []Value // Edges[i] is value for Block().Preds[i] +} + +// The Call instruction represents a function or method call. +// +// The Call instruction yields the function result if there is exactly +// one. Otherwise it returns a tuple, the components of which are +// accessed via Extract. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.CallExpr.Lparen, if explicit in the source. +// +// Example printed form: +// t2 = println(t0, t1) +// t4 = t3() +// t7 = invoke t5.Println(...t6) +// +type Call struct { + register + Call CallCommon +} + +// The BinOp instruction yields the result of binary operation X Op Y. +// +// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. +// +// Example printed form: +// t1 = t0 + 1:int +// +type BinOp struct { + register + // One of: + // ADD SUB MUL QUO REM + - * / % + // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^ + // EQL NEQ LSS LEQ GTR GEQ == != < <= < >= + Op token.Token + X, Y Value +} + +// The UnOp instruction yields the result of Op X. +// ARROW is channel receive. +// MUL is pointer indirection (load). +// XOR is bitwise complement. +// SUB is negation. +// NOT is logical negation. +// +// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above +// and a boolean indicating the success of the receive. The +// components of the tuple are accessed using Extract. +// +// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. +// For receive operations (ARROW) implicit in ranging over a channel, +// Pos() returns the ast.RangeStmt.For. +// For implicit memory loads (STAR), Pos() returns the position of the +// most closely associated source-level construct; the details are not +// specified. +// +// Example printed form: +// t0 = *x +// t2 = <-t1,ok +// +type UnOp struct { + register + Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^ + X Value + CommaOk bool +} + +// The ChangeType instruction applies to X a value-preserving type +// change to Type(). +// +// Type changes are permitted: +// - between a named type and its underlying type. +// - between two named types of the same underlying type. +// - between (possibly named) pointers to identical base types. +// - from a bidirectional channel to a read- or write-channel, +// optionally adding/removing a name. +// +// This operation cannot fail dynamically. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. 
+// +// Example printed form: +// t1 = changetype *int <- IntPtr (t0) +// +type ChangeType struct { + register + X Value +} + +// The Convert instruction yields the conversion of value X to type +// Type(). One or both of those types is basic (but possibly named). +// +// A conversion may change the value and representation of its operand. +// Conversions are permitted: +// - between real numeric types. +// - between complex numeric types. +// - between string and []byte or []rune. +// - between pointers and unsafe.Pointer. +// - between unsafe.Pointer and uintptr. +// - from (Unicode) integer to (UTF-8) string. +// - from slice to array pointer. +// A conversion may imply a type name change also. +// +// Conversions of untyped string/number/bool constants to a specific +// representation are eliminated during SSA construction. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t1 = convert []byte <- string (t0) +// +type Convert struct { + register + X Value +} + +// ChangeInterface constructs a value of one interface type from a +// value of another interface type known to be assignable to it. +// This operation cannot fail. +// +// Pos() returns the ast.CallExpr.Lparen if the instruction arose from +// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the +// instruction arose from an explicit e.(T) operation; or token.NoPos +// otherwise. +// +// Example printed form: +// t1 = change interface interface{} <- I (t0) +// +type ChangeInterface struct { + register + X Value +} + +// MakeInterface constructs an instance of an interface type from a +// value of a concrete type. +// +// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set +// of X, and Program.MethodValue(m) to find the implementation of a method. +// +// To construct the zero value of an interface type T, use: +// NewConst(constant.MakeNil(), T, pos) +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t1 = make interface{} <- int (42:int) +// t2 = make Stringer <- t0 +// +type MakeInterface struct { + register + X Value +} + +// The MakeClosure instruction yields a closure value whose code is +// Fn and whose free variables' values are supplied by Bindings. +// +// Type() returns a (possibly named) *types.Signature. +// +// Pos() returns the ast.FuncLit.Type.Func for a function literal +// closure or the ast.SelectorExpr.Sel for a bound method closure. +// +// Example printed form: +// t0 = make closure anon@1.2 [x y z] +// t1 = make closure bound$(main.I).add [i] +// +type MakeClosure struct { + register + Fn Value // always a *Function + Bindings []Value // values for each free variable in Fn.FreeVars +} + +// The MakeMap instruction creates a new hash-table-based map object +// and yields a value of kind map. +// +// Type() returns a (possibly named) *types.Map. +// +// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or +// the ast.CompositeLit.Lbrack if created by a literal. +// +// Example printed form: +// t1 = make map[string]int t0 +// t1 = make StringIntMap t0 +// +type MakeMap struct { + register + Reserve Value // initial space reservation; nil => default +} + +// The MakeChan instruction creates a new channel object and yields a +// value of kind chan. +// +// Type() returns a (possibly named) *types.Chan. 
+// +// Pos() returns the ast.CallExpr.Lparen for the make(chan) that +// created it. +// +// Example printed form: +// t0 = make chan int 0 +// t0 = make IntChan 0 +// +type MakeChan struct { + register + Size Value // int; size of buffer; zero => synchronous. +} + +// The MakeSlice instruction yields a slice of length Len backed by a +// newly allocated array of length Cap. +// +// Both Len and Cap must be non-nil Values of integer type. +// +// (Alloc(types.Array) followed by Slice will not suffice because +// Alloc can only create arrays of constant length.) +// +// Type() returns a (possibly named) *types.Slice. +// +// Pos() returns the ast.CallExpr.Lparen for the make([]T) that +// created it. +// +// Example printed form: +// t1 = make []string 1:int t0 +// t1 = make StringSlice 1:int t0 +// +type MakeSlice struct { + register + Len Value + Cap Value +} + +// The Slice instruction yields a slice of an existing string, slice +// or *array X between optional integer bounds Low and High. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns string if the type of X was string, otherwise a +// *types.Slice with the same element type as X. +// +// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice +// operation, the ast.CompositeLit.Lbrace if created by a literal, or +// NoPos if not explicit in the source (e.g. a variadic argument slice). +// +// Example printed form: +// t1 = slice t0[1:] +// +type Slice struct { + register + X Value // slice, string, or *array + Low, High, Max Value // each may be nil +} + +// The FieldAddr instruction yields the address of Field of *struct X. +// +// The field is identified by its index within the field list of the +// struct type of X. +// +// Dynamically, this instruction panics if X evaluates to a nil +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t1 = &t0.name [#1] +// +type FieldAddr struct { + register + X Value // *struct + Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field) +} + +// The Field instruction yields the Field of struct X. +// +// The field is identified by its index within the field list of the +// struct type of X; by using numeric indices we avoid ambiguity of +// package-local identifiers and permit compact representations. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t1 = t0.name [#1] +// +type Field struct { + register + X Value // struct + Field int // index into X.Type().(*types.Struct).Fields +} + +// The IndexAddr instruction yields the address of the element at +// index Index of collection X. Index is an integer expression. +// +// The elements of maps and strings are not addressable; use Lookup or +// MapUpdate instead. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if +// explicit in the source. +// +// Example printed form: +// t2 = &t0[t1] +// +type IndexAddr struct { + register + X Value // slice or *array, + Index Value // numeric index +} + +// The Index instruction yields element Index of array X. 
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+//	t2 = t0[t1]
+//
+type Index struct {
+	register
+	X     Value // array
+	Index Value // integer index
+}
+
+// The Lookup instruction yields element Index of collection X, a map
+// or string. Index is an integer expression if X is a string or the
+// appropriate key type if X is a map.
+//
+// If CommaOk, the result is a 2-tuple of the value above and a
+// boolean indicating the result of a map membership test for the key.
+// The components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
+//
+// Example printed form:
+//	t2 = t0[t1]
+//	t5 = t3[t4],ok
+//
+type Lookup struct {
+	register
+	X       Value // string or map
+	Index   Value // numeric or key-typed index
+	CommaOk bool  // return a value,ok pair
+}
+
+// SelectState is a helper for Select.
+// It represents one goal state and its corresponding communication.
+//
+type SelectState struct {
+	Dir       types.ChanDir // direction of case (SendOnly or RecvOnly)
+	Chan      Value         // channel to use (for send or receive)
+	Send      Value         // value to send (for send)
+	Pos       token.Pos     // position of token.ARROW
+	DebugNode ast.Node      // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
+}
+
+// The Select instruction tests whether (or blocks until) one
+// of the specified sent or received states is entered.
+//
+// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
+// be the element type of each such state's Chan.
+// Select returns an n+2-tuple
+//    (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+// The tuple's components, described below, must be accessed via the
+// Extract instruction.
+//
+// If Blocking, select waits until exactly one state holds, i.e. a
+// channel becomes ready for the designated operation of sending or
+// receiving; select chooses one among the ready states
+// pseudorandomly, performs the send or receive operation, and sets
+// 'index' to the index of the chosen channel.
+//
+// If !Blocking, select doesn't block if no states hold; instead it
+// returns immediately with index equal to -1.
+//
+// If the chosen channel was used for a receive, the r_i component is
+// set to the received value, where i is the index of that state among
+// all n receive states; otherwise r_i has the zero value of type T_i.
+// Note that the receive index i is not the same as the state index.
+//
+// The second component of the tuple, recvOk, is a boolean whose value
+// is true iff the selected operation was a receive and the receive
+// successfully yielded a value.
+//
+// Pos() returns the ast.SelectStmt.Select.
+//
+// Example printed form:
+//	t3 = select nonblocking [<-t0, t1<-t2]
+//	t4 = select blocking []
+//
+type Select struct {
+	register
+	States   []*SelectState
+	Blocking bool
+}
+
+// The Range instruction yields an iterator over the domain and range
+// of X, which must be a string or map.
+//
+// Elements are accessed via Next.
+//
+// Type() returns an opaque and degenerate "rangeIter" type.
+//
+// Pos() returns the ast.RangeStmt.For.
+//
+// Example printed form:
+//	t0 = range "hello":string
+//
+type Range struct {
+	register
+	X Value // string or map
+}
+
+// The Next instruction reads and advances the (map or string)
+// iterator Iter and returns a 3-tuple value (ok, k, v). If the
+// iterator is not exhausted, ok is true and k and v are the next
+// elements of the domain and range, respectively. Otherwise ok is
+// false and k and v are undefined.
+//
+// Components of the tuple are accessed using Extract.
+//
+// The IsString field distinguishes iterators over strings from those
+// over maps, as the Type() alone is insufficient: consider
+// map[int]rune.
+//
+// Type() returns a *types.Tuple for the triple (ok, k, v).
+// The types of k and/or v may be types.Invalid.
+//
+// Example printed form:
+//	t1 = next t0
+//
+type Next struct {
+	register
+	Iter     Value
+	IsString bool // true => string iterator; false => map iterator.
+}
+
+// The TypeAssert instruction tests whether interface value X has type
+// AssertedType.
+//
+// If !CommaOk, on success it returns v, the result of the conversion
+// (defined below); on failure it panics.
+//
+// If CommaOk: on success it returns a pair (v, true) where v is the
+// result of the conversion; on failure it returns (z, false) where z
+// is AssertedType's zero value. The components of the pair must be
+// accessed using the Extract instruction.
+//
+// If AssertedType is a concrete type, TypeAssert checks whether the
+// dynamic type in interface X is equal to it, and if so, the result
+// of the conversion is a copy of the value in the interface.
+//
+// If AssertedType is an interface, TypeAssert checks whether the
+// dynamic type of the interface is assignable to it, and if so, the
+// result of the conversion is a copy of the interface value X.
+// If AssertedType is a superinterface of X.Type(), the operation will
+// fail iff the operand is nil. (Contrast with ChangeInterface, which
+// performs no nil-check.)
+//
+// Type() reflects the actual type of the result, possibly a
+// 2-types.Tuple; AssertedType is the asserted type.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or the
+// ast.CaseClause.Case if the instruction arose from a case of a
+// type-switch statement.
+//
+// Example printed form:
+//	t1 = typeassert t0.(int)
+//	t3 = typeassert,ok t2.(T)
+//
+type TypeAssert struct {
+	register
+	X            Value
+	AssertedType types.Type
+	CommaOk      bool
+}
+
+// The Extract instruction yields component Index of Tuple.
+//
+// This is used to access the results of instructions with multiple
+// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and
+// IndexExpr(Map).
+// +// Example printed form: +// t1 = extract t0 #1 +// +type Extract struct { + register + Tuple Value + Index int +} + +// Instructions executed for effect. They do not yield a value. -------------------- + +// The Jump instruction transfers control to the sole successor of its +// owning block. +// +// A Jump must be the last instruction of its containing BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// jump done +// +type Jump struct { + anInstruction +} + +// The If instruction transfers control to one of the two successors +// of its owning block, depending on the boolean Cond: the first if +// true, the second if false. +// +// An If instruction must be the last instruction of its containing +// BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// if t0 goto done else body +// +type If struct { + anInstruction + Cond Value +} + +// The Return instruction returns values and control back to the calling +// function. +// +// len(Results) is always equal to the number of results in the +// function's signature. +// +// If len(Results) > 1, Return returns a tuple value with the specified +// components which the caller must access using Extract instructions. +// +// There is no instruction to return a ready-made tuple like those +// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or +// a tail-call to a function with multiple result parameters. +// +// Return must be the last instruction of its containing BasicBlock. +// Such a block has no successors. +// +// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. +// +// Example printed form: +// return +// return nil:I, 2:int +// +type Return struct { + anInstruction + Results []Value + pos token.Pos +} + +// The RunDefers instruction pops and invokes the entire stack of +// procedure calls pushed by Defer instructions in this function. +// +// It is legal to encounter multiple 'rundefers' instructions in a +// single control-flow path through a function; this is useful in +// the combined init() function, for example. +// +// Pos() returns NoPos. +// +// Example printed form: +// rundefers +// +type RunDefers struct { + anInstruction +} + +// The Panic instruction initiates a panic with value X. +// +// A Panic instruction must be the last instruction of its containing +// BasicBlock, which must have no successors. +// +// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; +// they are treated as calls to a built-in function. +// +// Pos() returns the ast.CallExpr.Lparen if this panic was explicit +// in the source. +// +// Example printed form: +// panic t0 +// +type Panic struct { + anInstruction + X Value // an interface{} + pos token.Pos +} + +// The Go instruction creates a new goroutine and calls the specified +// function within it. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.GoStmt.Go. +// +// Example printed form: +// go println(t0, t1) +// go t3() +// go invoke t5.Println(...t6) +// +type Go struct { + anInstruction + Call CallCommon + pos token.Pos +} + +// The Defer instruction pushes the specified call onto a stack of +// functions to be called by a RunDefers instruction or by a panic. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.DeferStmt.Defer. 
+// +// Example printed form: +// defer println(t0, t1) +// defer t3() +// defer invoke t5.Println(...t6) +// +type Defer struct { + anInstruction + Call CallCommon + pos token.Pos +} + +// The Send instruction sends X on channel Chan. +// +// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. +// +// Example printed form: +// send t0 <- t1 +// +type Send struct { + anInstruction + Chan, X Value + pos token.Pos +} + +// The Store instruction stores Val at address Addr. +// Stores can be of arbitrary types. +// +// Pos() returns the position of the source-level construct most closely +// associated with the memory store operation. +// Since implicit memory stores are numerous and varied and depend upon +// implementation choices, the details are not specified. +// +// Example printed form: +// *x = y +// +type Store struct { + anInstruction + Addr Value + Val Value + pos token.Pos +} + +// The MapUpdate instruction updates the association of Map[Key] to +// Value. +// +// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, +// if explicit in the source. +// +// Example printed form: +// t0[t1] = t2 +// +type MapUpdate struct { + anInstruction + Map Value + Key Value + Value Value + pos token.Pos +} + +// A DebugRef instruction maps a source-level expression Expr to the +// SSA value X that represents the value (!IsAddr) or address (IsAddr) +// of that expression. +// +// DebugRef is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns Expr.Pos(), the start position of the source-level +// expression. This is not the same as the "designated" token as +// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the +// position of the ("designated") Lparen token. +// +// If Expr is an *ast.Ident denoting a var or func, Object() returns +// the object; though this information can be obtained from the type +// checker, including it here greatly facilitates debugging. +// For non-Ident expressions, Object() returns nil. +// +// DebugRefs are generated only for functions built with debugging +// enabled; see Package.SetDebugMode() and the GlobalDebug builder +// mode flag. +// +// DebugRefs are not emitted for ast.Idents referring to constants or +// predeclared identifiers, since they are trivial and numerous. +// Nor are they emitted for ast.ParenExprs. +// +// (By representing these as instructions, rather than out-of-band, +// consistency is maintained during transformation passes by the +// ordinary SSA renaming machinery.) +// +// Example printed form: +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 +// +type DebugRef struct { + anInstruction + Expr ast.Expr // the referring expression (never *ast.ParenExpr) + object types.Object // the identity of the source var/func + IsAddr bool // Expr is addressable and X is the address it denotes + X Value // the value or address of Expr +} + +// Embeddable mix-ins and helpers for common parts of other structs. ----------- + +// register is a mix-in embedded by all SSA values that are also +// instructions, i.e. virtual registers, and provides a uniform +// implementation of most of the Value interface: Value.Name() is a +// numbered register (e.g. "t0"); the other methods are field accessors. +// +// Temporary names are automatically assigned to each register on +// completion of building a function in SSA form. +// +// Clients must not assume that the 'id' value (and the Name() derived +// from it) is unique within a function. 
As always in this API, +// semantics are determined only by identity; names exist only to +// facilitate debugging. +// +type register struct { + anInstruction + num int // "name" of virtual register, e.g. "t0". Not guaranteed unique. + typ types.Type // type of virtual register + pos token.Pos // position of source expression, or NoPos + referrers []Instruction +} + +// anInstruction is a mix-in embedded by all Instructions. +// It provides the implementations of the Block and setBlock methods. +type anInstruction struct { + block *BasicBlock // the basic block of this instruction +} + +// CallCommon is contained by Go, Defer and Call to hold the +// common parts of a function or method call. +// +// Each CallCommon exists in one of two modes, function call and +// interface method invocation, or "call" and "invoke" for short. +// +// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon +// represents an ordinary function call of the value in Value, +// which may be a *Builtin, a *Function or any other value of kind +// 'func'. +// +// Value may be one of: +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. +// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. +// StaticCallee returns the identity of the callee in cases +// (a) and (b), nil otherwise. +// +// Args contains the arguments to the call. If Value is a method, +// Args[0] contains the receiver parameter. +// +// Example printed form: +// t2 = println(t0, t1) +// go t3() +// defer t5(...t6) +// +// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon +// represents a dynamically dispatched call to an interface method. +// In this mode, Value is the interface value and Method is the +// interface's abstract method. Note: an abstract method may be +// shared by multiple interfaces due to embedding; Value.Type() +// provides the specific interface used for this call. +// +// Value is implicitly supplied to the concrete method implementation +// as the receiver parameter; in other words, Args[0] holds not the +// receiver but the first true argument. +// +// Example printed form: +// t1 = invoke t0.String() +// go invoke t3.Run(t2) +// defer invoke t4.Handle(...t5) +// +// For all calls to variadic functions (Signature().Variadic()), +// the last element of Args is a slice. +// +type CallCommon struct { + Value Value // receiver (invoke mode) or func value (call mode) + Method *types.Func // abstract method (invoke mode) + Args []Value // actual parameters (in static method call, includes receiver) + pos token.Pos // position of CallExpr.Lparen, iff explicit in source +} + +// IsInvoke returns true if this call has "invoke" (not "call") mode. +func (c *CallCommon) IsInvoke() bool { + return c.Method != nil +} + +func (c *CallCommon) Pos() token.Pos { return c.pos } + +// Signature returns the signature of the called function. +// +// For an "invoke"-mode call, the signature of the interface method is +// returned. +// +// In either "call" or "invoke" mode, if the callee is a method, its +// receiver is represented by sig.Recv, not sig.Params().At(0). 
+// +func (c *CallCommon) Signature() *types.Signature { + if c.Method != nil { + return c.Method.Type().(*types.Signature) + } + return c.Value.Type().Underlying().(*types.Signature) +} + +// StaticCallee returns the callee if this is a trivially static +// "call"-mode call to a function. +func (c *CallCommon) StaticCallee() *Function { + switch fn := c.Value.(type) { + case *Function: + return fn + case *MakeClosure: + return fn.Fn.(*Function) + } + return nil +} + +// Description returns a description of the mode of this call suitable +// for a user interface, e.g., "static method call". +func (c *CallCommon) Description() string { + switch fn := c.Value.(type) { + case *Builtin: + return "built-in function call" + case *MakeClosure: + return "static function closure call" + case *Function: + if fn.Signature.Recv() != nil { + return "static method call" + } + return "static function call" + } + if c.IsInvoke() { + return "dynamic method call" // ("invoke" mode) + } + return "dynamic function call" +} + +// The CallInstruction interface, implemented by *Go, *Defer and *Call, +// exposes the common parts of function-calling instructions, +// yet provides a way back to the Value defined by *Call alone. +// +type CallInstruction interface { + Instruction + Common() *CallCommon // returns the common parts of the call + Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer) +} + +func (s *Call) Common() *CallCommon { return &s.Call } +func (s *Defer) Common() *CallCommon { return &s.Call } +func (s *Go) Common() *CallCommon { return &s.Call } + +func (s *Call) Value() *Call { return s } +func (s *Defer) Value() *Call { return nil } +func (s *Go) Value() *Call { return nil } + +func (v *Builtin) Type() types.Type { return v.sig } +func (v *Builtin) Name() string { return v.name } +func (*Builtin) Referrers() *[]Instruction { return nil } +func (v *Builtin) Pos() token.Pos { return token.NoPos } +func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } +func (v *Builtin) Parent() *Function { return nil } + +func (v *FreeVar) Type() types.Type { return v.typ } +func (v *FreeVar) Name() string { return v.name } +func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } +func (v *FreeVar) Pos() token.Pos { return v.pos } +func (v *FreeVar) Parent() *Function { return v.parent } + +func (v *Global) Type() types.Type { return v.typ } +func (v *Global) Name() string { return v.name } +func (v *Global) Parent() *Function { return nil } +func (v *Global) Pos() token.Pos { return v.pos } +func (v *Global) Referrers() *[]Instruction { return nil } +func (v *Global) Token() token.Token { return token.VAR } +func (v *Global) Object() types.Object { return v.object } +func (v *Global) String() string { return v.RelString(nil) } +func (v *Global) Package() *Package { return v.Pkg } +func (v *Global) RelString(from *types.Package) string { return relString(v, from) } + +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Pos() token.Pos { return v.pos } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { return v.object } +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Referrers() *[]Instruction { + if v.parent != nil { + return &v.referrers + } + return nil +} + 
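+// Illustrative sketch of the call-site API above: client code can walk
+// a built Function and collect its statically dispatched callees via
+// CallInstruction, CallCommon and StaticCallee. The helper name
+// staticCallees is hypothetical and not part of this package.
+//
+//	func staticCallees(fn *Function) []*Function {
+//		var out []*Function
+//		for _, b := range fn.Blocks {
+//			for _, instr := range b.Instrs {
+//				if call, ok := instr.(CallInstruction); ok {
+//					// Dynamic "call"-mode and "invoke"-mode sites
+//					// have no static callee and are skipped.
+//					if callee := call.Common().StaticCallee(); callee != nil {
+//						out = append(out, callee)
+//					}
+//				}
+//			}
+//		}
+//		return out
+//	}
+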
+func (v *Parameter) Type() types.Type { return v.typ } +func (v *Parameter) Name() string { return v.name } +func (v *Parameter) Object() types.Object { return v.object } +func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } +func (v *Parameter) Pos() token.Pos { return v.pos } +func (v *Parameter) Parent() *Function { return v.parent } + +func (v *Alloc) Type() types.Type { return v.typ } +func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } +func (v *Alloc) Pos() token.Pos { return v.pos } + +func (v *register) Type() types.Type { return v.typ } +func (v *register) setType(typ types.Type) { v.typ = typ } +func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) } +func (v *register) setNum(num int) { v.num = num } +func (v *register) Referrers() *[]Instruction { return &v.referrers } +func (v *register) Pos() token.Pos { return v.pos } +func (v *register) setPos(pos token.Pos) { v.pos = pos } + +func (v *anInstruction) Parent() *Function { return v.block.parent } +func (v *anInstruction) Block() *BasicBlock { return v.block } +func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } +func (v *anInstruction) Referrers() *[]Instruction { return nil } + +func (t *Type) Name() string { return t.object.Name() } +func (t *Type) Pos() token.Pos { return t.object.Pos() } +func (t *Type) Type() types.Type { return t.object.Type() } +func (t *Type) Token() token.Token { return token.TYPE } +func (t *Type) Object() types.Object { return t.object } +func (t *Type) String() string { return t.RelString(nil) } +func (t *Type) Package() *Package { return t.pkg } +func (t *Type) RelString(from *types.Package) string { return relString(t, from) } + +func (c *NamedConst) Name() string { return c.object.Name() } +func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } +func (c *NamedConst) String() string { return c.RelString(nil) } +func (c *NamedConst) Type() types.Type { return c.object.Type() } +func (c *NamedConst) Token() token.Token { return token.CONST } +func (c *NamedConst) Object() types.Object { return c.object } +func (c *NamedConst) Package() *Package { return c.pkg } +func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } + +func (d *DebugRef) Object() types.Object { return d.object } + +// Func returns the package-level function of the specified name, +// or nil if not found. +// +func (p *Package) Func(name string) (f *Function) { + f, _ = p.Members[name].(*Function) + return +} + +// Var returns the package-level variable of the specified name, +// or nil if not found. +// +func (p *Package) Var(name string) (g *Global) { + g, _ = p.Members[name].(*Global) + return +} + +// Const returns the package-level constant of the specified name, +// or nil if not found. +// +func (p *Package) Const(name string) (c *NamedConst) { + c, _ = p.Members[name].(*NamedConst) + return +} + +// Type returns the package-level type of the specified name, +// or nil if not found. 
+// +func (p *Package) Type(name string) (t *Type) { + t, _ = p.Members[name].(*Type) + return +} + +func (v *Call) Pos() token.Pos { return v.Call.pos } +func (s *Defer) Pos() token.Pos { return s.pos } +func (s *Go) Pos() token.Pos { return s.pos } +func (s *MapUpdate) Pos() token.Pos { return s.pos } +func (s *Panic) Pos() token.Pos { return s.pos } +func (s *Return) Pos() token.Pos { return s.pos } +func (s *Send) Pos() token.Pos { return s.pos } +func (s *Store) Pos() token.Pos { return s.pos } +func (s *If) Pos() token.Pos { return token.NoPos } +func (s *Jump) Pos() token.Pos { return token.NoPos } +func (s *RunDefers) Pos() token.Pos { return token.NoPos } +func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } + +// Operands. + +func (v *Alloc) Operands(rands []*Value) []*Value { + return rands +} + +func (v *BinOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Y) +} + +func (c *CallCommon) Operands(rands []*Value) []*Value { + rands = append(rands, &c.Value) + for i := range c.Args { + rands = append(rands, &c.Args[i]) + } + return rands +} + +func (s *Go) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Call) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Defer) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (v *ChangeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *ChangeType) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Convert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *DebugRef) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Extract) Operands(rands []*Value) []*Value { + return append(rands, &v.Tuple) +} + +func (v *Field) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *FieldAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *If) Operands(rands []*Value) []*Value { + return append(rands, &s.Cond) +} + +func (v *Index) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *IndexAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (*Jump) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Lookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *MakeChan) Operands(rands []*Value) []*Value { + return append(rands, &v.Size) +} + +func (v *MakeClosure) Operands(rands []*Value) []*Value { + rands = append(rands, &v.Fn) + for i := range v.Bindings { + rands = append(rands, &v.Bindings[i]) + } + return rands +} + +func (v *MakeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *MakeMap) Operands(rands []*Value) []*Value { + return append(rands, &v.Reserve) +} + +func (v *MakeSlice) Operands(rands []*Value) []*Value { + return append(rands, &v.Len, &v.Cap) +} + +func (v *MapUpdate) Operands(rands []*Value) []*Value { + return append(rands, &v.Map, &v.Key, &v.Value) +} + +func (v *Next) Operands(rands []*Value) []*Value { + return append(rands, &v.Iter) +} + +func (s *Panic) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Phi) Operands(rands []*Value) []*Value { + for i := range v.Edges { + rands = append(rands, &v.Edges[i]) + } + return rands +} + +func (v *Range) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *Return) 
Operands(rands []*Value) []*Value { + for i := range s.Results { + rands = append(rands, &s.Results[i]) + } + return rands +} + +func (*RunDefers) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Select) Operands(rands []*Value) []*Value { + for i := range v.States { + rands = append(rands, &v.States[i].Chan, &v.States[i].Send) + } + return rands +} + +func (s *Send) Operands(rands []*Value) []*Value { + return append(rands, &s.Chan, &s.X) +} + +func (v *Slice) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Low, &v.High, &v.Max) +} + +func (s *Store) Operands(rands []*Value) []*Value { + return append(rands, &s.Addr, &s.Val) +} + +func (v *TypeAssert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *UnOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +// Non-Instruction Values: +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go new file mode 100644 index 000000000..eab12dc55 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go @@ -0,0 +1,175 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil + +// This file defines utility functions for constructing programs in SSA form. + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/ssa" +) + +// Packages creates an SSA program for a set of packages. +// +// The packages must have been loaded from source syntax using the +// golang.org/x/tools/go/packages.Load function in LoadSyntax or +// LoadAllSyntax mode. +// +// Packages creates an SSA package for each well-typed package in the +// initial list, plus all their dependencies. The resulting list of +// packages corresponds to the list of initial packages, and may contain +// a nil if SSA code could not be constructed for the corresponding initial +// package due to type errors. +// +// Code for bodies of functions is not built until Build is called on +// the resulting Program. SSA code is constructed only for the initial +// packages with well-typed syntax trees. +// +// The mode parameter controls diagnostics and checking during SSA construction. +// +func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + return doPackages(initial, mode, false) +} + +// AllPackages creates an SSA program for a set of packages plus all +// their dependencies. +// +// The packages must have been loaded from source syntax using the +// golang.org/x/tools/go/packages.Load function in LoadAllSyntax mode. +// +// AllPackages creates an SSA package for each well-typed package in the +// initial list, plus all their dependencies. The resulting list of +// packages corresponds to the list of initial packages, and may contain +// a nil if SSA code could not be constructed for the corresponding +// initial package due to type errors. 
+// +// Code for bodies of functions is not built until Build is called on +// the resulting Program. SSA code is constructed for all packages with +// well-typed syntax trees. +// +// The mode parameter controls diagnostics and checking during SSA construction. +// +func AllPackages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + return doPackages(initial, mode, true) +} + +func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*ssa.Program, []*ssa.Package) { + + var fset *token.FileSet + if len(initial) > 0 { + fset = initial[0].Fset + } + + prog := ssa.NewProgram(fset, mode) + + isInitial := make(map[*packages.Package]bool, len(initial)) + for _, p := range initial { + isInitial[p] = true + } + + ssamap := make(map[*packages.Package]*ssa.Package) + packages.Visit(initial, nil, func(p *packages.Package) { + if p.Types != nil && !p.IllTyped { + var files []*ast.File + if deps || isInitial[p] { + files = p.Syntax + } + ssamap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true) + } + }) + + var ssapkgs []*ssa.Package + for _, p := range initial { + ssapkgs = append(ssapkgs, ssamap[p]) // may be nil + } + return prog, ssapkgs +} + +// CreateProgram returns a new program in SSA form, given a program +// loaded from source. An SSA package is created for each transitively +// error-free package of lprog. +// +// Code for bodies of functions is not built until Build is called +// on the result. +// +// The mode parameter controls diagnostics and checking during SSA construction. +// +// Deprecated: Use golang.org/x/tools/go/packages and the Packages +// function instead; see ssa.ExampleLoadPackages. +// +func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { + prog := ssa.NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + + return prog +} + +// BuildPackage builds an SSA program with IR for a single package. +// +// It populates pkg by type-checking the specified file ASTs. All +// dependencies are loaded using the importer specified by tc, which +// typically loads compiler export data; SSA code cannot be built for +// those packages. BuildPackage then constructs an ssa.Program with all +// dependency packages created, and builds and returns the SSA package +// corresponding to pkg. +// +// The caller must have set pkg.Path() to the import path. +// +// The operation fails if there were any type-checking or import errors. +// +// See ../example_test.go for an example. +// +func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) { + if fset == nil { + panic("no token.FileSet") + } + if pkg.Path() == "" { + panic("package has no import path") + } + + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { + return nil, nil, err + } + + prog := ssa.NewProgram(fset, mode) + + // Create SSA packages for all imports. + // Order is not significant. 
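+	// Note: the nil files/info arguments in CreatePackage below mean
+	// that dependency packages carry only declarations; as stated in
+	// the doc comment above, their function bodies cannot be built.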
+ created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pkg.Imports()) + + // Create and build the primary package. + ssapkg := prog.CreatePackage(pkg, files, info, false) + ssapkg.Build() + return ssapkg, info, nil +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go b/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go new file mode 100644 index 000000000..db03bf555 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go @@ -0,0 +1,234 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil + +// This file implements discovery of switch and type-switch constructs +// from low-level control flow. +// +// Many techniques exist for compiling a high-level switch with +// constant cases to efficient machine code. The optimal choice will +// depend on the data type, the specific case values, the code in the +// body of each case, and the hardware. +// Some examples: +// - a lookup table (for a switch that maps constants to constants) +// - a computed goto +// - a binary tree +// - a perfect hash +// - a two-level switch (to partition constant strings by their first byte). + +import ( + "bytes" + "fmt" + "go/token" + "go/types" + + "golang.org/x/tools/go/ssa" +) + +// A ConstCase represents a single constant comparison. +// It is part of a Switch. +type ConstCase struct { + Block *ssa.BasicBlock // block performing the comparison + Body *ssa.BasicBlock // body of the case + Value *ssa.Const // case comparand +} + +// A TypeCase represents a single type assertion. +// It is part of a Switch. +type TypeCase struct { + Block *ssa.BasicBlock // block performing the type assert + Body *ssa.BasicBlock // body of the case + Type types.Type // case type + Binding ssa.Value // value bound by this case +} + +// A Switch is a logical high-level control flow operation +// (a multiway branch) discovered by analysis of a CFG containing +// only if/else chains. It is not part of the ssa.Instruction set. +// +// One of ConstCases and TypeCases has length >= 2; +// the other is nil. +// +// In a value switch, the list of cases may contain duplicate constants. +// A type switch may contain duplicate types, or types assignable +// to an interface type also in the list. +// TODO(adonovan): eliminate such duplicates. +// +type Switch struct { + Start *ssa.BasicBlock // block containing start of if/else chain + X ssa.Value // the switch operand + ConstCases []ConstCase // ordered list of constant comparisons + TypeCases []TypeCase // ordered list of type assertions + Default *ssa.BasicBlock // successor if all comparisons fail +} + +func (sw *Switch) String() string { + // We represent each block by the String() of its + // first Instruction, e.g. "print(42:int)". 
+ var buf bytes.Buffer + if sw.ConstCases != nil { + fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) + for _, c := range sw.ConstCases { + fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0]) + } + } else { + fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) + for _, c := range sw.TypeCases { + fmt.Fprintf(&buf, "case %s %s: %s\n", + c.Binding.Name(), c.Type, c.Body.Instrs[0]) + } + } + if sw.Default != nil { + fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) + } + fmt.Fprintf(&buf, "}") + return buf.String() +} + +// Switches examines the control-flow graph of fn and returns the +// set of inferred value and type switches. A value switch tests an +// ssa.Value for equality against two or more compile-time constant +// values. Switches involving link-time constants (addresses) are +// ignored. A type switch type-asserts an ssa.Value against two or +// more types. +// +// The switches are returned in dominance order. +// +// The resulting switches do not necessarily correspond to uses of the +// 'switch' keyword in the source: for example, a single source-level +// switch statement with non-constant cases may result in zero, one or +// many Switches, one per plural sequence of constant cases. +// Switches may even be inferred from if/else- or goto-based control flow. +// (In general, the control flow constructs of the source program +// cannot be faithfully reproduced from the SSA representation.) +// +func Switches(fn *ssa.Function) []Switch { + // Traverse the CFG in dominance order, so we don't + // enter an if/else-chain in the middle. + var switches []Switch + seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet + for _, b := range fn.DomPreorder() { + if x, k := isComparisonBlock(b); x != nil { + // Block b starts a switch. + sw := Switch{Start: b, X: x} + valueSwitch(&sw, k, seen) + if len(sw.ConstCases) > 1 { + switches = append(switches, sw) + } + } + + if y, x, T := isTypeAssertBlock(b); y != nil { + // Block b starts a type switch. + sw := Switch{Start: b, X: x} + typeSwitch(&sw, y, T, seen) + if len(sw.TypeCases) > 1 { + switches = append(switches, sw) + } + } + } + return switches +} + +func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) { + b := sw.Start + x := sw.X + for x == sw.X { + if seen[b] { + break + } + seen[b] = true + + sw.ConstCases = append(sw.ConstCases, ConstCase{ + Block: b, + Body: b.Succs[0], + Value: k, + }) + b = b.Succs[1] + if len(b.Instrs) > 2 { + // Block b contains not just 'if x == k', + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + x, k = isComparisonBlock(b) + } + sw.Default = b +} + +func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) { + b := sw.Start + x := sw.X + for x == sw.X { + if seen[b] { + break + } + seen[b] = true + + sw.TypeCases = append(sw.TypeCases, TypeCase{ + Block: b, + Body: b.Succs[0], + Type: T, + Binding: y, + }) + b = b.Succs[1] + if len(b.Instrs) > 4 { + // Block b contains not just + // {TypeAssert; Extract #0; Extract #1; If} + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. 
+ break + } + y, x, T = isTypeAssertBlock(b) + } + sw.Default = b +} + +// isComparisonBlock returns the operands (v, k) if a block ends with +// a comparison v==k, where k is a compile-time constant. +// +func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) { + if n := len(b.Instrs); n >= 2 { + if i, ok := b.Instrs[n-1].(*ssa.If); ok { + if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { + if k, ok := binop.Y.(*ssa.Const); ok { + return binop.X, k + } + if k, ok := binop.X.(*ssa.Const); ok { + return binop.Y, k + } + } + } + } + return +} + +// isTypeAssertBlock returns the operands (y, x, T) if a block ends with +// a type assertion "if y, ok := x.(T); ok {". +// +func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) { + if n := len(b.Instrs); n >= 4 { + if i, ok := b.Instrs[n-1].(*ssa.If); ok { + if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 { + if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b { + // hack: relies upon instruction ordering. + if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok { + return ext0, ta.X, ta.AssertedType + } + } + } + } + } + return +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go new file mode 100644 index 000000000..3424e8a30 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil // import "golang.org/x/tools/go/ssa/ssautil" + +import "golang.org/x/tools/go/ssa" + +// This file defines utilities for visiting the SSA representation of +// a Program. +// +// TODO(adonovan): test coverage. + +// AllFunctions finds and returns the set of functions potentially +// needed by program prog, as determined by a simple linker-style +// reachability algorithm starting from the members and method-sets of +// each package. The result may include anonymous functions and +// synthetic wrappers. +// +// Precondition: all packages are built. +// +func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { + visit := visitor{ + prog: prog, + seen: make(map[*ssa.Function]bool), + } + visit.program() + return visit.seen +} + +type visitor struct { + prog *ssa.Program + seen map[*ssa.Function]bool +} + +func (visit *visitor) program() { + for _, pkg := range visit.prog.AllPackages() { + for _, mem := range pkg.Members { + if fn, ok := mem.(*ssa.Function); ok { + visit.function(fn) + } + } + } + for _, T := range visit.prog.RuntimeTypes() { + mset := visit.prog.MethodSets.MethodSet(T) + for i, n := 0, mset.Len(); i < n; i++ { + visit.function(visit.prog.MethodValue(mset.At(i))) + } + } +} + +func (visit *visitor) function(fn *ssa.Function) { + if !visit.seen[fn] { + visit.seen[fn] = true + var buf [10]*ssa.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ssa.Function); ok { + visit.function(fn) + } + } + } + } + } +} + +// MainPackages returns the subset of the specified packages +// named "main" that define a main function. +// The result may include synthetic "testmain" packages. 
+func MainPackages(pkgs []*ssa.Package) []*ssa.Package { + var mains []*ssa.Package + for _, pkg := range pkgs { + if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { + mains = append(mains, pkg) + } + } + return mains +} diff --git a/vendor/golang.org/x/tools/go/ssa/testmain.go b/vendor/golang.org/x/tools/go/ssa/testmain.go new file mode 100644 index 000000000..c4256d1ef --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/testmain.go @@ -0,0 +1,274 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// CreateTestMainPackage synthesizes a main package that runs all the +// tests of the supplied packages. +// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing. +// +// TODO(adonovan): throws this all away now that x/tools/go/packages +// provides access to the actual synthetic test main files. + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +// FindTests returns the Test, Benchmark, and Example functions +// (as defined by "go test") defined in the specified package, +// and its TestMain function, if any. +// +// Deprecated: Use golang.org/x/tools/go/packages to access synthetic +// testmain packages. +func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) { + prog := pkg.Prog + + // The first two of these may be nil: if the program doesn't import "testing", + // it can't contain any tests, but it may yet contain Examples. + var testSig *types.Signature // func(*testing.T) + var benchmarkSig *types.Signature // func(*testing.B) + var exampleSig = types.NewSignature(nil, nil, nil, false) // func() + + // Obtain the types from the parameters of testing.MainStart. + if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { + mainStart := testingPkg.Func("MainStart") + params := mainStart.Signature.Params() + testSig = funcField(params.At(1).Type()) + benchmarkSig = funcField(params.At(2).Type()) + + // Does the package define this function? + // func TestMain(*testing.M) + if f := pkg.Func("TestMain"); f != nil { + sig := f.Type().(*types.Signature) + starM := mainStart.Signature.Results().At(0).Type() // *testing.M + if sig.Results().Len() == 0 && + sig.Params().Len() == 1 && + types.Identical(sig.Params().At(0).Type(), starM) { + main = f + } + } + } + + // TODO(adonovan): use a stable order, e.g. lexical. + for _, mem := range pkg.Members { + if f, ok := mem.(*Function); ok && + ast.IsExported(f.Name()) && + strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") { + + switch { + case testSig != nil && isTestSig(f, "Test", testSig): + tests = append(tests, f) + case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig): + benchmarks = append(benchmarks, f) + case isTestSig(f, "Example", exampleSig): + examples = append(examples, f) + default: + continue + } + } + } + return +} + +// Like isTest, but checks the signature too. +func isTestSig(f *Function, prefix string, sig *types.Signature) bool { + return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig) +} + +// Given the type of one of the three slice parameters of testing.Main, +// returns the function type. 
+func funcField(slice types.Type) *types.Signature { + return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature) +} + +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +// Plundered from $GOROOT/src/cmd/go/test.go +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + return ast.IsExported(name[len(prefix):]) +} + +// CreateTestMainPackage creates and returns a synthetic "testmain" +// package for the specified package if it defines tests, benchmarks or +// executable examples, or nil otherwise. The new package is named +// "main" and provides a function named "main" that runs the tests, +// similar to the one that would be created by the 'go test' tool. +// +// Subsequent calls to prog.AllPackages include the new package. +// The package pkg must belong to the program prog. +// +// Deprecated: Use golang.org/x/tools/go/packages to access synthetic +// testmain packages. +func (prog *Program) CreateTestMainPackage(pkg *Package) *Package { + if pkg.Prog != prog { + log.Fatal("Package does not belong to Program") + } + + // Template data + var data struct { + Pkg *Package + Tests, Benchmarks, Examples []*Function + Main *Function + Go18 bool + } + data.Pkg = pkg + + // Enumerate tests. + data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg) + if data.Main == nil && + data.Tests == nil && data.Benchmarks == nil && data.Examples == nil { + return nil + } + + // Synthesize source for testmain package. + path := pkg.Pkg.Path() + "$testmain" + tmpl := testmainTmpl + if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil { + // In Go 1.8, testing.MainStart's first argument is an interface, not a func. + data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type()) + } else { + // The program does not import "testing", but FindTests + // returned non-nil, which must mean there were Examples + // but no Test, Benchmark, or TestMain functions. + + // We'll simply call them from testmain.main; this will + // ensure they don't panic, but will not check any + // "Output:" comments. + // (We should not execute an Example that has no + // "Output:" comment, but it's impossible to tell here.) + tmpl = examplesOnlyTmpl + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + log.Fatalf("internal error expanding template for %s: %v", path, err) + } + if false { // debugging + fmt.Fprintln(os.Stderr, buf.String()) + } + + // Parse and type-check the testmain package. + f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0)) + if err != nil { + log.Fatalf("internal error parsing %s: %v", path, err) + } + conf := types.Config{ + DisableUnusedImportCheck: true, + Importer: importer{pkg}, + } + files := []*ast.File{f} + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + testmainPkg, err := conf.Check(path, prog.Fset, files, info) + if err != nil { + log.Fatalf("internal error type-checking %s: %v", path, err) + } + + // Create and build SSA code. 
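+	// The testmain package is created as non-importable (the final
+	// CreatePackage argument) and built immediately below, so its
+	// function bodies are available without a separate Program.Build.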
+ testmain := prog.CreatePackage(testmainPkg, files, info, false) + testmain.SetDebugMode(false) + testmain.Build() + testmain.Func("main").Synthetic = "test main function" + testmain.Func("init").Synthetic = "package initializer" + return testmain +} + +// An implementation of types.Importer for an already loaded SSA program. +type importer struct { + pkg *Package // package under test; may be non-importable +} + +func (imp importer) Import(path string) (*types.Package, error) { + if p := imp.pkg.Prog.ImportedPackage(path); p != nil { + return p.Pkg, nil + } + if path == imp.pkg.Pkg.Path() { + return imp.pkg.Pkg, nil + } + return nil, fmt.Errorf("not found") // can't happen +} + +var testmainTmpl = template.Must(template.New("testmain").Parse(` +package main + +import "io" +import "os" +import "testing" +import p {{printf "%q" .Pkg.Pkg.Path}} + +{{if .Go18}} +type deps struct{} + +func (deps) ImportPath() string { return "" } +func (deps) MatchString(pat, str string) (bool, error) { return true, nil } +func (deps) SetPanicOnExit0(bool) {} +func (deps) StartCPUProfile(io.Writer) error { return nil } +func (deps) StartTestLog(io.Writer) {} +func (deps) StopCPUProfile() {} +func (deps) StopTestLog() error { return nil } +func (deps) WriteHeapProfile(io.Writer) error { return nil } +func (deps) WriteProfileTo(string, io.Writer, int) error { return nil } + +var match deps +{{else}} +func match(_, _ string) (bool, error) { return true, nil } +{{end}} + +func main() { + tests := []testing.InternalTest{ +{{range .Tests}} + { {{printf "%q" .Name}}, p.{{.Name}} }, +{{end}} + } + benchmarks := []testing.InternalBenchmark{ +{{range .Benchmarks}} + { {{printf "%q" .Name}}, p.{{.Name}} }, +{{end}} + } + examples := []testing.InternalExample{ +{{range .Examples}} + {Name: {{printf "%q" .Name}}, F: p.{{.Name}}}, +{{end}} + } + m := testing.MainStart(match, tests, benchmarks, examples) +{{with .Main}} + p.{{.Name}}(m) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`)) + +var examplesOnlyTmpl = template.Must(template.New("examples").Parse(` +package main + +import p {{printf "%q" .Pkg.Pkg.Path}} + +func main() { +{{range .Examples}} + p.{{.Name}}() +{{end}} +} +`)) diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go new file mode 100644 index 000000000..a09949a31 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -0,0 +1,89 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines a number of miscellaneous utility functions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + + "golang.org/x/tools/go/ast/astutil" +) + +//// AST utilities + +func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } + +// isBlankIdent returns true iff e is an Ident with name "_". +// They have no associated types.Object, and thus no type. +// +func isBlankIdent(e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && id.Name == "_" +} + +//// Type utilities. Some of these belong in go/types. + +// isPointer returns true for types whose underlying type is a pointer. +func isPointer(typ types.Type) bool { + _, ok := typ.Underlying().(*types.Pointer) + return ok +} + +func isInterface(T types.Type) bool { return types.IsInterface(T) } + +// deref returns a pointer's element type; otherwise it returns typ. 
+func deref(typ types.Type) types.Type { + if p, ok := typ.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return typ +} + +// recvType returns the receiver type of method obj. +func recvType(obj *types.Func) types.Type { + return obj.Type().(*types.Signature).Recv().Type() +} + +// logStack prints the formatted "start" message to stderr and +// returns a closure that prints the corresponding "end" message. +// Call using 'defer logStack(...)()' to show builder stack on panic. +// Don't forget trailing parens! +// +func logStack(format string, args ...interface{}) func() { + msg := fmt.Sprintf(format, args...) + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, "\n") + return func() { + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, " end\n") + } +} + +// newVar creates a 'var' for use in a types.Tuple. +func newVar(name string, typ types.Type) *types.Var { + return types.NewParam(token.NoPos, nil, name, typ) +} + +// anonVar creates an anonymous 'var' for use in a types.Tuple. +func anonVar(typ types.Type) *types.Var { + return newVar("", typ) +} + +var lenResults = types.NewTuple(anonVar(tInt)) + +// makeLen returns the len builtin specialized to type func(T)int. +func makeLen(T types.Type) *Builtin { + lenParams := types.NewTuple(anonVar(T)) + return &Builtin{ + name: "len", + sig: types.NewSignature(nil, lenParams, lenResults, false), + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/wrappers.go b/vendor/golang.org/x/tools/go/ssa/wrappers.go new file mode 100644 index 000000000..a4ae71d8c --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/wrappers.go @@ -0,0 +1,290 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines synthesis of Functions that delegate to declared +// methods; they come in three kinds: +// +// (1) wrappers: methods that wrap declared methods, performing +// implicit pointer indirections and embedded field selections. +// +// (2) thunks: funcs that wrap declared methods. Like wrappers, +// thunks perform indirections and field selections. The thunk's +// first parameter is used as the receiver for the method call. +// +// (3) bounds: funcs that wrap declared methods. The bound's sole +// free variable, supplied by a closure, is used as the receiver +// for the method call. No indirections or field selections are +// performed since they can be done before the call. + +import ( + "fmt" + + "go/types" +) + +// -- wrappers ----------------------------------------------------------- + +// makeWrapper returns a synthetic method that delegates to the +// declared method denoted by meth.Obj(), first performing any +// necessary pointer indirections or field selections implied by meth. +// +// The resulting method's receiver type is meth.Recv(). +// +// This function is versatile but quite subtle! Consider the +// following axes of variation when making changes: +// - optional receiver indirection +// - optional implicit field selections +// - meth.Obj() may denote a concrete or an interface method +// - the result may be a thunk or a wrapper. 
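+//
+// Editor's note: an illustrative case (not part of the upstream source). Given
+//
+//	type T struct{}
+//	func (t *T) Get() int { return 0 }
+//	type S struct{ T }
+//
+// a call s.Get() on an addressable value s of type S goes through a synthetic
+// wrapper for the selection: the wrapper selects the embedded T field, takes
+// its address, and then delegates to the declared (*T).Get. makeWrapper builds
+// such delegating functions.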
+// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func makeWrapper(prog *Program, sel *types.Selection) *Function { + obj := sel.Obj().(*types.Func) // the declared function + sig := sel.Type().(*types.Signature) // type of this wrapper + + var recv *types.Var // wrapper's receiver or thunk's params[0] + name := obj.Name() + var description string + var start int // first regular param + if sel.Kind() == types.MethodExpr { + name += "$thunk" + description = "thunk" + recv = sig.Params().At(0) + start = 1 + } else { + description = "wrapper" + recv = sig.Recv() + } + + description = fmt.Sprintf("%s for %s", description, sel.Obj()) + if prog.mode&LogSource != 0 { + defer logStack("make %s to (%s)", description, recv.Type())() + } + fn := &Function{ + name: name, + method: sel, + object: obj, + Signature: sig, + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + } + fn.startBody() + fn.addSpilledParam(recv) + createParams(fn, start) + + indices := sel.Index() + + var v Value = fn.Locals[0] // spilled receiver + if isPointer(sel.Recv()) { + v = emitLoad(fn, v) + + // For simple indirection wrappers, perform an informative nil-check: + // "value method (T).f called using nil *T pointer" + if len(indices) == 1 && !isPointer(recvType(obj)) { + var c Call + c.Call.Value = &Builtin{ + name: "ssa:wrapnilchk", + sig: types.NewSignature(nil, + types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(sel.Recv())), false), + } + c.Call.Args = []Value{ + v, + stringConst(deref(sel.Recv()).String()), + stringConst(sel.Obj().Name()), + } + c.setType(v.Type()) + v = fn.emit(&c) + } + } + + // Invariant: v is a pointer, either + // value of *A receiver param, or + // address of A spilled receiver. + + // We use pointer arithmetic (FieldAddr possibly followed by + // Load) in preference to value extraction (Field possibly + // preceded by Load). + + v = emitImplicitSelections(fn, v, indices[:len(indices)-1]) + + // Invariant: v is a pointer, either + // value of implicit *C field, or + // address of implicit C field. + + var c Call + if r := recvType(obj); !isInterface(r) { // concrete method + if !isPointer(r) { + v = emitLoad(fn, v) + } + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = append(c.Call.Args, v) + } else { + c.Call.Method = obj + c.Call.Value = emitLoad(fn, v) + } + for _, arg := range fn.Params[1:] { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() + return fn +} + +// createParams creates parameters for wrapper method fn based on its +// Signature.Params, which do not include the receiver. +// start is the index of the first regular parameter to use. +// +func createParams(fn *Function, start int) { + tparams := fn.Signature.Params() + for i, n := start, tparams.Len(); i < n; i++ { + fn.addParamObj(tparams.At(i)) + } +} + +// -- bounds ----------------------------------------------------------- + +// makeBound returns a bound method wrapper (or "bound"), a synthetic +// function that delegates to a concrete or interface method denoted +// by obj. The resulting function has no receiver, but has one free +// variable which will be used as the method's receiver in the +// tail-call. +// +// Use MakeClosure with such a wrapper to construct a bound method +// closure. 
e.g.: +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() +// +// f is a closure of a synthetic wrapper defined as if by: +// +// f := func() { return t.meth() } +// +// Unlike makeWrapper, makeBound need perform no indirection or field +// selections because that can be done before the closure is +// constructed. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeBound(prog *Program, obj *types.Func) *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + fn, ok := prog.bounds[obj] + if !ok { + description := fmt.Sprintf("bound method wrapper for %s", obj) + if prog.mode&LogSource != 0 { + defer logStack("%s", description)() + } + fn = &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + } + + fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} + fn.FreeVars = []*FreeVar{fv} + fn.startBody() + createParams(fn, 0) + var c Call + + if !isInterface(recvType(obj)) { // concrete + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = []Value{fv} + } else { + c.Call.Value = fv + c.Call.Method = obj + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() + + prog.bounds[obj] = fn + } + return fn +} + +// -- thunks ----------------------------------------------------------- + +// makeThunk returns a thunk, a synthetic function that delegates to a +// concrete or interface method denoted by sel.Obj(). The resulting +// function has no receiver, but has an additional (first) regular +// parameter. +// +// Precondition: sel.Kind() == types.MethodExpr. +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() +// +// f is a synthetic wrapper defined as if by: +// +// f := func(t T) { return t.meth() } +// +// TODO(adonovan): opt: currently the stub is created even when used +// directly in a function call: C.f(i, 0). This is less efficient +// than inlining the stub. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeThunk(prog *Program, sel *types.Selection) *Function { + if sel.Kind() != types.MethodExpr { + panic(sel) + } + + key := selectionKey{ + kind: sel.Kind(), + recv: sel.Recv(), + obj: sel.Obj(), + index: fmt.Sprint(sel.Index()), + indirect: sel.Indirect(), + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Canonicalize key.recv to avoid constructing duplicate thunks. + canonRecv, ok := prog.canon.At(key.recv).(types.Type) + if !ok { + canonRecv = key.recv + prog.canon.Set(key.recv, canonRecv) + } + key.recv = canonRecv + + fn, ok := prog.thunks[key] + if !ok { + fn = makeWrapper(prog, sel) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver + } + prog.thunks[key] = fn + } + return fn +} + +func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { + return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) +} + +// selectionKey is like types.Selection but a usable map key. 
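+// Note (editor's addition): the index field holds fmt.Sprint(sel.Index())
+// rather than the []int itself because slices are not comparable and so cannot
+// appear in a map key; the recv field is canonicalized via Program.canon in
+// makeThunk so that structurally identical receiver types share one thunk.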
+type selectionKey struct { + kind types.SelectionKind + recv types.Type // canonicalized via Program.canon + obj types.Object + index string + indirect bool +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 000000000..cffd7acbe --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,524 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// type A struct{ X int } +// type B A +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "strconv" + "strings" + + "go/types" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRU]. +// - The OT operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. 
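+//
+// Editor's note: an illustrative round trip (not part of the upstream source).
+// Given a types.Object obj belonging to *types.Package pkg, and pkg2, a second
+// *types.Package for the same import path (for example one reconstructed from
+// export data):
+//
+//	p, err := objectpath.For(obj)
+//	if err != nil {
+//		// obj is not reachable from the package's API (e.g. a local variable).
+//	}
+//	obj2, err := objectpath.Object(pkg2, p)
+//	// obj2 denotes the object in pkg2 that is logically equivalent to obj.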
+// +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named) +) + +// The For function returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.Const, // Only package-level constants have a path. + *types.TypeName, // Only package-level types have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. 
+ // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + // TODO(adonovan): opt: if the method is concrete, + // do a specialized version of the rest of this function so + // that it's O(1) not O(|scope|). Basically 'find' is needed + // only for struct fields and interface methods. + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + names := scope.Names() + for _, name := range names { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path); r != nil { + return Path(r), nil + } + } else { + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range names { + o := scope.Lookup(name) + path := append(empty, name...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType)); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// find finds obj within type T, returning the path to it, or nil if not found. +func find(obj types.Object, T types.Type, path []byte) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey)); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem)) + case *types.Signature: + if r := find(obj, T.Params(), append(path, opParams)); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults)) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + f := T.Field(i) + path2 := appendOpArg(path, opField, i) + if f == obj { + return path2 // found field var + } + if r := find(obj, f.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + } + panic(T) +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Interface,Named} + type hasMethods interface { + Method(int) *types.Func + NumMethods() int + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. 
+ if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + t = named.Underlying() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + hasMethods, ok := t.(hasMethods) // Interface or Named + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t) + } + if n := hasMethods.NumMethods(); index >= n { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) + } + obj = hasMethods.Method(index) + t = nil + + case opObj: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + obj = named.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 000000000..38f596daf --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. 
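+//
+// Editor's note: an illustrative use (not part of the upstream source), given
+// a type-checked *ast.File f and its *types.Info info:
+//
+//	ast.Inspect(f, func(n ast.Node) bool {
+//		if call, ok := n.(*ast.CallExpr); ok {
+//			if fn := typeutil.StaticCallee(info, call); fn != nil {
+//				fmt.Println("static call to", fn.FullName())
+//			}
+//		}
+//		return true
+//	})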
+func Callee(info *types.Info, call *ast.CallExpr) types.Object { + var obj types.Object + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + return obj +} + +// StaticCallee returns the target (function or method) of a static +// function call, if any. It returns nil for calls to builtins. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 000000000..9c441dba9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +// +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 000000000..c7f754500 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,313 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to interface{} values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary interface{} values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +// +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value interface{} +} + +// SetHasher sets the hasher used by Map. 
+// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +// +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +// +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +// +func (m *Map) At(key types.Type) interface{} { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +// +func (m *Map) Iterate(f func(key types.Type, value interface{})) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. 
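+//
+// Editor's note: a minimal usage sketch of Map (not part of the upstream
+// source). Two distinct but identical types address the same entry:
+//
+//	var m typeutil.Map // the zero value is ready to use
+//	t1 := types.NewSlice(types.Typ[types.Int])
+//	t2 := types.NewSlice(types.Typ[types.Int]) // a different pointer, identical type
+//	m.Set(t1, "slice of int")
+//	v := m.At(t2) // v == "slice of int"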
+func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ interface{}) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value interface{}) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +// +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +// +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{make(map[types.Type]uint32)} +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Interface: + var hash uint32 = 9103 + for i, n := 0, t.NumMethods(); i < n; i++ { + // See go/types.identicalMethods for rationale. + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) + } + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + // Not safe with a copying GC; objects may move. + return uint32(reflect.ValueOf(t.Obj()).Pointer()) + + case *types.Tuple: + return h.hashTuple(t) + } + panic(t) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. 
+ n := tuple.Len() + var hash uint32 = 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 000000000..32084610f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +// +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := T.(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := T.Elem().(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 000000000..9849c24ce --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import "go/types" + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. 
+// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +// +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := T.(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go new file mode 100644 index 000000000..8be18a66b --- /dev/null +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports // import "golang.org/x/tools/imports" + +import ( + "io/ioutil" + "log" + + "golang.org/x/tools/internal/gocommand" + intimp "golang.org/x/tools/internal/imports" +) + +// Options specifies options for processing files. +type Options struct { + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Debug controls verbose logging. +var Debug = false + +// LocalPrefix is a comma-separated string of import path prefixes, which, if +// set, instructs Process to sort the import paths with the given prefixes +// into another group after 3rd-party packages. +var LocalPrefix string + +// Process formats and adjusts imports for the provided file. +// If opt is nil the defaults are used, and if src is nil the source +// is read from the filesystem. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +// To process data ``as if'' it were in filename, pass the data as a non-nil src. 
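+//
+// Editor's note: a minimal usage sketch (not part of the upstream source),
+// formatting an in-memory fragment and letting Process add the missing
+// "fmt" import (the file name "example.go" is illustrative):
+//
+//	src := []byte("package main\n\nfunc main() { fmt.Println(\"hi\") }\n")
+//	out, err := imports.Process("example.go", src, nil) // nil opt selects the defaults
+//	// on success, out contains the file with `import "fmt"` inserted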
+func Process(filename string, src []byte, opt *Options) ([]byte, error) { + var err error + if src == nil { + src, err = ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + } + if opt == nil { + opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} + } + intopt := &intimp.Options{ + Env: &intimp.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + LocalPrefix: LocalPrefix, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + if Debug { + intopt.Env.Logf = log.Printf + } + return intimp.Process(filename, src, intopt) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + return intimp.VendorlessPath(ipath) +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go new file mode 100644 index 000000000..01f6e829f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -0,0 +1,425 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal exposes internal-only fields from go/analysis. +package analysisinternal + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/lsp/fuzzy" +) + +var ( + GetTypeErrors func(p interface{}) []types.Error + SetTypeErrors func(p interface{}, errors []types.Error) +) + +func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { + // Get the end position for the type error. + offset, end := fset.PositionFor(start, false).Offset, start + if offset >= len(src) { + return end + } + if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { + end = start + token.Pos(width) + } + return end +} + +func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + under := typ + if n, ok := typ.(*types.Named); ok { + under = n.Underlying() + } + switch u := under.(type) { + case *types.Basic: + switch { + case u.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case u.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case u.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + default: + panic("unknown basic type") + } + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: + return ast.NewIdent("nil") + case *types.Struct: + texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here. 
+ if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: texpr, + } + } + return nil +} + +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + case *types.Pointer: + x := TypeExpr(fset, f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(fset, f, pkg, t.Key()) + value := TypeExpr(fset, f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(fset, f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(fset, f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(fset, f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + case *types.Named: + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + if t.Obj().Pkg() == pkg { + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + // If the file already imports the package under another name, use that. + for _, group := range astutil.Imports(fset, f) { + for _, cand := range group { + if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) + default: + return nil + } +} + +type TypeErrorPass string + +const ( + NoNewVars TypeErrorPass = "nonewvars" + NoResultValues TypeErrorPass = "noresultvalues" + UndeclaredName TypeErrorPass = "undeclaredname" +) + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. 
+// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. +func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +// WalkASTWithParent walks the AST rooted at n. The semantics are +// similar to ast.Inspect except it does not call f(nil). +func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + ancestors = append(ancestors, n) + return f(n, parent) + }) +} + +// FindMatchingIdents finds all identifiers in 'node' that match any of the given types. +// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within +// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that +// is unrecognized. +func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident { + matches := map[types.Type][]*ast.Ident{} + // Initialize matches to contain the variable types we are searching for. + for _, typ := range typs { + if typ == nil { + continue + } + matches[typ] = []*ast.Ident{} + } + seen := map[types.Object]struct{}{} + ast.Inspect(node, func(n ast.Node) bool { + if n == nil { + return false + } + // Prevent circular definitions. 
If 'pos' is within an assignment statement, do not + // allow any identifiers in that assignment statement to be selected. Otherwise, + // we could do the following, where 'x' satisfies the type of 'f0': + // + // x := fakeStruct{f0: x} + // + assignment, ok := n.(*ast.AssignStmt) + if ok && pos > assignment.Pos() && pos <= assignment.End() { + return false + } + if n.End() > pos { + return n.Pos() <= pos + } + ident, ok := n.(*ast.Ident) + if !ok || ident.Name == "_" { + return true + } + obj := info.Defs[ident] + if obj == nil || obj.Type() == nil { + return true + } + if _, ok := obj.(*types.TypeName); ok { + return true + } + // Prevent duplicates in matches' values. + if _, ok = seen[obj]; ok { + return true + } + seen[obj] = struct{}{} + // Find the scope for the given position. Then, check whether the object + // exists within the scope. + innerScope := pkg.Scope().Innermost(pos) + if innerScope == nil { + return true + } + _, foundObj := innerScope.LookupParent(ident.Name, pos) + if foundObj != obj { + return true + } + // The object must match one of the types that we are searching for. + if idents, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name)) + } + // If the object type does not exactly match any of the target types, greedily + // find the first target type that the object type can satisfy. + for typ := range matches { + if obj.Type() == typ { + continue + } + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ast.NewIdent(ident.Name)) + } + } + return true + }) + return matches +} + +func equivalentTypes(want, got types.Type) bool { + if want == got || types.Identical(want, got) { + return true + } + // Code segment to help check for untyped equality from (golang/go#32146). + if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { + if lhs, ok := got.Underlying().(*types.Basic); ok { + return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType + } + } + return types.AssignableTo(want, got) +} + +// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the +// given pattern. We return the identifier whose name is most similar to the pattern. +func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { + fuzz := fuzzy.NewMatcher(pattern) + var bestFuzz ast.Expr + highScore := float32(0) // minimum score is 0 (no match) + for _, ident := range idents { + // TODO: Improve scoring algorithm. + score := fuzz.Score(ident.Name) + if score > highScore { + highScore = score + bestFuzz = ident + } else if score == 0 { + // Order matters in the fuzzy matching algorithm. If we find no match + // when matching the target to the identifier, try matching the identifier + // to the target. + revFuzz := fuzzy.NewMatcher(ident.Name) + revScore := revFuzz.Score(pattern) + if revScore > highScore { + highScore = revScore + bestFuzz = ident + } + } + } + return bestFuzz +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 000000000..a6cf0e64a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. 
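+//
+// Editor's note (not part of the upstream source): in this package an Event is
+// a value carrying a timestamp plus a small set of label.Labels; events are
+// delivered to the process-wide Exporter installed via SetExporter (export.go),
+// if one is installed, and the helpers in fast.go (Log1, Metric1, Start1, ...)
+// fill the inline label array so the common one- and two-label cases avoid
+// allocation.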
+package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that occurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. + // A log message with two values will use 3 labels (one for each value and + // one for the message itself). + + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +// eventLabelMap implements label.Map for a the labels of an Event. +type eventLabelMap struct { + event Event +} + +func (ev Event) At() time.Time { return ev.at } + +func (ev Event) Format(f fmt.State, r rune) { + if !ev.at.IsZero() { + fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) + } + for index := 0; ev.Valid(index); index++ { + if l := ev.Label(index); l.Valid() { + fmt.Fprintf(f, "\n\t%v", l) + } + } +} + +func (ev Event) Valid(index int) bool { + return index >= 0 && index < len(ev.static)+len(ev.dynamic) +} + +func (ev Event) Label(index int) label.Label { + if index < len(ev.static) { + return ev.static[index] + } + return ev.dynamic[index-len(ev.static)] +} + +func (ev Event) Find(key label.Key) label.Label { + for _, l := range ev.static { + if l.Key() == key { + return l + } + } + for _, l := range ev.dynamic { + if l.Key() == key { + return l + } + } + return label.Label{} +} + +func MakeEvent(static [3]label.Label, labels []label.Label) Event { + return Event{ + static: static, + dynamic: labels, + } +} + +// CloneEvent event returns a copy of the event with the time adjusted to at. +func CloneEvent(ev Event, at time.Time) Event { + ev.at = at + return ev +} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go new file mode 100644 index 000000000..05f3a9a57 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -0,0 +1,70 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "sync/atomic" + "time" + "unsafe" + + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, Event, label.Map) context.Context + +var ( + exporter unsafe.Pointer +) + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + p := unsafe.Pointer(&e) + if e == nil { + // &e is always valid, and so p is always valid, but for the early abort + // of ProcessEvent to be efficient it needs to make the nil check on the + // pointer without having to dereference it, so we make the nil function + // also a nil pointer + p = nil + } + atomic.StorePointer(&exporter, p) +} + +// deliver is called to deliver an event to the supplied exporter. +// it will fill in the time. 
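SetExporter above keeps the exporter behind an atomically updated unsafe.Pointer so the hot path can abort on a single nil check without taking a lock. A self-contained sketch of that pattern with an invented handler type (the names here are illustrative, not the package's API):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// handler stands in for the Exporter function type.
type handler func(msg string)

// current holds a *handler; nil means nothing is installed.
var current unsafe.Pointer

// setHandler mirrors SetExporter: a nil function is stored as a nil pointer
// so that emit can detect "no handler" without dereferencing anything.
func setHandler(h handler) {
	p := unsafe.Pointer(&h)
	if h == nil {
		p = nil
	}
	atomic.StorePointer(&current, p)
}

// emit mirrors Export: load the pointer once and bail out early if unset.
func emit(msg string) {
	hp := (*handler)(atomic.LoadPointer(&current))
	if hp == nil {
		return
	}
	(*hp)(msg)
}

func main() {
	emit("dropped: nothing installed yet")
	setHandler(func(m string) { fmt.Println("handled:", m) })
	emit("hello")
}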
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { + // add the current time to the event + ev.at = time.Now() + // hand the event off to the current exporter + return exporter(ctx, ev, ev) +} + +// Export is called to deliver an event to the global exporter if set. +func Export(ctx context.Context, ev Event) context.Context { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx + } + return deliver(ctx, *exporterPtr, ev) +} + +// ExportPair is called to deliver a start event to the supplied exporter. +// It also returns a function that will deliver the end event to the same +// exporter. +// It will fill in the time. +func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx, func() {} + } + ctx = deliver(ctx, *exporterPtr, begin) + return ctx, func() { deliver(ctx, *exporterPtr, end) } +} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go new file mode 100644 index 000000000..06c1d4615 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/fast.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Log1 takes a message and one label delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. +func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. +func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. 
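ExportPair above, and the Start helpers built on it, hand back a closure that emits the matching end event; callers are expected to defer it. The same shape reduced to plain logging (the function name and timings are made up for the sketch):

package main

import (
	"log"
	"time"
)

// startSpan logs a start event and returns the function that ends the span,
// mirroring the (context.Context, func()) pair returned by Start1/Start2.
func startSpan(name string) func() {
	begin := time.Now()
	log.Printf("start %s", name)
	return func() {
		log.Printf("end %s after %s", name, time.Since(begin))
	}
}

func main() {
	// startSpan runs immediately; only the returned end function is deferred.
	defer startSpan("compute")()

	time.Sleep(50 * time.Millisecond) // stand-in for real work
}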
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 000000000..5dc6e6bab --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 000000000..4d55e577d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. 
+func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 000000000..a02206e30 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. 
+func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. 
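All of the integer keys above share one uint64 slot in a Label: Of converts the value to uint64 and From converts it back, relying on Go's two's-complement integer conversions to round-trip negative values. A quick demonstration of that round trip (plain standard-library Go, separate from the keys package):

package main

import "fmt"

func main() {
	v := int32(-42)

	// Of-style packing: the negative int32 becomes a 64-bit pattern.
	packed := uint64(v)

	// From-style unpacking: converting back to int32 keeps the low 32 bits,
	// which restores the original negative value.
	fmt.Println(int32(packed)) // -42
	fmt.Println(int32(packed) == v)
}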
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. 
+func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float32) Of(v float32) label.Label { + return label.Of64(k, uint64(math.Float32bits(v))) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float32) Get(lm label.Map) float32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float32) From(t label.Label) float32 { + return math.Float32frombits(uint32(t.Unpack64())) +} + +// Float64 represents a key +type Float64 struct { + name string + description string +} + +// NewFloat64 creates a new Key for int64 values. +func NewFloat64(name, description string) *Float64 { + return &Float64{name: name, description: description} +} + +func (k *Float64) Name() string { return k.name } +func (k *Float64) Description() string { return k.description } + +func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float64) Of(v float64) label.Label { + return label.Of64(k, math.Float64bits(v)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float64) Get(lm label.Map) float64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float64) From(t label.Label) float64 { + return math.Float64frombits(t.Unpack64()) +} + +// String represents a key +type String struct { + name string + description string +} + +// NewString creates a new Key for int64 values. +func NewString(name, description string) *String { + return &String{name: name, description: description} +} + +func (k *String) Name() string { return k.name } +func (k *String) Description() string { return k.description } + +func (k *String) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendQuote(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *String) Of(v string) label.Label { return label.OfString(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *String) Get(lm label.Map) string { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return "" +} + +// From can be used to get a value from a Label. +func (k *String) From(t label.Label) string { return t.UnpackString() } + +// Boolean represents a key +type Boolean struct { + name string + description string +} + +// NewBoolean creates a new Key for bool values. +func NewBoolean(name, description string) *Boolean { + return &Boolean{name: name, description: description} +} + +func (k *Boolean) Name() string { return k.name } +func (k *Boolean) Description() string { return k.description } + +func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendBool(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. 
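The Float32 and Float64 keys above cannot simply convert through uint64 without losing information, so they store the IEEE-754 bit pattern via math.Float32bits / math.Float64bits and decode it again on the way out. A tiny round-trip check of that idea:

package main

import (
	"fmt"
	"math"
)

func main() {
	f := float32(3.75)

	// Of-style packing: keep the exact bit pattern, widened into a uint64.
	packed := uint64(math.Float32bits(f))

	// From-style unpacking: narrow back to 32 bits and reinterpret as float.
	back := math.Float32frombits(uint32(packed))

	fmt.Println(back == f) // true: no precision was lost
}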
+func (k *Boolean) Of(v bool) label.Label { + if v { + return label.Of64(k, 1) + } + return label.Of64(k, 0) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Boolean) Get(lm label.Map) bool { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return false +} + +// From can be used to get a value from a Label. +func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } + +// Error represents a key +type Error struct { + name string + description string +} + +// NewError creates a new Key for int64 values. +func NewError(name, description string) *Error { + return &Error{name: name, description: description} +} + +func (k *Error) Name() string { return k.name } +func (k *Error) Description() string { return k.description } + +func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { + io.WriteString(w, k.From(l).Error()) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Error) Get(lm label.Map) error { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Error) From(t label.Label) error { + err, _ := t.UnpackValue().(error) + return err +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go new file mode 100644 index 000000000..7e9586659 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +var ( + // Msg is a key used to add message strings to label lists. + Msg = NewString("message", "a readable message") + // Label is a key used to indicate an event adds labels to the context. + Label = NewTag("label", "a label context marker") + // Start is used for things like traces that have a name. + Start = NewString("start", "span start") + // Metric is a key used to indicate an event records metrics. + End = NewTag("end", "a span end marker") + // Metric is a key used to indicate an event records metrics. + Detach = NewTag("detach", "a span detach marker") + // Err is a key used to add error values to label lists. + Err = NewError("error", "an error that occurred") + // Metric is a key used to indicate an event records metrics. + Metric = NewTag("metric", "a metric event marker") +) diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go new file mode 100644 index 000000000..0f526e1f9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -0,0 +1,215 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package label + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +// Key is used as the identity of a Label. +// Keys are intended to be compared by pointer only, the name should be unique +// for communicating with external systems, but it is not required or enforced. +type Key interface { + // Name returns the key name. + Name() string + // Description returns a string that can be used to describe the value. 
+ Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped interface{} +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. 
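OfString above, together with the UnpackString shown just below, splits a string into its data pointer and length so a Label can carry it in the existing untyped and packed fields without an extra allocation. The sketch below re-creates that split with reflect.StringHeader, the same mechanism the vendored code uses; in programs targeting newer Go releases, unsafe.String and unsafe.StringData would be the safer spelling. Helper names are invented.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// packString takes a string apart into the two fields a Label has room for:
// a pointer (the untyped slot) and a length (the packed slot).
func packString(s string) (data unsafe.Pointer, length uint64) {
	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return unsafe.Pointer(hdr.Data), uint64(hdr.Len)
}

// unpackString rebuilds the string from the saved pointer and length,
// mirroring UnpackString.
func unpackString(data unsafe.Pointer, length uint64) string {
	var s string
	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	hdr.Data = uintptr(data)
	hdr.Len = int(length)
	return s
}

func main() {
	data, length := packString("hello, labels")
	fmt.Println(unpackString(data, length))
}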
+func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 000000000..9887f7e7a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. 
+// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
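The dispatch loop in Walk above keeps a todo stack and disables its send case by setting workc to nil whenever the stack is empty; a send on a nil channel blocks forever, so the select simply ignores that branch until new work shows up. A reduced, runnable version of the same trick (the squaring worker is only filler):

package main

import "fmt"

func main() {
	work := make(chan int)
	results := make(chan int)

	// A single worker that squares whatever it is handed.
	go func() {
		for n := range work {
			results <- n * n
		}
	}()

	todo := []int{1, 2, 3}
	received := 0
	for received < 3 {
		// Disable the send case while there is nothing to hand out: a nil
		// channel can never be selected, exactly as in Walk above.
		sendc := work
		var next int
		if len(todo) == 0 {
			sendc = nil
		} else {
			next = todo[len(todo)-1]
		}

		select {
		case sendc <- next:
			todo = todo[:len(todo)-1]
		case r := <-results:
			fmt.Println("result:", r)
			received++
		}
	}
	close(work)
}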
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == ErrTraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 000000000..d58595dbd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd || openbsd || netbsd +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 000000000..ea02b9ebf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 000000000..d5c9c321e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
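onDirEnt and walk above steer the traversal with sentinel errors: ErrTraverseLink, ErrSkipFiles and filepath.SkipDir returned from the callback change what the walker does next. The standard library's filepath.WalkDir follows the same convention with fs.SkipDir, shown here as a point of comparison (pruning "vendor" directories is just an arbitrary example filter):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// Returning the fs.SkipDir sentinel from the callback prunes a subtree,
	// the same error-as-control-flow idea fastwalk uses with its own
	// sentinel errors.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == "vendor" {
			return fs.SkipDir
		}
		fmt.Println(path, d.IsDir())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}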
+ +//go:build darwin || freebsd || openbsd || netbsd +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 000000000..c82e57df8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !appengine +// +build linux,!appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 000000000..085d31160 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd) +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 000000000..58bd87841 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,153 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (linux || darwin || freebsd || openbsd || netbsd) && !appengine +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. 
+const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = readDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == ErrSkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/37269 + dirent := &syscall.Dirent{} + copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} + +// According to https://golang.org/doc/go1.14#runtime +// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS +// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. +// +// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. +// We need to retry in this case. 
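The comment above explains why, since Go 1.14's asynchronous preemption, raw syscalls can fail spuriously with EINTR, and the open and readDirent wrappers just below retry for exactly that reason. Here is the same retry idiom pulled out into a generic helper; the helper name is invented, and the example assumes a Unix system, matching this file's build constraints.

//go:build linux || darwin || freebsd || openbsd || netbsd

package main

import (
	"fmt"
	"syscall"
)

// ignoringEINTR keeps calling fn until it returns anything other than EINTR,
// the same loop shape used by open and readDirent below.
func ignoringEINTR(fn func() error) error {
	for {
		if err := fn(); err != syscall.EINTR {
			return err
		}
	}
}

func main() {
	var fd int
	err := ignoringEINTR(func() (err error) {
		fd, err = syscall.Open("/tmp", syscall.O_RDONLY, 0)
		return err
	})
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer syscall.Close(fd)
	fmt.Println("opened /tmp as fd", fd)
}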
+func open(path string, mode int, perm uint32) (fd int, err error) { + for { + fd, err := syscall.Open(path, mode, perm) + if err != syscall.EINTR { + return fd, err + } + } +} + +func readDirent(fd int, buf []byte) (n int, err error) { + for { + nbuf, err := syscall.ReadDirent(fd, buf) + if err != syscall.EINTR { + return nbuf, err + } + } +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 000000000..8659a0c5d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,273 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "fmt" + exec "golang.org/x/sys/execabs" + "io" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" +) + +// An Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. 
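The Runner above uses its buffered inFlight channel as a counting semaphore: runConcurrent (continuing below) takes a single slot, while runPiped first takes the serialization lock and then drains every slot so it runs with nothing else in flight. A minimal sketch of that two-level scheme; the gate type, its methods, and the slot count are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

const maxSlots = 4

// gate reproduces the scheme above: a buffered channel as a counting
// semaphore plus an exclusive mode that drains every slot.
type gate struct {
	slots chan struct{}
}

func newGate() *gate {
	return &gate{slots: make(chan struct{}, maxSlots)}
}

// shared takes a single slot, like runConcurrent claiming one worker.
func (g *gate) shared(fn func()) {
	g.slots <- struct{}{}
	defer func() { <-g.slots }()
	fn()
}

// exclusive waits until it holds every slot, like runPiped waiting for all
// in-flight invocations to finish before running alone.
func (g *gate) exclusive(fn func()) {
	for i := 0; i < maxSlots; i++ {
		g.slots <- struct{}{}
	}
	defer func() {
		for i := 0; i < maxSlots; i++ {
			<-g.slots
		}
	}()
	fn()
}

func main() {
	g := newGate()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.shared(func() { fmt.Println("concurrent call", i) })
		}()
	}
	wg.Wait()

	g.exclusive(func() { fmt.Println("serialized call") })
}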
+ select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + ModFlag string + ModFile string + Overlay string + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. + goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. 
+ // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + return runCmdContext(ctx, cmd) +} + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + // Didn't shut down in response to interrupt. Kill it hard. + cmd.Process.Kill() + return <-resChan +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 000000000..5e75bd6d8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,107 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. 
+// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return nil, false, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 000000000..713043680 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,51 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "strings" +) + +// GoVersion checks the go version by running "go list" with modules off. +// It returns the X in Go 1.X. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") + // Unset any unneeded flags, and remove them from BuildFlags, if they're + // present. + inv.ModFile = "" + inv.ModFlag = "" + var buildFlags []string + for _, flag := range inv.BuildFlags { + // Flags can be prefixed by one or two dashes. 
+ f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") + if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { + continue + } + buildFlags = append(buildFlags, flag) + } + inv.BuildFlags = buildFlags + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 000000000..925ff5356 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,264 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. + ModulesEnabled bool +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +// walkDir creates a walker and starts fastwalk with this walker. 
+func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Logf != nil { + opts.Logf("skipping nonexistent directory: %v", root.Path) + } + return + } + start := time.Now() + if opts.Logf != nil { + opts.Logf("gopathwalk: scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Logf != nil { + opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options +func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) + } + } else if w.opts.Logf != nil { + w.opts.Logf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Logf != nil { + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +// shouldSkipDir reports whether the file should be skipped or not. +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +// walk walks through the given path. +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. 
+ return fastwalk.ErrSkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.ErrSkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.ErrTraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, dir) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go new file mode 100644 index 000000000..d859617b7 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -0,0 +1,1730 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// importToGroup is a list of functions which map from an import path to +// a group number. 
+var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { + return + } + for _, p := range strings.Split(localPrefix, ",") { + if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { + return 3, true + } + } + return + }, + func(_, importPath string) (num int, ok bool) { + if strings.HasPrefix(importPath, "appengine") { + return 2, true + } + return + }, + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { + return 1, true + } + return + }, +} + +func importGroup(localPrefix, importPath string) int { + for _, fn := range importToGroup { + if n, ok := fn(localPrefix, importPath); ok { + return n + } + } + return 0 +} + +type ImportFixType int + +const ( + AddImport ImportFixType = iota + DeleteImport + SetImportName +) + +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType + Relevance float64 // see pkg +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + +// parseOtherFiles parses all the Go files in srcDir except filename, including +// test files if filename looks like a test. +func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { + // This could use go/packages but it doesn't buy much, and it fails + // with https://golang.org/issue/26296 in LoadFiles mode in some cases. + considerTests := strings.HasSuffix(filename, "_test.go") + + fileBase := filepath.Base(filename) + packageFileInfos, err := ioutil.ReadDir(srcDir) + if err != nil { + return nil + } + + var files []*ast.File + for _, fi := range packageFileInfos { + if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if !considerTests && strings.HasSuffix(fi.Name(), "_test.go") { + continue + } + + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + if err != nil { + continue + } + + files = append(files, f) + } + + return files +} + +// addGlobals puts the names of package vars into the provided map. +func addGlobals(f *ast.File, globals map[string]bool) { + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + for _, spec := range genDecl.Specs { + valueSpec, ok := spec.(*ast.ValueSpec) + if !ok { + continue + } + globals[valueSpec.Names[0].Name] = true + } + } +} + +// collectReferences builds a map of selector expressions, from +// left hand side (X) to a set of right hand sides (Sel). +func collectReferences(f *ast.File) references { + refs := references{} + + var visitor visitFn + visitor = func(node ast.Node) ast.Visitor { + if node == nil { + return visitor + } + switch v := node.(type) { + case *ast.SelectorExpr: + xident, ok := v.X.(*ast.Ident) + if !ok { + break + } + if xident.Obj != nil { + // If the parser can resolve it, it's not a package ref. 
+ break + } + if !ast.IsExported(v.Sel.Name) { + // Whatever this is, it's not exported from a package. + break + } + pkgName := xident.Name + r := refs[pkgName] + if r == nil { + r = make(map[string]bool) + refs[pkgName] = r + } + r[v.Sel.Name] = true + } + return visitor + } + ast.Walk(visitor, f) + return refs +} + +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. +func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo + for _, imp := range f.Imports { + var name string + if imp.Name != nil { + name = imp.Name.Name + } + if imp.Path.Value == `"C"` || name == "_" || name == "." { + continue + } + path := strings.Trim(imp.Path.Value, `"`) + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, + }) + } + return imports +} + +// findMissingImport searches pass's candidates for an import that provides +// pkg, containing all of syms. +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { + for _, candidate := range p.candidates { + pkgInfo, ok := p.knownPackages[candidate.ImportPath] + if !ok { + continue + } + if p.importIdentifier(candidate) != pkg { + continue + } + + allFound := true + for right := range syms { + if !pkgInfo.exports[right] { + allFound = false + break + } + } + + if allFound { + return candidate + } + } + return nil +} + +// references is set of references found in a Go file. The first map key is the +// left hand side of a selector expression, the second key is the right hand +// side, and the value should always be true. +type references map[string]map[string]bool + +// A pass contains all the inputs and state necessary to fix a file's imports. +// It can be modified in some ways during use; see comments below. +type pass struct { + // Inputs. These must be set before a call to load, and not modified after. + fset *token.FileSet // fset used to parse f and its siblings. + f *ast.File // the file being fixed. + srcDir string // the directory containing f. + env *ProcessEnv // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + + // Intermediate state, generated by load. + existingImports map[string]*ImportInfo + allRefs references + missingRefs references + + // Inputs to fix. These can be augmented between successive fix calls. + lastTry bool // indicates that this is the last call and fix should clean up as best it can. + candidates []*ImportInfo // candidate imports in priority order. + knownPackages map[string]*packageInfo // information about all known packages. +} + +// loadPackageNames saves the package names for everything referenced by imports. 
+func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Logf != nil { + p.env.Logf("loading package names for %v packages", len(imports)) + defer func() { + p.env.Logf("done loading package names for %v packages", len(imports)) + }() + } + var unknown []string + for _, imp := range imports { + if _, ok := p.knownPackages[imp.ImportPath]; ok { + continue + } + unknown = append(unknown, imp.ImportPath) + } + + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) + if err != nil { + return err + } + + for path, name := range names { + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, + } + } + return nil +} + +// importIdentifier returns the identifier that imp will introduce. It will +// guess if the package name has not been loaded, e.g. because the source +// is not available. +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name + } + known := p.knownPackages[imp.ImportPath] + if known != nil && known.name != "" { + return known.name + } + return ImportPathToAssumedName(imp.ImportPath) +} + +// load reads in everything necessary to run a pass, and reports whether the +// file already has all the imports it needs. It fills in p.missingRefs with the +// file's missing symbols, if any, or removes unused imports if not. +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} + p.existingImports = map[string]*ImportInfo{} + + // Load basic information about the file in question. + p.allRefs = collectReferences(p.f) + + // Load stuff from other files in the same package: + // global variables so we know they don't need resolving, and imports + // that we might want to mimic. + globals := map[string]bool{} + for _, otherFile := range p.otherFiles { + // Don't load globals from files that are in the same directory + // but a different package. Using them to suggest imports is OK. + if p.f.Name.Name == otherFile.Name.Name { + addGlobals(otherFile, globals) + } + p.candidates = append(p.candidates, collectImports(otherFile)...) + } + + // Resolve all the import paths we've seen to package names, and store + // f's imports by the identifier they introduce. + imports := collectImports(p.f) + if p.loadRealPackageNames { + err := p.loadPackageNames(append(imports, p.candidates...)) + if err != nil { + if p.env.Logf != nil { + p.env.Logf("loading package names: %v", err) + } + return nil, false + } + } + for _, imp := range imports { + p.existingImports[p.importIdentifier(imp)] = imp + } + + // Find missing references. + for left, rights := range p.allRefs { + if globals[left] { + continue + } + _, ok := p.existingImports[left] + if !ok { + p.missingRefs[left] = rights + continue + } + } + if len(p.missingRefs) != 0 { + return nil, false + } + + return p.fix() +} + +// fix attempts to satisfy missing imports using p.candidates. If it finds +// everything, or if p.lastTry is true, it updates fixes to add the imports it found, +// delete anything unused, and update import names, and returns true. +func (p *pass) fix() ([]*ImportFix, bool) { + // Find missing imports. + var selected []*ImportInfo + for left, rights := range p.missingRefs { + if imp := p.findMissingImport(left, rights); imp != nil { + selected = append(selected, imp) + } + } + + if !p.lastTry && len(selected) != len(p.missingRefs) { + return nil, false + } + + // Found everything, or giving up. 
Add the new imports and remove any unused. + var fixes []*ImportFix + for _, imp := range p.existingImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } + + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } + } + + for _, imp := range selected { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: AddImport, + }) + } + + return fixes, true +} + +// importSpecName gets the import name of imp in the import spec. +// +// When the import identifier matches the assumed import name, the import name does +// not appear in the import spec. +func (p *pass) importSpecName(imp *ImportInfo) string { + // If we did not load the real package names, or the name is already set, + // we just return the existing name. + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name + } + + ident := p.importIdentifier(imp) + if ident == ImportPathToAssumedName(imp.ImportPath) { + return "" // ident not needed since the assumed and real names are the same. + } + return ident +} + +// apply will perform the fixes on f in order. +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { + for _, fix := range fixes { + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: + // Find the matching import path and change the name. + for _, spec := range f.Imports { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { + spec.Name = &ast.Ident{ + Name: fix.StmtInfo.Name, + NamePos: spec.Pos(), + } + } + } + } + } +} + +// assumeSiblingImportsValid assumes that siblings' use of packages is valid, +// adding the exports they use. +func (p *pass) assumeSiblingImportsValid() { + for _, f := range p.otherFiles { + refs := collectReferences(f) + imports := collectImports(f) + importsByName := map[string]*ImportInfo{} + for _, imp := range imports { + importsByName[p.importIdentifier(imp)] = imp + } + for left, rights := range refs { + if imp, ok := importsByName[left]; ok { + if m, ok := stdlib[imp.ImportPath]; ok { + // We have the stdlib in memory; no need to guess. + rights = copyExports(m) + } + p.addCandidate(imp, &packageInfo{ + // no name; we already know it. + exports: rights, + }) + } + } + } +} + +// addCandidate adds a candidate import to p, and merges in the information +// in pkg. 
+func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { + p.candidates = append(p.candidates, imp) + if existing, ok := p.knownPackages[imp.ImportPath]; ok { + if existing.name == "" { + existing.name = pkg.name + } + for export := range pkg.exports { + existing.exports[export] = true + } + } else { + p.knownPackages[imp.ImportPath] = pkg + } +} + +// fixImports adds and removes imports from f so that all its references are +// satisfied and there are no unused imports. +// +// This is declared as a variable rather than a function so goimports can +// easily be extended by adding a file with an init function. +var fixImports = fixImportsDefault + +func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { + fixes, err := getFixes(fset, f, filename, env) + if err != nil { + return err + } + apply(fset, f, fixes) + return err +} + +// getFixes gets the import fixes that need to be made to f in order to fix the imports. +// It does not modify the ast. +func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + if env.Logf != nil { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + } + + // First pass: looking only at f, and using the naive algorithm to + // derive package names from import paths, see if the file is already + // complete. We can't add any imports yet, because we don't know + // if missing references are actually package vars. + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} + if fixes, done := p.load(); done { + return fixes, nil + } + + otherFiles := parseOtherFiles(fset, srcDir, filename) + + // Second pass: add information from other files in the same package, + // like their package vars and imports. + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + // Now we can try adding imports from the stdlib. + p.assumeSiblingImportsValid() + addStdlibCandidates(p, p.missingRefs) + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Third pass: get real package names where we had previously used + // the naive algorithm. + p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p.loadRealPackageNames = true + p.otherFiles = otherFiles + if fixes, done := p.load(); done { + return fixes, nil + } + + if err := addStdlibCandidates(p, p.missingRefs); err != nil { + return nil, err + } + p.assumeSiblingImportsValid() + if fixes, done := p.fix(); done { + return fixes, nil + } + + // Go look for candidates in $GOPATH, etc. We don't necessarily load + // the real exports of sibling imports, so keep assuming their contents. + if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + return nil, err + } + + p.lastTry = true + fixes, _ := p.fix() + return fixes, nil +} + +// MaxRelevance is the highest relevance, used for the standard library. +// Chosen arbitrarily to match pre-existing gopls code. +const MaxRelevance = 7.0 + +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. 
+func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } + goenv, err := env.goEnv() + if err != nil { + return err + } + + var mu sync.Mutex // to guard asynchronous access to dupCheck + dupCheck := map[string]struct{}{} + + // Start off with the standard library. + for importPath, exports := range stdlib { + p := &pkg{ + dir: filepath.Join(goenv["GOROOT"], "src", importPath), + importPathShort: importPath, + packageName: path.Base(importPath), + relevance: MaxRelevance, + } + dupCheck[importPath] = struct{}{} + if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } + } + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. + return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, + } + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) +} + +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]float64, error) { + result := make(map[string]float64) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } + for _, path := range paths { + result[path] = resolver.scoreImportPath(ctx, path) + } + return result, nil +} + +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) +} + +func candidateImportName(pkg *pkg) string { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { + return pkg.packageName + } + return "" +} + +// GetAllCandidates calls wrapped for each package whose name starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. 
+ return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// GetImportPaths calls wrapped for each package whose import path starts with +// searchPrefix, and can be imported from filename with the package name filePkg. +func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + return strings.HasPrefix(pkg.importPathShort, searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +// A PackageExport is a package and its exports. +type PackageExport struct { + Fix *ImportFix + Exports []string +} + +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, + } + return getCandidatePkgs(ctx, callback, filename, filePkg, env) +} + +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + +// ProcessEnv contains environment variables and settings that affect the use of +// the go command, the go/build package, etc. +type ProcessEnv struct { + GocmdRunner *gocommand.Runner + + BuildFlags []string + ModFlag string + ModFile string + + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string + + // If Logf is non-nil, debug logging is enabled through this function. 
+ Logf func(format string, args ...interface{}) + + initialized bool + + resolver Resolver +} + +func (e *ProcessEnv) goEnv() (map[string]string, error) { + if err := e.init(); err != nil { + return nil, err + } + return e.Env, nil +} + +func (e *ProcessEnv) matchFile(dir, name string) (bool, error) { + bctx, err := e.buildContext() + if err != nil { + return false, err + } + return bctx.MatchFile(dir, name) +} + +// CopyConfig copies the env's configuration into a new env. +func (e *ProcessEnv) CopyConfig() *ProcessEnv { + copy := &ProcessEnv{ + GocmdRunner: e.GocmdRunner, + initialized: e.initialized, + BuildFlags: e.BuildFlags, + Logf: e.Logf, + WorkingDir: e.WorkingDir, + resolver: nil, + Env: map[string]string{}, + } + for k, v := range e.Env { + copy.Env[k] = v + } + return copy +} + +func (e *ProcessEnv) init() error { + if e.initialized { + return nil + } + + foundAllRequired := true + for _, k := range RequiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break + } + } + if foundAllRequired { + e.initialized = true + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...) + if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + for k, v := range goEnv { + e.Env[k] = v + } + e.initialized = true + return nil +} + +func (e *ProcessEnv) env() []string { + var env []string // the gocommand package will prepend os.Environ. + for k, v := range e.Env { + env = append(env, k+"="+v) + } + return env +} + +func (e *ProcessEnv) GetResolver() (Resolver, error) { + if e.resolver != nil { + return e.resolver, nil + } + if err := e.init(); err != nil { + return nil, err + } + if len(e.Env["GOMOD"]) == 0 { + e.resolver = newGopathResolver(e) + return e.resolver, nil + } + e.resolver = newModuleResolver(e) + return e.resolver, nil +} + +func (e *ProcessEnv) buildContext() (*build.Context, error) { + ctx := build.Default + goenv, err := e.goEnv() + if err != nil { + return nil, err + } + ctx.GOROOT = goenv["GOROOT"] + ctx.GOPATH = goenv["GOPATH"] + + // As of Go 1.14, build.Context has a Dir field + // (see golang.org/issue/34860). + // Populate it only if present. + rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) + } + + // Since Go 1.11, go/build.Context.Import may invoke 'go list' depending on + // the value in GO111MODULE in the process's environment. We always want to + // run in GOPATH mode when calling Import, so we need to prevent this from + // happening. In Go 1.16, GO111MODULE defaults to "on", so this problem comes + // up more frequently. + // + // HACK: setting any of the Context I/O hooks prevents Import from invoking + // 'go list', regardless of GO111MODULE. This is undocumented, but it's + // unlikely to change before GOPATH support is removed. 
+ ctx.ReadDir = ioutil.ReadDir + + return &ctx, nil +} + +func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: e.BuildFlags, + Env: e.env(), + Logf: e.Logf, + WorkingDir: e.WorkingDir, + } + return e.GocmdRunner.Run(ctx, inv) +} + +func addStdlibCandidates(pass *pass, refs references) error { + goenv, err := pass.env.goEnv() + if err != nil { + return err + } + add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + return + } + exports := copyExports(stdlib[pkg]) + pass.addCandidate( + &ImportInfo{ImportPath: pkg}, + &packageInfo{name: path.Base(pkg), exports: exports}) + } + for left := range refs { + if left == "rand" { + // Make sure we try crypto/rand before math/rand. + add("crypto/rand") + add("math/rand") + continue + } + for importPath := range stdlib { + if path.Base(importPath) == left { + add(importPath) + } + } + } + return nil +} + +// A Resolver does the build-system-specific parts of goimports. +type Resolver interface { + // loadPackageNames loads the package names in importPaths. + loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. + // loadExports may be called concurrently. + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) float64 + + ClearForNewScan() +} + +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) +} + +func addExternalCandidates(pass *pass, refs references, filename string) error { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. 
+ return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := pass.env.GetResolver() + if err != nil { + return err + } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } + + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(context.TODO()) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { + pass.addCandidate(result.imp, result.pkg) + } + return firstErr +} + +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// ImportPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. +func ImportPathToAssumedName(importPath string) string { + base := path.Base(importPath) + if strings.HasPrefix(base, "v") { + if _, err := strconv.Atoi(base[1:]); err == nil { + dir := path.Dir(importPath) + if dir != "." { + base = path.Base(dir) + } + } + } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } + return base +} + +// gopathResolver implements resolver for GOPATH workspaces. +type gopathResolver struct { + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. 
+} + +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *gopathResolver) ClearForNewScan() { + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} +} + +func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + names := map[string]string{} + bctx, err := r.env.buildContext() + if err != nil { + return nil, err + } + for _, path := range importPaths { + names[path] = importPathToName(bctx, path, srcDir) + } + return names, nil +} + +// importPathToName finds out the actual package name, as declared in its .go files. +func importPathToName(bctx *build.Context, importPath, srcDir string) string { + // Fast path for standard library without going to disk. + if _, ok := stdlib[importPath]; ok { + return path.Base(importPath) // stdlib packages always match their paths. + } + + buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly) + if err != nil { + return "" + } + pkgName, err := packageDirToName(buildPkg.Dir) + if err != nil { + return "" + } + return pkgName +} + +// packageDirToName is a faster version of build.Import if +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, +// trusting that the files in the directory are consistent. +func packageDirToName(dir string) (packageName string, err error) { + d, err := os.Open(dir) + if err != nil { + return "", err + } + names, err := d.Readdirnames(-1) + d.Close() + if err != nil { + return "", err + } + sort.Strings(names) // to have predictable behavior + var lastErr error + var nfile int + for _, name := range names { + if !strings.HasSuffix(name, ".go") { + continue + } + if strings.HasSuffix(name, "_test.go") { + continue + } + nfile++ + fullFile := filepath.Join(dir, name) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullFile, nil, parser.PackageClauseOnly) + if err != nil { + lastErr = err + continue + } + pkgName := f.Name.Name + if pkgName == "documentation" { + // Special case from go/build.ImportDir, not + // handled by ctx.MatchFile. + continue + } + if pkgName == "main" { + // Also skip package main, assuming it's a +build ignore generator or example. + // Since you can't import a package main anyway, there's no harm here. + continue + } + return pkgName, nil + } + if lastErr != nil { + return "", lastErr + } + return "", fmt.Errorf("no importable package found in %d Go files", nfile) +} + +type pkg struct { + dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") + importPathShort string // vendorless import path ("net/http", "a/b") + packageName string // package name loaded from source if requested + relevance float64 // a weakly-defined score of how relevant a package is. 0 is most relevant. +} + +type pkgDistance struct { + pkg *pkg + distance int // relative distance to target +} + +// byDistanceOrImportPathShortLength sorts by relative distance breaking ties +// on the short import path length and then the import string itself. 
+type byDistanceOrImportPathShortLength []pkgDistance + +func (s byDistanceOrImportPathShortLength) Len() int { return len(s) } +func (s byDistanceOrImportPathShortLength) Less(i, j int) bool { + di, dj := s[i].distance, s[j].distance + if di == -1 { + return false + } + if dj == -1 { + return true + } + if di != dj { + return di < dj + } + + vi, vj := s[i].pkg.importPathShort, s[j].pkg.importPathShort + if len(vi) != len(vj) { + return len(vi) < len(vj) + } + return vi < vj +} +func (s byDistanceOrImportPathShortLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func distance(basepath, targetpath string) int { + p, err := filepath.Rel(basepath, targetpath) + if err != nil { + return -1 + } + if p == "." { + return 0 + } + return strings.Count(p, string(filepath.Separator)) + 1 +} + +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { + add := func(root gopathwalk.Root, dir string) { + // We assume cached directories have not changed. We can skip them and their + // children. + if _, ok := r.cache.Load(dir); ok { + return + } + + importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) + info := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: VendorlessPath(importpath), + } + r.cache.Store(dir, info) + } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + + p := &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + relevance: MaxRelevance - 1, + } + if info.rootType == gopathwalk.RootGOROOT { + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + var roots []gopathwalk.Root + roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT}) + for _, p := range filepath.SplitList(goenv["GOPATH"]) { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH}) + } + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots = filterRoots(roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. 
+ scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { + var result []gopathwalk.Root + for _, root := range roots { + if !include(root) { + continue + } + result = append(result, root) + } + return result +} + +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { + return r.cache.CacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +// VendorlessPath returns the devendorized version of the import path ipath. +// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". +func VendorlessPath(ipath string) string { + // Devendorize for use in import statement. + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + if strings.HasPrefix(ipath, "vendor/") { + return ipath[len("vendor/"):] + } + return ipath +} + +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { + // Look for non-test, buildable .go files which could provide exports. + all, err := ioutil.ReadDir(dir) + if err != nil { + return "", nil, err + } + var files []os.FileInfo + for _, fi := range all { + name := fi.Name() + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { + continue + } + match, err := env.matchFile(dir, fi.Name()) + if err != nil || !match { + continue + } + files = append(files, fi) + } + + if len(files) == 0 { + return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) + } + + var pkgName string + var exports []string + fset := token.NewFileSet() + for _, fi := range files { + select { + case <-ctx.Done(): + return "", nil, ctx.Err() + default: + } + + fullFile := filepath.Join(dir, fi.Name()) + f, err := parser.ParseFile(fset, fullFile, nil, 0) + if err != nil { + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue + } + if f.Name.Name == "documentation" { + // Special case from go/build.ImportDir, not + // handled by MatchFile above. + continue + } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } + pkgName = f.Name.Name + for name := range f.Scope.Objects { + if ast.IsExported(name) { + exports = append(exports, name) + } + } + } + + if env.Logf != nil { + sortedExports := append([]string(nil), exports...) + sort.Strings(sortedExports) + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) + } + return pkgName, exports, nil +} + +// findImport searches for a package with the given symbols. 
+// If no package is found, findImport returns ("", false, nil) +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { + // Sort the candidates by their import package length, + // assuming that shorter package names are better than long + // ones. Note that this sorts by the de-vendored name, so + // there's no "penalty" for vendoring. + sort.Sort(byDistanceOrImportPathShortLength(candidates)) + if pass.env.Logf != nil { + for i, c := range candidates { + pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + } + } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } + + // Collect exports for packages with matching names. + rescv := make([]chan *pkg, len(candidates)) + for i := range candidates { + rescv[i] = make(chan *pkg, 1) + } + const maxConcurrentPackageImport = 4 + loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) + + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + for i, c := range candidates { + select { + case loadExportsSem <- struct{}{}: + case <-ctx.Done(): + return + } + + wg.Add(1) + go func(c pkgDistance, resc chan<- *pkg) { + defer func() { + <-loadExportsSem + wg.Done() + }() + + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + } + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) + if err != nil { + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + } + resc <- nil + return + } + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym] = true + } + + // If it doesn't have the right + // symbols, send nil to mean no match. + for symbol := range symbols { + if !exportsMap[symbol] { + resc <- nil + return + } + } + resc <- c.pkg + }(c, rescv[i]) + } + }() + + for _, resc := range rescv { + pkg := <-resc + if pkg == nil { + continue + } + return pkg, nil + } + return nil, nil +} + +// pkgIsCandidate reports whether pkg is a candidate for satisfying the +// finding which package pkgIdent in the file named by filename is trying +// to refer to. +// +// This check is purely lexical and is meant to be as fast as possible +// because it's run over all $GOPATH directories to filter out poor +// candidates in order to limit the CPU and I/O later parsing the +// exports in candidate packages. +// +// filename is the file being formatted. +// pkgIdent is the package being searched for, like "client" (if +// searching for "client.New") +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { + // Check "internal" and "vendor" visibility: + if !canUse(filename, pkg.dir) { + return false + } + + // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. 
+ // + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + } + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// canUse reports whether the package in dir is usable from filename, +// respecting the Go "internal" and "vendor" visibility rules. +func canUse(filename, dir string) bool { + // Fast path check, before any allocations. If it doesn't contain vendor + // or internal, it's not tricky: + // Note that this can false-negative on directories like "notinternal", + // but we check it correctly below. This is just a fast path. + if !strings.Contains(dir, "vendor") && !strings.Contains(dir, "internal") { + return true + } + + dirSlash := filepath.ToSlash(dir) + if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") { + return true + } + // Vendor or internal directory only visible from children of parent. + // That means the path from the current directory to the target directory + // can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal + // or bar/vendor or bar/internal. + // After stripping all the leading ../, the only okay place to see vendor or internal + // is at the very beginning of the path. + absfile, err := filepath.Abs(filename) + if err != nil { + return false + } + absdir, err := filepath.Abs(dir) + if err != nil { + return false + } + rel, err := filepath.Rel(absfile, absdir) + if err != nil { + return false + } + relSlash := filepath.ToSlash(rel) + if i := strings.LastIndex(relSlash, "../"); i >= 0 { + relSlash = relSlash[i+len("../"):] + } + return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. 
+func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +type visitFn func(node ast.Node) ast.Visitor + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + return fn(node) +} + +func copyExports(pkg []string) map[string]bool { + m := make(map[string]bool, len(pkg)) + for _, v := range pkg { + m[v] = true + } + return m +} diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go new file mode 100644 index 000000000..2815edc33 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -0,0 +1,346 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkstdlib.go + +// Package imports implements a Go pretty-printer (like package "go/format") +// that also adds or removes import statements as necessary. +package imports + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +// Options is golang.org/x/tools/imports.Options with extra internal-only options. +type Options struct { + Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + + Fragment bool // Accept fragment of a source file (no package statement) + AllErrors bool // Report all errors (not just the first 10 on different lines) + + Comments bool // Print comments (true if nil *Options provided) + TabIndent bool // Use tabs for indent (true if nil *Options provided) + TabWidth int // Tab width (8 if nil *Options provided) + + FormatOnly bool // Disable the insertion and deletion of imports +} + +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. +func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + if !opt.FormatOnly { + if err := fixImports(fileSet, file, filename, opt.Env); err != nil { + return nil, err + } + } + return formatFile(fileSet, file, src, adjust, opt) +} + +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. src and opt must +// be specified. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + fileSet := token.NewFileSet() + file, _, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + return getFixes(fileSet, file, filename, opt.Env) +} + +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. 
+func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparseable files. + fileSet := token.NewFileSet() + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, nil, opt) +} + +func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) + imps := astutil.Imports(fileSet, file) + var spacesBefore []string // import paths we need spaces before + for _, impSection := range imps { + // Within each block of contiguous imports, see if any + // import lines are in different group numbers. If so, + // we'll need to put a space between them so it's + // compatible with gofmt. + lastGroup := -1 + for _, importSpec := range impSection { + importPath, _ := strconv.Unquote(importSpec.Path.Value) + groupNum := importGroup(opt.LocalPrefix, importPath) + if groupNum != lastGroup && lastGroup != -1 { + spacesBefore = append(spacesBefore, importPath) + } + lastGroup = groupNum + } + + } + + printerMode := printer.UseSpaces + if opt.TabIndent { + printerMode |= printer.TabIndent + } + printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} + + var buf bytes.Buffer + err := printConfig.Fprint(&buf, fileSet, file) + if err != nil { + return nil, err + } + out := buf.Bytes() + if adjust != nil { + out = adjust(src, out) + } + if len(spacesBefore) > 0 { + out, err = addImportSpaces(bytes.NewReader(out), spacesBefore) + if err != nil { + return nil, err + } + } + + out, err = format.Source(out) + if err != nil { + return nil, err + } + return out, nil +} + +// parse parses src, which was read from filename, +// as a Go source file or statement list. +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + + // Try as whole source file. + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err == nil { + return file, nil, nil + } + // If the error is that the source file didn't begin with a + // package line and we accept fragmented input, fall through to + // try as a source fragment. Stop and return on any other error. + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + return nil, nil, err + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ;, not a newline, so that parse errors are on + // the correct line. + const prefix = "package main;" + psrc := append([]byte(prefix), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + // Gofmt will turn the ; into a \n. + // Do that ourselves now and update the file contents, + // so that positions and line numbers are correct going forward. 
+ psrc[len(prefix)-1] = '\n' + fset.File(file.Package).SetLinesForContent(psrc) + + // If a main function exists, we will assume this is a main + // package and leave the file. + if containsMainFunc(file) { + return file, nil, nil + } + + adjust := func(orig, src []byte) []byte { + // Remove the package clause. + src = src[len(prefix):] + return matchSpace(orig, src) + } + return file, adjust, nil + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return nil, nil, err + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ;, not a newline, so that the line numbers + // in fsrc match the ones in src. + fsrc := append(append([]byte("package p; func _() {"), src...), '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + adjust := func(orig, src []byte) []byte { + // Remove the wrapping. + // Gofmt has turned the ; into a \n\n. + src = src[len("package p\n\nfunc _() {"):] + src = src[:len(src)-len("}\n")] + // Gofmt has also indented the function body one level. + // Remove that indent. + src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + return matchSpace(orig, src) + } + return file, adjust, nil + } + + // Failed, and out of options. + return nil, nil, err +} + +// containsMainFunc checks if a file contains a function declaration with the +// function signature 'func main()' +func containsMainFunc(file *ast.File) bool { + for _, decl := range file.Decls { + if f, ok := decl.(*ast.FuncDecl); ok { + if f.Name.Name != "main" { + continue + } + + if len(f.Type.Params.List) != 0 { + continue + } + + if f.Type.Results != nil && len(f.Type.Results.List) != 0 { + continue + } + + return true + } + } + + return false +} + +func cutSpace(b []byte) (before, middle, after []byte) { + i := 0 + for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') { + i++ + } + j := len(b) + for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') { + j-- + } + if i <= j { + return b[:i], b[i:j], b[j:] + } + return nil, nil, b[j:] +} + +// matchSpace reformats src to use the same space context as orig. +// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src. +// 2) matchSpace copies the indentation of the first non-blank line in orig +// to every non-blank line in src. +// 3) matchSpace copies the trailing space from orig and uses it in place +// of src's trailing space. 
+func matchSpace(orig []byte, src []byte) []byte { + before, _, after := cutSpace(orig) + i := bytes.LastIndex(before, []byte{'\n'}) + before, indent := before[:i+1], before[i+1:] + + _, src, _ = cutSpace(src) + + var b bytes.Buffer + b.Write(before) + for len(src) > 0 { + line := src + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, src = line[:i+1], line[i+1:] + } else { + src = nil + } + if len(line) > 0 && line[0] != '\n' { // not blank + b.Write(indent) + } + b.Write(line) + } + b.Write(after) + return b.Bytes() +} + +var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`) + +func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) { + var out bytes.Buffer + in := bufio.NewReader(r) + inImports := false + done := false + for { + s, err := in.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + if !inImports && !done && strings.HasPrefix(s, "import") { + inImports = true + } + if inImports && (strings.HasPrefix(s, "var") || + strings.HasPrefix(s, "func") || + strings.HasPrefix(s, "const") || + strings.HasPrefix(s, "type")) { + done = true + inImports = false + } + if inImports && len(breaks) > 0 { + if m := impLine.FindStringSubmatch(s); m != nil { + if m[1] == breaks[0] { + out.WriteByte('\n') + breaks = breaks[1:] + } + } + } + + fmt.Fprint(&out, s) + } + return out.Bytes(), nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go new file mode 100644 index 000000000..dff6d5536 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -0,0 +1,695 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/gopathwalk" +) + +// ModuleResolver implements resolver for modules using the go command as little +// as feasible. +type ModuleResolver struct { + env *ProcessEnv + moduleCacheDir string + dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool + + initialized bool + main *gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or Dir. + + // moduleCacheCache stores information about the module cache. 
+ moduleCacheCache *dirInfoCache + otherCache *dirInfoCache +} + +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + +func (r *ModuleResolver) init() error { + if r.initialized { + return nil + } + + goenv, err := r.env.goEnv() + if err != nil { + return err + } + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + ModFlag: r.env.ModFlag, + ModFile: r.env.ModFile, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } + + if mainMod != nil && vendorEnabled { + // Vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.main = mainMod + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainMod.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + } else { + // Vendor mode is off, so run go list -m ... to find everything. + err := r.initAllMods() + // We expect an error when running outside of a module with + // GO111MODULE=on. Other errors are fatal. + if err != nil { + if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { + return err + } + } + } + + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return fmt.Errorf("empty GOPATH") + } + r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + } + + sort.Slice(r.modsByModPath, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByModPath[x].Path, "/") + } + return count(j) < count(i) // descending order + }) + sort.Slice(r.modsByDir, func(i, j int) bool { + count := func(x int) int { + return strings.Count(r.modsByDir[x].Dir, "/") + } + return count(j) < count(i) // descending order + }) + + r.roots = []gopathwalk.Root{ + {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *gocommand.ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. 
+ for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} + if r.moduleCacheCache == nil { + r.moduleCacheCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + if r.otherCache == nil { + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + } + r.initialized = true + return nil +} + +func (r *ModuleResolver) initAllMods() error { + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...") + if err != nil { + return err + } + for dec := json.NewDecoder(stdout); dec.More(); { + mod := &gocommand.ModuleJSON{} + if err := dec.Decode(mod); err != nil { + return err + } + if mod.Dir == "" { + if r.env.Logf != nil { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } + // Can't do anything with a module that's not downloaded. + continue + } + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) + if mod.Main { + r.main = mod + } + } + return nil +} + +func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} + r.otherCache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.scanSema <- struct{}{} +} + +func (r *ModuleResolver) ClearForNewMod() { + <-r.scanSema + *r = ModuleResolver{ + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, + } + r.init() + r.scanSema <- struct{}{} +} + +// findPackage returns the module and directory that contains the package at +// the given import path, or returns nil, "" if no module is in scope. +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { + // This can't find packages in the stdlib, but that's harmless for all + // the existing code paths. + for _, m := range r.modsByModPath { + if !strings.HasPrefix(importPath, m.Path) { + continue + } + pathInModule := importPath[len(m.Path):] + pkgDir := filepath.Join(m.Dir, pathInModule) + if r.dirIsNestedModule(pkgDir, m) { + continue + } + + if info, ok := r.cacheLoad(pkgDir); ok { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + if err != nil { + continue // No package in this dir. + } + return m, pkgDir + } + if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { + continue // Dir is unreadable, etc. + } + // This is slightly wrong: a directory doesn't have to have an + // importable package to count as a package for package-to-module + // resolution. package main or _test files should count but + // don't. + // TODO(heschi): fix this. + if _, err := r.cachePackageName(info); err == nil { + return m, pkgDir + } + } + + // Not cached. Read the filesystem. + pkgFiles, err := ioutil.ReadDir(pkgDir) + if err != nil { + continue + } + // A module only contains a package if it has buildable go + // files in that directory. If not, it could be provided by an + // outer module. See #29736. 
+ for _, fi := range pkgFiles { + if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok { + return m, pkgDir + } + } + } + return nil, "" +} + +func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { + if info, ok := r.moduleCacheCache.Load(dir); ok { + return info, ok + } + return r.otherCache.Load(dir) +} + +func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { + if info.rootType == gopathwalk.RootModuleCache { + r.moduleCacheCache.Store(info.dir, info) + } else { + r.otherCache.Store(info.dir, info) + } +} + +func (r *ModuleResolver) cacheKeys() []string { + return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) +} + +// cachePackageName caches the package name for a dir already in the cache. +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CachePackageName(info) + } + return r.otherCache.CachePackageName(info) +} + +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if info.rootType == gopathwalk.RootModuleCache { + return r.moduleCacheCache.CacheExports(ctx, env, info) + } + return r.otherCache.CacheExports(ctx, env, info) +} + +// findModuleByDir returns the module that contains dir, or nil if no such +// module is in scope. +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { + // This is quite tricky and may not be correct. dir could be: + // - a package in the main module. + // - a replace target underneath the main module's directory. + // - a nested module in the above. + // - a replace target somewhere totally random. + // - a nested module in the above. + // - in the mod cache. + // - in /vendor/ in -mod=vendor mode. + // - nested module? Dunno. + // Rumor has it that replace targets cannot contain other replace targets. + for _, m := range r.modsByDir { + if !strings.HasPrefix(dir, m.Dir) { + continue + } + + if r.dirIsNestedModule(dir, m) { + continue + } + + return m + } + return nil +} + +// dirIsNestedModule reports if dir is contained in a nested module underneath +// mod, not actually in mod. +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { + if !strings.HasPrefix(dir, mod.Dir) { + return false + } + if r.dirInModuleCache(dir) { + // Nested modules in the module cache are pruned, + // so it cannot be a nested module. + return false + } + if mod != nil && mod == r.dummyVendorMod { + // The /vendor pseudomodule is flattened and doesn't actually count. 
+ return false + } + modDir, _ := r.modInfo(dir) + if modDir == "" { + return false + } + return modDir != mod.Dir +} + +func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { + readModName := func(modFile string) string { + modBytes, err := ioutil.ReadFile(modFile) + if err != nil { + return "" + } + return modulePath(modBytes) + } + + if r.dirInModuleCache(dir) { + if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + return modDir, readModName(filepath.Join(modDir, "go.mod")) + } + } + for { + if info, ok := r.cacheLoad(dir); ok { + return info.moduleDir, info.moduleName + } + f := filepath.Join(dir, "go.mod") + info, err := os.Stat(f) + if err == nil && !info.IsDir() { + return dir, readModName(f) + } + + d := filepath.Dir(dir) + if len(d) >= len(dir) { + return "", "" // reached top of file system, no go.mod + } + dir = d + } +} + +func (r *ModuleResolver) dirInModuleCache(dir string) bool { + if r.moduleCacheDir == "" { + return false + } + return strings.HasPrefix(dir, r.moduleCacheDir) +} + +func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if err := r.init(); err != nil { + return nil, err + } + names := map[string]string{} + for _, path := range importPaths { + _, packageDir := r.findPackage(path) + if packageDir == "" { + continue + } + name, err := packageDirToName(packageDir) + if err != nil { + continue + } + names[path] = name + } + return names, nil +} + +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + if err := r.init(); err != nil { + return err + } + + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } + + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() + + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. + skip := func(root gopathwalk.Root, dir string) bool { + info, ok := r.cacheLoad(dir) + if !ok { + return false + } + // This directory can be skipped as long as we have already scanned it. + // Packages with errors will continue to have errors, so there is no need + // to rescan them. + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + // Add anything new to the cache, and process it if we're still listening. + add := func(root gopathwalk.Root, dir string) { + r.cacheStore(r.scanDirForPackage(root, dir)) + } + + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. 
+ roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } + + if r.scannedRoots[root] { + continue + } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true + } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} + +func modRelevance(mod *gocommand.ModuleJSON) float64 { + var relevance float64 + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + relevance = MaxRelevance - 3 + case !mod.Main: + relevance = MaxRelevance - 2 + default: + relevance = MaxRelevance - 1 // main module ties with stdlib + } + + _, versionString, ok := module.SplitPathVersion(mod.Path) + if ok { + index := strings.Index(versionString, "v") + if index == -1 { + return relevance + } + if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil { + relevance += versionNumber / 1000 + } + } + + return relevance +} + +// canonicalize gets the result of canonicalizing the packages using the results +// of initializing the resolver from 'go list -m'. +func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { + // Packages in GOROOT are already canonical, regardless of the std/cmd modules. + if info.rootType == gopathwalk.RootGOROOT { + return &pkg{ + importPathShort: info.nonCanonicalImportPath, + dir: info.dir, + packageName: path.Base(info.nonCanonicalImportPath), + relevance: MaxRelevance, + }, nil + } + + importPath := info.nonCanonicalImportPath + mod := r.findModuleByDir(info.dir) + // Check if the directory is underneath a module that's in scope. + if mod != nil { + // It is. If dir is the target of a replace directive, + // our guessed import path is wrong. Use the real one. + if mod.Dir == info.dir { + importPath = mod.Path + } else { + dirInMod := info.dir[len(mod.Dir)+len("/"):] + importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) + } + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. + return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) + } + + res := &pkg{ + importPathShort: importPath, + dir: info.dir, + relevance: modRelevance(mod), + } + // We may have discovered a package that has a different version + // in scope already. Canonicalize to that one if possible. 
+ if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { + res.dir = canonicalDir + } + return res, nil +} + +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if err := r.init(); err != nil { + return "", nil, err + } + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { + return r.cacheExports(ctx, r.env, info) + } + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) +} + +func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + importPath := filepath.ToSlash(subdir) + if strings.HasPrefix(importPath, "vendor/") { + // Only enter vendor directories if they're explicitly requested as a root. + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("unwanted vendor directory"), + } + } + switch root.Type { + case gopathwalk.RootCurrentModule: + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) + case gopathwalk.RootModuleCache: + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if r.env.Logf != nil { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath = path.Join(modPath, filepath.ToSlash(matches[3])) + } + + modDir, modName := r.modInfo(dir) + result := directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + if root.Type == gopathwalk.RootGOROOT { + // stdlib packages are always in scope, despite the confusing go.mod + return result + } + return result +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// modulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +// +// Copied from cmd/go/internal/modfile. +func modulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go new file mode 100644 index 000000000..18dada495 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -0,0 +1,236 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/tools/internal/gopathwalk" +) + +// To find packages to import, the resolver needs to know about all of the +// the packages that could be imported. This includes packages that are +// already in modules that are in (1) the current module, (2) replace targets, +// and (3) packages in the module cache. Packages in (1) and (2) may change over +// time, as the client may edit the current module and locally replaced modules. +// The module cache (which includes all of the packages in (3)) can only +// ever be added to. +// +// The resolver can thus save state about packages in the module cache +// and guarantee that this will not change over time. To obtain information +// about new modules added to the module cache, the module cache should be +// rescanned. +// +// It is OK to serve information about modules that have been deleted, +// as they do still exist. +// TODO(suzmue): can we share information with the caller about +// what module needs to be downloaded to import this package? + +type directoryPackageStatus int + +const ( + _ directoryPackageStatus = iota + directoryScanned + nameLoaded + exportsLoaded +) + +type directoryPackageInfo struct { + // status indicates the extent to which this struct has been filled in. + status directoryPackageStatus + // err is non-nil when there was an error trying to reach status. + err error + + // Set when status >= directoryScanned. + + // dir is the absolute directory of this package. + dir string + rootType gopathwalk.RootType + // nonCanonicalImportPath is the package's expected import path. It may + // not actually be importable at that path. + nonCanonicalImportPath string + + // Module-related information. + moduleDir string // The directory that is the module root of this dir. + moduleName string // The module name that contains this dir. + + // Set when status >= nameLoaded. + + packageName string // the package name, as declared in the source. + + // Set when status >= exportsLoaded. + + exports []string +} + +// reachedStatus returns true when info has a status at least target and any error associated with +// an attempt to reach target. +func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) { + if info.err == nil { + return info.status >= target, nil + } + if info.status == target { + return true, info.err + } + return true, nil +} + +// dirInfoCache is a concurrency safe map for storing information about +// directories that may contain packages. +// +// The information in this cache is built incrementally. Entries are initialized in scan. +// No new keys should be added in any other functions, as all directories containing +// packages are identified in scan. +// +// Other functions, including loadExports and findPackage, may update entries in this cache +// as they discover new things about the directory. +// +// The information in the cache is not expected to change for the cache's +// lifetime, so there is no protection against competing writes. Users should +// take care not to hold the cache across changes to the underlying files. +// +// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) +type dirInfoCache struct { + mu sync.Mutex + // dirs stores information about packages in directories, keyed by absolute path. 
+ dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop +} + +// Store stores the package info for dir. +func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { + d.mu.Lock() + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } +} + +// Load returns a copy of the directoryPackageInfo for absolute directory dir. +func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { + d.mu.Lock() + defer d.mu.Unlock() + info, ok := d.dirs[dir] + if !ok { + return directoryPackageInfo{}, false + } + return *info, true +} + +// Keys returns the keys currently present in d. +func (d *dirInfoCache) Keys() (keys []string) { + d.mu.Lock() + defer d.mu.Unlock() + for key := range d.dirs { + keys = append(keys, key) + } + return keys +} + +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { + if loaded, err := info.reachedStatus(nameLoaded); loaded { + return info.packageName, err + } + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return "", fmt.Errorf("cannot read package name, scan error: %v", err) + } + info.packageName, info.err = packageDirToName(info.dir) + info.status = nameLoaded + d.Store(info.dir, info) + return info.packageName, info.err +} + +func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { + if reached, _ := info.reachedStatus(exportsLoaded); reached { + return info.packageName, info.exports, info.err + } + if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { + return "", nil, err + } + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { + return info.packageName, info.exports, info.err + } + // The cache structure wants things to proceed linearly. We can skip a + // step here, but only if we succeed. 
+ if info.status == nameLoaded || info.err == nil { + info.status = exportsLoaded + } else { + info.status = nameLoaded + } + d.Store(info.dir, info) + return info.packageName, info.exports, info.err +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go new file mode 100644 index 000000000..be8ffa25f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -0,0 +1,280 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hacked up copy of go/ast/import.go + +package imports + +import ( + "go/ast" + "go/token" + "sort" + "strconv" +) + +// sortImports sorts runs of consecutive import lines in import blocks in f. +// It also removes duplicate imports when it is possible to do so without data loss. +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { + for i, d := range f.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + break + } + + if len(d.Specs) == 0 { + // Empty import block, remove it. + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + } + + if !d.Lparen.IsValid() { + // Not a block: sorted by default. + continue + } + + // Identify and sort runs of specs on successive lines. + i := 0 + specs := d.Specs[:0] + for j, s := range d.Specs { + if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { + // j begins a new run. End this one. + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) + i = j + } + } + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) + d.Specs = specs + + // Deduping can leave a blank line before the rparen; clean that up. + if len(d.Specs) > 0 { + lastSpec := d.Specs[len(d.Specs)-1] + lastLine := fset.Position(lastSpec.Pos()).Line + if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 { + fset.File(d.Rparen).MergeLine(rParenLine - 1) + } + } + } +} + +// mergeImports merges all the import declarations into the first one. +// Taken from golang.org/x/tools/ast/astutil. +func mergeImports(fset *token.FileSet, f *ast.File) { + if len(f.Decls) <= 1 { + return + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } +} + +// declImports reports whether gen contains an import of path. +// Taken from golang.org/x/tools/ast/astutil. 
+func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +func importPath(s ast.Spec) string { + t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value) + if err == nil { + return t + } + return "" +} + +func importName(s ast.Spec) string { + n := s.(*ast.ImportSpec).Name + if n == nil { + return "" + } + return n.Name +} + +func importComment(s ast.Spec) string { + c := s.(*ast.ImportSpec).Comment + if c == nil { + return "" + } + return c.Text() +} + +// collapse indicates whether prev may be removed, leaving only next. +func collapse(prev, next ast.Spec) bool { + if importPath(next) != importPath(prev) || importName(next) != importName(prev) { + return false + } + return prev.(*ast.ImportSpec).Comment == nil +} + +type posSpan struct { + Start token.Pos + End token.Pos +} + +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { + // Can't short-circuit here even if specs are already sorted, + // since they might yet need deduplication. + // A lone import, however, may be safely ignored. + if len(specs) <= 1 { + return specs + } + + // Record positions for specs. + pos := make([]posSpan, len(specs)) + for i, s := range specs { + pos[i] = posSpan{s.Pos(), s.End()} + } + + // Identify comments in this range. + // Any comment from pos[0].Start to the final line counts. + lastLine := fset.Position(pos[len(pos)-1].End).Line + cstart := len(f.Comments) + cend := len(f.Comments) + for i, g := range f.Comments { + if g.Pos() < pos[0].Start { + continue + } + if i < cstart { + cstart = i + } + if fset.Position(g.End()).Line > lastLine { + cend = i + break + } + } + comments := f.Comments[cstart:cend] + + // Assign each comment to the import spec preceding it. + importComment := map[*ast.ImportSpec][]*ast.CommentGroup{} + specIndex := 0 + for _, g := range comments { + for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() { + specIndex++ + } + s := specs[specIndex].(*ast.ImportSpec) + importComment[s] = append(importComment[s], g) + } + + // Sort the import specs by import path. + // Remove duplicates, when possible without data loss. + // Reassign the import paths to have the same position sequence. + // Reassign each comment to abut the end of its spec. + // Sort the comments by new position. + sort.Sort(byImportSpec{localPrefix, specs}) + + // Dedup. Thanks to our sorting, we can just consider + // adjacent pairs of imports. + deduped := specs[:0] + for i, s := range specs { + if i == len(specs)-1 || !collapse(s, specs[i+1]) { + deduped = append(deduped, s) + } else { + p := s.Pos() + fset.File(p).MergeLine(fset.Position(p).Line) + } + } + specs = deduped + + // Fix up comment positions + for i, s := range specs { + s := s.(*ast.ImportSpec) + if s.Name != nil { + s.Name.NamePos = pos[i].Start + } + s.Path.ValuePos = pos[i].Start + s.EndPos = pos[i].End + nextSpecPos := pos[i].End + + for _, g := range importComment[s] { + for _, c := range g.List { + c.Slash = pos[i].End + nextSpecPos = c.End() + } + } + if i < len(specs)-1 { + pos[i+1].Start = nextSpecPos + pos[i+1].End = nextSpecPos + } + } + + sort.Sort(byCommentPos(comments)) + + // Fixup comments can insert blank lines, because import specs are on different lines. + // We remove those blank lines here by merging import spec to the first import spec line. 
+ firstSpecLine := fset.Position(specs[0].Pos()).Line + for _, s := range specs[1:] { + p := s.Pos() + line := fset.File(p).Line(p) + for previousLine := line - 1; previousLine >= firstSpecLine; { + fset.File(p).MergeLine(previousLine) + previousLine-- + } + } + return specs +} + +type byImportSpec struct { + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec +} + +func (x byImportSpec) Len() int { return len(x.specs) } +func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] } +func (x byImportSpec) Less(i, j int) bool { + ipath := importPath(x.specs[i]) + jpath := importPath(x.specs[j]) + + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) + if igroup != jgroup { + return igroup < jgroup + } + + if ipath != jpath { + return ipath < jpath + } + iname := importName(x.specs[i]) + jname := importName(x.specs[j]) + + if iname != jname { + return iname < jname + } + return importComment(x.specs[i]) < importComment(x.specs[j]) +} + +type byCommentPos []*ast.CommentGroup + +func (x byCommentPos) Len() int { return len(x) } +func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go new file mode 100644 index 000000000..ccdd4e0ff --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -0,0 +1,10733 @@ +// Code generated by mkstdlib.go. DO NOT EDIT. + +package imports + +var stdlib = map[string][]string{ + "archive/tar": []string{ + "ErrFieldTooLong", + "ErrHeader", + "ErrWriteAfterClose", + "ErrWriteTooLong", + "FileInfoHeader", + "Format", + "FormatGNU", + "FormatPAX", + "FormatUSTAR", + "FormatUnknown", + "Header", + "NewReader", + "NewWriter", + "Reader", + "TypeBlock", + "TypeChar", + "TypeCont", + "TypeDir", + "TypeFifo", + "TypeGNULongLink", + "TypeGNULongName", + "TypeGNUSparse", + "TypeLink", + "TypeReg", + "TypeRegA", + "TypeSymlink", + "TypeXGlobalHeader", + "TypeXHeader", + "Writer", + }, + "archive/zip": []string{ + "Compressor", + "Decompressor", + "Deflate", + "ErrAlgorithm", + "ErrChecksum", + "ErrFormat", + "File", + "FileHeader", + "FileInfoHeader", + "NewReader", + "NewWriter", + "OpenReader", + "ReadCloser", + "Reader", + "RegisterCompressor", + "RegisterDecompressor", + "Store", + "Writer", + }, + "bufio": []string{ + "ErrAdvanceTooFar", + "ErrBadReadCount", + "ErrBufferFull", + "ErrFinalToken", + "ErrInvalidUnreadByte", + "ErrInvalidUnreadRune", + "ErrNegativeAdvance", + "ErrNegativeCount", + "ErrTooLong", + "MaxScanTokenSize", + "NewReadWriter", + "NewReader", + "NewReaderSize", + "NewScanner", + "NewWriter", + "NewWriterSize", + "ReadWriter", + "Reader", + "ScanBytes", + "ScanLines", + "ScanRunes", + "ScanWords", + "Scanner", + "SplitFunc", + "Writer", + }, + "bytes": []string{ + "Buffer", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "Equal", + "EqualFold", + "ErrTooLarge", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "MinRead", + "NewBuffer", + "NewBufferString", + "NewReader", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Runes", + "Split", + "SplitAfter", + "SplitAfterN", + "SplitN", + "Title", + "ToLower", + "ToLowerSpecial", + "ToTitle", + 
"ToTitleSpecial", + "ToUpper", + "ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "compress/bzip2": []string{ + "NewReader", + "StructuralError", + }, + "compress/flate": []string{ + "BestCompression", + "BestSpeed", + "CorruptInputError", + "DefaultCompression", + "HuffmanOnly", + "InternalError", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterDict", + "NoCompression", + "ReadError", + "Reader", + "Resetter", + "WriteError", + "Writer", + }, + "compress/gzip": []string{ + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrHeader", + "Header", + "HuffmanOnly", + "NewReader", + "NewWriter", + "NewWriterLevel", + "NoCompression", + "Reader", + "Writer", + }, + "compress/lzw": []string{ + "LSB", + "MSB", + "NewReader", + "NewWriter", + "Order", + }, + "compress/zlib": []string{ + "BestCompression", + "BestSpeed", + "DefaultCompression", + "ErrChecksum", + "ErrDictionary", + "ErrHeader", + "HuffmanOnly", + "NewReader", + "NewReaderDict", + "NewWriter", + "NewWriterLevel", + "NewWriterLevelDict", + "NoCompression", + "Resetter", + "Writer", + }, + "container/heap": []string{ + "Fix", + "Init", + "Interface", + "Pop", + "Push", + "Remove", + }, + "container/list": []string{ + "Element", + "List", + "New", + }, + "container/ring": []string{ + "New", + "Ring", + }, + "context": []string{ + "Background", + "CancelFunc", + "Canceled", + "Context", + "DeadlineExceeded", + "TODO", + "WithCancel", + "WithDeadline", + "WithTimeout", + "WithValue", + }, + "crypto": []string{ + "BLAKE2b_256", + "BLAKE2b_384", + "BLAKE2b_512", + "BLAKE2s_256", + "Decrypter", + "DecrypterOpts", + "Hash", + "MD4", + "MD5", + "MD5SHA1", + "PrivateKey", + "PublicKey", + "RIPEMD160", + "RegisterHash", + "SHA1", + "SHA224", + "SHA256", + "SHA384", + "SHA3_224", + "SHA3_256", + "SHA3_384", + "SHA3_512", + "SHA512", + "SHA512_224", + "SHA512_256", + "Signer", + "SignerOpts", + }, + "crypto/aes": []string{ + "BlockSize", + "KeySizeError", + "NewCipher", + }, + "crypto/cipher": []string{ + "AEAD", + "Block", + "BlockMode", + "NewCBCDecrypter", + "NewCBCEncrypter", + "NewCFBDecrypter", + "NewCFBEncrypter", + "NewCTR", + "NewGCM", + "NewGCMWithNonceSize", + "NewGCMWithTagSize", + "NewOFB", + "Stream", + "StreamReader", + "StreamWriter", + }, + "crypto/des": []string{ + "BlockSize", + "KeySizeError", + "NewCipher", + "NewTripleDESCipher", + }, + "crypto/dsa": []string{ + "ErrInvalidPublicKey", + "GenerateKey", + "GenerateParameters", + "L1024N160", + "L2048N224", + "L2048N256", + "L3072N256", + "ParameterSizes", + "Parameters", + "PrivateKey", + "PublicKey", + "Sign", + "Verify", + }, + "crypto/ecdsa": []string{ + "GenerateKey", + "PrivateKey", + "PublicKey", + "Sign", + "SignASN1", + "Verify", + "VerifyASN1", + }, + "crypto/ed25519": []string{ + "GenerateKey", + "NewKeyFromSeed", + "PrivateKey", + "PrivateKeySize", + "PublicKey", + "PublicKeySize", + "SeedSize", + "Sign", + "SignatureSize", + "Verify", + }, + "crypto/elliptic": []string{ + "Curve", + "CurveParams", + "GenerateKey", + "Marshal", + "MarshalCompressed", + "P224", + "P256", + "P384", + "P521", + "Unmarshal", + "UnmarshalCompressed", + }, + "crypto/hmac": []string{ + "Equal", + "New", + }, + "crypto/md5": []string{ + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/rand": []string{ + "Int", + "Prime", + "Read", + "Reader", + }, + "crypto/rc4": []string{ + "Cipher", + "KeySizeError", + 
"NewCipher", + }, + "crypto/rsa": []string{ + "CRTValue", + "DecryptOAEP", + "DecryptPKCS1v15", + "DecryptPKCS1v15SessionKey", + "EncryptOAEP", + "EncryptPKCS1v15", + "ErrDecryption", + "ErrMessageTooLong", + "ErrVerification", + "GenerateKey", + "GenerateMultiPrimeKey", + "OAEPOptions", + "PKCS1v15DecryptOptions", + "PSSOptions", + "PSSSaltLengthAuto", + "PSSSaltLengthEqualsHash", + "PrecomputedValues", + "PrivateKey", + "PublicKey", + "SignPKCS1v15", + "SignPSS", + "VerifyPKCS1v15", + "VerifyPSS", + }, + "crypto/sha1": []string{ + "BlockSize", + "New", + "Size", + "Sum", + }, + "crypto/sha256": []string{ + "BlockSize", + "New", + "New224", + "Size", + "Size224", + "Sum224", + "Sum256", + }, + "crypto/sha512": []string{ + "BlockSize", + "New", + "New384", + "New512_224", + "New512_256", + "Size", + "Size224", + "Size256", + "Size384", + "Sum384", + "Sum512", + "Sum512_224", + "Sum512_256", + }, + "crypto/subtle": []string{ + "ConstantTimeByteEq", + "ConstantTimeCompare", + "ConstantTimeCopy", + "ConstantTimeEq", + "ConstantTimeLessOrEq", + "ConstantTimeSelect", + }, + "crypto/tls": []string{ + "Certificate", + "CertificateRequestInfo", + "CipherSuite", + "CipherSuiteName", + "CipherSuites", + "Client", + "ClientAuthType", + "ClientHelloInfo", + "ClientSessionCache", + "ClientSessionState", + "Config", + "Conn", + "ConnectionState", + "CurveID", + "CurveP256", + "CurveP384", + "CurveP521", + "Dial", + "DialWithDialer", + "Dialer", + "ECDSAWithP256AndSHA256", + "ECDSAWithP384AndSHA384", + "ECDSAWithP521AndSHA512", + "ECDSAWithSHA1", + "Ed25519", + "InsecureCipherSuites", + "Listen", + "LoadX509KeyPair", + "NewLRUClientSessionCache", + "NewListener", + "NoClientCert", + "PKCS1WithSHA1", + "PKCS1WithSHA256", + "PKCS1WithSHA384", + "PKCS1WithSHA512", + "PSSWithSHA256", + "PSSWithSHA384", + "PSSWithSHA512", + "RecordHeaderError", + "RenegotiateFreelyAsClient", + "RenegotiateNever", + "RenegotiateOnceAsClient", + "RenegotiationSupport", + "RequestClientCert", + "RequireAndVerifyClientCert", + "RequireAnyClientCert", + "Server", + "SignatureScheme", + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + "TLS_FALLBACK_SCSV", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_RC4_128_SHA", + "VerifyClientCertIfGiven", + "VersionSSL30", + "VersionTLS10", + "VersionTLS11", + "VersionTLS12", + "VersionTLS13", + "X25519", + "X509KeyPair", + }, + "crypto/x509": []string{ + "CANotAuthorizedForExtKeyUsage", + "CANotAuthorizedForThisName", + "CertPool", + "Certificate", + "CertificateInvalidError", + "CertificateRequest", + "ConstraintViolationError", + "CreateCertificate", + 
"CreateCertificateRequest", + "CreateRevocationList", + "DSA", + "DSAWithSHA1", + "DSAWithSHA256", + "DecryptPEMBlock", + "ECDSA", + "ECDSAWithSHA1", + "ECDSAWithSHA256", + "ECDSAWithSHA384", + "ECDSAWithSHA512", + "Ed25519", + "EncryptPEMBlock", + "ErrUnsupportedAlgorithm", + "Expired", + "ExtKeyUsage", + "ExtKeyUsageAny", + "ExtKeyUsageClientAuth", + "ExtKeyUsageCodeSigning", + "ExtKeyUsageEmailProtection", + "ExtKeyUsageIPSECEndSystem", + "ExtKeyUsageIPSECTunnel", + "ExtKeyUsageIPSECUser", + "ExtKeyUsageMicrosoftCommercialCodeSigning", + "ExtKeyUsageMicrosoftKernelCodeSigning", + "ExtKeyUsageMicrosoftServerGatedCrypto", + "ExtKeyUsageNetscapeServerGatedCrypto", + "ExtKeyUsageOCSPSigning", + "ExtKeyUsageServerAuth", + "ExtKeyUsageTimeStamping", + "HostnameError", + "IncompatibleUsage", + "IncorrectPasswordError", + "InsecureAlgorithmError", + "InvalidReason", + "IsEncryptedPEMBlock", + "KeyUsage", + "KeyUsageCRLSign", + "KeyUsageCertSign", + "KeyUsageContentCommitment", + "KeyUsageDataEncipherment", + "KeyUsageDecipherOnly", + "KeyUsageDigitalSignature", + "KeyUsageEncipherOnly", + "KeyUsageKeyAgreement", + "KeyUsageKeyEncipherment", + "MD2WithRSA", + "MD5WithRSA", + "MarshalECPrivateKey", + "MarshalPKCS1PrivateKey", + "MarshalPKCS1PublicKey", + "MarshalPKCS8PrivateKey", + "MarshalPKIXPublicKey", + "NameConstraintsWithoutSANs", + "NameMismatch", + "NewCertPool", + "NotAuthorizedToSign", + "PEMCipher", + "PEMCipher3DES", + "PEMCipherAES128", + "PEMCipherAES192", + "PEMCipherAES256", + "PEMCipherDES", + "ParseCRL", + "ParseCertificate", + "ParseCertificateRequest", + "ParseCertificates", + "ParseDERCRL", + "ParseECPrivateKey", + "ParsePKCS1PrivateKey", + "ParsePKCS1PublicKey", + "ParsePKCS8PrivateKey", + "ParsePKIXPublicKey", + "PublicKeyAlgorithm", + "PureEd25519", + "RSA", + "RevocationList", + "SHA1WithRSA", + "SHA256WithRSA", + "SHA256WithRSAPSS", + "SHA384WithRSA", + "SHA384WithRSAPSS", + "SHA512WithRSA", + "SHA512WithRSAPSS", + "SignatureAlgorithm", + "SystemCertPool", + "SystemRootsError", + "TooManyConstraints", + "TooManyIntermediates", + "UnconstrainedName", + "UnhandledCriticalExtension", + "UnknownAuthorityError", + "UnknownPublicKeyAlgorithm", + "UnknownSignatureAlgorithm", + "VerifyOptions", + }, + "crypto/x509/pkix": []string{ + "AlgorithmIdentifier", + "AttributeTypeAndValue", + "AttributeTypeAndValueSET", + "CertificateList", + "Extension", + "Name", + "RDNSequence", + "RelativeDistinguishedNameSET", + "RevokedCertificate", + "TBSCertificateList", + }, + "database/sql": []string{ + "ColumnType", + "Conn", + "DB", + "DBStats", + "Drivers", + "ErrConnDone", + "ErrNoRows", + "ErrTxDone", + "IsolationLevel", + "LevelDefault", + "LevelLinearizable", + "LevelReadCommitted", + "LevelReadUncommitted", + "LevelRepeatableRead", + "LevelSerializable", + "LevelSnapshot", + "LevelWriteCommitted", + "Named", + "NamedArg", + "NullBool", + "NullFloat64", + "NullInt32", + "NullInt64", + "NullString", + "NullTime", + "Open", + "OpenDB", + "Out", + "RawBytes", + "Register", + "Result", + "Row", + "Rows", + "Scanner", + "Stmt", + "Tx", + "TxOptions", + }, + "database/sql/driver": []string{ + "Bool", + "ColumnConverter", + "Conn", + "ConnBeginTx", + "ConnPrepareContext", + "Connector", + "DefaultParameterConverter", + "Driver", + "DriverContext", + "ErrBadConn", + "ErrRemoveArgument", + "ErrSkip", + "Execer", + "ExecerContext", + "Int32", + "IsScanValue", + "IsValue", + "IsolationLevel", + "NamedValue", + "NamedValueChecker", + "NotNull", + "Null", + "Pinger", + "Queryer", + "QueryerContext", 
+ "Result", + "ResultNoRows", + "Rows", + "RowsAffected", + "RowsColumnTypeDatabaseTypeName", + "RowsColumnTypeLength", + "RowsColumnTypeNullable", + "RowsColumnTypePrecisionScale", + "RowsColumnTypeScanType", + "RowsNextResultSet", + "SessionResetter", + "Stmt", + "StmtExecContext", + "StmtQueryContext", + "String", + "Tx", + "TxOptions", + "Validator", + "Value", + "ValueConverter", + "Valuer", + }, + "debug/dwarf": []string{ + "AddrType", + "ArrayType", + "Attr", + "AttrAbstractOrigin", + "AttrAccessibility", + "AttrAddrBase", + "AttrAddrClass", + "AttrAlignment", + "AttrAllocated", + "AttrArtificial", + "AttrAssociated", + "AttrBaseTypes", + "AttrBinaryScale", + "AttrBitOffset", + "AttrBitSize", + "AttrByteSize", + "AttrCallAllCalls", + "AttrCallAllSourceCalls", + "AttrCallAllTailCalls", + "AttrCallColumn", + "AttrCallDataLocation", + "AttrCallDataValue", + "AttrCallFile", + "AttrCallLine", + "AttrCallOrigin", + "AttrCallPC", + "AttrCallParameter", + "AttrCallReturnPC", + "AttrCallTailCall", + "AttrCallTarget", + "AttrCallTargetClobbered", + "AttrCallValue", + "AttrCalling", + "AttrCommonRef", + "AttrCompDir", + "AttrConstExpr", + "AttrConstValue", + "AttrContainingType", + "AttrCount", + "AttrDataBitOffset", + "AttrDataLocation", + "AttrDataMemberLoc", + "AttrDecimalScale", + "AttrDecimalSign", + "AttrDeclColumn", + "AttrDeclFile", + "AttrDeclLine", + "AttrDeclaration", + "AttrDefaultValue", + "AttrDefaulted", + "AttrDeleted", + "AttrDescription", + "AttrDigitCount", + "AttrDiscr", + "AttrDiscrList", + "AttrDiscrValue", + "AttrDwoName", + "AttrElemental", + "AttrEncoding", + "AttrEndianity", + "AttrEntrypc", + "AttrEnumClass", + "AttrExplicit", + "AttrExportSymbols", + "AttrExtension", + "AttrExternal", + "AttrFrameBase", + "AttrFriend", + "AttrHighpc", + "AttrIdentifierCase", + "AttrImport", + "AttrInline", + "AttrIsOptional", + "AttrLanguage", + "AttrLinkageName", + "AttrLocation", + "AttrLoclistsBase", + "AttrLowerBound", + "AttrLowpc", + "AttrMacroInfo", + "AttrMacros", + "AttrMainSubprogram", + "AttrMutable", + "AttrName", + "AttrNamelistItem", + "AttrNoreturn", + "AttrObjectPointer", + "AttrOrdering", + "AttrPictureString", + "AttrPriority", + "AttrProducer", + "AttrPrototyped", + "AttrPure", + "AttrRanges", + "AttrRank", + "AttrRecursive", + "AttrReference", + "AttrReturnAddr", + "AttrRnglistsBase", + "AttrRvalueReference", + "AttrSegment", + "AttrSibling", + "AttrSignature", + "AttrSmall", + "AttrSpecification", + "AttrStartScope", + "AttrStaticLink", + "AttrStmtList", + "AttrStrOffsetsBase", + "AttrStride", + "AttrStrideSize", + "AttrStringLength", + "AttrStringLengthBitSize", + "AttrStringLengthByteSize", + "AttrThreadsScaled", + "AttrTrampoline", + "AttrType", + "AttrUpperBound", + "AttrUseLocation", + "AttrUseUTF8", + "AttrVarParam", + "AttrVirtuality", + "AttrVisibility", + "AttrVtableElemLoc", + "BasicType", + "BoolType", + "CharType", + "Class", + "ClassAddrPtr", + "ClassAddress", + "ClassBlock", + "ClassConstant", + "ClassExprLoc", + "ClassFlag", + "ClassLinePtr", + "ClassLocList", + "ClassLocListPtr", + "ClassMacPtr", + "ClassRangeListPtr", + "ClassReference", + "ClassReferenceAlt", + "ClassReferenceSig", + "ClassRngList", + "ClassRngListsPtr", + "ClassStrOffsetsPtr", + "ClassString", + "ClassStringAlt", + "ClassUnknown", + "CommonType", + "ComplexType", + "Data", + "DecodeError", + "DotDotDotType", + "Entry", + "EnumType", + "EnumValue", + "ErrUnknownPC", + "Field", + "FloatType", + "FuncType", + "IntType", + "LineEntry", + "LineFile", + "LineReader", + 
"LineReaderPos", + "New", + "Offset", + "PtrType", + "QualType", + "Reader", + "StructField", + "StructType", + "Tag", + "TagAccessDeclaration", + "TagArrayType", + "TagAtomicType", + "TagBaseType", + "TagCallSite", + "TagCallSiteParameter", + "TagCatchDwarfBlock", + "TagClassType", + "TagCoarrayType", + "TagCommonDwarfBlock", + "TagCommonInclusion", + "TagCompileUnit", + "TagCondition", + "TagConstType", + "TagConstant", + "TagDwarfProcedure", + "TagDynamicType", + "TagEntryPoint", + "TagEnumerationType", + "TagEnumerator", + "TagFileType", + "TagFormalParameter", + "TagFriend", + "TagGenericSubrange", + "TagImmutableType", + "TagImportedDeclaration", + "TagImportedModule", + "TagImportedUnit", + "TagInheritance", + "TagInlinedSubroutine", + "TagInterfaceType", + "TagLabel", + "TagLexDwarfBlock", + "TagMember", + "TagModule", + "TagMutableType", + "TagNamelist", + "TagNamelistItem", + "TagNamespace", + "TagPackedType", + "TagPartialUnit", + "TagPointerType", + "TagPtrToMemberType", + "TagReferenceType", + "TagRestrictType", + "TagRvalueReferenceType", + "TagSetType", + "TagSharedType", + "TagSkeletonUnit", + "TagStringType", + "TagStructType", + "TagSubprogram", + "TagSubrangeType", + "TagSubroutineType", + "TagTemplateAlias", + "TagTemplateTypeParameter", + "TagTemplateValueParameter", + "TagThrownType", + "TagTryDwarfBlock", + "TagTypeUnit", + "TagTypedef", + "TagUnionType", + "TagUnspecifiedParameters", + "TagUnspecifiedType", + "TagVariable", + "TagVariant", + "TagVariantPart", + "TagVolatileType", + "TagWithStmt", + "Type", + "TypedefType", + "UcharType", + "UintType", + "UnspecifiedType", + "UnsupportedType", + "VoidType", + }, + "debug/elf": []string{ + "ARM_MAGIC_TRAMP_NUMBER", + "COMPRESS_HIOS", + "COMPRESS_HIPROC", + "COMPRESS_LOOS", + "COMPRESS_LOPROC", + "COMPRESS_ZLIB", + "Chdr32", + "Chdr64", + "Class", + "CompressionType", + "DF_BIND_NOW", + "DF_ORIGIN", + "DF_STATIC_TLS", + "DF_SYMBOLIC", + "DF_TEXTREL", + "DT_ADDRRNGHI", + "DT_ADDRRNGLO", + "DT_AUDIT", + "DT_AUXILIARY", + "DT_BIND_NOW", + "DT_CHECKSUM", + "DT_CONFIG", + "DT_DEBUG", + "DT_DEPAUDIT", + "DT_ENCODING", + "DT_FEATURE", + "DT_FILTER", + "DT_FINI", + "DT_FINI_ARRAY", + "DT_FINI_ARRAYSZ", + "DT_FLAGS", + "DT_FLAGS_1", + "DT_GNU_CONFLICT", + "DT_GNU_CONFLICTSZ", + "DT_GNU_HASH", + "DT_GNU_LIBLIST", + "DT_GNU_LIBLISTSZ", + "DT_GNU_PRELINKED", + "DT_HASH", + "DT_HIOS", + "DT_HIPROC", + "DT_INIT", + "DT_INIT_ARRAY", + "DT_INIT_ARRAYSZ", + "DT_JMPREL", + "DT_LOOS", + "DT_LOPROC", + "DT_MIPS_AUX_DYNAMIC", + "DT_MIPS_BASE_ADDRESS", + "DT_MIPS_COMPACT_SIZE", + "DT_MIPS_CONFLICT", + "DT_MIPS_CONFLICTNO", + "DT_MIPS_CXX_FLAGS", + "DT_MIPS_DELTA_CLASS", + "DT_MIPS_DELTA_CLASSSYM", + "DT_MIPS_DELTA_CLASSSYM_NO", + "DT_MIPS_DELTA_CLASS_NO", + "DT_MIPS_DELTA_INSTANCE", + "DT_MIPS_DELTA_INSTANCE_NO", + "DT_MIPS_DELTA_RELOC", + "DT_MIPS_DELTA_RELOC_NO", + "DT_MIPS_DELTA_SYM", + "DT_MIPS_DELTA_SYM_NO", + "DT_MIPS_DYNSTR_ALIGN", + "DT_MIPS_FLAGS", + "DT_MIPS_GOTSYM", + "DT_MIPS_GP_VALUE", + "DT_MIPS_HIDDEN_GOTIDX", + "DT_MIPS_HIPAGENO", + "DT_MIPS_ICHECKSUM", + "DT_MIPS_INTERFACE", + "DT_MIPS_INTERFACE_SIZE", + "DT_MIPS_IVERSION", + "DT_MIPS_LIBLIST", + "DT_MIPS_LIBLISTNO", + "DT_MIPS_LOCALPAGE_GOTIDX", + "DT_MIPS_LOCAL_GOTIDX", + "DT_MIPS_LOCAL_GOTNO", + "DT_MIPS_MSYM", + "DT_MIPS_OPTIONS", + "DT_MIPS_PERF_SUFFIX", + "DT_MIPS_PIXIE_INIT", + "DT_MIPS_PLTGOT", + "DT_MIPS_PROTECTED_GOTIDX", + "DT_MIPS_RLD_MAP", + "DT_MIPS_RLD_MAP_REL", + "DT_MIPS_RLD_TEXT_RESOLVE_ADDR", + "DT_MIPS_RLD_VERSION", + "DT_MIPS_RWPLT", + 
"DT_MIPS_SYMBOL_LIB", + "DT_MIPS_SYMTABNO", + "DT_MIPS_TIME_STAMP", + "DT_MIPS_UNREFEXTNO", + "DT_MOVEENT", + "DT_MOVESZ", + "DT_MOVETAB", + "DT_NEEDED", + "DT_NULL", + "DT_PLTGOT", + "DT_PLTPAD", + "DT_PLTPADSZ", + "DT_PLTREL", + "DT_PLTRELSZ", + "DT_POSFLAG_1", + "DT_PPC64_GLINK", + "DT_PPC64_OPD", + "DT_PPC64_OPDSZ", + "DT_PPC64_OPT", + "DT_PPC_GOT", + "DT_PPC_OPT", + "DT_PREINIT_ARRAY", + "DT_PREINIT_ARRAYSZ", + "DT_REL", + "DT_RELA", + "DT_RELACOUNT", + "DT_RELAENT", + "DT_RELASZ", + "DT_RELCOUNT", + "DT_RELENT", + "DT_RELSZ", + "DT_RPATH", + "DT_RUNPATH", + "DT_SONAME", + "DT_SPARC_REGISTER", + "DT_STRSZ", + "DT_STRTAB", + "DT_SYMBOLIC", + "DT_SYMENT", + "DT_SYMINENT", + "DT_SYMINFO", + "DT_SYMINSZ", + "DT_SYMTAB", + "DT_SYMTAB_SHNDX", + "DT_TEXTREL", + "DT_TLSDESC_GOT", + "DT_TLSDESC_PLT", + "DT_USED", + "DT_VALRNGHI", + "DT_VALRNGLO", + "DT_VERDEF", + "DT_VERDEFNUM", + "DT_VERNEED", + "DT_VERNEEDNUM", + "DT_VERSYM", + "Data", + "Dyn32", + "Dyn64", + "DynFlag", + "DynTag", + "EI_ABIVERSION", + "EI_CLASS", + "EI_DATA", + "EI_NIDENT", + "EI_OSABI", + "EI_PAD", + "EI_VERSION", + "ELFCLASS32", + "ELFCLASS64", + "ELFCLASSNONE", + "ELFDATA2LSB", + "ELFDATA2MSB", + "ELFDATANONE", + "ELFMAG", + "ELFOSABI_86OPEN", + "ELFOSABI_AIX", + "ELFOSABI_ARM", + "ELFOSABI_AROS", + "ELFOSABI_CLOUDABI", + "ELFOSABI_FENIXOS", + "ELFOSABI_FREEBSD", + "ELFOSABI_HPUX", + "ELFOSABI_HURD", + "ELFOSABI_IRIX", + "ELFOSABI_LINUX", + "ELFOSABI_MODESTO", + "ELFOSABI_NETBSD", + "ELFOSABI_NONE", + "ELFOSABI_NSK", + "ELFOSABI_OPENBSD", + "ELFOSABI_OPENVMS", + "ELFOSABI_SOLARIS", + "ELFOSABI_STANDALONE", + "ELFOSABI_TRU64", + "EM_386", + "EM_486", + "EM_56800EX", + "EM_68HC05", + "EM_68HC08", + "EM_68HC11", + "EM_68HC12", + "EM_68HC16", + "EM_68K", + "EM_78KOR", + "EM_8051", + "EM_860", + "EM_88K", + "EM_960", + "EM_AARCH64", + "EM_ALPHA", + "EM_ALPHA_STD", + "EM_ALTERA_NIOS2", + "EM_AMDGPU", + "EM_ARC", + "EM_ARCA", + "EM_ARC_COMPACT", + "EM_ARC_COMPACT2", + "EM_ARM", + "EM_AVR", + "EM_AVR32", + "EM_BA1", + "EM_BA2", + "EM_BLACKFIN", + "EM_BPF", + "EM_C166", + "EM_CDP", + "EM_CE", + "EM_CLOUDSHIELD", + "EM_COGE", + "EM_COLDFIRE", + "EM_COOL", + "EM_COREA_1ST", + "EM_COREA_2ND", + "EM_CR", + "EM_CR16", + "EM_CRAYNV2", + "EM_CRIS", + "EM_CRX", + "EM_CSR_KALIMBA", + "EM_CUDA", + "EM_CYPRESS_M8C", + "EM_D10V", + "EM_D30V", + "EM_DSP24", + "EM_DSPIC30F", + "EM_DXP", + "EM_ECOG1", + "EM_ECOG16", + "EM_ECOG1X", + "EM_ECOG2", + "EM_ETPU", + "EM_EXCESS", + "EM_F2MC16", + "EM_FIREPATH", + "EM_FR20", + "EM_FR30", + "EM_FT32", + "EM_FX66", + "EM_H8S", + "EM_H8_300", + "EM_H8_300H", + "EM_H8_500", + "EM_HUANY", + "EM_IA_64", + "EM_INTEL205", + "EM_INTEL206", + "EM_INTEL207", + "EM_INTEL208", + "EM_INTEL209", + "EM_IP2K", + "EM_JAVELIN", + "EM_K10M", + "EM_KM32", + "EM_KMX16", + "EM_KMX32", + "EM_KMX8", + "EM_KVARC", + "EM_L10M", + "EM_LANAI", + "EM_LATTICEMICO32", + "EM_M16C", + "EM_M32", + "EM_M32C", + "EM_M32R", + "EM_MANIK", + "EM_MAX", + "EM_MAXQ30", + "EM_MCHP_PIC", + "EM_MCST_ELBRUS", + "EM_ME16", + "EM_METAG", + "EM_MICROBLAZE", + "EM_MIPS", + "EM_MIPS_RS3_LE", + "EM_MIPS_RS4_BE", + "EM_MIPS_X", + "EM_MMA", + "EM_MMDSP_PLUS", + "EM_MMIX", + "EM_MN10200", + "EM_MN10300", + "EM_MOXIE", + "EM_MSP430", + "EM_NCPU", + "EM_NDR1", + "EM_NDS32", + "EM_NONE", + "EM_NORC", + "EM_NS32K", + "EM_OPEN8", + "EM_OPENRISC", + "EM_PARISC", + "EM_PCP", + "EM_PDP10", + "EM_PDP11", + "EM_PDSP", + "EM_PJ", + "EM_PPC", + "EM_PPC64", + "EM_PRISM", + "EM_QDSP6", + "EM_R32C", + "EM_RCE", + "EM_RH32", + "EM_RISCV", + "EM_RL78", + "EM_RS08", + 
"EM_RX", + "EM_S370", + "EM_S390", + "EM_SCORE7", + "EM_SEP", + "EM_SE_C17", + "EM_SE_C33", + "EM_SH", + "EM_SHARC", + "EM_SLE9X", + "EM_SNP1K", + "EM_SPARC", + "EM_SPARC32PLUS", + "EM_SPARCV9", + "EM_ST100", + "EM_ST19", + "EM_ST200", + "EM_ST7", + "EM_ST9PLUS", + "EM_STARCORE", + "EM_STM8", + "EM_STXP7X", + "EM_SVX", + "EM_TILE64", + "EM_TILEGX", + "EM_TILEPRO", + "EM_TINYJ", + "EM_TI_ARP32", + "EM_TI_C2000", + "EM_TI_C5500", + "EM_TI_C6000", + "EM_TI_PRU", + "EM_TMM_GPP", + "EM_TPC", + "EM_TRICORE", + "EM_TRIMEDIA", + "EM_TSK3000", + "EM_UNICORE", + "EM_V800", + "EM_V850", + "EM_VAX", + "EM_VIDEOCORE", + "EM_VIDEOCORE3", + "EM_VIDEOCORE5", + "EM_VISIUM", + "EM_VPP500", + "EM_X86_64", + "EM_XCORE", + "EM_XGATE", + "EM_XIMO16", + "EM_XTENSA", + "EM_Z80", + "EM_ZSP", + "ET_CORE", + "ET_DYN", + "ET_EXEC", + "ET_HIOS", + "ET_HIPROC", + "ET_LOOS", + "ET_LOPROC", + "ET_NONE", + "ET_REL", + "EV_CURRENT", + "EV_NONE", + "ErrNoSymbols", + "File", + "FileHeader", + "FormatError", + "Header32", + "Header64", + "ImportedSymbol", + "Machine", + "NT_FPREGSET", + "NT_PRPSINFO", + "NT_PRSTATUS", + "NType", + "NewFile", + "OSABI", + "Open", + "PF_MASKOS", + "PF_MASKPROC", + "PF_R", + "PF_W", + "PF_X", + "PT_AARCH64_ARCHEXT", + "PT_AARCH64_UNWIND", + "PT_ARM_ARCHEXT", + "PT_ARM_EXIDX", + "PT_DYNAMIC", + "PT_GNU_EH_FRAME", + "PT_GNU_MBIND_HI", + "PT_GNU_MBIND_LO", + "PT_GNU_PROPERTY", + "PT_GNU_RELRO", + "PT_GNU_STACK", + "PT_HIOS", + "PT_HIPROC", + "PT_INTERP", + "PT_LOAD", + "PT_LOOS", + "PT_LOPROC", + "PT_MIPS_ABIFLAGS", + "PT_MIPS_OPTIONS", + "PT_MIPS_REGINFO", + "PT_MIPS_RTPROC", + "PT_NOTE", + "PT_NULL", + "PT_OPENBSD_BOOTDATA", + "PT_OPENBSD_RANDOMIZE", + "PT_OPENBSD_WXNEEDED", + "PT_PAX_FLAGS", + "PT_PHDR", + "PT_S390_PGSTE", + "PT_SHLIB", + "PT_SUNWSTACK", + "PT_SUNW_EH_FRAME", + "PT_TLS", + "Prog", + "Prog32", + "Prog64", + "ProgFlag", + "ProgHeader", + "ProgType", + "R_386", + "R_386_16", + "R_386_32", + "R_386_32PLT", + "R_386_8", + "R_386_COPY", + "R_386_GLOB_DAT", + "R_386_GOT32", + "R_386_GOT32X", + "R_386_GOTOFF", + "R_386_GOTPC", + "R_386_IRELATIVE", + "R_386_JMP_SLOT", + "R_386_NONE", + "R_386_PC16", + "R_386_PC32", + "R_386_PC8", + "R_386_PLT32", + "R_386_RELATIVE", + "R_386_SIZE32", + "R_386_TLS_DESC", + "R_386_TLS_DESC_CALL", + "R_386_TLS_DTPMOD32", + "R_386_TLS_DTPOFF32", + "R_386_TLS_GD", + "R_386_TLS_GD_32", + "R_386_TLS_GD_CALL", + "R_386_TLS_GD_POP", + "R_386_TLS_GD_PUSH", + "R_386_TLS_GOTDESC", + "R_386_TLS_GOTIE", + "R_386_TLS_IE", + "R_386_TLS_IE_32", + "R_386_TLS_LDM", + "R_386_TLS_LDM_32", + "R_386_TLS_LDM_CALL", + "R_386_TLS_LDM_POP", + "R_386_TLS_LDM_PUSH", + "R_386_TLS_LDO_32", + "R_386_TLS_LE", + "R_386_TLS_LE_32", + "R_386_TLS_TPOFF", + "R_386_TLS_TPOFF32", + "R_390", + "R_390_12", + "R_390_16", + "R_390_20", + "R_390_32", + "R_390_64", + "R_390_8", + "R_390_COPY", + "R_390_GLOB_DAT", + "R_390_GOT12", + "R_390_GOT16", + "R_390_GOT20", + "R_390_GOT32", + "R_390_GOT64", + "R_390_GOTENT", + "R_390_GOTOFF", + "R_390_GOTOFF16", + "R_390_GOTOFF64", + "R_390_GOTPC", + "R_390_GOTPCDBL", + "R_390_GOTPLT12", + "R_390_GOTPLT16", + "R_390_GOTPLT20", + "R_390_GOTPLT32", + "R_390_GOTPLT64", + "R_390_GOTPLTENT", + "R_390_GOTPLTOFF16", + "R_390_GOTPLTOFF32", + "R_390_GOTPLTOFF64", + "R_390_JMP_SLOT", + "R_390_NONE", + "R_390_PC16", + "R_390_PC16DBL", + "R_390_PC32", + "R_390_PC32DBL", + "R_390_PC64", + "R_390_PLT16DBL", + "R_390_PLT32", + "R_390_PLT32DBL", + "R_390_PLT64", + "R_390_RELATIVE", + "R_390_TLS_DTPMOD", + "R_390_TLS_DTPOFF", + "R_390_TLS_GD32", + "R_390_TLS_GD64", + 
"R_390_TLS_GDCALL", + "R_390_TLS_GOTIE12", + "R_390_TLS_GOTIE20", + "R_390_TLS_GOTIE32", + "R_390_TLS_GOTIE64", + "R_390_TLS_IE32", + "R_390_TLS_IE64", + "R_390_TLS_IEENT", + "R_390_TLS_LDCALL", + "R_390_TLS_LDM32", + "R_390_TLS_LDM64", + "R_390_TLS_LDO32", + "R_390_TLS_LDO64", + "R_390_TLS_LE32", + "R_390_TLS_LE64", + "R_390_TLS_LOAD", + "R_390_TLS_TPOFF", + "R_AARCH64", + "R_AARCH64_ABS16", + "R_AARCH64_ABS32", + "R_AARCH64_ABS64", + "R_AARCH64_ADD_ABS_LO12_NC", + "R_AARCH64_ADR_GOT_PAGE", + "R_AARCH64_ADR_PREL_LO21", + "R_AARCH64_ADR_PREL_PG_HI21", + "R_AARCH64_ADR_PREL_PG_HI21_NC", + "R_AARCH64_CALL26", + "R_AARCH64_CONDBR19", + "R_AARCH64_COPY", + "R_AARCH64_GLOB_DAT", + "R_AARCH64_GOT_LD_PREL19", + "R_AARCH64_IRELATIVE", + "R_AARCH64_JUMP26", + "R_AARCH64_JUMP_SLOT", + "R_AARCH64_LD64_GOTOFF_LO15", + "R_AARCH64_LD64_GOTPAGE_LO15", + "R_AARCH64_LD64_GOT_LO12_NC", + "R_AARCH64_LDST128_ABS_LO12_NC", + "R_AARCH64_LDST16_ABS_LO12_NC", + "R_AARCH64_LDST32_ABS_LO12_NC", + "R_AARCH64_LDST64_ABS_LO12_NC", + "R_AARCH64_LDST8_ABS_LO12_NC", + "R_AARCH64_LD_PREL_LO19", + "R_AARCH64_MOVW_SABS_G0", + "R_AARCH64_MOVW_SABS_G1", + "R_AARCH64_MOVW_SABS_G2", + "R_AARCH64_MOVW_UABS_G0", + "R_AARCH64_MOVW_UABS_G0_NC", + "R_AARCH64_MOVW_UABS_G1", + "R_AARCH64_MOVW_UABS_G1_NC", + "R_AARCH64_MOVW_UABS_G2", + "R_AARCH64_MOVW_UABS_G2_NC", + "R_AARCH64_MOVW_UABS_G3", + "R_AARCH64_NONE", + "R_AARCH64_NULL", + "R_AARCH64_P32_ABS16", + "R_AARCH64_P32_ABS32", + "R_AARCH64_P32_ADD_ABS_LO12_NC", + "R_AARCH64_P32_ADR_GOT_PAGE", + "R_AARCH64_P32_ADR_PREL_LO21", + "R_AARCH64_P32_ADR_PREL_PG_HI21", + "R_AARCH64_P32_CALL26", + "R_AARCH64_P32_CONDBR19", + "R_AARCH64_P32_COPY", + "R_AARCH64_P32_GLOB_DAT", + "R_AARCH64_P32_GOT_LD_PREL19", + "R_AARCH64_P32_IRELATIVE", + "R_AARCH64_P32_JUMP26", + "R_AARCH64_P32_JUMP_SLOT", + "R_AARCH64_P32_LD32_GOT_LO12_NC", + "R_AARCH64_P32_LDST128_ABS_LO12_NC", + "R_AARCH64_P32_LDST16_ABS_LO12_NC", + "R_AARCH64_P32_LDST32_ABS_LO12_NC", + "R_AARCH64_P32_LDST64_ABS_LO12_NC", + "R_AARCH64_P32_LDST8_ABS_LO12_NC", + "R_AARCH64_P32_LD_PREL_LO19", + "R_AARCH64_P32_MOVW_SABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0", + "R_AARCH64_P32_MOVW_UABS_G0_NC", + "R_AARCH64_P32_MOVW_UABS_G1", + "R_AARCH64_P32_PREL16", + "R_AARCH64_P32_PREL32", + "R_AARCH64_P32_RELATIVE", + "R_AARCH64_P32_TLSDESC", + "R_AARCH64_P32_TLSDESC_ADD_LO12_NC", + "R_AARCH64_P32_TLSDESC_ADR_PAGE21", + "R_AARCH64_P32_TLSDESC_ADR_PREL21", + "R_AARCH64_P32_TLSDESC_CALL", + "R_AARCH64_P32_TLSDESC_LD32_LO12_NC", + "R_AARCH64_P32_TLSDESC_LD_PREL19", + "R_AARCH64_P32_TLSGD_ADD_LO12_NC", + "R_AARCH64_P32_TLSGD_ADR_PAGE21", + "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", + "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_P32_TLS_DTPMOD", + "R_AARCH64_P32_TLS_DTPREL", + "R_AARCH64_P32_TLS_TPREL", + "R_AARCH64_P32_TSTBR14", + "R_AARCH64_PREL16", + "R_AARCH64_PREL32", + "R_AARCH64_PREL64", + "R_AARCH64_RELATIVE", + "R_AARCH64_TLSDESC", + "R_AARCH64_TLSDESC_ADD", + "R_AARCH64_TLSDESC_ADD_LO12_NC", + "R_AARCH64_TLSDESC_ADR_PAGE21", + "R_AARCH64_TLSDESC_ADR_PREL21", + "R_AARCH64_TLSDESC_CALL", + "R_AARCH64_TLSDESC_LD64_LO12_NC", + "R_AARCH64_TLSDESC_LDR", + "R_AARCH64_TLSDESC_LD_PREL19", + "R_AARCH64_TLSDESC_OFF_G0_NC", + "R_AARCH64_TLSDESC_OFF_G1", + 
"R_AARCH64_TLSGD_ADD_LO12_NC", + "R_AARCH64_TLSGD_ADR_PAGE21", + "R_AARCH64_TLSGD_ADR_PREL21", + "R_AARCH64_TLSGD_MOVW_G0_NC", + "R_AARCH64_TLSGD_MOVW_G1", + "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", + "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", + "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", + "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", + "R_AARCH64_TLSLD_ADR_PAGE21", + "R_AARCH64_TLSLD_ADR_PREL21", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12", + "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", + "R_AARCH64_TLSLE_ADD_TPREL_HI12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12", + "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12", + "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G0", + "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G1", + "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", + "R_AARCH64_TLSLE_MOVW_TPREL_G2", + "R_AARCH64_TLS_DTPMOD64", + "R_AARCH64_TLS_DTPREL64", + "R_AARCH64_TLS_TPREL64", + "R_AARCH64_TSTBR14", + "R_ALPHA", + "R_ALPHA_BRADDR", + "R_ALPHA_COPY", + "R_ALPHA_GLOB_DAT", + "R_ALPHA_GPDISP", + "R_ALPHA_GPREL32", + "R_ALPHA_GPRELHIGH", + "R_ALPHA_GPRELLOW", + "R_ALPHA_GPVALUE", + "R_ALPHA_HINT", + "R_ALPHA_IMMED_BR_HI32", + "R_ALPHA_IMMED_GP_16", + "R_ALPHA_IMMED_GP_HI32", + "R_ALPHA_IMMED_LO32", + "R_ALPHA_IMMED_SCN_HI32", + "R_ALPHA_JMP_SLOT", + "R_ALPHA_LITERAL", + "R_ALPHA_LITUSE", + "R_ALPHA_NONE", + "R_ALPHA_OP_PRSHIFT", + "R_ALPHA_OP_PSUB", + "R_ALPHA_OP_PUSH", + "R_ALPHA_OP_STORE", + "R_ALPHA_REFLONG", + "R_ALPHA_REFQUAD", + "R_ALPHA_RELATIVE", + "R_ALPHA_SREL16", + "R_ALPHA_SREL32", + "R_ALPHA_SREL64", + "R_ARM", + "R_ARM_ABS12", + "R_ARM_ABS16", + "R_ARM_ABS32", + "R_ARM_ABS32_NOI", + "R_ARM_ABS8", + "R_ARM_ALU_PCREL_15_8", + "R_ARM_ALU_PCREL_23_15", + "R_ARM_ALU_PCREL_7_0", + "R_ARM_ALU_PC_G0", + "R_ARM_ALU_PC_G0_NC", + "R_ARM_ALU_PC_G1", + "R_ARM_ALU_PC_G1_NC", + "R_ARM_ALU_PC_G2", + "R_ARM_ALU_SBREL_19_12_NC", + "R_ARM_ALU_SBREL_27_20_CK", + "R_ARM_ALU_SB_G0", + "R_ARM_ALU_SB_G0_NC", + "R_ARM_ALU_SB_G1", + "R_ARM_ALU_SB_G1_NC", + "R_ARM_ALU_SB_G2", + "R_ARM_AMP_VCALL9", + "R_ARM_BASE_ABS", + "R_ARM_CALL", + "R_ARM_COPY", + "R_ARM_GLOB_DAT", + "R_ARM_GNU_VTENTRY", + "R_ARM_GNU_VTINHERIT", + "R_ARM_GOT32", + "R_ARM_GOTOFF", + "R_ARM_GOTOFF12", + "R_ARM_GOTPC", + "R_ARM_GOTRELAX", + "R_ARM_GOT_ABS", + "R_ARM_GOT_BREL12", + "R_ARM_GOT_PREL", + "R_ARM_IRELATIVE", + "R_ARM_JUMP24", + "R_ARM_JUMP_SLOT", + "R_ARM_LDC_PC_G0", + "R_ARM_LDC_PC_G1", + "R_ARM_LDC_PC_G2", + "R_ARM_LDC_SB_G0", + "R_ARM_LDC_SB_G1", + "R_ARM_LDC_SB_G2", + "R_ARM_LDRS_PC_G0", + "R_ARM_LDRS_PC_G1", + "R_ARM_LDRS_PC_G2", + "R_ARM_LDRS_SB_G0", + "R_ARM_LDRS_SB_G1", + "R_ARM_LDRS_SB_G2", + "R_ARM_LDR_PC_G1", + "R_ARM_LDR_PC_G2", + "R_ARM_LDR_SBREL_11_10_NC", + "R_ARM_LDR_SB_G0", + "R_ARM_LDR_SB_G1", + "R_ARM_LDR_SB_G2", + "R_ARM_ME_TOO", + "R_ARM_MOVT_ABS", + "R_ARM_MOVT_BREL", + "R_ARM_MOVT_PREL", + "R_ARM_MOVW_ABS_NC", + "R_ARM_MOVW_BREL", + "R_ARM_MOVW_BREL_NC", + "R_ARM_MOVW_PREL_NC", + "R_ARM_NONE", + "R_ARM_PC13", + "R_ARM_PC24", + "R_ARM_PLT32", + "R_ARM_PLT32_ABS", + "R_ARM_PREL31", + "R_ARM_PRIVATE_0", + "R_ARM_PRIVATE_1", + "R_ARM_PRIVATE_10", + "R_ARM_PRIVATE_11", + "R_ARM_PRIVATE_12", + "R_ARM_PRIVATE_13", + "R_ARM_PRIVATE_14", + "R_ARM_PRIVATE_15", + "R_ARM_PRIVATE_2", + "R_ARM_PRIVATE_3", + "R_ARM_PRIVATE_4", + "R_ARM_PRIVATE_5", + "R_ARM_PRIVATE_6", + "R_ARM_PRIVATE_7", + "R_ARM_PRIVATE_8", + "R_ARM_PRIVATE_9", + "R_ARM_RABS32", + "R_ARM_RBASE", + "R_ARM_REL32", + "R_ARM_REL32_NOI", + "R_ARM_RELATIVE", + "R_ARM_RPC24", + 
"R_ARM_RREL32", + "R_ARM_RSBREL32", + "R_ARM_RXPC25", + "R_ARM_SBREL31", + "R_ARM_SBREL32", + "R_ARM_SWI24", + "R_ARM_TARGET1", + "R_ARM_TARGET2", + "R_ARM_THM_ABS5", + "R_ARM_THM_ALU_ABS_G0_NC", + "R_ARM_THM_ALU_ABS_G1_NC", + "R_ARM_THM_ALU_ABS_G2_NC", + "R_ARM_THM_ALU_ABS_G3", + "R_ARM_THM_ALU_PREL_11_0", + "R_ARM_THM_GOT_BREL12", + "R_ARM_THM_JUMP11", + "R_ARM_THM_JUMP19", + "R_ARM_THM_JUMP24", + "R_ARM_THM_JUMP6", + "R_ARM_THM_JUMP8", + "R_ARM_THM_MOVT_ABS", + "R_ARM_THM_MOVT_BREL", + "R_ARM_THM_MOVT_PREL", + "R_ARM_THM_MOVW_ABS_NC", + "R_ARM_THM_MOVW_BREL", + "R_ARM_THM_MOVW_BREL_NC", + "R_ARM_THM_MOVW_PREL_NC", + "R_ARM_THM_PC12", + "R_ARM_THM_PC22", + "R_ARM_THM_PC8", + "R_ARM_THM_RPC22", + "R_ARM_THM_SWI8", + "R_ARM_THM_TLS_CALL", + "R_ARM_THM_TLS_DESCSEQ16", + "R_ARM_THM_TLS_DESCSEQ32", + "R_ARM_THM_XPC22", + "R_ARM_TLS_CALL", + "R_ARM_TLS_DESCSEQ", + "R_ARM_TLS_DTPMOD32", + "R_ARM_TLS_DTPOFF32", + "R_ARM_TLS_GD32", + "R_ARM_TLS_GOTDESC", + "R_ARM_TLS_IE12GP", + "R_ARM_TLS_IE32", + "R_ARM_TLS_LDM32", + "R_ARM_TLS_LDO12", + "R_ARM_TLS_LDO32", + "R_ARM_TLS_LE12", + "R_ARM_TLS_LE32", + "R_ARM_TLS_TPOFF32", + "R_ARM_V4BX", + "R_ARM_XPC25", + "R_INFO", + "R_INFO32", + "R_MIPS", + "R_MIPS_16", + "R_MIPS_26", + "R_MIPS_32", + "R_MIPS_64", + "R_MIPS_ADD_IMMEDIATE", + "R_MIPS_CALL16", + "R_MIPS_CALL_HI16", + "R_MIPS_CALL_LO16", + "R_MIPS_DELETE", + "R_MIPS_GOT16", + "R_MIPS_GOT_DISP", + "R_MIPS_GOT_HI16", + "R_MIPS_GOT_LO16", + "R_MIPS_GOT_OFST", + "R_MIPS_GOT_PAGE", + "R_MIPS_GPREL16", + "R_MIPS_GPREL32", + "R_MIPS_HI16", + "R_MIPS_HIGHER", + "R_MIPS_HIGHEST", + "R_MIPS_INSERT_A", + "R_MIPS_INSERT_B", + "R_MIPS_JALR", + "R_MIPS_LITERAL", + "R_MIPS_LO16", + "R_MIPS_NONE", + "R_MIPS_PC16", + "R_MIPS_PJUMP", + "R_MIPS_REL16", + "R_MIPS_REL32", + "R_MIPS_RELGOT", + "R_MIPS_SCN_DISP", + "R_MIPS_SHIFT5", + "R_MIPS_SHIFT6", + "R_MIPS_SUB", + "R_MIPS_TLS_DTPMOD32", + "R_MIPS_TLS_DTPMOD64", + "R_MIPS_TLS_DTPREL32", + "R_MIPS_TLS_DTPREL64", + "R_MIPS_TLS_DTPREL_HI16", + "R_MIPS_TLS_DTPREL_LO16", + "R_MIPS_TLS_GD", + "R_MIPS_TLS_GOTTPREL", + "R_MIPS_TLS_LDM", + "R_MIPS_TLS_TPREL32", + "R_MIPS_TLS_TPREL64", + "R_MIPS_TLS_TPREL_HI16", + "R_MIPS_TLS_TPREL_LO16", + "R_PPC", + "R_PPC64", + "R_PPC64_ADDR14", + "R_PPC64_ADDR14_BRNTAKEN", + "R_PPC64_ADDR14_BRTAKEN", + "R_PPC64_ADDR16", + "R_PPC64_ADDR16_DS", + "R_PPC64_ADDR16_HA", + "R_PPC64_ADDR16_HI", + "R_PPC64_ADDR16_HIGH", + "R_PPC64_ADDR16_HIGHA", + "R_PPC64_ADDR16_HIGHER", + "R_PPC64_ADDR16_HIGHERA", + "R_PPC64_ADDR16_HIGHEST", + "R_PPC64_ADDR16_HIGHESTA", + "R_PPC64_ADDR16_LO", + "R_PPC64_ADDR16_LO_DS", + "R_PPC64_ADDR24", + "R_PPC64_ADDR32", + "R_PPC64_ADDR64", + "R_PPC64_ADDR64_LOCAL", + "R_PPC64_DTPMOD64", + "R_PPC64_DTPREL16", + "R_PPC64_DTPREL16_DS", + "R_PPC64_DTPREL16_HA", + "R_PPC64_DTPREL16_HI", + "R_PPC64_DTPREL16_HIGH", + "R_PPC64_DTPREL16_HIGHA", + "R_PPC64_DTPREL16_HIGHER", + "R_PPC64_DTPREL16_HIGHERA", + "R_PPC64_DTPREL16_HIGHEST", + "R_PPC64_DTPREL16_HIGHESTA", + "R_PPC64_DTPREL16_LO", + "R_PPC64_DTPREL16_LO_DS", + "R_PPC64_DTPREL64", + "R_PPC64_ENTRY", + "R_PPC64_GOT16", + "R_PPC64_GOT16_DS", + "R_PPC64_GOT16_HA", + "R_PPC64_GOT16_HI", + "R_PPC64_GOT16_LO", + "R_PPC64_GOT16_LO_DS", + "R_PPC64_GOT_DTPREL16_DS", + "R_PPC64_GOT_DTPREL16_HA", + "R_PPC64_GOT_DTPREL16_HI", + "R_PPC64_GOT_DTPREL16_LO_DS", + "R_PPC64_GOT_TLSGD16", + "R_PPC64_GOT_TLSGD16_HA", + "R_PPC64_GOT_TLSGD16_HI", + "R_PPC64_GOT_TLSGD16_LO", + "R_PPC64_GOT_TLSLD16", + "R_PPC64_GOT_TLSLD16_HA", + "R_PPC64_GOT_TLSLD16_HI", + "R_PPC64_GOT_TLSLD16_LO", + 
"R_PPC64_GOT_TPREL16_DS", + "R_PPC64_GOT_TPREL16_HA", + "R_PPC64_GOT_TPREL16_HI", + "R_PPC64_GOT_TPREL16_LO_DS", + "R_PPC64_IRELATIVE", + "R_PPC64_JMP_IREL", + "R_PPC64_JMP_SLOT", + "R_PPC64_NONE", + "R_PPC64_PLT16_LO_DS", + "R_PPC64_PLTGOT16", + "R_PPC64_PLTGOT16_DS", + "R_PPC64_PLTGOT16_HA", + "R_PPC64_PLTGOT16_HI", + "R_PPC64_PLTGOT16_LO", + "R_PPC64_PLTGOT_LO_DS", + "R_PPC64_REL14", + "R_PPC64_REL14_BRNTAKEN", + "R_PPC64_REL14_BRTAKEN", + "R_PPC64_REL16", + "R_PPC64_REL16DX_HA", + "R_PPC64_REL16_HA", + "R_PPC64_REL16_HI", + "R_PPC64_REL16_LO", + "R_PPC64_REL24", + "R_PPC64_REL24_NOTOC", + "R_PPC64_REL32", + "R_PPC64_REL64", + "R_PPC64_SECTOFF_DS", + "R_PPC64_SECTOFF_LO_DS", + "R_PPC64_TLS", + "R_PPC64_TLSGD", + "R_PPC64_TLSLD", + "R_PPC64_TOC", + "R_PPC64_TOC16", + "R_PPC64_TOC16_DS", + "R_PPC64_TOC16_HA", + "R_PPC64_TOC16_HI", + "R_PPC64_TOC16_LO", + "R_PPC64_TOC16_LO_DS", + "R_PPC64_TOCSAVE", + "R_PPC64_TPREL16", + "R_PPC64_TPREL16_DS", + "R_PPC64_TPREL16_HA", + "R_PPC64_TPREL16_HI", + "R_PPC64_TPREL16_HIGH", + "R_PPC64_TPREL16_HIGHA", + "R_PPC64_TPREL16_HIGHER", + "R_PPC64_TPREL16_HIGHERA", + "R_PPC64_TPREL16_HIGHEST", + "R_PPC64_TPREL16_HIGHESTA", + "R_PPC64_TPREL16_LO", + "R_PPC64_TPREL16_LO_DS", + "R_PPC64_TPREL64", + "R_PPC_ADDR14", + "R_PPC_ADDR14_BRNTAKEN", + "R_PPC_ADDR14_BRTAKEN", + "R_PPC_ADDR16", + "R_PPC_ADDR16_HA", + "R_PPC_ADDR16_HI", + "R_PPC_ADDR16_LO", + "R_PPC_ADDR24", + "R_PPC_ADDR32", + "R_PPC_COPY", + "R_PPC_DTPMOD32", + "R_PPC_DTPREL16", + "R_PPC_DTPREL16_HA", + "R_PPC_DTPREL16_HI", + "R_PPC_DTPREL16_LO", + "R_PPC_DTPREL32", + "R_PPC_EMB_BIT_FLD", + "R_PPC_EMB_MRKREF", + "R_PPC_EMB_NADDR16", + "R_PPC_EMB_NADDR16_HA", + "R_PPC_EMB_NADDR16_HI", + "R_PPC_EMB_NADDR16_LO", + "R_PPC_EMB_NADDR32", + "R_PPC_EMB_RELSDA", + "R_PPC_EMB_RELSEC16", + "R_PPC_EMB_RELST_HA", + "R_PPC_EMB_RELST_HI", + "R_PPC_EMB_RELST_LO", + "R_PPC_EMB_SDA21", + "R_PPC_EMB_SDA2I16", + "R_PPC_EMB_SDA2REL", + "R_PPC_EMB_SDAI16", + "R_PPC_GLOB_DAT", + "R_PPC_GOT16", + "R_PPC_GOT16_HA", + "R_PPC_GOT16_HI", + "R_PPC_GOT16_LO", + "R_PPC_GOT_TLSGD16", + "R_PPC_GOT_TLSGD16_HA", + "R_PPC_GOT_TLSGD16_HI", + "R_PPC_GOT_TLSGD16_LO", + "R_PPC_GOT_TLSLD16", + "R_PPC_GOT_TLSLD16_HA", + "R_PPC_GOT_TLSLD16_HI", + "R_PPC_GOT_TLSLD16_LO", + "R_PPC_GOT_TPREL16", + "R_PPC_GOT_TPREL16_HA", + "R_PPC_GOT_TPREL16_HI", + "R_PPC_GOT_TPREL16_LO", + "R_PPC_JMP_SLOT", + "R_PPC_LOCAL24PC", + "R_PPC_NONE", + "R_PPC_PLT16_HA", + "R_PPC_PLT16_HI", + "R_PPC_PLT16_LO", + "R_PPC_PLT32", + "R_PPC_PLTREL24", + "R_PPC_PLTREL32", + "R_PPC_REL14", + "R_PPC_REL14_BRNTAKEN", + "R_PPC_REL14_BRTAKEN", + "R_PPC_REL24", + "R_PPC_REL32", + "R_PPC_RELATIVE", + "R_PPC_SDAREL16", + "R_PPC_SECTOFF", + "R_PPC_SECTOFF_HA", + "R_PPC_SECTOFF_HI", + "R_PPC_SECTOFF_LO", + "R_PPC_TLS", + "R_PPC_TPREL16", + "R_PPC_TPREL16_HA", + "R_PPC_TPREL16_HI", + "R_PPC_TPREL16_LO", + "R_PPC_TPREL32", + "R_PPC_UADDR16", + "R_PPC_UADDR32", + "R_RISCV", + "R_RISCV_32", + "R_RISCV_32_PCREL", + "R_RISCV_64", + "R_RISCV_ADD16", + "R_RISCV_ADD32", + "R_RISCV_ADD64", + "R_RISCV_ADD8", + "R_RISCV_ALIGN", + "R_RISCV_BRANCH", + "R_RISCV_CALL", + "R_RISCV_CALL_PLT", + "R_RISCV_COPY", + "R_RISCV_GNU_VTENTRY", + "R_RISCV_GNU_VTINHERIT", + "R_RISCV_GOT_HI20", + "R_RISCV_GPREL_I", + "R_RISCV_GPREL_S", + "R_RISCV_HI20", + "R_RISCV_JAL", + "R_RISCV_JUMP_SLOT", + "R_RISCV_LO12_I", + "R_RISCV_LO12_S", + "R_RISCV_NONE", + "R_RISCV_PCREL_HI20", + "R_RISCV_PCREL_LO12_I", + "R_RISCV_PCREL_LO12_S", + "R_RISCV_RELATIVE", + "R_RISCV_RELAX", + "R_RISCV_RVC_BRANCH", + "R_RISCV_RVC_JUMP", + 
"R_RISCV_RVC_LUI", + "R_RISCV_SET16", + "R_RISCV_SET32", + "R_RISCV_SET6", + "R_RISCV_SET8", + "R_RISCV_SUB16", + "R_RISCV_SUB32", + "R_RISCV_SUB6", + "R_RISCV_SUB64", + "R_RISCV_SUB8", + "R_RISCV_TLS_DTPMOD32", + "R_RISCV_TLS_DTPMOD64", + "R_RISCV_TLS_DTPREL32", + "R_RISCV_TLS_DTPREL64", + "R_RISCV_TLS_GD_HI20", + "R_RISCV_TLS_GOT_HI20", + "R_RISCV_TLS_TPREL32", + "R_RISCV_TLS_TPREL64", + "R_RISCV_TPREL_ADD", + "R_RISCV_TPREL_HI20", + "R_RISCV_TPREL_I", + "R_RISCV_TPREL_LO12_I", + "R_RISCV_TPREL_LO12_S", + "R_RISCV_TPREL_S", + "R_SPARC", + "R_SPARC_10", + "R_SPARC_11", + "R_SPARC_13", + "R_SPARC_16", + "R_SPARC_22", + "R_SPARC_32", + "R_SPARC_5", + "R_SPARC_6", + "R_SPARC_64", + "R_SPARC_7", + "R_SPARC_8", + "R_SPARC_COPY", + "R_SPARC_DISP16", + "R_SPARC_DISP32", + "R_SPARC_DISP64", + "R_SPARC_DISP8", + "R_SPARC_GLOB_DAT", + "R_SPARC_GLOB_JMP", + "R_SPARC_GOT10", + "R_SPARC_GOT13", + "R_SPARC_GOT22", + "R_SPARC_H44", + "R_SPARC_HH22", + "R_SPARC_HI22", + "R_SPARC_HIPLT22", + "R_SPARC_HIX22", + "R_SPARC_HM10", + "R_SPARC_JMP_SLOT", + "R_SPARC_L44", + "R_SPARC_LM22", + "R_SPARC_LO10", + "R_SPARC_LOPLT10", + "R_SPARC_LOX10", + "R_SPARC_M44", + "R_SPARC_NONE", + "R_SPARC_OLO10", + "R_SPARC_PC10", + "R_SPARC_PC22", + "R_SPARC_PCPLT10", + "R_SPARC_PCPLT22", + "R_SPARC_PCPLT32", + "R_SPARC_PC_HH22", + "R_SPARC_PC_HM10", + "R_SPARC_PC_LM22", + "R_SPARC_PLT32", + "R_SPARC_PLT64", + "R_SPARC_REGISTER", + "R_SPARC_RELATIVE", + "R_SPARC_UA16", + "R_SPARC_UA32", + "R_SPARC_UA64", + "R_SPARC_WDISP16", + "R_SPARC_WDISP19", + "R_SPARC_WDISP22", + "R_SPARC_WDISP30", + "R_SPARC_WPLT30", + "R_SYM32", + "R_SYM64", + "R_TYPE32", + "R_TYPE64", + "R_X86_64", + "R_X86_64_16", + "R_X86_64_32", + "R_X86_64_32S", + "R_X86_64_64", + "R_X86_64_8", + "R_X86_64_COPY", + "R_X86_64_DTPMOD64", + "R_X86_64_DTPOFF32", + "R_X86_64_DTPOFF64", + "R_X86_64_GLOB_DAT", + "R_X86_64_GOT32", + "R_X86_64_GOT64", + "R_X86_64_GOTOFF64", + "R_X86_64_GOTPC32", + "R_X86_64_GOTPC32_TLSDESC", + "R_X86_64_GOTPC64", + "R_X86_64_GOTPCREL", + "R_X86_64_GOTPCREL64", + "R_X86_64_GOTPCRELX", + "R_X86_64_GOTPLT64", + "R_X86_64_GOTTPOFF", + "R_X86_64_IRELATIVE", + "R_X86_64_JMP_SLOT", + "R_X86_64_NONE", + "R_X86_64_PC16", + "R_X86_64_PC32", + "R_X86_64_PC32_BND", + "R_X86_64_PC64", + "R_X86_64_PC8", + "R_X86_64_PLT32", + "R_X86_64_PLT32_BND", + "R_X86_64_PLTOFF64", + "R_X86_64_RELATIVE", + "R_X86_64_RELATIVE64", + "R_X86_64_REX_GOTPCRELX", + "R_X86_64_SIZE32", + "R_X86_64_SIZE64", + "R_X86_64_TLSDESC", + "R_X86_64_TLSDESC_CALL", + "R_X86_64_TLSGD", + "R_X86_64_TLSLD", + "R_X86_64_TPOFF32", + "R_X86_64_TPOFF64", + "Rel32", + "Rel64", + "Rela32", + "Rela64", + "SHF_ALLOC", + "SHF_COMPRESSED", + "SHF_EXECINSTR", + "SHF_GROUP", + "SHF_INFO_LINK", + "SHF_LINK_ORDER", + "SHF_MASKOS", + "SHF_MASKPROC", + "SHF_MERGE", + "SHF_OS_NONCONFORMING", + "SHF_STRINGS", + "SHF_TLS", + "SHF_WRITE", + "SHN_ABS", + "SHN_COMMON", + "SHN_HIOS", + "SHN_HIPROC", + "SHN_HIRESERVE", + "SHN_LOOS", + "SHN_LOPROC", + "SHN_LORESERVE", + "SHN_UNDEF", + "SHN_XINDEX", + "SHT_DYNAMIC", + "SHT_DYNSYM", + "SHT_FINI_ARRAY", + "SHT_GNU_ATTRIBUTES", + "SHT_GNU_HASH", + "SHT_GNU_LIBLIST", + "SHT_GNU_VERDEF", + "SHT_GNU_VERNEED", + "SHT_GNU_VERSYM", + "SHT_GROUP", + "SHT_HASH", + "SHT_HIOS", + "SHT_HIPROC", + "SHT_HIUSER", + "SHT_INIT_ARRAY", + "SHT_LOOS", + "SHT_LOPROC", + "SHT_LOUSER", + "SHT_NOBITS", + "SHT_NOTE", + "SHT_NULL", + "SHT_PREINIT_ARRAY", + "SHT_PROGBITS", + "SHT_REL", + "SHT_RELA", + "SHT_SHLIB", + "SHT_STRTAB", + "SHT_SYMTAB", + "SHT_SYMTAB_SHNDX", + "STB_GLOBAL", + 
"STB_HIOS", + "STB_HIPROC", + "STB_LOCAL", + "STB_LOOS", + "STB_LOPROC", + "STB_WEAK", + "STT_COMMON", + "STT_FILE", + "STT_FUNC", + "STT_HIOS", + "STT_HIPROC", + "STT_LOOS", + "STT_LOPROC", + "STT_NOTYPE", + "STT_OBJECT", + "STT_SECTION", + "STT_TLS", + "STV_DEFAULT", + "STV_HIDDEN", + "STV_INTERNAL", + "STV_PROTECTED", + "ST_BIND", + "ST_INFO", + "ST_TYPE", + "ST_VISIBILITY", + "Section", + "Section32", + "Section64", + "SectionFlag", + "SectionHeader", + "SectionIndex", + "SectionType", + "Sym32", + "Sym32Size", + "Sym64", + "Sym64Size", + "SymBind", + "SymType", + "SymVis", + "Symbol", + "Type", + "Version", + }, + "debug/gosym": []string{ + "DecodingError", + "Func", + "LineTable", + "NewLineTable", + "NewTable", + "Obj", + "Sym", + "Table", + "UnknownFileError", + "UnknownLineError", + }, + "debug/macho": []string{ + "ARM64_RELOC_ADDEND", + "ARM64_RELOC_BRANCH26", + "ARM64_RELOC_GOT_LOAD_PAGE21", + "ARM64_RELOC_GOT_LOAD_PAGEOFF12", + "ARM64_RELOC_PAGE21", + "ARM64_RELOC_PAGEOFF12", + "ARM64_RELOC_POINTER_TO_GOT", + "ARM64_RELOC_SUBTRACTOR", + "ARM64_RELOC_TLVP_LOAD_PAGE21", + "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", + "ARM64_RELOC_UNSIGNED", + "ARM_RELOC_BR24", + "ARM_RELOC_HALF", + "ARM_RELOC_HALF_SECTDIFF", + "ARM_RELOC_LOCAL_SECTDIFF", + "ARM_RELOC_PAIR", + "ARM_RELOC_PB_LA_PTR", + "ARM_RELOC_SECTDIFF", + "ARM_RELOC_VANILLA", + "ARM_THUMB_32BIT_BRANCH", + "ARM_THUMB_RELOC_BR22", + "Cpu", + "Cpu386", + "CpuAmd64", + "CpuArm", + "CpuArm64", + "CpuPpc", + "CpuPpc64", + "Dylib", + "DylibCmd", + "Dysymtab", + "DysymtabCmd", + "ErrNotFat", + "FatArch", + "FatArchHeader", + "FatFile", + "File", + "FileHeader", + "FlagAllModsBound", + "FlagAllowStackExecution", + "FlagAppExtensionSafe", + "FlagBindAtLoad", + "FlagBindsToWeak", + "FlagCanonical", + "FlagDeadStrippableDylib", + "FlagDyldLink", + "FlagForceFlat", + "FlagHasTLVDescriptors", + "FlagIncrLink", + "FlagLazyInit", + "FlagNoFixPrebinding", + "FlagNoHeapExecution", + "FlagNoMultiDefs", + "FlagNoReexportedDylibs", + "FlagNoUndefs", + "FlagPIE", + "FlagPrebindable", + "FlagPrebound", + "FlagRootSafe", + "FlagSetuidSafe", + "FlagSplitSegs", + "FlagSubsectionsViaSymbols", + "FlagTwoLevel", + "FlagWeakDefines", + "FormatError", + "GENERIC_RELOC_LOCAL_SECTDIFF", + "GENERIC_RELOC_PAIR", + "GENERIC_RELOC_PB_LA_PTR", + "GENERIC_RELOC_SECTDIFF", + "GENERIC_RELOC_TLV", + "GENERIC_RELOC_VANILLA", + "Load", + "LoadBytes", + "LoadCmd", + "LoadCmdDylib", + "LoadCmdDylinker", + "LoadCmdDysymtab", + "LoadCmdRpath", + "LoadCmdSegment", + "LoadCmdSegment64", + "LoadCmdSymtab", + "LoadCmdThread", + "LoadCmdUnixThread", + "Magic32", + "Magic64", + "MagicFat", + "NewFatFile", + "NewFile", + "Nlist32", + "Nlist64", + "Open", + "OpenFat", + "Regs386", + "RegsAMD64", + "Reloc", + "RelocTypeARM", + "RelocTypeARM64", + "RelocTypeGeneric", + "RelocTypeX86_64", + "Rpath", + "RpathCmd", + "Section", + "Section32", + "Section64", + "SectionHeader", + "Segment", + "Segment32", + "Segment64", + "SegmentHeader", + "Symbol", + "Symtab", + "SymtabCmd", + "Thread", + "Type", + "TypeBundle", + "TypeDylib", + "TypeExec", + "TypeObj", + "X86_64_RELOC_BRANCH", + "X86_64_RELOC_GOT", + "X86_64_RELOC_GOT_LOAD", + "X86_64_RELOC_SIGNED", + "X86_64_RELOC_SIGNED_1", + "X86_64_RELOC_SIGNED_2", + "X86_64_RELOC_SIGNED_4", + "X86_64_RELOC_SUBTRACTOR", + "X86_64_RELOC_TLV", + "X86_64_RELOC_UNSIGNED", + }, + "debug/pe": []string{ + "COFFSymbol", + "COFFSymbolSize", + "DataDirectory", + "File", + "FileHeader", + "FormatError", + "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", + 
"IMAGE_DIRECTORY_ENTRY_BASERELOC", + "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", + "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", + "IMAGE_DIRECTORY_ENTRY_DEBUG", + "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_EXCEPTION", + "IMAGE_DIRECTORY_ENTRY_EXPORT", + "IMAGE_DIRECTORY_ENTRY_GLOBALPTR", + "IMAGE_DIRECTORY_ENTRY_IAT", + "IMAGE_DIRECTORY_ENTRY_IMPORT", + "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", + "IMAGE_DIRECTORY_ENTRY_RESOURCE", + "IMAGE_DIRECTORY_ENTRY_SECURITY", + "IMAGE_DIRECTORY_ENTRY_TLS", + "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", + "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", + "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", + "IMAGE_DLLCHARACTERISTICS_GUARD_CF", + "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", + "IMAGE_DLLCHARACTERISTICS_NO_BIND", + "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", + "IMAGE_DLLCHARACTERISTICS_NO_SEH", + "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", + "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", + "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", + "IMAGE_FILE_32BIT_MACHINE", + "IMAGE_FILE_AGGRESIVE_WS_TRIM", + "IMAGE_FILE_BYTES_REVERSED_HI", + "IMAGE_FILE_BYTES_REVERSED_LO", + "IMAGE_FILE_DEBUG_STRIPPED", + "IMAGE_FILE_DLL", + "IMAGE_FILE_EXECUTABLE_IMAGE", + "IMAGE_FILE_LARGE_ADDRESS_AWARE", + "IMAGE_FILE_LINE_NUMS_STRIPPED", + "IMAGE_FILE_LOCAL_SYMS_STRIPPED", + "IMAGE_FILE_MACHINE_AM33", + "IMAGE_FILE_MACHINE_AMD64", + "IMAGE_FILE_MACHINE_ARM", + "IMAGE_FILE_MACHINE_ARM64", + "IMAGE_FILE_MACHINE_ARMNT", + "IMAGE_FILE_MACHINE_EBC", + "IMAGE_FILE_MACHINE_I386", + "IMAGE_FILE_MACHINE_IA64", + "IMAGE_FILE_MACHINE_M32R", + "IMAGE_FILE_MACHINE_MIPS16", + "IMAGE_FILE_MACHINE_MIPSFPU", + "IMAGE_FILE_MACHINE_MIPSFPU16", + "IMAGE_FILE_MACHINE_POWERPC", + "IMAGE_FILE_MACHINE_POWERPCFP", + "IMAGE_FILE_MACHINE_R4000", + "IMAGE_FILE_MACHINE_SH3", + "IMAGE_FILE_MACHINE_SH3DSP", + "IMAGE_FILE_MACHINE_SH4", + "IMAGE_FILE_MACHINE_SH5", + "IMAGE_FILE_MACHINE_THUMB", + "IMAGE_FILE_MACHINE_UNKNOWN", + "IMAGE_FILE_MACHINE_WCEMIPSV2", + "IMAGE_FILE_NET_RUN_FROM_SWAP", + "IMAGE_FILE_RELOCS_STRIPPED", + "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", + "IMAGE_FILE_SYSTEM", + "IMAGE_FILE_UP_SYSTEM_ONLY", + "IMAGE_SUBSYSTEM_EFI_APPLICATION", + "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", + "IMAGE_SUBSYSTEM_EFI_ROM", + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", + "IMAGE_SUBSYSTEM_NATIVE", + "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", + "IMAGE_SUBSYSTEM_OS2_CUI", + "IMAGE_SUBSYSTEM_POSIX_CUI", + "IMAGE_SUBSYSTEM_UNKNOWN", + "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", + "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", + "IMAGE_SUBSYSTEM_WINDOWS_CUI", + "IMAGE_SUBSYSTEM_WINDOWS_GUI", + "IMAGE_SUBSYSTEM_XBOX", + "ImportDirectory", + "NewFile", + "Open", + "OptionalHeader32", + "OptionalHeader64", + "Reloc", + "Section", + "SectionHeader", + "SectionHeader32", + "StringTable", + "Symbol", + }, + "debug/plan9obj": []string{ + "File", + "FileHeader", + "Magic386", + "Magic64", + "MagicAMD64", + "MagicARM", + "NewFile", + "Open", + "Section", + "SectionHeader", + "Sym", + }, + "embed": []string{ + "FS", + }, + "encoding": []string{ + "BinaryMarshaler", + "BinaryUnmarshaler", + "TextMarshaler", + "TextUnmarshaler", + }, + "encoding/ascii85": []string{ + "CorruptInputError", + "Decode", + "Encode", + "MaxEncodedLen", + "NewDecoder", + "NewEncoder", + }, + "encoding/asn1": []string{ + "BitString", + "ClassApplication", + "ClassContextSpecific", + "ClassPrivate", + "ClassUniversal", + "Enumerated", + "Flag", + "Marshal", + "MarshalWithParams", + "NullBytes", + "NullRawValue", + "ObjectIdentifier", + "RawContent", + "RawValue", + "StructuralError", + 
"SyntaxError", + "TagBMPString", + "TagBitString", + "TagBoolean", + "TagEnum", + "TagGeneralString", + "TagGeneralizedTime", + "TagIA5String", + "TagInteger", + "TagNull", + "TagNumericString", + "TagOID", + "TagOctetString", + "TagPrintableString", + "TagSequence", + "TagSet", + "TagT61String", + "TagUTCTime", + "TagUTF8String", + "Unmarshal", + "UnmarshalWithParams", + }, + "encoding/base32": []string{ + "CorruptInputError", + "Encoding", + "HexEncoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "StdEncoding", + "StdPadding", + }, + "encoding/base64": []string{ + "CorruptInputError", + "Encoding", + "NewDecoder", + "NewEncoder", + "NewEncoding", + "NoPadding", + "RawStdEncoding", + "RawURLEncoding", + "StdEncoding", + "StdPadding", + "URLEncoding", + }, + "encoding/binary": []string{ + "BigEndian", + "ByteOrder", + "LittleEndian", + "MaxVarintLen16", + "MaxVarintLen32", + "MaxVarintLen64", + "PutUvarint", + "PutVarint", + "Read", + "ReadUvarint", + "ReadVarint", + "Size", + "Uvarint", + "Varint", + "Write", + }, + "encoding/csv": []string{ + "ErrBareQuote", + "ErrFieldCount", + "ErrQuote", + "ErrTrailingComma", + "NewReader", + "NewWriter", + "ParseError", + "Reader", + "Writer", + }, + "encoding/gob": []string{ + "CommonType", + "Decoder", + "Encoder", + "GobDecoder", + "GobEncoder", + "NewDecoder", + "NewEncoder", + "Register", + "RegisterName", + }, + "encoding/hex": []string{ + "Decode", + "DecodeString", + "DecodedLen", + "Dump", + "Dumper", + "Encode", + "EncodeToString", + "EncodedLen", + "ErrLength", + "InvalidByteError", + "NewDecoder", + "NewEncoder", + }, + "encoding/json": []string{ + "Compact", + "Decoder", + "Delim", + "Encoder", + "HTMLEscape", + "Indent", + "InvalidUTF8Error", + "InvalidUnmarshalError", + "Marshal", + "MarshalIndent", + "Marshaler", + "MarshalerError", + "NewDecoder", + "NewEncoder", + "Number", + "RawMessage", + "SyntaxError", + "Token", + "Unmarshal", + "UnmarshalFieldError", + "UnmarshalTypeError", + "Unmarshaler", + "UnsupportedTypeError", + "UnsupportedValueError", + "Valid", + }, + "encoding/pem": []string{ + "Block", + "Decode", + "Encode", + "EncodeToMemory", + }, + "encoding/xml": []string{ + "Attr", + "CharData", + "Comment", + "CopyToken", + "Decoder", + "Directive", + "Encoder", + "EndElement", + "Escape", + "EscapeText", + "HTMLAutoClose", + "HTMLEntity", + "Header", + "Marshal", + "MarshalIndent", + "Marshaler", + "MarshalerAttr", + "Name", + "NewDecoder", + "NewEncoder", + "NewTokenDecoder", + "ProcInst", + "StartElement", + "SyntaxError", + "TagPathError", + "Token", + "TokenReader", + "Unmarshal", + "UnmarshalError", + "Unmarshaler", + "UnmarshalerAttr", + "UnsupportedTypeError", + }, + "errors": []string{ + "As", + "Is", + "New", + "Unwrap", + }, + "expvar": []string{ + "Do", + "Float", + "Func", + "Get", + "Handler", + "Int", + "KeyValue", + "Map", + "NewFloat", + "NewInt", + "NewMap", + "NewString", + "Publish", + "String", + "Var", + }, + "flag": []string{ + "Arg", + "Args", + "Bool", + "BoolVar", + "CommandLine", + "ContinueOnError", + "Duration", + "DurationVar", + "ErrHelp", + "ErrorHandling", + "ExitOnError", + "Flag", + "FlagSet", + "Float64", + "Float64Var", + "Func", + "Getter", + "Int", + "Int64", + "Int64Var", + "IntVar", + "Lookup", + "NArg", + "NFlag", + "NewFlagSet", + "PanicOnError", + "Parse", + "Parsed", + "PrintDefaults", + "Set", + "String", + "StringVar", + "Uint", + "Uint64", + "Uint64Var", + "UintVar", + "UnquoteUsage", + "Usage", + "Value", + "Var", + "Visit", + "VisitAll", + }, + "fmt": 
[]string{ + "Errorf", + "Formatter", + "Fprint", + "Fprintf", + "Fprintln", + "Fscan", + "Fscanf", + "Fscanln", + "GoStringer", + "Print", + "Printf", + "Println", + "Scan", + "ScanState", + "Scanf", + "Scanln", + "Scanner", + "Sprint", + "Sprintf", + "Sprintln", + "Sscan", + "Sscanf", + "Sscanln", + "State", + "Stringer", + }, + "go/ast": []string{ + "ArrayType", + "AssignStmt", + "Bad", + "BadDecl", + "BadExpr", + "BadStmt", + "BasicLit", + "BinaryExpr", + "BlockStmt", + "BranchStmt", + "CallExpr", + "CaseClause", + "ChanDir", + "ChanType", + "CommClause", + "Comment", + "CommentGroup", + "CommentMap", + "CompositeLit", + "Con", + "Decl", + "DeclStmt", + "DeferStmt", + "Ellipsis", + "EmptyStmt", + "Expr", + "ExprStmt", + "Field", + "FieldFilter", + "FieldList", + "File", + "FileExports", + "Filter", + "FilterDecl", + "FilterFile", + "FilterFuncDuplicates", + "FilterImportDuplicates", + "FilterPackage", + "FilterUnassociatedComments", + "ForStmt", + "Fprint", + "Fun", + "FuncDecl", + "FuncLit", + "FuncType", + "GenDecl", + "GoStmt", + "Ident", + "IfStmt", + "ImportSpec", + "Importer", + "IncDecStmt", + "IndexExpr", + "Inspect", + "InterfaceType", + "IsExported", + "KeyValueExpr", + "LabeledStmt", + "Lbl", + "MapType", + "MergeMode", + "MergePackageFiles", + "NewCommentMap", + "NewIdent", + "NewObj", + "NewPackage", + "NewScope", + "Node", + "NotNilFilter", + "ObjKind", + "Object", + "Package", + "PackageExports", + "ParenExpr", + "Pkg", + "Print", + "RECV", + "RangeStmt", + "ReturnStmt", + "SEND", + "Scope", + "SelectStmt", + "SelectorExpr", + "SendStmt", + "SliceExpr", + "SortImports", + "Spec", + "StarExpr", + "Stmt", + "StructType", + "SwitchStmt", + "Typ", + "TypeAssertExpr", + "TypeSpec", + "TypeSwitchStmt", + "UnaryExpr", + "ValueSpec", + "Var", + "Visitor", + "Walk", + }, + "go/build": []string{ + "AllowBinary", + "ArchChar", + "Context", + "Default", + "FindOnly", + "IgnoreVendor", + "Import", + "ImportComment", + "ImportDir", + "ImportMode", + "IsLocalImport", + "MultiplePackageError", + "NoGoError", + "Package", + "ToolDir", + }, + "go/build/constraint": []string{ + "AndExpr", + "Expr", + "IsGoBuild", + "IsPlusBuild", + "NotExpr", + "OrExpr", + "Parse", + "PlusBuildLines", + "SyntaxError", + "TagExpr", + }, + "go/constant": []string{ + "BinaryOp", + "BitLen", + "Bool", + "BoolVal", + "Bytes", + "Compare", + "Complex", + "Denom", + "Float", + "Float32Val", + "Float64Val", + "Imag", + "Int", + "Int64Val", + "Kind", + "Make", + "MakeBool", + "MakeFloat64", + "MakeFromBytes", + "MakeFromLiteral", + "MakeImag", + "MakeInt64", + "MakeString", + "MakeUint64", + "MakeUnknown", + "Num", + "Real", + "Shift", + "Sign", + "String", + "StringVal", + "ToComplex", + "ToFloat", + "ToInt", + "Uint64Val", + "UnaryOp", + "Unknown", + "Val", + "Value", + }, + "go/doc": []string{ + "AllDecls", + "AllMethods", + "Example", + "Examples", + "Filter", + "Func", + "IllegalPrefixes", + "IsPredeclared", + "Mode", + "New", + "NewFromFiles", + "Note", + "Package", + "PreserveAST", + "Synopsis", + "ToHTML", + "ToText", + "Type", + "Value", + }, + "go/format": []string{ + "Node", + "Source", + }, + "go/importer": []string{ + "Default", + "For", + "ForCompiler", + "Lookup", + }, + "go/parser": []string{ + "AllErrors", + "DeclarationErrors", + "ImportsOnly", + "Mode", + "PackageClauseOnly", + "ParseComments", + "ParseDir", + "ParseExpr", + "ParseExprFrom", + "ParseFile", + "SpuriousErrors", + "Trace", + }, + "go/printer": []string{ + "CommentedNode", + "Config", + "Fprint", + "Mode", + "RawFormat", + 
"SourcePos", + "TabIndent", + "UseSpaces", + }, + "go/scanner": []string{ + "Error", + "ErrorHandler", + "ErrorList", + "Mode", + "PrintError", + "ScanComments", + "Scanner", + }, + "go/token": []string{ + "ADD", + "ADD_ASSIGN", + "AND", + "AND_ASSIGN", + "AND_NOT", + "AND_NOT_ASSIGN", + "ARROW", + "ASSIGN", + "BREAK", + "CASE", + "CHAN", + "CHAR", + "COLON", + "COMMA", + "COMMENT", + "CONST", + "CONTINUE", + "DEC", + "DEFAULT", + "DEFER", + "DEFINE", + "ELLIPSIS", + "ELSE", + "EOF", + "EQL", + "FALLTHROUGH", + "FLOAT", + "FOR", + "FUNC", + "File", + "FileSet", + "GEQ", + "GO", + "GOTO", + "GTR", + "HighestPrec", + "IDENT", + "IF", + "ILLEGAL", + "IMAG", + "IMPORT", + "INC", + "INT", + "INTERFACE", + "IsExported", + "IsIdentifier", + "IsKeyword", + "LAND", + "LBRACE", + "LBRACK", + "LEQ", + "LOR", + "LPAREN", + "LSS", + "Lookup", + "LowestPrec", + "MAP", + "MUL", + "MUL_ASSIGN", + "NEQ", + "NOT", + "NewFileSet", + "NoPos", + "OR", + "OR_ASSIGN", + "PACKAGE", + "PERIOD", + "Pos", + "Position", + "QUO", + "QUO_ASSIGN", + "RANGE", + "RBRACE", + "RBRACK", + "REM", + "REM_ASSIGN", + "RETURN", + "RPAREN", + "SELECT", + "SEMICOLON", + "SHL", + "SHL_ASSIGN", + "SHR", + "SHR_ASSIGN", + "STRING", + "STRUCT", + "SUB", + "SUB_ASSIGN", + "SWITCH", + "TYPE", + "Token", + "UnaryPrec", + "VAR", + "XOR", + "XOR_ASSIGN", + }, + "go/types": []string{ + "Array", + "AssertableTo", + "AssignableTo", + "Basic", + "BasicInfo", + "BasicKind", + "Bool", + "Builtin", + "Byte", + "Chan", + "ChanDir", + "CheckExpr", + "Checker", + "Comparable", + "Complex128", + "Complex64", + "Config", + "Const", + "ConvertibleTo", + "DefPredeclaredTestFuncs", + "Default", + "Error", + "Eval", + "ExprString", + "FieldVal", + "Float32", + "Float64", + "Func", + "Id", + "Identical", + "IdenticalIgnoreTags", + "Implements", + "ImportMode", + "Importer", + "ImporterFrom", + "Info", + "Initializer", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "IsBoolean", + "IsComplex", + "IsConstType", + "IsFloat", + "IsInteger", + "IsInterface", + "IsNumeric", + "IsOrdered", + "IsString", + "IsUnsigned", + "IsUntyped", + "Label", + "LookupFieldOrMethod", + "Map", + "MethodExpr", + "MethodSet", + "MethodVal", + "MissingMethod", + "Named", + "NewArray", + "NewChan", + "NewChecker", + "NewConst", + "NewField", + "NewFunc", + "NewInterface", + "NewInterfaceType", + "NewLabel", + "NewMap", + "NewMethodSet", + "NewNamed", + "NewPackage", + "NewParam", + "NewPkgName", + "NewPointer", + "NewScope", + "NewSignature", + "NewSlice", + "NewStruct", + "NewTuple", + "NewTypeName", + "NewVar", + "Nil", + "Object", + "ObjectString", + "Package", + "PkgName", + "Pointer", + "Qualifier", + "RecvOnly", + "RelativeTo", + "Rune", + "Scope", + "Selection", + "SelectionKind", + "SelectionString", + "SendOnly", + "SendRecv", + "Signature", + "Sizes", + "SizesFor", + "Slice", + "StdSizes", + "String", + "Struct", + "Tuple", + "Typ", + "Type", + "TypeAndValue", + "TypeName", + "TypeString", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "Universe", + "Unsafe", + "UnsafePointer", + "UntypedBool", + "UntypedComplex", + "UntypedFloat", + "UntypedInt", + "UntypedNil", + "UntypedRune", + "UntypedString", + "Var", + "WriteExpr", + "WriteSignature", + "WriteType", + }, + "hash": []string{ + "Hash", + "Hash32", + "Hash64", + }, + "hash/adler32": []string{ + "Checksum", + "New", + "Size", + }, + "hash/crc32": []string{ + "Castagnoli", + "Checksum", + "ChecksumIEEE", + "IEEE", + "IEEETable", + "Koopman", + "MakeTable", + 
"New", + "NewIEEE", + "Size", + "Table", + "Update", + }, + "hash/crc64": []string{ + "Checksum", + "ECMA", + "ISO", + "MakeTable", + "New", + "Size", + "Table", + "Update", + }, + "hash/fnv": []string{ + "New128", + "New128a", + "New32", + "New32a", + "New64", + "New64a", + }, + "hash/maphash": []string{ + "Hash", + "MakeSeed", + "Seed", + }, + "html": []string{ + "EscapeString", + "UnescapeString", + }, + "html/template": []string{ + "CSS", + "ErrAmbigContext", + "ErrBadHTML", + "ErrBranchEnd", + "ErrEndContext", + "ErrNoSuchTemplate", + "ErrOutputContext", + "ErrPartialCharset", + "ErrPartialEscape", + "ErrPredefinedEscaper", + "ErrRangeLoopReentry", + "ErrSlashAmbig", + "Error", + "ErrorCode", + "FuncMap", + "HTML", + "HTMLAttr", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JS", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "JSStr", + "Must", + "New", + "OK", + "ParseFS", + "ParseFiles", + "ParseGlob", + "Srcset", + "Template", + "URL", + "URLQueryEscaper", + }, + "image": []string{ + "Alpha", + "Alpha16", + "Black", + "CMYK", + "Config", + "Decode", + "DecodeConfig", + "ErrFormat", + "Gray", + "Gray16", + "Image", + "NRGBA", + "NRGBA64", + "NYCbCrA", + "NewAlpha", + "NewAlpha16", + "NewCMYK", + "NewGray", + "NewGray16", + "NewNRGBA", + "NewNRGBA64", + "NewNYCbCrA", + "NewPaletted", + "NewRGBA", + "NewRGBA64", + "NewUniform", + "NewYCbCr", + "Opaque", + "Paletted", + "PalettedImage", + "Point", + "Pt", + "RGBA", + "RGBA64", + "Rect", + "Rectangle", + "RegisterFormat", + "Transparent", + "Uniform", + "White", + "YCbCr", + "YCbCrSubsampleRatio", + "YCbCrSubsampleRatio410", + "YCbCrSubsampleRatio411", + "YCbCrSubsampleRatio420", + "YCbCrSubsampleRatio422", + "YCbCrSubsampleRatio440", + "YCbCrSubsampleRatio444", + "ZP", + "ZR", + }, + "image/color": []string{ + "Alpha", + "Alpha16", + "Alpha16Model", + "AlphaModel", + "Black", + "CMYK", + "CMYKModel", + "CMYKToRGB", + "Color", + "Gray", + "Gray16", + "Gray16Model", + "GrayModel", + "Model", + "ModelFunc", + "NRGBA", + "NRGBA64", + "NRGBA64Model", + "NRGBAModel", + "NYCbCrA", + "NYCbCrAModel", + "Opaque", + "Palette", + "RGBA", + "RGBA64", + "RGBA64Model", + "RGBAModel", + "RGBToCMYK", + "RGBToYCbCr", + "Transparent", + "White", + "YCbCr", + "YCbCrModel", + "YCbCrToRGB", + }, + "image/color/palette": []string{ + "Plan9", + "WebSafe", + }, + "image/draw": []string{ + "Draw", + "DrawMask", + "Drawer", + "FloydSteinberg", + "Image", + "Op", + "Over", + "Quantizer", + "Src", + }, + "image/gif": []string{ + "Decode", + "DecodeAll", + "DecodeConfig", + "DisposalBackground", + "DisposalNone", + "DisposalPrevious", + "Encode", + "EncodeAll", + "GIF", + "Options", + }, + "image/jpeg": []string{ + "Decode", + "DecodeConfig", + "DefaultQuality", + "Encode", + "FormatError", + "Options", + "Reader", + "UnsupportedError", + }, + "image/png": []string{ + "BestCompression", + "BestSpeed", + "CompressionLevel", + "Decode", + "DecodeConfig", + "DefaultCompression", + "Encode", + "Encoder", + "EncoderBuffer", + "EncoderBufferPool", + "FormatError", + "NoCompression", + "UnsupportedError", + }, + "index/suffixarray": []string{ + "Index", + "New", + }, + "io": []string{ + "ByteReader", + "ByteScanner", + "ByteWriter", + "Closer", + "Copy", + "CopyBuffer", + "CopyN", + "Discard", + "EOF", + "ErrClosedPipe", + "ErrNoProgress", + "ErrShortBuffer", + "ErrShortWrite", + "ErrUnexpectedEOF", + "LimitReader", + "LimitedReader", + "MultiReader", + "MultiWriter", + "NewSectionReader", + "NopCloser", + "Pipe", + "PipeReader", + "PipeWriter", 
+ "ReadAll", + "ReadAtLeast", + "ReadCloser", + "ReadFull", + "ReadSeekCloser", + "ReadSeeker", + "ReadWriteCloser", + "ReadWriteSeeker", + "ReadWriter", + "Reader", + "ReaderAt", + "ReaderFrom", + "RuneReader", + "RuneScanner", + "SectionReader", + "SeekCurrent", + "SeekEnd", + "SeekStart", + "Seeker", + "StringWriter", + "TeeReader", + "WriteCloser", + "WriteSeeker", + "WriteString", + "Writer", + "WriterAt", + "WriterTo", + }, + "io/fs": []string{ + "DirEntry", + "ErrClosed", + "ErrExist", + "ErrInvalid", + "ErrNotExist", + "ErrPermission", + "FS", + "File", + "FileInfo", + "FileMode", + "Glob", + "GlobFS", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "PathError", + "ReadDir", + "ReadDirFS", + "ReadDirFile", + "ReadFile", + "ReadFileFS", + "SkipDir", + "Stat", + "StatFS", + "Sub", + "SubFS", + "ValidPath", + "WalkDir", + "WalkDirFunc", + }, + "io/ioutil": []string{ + "Discard", + "NopCloser", + "ReadAll", + "ReadDir", + "ReadFile", + "TempDir", + "TempFile", + "WriteFile", + }, + "log": []string{ + "Default", + "Fatal", + "Fatalf", + "Fatalln", + "Flags", + "LUTC", + "Ldate", + "Llongfile", + "Lmicroseconds", + "Lmsgprefix", + "Logger", + "Lshortfile", + "LstdFlags", + "Ltime", + "New", + "Output", + "Panic", + "Panicf", + "Panicln", + "Prefix", + "Print", + "Printf", + "Println", + "SetFlags", + "SetOutput", + "SetPrefix", + "Writer", + }, + "log/syslog": []string{ + "Dial", + "LOG_ALERT", + "LOG_AUTH", + "LOG_AUTHPRIV", + "LOG_CRIT", + "LOG_CRON", + "LOG_DAEMON", + "LOG_DEBUG", + "LOG_EMERG", + "LOG_ERR", + "LOG_FTP", + "LOG_INFO", + "LOG_KERN", + "LOG_LOCAL0", + "LOG_LOCAL1", + "LOG_LOCAL2", + "LOG_LOCAL3", + "LOG_LOCAL4", + "LOG_LOCAL5", + "LOG_LOCAL6", + "LOG_LOCAL7", + "LOG_LPR", + "LOG_MAIL", + "LOG_NEWS", + "LOG_NOTICE", + "LOG_SYSLOG", + "LOG_USER", + "LOG_UUCP", + "LOG_WARNING", + "New", + "NewLogger", + "Priority", + "Writer", + }, + "math": []string{ + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atan2", + "Atanh", + "Cbrt", + "Ceil", + "Copysign", + "Cos", + "Cosh", + "Dim", + "E", + "Erf", + "Erfc", + "Erfcinv", + "Erfinv", + "Exp", + "Exp2", + "Expm1", + "FMA", + "Float32bits", + "Float32frombits", + "Float64bits", + "Float64frombits", + "Floor", + "Frexp", + "Gamma", + "Hypot", + "Ilogb", + "Inf", + "IsInf", + "IsNaN", + "J0", + "J1", + "Jn", + "Ldexp", + "Lgamma", + "Ln10", + "Ln2", + "Log", + "Log10", + "Log10E", + "Log1p", + "Log2", + "Log2E", + "Logb", + "Max", + "MaxFloat32", + "MaxFloat64", + "MaxInt16", + "MaxInt32", + "MaxInt64", + "MaxInt8", + "MaxUint16", + "MaxUint32", + "MaxUint64", + "MaxUint8", + "Min", + "MinInt16", + "MinInt32", + "MinInt64", + "MinInt8", + "Mod", + "Modf", + "NaN", + "Nextafter", + "Nextafter32", + "Phi", + "Pi", + "Pow", + "Pow10", + "Remainder", + "Round", + "RoundToEven", + "Signbit", + "Sin", + "Sincos", + "Sinh", + "SmallestNonzeroFloat32", + "SmallestNonzeroFloat64", + "Sqrt", + "Sqrt2", + "SqrtE", + "SqrtPhi", + "SqrtPi", + "Tan", + "Tanh", + "Trunc", + "Y0", + "Y1", + "Yn", + }, + "math/big": []string{ + "Above", + "Accuracy", + "AwayFromZero", + "Below", + "ErrNaN", + "Exact", + "Float", + "Int", + "Jacobi", + "MaxBase", + "MaxExp", + "MaxPrec", + "MinExp", + "NewFloat", + "NewInt", + "NewRat", + "ParseFloat", + "Rat", + "RoundingMode", + "ToNearestAway", + "ToNearestEven", + "ToNegativeInf", + 
"ToPositiveInf", + "ToZero", + "Word", + }, + "math/bits": []string{ + "Add", + "Add32", + "Add64", + "Div", + "Div32", + "Div64", + "LeadingZeros", + "LeadingZeros16", + "LeadingZeros32", + "LeadingZeros64", + "LeadingZeros8", + "Len", + "Len16", + "Len32", + "Len64", + "Len8", + "Mul", + "Mul32", + "Mul64", + "OnesCount", + "OnesCount16", + "OnesCount32", + "OnesCount64", + "OnesCount8", + "Rem", + "Rem32", + "Rem64", + "Reverse", + "Reverse16", + "Reverse32", + "Reverse64", + "Reverse8", + "ReverseBytes", + "ReverseBytes16", + "ReverseBytes32", + "ReverseBytes64", + "RotateLeft", + "RotateLeft16", + "RotateLeft32", + "RotateLeft64", + "RotateLeft8", + "Sub", + "Sub32", + "Sub64", + "TrailingZeros", + "TrailingZeros16", + "TrailingZeros32", + "TrailingZeros64", + "TrailingZeros8", + "UintSize", + }, + "math/cmplx": []string{ + "Abs", + "Acos", + "Acosh", + "Asin", + "Asinh", + "Atan", + "Atanh", + "Conj", + "Cos", + "Cosh", + "Cot", + "Exp", + "Inf", + "IsInf", + "IsNaN", + "Log", + "Log10", + "NaN", + "Phase", + "Polar", + "Pow", + "Rect", + "Sin", + "Sinh", + "Sqrt", + "Tan", + "Tanh", + }, + "math/rand": []string{ + "ExpFloat64", + "Float32", + "Float64", + "Int", + "Int31", + "Int31n", + "Int63", + "Int63n", + "Intn", + "New", + "NewSource", + "NewZipf", + "NormFloat64", + "Perm", + "Rand", + "Read", + "Seed", + "Shuffle", + "Source", + "Source64", + "Uint32", + "Uint64", + "Zipf", + }, + "mime": []string{ + "AddExtensionType", + "BEncoding", + "ErrInvalidMediaParameter", + "ExtensionsByType", + "FormatMediaType", + "ParseMediaType", + "QEncoding", + "TypeByExtension", + "WordDecoder", + "WordEncoder", + }, + "mime/multipart": []string{ + "ErrMessageTooLarge", + "File", + "FileHeader", + "Form", + "NewReader", + "NewWriter", + "Part", + "Reader", + "Writer", + }, + "mime/quotedprintable": []string{ + "NewReader", + "NewWriter", + "Reader", + "Writer", + }, + "net": []string{ + "Addr", + "AddrError", + "Buffers", + "CIDRMask", + "Conn", + "DNSConfigError", + "DNSError", + "DefaultResolver", + "Dial", + "DialIP", + "DialTCP", + "DialTimeout", + "DialUDP", + "DialUnix", + "Dialer", + "ErrClosed", + "ErrWriteToConnected", + "Error", + "FileConn", + "FileListener", + "FilePacketConn", + "FlagBroadcast", + "FlagLoopback", + "FlagMulticast", + "FlagPointToPoint", + "FlagUp", + "Flags", + "HardwareAddr", + "IP", + "IPAddr", + "IPConn", + "IPMask", + "IPNet", + "IPv4", + "IPv4Mask", + "IPv4allrouter", + "IPv4allsys", + "IPv4bcast", + "IPv4len", + "IPv4zero", + "IPv6interfacelocalallnodes", + "IPv6len", + "IPv6linklocalallnodes", + "IPv6linklocalallrouters", + "IPv6loopback", + "IPv6unspecified", + "IPv6zero", + "Interface", + "InterfaceAddrs", + "InterfaceByIndex", + "InterfaceByName", + "Interfaces", + "InvalidAddrError", + "JoinHostPort", + "Listen", + "ListenConfig", + "ListenIP", + "ListenMulticastUDP", + "ListenPacket", + "ListenTCP", + "ListenUDP", + "ListenUnix", + "ListenUnixgram", + "Listener", + "LookupAddr", + "LookupCNAME", + "LookupHost", + "LookupIP", + "LookupMX", + "LookupNS", + "LookupPort", + "LookupSRV", + "LookupTXT", + "MX", + "NS", + "OpError", + "PacketConn", + "ParseCIDR", + "ParseError", + "ParseIP", + "ParseMAC", + "Pipe", + "ResolveIPAddr", + "ResolveTCPAddr", + "ResolveUDPAddr", + "ResolveUnixAddr", + "Resolver", + "SRV", + "SplitHostPort", + "TCPAddr", + "TCPConn", + "TCPListener", + "UDPAddr", + "UDPConn", + "UnixAddr", + "UnixConn", + "UnixListener", + "UnknownNetworkError", + }, + "net/http": []string{ + "CanonicalHeaderKey", + "Client", + "CloseNotifier", + 
"ConnState", + "Cookie", + "CookieJar", + "DefaultClient", + "DefaultMaxHeaderBytes", + "DefaultMaxIdleConnsPerHost", + "DefaultServeMux", + "DefaultTransport", + "DetectContentType", + "Dir", + "ErrAbortHandler", + "ErrBodyNotAllowed", + "ErrBodyReadAfterClose", + "ErrContentLength", + "ErrHandlerTimeout", + "ErrHeaderTooLong", + "ErrHijacked", + "ErrLineTooLong", + "ErrMissingBoundary", + "ErrMissingContentLength", + "ErrMissingFile", + "ErrNoCookie", + "ErrNoLocation", + "ErrNotMultipart", + "ErrNotSupported", + "ErrServerClosed", + "ErrShortBody", + "ErrSkipAltProtocol", + "ErrUnexpectedTrailer", + "ErrUseLastResponse", + "ErrWriteAfterFlush", + "Error", + "FS", + "File", + "FileServer", + "FileSystem", + "Flusher", + "Get", + "Handle", + "HandleFunc", + "Handler", + "HandlerFunc", + "Head", + "Header", + "Hijacker", + "ListenAndServe", + "ListenAndServeTLS", + "LocalAddrContextKey", + "MaxBytesReader", + "MethodConnect", + "MethodDelete", + "MethodGet", + "MethodHead", + "MethodOptions", + "MethodPatch", + "MethodPost", + "MethodPut", + "MethodTrace", + "NewFileTransport", + "NewRequest", + "NewRequestWithContext", + "NewServeMux", + "NoBody", + "NotFound", + "NotFoundHandler", + "ParseHTTPVersion", + "ParseTime", + "Post", + "PostForm", + "ProtocolError", + "ProxyFromEnvironment", + "ProxyURL", + "PushOptions", + "Pusher", + "ReadRequest", + "ReadResponse", + "Redirect", + "RedirectHandler", + "Request", + "Response", + "ResponseWriter", + "RoundTripper", + "SameSite", + "SameSiteDefaultMode", + "SameSiteLaxMode", + "SameSiteNoneMode", + "SameSiteStrictMode", + "Serve", + "ServeContent", + "ServeFile", + "ServeMux", + "ServeTLS", + "Server", + "ServerContextKey", + "SetCookie", + "StateActive", + "StateClosed", + "StateHijacked", + "StateIdle", + "StateNew", + "StatusAccepted", + "StatusAlreadyReported", + "StatusBadGateway", + "StatusBadRequest", + "StatusConflict", + "StatusContinue", + "StatusCreated", + "StatusEarlyHints", + "StatusExpectationFailed", + "StatusFailedDependency", + "StatusForbidden", + "StatusFound", + "StatusGatewayTimeout", + "StatusGone", + "StatusHTTPVersionNotSupported", + "StatusIMUsed", + "StatusInsufficientStorage", + "StatusInternalServerError", + "StatusLengthRequired", + "StatusLocked", + "StatusLoopDetected", + "StatusMethodNotAllowed", + "StatusMisdirectedRequest", + "StatusMovedPermanently", + "StatusMultiStatus", + "StatusMultipleChoices", + "StatusNetworkAuthenticationRequired", + "StatusNoContent", + "StatusNonAuthoritativeInfo", + "StatusNotAcceptable", + "StatusNotExtended", + "StatusNotFound", + "StatusNotImplemented", + "StatusNotModified", + "StatusOK", + "StatusPartialContent", + "StatusPaymentRequired", + "StatusPermanentRedirect", + "StatusPreconditionFailed", + "StatusPreconditionRequired", + "StatusProcessing", + "StatusProxyAuthRequired", + "StatusRequestEntityTooLarge", + "StatusRequestHeaderFieldsTooLarge", + "StatusRequestTimeout", + "StatusRequestURITooLong", + "StatusRequestedRangeNotSatisfiable", + "StatusResetContent", + "StatusSeeOther", + "StatusServiceUnavailable", + "StatusSwitchingProtocols", + "StatusTeapot", + "StatusTemporaryRedirect", + "StatusText", + "StatusTooEarly", + "StatusTooManyRequests", + "StatusUnauthorized", + "StatusUnavailableForLegalReasons", + "StatusUnprocessableEntity", + "StatusUnsupportedMediaType", + "StatusUpgradeRequired", + "StatusUseProxy", + "StatusVariantAlsoNegotiates", + "StripPrefix", + "TimeFormat", + "TimeoutHandler", + "TrailerPrefix", + "Transport", + }, + "net/http/cgi": []string{ + 
"Handler", + "Request", + "RequestFromMap", + "Serve", + }, + "net/http/cookiejar": []string{ + "Jar", + "New", + "Options", + "PublicSuffixList", + }, + "net/http/fcgi": []string{ + "ErrConnClosed", + "ErrRequestAborted", + "ProcessEnv", + "Serve", + }, + "net/http/httptest": []string{ + "DefaultRemoteAddr", + "NewRecorder", + "NewRequest", + "NewServer", + "NewTLSServer", + "NewUnstartedServer", + "ResponseRecorder", + "Server", + }, + "net/http/httptrace": []string{ + "ClientTrace", + "ContextClientTrace", + "DNSDoneInfo", + "DNSStartInfo", + "GotConnInfo", + "WithClientTrace", + "WroteRequestInfo", + }, + "net/http/httputil": []string{ + "BufferPool", + "ClientConn", + "DumpRequest", + "DumpRequestOut", + "DumpResponse", + "ErrClosed", + "ErrLineTooLong", + "ErrPersistEOF", + "ErrPipeline", + "NewChunkedReader", + "NewChunkedWriter", + "NewClientConn", + "NewProxyClientConn", + "NewServerConn", + "NewSingleHostReverseProxy", + "ReverseProxy", + "ServerConn", + }, + "net/http/pprof": []string{ + "Cmdline", + "Handler", + "Index", + "Profile", + "Symbol", + "Trace", + }, + "net/mail": []string{ + "Address", + "AddressParser", + "ErrHeaderNotPresent", + "Header", + "Message", + "ParseAddress", + "ParseAddressList", + "ParseDate", + "ReadMessage", + }, + "net/rpc": []string{ + "Accept", + "Call", + "Client", + "ClientCodec", + "DefaultDebugPath", + "DefaultRPCPath", + "DefaultServer", + "Dial", + "DialHTTP", + "DialHTTPPath", + "ErrShutdown", + "HandleHTTP", + "NewClient", + "NewClientWithCodec", + "NewServer", + "Register", + "RegisterName", + "Request", + "Response", + "ServeCodec", + "ServeConn", + "ServeRequest", + "Server", + "ServerCodec", + "ServerError", + }, + "net/rpc/jsonrpc": []string{ + "Dial", + "NewClient", + "NewClientCodec", + "NewServerCodec", + "ServeConn", + }, + "net/smtp": []string{ + "Auth", + "CRAMMD5Auth", + "Client", + "Dial", + "NewClient", + "PlainAuth", + "SendMail", + "ServerInfo", + }, + "net/textproto": []string{ + "CanonicalMIMEHeaderKey", + "Conn", + "Dial", + "Error", + "MIMEHeader", + "NewConn", + "NewReader", + "NewWriter", + "Pipeline", + "ProtocolError", + "Reader", + "TrimBytes", + "TrimString", + "Writer", + }, + "net/url": []string{ + "Error", + "EscapeError", + "InvalidHostError", + "Parse", + "ParseQuery", + "ParseRequestURI", + "PathEscape", + "PathUnescape", + "QueryEscape", + "QueryUnescape", + "URL", + "User", + "UserPassword", + "Userinfo", + "Values", + }, + "os": []string{ + "Args", + "Chdir", + "Chmod", + "Chown", + "Chtimes", + "Clearenv", + "Create", + "CreateTemp", + "DevNull", + "DirEntry", + "DirFS", + "Environ", + "ErrClosed", + "ErrDeadlineExceeded", + "ErrExist", + "ErrInvalid", + "ErrNoDeadline", + "ErrNotExist", + "ErrPermission", + "ErrProcessDone", + "Executable", + "Exit", + "Expand", + "ExpandEnv", + "File", + "FileInfo", + "FileMode", + "FindProcess", + "Getegid", + "Getenv", + "Geteuid", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpid", + "Getppid", + "Getuid", + "Getwd", + "Hostname", + "Interrupt", + "IsExist", + "IsNotExist", + "IsPathSeparator", + "IsPermission", + "IsTimeout", + "Kill", + "Lchown", + "Link", + "LinkError", + "LookupEnv", + "Lstat", + "Mkdir", + "MkdirAll", + "MkdirTemp", + "ModeAppend", + "ModeCharDevice", + "ModeDevice", + "ModeDir", + "ModeExclusive", + "ModeIrregular", + "ModeNamedPipe", + "ModePerm", + "ModeSetgid", + "ModeSetuid", + "ModeSocket", + "ModeSticky", + "ModeSymlink", + "ModeTemporary", + "ModeType", + "NewFile", + "NewSyscallError", + "O_APPEND", + "O_CREATE", + "O_EXCL", + 
"O_RDONLY", + "O_RDWR", + "O_SYNC", + "O_TRUNC", + "O_WRONLY", + "Open", + "OpenFile", + "PathError", + "PathListSeparator", + "PathSeparator", + "Pipe", + "ProcAttr", + "Process", + "ProcessState", + "ReadDir", + "ReadFile", + "Readlink", + "Remove", + "RemoveAll", + "Rename", + "SEEK_CUR", + "SEEK_END", + "SEEK_SET", + "SameFile", + "Setenv", + "Signal", + "StartProcess", + "Stat", + "Stderr", + "Stdin", + "Stdout", + "Symlink", + "SyscallError", + "TempDir", + "Truncate", + "Unsetenv", + "UserCacheDir", + "UserConfigDir", + "UserHomeDir", + "WriteFile", + }, + "os/exec": []string{ + "Cmd", + "Command", + "CommandContext", + "ErrNotFound", + "Error", + "ExitError", + "LookPath", + }, + "os/signal": []string{ + "Ignore", + "Ignored", + "Notify", + "NotifyContext", + "Reset", + "Stop", + }, + "os/user": []string{ + "Current", + "Group", + "Lookup", + "LookupGroup", + "LookupGroupId", + "LookupId", + "UnknownGroupError", + "UnknownGroupIdError", + "UnknownUserError", + "UnknownUserIdError", + "User", + }, + "path": []string{ + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "Ext", + "IsAbs", + "Join", + "Match", + "Split", + }, + "path/filepath": []string{ + "Abs", + "Base", + "Clean", + "Dir", + "ErrBadPattern", + "EvalSymlinks", + "Ext", + "FromSlash", + "Glob", + "HasPrefix", + "IsAbs", + "Join", + "ListSeparator", + "Match", + "Rel", + "Separator", + "SkipDir", + "Split", + "SplitList", + "ToSlash", + "VolumeName", + "Walk", + "WalkDir", + "WalkFunc", + }, + "plugin": []string{ + "Open", + "Plugin", + "Symbol", + }, + "reflect": []string{ + "Append", + "AppendSlice", + "Array", + "ArrayOf", + "Bool", + "BothDir", + "Chan", + "ChanDir", + "ChanOf", + "Complex128", + "Complex64", + "Copy", + "DeepEqual", + "Float32", + "Float64", + "Func", + "FuncOf", + "Indirect", + "Int", + "Int16", + "Int32", + "Int64", + "Int8", + "Interface", + "Invalid", + "Kind", + "MakeChan", + "MakeFunc", + "MakeMap", + "MakeMapWithSize", + "MakeSlice", + "Map", + "MapIter", + "MapOf", + "Method", + "New", + "NewAt", + "Ptr", + "PtrTo", + "RecvDir", + "Select", + "SelectCase", + "SelectDefault", + "SelectDir", + "SelectRecv", + "SelectSend", + "SendDir", + "Slice", + "SliceHeader", + "SliceOf", + "String", + "StringHeader", + "Struct", + "StructField", + "StructOf", + "StructTag", + "Swapper", + "Type", + "TypeOf", + "Uint", + "Uint16", + "Uint32", + "Uint64", + "Uint8", + "Uintptr", + "UnsafePointer", + "Value", + "ValueError", + "ValueOf", + "Zero", + }, + "regexp": []string{ + "Compile", + "CompilePOSIX", + "Match", + "MatchReader", + "MatchString", + "MustCompile", + "MustCompilePOSIX", + "QuoteMeta", + "Regexp", + }, + "regexp/syntax": []string{ + "ClassNL", + "Compile", + "DotNL", + "EmptyBeginLine", + "EmptyBeginText", + "EmptyEndLine", + "EmptyEndText", + "EmptyNoWordBoundary", + "EmptyOp", + "EmptyOpContext", + "EmptyWordBoundary", + "ErrInternalError", + "ErrInvalidCharClass", + "ErrInvalidCharRange", + "ErrInvalidEscape", + "ErrInvalidNamedCapture", + "ErrInvalidPerlOp", + "ErrInvalidRepeatOp", + "ErrInvalidRepeatSize", + "ErrInvalidUTF8", + "ErrMissingBracket", + "ErrMissingParen", + "ErrMissingRepeatArgument", + "ErrTrailingBackslash", + "ErrUnexpectedParen", + "Error", + "ErrorCode", + "Flags", + "FoldCase", + "Inst", + "InstAlt", + "InstAltMatch", + "InstCapture", + "InstEmptyWidth", + "InstFail", + "InstMatch", + "InstNop", + "InstOp", + "InstRune", + "InstRune1", + "InstRuneAny", + "InstRuneAnyNotNL", + "IsWordChar", + "Literal", + "MatchNL", + "NonGreedy", + "OneLine", + "Op", + "OpAlternate", 
+ "OpAnyChar", + "OpAnyCharNotNL", + "OpBeginLine", + "OpBeginText", + "OpCapture", + "OpCharClass", + "OpConcat", + "OpEmptyMatch", + "OpEndLine", + "OpEndText", + "OpLiteral", + "OpNoMatch", + "OpNoWordBoundary", + "OpPlus", + "OpQuest", + "OpRepeat", + "OpStar", + "OpWordBoundary", + "POSIX", + "Parse", + "Perl", + "PerlX", + "Prog", + "Regexp", + "Simple", + "UnicodeGroups", + "WasDollar", + }, + "runtime": []string{ + "BlockProfile", + "BlockProfileRecord", + "Breakpoint", + "CPUProfile", + "Caller", + "Callers", + "CallersFrames", + "Compiler", + "Error", + "Frame", + "Frames", + "Func", + "FuncForPC", + "GC", + "GOARCH", + "GOMAXPROCS", + "GOOS", + "GOROOT", + "Goexit", + "GoroutineProfile", + "Gosched", + "KeepAlive", + "LockOSThread", + "MemProfile", + "MemProfileRate", + "MemProfileRecord", + "MemStats", + "MutexProfile", + "NumCPU", + "NumCgoCall", + "NumGoroutine", + "ReadMemStats", + "ReadTrace", + "SetBlockProfileRate", + "SetCPUProfileRate", + "SetCgoTraceback", + "SetFinalizer", + "SetMutexProfileFraction", + "Stack", + "StackRecord", + "StartTrace", + "StopTrace", + "ThreadCreateProfile", + "TypeAssertionError", + "UnlockOSThread", + "Version", + }, + "runtime/debug": []string{ + "BuildInfo", + "FreeOSMemory", + "GCStats", + "Module", + "PrintStack", + "ReadBuildInfo", + "ReadGCStats", + "SetGCPercent", + "SetMaxStack", + "SetMaxThreads", + "SetPanicOnFault", + "SetTraceback", + "Stack", + "WriteHeapDump", + }, + "runtime/metrics": []string{ + "All", + "Description", + "Float64Histogram", + "KindBad", + "KindFloat64", + "KindFloat64Histogram", + "KindUint64", + "Read", + "Sample", + "Value", + "ValueKind", + }, + "runtime/pprof": []string{ + "Do", + "ForLabels", + "Label", + "LabelSet", + "Labels", + "Lookup", + "NewProfile", + "Profile", + "Profiles", + "SetGoroutineLabels", + "StartCPUProfile", + "StopCPUProfile", + "WithLabels", + "WriteHeapProfile", + }, + "runtime/trace": []string{ + "IsEnabled", + "Log", + "Logf", + "NewTask", + "Region", + "Start", + "StartRegion", + "Stop", + "Task", + "WithRegion", + }, + "sort": []string{ + "Float64Slice", + "Float64s", + "Float64sAreSorted", + "IntSlice", + "Interface", + "Ints", + "IntsAreSorted", + "IsSorted", + "Reverse", + "Search", + "SearchFloat64s", + "SearchInts", + "SearchStrings", + "Slice", + "SliceIsSorted", + "SliceStable", + "Sort", + "Stable", + "StringSlice", + "Strings", + "StringsAreSorted", + }, + "strconv": []string{ + "AppendBool", + "AppendFloat", + "AppendInt", + "AppendQuote", + "AppendQuoteRune", + "AppendQuoteRuneToASCII", + "AppendQuoteRuneToGraphic", + "AppendQuoteToASCII", + "AppendQuoteToGraphic", + "AppendUint", + "Atoi", + "CanBackquote", + "ErrRange", + "ErrSyntax", + "FormatBool", + "FormatComplex", + "FormatFloat", + "FormatInt", + "FormatUint", + "IntSize", + "IsGraphic", + "IsPrint", + "Itoa", + "NumError", + "ParseBool", + "ParseComplex", + "ParseFloat", + "ParseInt", + "ParseUint", + "Quote", + "QuoteRune", + "QuoteRuneToASCII", + "QuoteRuneToGraphic", + "QuoteToASCII", + "QuoteToGraphic", + "Unquote", + "UnquoteChar", + }, + "strings": []string{ + "Builder", + "Compare", + "Contains", + "ContainsAny", + "ContainsRune", + "Count", + "EqualFold", + "Fields", + "FieldsFunc", + "HasPrefix", + "HasSuffix", + "Index", + "IndexAny", + "IndexByte", + "IndexFunc", + "IndexRune", + "Join", + "LastIndex", + "LastIndexAny", + "LastIndexByte", + "LastIndexFunc", + "Map", + "NewReader", + "NewReplacer", + "Reader", + "Repeat", + "Replace", + "ReplaceAll", + "Replacer", + "Split", + "SplitAfter", + 
"SplitAfterN", + "SplitN", + "Title", + "ToLower", + "ToLowerSpecial", + "ToTitle", + "ToTitleSpecial", + "ToUpper", + "ToUpperSpecial", + "ToValidUTF8", + "Trim", + "TrimFunc", + "TrimLeft", + "TrimLeftFunc", + "TrimPrefix", + "TrimRight", + "TrimRightFunc", + "TrimSpace", + "TrimSuffix", + }, + "sync": []string{ + "Cond", + "Locker", + "Map", + "Mutex", + "NewCond", + "Once", + "Pool", + "RWMutex", + "WaitGroup", + }, + "sync/atomic": []string{ + "AddInt32", + "AddInt64", + "AddUint32", + "AddUint64", + "AddUintptr", + "CompareAndSwapInt32", + "CompareAndSwapInt64", + "CompareAndSwapPointer", + "CompareAndSwapUint32", + "CompareAndSwapUint64", + "CompareAndSwapUintptr", + "LoadInt32", + "LoadInt64", + "LoadPointer", + "LoadUint32", + "LoadUint64", + "LoadUintptr", + "StoreInt32", + "StoreInt64", + "StorePointer", + "StoreUint32", + "StoreUint64", + "StoreUintptr", + "SwapInt32", + "SwapInt64", + "SwapPointer", + "SwapUint32", + "SwapUint64", + "SwapUintptr", + "Value", + }, + "syscall": []string{ + "AF_ALG", + "AF_APPLETALK", + "AF_ARP", + "AF_ASH", + "AF_ATM", + "AF_ATMPVC", + "AF_ATMSVC", + "AF_AX25", + "AF_BLUETOOTH", + "AF_BRIDGE", + "AF_CAIF", + "AF_CAN", + "AF_CCITT", + "AF_CHAOS", + "AF_CNT", + "AF_COIP", + "AF_DATAKIT", + "AF_DECnet", + "AF_DLI", + "AF_E164", + "AF_ECMA", + "AF_ECONET", + "AF_ENCAP", + "AF_FILE", + "AF_HYLINK", + "AF_IEEE80211", + "AF_IEEE802154", + "AF_IMPLINK", + "AF_INET", + "AF_INET6", + "AF_INET6_SDP", + "AF_INET_SDP", + "AF_IPX", + "AF_IRDA", + "AF_ISDN", + "AF_ISO", + "AF_IUCV", + "AF_KEY", + "AF_LAT", + "AF_LINK", + "AF_LLC", + "AF_LOCAL", + "AF_MAX", + "AF_MPLS", + "AF_NATM", + "AF_NDRV", + "AF_NETBEUI", + "AF_NETBIOS", + "AF_NETGRAPH", + "AF_NETLINK", + "AF_NETROM", + "AF_NS", + "AF_OROUTE", + "AF_OSI", + "AF_PACKET", + "AF_PHONET", + "AF_PPP", + "AF_PPPOX", + "AF_PUP", + "AF_RDS", + "AF_RESERVED_36", + "AF_ROSE", + "AF_ROUTE", + "AF_RXRPC", + "AF_SCLUSTER", + "AF_SECURITY", + "AF_SIP", + "AF_SLOW", + "AF_SNA", + "AF_SYSTEM", + "AF_TIPC", + "AF_UNIX", + "AF_UNSPEC", + "AF_VENDOR00", + "AF_VENDOR01", + "AF_VENDOR02", + "AF_VENDOR03", + "AF_VENDOR04", + "AF_VENDOR05", + "AF_VENDOR06", + "AF_VENDOR07", + "AF_VENDOR08", + "AF_VENDOR09", + "AF_VENDOR10", + "AF_VENDOR11", + "AF_VENDOR12", + "AF_VENDOR13", + "AF_VENDOR14", + "AF_VENDOR15", + "AF_VENDOR16", + "AF_VENDOR17", + "AF_VENDOR18", + "AF_VENDOR19", + "AF_VENDOR20", + "AF_VENDOR21", + "AF_VENDOR22", + "AF_VENDOR23", + "AF_VENDOR24", + "AF_VENDOR25", + "AF_VENDOR26", + "AF_VENDOR27", + "AF_VENDOR28", + "AF_VENDOR29", + "AF_VENDOR30", + "AF_VENDOR31", + "AF_VENDOR32", + "AF_VENDOR33", + "AF_VENDOR34", + "AF_VENDOR35", + "AF_VENDOR36", + "AF_VENDOR37", + "AF_VENDOR38", + "AF_VENDOR39", + "AF_VENDOR40", + "AF_VENDOR41", + "AF_VENDOR42", + "AF_VENDOR43", + "AF_VENDOR44", + "AF_VENDOR45", + "AF_VENDOR46", + "AF_VENDOR47", + "AF_WANPIPE", + "AF_X25", + "AI_CANONNAME", + "AI_NUMERICHOST", + "AI_PASSIVE", + "APPLICATION_ERROR", + "ARPHRD_ADAPT", + "ARPHRD_APPLETLK", + "ARPHRD_ARCNET", + "ARPHRD_ASH", + "ARPHRD_ATM", + "ARPHRD_AX25", + "ARPHRD_BIF", + "ARPHRD_CHAOS", + "ARPHRD_CISCO", + "ARPHRD_CSLIP", + "ARPHRD_CSLIP6", + "ARPHRD_DDCMP", + "ARPHRD_DLCI", + "ARPHRD_ECONET", + "ARPHRD_EETHER", + "ARPHRD_ETHER", + "ARPHRD_EUI64", + "ARPHRD_FCAL", + "ARPHRD_FCFABRIC", + "ARPHRD_FCPL", + "ARPHRD_FCPP", + "ARPHRD_FDDI", + "ARPHRD_FRAD", + "ARPHRD_FRELAY", + "ARPHRD_HDLC", + "ARPHRD_HIPPI", + "ARPHRD_HWX25", + "ARPHRD_IEEE1394", + "ARPHRD_IEEE802", + "ARPHRD_IEEE80211", + "ARPHRD_IEEE80211_PRISM", + 
"ARPHRD_IEEE80211_RADIOTAP", + "ARPHRD_IEEE802154", + "ARPHRD_IEEE802154_PHY", + "ARPHRD_IEEE802_TR", + "ARPHRD_INFINIBAND", + "ARPHRD_IPDDP", + "ARPHRD_IPGRE", + "ARPHRD_IRDA", + "ARPHRD_LAPB", + "ARPHRD_LOCALTLK", + "ARPHRD_LOOPBACK", + "ARPHRD_METRICOM", + "ARPHRD_NETROM", + "ARPHRD_NONE", + "ARPHRD_PIMREG", + "ARPHRD_PPP", + "ARPHRD_PRONET", + "ARPHRD_RAWHDLC", + "ARPHRD_ROSE", + "ARPHRD_RSRVD", + "ARPHRD_SIT", + "ARPHRD_SKIP", + "ARPHRD_SLIP", + "ARPHRD_SLIP6", + "ARPHRD_STRIP", + "ARPHRD_TUNNEL", + "ARPHRD_TUNNEL6", + "ARPHRD_VOID", + "ARPHRD_X25", + "AUTHTYPE_CLIENT", + "AUTHTYPE_SERVER", + "Accept", + "Accept4", + "AcceptEx", + "Access", + "Acct", + "AddrinfoW", + "Adjtime", + "Adjtimex", + "AllThreadsSyscall", + "AllThreadsSyscall6", + "AttachLsf", + "B0", + "B1000000", + "B110", + "B115200", + "B1152000", + "B1200", + "B134", + "B14400", + "B150", + "B1500000", + "B1800", + "B19200", + "B200", + "B2000000", + "B230400", + "B2400", + "B2500000", + "B28800", + "B300", + "B3000000", + "B3500000", + "B38400", + "B4000000", + "B460800", + "B4800", + "B50", + "B500000", + "B57600", + "B576000", + "B600", + "B7200", + "B75", + "B76800", + "B921600", + "B9600", + "BASE_PROTOCOL", + "BIOCFEEDBACK", + "BIOCFLUSH", + "BIOCGBLEN", + "BIOCGDIRECTION", + "BIOCGDIRFILT", + "BIOCGDLT", + "BIOCGDLTLIST", + "BIOCGETBUFMODE", + "BIOCGETIF", + "BIOCGETZMAX", + "BIOCGFEEDBACK", + "BIOCGFILDROP", + "BIOCGHDRCMPLT", + "BIOCGRSIG", + "BIOCGRTIMEOUT", + "BIOCGSEESENT", + "BIOCGSTATS", + "BIOCGSTATSOLD", + "BIOCGTSTAMP", + "BIOCIMMEDIATE", + "BIOCLOCK", + "BIOCPROMISC", + "BIOCROTZBUF", + "BIOCSBLEN", + "BIOCSDIRECTION", + "BIOCSDIRFILT", + "BIOCSDLT", + "BIOCSETBUFMODE", + "BIOCSETF", + "BIOCSETFNR", + "BIOCSETIF", + "BIOCSETWF", + "BIOCSETZBUF", + "BIOCSFEEDBACK", + "BIOCSFILDROP", + "BIOCSHDRCMPLT", + "BIOCSRSIG", + "BIOCSRTIMEOUT", + "BIOCSSEESENT", + "BIOCSTCPF", + "BIOCSTSTAMP", + "BIOCSUDPF", + "BIOCVERSION", + "BPF_A", + "BPF_ABS", + "BPF_ADD", + "BPF_ALIGNMENT", + "BPF_ALIGNMENT32", + "BPF_ALU", + "BPF_AND", + "BPF_B", + "BPF_BUFMODE_BUFFER", + "BPF_BUFMODE_ZBUF", + "BPF_DFLTBUFSIZE", + "BPF_DIRECTION_IN", + "BPF_DIRECTION_OUT", + "BPF_DIV", + "BPF_H", + "BPF_IMM", + "BPF_IND", + "BPF_JA", + "BPF_JEQ", + "BPF_JGE", + "BPF_JGT", + "BPF_JMP", + "BPF_JSET", + "BPF_K", + "BPF_LD", + "BPF_LDX", + "BPF_LEN", + "BPF_LSH", + "BPF_MAJOR_VERSION", + "BPF_MAXBUFSIZE", + "BPF_MAXINSNS", + "BPF_MEM", + "BPF_MEMWORDS", + "BPF_MINBUFSIZE", + "BPF_MINOR_VERSION", + "BPF_MISC", + "BPF_MSH", + "BPF_MUL", + "BPF_NEG", + "BPF_OR", + "BPF_RELEASE", + "BPF_RET", + "BPF_RSH", + "BPF_ST", + "BPF_STX", + "BPF_SUB", + "BPF_TAX", + "BPF_TXA", + "BPF_T_BINTIME", + "BPF_T_BINTIME_FAST", + "BPF_T_BINTIME_MONOTONIC", + "BPF_T_BINTIME_MONOTONIC_FAST", + "BPF_T_FAST", + "BPF_T_FLAG_MASK", + "BPF_T_FORMAT_MASK", + "BPF_T_MICROTIME", + "BPF_T_MICROTIME_FAST", + "BPF_T_MICROTIME_MONOTONIC", + "BPF_T_MICROTIME_MONOTONIC_FAST", + "BPF_T_MONOTONIC", + "BPF_T_MONOTONIC_FAST", + "BPF_T_NANOTIME", + "BPF_T_NANOTIME_FAST", + "BPF_T_NANOTIME_MONOTONIC", + "BPF_T_NANOTIME_MONOTONIC_FAST", + "BPF_T_NONE", + "BPF_T_NORMAL", + "BPF_W", + "BPF_X", + "BRKINT", + "Bind", + "BindToDevice", + "BpfBuflen", + "BpfDatalink", + "BpfHdr", + "BpfHeadercmpl", + "BpfInsn", + "BpfInterface", + "BpfJump", + "BpfProgram", + "BpfStat", + "BpfStats", + "BpfStmt", + "BpfTimeout", + "BpfTimeval", + "BpfVersion", + "BpfZbuf", + "BpfZbufHeader", + "ByHandleFileInformation", + "BytePtrFromString", + "ByteSliceFromString", + "CCR0_FLUSH", + 
"CERT_CHAIN_POLICY_AUTHENTICODE", + "CERT_CHAIN_POLICY_AUTHENTICODE_TS", + "CERT_CHAIN_POLICY_BASE", + "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", + "CERT_CHAIN_POLICY_EV", + "CERT_CHAIN_POLICY_MICROSOFT_ROOT", + "CERT_CHAIN_POLICY_NT_AUTH", + "CERT_CHAIN_POLICY_SSL", + "CERT_E_CN_NO_MATCH", + "CERT_E_EXPIRED", + "CERT_E_PURPOSE", + "CERT_E_ROLE", + "CERT_E_UNTRUSTEDROOT", + "CERT_STORE_ADD_ALWAYS", + "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", + "CERT_STORE_PROV_MEMORY", + "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", + "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", + "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", + "CERT_TRUST_INVALID_BASIC_CONSTRAINTS", + "CERT_TRUST_INVALID_EXTENSION", + "CERT_TRUST_INVALID_NAME_CONSTRAINTS", + "CERT_TRUST_INVALID_POLICY_CONSTRAINTS", + "CERT_TRUST_IS_CYCLIC", + "CERT_TRUST_IS_EXPLICIT_DISTRUST", + "CERT_TRUST_IS_NOT_SIGNATURE_VALID", + "CERT_TRUST_IS_NOT_TIME_VALID", + "CERT_TRUST_IS_NOT_VALID_FOR_USAGE", + "CERT_TRUST_IS_OFFLINE_REVOCATION", + "CERT_TRUST_IS_REVOKED", + "CERT_TRUST_IS_UNTRUSTED_ROOT", + "CERT_TRUST_NO_ERROR", + "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", + "CERT_TRUST_REVOCATION_STATUS_UNKNOWN", + "CFLUSH", + "CLOCAL", + "CLONE_CHILD_CLEARTID", + "CLONE_CHILD_SETTID", + "CLONE_CSIGNAL", + "CLONE_DETACHED", + "CLONE_FILES", + "CLONE_FS", + "CLONE_IO", + "CLONE_NEWIPC", + "CLONE_NEWNET", + "CLONE_NEWNS", + "CLONE_NEWPID", + "CLONE_NEWUSER", + "CLONE_NEWUTS", + "CLONE_PARENT", + "CLONE_PARENT_SETTID", + "CLONE_PID", + "CLONE_PTRACE", + "CLONE_SETTLS", + "CLONE_SIGHAND", + "CLONE_SYSVSEM", + "CLONE_THREAD", + "CLONE_UNTRACED", + "CLONE_VFORK", + "CLONE_VM", + "CPUID_CFLUSH", + "CREAD", + "CREATE_ALWAYS", + "CREATE_NEW", + "CREATE_NEW_PROCESS_GROUP", + "CREATE_UNICODE_ENVIRONMENT", + "CRYPT_DEFAULT_CONTAINER_OPTIONAL", + "CRYPT_DELETEKEYSET", + "CRYPT_MACHINE_KEYSET", + "CRYPT_NEWKEYSET", + "CRYPT_SILENT", + "CRYPT_VERIFYCONTEXT", + "CS5", + "CS6", + "CS7", + "CS8", + "CSIZE", + "CSTART", + "CSTATUS", + "CSTOP", + "CSTOPB", + "CSUSP", + "CTL_MAXNAME", + "CTL_NET", + "CTL_QUERY", + "CTRL_BREAK_EVENT", + "CTRL_CLOSE_EVENT", + "CTRL_C_EVENT", + "CTRL_LOGOFF_EVENT", + "CTRL_SHUTDOWN_EVENT", + "CancelIo", + "CancelIoEx", + "CertAddCertificateContextToStore", + "CertChainContext", + "CertChainElement", + "CertChainPara", + "CertChainPolicyPara", + "CertChainPolicyStatus", + "CertCloseStore", + "CertContext", + "CertCreateCertificateContext", + "CertEnhKeyUsage", + "CertEnumCertificatesInStore", + "CertFreeCertificateChain", + "CertFreeCertificateContext", + "CertGetCertificateChain", + "CertInfo", + "CertOpenStore", + "CertOpenSystemStore", + "CertRevocationCrlInfo", + "CertRevocationInfo", + "CertSimpleChain", + "CertTrustListInfo", + "CertTrustStatus", + "CertUsageMatch", + "CertVerifyCertificateChainPolicy", + "Chdir", + "CheckBpfVersion", + "Chflags", + "Chmod", + "Chown", + "Chroot", + "Clearenv", + "Close", + "CloseHandle", + "CloseOnExec", + "Closesocket", + "CmsgLen", + "CmsgSpace", + "Cmsghdr", + "CommandLineToArgv", + "ComputerName", + "Conn", + "Connect", + "ConnectEx", + "ConvertSidToStringSid", + "ConvertStringSidToSid", + "CopySid", + "Creat", + "CreateDirectory", + "CreateFile", + "CreateFileMapping", + "CreateHardLink", + "CreateIoCompletionPort", + "CreatePipe", + "CreateProcess", + "CreateProcessAsUser", + "CreateSymbolicLink", + "CreateToolhelp32Snapshot", + "Credential", + "CryptAcquireContext", + "CryptGenRandom", + "CryptReleaseContext", 
+ "DIOCBSFLUSH", + "DIOCOSFPFLUSH", + "DLL", + "DLLError", + "DLT_A429", + "DLT_A653_ICM", + "DLT_AIRONET_HEADER", + "DLT_AOS", + "DLT_APPLE_IP_OVER_IEEE1394", + "DLT_ARCNET", + "DLT_ARCNET_LINUX", + "DLT_ATM_CLIP", + "DLT_ATM_RFC1483", + "DLT_AURORA", + "DLT_AX25", + "DLT_AX25_KISS", + "DLT_BACNET_MS_TP", + "DLT_BLUETOOTH_HCI_H4", + "DLT_BLUETOOTH_HCI_H4_WITH_PHDR", + "DLT_CAN20B", + "DLT_CAN_SOCKETCAN", + "DLT_CHAOS", + "DLT_CHDLC", + "DLT_CISCO_IOS", + "DLT_C_HDLC", + "DLT_C_HDLC_WITH_DIR", + "DLT_DBUS", + "DLT_DECT", + "DLT_DOCSIS", + "DLT_DVB_CI", + "DLT_ECONET", + "DLT_EN10MB", + "DLT_EN3MB", + "DLT_ENC", + "DLT_ERF", + "DLT_ERF_ETH", + "DLT_ERF_POS", + "DLT_FC_2", + "DLT_FC_2_WITH_FRAME_DELIMS", + "DLT_FDDI", + "DLT_FLEXRAY", + "DLT_FRELAY", + "DLT_FRELAY_WITH_DIR", + "DLT_GCOM_SERIAL", + "DLT_GCOM_T1E1", + "DLT_GPF_F", + "DLT_GPF_T", + "DLT_GPRS_LLC", + "DLT_GSMTAP_ABIS", + "DLT_GSMTAP_UM", + "DLT_HDLC", + "DLT_HHDLC", + "DLT_HIPPI", + "DLT_IBM_SN", + "DLT_IBM_SP", + "DLT_IEEE802", + "DLT_IEEE802_11", + "DLT_IEEE802_11_RADIO", + "DLT_IEEE802_11_RADIO_AVS", + "DLT_IEEE802_15_4", + "DLT_IEEE802_15_4_LINUX", + "DLT_IEEE802_15_4_NOFCS", + "DLT_IEEE802_15_4_NONASK_PHY", + "DLT_IEEE802_16_MAC_CPS", + "DLT_IEEE802_16_MAC_CPS_RADIO", + "DLT_IPFILTER", + "DLT_IPMB", + "DLT_IPMB_LINUX", + "DLT_IPNET", + "DLT_IPOIB", + "DLT_IPV4", + "DLT_IPV6", + "DLT_IP_OVER_FC", + "DLT_JUNIPER_ATM1", + "DLT_JUNIPER_ATM2", + "DLT_JUNIPER_ATM_CEMIC", + "DLT_JUNIPER_CHDLC", + "DLT_JUNIPER_ES", + "DLT_JUNIPER_ETHER", + "DLT_JUNIPER_FIBRECHANNEL", + "DLT_JUNIPER_FRELAY", + "DLT_JUNIPER_GGSN", + "DLT_JUNIPER_ISM", + "DLT_JUNIPER_MFR", + "DLT_JUNIPER_MLFR", + "DLT_JUNIPER_MLPPP", + "DLT_JUNIPER_MONITOR", + "DLT_JUNIPER_PIC_PEER", + "DLT_JUNIPER_PPP", + "DLT_JUNIPER_PPPOE", + "DLT_JUNIPER_PPPOE_ATM", + "DLT_JUNIPER_SERVICES", + "DLT_JUNIPER_SRX_E2E", + "DLT_JUNIPER_ST", + "DLT_JUNIPER_VP", + "DLT_JUNIPER_VS", + "DLT_LAPB_WITH_DIR", + "DLT_LAPD", + "DLT_LIN", + "DLT_LINUX_EVDEV", + "DLT_LINUX_IRDA", + "DLT_LINUX_LAPD", + "DLT_LINUX_PPP_WITHDIRECTION", + "DLT_LINUX_SLL", + "DLT_LOOP", + "DLT_LTALK", + "DLT_MATCHING_MAX", + "DLT_MATCHING_MIN", + "DLT_MFR", + "DLT_MOST", + "DLT_MPEG_2_TS", + "DLT_MPLS", + "DLT_MTP2", + "DLT_MTP2_WITH_PHDR", + "DLT_MTP3", + "DLT_MUX27010", + "DLT_NETANALYZER", + "DLT_NETANALYZER_TRANSPARENT", + "DLT_NFC_LLCP", + "DLT_NFLOG", + "DLT_NG40", + "DLT_NULL", + "DLT_PCI_EXP", + "DLT_PFLOG", + "DLT_PFSYNC", + "DLT_PPI", + "DLT_PPP", + "DLT_PPP_BSDOS", + "DLT_PPP_ETHER", + "DLT_PPP_PPPD", + "DLT_PPP_SERIAL", + "DLT_PPP_WITH_DIR", + "DLT_PPP_WITH_DIRECTION", + "DLT_PRISM_HEADER", + "DLT_PRONET", + "DLT_RAIF1", + "DLT_RAW", + "DLT_RAWAF_MASK", + "DLT_RIO", + "DLT_SCCP", + "DLT_SITA", + "DLT_SLIP", + "DLT_SLIP_BSDOS", + "DLT_STANAG_5066_D_PDU", + "DLT_SUNATM", + "DLT_SYMANTEC_FIREWALL", + "DLT_TZSP", + "DLT_USB", + "DLT_USB_LINUX", + "DLT_USB_LINUX_MMAPPED", + "DLT_USER0", + "DLT_USER1", + "DLT_USER10", + "DLT_USER11", + "DLT_USER12", + "DLT_USER13", + "DLT_USER14", + "DLT_USER15", + "DLT_USER2", + "DLT_USER3", + "DLT_USER4", + "DLT_USER5", + "DLT_USER6", + "DLT_USER7", + "DLT_USER8", + "DLT_USER9", + "DLT_WIHART", + "DLT_X2E_SERIAL", + "DLT_X2E_XORAYA", + "DNSMXData", + "DNSPTRData", + "DNSRecord", + "DNSSRVData", + "DNSTXTData", + "DNS_INFO_NO_RECORDS", + "DNS_TYPE_A", + "DNS_TYPE_A6", + "DNS_TYPE_AAAA", + "DNS_TYPE_ADDRS", + "DNS_TYPE_AFSDB", + "DNS_TYPE_ALL", + "DNS_TYPE_ANY", + "DNS_TYPE_ATMA", + "DNS_TYPE_AXFR", + "DNS_TYPE_CERT", + "DNS_TYPE_CNAME", + "DNS_TYPE_DHCID", + "DNS_TYPE_DNAME", + 
"DNS_TYPE_DNSKEY", + "DNS_TYPE_DS", + "DNS_TYPE_EID", + "DNS_TYPE_GID", + "DNS_TYPE_GPOS", + "DNS_TYPE_HINFO", + "DNS_TYPE_ISDN", + "DNS_TYPE_IXFR", + "DNS_TYPE_KEY", + "DNS_TYPE_KX", + "DNS_TYPE_LOC", + "DNS_TYPE_MAILA", + "DNS_TYPE_MAILB", + "DNS_TYPE_MB", + "DNS_TYPE_MD", + "DNS_TYPE_MF", + "DNS_TYPE_MG", + "DNS_TYPE_MINFO", + "DNS_TYPE_MR", + "DNS_TYPE_MX", + "DNS_TYPE_NAPTR", + "DNS_TYPE_NBSTAT", + "DNS_TYPE_NIMLOC", + "DNS_TYPE_NS", + "DNS_TYPE_NSAP", + "DNS_TYPE_NSAPPTR", + "DNS_TYPE_NSEC", + "DNS_TYPE_NULL", + "DNS_TYPE_NXT", + "DNS_TYPE_OPT", + "DNS_TYPE_PTR", + "DNS_TYPE_PX", + "DNS_TYPE_RP", + "DNS_TYPE_RRSIG", + "DNS_TYPE_RT", + "DNS_TYPE_SIG", + "DNS_TYPE_SINK", + "DNS_TYPE_SOA", + "DNS_TYPE_SRV", + "DNS_TYPE_TEXT", + "DNS_TYPE_TKEY", + "DNS_TYPE_TSIG", + "DNS_TYPE_UID", + "DNS_TYPE_UINFO", + "DNS_TYPE_UNSPEC", + "DNS_TYPE_WINS", + "DNS_TYPE_WINSR", + "DNS_TYPE_WKS", + "DNS_TYPE_X25", + "DT_BLK", + "DT_CHR", + "DT_DIR", + "DT_FIFO", + "DT_LNK", + "DT_REG", + "DT_SOCK", + "DT_UNKNOWN", + "DT_WHT", + "DUPLICATE_CLOSE_SOURCE", + "DUPLICATE_SAME_ACCESS", + "DeleteFile", + "DetachLsf", + "DeviceIoControl", + "Dirent", + "DnsNameCompare", + "DnsQuery", + "DnsRecordListFree", + "DnsSectionAdditional", + "DnsSectionAnswer", + "DnsSectionAuthority", + "DnsSectionQuestion", + "Dup", + "Dup2", + "Dup3", + "DuplicateHandle", + "E2BIG", + "EACCES", + "EADDRINUSE", + "EADDRNOTAVAIL", + "EADV", + "EAFNOSUPPORT", + "EAGAIN", + "EALREADY", + "EAUTH", + "EBADARCH", + "EBADE", + "EBADEXEC", + "EBADF", + "EBADFD", + "EBADMACHO", + "EBADMSG", + "EBADR", + "EBADRPC", + "EBADRQC", + "EBADSLT", + "EBFONT", + "EBUSY", + "ECANCELED", + "ECAPMODE", + "ECHILD", + "ECHO", + "ECHOCTL", + "ECHOE", + "ECHOK", + "ECHOKE", + "ECHONL", + "ECHOPRT", + "ECHRNG", + "ECOMM", + "ECONNABORTED", + "ECONNREFUSED", + "ECONNRESET", + "EDEADLK", + "EDEADLOCK", + "EDESTADDRREQ", + "EDEVERR", + "EDOM", + "EDOOFUS", + "EDOTDOT", + "EDQUOT", + "EEXIST", + "EFAULT", + "EFBIG", + "EFER_LMA", + "EFER_LME", + "EFER_NXE", + "EFER_SCE", + "EFTYPE", + "EHOSTDOWN", + "EHOSTUNREACH", + "EHWPOISON", + "EIDRM", + "EILSEQ", + "EINPROGRESS", + "EINTR", + "EINVAL", + "EIO", + "EIPSEC", + "EISCONN", + "EISDIR", + "EISNAM", + "EKEYEXPIRED", + "EKEYREJECTED", + "EKEYREVOKED", + "EL2HLT", + "EL2NSYNC", + "EL3HLT", + "EL3RST", + "ELAST", + "ELF_NGREG", + "ELF_PRARGSZ", + "ELIBACC", + "ELIBBAD", + "ELIBEXEC", + "ELIBMAX", + "ELIBSCN", + "ELNRNG", + "ELOOP", + "EMEDIUMTYPE", + "EMFILE", + "EMLINK", + "EMSGSIZE", + "EMT_TAGOVF", + "EMULTIHOP", + "EMUL_ENABLED", + "EMUL_LINUX", + "EMUL_LINUX32", + "EMUL_MAXID", + "EMUL_NATIVE", + "ENAMETOOLONG", + "ENAVAIL", + "ENDRUNDISC", + "ENEEDAUTH", + "ENETDOWN", + "ENETRESET", + "ENETUNREACH", + "ENFILE", + "ENOANO", + "ENOATTR", + "ENOBUFS", + "ENOCSI", + "ENODATA", + "ENODEV", + "ENOENT", + "ENOEXEC", + "ENOKEY", + "ENOLCK", + "ENOLINK", + "ENOMEDIUM", + "ENOMEM", + "ENOMSG", + "ENONET", + "ENOPKG", + "ENOPOLICY", + "ENOPROTOOPT", + "ENOSPC", + "ENOSR", + "ENOSTR", + "ENOSYS", + "ENOTBLK", + "ENOTCAPABLE", + "ENOTCONN", + "ENOTDIR", + "ENOTEMPTY", + "ENOTNAM", + "ENOTRECOVERABLE", + "ENOTSOCK", + "ENOTSUP", + "ENOTTY", + "ENOTUNIQ", + "ENXIO", + "EN_SW_CTL_INF", + "EN_SW_CTL_PREC", + "EN_SW_CTL_ROUND", + "EN_SW_DATACHAIN", + "EN_SW_DENORM", + "EN_SW_INVOP", + "EN_SW_OVERFLOW", + "EN_SW_PRECLOSS", + "EN_SW_UNDERFLOW", + "EN_SW_ZERODIV", + "EOPNOTSUPP", + "EOVERFLOW", + "EOWNERDEAD", + "EPERM", + "EPFNOSUPPORT", + "EPIPE", + "EPOLLERR", + "EPOLLET", + "EPOLLHUP", + "EPOLLIN", + "EPOLLMSG", + "EPOLLONESHOT", 
+ "EPOLLOUT", + "EPOLLPRI", + "EPOLLRDBAND", + "EPOLLRDHUP", + "EPOLLRDNORM", + "EPOLLWRBAND", + "EPOLLWRNORM", + "EPOLL_CLOEXEC", + "EPOLL_CTL_ADD", + "EPOLL_CTL_DEL", + "EPOLL_CTL_MOD", + "EPOLL_NONBLOCK", + "EPROCLIM", + "EPROCUNAVAIL", + "EPROGMISMATCH", + "EPROGUNAVAIL", + "EPROTO", + "EPROTONOSUPPORT", + "EPROTOTYPE", + "EPWROFF", + "ERANGE", + "EREMCHG", + "EREMOTE", + "EREMOTEIO", + "ERESTART", + "ERFKILL", + "EROFS", + "ERPCMISMATCH", + "ERROR_ACCESS_DENIED", + "ERROR_ALREADY_EXISTS", + "ERROR_BROKEN_PIPE", + "ERROR_BUFFER_OVERFLOW", + "ERROR_DIR_NOT_EMPTY", + "ERROR_ENVVAR_NOT_FOUND", + "ERROR_FILE_EXISTS", + "ERROR_FILE_NOT_FOUND", + "ERROR_HANDLE_EOF", + "ERROR_INSUFFICIENT_BUFFER", + "ERROR_IO_PENDING", + "ERROR_MOD_NOT_FOUND", + "ERROR_MORE_DATA", + "ERROR_NETNAME_DELETED", + "ERROR_NOT_FOUND", + "ERROR_NO_MORE_FILES", + "ERROR_OPERATION_ABORTED", + "ERROR_PATH_NOT_FOUND", + "ERROR_PRIVILEGE_NOT_HELD", + "ERROR_PROC_NOT_FOUND", + "ESHLIBVERS", + "ESHUTDOWN", + "ESOCKTNOSUPPORT", + "ESPIPE", + "ESRCH", + "ESRMNT", + "ESTALE", + "ESTRPIPE", + "ETHERCAP_JUMBO_MTU", + "ETHERCAP_VLAN_HWTAGGING", + "ETHERCAP_VLAN_MTU", + "ETHERMIN", + "ETHERMTU", + "ETHERMTU_JUMBO", + "ETHERTYPE_8023", + "ETHERTYPE_AARP", + "ETHERTYPE_ACCTON", + "ETHERTYPE_AEONIC", + "ETHERTYPE_ALPHA", + "ETHERTYPE_AMBER", + "ETHERTYPE_AMOEBA", + "ETHERTYPE_AOE", + "ETHERTYPE_APOLLO", + "ETHERTYPE_APOLLODOMAIN", + "ETHERTYPE_APPLETALK", + "ETHERTYPE_APPLITEK", + "ETHERTYPE_ARGONAUT", + "ETHERTYPE_ARP", + "ETHERTYPE_AT", + "ETHERTYPE_ATALK", + "ETHERTYPE_ATOMIC", + "ETHERTYPE_ATT", + "ETHERTYPE_ATTSTANFORD", + "ETHERTYPE_AUTOPHON", + "ETHERTYPE_AXIS", + "ETHERTYPE_BCLOOP", + "ETHERTYPE_BOFL", + "ETHERTYPE_CABLETRON", + "ETHERTYPE_CHAOS", + "ETHERTYPE_COMDESIGN", + "ETHERTYPE_COMPUGRAPHIC", + "ETHERTYPE_COUNTERPOINT", + "ETHERTYPE_CRONUS", + "ETHERTYPE_CRONUSVLN", + "ETHERTYPE_DCA", + "ETHERTYPE_DDE", + "ETHERTYPE_DEBNI", + "ETHERTYPE_DECAM", + "ETHERTYPE_DECCUST", + "ETHERTYPE_DECDIAG", + "ETHERTYPE_DECDNS", + "ETHERTYPE_DECDTS", + "ETHERTYPE_DECEXPER", + "ETHERTYPE_DECLAST", + "ETHERTYPE_DECLTM", + "ETHERTYPE_DECMUMPS", + "ETHERTYPE_DECNETBIOS", + "ETHERTYPE_DELTACON", + "ETHERTYPE_DIDDLE", + "ETHERTYPE_DLOG1", + "ETHERTYPE_DLOG2", + "ETHERTYPE_DN", + "ETHERTYPE_DOGFIGHT", + "ETHERTYPE_DSMD", + "ETHERTYPE_ECMA", + "ETHERTYPE_ENCRYPT", + "ETHERTYPE_ES", + "ETHERTYPE_EXCELAN", + "ETHERTYPE_EXPERDATA", + "ETHERTYPE_FLIP", + "ETHERTYPE_FLOWCONTROL", + "ETHERTYPE_FRARP", + "ETHERTYPE_GENDYN", + "ETHERTYPE_HAYES", + "ETHERTYPE_HIPPI_FP", + "ETHERTYPE_HITACHI", + "ETHERTYPE_HP", + "ETHERTYPE_IEEEPUP", + "ETHERTYPE_IEEEPUPAT", + "ETHERTYPE_IMLBL", + "ETHERTYPE_IMLBLDIAG", + "ETHERTYPE_IP", + "ETHERTYPE_IPAS", + "ETHERTYPE_IPV6", + "ETHERTYPE_IPX", + "ETHERTYPE_IPXNEW", + "ETHERTYPE_KALPANA", + "ETHERTYPE_LANBRIDGE", + "ETHERTYPE_LANPROBE", + "ETHERTYPE_LAT", + "ETHERTYPE_LBACK", + "ETHERTYPE_LITTLE", + "ETHERTYPE_LLDP", + "ETHERTYPE_LOGICRAFT", + "ETHERTYPE_LOOPBACK", + "ETHERTYPE_MATRA", + "ETHERTYPE_MAX", + "ETHERTYPE_MERIT", + "ETHERTYPE_MICP", + "ETHERTYPE_MOPDL", + "ETHERTYPE_MOPRC", + "ETHERTYPE_MOTOROLA", + "ETHERTYPE_MPLS", + "ETHERTYPE_MPLS_MCAST", + "ETHERTYPE_MUMPS", + "ETHERTYPE_NBPCC", + "ETHERTYPE_NBPCLAIM", + "ETHERTYPE_NBPCLREQ", + "ETHERTYPE_NBPCLRSP", + "ETHERTYPE_NBPCREQ", + "ETHERTYPE_NBPCRSP", + "ETHERTYPE_NBPDG", + "ETHERTYPE_NBPDGB", + "ETHERTYPE_NBPDLTE", + "ETHERTYPE_NBPRAR", + "ETHERTYPE_NBPRAS", + "ETHERTYPE_NBPRST", + "ETHERTYPE_NBPSCD", + "ETHERTYPE_NBPVCD", + "ETHERTYPE_NBS", + 
"ETHERTYPE_NCD", + "ETHERTYPE_NESTAR", + "ETHERTYPE_NETBEUI", + "ETHERTYPE_NOVELL", + "ETHERTYPE_NS", + "ETHERTYPE_NSAT", + "ETHERTYPE_NSCOMPAT", + "ETHERTYPE_NTRAILER", + "ETHERTYPE_OS9", + "ETHERTYPE_OS9NET", + "ETHERTYPE_PACER", + "ETHERTYPE_PAE", + "ETHERTYPE_PCS", + "ETHERTYPE_PLANNING", + "ETHERTYPE_PPP", + "ETHERTYPE_PPPOE", + "ETHERTYPE_PPPOEDISC", + "ETHERTYPE_PRIMENTS", + "ETHERTYPE_PUP", + "ETHERTYPE_PUPAT", + "ETHERTYPE_QINQ", + "ETHERTYPE_RACAL", + "ETHERTYPE_RATIONAL", + "ETHERTYPE_RAWFR", + "ETHERTYPE_RCL", + "ETHERTYPE_RDP", + "ETHERTYPE_RETIX", + "ETHERTYPE_REVARP", + "ETHERTYPE_SCA", + "ETHERTYPE_SECTRA", + "ETHERTYPE_SECUREDATA", + "ETHERTYPE_SGITW", + "ETHERTYPE_SG_BOUNCE", + "ETHERTYPE_SG_DIAG", + "ETHERTYPE_SG_NETGAMES", + "ETHERTYPE_SG_RESV", + "ETHERTYPE_SIMNET", + "ETHERTYPE_SLOW", + "ETHERTYPE_SLOWPROTOCOLS", + "ETHERTYPE_SNA", + "ETHERTYPE_SNMP", + "ETHERTYPE_SONIX", + "ETHERTYPE_SPIDER", + "ETHERTYPE_SPRITE", + "ETHERTYPE_STP", + "ETHERTYPE_TALARIS", + "ETHERTYPE_TALARISMC", + "ETHERTYPE_TCPCOMP", + "ETHERTYPE_TCPSM", + "ETHERTYPE_TEC", + "ETHERTYPE_TIGAN", + "ETHERTYPE_TRAIL", + "ETHERTYPE_TRANSETHER", + "ETHERTYPE_TYMSHARE", + "ETHERTYPE_UBBST", + "ETHERTYPE_UBDEBUG", + "ETHERTYPE_UBDIAGLOOP", + "ETHERTYPE_UBDL", + "ETHERTYPE_UBNIU", + "ETHERTYPE_UBNMC", + "ETHERTYPE_VALID", + "ETHERTYPE_VARIAN", + "ETHERTYPE_VAXELN", + "ETHERTYPE_VEECO", + "ETHERTYPE_VEXP", + "ETHERTYPE_VGLAB", + "ETHERTYPE_VINES", + "ETHERTYPE_VINESECHO", + "ETHERTYPE_VINESLOOP", + "ETHERTYPE_VITAL", + "ETHERTYPE_VLAN", + "ETHERTYPE_VLTLMAN", + "ETHERTYPE_VPROD", + "ETHERTYPE_VURESERVED", + "ETHERTYPE_WATERLOO", + "ETHERTYPE_WELLFLEET", + "ETHERTYPE_X25", + "ETHERTYPE_X75", + "ETHERTYPE_XNSSM", + "ETHERTYPE_XTP", + "ETHER_ADDR_LEN", + "ETHER_ALIGN", + "ETHER_CRC_LEN", + "ETHER_CRC_POLY_BE", + "ETHER_CRC_POLY_LE", + "ETHER_HDR_LEN", + "ETHER_MAX_DIX_LEN", + "ETHER_MAX_LEN", + "ETHER_MAX_LEN_JUMBO", + "ETHER_MIN_LEN", + "ETHER_PPPOE_ENCAP_LEN", + "ETHER_TYPE_LEN", + "ETHER_VLAN_ENCAP_LEN", + "ETH_P_1588", + "ETH_P_8021Q", + "ETH_P_802_2", + "ETH_P_802_3", + "ETH_P_AARP", + "ETH_P_ALL", + "ETH_P_AOE", + "ETH_P_ARCNET", + "ETH_P_ARP", + "ETH_P_ATALK", + "ETH_P_ATMFATE", + "ETH_P_ATMMPOA", + "ETH_P_AX25", + "ETH_P_BPQ", + "ETH_P_CAIF", + "ETH_P_CAN", + "ETH_P_CONTROL", + "ETH_P_CUST", + "ETH_P_DDCMP", + "ETH_P_DEC", + "ETH_P_DIAG", + "ETH_P_DNA_DL", + "ETH_P_DNA_RC", + "ETH_P_DNA_RT", + "ETH_P_DSA", + "ETH_P_ECONET", + "ETH_P_EDSA", + "ETH_P_FCOE", + "ETH_P_FIP", + "ETH_P_HDLC", + "ETH_P_IEEE802154", + "ETH_P_IEEEPUP", + "ETH_P_IEEEPUPAT", + "ETH_P_IP", + "ETH_P_IPV6", + "ETH_P_IPX", + "ETH_P_IRDA", + "ETH_P_LAT", + "ETH_P_LINK_CTL", + "ETH_P_LOCALTALK", + "ETH_P_LOOP", + "ETH_P_MOBITEX", + "ETH_P_MPLS_MC", + "ETH_P_MPLS_UC", + "ETH_P_PAE", + "ETH_P_PAUSE", + "ETH_P_PHONET", + "ETH_P_PPPTALK", + "ETH_P_PPP_DISC", + "ETH_P_PPP_MP", + "ETH_P_PPP_SES", + "ETH_P_PUP", + "ETH_P_PUPAT", + "ETH_P_RARP", + "ETH_P_SCA", + "ETH_P_SLOW", + "ETH_P_SNAP", + "ETH_P_TEB", + "ETH_P_TIPC", + "ETH_P_TRAILER", + "ETH_P_TR_802_2", + "ETH_P_WAN_PPP", + "ETH_P_WCCP", + "ETH_P_X25", + "ETIME", + "ETIMEDOUT", + "ETOOMANYREFS", + "ETXTBSY", + "EUCLEAN", + "EUNATCH", + "EUSERS", + "EVFILT_AIO", + "EVFILT_FS", + "EVFILT_LIO", + "EVFILT_MACHPORT", + "EVFILT_PROC", + "EVFILT_READ", + "EVFILT_SIGNAL", + "EVFILT_SYSCOUNT", + "EVFILT_THREADMARKER", + "EVFILT_TIMER", + "EVFILT_USER", + "EVFILT_VM", + "EVFILT_VNODE", + "EVFILT_WRITE", + "EV_ADD", + "EV_CLEAR", + "EV_DELETE", + "EV_DISABLE", + "EV_DISPATCH", + "EV_DROP", + 
"EV_ENABLE", + "EV_EOF", + "EV_ERROR", + "EV_FLAG0", + "EV_FLAG1", + "EV_ONESHOT", + "EV_OOBAND", + "EV_POLL", + "EV_RECEIPT", + "EV_SYSFLAGS", + "EWINDOWS", + "EWOULDBLOCK", + "EXDEV", + "EXFULL", + "EXTA", + "EXTB", + "EXTPROC", + "Environ", + "EpollCreate", + "EpollCreate1", + "EpollCtl", + "EpollEvent", + "EpollWait", + "Errno", + "EscapeArg", + "Exchangedata", + "Exec", + "Exit", + "ExitProcess", + "FD_CLOEXEC", + "FD_SETSIZE", + "FILE_ACTION_ADDED", + "FILE_ACTION_MODIFIED", + "FILE_ACTION_REMOVED", + "FILE_ACTION_RENAMED_NEW_NAME", + "FILE_ACTION_RENAMED_OLD_NAME", + "FILE_APPEND_DATA", + "FILE_ATTRIBUTE_ARCHIVE", + "FILE_ATTRIBUTE_DIRECTORY", + "FILE_ATTRIBUTE_HIDDEN", + "FILE_ATTRIBUTE_NORMAL", + "FILE_ATTRIBUTE_READONLY", + "FILE_ATTRIBUTE_REPARSE_POINT", + "FILE_ATTRIBUTE_SYSTEM", + "FILE_BEGIN", + "FILE_CURRENT", + "FILE_END", + "FILE_FLAG_BACKUP_SEMANTICS", + "FILE_FLAG_OPEN_REPARSE_POINT", + "FILE_FLAG_OVERLAPPED", + "FILE_LIST_DIRECTORY", + "FILE_MAP_COPY", + "FILE_MAP_EXECUTE", + "FILE_MAP_READ", + "FILE_MAP_WRITE", + "FILE_NOTIFY_CHANGE_ATTRIBUTES", + "FILE_NOTIFY_CHANGE_CREATION", + "FILE_NOTIFY_CHANGE_DIR_NAME", + "FILE_NOTIFY_CHANGE_FILE_NAME", + "FILE_NOTIFY_CHANGE_LAST_ACCESS", + "FILE_NOTIFY_CHANGE_LAST_WRITE", + "FILE_NOTIFY_CHANGE_SIZE", + "FILE_SHARE_DELETE", + "FILE_SHARE_READ", + "FILE_SHARE_WRITE", + "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", + "FILE_SKIP_SET_EVENT_ON_HANDLE", + "FILE_TYPE_CHAR", + "FILE_TYPE_DISK", + "FILE_TYPE_PIPE", + "FILE_TYPE_REMOTE", + "FILE_TYPE_UNKNOWN", + "FILE_WRITE_ATTRIBUTES", + "FLUSHO", + "FORMAT_MESSAGE_ALLOCATE_BUFFER", + "FORMAT_MESSAGE_ARGUMENT_ARRAY", + "FORMAT_MESSAGE_FROM_HMODULE", + "FORMAT_MESSAGE_FROM_STRING", + "FORMAT_MESSAGE_FROM_SYSTEM", + "FORMAT_MESSAGE_IGNORE_INSERTS", + "FORMAT_MESSAGE_MAX_WIDTH_MASK", + "FSCTL_GET_REPARSE_POINT", + "F_ADDFILESIGS", + "F_ADDSIGS", + "F_ALLOCATEALL", + "F_ALLOCATECONTIG", + "F_CANCEL", + "F_CHKCLEAN", + "F_CLOSEM", + "F_DUP2FD", + "F_DUP2FD_CLOEXEC", + "F_DUPFD", + "F_DUPFD_CLOEXEC", + "F_EXLCK", + "F_FLUSH_DATA", + "F_FREEZE_FS", + "F_FSCTL", + "F_FSDIRMASK", + "F_FSIN", + "F_FSINOUT", + "F_FSOUT", + "F_FSPRIV", + "F_FSVOID", + "F_FULLFSYNC", + "F_GETFD", + "F_GETFL", + "F_GETLEASE", + "F_GETLK", + "F_GETLK64", + "F_GETLKPID", + "F_GETNOSIGPIPE", + "F_GETOWN", + "F_GETOWN_EX", + "F_GETPATH", + "F_GETPATH_MTMINFO", + "F_GETPIPE_SZ", + "F_GETPROTECTIONCLASS", + "F_GETSIG", + "F_GLOBAL_NOCACHE", + "F_LOCK", + "F_LOG2PHYS", + "F_LOG2PHYS_EXT", + "F_MARKDEPENDENCY", + "F_MAXFD", + "F_NOCACHE", + "F_NODIRECT", + "F_NOTIFY", + "F_OGETLK", + "F_OK", + "F_OSETLK", + "F_OSETLKW", + "F_PARAM_MASK", + "F_PARAM_MAX", + "F_PATHPKG_CHECK", + "F_PEOFPOSMODE", + "F_PREALLOCATE", + "F_RDADVISE", + "F_RDAHEAD", + "F_RDLCK", + "F_READAHEAD", + "F_READBOOTSTRAP", + "F_SETBACKINGSTORE", + "F_SETFD", + "F_SETFL", + "F_SETLEASE", + "F_SETLK", + "F_SETLK64", + "F_SETLKW", + "F_SETLKW64", + "F_SETLK_REMOTE", + "F_SETNOSIGPIPE", + "F_SETOWN", + "F_SETOWN_EX", + "F_SETPIPE_SZ", + "F_SETPROTECTIONCLASS", + "F_SETSIG", + "F_SETSIZE", + "F_SHLCK", + "F_TEST", + "F_THAW_FS", + "F_TLOCK", + "F_ULOCK", + "F_UNLCK", + "F_UNLCKSYS", + "F_VOLPOSMODE", + "F_WRITEBOOTSTRAP", + "F_WRLCK", + "Faccessat", + "Fallocate", + "Fbootstraptransfer_t", + "Fchdir", + "Fchflags", + "Fchmod", + "Fchmodat", + "Fchown", + "Fchownat", + "FcntlFlock", + "FdSet", + "Fdatasync", + "FileNotifyInformation", + "Filetime", + "FindClose", + "FindFirstFile", + "FindNextFile", + "Flock", + "Flock_t", + "FlushBpf", + "FlushFileBuffers", + 
"FlushViewOfFile", + "ForkExec", + "ForkLock", + "FormatMessage", + "Fpathconf", + "FreeAddrInfoW", + "FreeEnvironmentStrings", + "FreeLibrary", + "Fsid", + "Fstat", + "Fstatat", + "Fstatfs", + "Fstore_t", + "Fsync", + "Ftruncate", + "FullPath", + "Futimes", + "Futimesat", + "GENERIC_ALL", + "GENERIC_EXECUTE", + "GENERIC_READ", + "GENERIC_WRITE", + "GUID", + "GetAcceptExSockaddrs", + "GetAdaptersInfo", + "GetAddrInfoW", + "GetCommandLine", + "GetComputerName", + "GetConsoleMode", + "GetCurrentDirectory", + "GetCurrentProcess", + "GetEnvironmentStrings", + "GetEnvironmentVariable", + "GetExitCodeProcess", + "GetFileAttributes", + "GetFileAttributesEx", + "GetFileExInfoStandard", + "GetFileExMaxInfoLevel", + "GetFileInformationByHandle", + "GetFileType", + "GetFullPathName", + "GetHostByName", + "GetIfEntry", + "GetLastError", + "GetLengthSid", + "GetLongPathName", + "GetProcAddress", + "GetProcessTimes", + "GetProtoByName", + "GetQueuedCompletionStatus", + "GetServByName", + "GetShortPathName", + "GetStartupInfo", + "GetStdHandle", + "GetSystemTimeAsFileTime", + "GetTempPath", + "GetTimeZoneInformation", + "GetTokenInformation", + "GetUserNameEx", + "GetUserProfileDirectory", + "GetVersion", + "Getcwd", + "Getdents", + "Getdirentries", + "Getdtablesize", + "Getegid", + "Getenv", + "Geteuid", + "Getfsstat", + "Getgid", + "Getgroups", + "Getpagesize", + "Getpeername", + "Getpgid", + "Getpgrp", + "Getpid", + "Getppid", + "Getpriority", + "Getrlimit", + "Getrusage", + "Getsid", + "Getsockname", + "Getsockopt", + "GetsockoptByte", + "GetsockoptICMPv6Filter", + "GetsockoptIPMreq", + "GetsockoptIPMreqn", + "GetsockoptIPv6MTUInfo", + "GetsockoptIPv6Mreq", + "GetsockoptInet4Addr", + "GetsockoptInt", + "GetsockoptUcred", + "Gettid", + "Gettimeofday", + "Getuid", + "Getwd", + "Getxattr", + "HANDLE_FLAG_INHERIT", + "HKEY_CLASSES_ROOT", + "HKEY_CURRENT_CONFIG", + "HKEY_CURRENT_USER", + "HKEY_DYN_DATA", + "HKEY_LOCAL_MACHINE", + "HKEY_PERFORMANCE_DATA", + "HKEY_USERS", + "HUPCL", + "Handle", + "Hostent", + "ICANON", + "ICMP6_FILTER", + "ICMPV6_FILTER", + "ICMPv6Filter", + "ICRNL", + "IEXTEN", + "IFAN_ARRIVAL", + "IFAN_DEPARTURE", + "IFA_ADDRESS", + "IFA_ANYCAST", + "IFA_BROADCAST", + "IFA_CACHEINFO", + "IFA_F_DADFAILED", + "IFA_F_DEPRECATED", + "IFA_F_HOMEADDRESS", + "IFA_F_NODAD", + "IFA_F_OPTIMISTIC", + "IFA_F_PERMANENT", + "IFA_F_SECONDARY", + "IFA_F_TEMPORARY", + "IFA_F_TENTATIVE", + "IFA_LABEL", + "IFA_LOCAL", + "IFA_MAX", + "IFA_MULTICAST", + "IFA_ROUTE", + "IFA_UNSPEC", + "IFF_ALLMULTI", + "IFF_ALTPHYS", + "IFF_AUTOMEDIA", + "IFF_BROADCAST", + "IFF_CANTCHANGE", + "IFF_CANTCONFIG", + "IFF_DEBUG", + "IFF_DRV_OACTIVE", + "IFF_DRV_RUNNING", + "IFF_DYING", + "IFF_DYNAMIC", + "IFF_LINK0", + "IFF_LINK1", + "IFF_LINK2", + "IFF_LOOPBACK", + "IFF_MASTER", + "IFF_MONITOR", + "IFF_MULTICAST", + "IFF_NOARP", + "IFF_NOTRAILERS", + "IFF_NO_PI", + "IFF_OACTIVE", + "IFF_ONE_QUEUE", + "IFF_POINTOPOINT", + "IFF_POINTTOPOINT", + "IFF_PORTSEL", + "IFF_PPROMISC", + "IFF_PROMISC", + "IFF_RENAMING", + "IFF_RUNNING", + "IFF_SIMPLEX", + "IFF_SLAVE", + "IFF_SMART", + "IFF_STATICARP", + "IFF_TAP", + "IFF_TUN", + "IFF_TUN_EXCL", + "IFF_UP", + "IFF_VNET_HDR", + "IFLA_ADDRESS", + "IFLA_BROADCAST", + "IFLA_COST", + "IFLA_IFALIAS", + "IFLA_IFNAME", + "IFLA_LINK", + "IFLA_LINKINFO", + "IFLA_LINKMODE", + "IFLA_MAP", + "IFLA_MASTER", + "IFLA_MAX", + "IFLA_MTU", + "IFLA_NET_NS_PID", + "IFLA_OPERSTATE", + "IFLA_PRIORITY", + "IFLA_PROTINFO", + "IFLA_QDISC", + "IFLA_STATS", + "IFLA_TXQLEN", + "IFLA_UNSPEC", + "IFLA_WEIGHT", + 
"IFLA_WIRELESS", + "IFNAMSIZ", + "IFT_1822", + "IFT_A12MPPSWITCH", + "IFT_AAL2", + "IFT_AAL5", + "IFT_ADSL", + "IFT_AFLANE8023", + "IFT_AFLANE8025", + "IFT_ARAP", + "IFT_ARCNET", + "IFT_ARCNETPLUS", + "IFT_ASYNC", + "IFT_ATM", + "IFT_ATMDXI", + "IFT_ATMFUNI", + "IFT_ATMIMA", + "IFT_ATMLOGICAL", + "IFT_ATMRADIO", + "IFT_ATMSUBINTERFACE", + "IFT_ATMVCIENDPT", + "IFT_ATMVIRTUAL", + "IFT_BGPPOLICYACCOUNTING", + "IFT_BLUETOOTH", + "IFT_BRIDGE", + "IFT_BSC", + "IFT_CARP", + "IFT_CCTEMUL", + "IFT_CELLULAR", + "IFT_CEPT", + "IFT_CES", + "IFT_CHANNEL", + "IFT_CNR", + "IFT_COFFEE", + "IFT_COMPOSITELINK", + "IFT_DCN", + "IFT_DIGITALPOWERLINE", + "IFT_DIGITALWRAPPEROVERHEADCHANNEL", + "IFT_DLSW", + "IFT_DOCSCABLEDOWNSTREAM", + "IFT_DOCSCABLEMACLAYER", + "IFT_DOCSCABLEUPSTREAM", + "IFT_DOCSCABLEUPSTREAMCHANNEL", + "IFT_DS0", + "IFT_DS0BUNDLE", + "IFT_DS1FDL", + "IFT_DS3", + "IFT_DTM", + "IFT_DUMMY", + "IFT_DVBASILN", + "IFT_DVBASIOUT", + "IFT_DVBRCCDOWNSTREAM", + "IFT_DVBRCCMACLAYER", + "IFT_DVBRCCUPSTREAM", + "IFT_ECONET", + "IFT_ENC", + "IFT_EON", + "IFT_EPLRS", + "IFT_ESCON", + "IFT_ETHER", + "IFT_FAITH", + "IFT_FAST", + "IFT_FASTETHER", + "IFT_FASTETHERFX", + "IFT_FDDI", + "IFT_FIBRECHANNEL", + "IFT_FRAMERELAYINTERCONNECT", + "IFT_FRAMERELAYMPI", + "IFT_FRDLCIENDPT", + "IFT_FRELAY", + "IFT_FRELAYDCE", + "IFT_FRF16MFRBUNDLE", + "IFT_FRFORWARD", + "IFT_G703AT2MB", + "IFT_G703AT64K", + "IFT_GIF", + "IFT_GIGABITETHERNET", + "IFT_GR303IDT", + "IFT_GR303RDT", + "IFT_H323GATEKEEPER", + "IFT_H323PROXY", + "IFT_HDH1822", + "IFT_HDLC", + "IFT_HDSL2", + "IFT_HIPERLAN2", + "IFT_HIPPI", + "IFT_HIPPIINTERFACE", + "IFT_HOSTPAD", + "IFT_HSSI", + "IFT_HY", + "IFT_IBM370PARCHAN", + "IFT_IDSL", + "IFT_IEEE1394", + "IFT_IEEE80211", + "IFT_IEEE80212", + "IFT_IEEE8023ADLAG", + "IFT_IFGSN", + "IFT_IMT", + "IFT_INFINIBAND", + "IFT_INTERLEAVE", + "IFT_IP", + "IFT_IPFORWARD", + "IFT_IPOVERATM", + "IFT_IPOVERCDLC", + "IFT_IPOVERCLAW", + "IFT_IPSWITCH", + "IFT_IPXIP", + "IFT_ISDN", + "IFT_ISDNBASIC", + "IFT_ISDNPRIMARY", + "IFT_ISDNS", + "IFT_ISDNU", + "IFT_ISO88022LLC", + "IFT_ISO88023", + "IFT_ISO88024", + "IFT_ISO88025", + "IFT_ISO88025CRFPINT", + "IFT_ISO88025DTR", + "IFT_ISO88025FIBER", + "IFT_ISO88026", + "IFT_ISUP", + "IFT_L2VLAN", + "IFT_L3IPVLAN", + "IFT_L3IPXVLAN", + "IFT_LAPB", + "IFT_LAPD", + "IFT_LAPF", + "IFT_LINEGROUP", + "IFT_LOCALTALK", + "IFT_LOOP", + "IFT_MEDIAMAILOVERIP", + "IFT_MFSIGLINK", + "IFT_MIOX25", + "IFT_MODEM", + "IFT_MPC", + "IFT_MPLS", + "IFT_MPLSTUNNEL", + "IFT_MSDSL", + "IFT_MVL", + "IFT_MYRINET", + "IFT_NFAS", + "IFT_NSIP", + "IFT_OPTICALCHANNEL", + "IFT_OPTICALTRANSPORT", + "IFT_OTHER", + "IFT_P10", + "IFT_P80", + "IFT_PARA", + "IFT_PDP", + "IFT_PFLOG", + "IFT_PFLOW", + "IFT_PFSYNC", + "IFT_PLC", + "IFT_PON155", + "IFT_PON622", + "IFT_POS", + "IFT_PPP", + "IFT_PPPMULTILINKBUNDLE", + "IFT_PROPATM", + "IFT_PROPBWAP2MP", + "IFT_PROPCNLS", + "IFT_PROPDOCSWIRELESSDOWNSTREAM", + "IFT_PROPDOCSWIRELESSMACLAYER", + "IFT_PROPDOCSWIRELESSUPSTREAM", + "IFT_PROPMUX", + "IFT_PROPVIRTUAL", + "IFT_PROPWIRELESSP2P", + "IFT_PTPSERIAL", + "IFT_PVC", + "IFT_Q2931", + "IFT_QLLC", + "IFT_RADIOMAC", + "IFT_RADSL", + "IFT_REACHDSL", + "IFT_RFC1483", + "IFT_RS232", + "IFT_RSRB", + "IFT_SDLC", + "IFT_SDSL", + "IFT_SHDSL", + "IFT_SIP", + "IFT_SIPSIG", + "IFT_SIPTG", + "IFT_SLIP", + "IFT_SMDSDXI", + "IFT_SMDSICIP", + "IFT_SONET", + "IFT_SONETOVERHEADCHANNEL", + "IFT_SONETPATH", + "IFT_SONETVT", + "IFT_SRP", + "IFT_SS7SIGLINK", + "IFT_STACKTOSTACK", + "IFT_STARLAN", + "IFT_STF", + "IFT_T1", + "IFT_TDLC", + 
"IFT_TELINK", + "IFT_TERMPAD", + "IFT_TR008", + "IFT_TRANSPHDLC", + "IFT_TUNNEL", + "IFT_ULTRA", + "IFT_USB", + "IFT_V11", + "IFT_V35", + "IFT_V36", + "IFT_V37", + "IFT_VDSL", + "IFT_VIRTUALIPADDRESS", + "IFT_VIRTUALTG", + "IFT_VOICEDID", + "IFT_VOICEEM", + "IFT_VOICEEMFGD", + "IFT_VOICEENCAP", + "IFT_VOICEFGDEANA", + "IFT_VOICEFXO", + "IFT_VOICEFXS", + "IFT_VOICEOVERATM", + "IFT_VOICEOVERCABLE", + "IFT_VOICEOVERFRAMERELAY", + "IFT_VOICEOVERIP", + "IFT_X213", + "IFT_X25", + "IFT_X25DDN", + "IFT_X25HUNTGROUP", + "IFT_X25MLP", + "IFT_X25PLE", + "IFT_XETHER", + "IGNBRK", + "IGNCR", + "IGNORE", + "IGNPAR", + "IMAXBEL", + "INFINITE", + "INLCR", + "INPCK", + "INVALID_FILE_ATTRIBUTES", + "IN_ACCESS", + "IN_ALL_EVENTS", + "IN_ATTRIB", + "IN_CLASSA_HOST", + "IN_CLASSA_MAX", + "IN_CLASSA_NET", + "IN_CLASSA_NSHIFT", + "IN_CLASSB_HOST", + "IN_CLASSB_MAX", + "IN_CLASSB_NET", + "IN_CLASSB_NSHIFT", + "IN_CLASSC_HOST", + "IN_CLASSC_NET", + "IN_CLASSC_NSHIFT", + "IN_CLASSD_HOST", + "IN_CLASSD_NET", + "IN_CLASSD_NSHIFT", + "IN_CLOEXEC", + "IN_CLOSE", + "IN_CLOSE_NOWRITE", + "IN_CLOSE_WRITE", + "IN_CREATE", + "IN_DELETE", + "IN_DELETE_SELF", + "IN_DONT_FOLLOW", + "IN_EXCL_UNLINK", + "IN_IGNORED", + "IN_ISDIR", + "IN_LINKLOCALNETNUM", + "IN_LOOPBACKNET", + "IN_MASK_ADD", + "IN_MODIFY", + "IN_MOVE", + "IN_MOVED_FROM", + "IN_MOVED_TO", + "IN_MOVE_SELF", + "IN_NONBLOCK", + "IN_ONESHOT", + "IN_ONLYDIR", + "IN_OPEN", + "IN_Q_OVERFLOW", + "IN_RFC3021_HOST", + "IN_RFC3021_MASK", + "IN_RFC3021_NET", + "IN_RFC3021_NSHIFT", + "IN_UNMOUNT", + "IOC_IN", + "IOC_INOUT", + "IOC_OUT", + "IOC_VENDOR", + "IOC_WS2", + "IO_REPARSE_TAG_SYMLINK", + "IPMreq", + "IPMreqn", + "IPPROTO_3PC", + "IPPROTO_ADFS", + "IPPROTO_AH", + "IPPROTO_AHIP", + "IPPROTO_APES", + "IPPROTO_ARGUS", + "IPPROTO_AX25", + "IPPROTO_BHA", + "IPPROTO_BLT", + "IPPROTO_BRSATMON", + "IPPROTO_CARP", + "IPPROTO_CFTP", + "IPPROTO_CHAOS", + "IPPROTO_CMTP", + "IPPROTO_COMP", + "IPPROTO_CPHB", + "IPPROTO_CPNX", + "IPPROTO_DCCP", + "IPPROTO_DDP", + "IPPROTO_DGP", + "IPPROTO_DIVERT", + "IPPROTO_DIVERT_INIT", + "IPPROTO_DIVERT_RESP", + "IPPROTO_DONE", + "IPPROTO_DSTOPTS", + "IPPROTO_EGP", + "IPPROTO_EMCON", + "IPPROTO_ENCAP", + "IPPROTO_EON", + "IPPROTO_ESP", + "IPPROTO_ETHERIP", + "IPPROTO_FRAGMENT", + "IPPROTO_GGP", + "IPPROTO_GMTP", + "IPPROTO_GRE", + "IPPROTO_HELLO", + "IPPROTO_HMP", + "IPPROTO_HOPOPTS", + "IPPROTO_ICMP", + "IPPROTO_ICMPV6", + "IPPROTO_IDP", + "IPPROTO_IDPR", + "IPPROTO_IDRP", + "IPPROTO_IGMP", + "IPPROTO_IGP", + "IPPROTO_IGRP", + "IPPROTO_IL", + "IPPROTO_INLSP", + "IPPROTO_INP", + "IPPROTO_IP", + "IPPROTO_IPCOMP", + "IPPROTO_IPCV", + "IPPROTO_IPEIP", + "IPPROTO_IPIP", + "IPPROTO_IPPC", + "IPPROTO_IPV4", + "IPPROTO_IPV6", + "IPPROTO_IPV6_ICMP", + "IPPROTO_IRTP", + "IPPROTO_KRYPTOLAN", + "IPPROTO_LARP", + "IPPROTO_LEAF1", + "IPPROTO_LEAF2", + "IPPROTO_MAX", + "IPPROTO_MAXID", + "IPPROTO_MEAS", + "IPPROTO_MH", + "IPPROTO_MHRP", + "IPPROTO_MICP", + "IPPROTO_MOBILE", + "IPPROTO_MPLS", + "IPPROTO_MTP", + "IPPROTO_MUX", + "IPPROTO_ND", + "IPPROTO_NHRP", + "IPPROTO_NONE", + "IPPROTO_NSP", + "IPPROTO_NVPII", + "IPPROTO_OLD_DIVERT", + "IPPROTO_OSPFIGP", + "IPPROTO_PFSYNC", + "IPPROTO_PGM", + "IPPROTO_PIGP", + "IPPROTO_PIM", + "IPPROTO_PRM", + "IPPROTO_PUP", + "IPPROTO_PVP", + "IPPROTO_RAW", + "IPPROTO_RCCMON", + "IPPROTO_RDP", + "IPPROTO_ROUTING", + "IPPROTO_RSVP", + "IPPROTO_RVD", + "IPPROTO_SATEXPAK", + "IPPROTO_SATMON", + "IPPROTO_SCCSP", + "IPPROTO_SCTP", + "IPPROTO_SDRP", + "IPPROTO_SEND", + "IPPROTO_SEP", + "IPPROTO_SKIP", + "IPPROTO_SPACER", + 
"IPPROTO_SRPC", + "IPPROTO_ST", + "IPPROTO_SVMTP", + "IPPROTO_SWIPE", + "IPPROTO_TCF", + "IPPROTO_TCP", + "IPPROTO_TLSP", + "IPPROTO_TP", + "IPPROTO_TPXX", + "IPPROTO_TRUNK1", + "IPPROTO_TRUNK2", + "IPPROTO_TTP", + "IPPROTO_UDP", + "IPPROTO_UDPLITE", + "IPPROTO_VINES", + "IPPROTO_VISA", + "IPPROTO_VMTP", + "IPPROTO_VRRP", + "IPPROTO_WBEXPAK", + "IPPROTO_WBMON", + "IPPROTO_WSN", + "IPPROTO_XNET", + "IPPROTO_XTP", + "IPV6_2292DSTOPTS", + "IPV6_2292HOPLIMIT", + "IPV6_2292HOPOPTS", + "IPV6_2292NEXTHOP", + "IPV6_2292PKTINFO", + "IPV6_2292PKTOPTIONS", + "IPV6_2292RTHDR", + "IPV6_ADDRFORM", + "IPV6_ADD_MEMBERSHIP", + "IPV6_AUTHHDR", + "IPV6_AUTH_LEVEL", + "IPV6_AUTOFLOWLABEL", + "IPV6_BINDANY", + "IPV6_BINDV6ONLY", + "IPV6_BOUND_IF", + "IPV6_CHECKSUM", + "IPV6_DEFAULT_MULTICAST_HOPS", + "IPV6_DEFAULT_MULTICAST_LOOP", + "IPV6_DEFHLIM", + "IPV6_DONTFRAG", + "IPV6_DROP_MEMBERSHIP", + "IPV6_DSTOPTS", + "IPV6_ESP_NETWORK_LEVEL", + "IPV6_ESP_TRANS_LEVEL", + "IPV6_FAITH", + "IPV6_FLOWINFO_MASK", + "IPV6_FLOWLABEL_MASK", + "IPV6_FRAGTTL", + "IPV6_FW_ADD", + "IPV6_FW_DEL", + "IPV6_FW_FLUSH", + "IPV6_FW_GET", + "IPV6_FW_ZERO", + "IPV6_HLIMDEC", + "IPV6_HOPLIMIT", + "IPV6_HOPOPTS", + "IPV6_IPCOMP_LEVEL", + "IPV6_IPSEC_POLICY", + "IPV6_JOIN_ANYCAST", + "IPV6_JOIN_GROUP", + "IPV6_LEAVE_ANYCAST", + "IPV6_LEAVE_GROUP", + "IPV6_MAXHLIM", + "IPV6_MAXOPTHDR", + "IPV6_MAXPACKET", + "IPV6_MAX_GROUP_SRC_FILTER", + "IPV6_MAX_MEMBERSHIPS", + "IPV6_MAX_SOCK_SRC_FILTER", + "IPV6_MIN_MEMBERSHIPS", + "IPV6_MMTU", + "IPV6_MSFILTER", + "IPV6_MTU", + "IPV6_MTU_DISCOVER", + "IPV6_MULTICAST_HOPS", + "IPV6_MULTICAST_IF", + "IPV6_MULTICAST_LOOP", + "IPV6_NEXTHOP", + "IPV6_OPTIONS", + "IPV6_PATHMTU", + "IPV6_PIPEX", + "IPV6_PKTINFO", + "IPV6_PMTUDISC_DO", + "IPV6_PMTUDISC_DONT", + "IPV6_PMTUDISC_PROBE", + "IPV6_PMTUDISC_WANT", + "IPV6_PORTRANGE", + "IPV6_PORTRANGE_DEFAULT", + "IPV6_PORTRANGE_HIGH", + "IPV6_PORTRANGE_LOW", + "IPV6_PREFER_TEMPADDR", + "IPV6_RECVDSTOPTS", + "IPV6_RECVDSTPORT", + "IPV6_RECVERR", + "IPV6_RECVHOPLIMIT", + "IPV6_RECVHOPOPTS", + "IPV6_RECVPATHMTU", + "IPV6_RECVPKTINFO", + "IPV6_RECVRTHDR", + "IPV6_RECVTCLASS", + "IPV6_ROUTER_ALERT", + "IPV6_RTABLE", + "IPV6_RTHDR", + "IPV6_RTHDRDSTOPTS", + "IPV6_RTHDR_LOOSE", + "IPV6_RTHDR_STRICT", + "IPV6_RTHDR_TYPE_0", + "IPV6_RXDSTOPTS", + "IPV6_RXHOPOPTS", + "IPV6_SOCKOPT_RESERVED1", + "IPV6_TCLASS", + "IPV6_UNICAST_HOPS", + "IPV6_USE_MIN_MTU", + "IPV6_V6ONLY", + "IPV6_VERSION", + "IPV6_VERSION_MASK", + "IPV6_XFRM_POLICY", + "IP_ADD_MEMBERSHIP", + "IP_ADD_SOURCE_MEMBERSHIP", + "IP_AUTH_LEVEL", + "IP_BINDANY", + "IP_BLOCK_SOURCE", + "IP_BOUND_IF", + "IP_DEFAULT_MULTICAST_LOOP", + "IP_DEFAULT_MULTICAST_TTL", + "IP_DF", + "IP_DIVERTFL", + "IP_DONTFRAG", + "IP_DROP_MEMBERSHIP", + "IP_DROP_SOURCE_MEMBERSHIP", + "IP_DUMMYNET3", + "IP_DUMMYNET_CONFIGURE", + "IP_DUMMYNET_DEL", + "IP_DUMMYNET_FLUSH", + "IP_DUMMYNET_GET", + "IP_EF", + "IP_ERRORMTU", + "IP_ESP_NETWORK_LEVEL", + "IP_ESP_TRANS_LEVEL", + "IP_FAITH", + "IP_FREEBIND", + "IP_FW3", + "IP_FW_ADD", + "IP_FW_DEL", + "IP_FW_FLUSH", + "IP_FW_GET", + "IP_FW_NAT_CFG", + "IP_FW_NAT_DEL", + "IP_FW_NAT_GET_CONFIG", + "IP_FW_NAT_GET_LOG", + "IP_FW_RESETLOG", + "IP_FW_TABLE_ADD", + "IP_FW_TABLE_DEL", + "IP_FW_TABLE_FLUSH", + "IP_FW_TABLE_GETSIZE", + "IP_FW_TABLE_LIST", + "IP_FW_ZERO", + "IP_HDRINCL", + "IP_IPCOMP_LEVEL", + "IP_IPSECFLOWINFO", + "IP_IPSEC_LOCAL_AUTH", + "IP_IPSEC_LOCAL_CRED", + "IP_IPSEC_LOCAL_ID", + "IP_IPSEC_POLICY", + "IP_IPSEC_REMOTE_AUTH", + "IP_IPSEC_REMOTE_CRED", + "IP_IPSEC_REMOTE_ID", + "IP_MAXPACKET", + 
"IP_MAX_GROUP_SRC_FILTER", + "IP_MAX_MEMBERSHIPS", + "IP_MAX_SOCK_MUTE_FILTER", + "IP_MAX_SOCK_SRC_FILTER", + "IP_MAX_SOURCE_FILTER", + "IP_MF", + "IP_MINFRAGSIZE", + "IP_MINTTL", + "IP_MIN_MEMBERSHIPS", + "IP_MSFILTER", + "IP_MSS", + "IP_MTU", + "IP_MTU_DISCOVER", + "IP_MULTICAST_IF", + "IP_MULTICAST_IFINDEX", + "IP_MULTICAST_LOOP", + "IP_MULTICAST_TTL", + "IP_MULTICAST_VIF", + "IP_NAT__XXX", + "IP_OFFMASK", + "IP_OLD_FW_ADD", + "IP_OLD_FW_DEL", + "IP_OLD_FW_FLUSH", + "IP_OLD_FW_GET", + "IP_OLD_FW_RESETLOG", + "IP_OLD_FW_ZERO", + "IP_ONESBCAST", + "IP_OPTIONS", + "IP_ORIGDSTADDR", + "IP_PASSSEC", + "IP_PIPEX", + "IP_PKTINFO", + "IP_PKTOPTIONS", + "IP_PMTUDISC", + "IP_PMTUDISC_DO", + "IP_PMTUDISC_DONT", + "IP_PMTUDISC_PROBE", + "IP_PMTUDISC_WANT", + "IP_PORTRANGE", + "IP_PORTRANGE_DEFAULT", + "IP_PORTRANGE_HIGH", + "IP_PORTRANGE_LOW", + "IP_RECVDSTADDR", + "IP_RECVDSTPORT", + "IP_RECVERR", + "IP_RECVIF", + "IP_RECVOPTS", + "IP_RECVORIGDSTADDR", + "IP_RECVPKTINFO", + "IP_RECVRETOPTS", + "IP_RECVRTABLE", + "IP_RECVTOS", + "IP_RECVTTL", + "IP_RETOPTS", + "IP_RF", + "IP_ROUTER_ALERT", + "IP_RSVP_OFF", + "IP_RSVP_ON", + "IP_RSVP_VIF_OFF", + "IP_RSVP_VIF_ON", + "IP_RTABLE", + "IP_SENDSRCADDR", + "IP_STRIPHDR", + "IP_TOS", + "IP_TRAFFIC_MGT_BACKGROUND", + "IP_TRANSPARENT", + "IP_TTL", + "IP_UNBLOCK_SOURCE", + "IP_XFRM_POLICY", + "IPv6MTUInfo", + "IPv6Mreq", + "ISIG", + "ISTRIP", + "IUCLC", + "IUTF8", + "IXANY", + "IXOFF", + "IXON", + "IfAddrmsg", + "IfAnnounceMsghdr", + "IfData", + "IfInfomsg", + "IfMsghdr", + "IfaMsghdr", + "IfmaMsghdr", + "IfmaMsghdr2", + "ImplementsGetwd", + "Inet4Pktinfo", + "Inet6Pktinfo", + "InotifyAddWatch", + "InotifyEvent", + "InotifyInit", + "InotifyInit1", + "InotifyRmWatch", + "InterfaceAddrMessage", + "InterfaceAnnounceMessage", + "InterfaceInfo", + "InterfaceMessage", + "InterfaceMulticastAddrMessage", + "InvalidHandle", + "Ioperm", + "Iopl", + "Iovec", + "IpAdapterInfo", + "IpAddrString", + "IpAddressString", + "IpMaskString", + "Issetugid", + "KEY_ALL_ACCESS", + "KEY_CREATE_LINK", + "KEY_CREATE_SUB_KEY", + "KEY_ENUMERATE_SUB_KEYS", + "KEY_EXECUTE", + "KEY_NOTIFY", + "KEY_QUERY_VALUE", + "KEY_READ", + "KEY_SET_VALUE", + "KEY_WOW64_32KEY", + "KEY_WOW64_64KEY", + "KEY_WRITE", + "Kevent", + "Kevent_t", + "Kill", + "Klogctl", + "Kqueue", + "LANG_ENGLISH", + "LAYERED_PROTOCOL", + "LCNT_OVERLOAD_FLUSH", + "LINUX_REBOOT_CMD_CAD_OFF", + "LINUX_REBOOT_CMD_CAD_ON", + "LINUX_REBOOT_CMD_HALT", + "LINUX_REBOOT_CMD_KEXEC", + "LINUX_REBOOT_CMD_POWER_OFF", + "LINUX_REBOOT_CMD_RESTART", + "LINUX_REBOOT_CMD_RESTART2", + "LINUX_REBOOT_CMD_SW_SUSPEND", + "LINUX_REBOOT_MAGIC1", + "LINUX_REBOOT_MAGIC2", + "LOCK_EX", + "LOCK_NB", + "LOCK_SH", + "LOCK_UN", + "LazyDLL", + "LazyProc", + "Lchown", + "Linger", + "Link", + "Listen", + "Listxattr", + "LoadCancelIoEx", + "LoadConnectEx", + "LoadCreateSymbolicLink", + "LoadDLL", + "LoadGetAddrInfo", + "LoadLibrary", + "LoadSetFileCompletionNotificationModes", + "LocalFree", + "Log2phys_t", + "LookupAccountName", + "LookupAccountSid", + "LookupSID", + "LsfJump", + "LsfSocket", + "LsfStmt", + "Lstat", + "MADV_AUTOSYNC", + "MADV_CAN_REUSE", + "MADV_CORE", + "MADV_DOFORK", + "MADV_DONTFORK", + "MADV_DONTNEED", + "MADV_FREE", + "MADV_FREE_REUSABLE", + "MADV_FREE_REUSE", + "MADV_HUGEPAGE", + "MADV_HWPOISON", + "MADV_MERGEABLE", + "MADV_NOCORE", + "MADV_NOHUGEPAGE", + "MADV_NORMAL", + "MADV_NOSYNC", + "MADV_PROTECT", + "MADV_RANDOM", + "MADV_REMOVE", + "MADV_SEQUENTIAL", + "MADV_SPACEAVAIL", + "MADV_UNMERGEABLE", + "MADV_WILLNEED", + 
"MADV_ZERO_WIRED_PAGES", + "MAP_32BIT", + "MAP_ALIGNED_SUPER", + "MAP_ALIGNMENT_16MB", + "MAP_ALIGNMENT_1TB", + "MAP_ALIGNMENT_256TB", + "MAP_ALIGNMENT_4GB", + "MAP_ALIGNMENT_64KB", + "MAP_ALIGNMENT_64PB", + "MAP_ALIGNMENT_MASK", + "MAP_ALIGNMENT_SHIFT", + "MAP_ANON", + "MAP_ANONYMOUS", + "MAP_COPY", + "MAP_DENYWRITE", + "MAP_EXECUTABLE", + "MAP_FILE", + "MAP_FIXED", + "MAP_FLAGMASK", + "MAP_GROWSDOWN", + "MAP_HASSEMAPHORE", + "MAP_HUGETLB", + "MAP_INHERIT", + "MAP_INHERIT_COPY", + "MAP_INHERIT_DEFAULT", + "MAP_INHERIT_DONATE_COPY", + "MAP_INHERIT_NONE", + "MAP_INHERIT_SHARE", + "MAP_JIT", + "MAP_LOCKED", + "MAP_NOCACHE", + "MAP_NOCORE", + "MAP_NOEXTEND", + "MAP_NONBLOCK", + "MAP_NORESERVE", + "MAP_NOSYNC", + "MAP_POPULATE", + "MAP_PREFAULT_READ", + "MAP_PRIVATE", + "MAP_RENAME", + "MAP_RESERVED0080", + "MAP_RESERVED0100", + "MAP_SHARED", + "MAP_STACK", + "MAP_TRYFIXED", + "MAP_TYPE", + "MAP_WIRED", + "MAXIMUM_REPARSE_DATA_BUFFER_SIZE", + "MAXLEN_IFDESCR", + "MAXLEN_PHYSADDR", + "MAX_ADAPTER_ADDRESS_LENGTH", + "MAX_ADAPTER_DESCRIPTION_LENGTH", + "MAX_ADAPTER_NAME_LENGTH", + "MAX_COMPUTERNAME_LENGTH", + "MAX_INTERFACE_NAME_LEN", + "MAX_LONG_PATH", + "MAX_PATH", + "MAX_PROTOCOL_CHAIN", + "MCL_CURRENT", + "MCL_FUTURE", + "MNT_DETACH", + "MNT_EXPIRE", + "MNT_FORCE", + "MSG_BCAST", + "MSG_CMSG_CLOEXEC", + "MSG_COMPAT", + "MSG_CONFIRM", + "MSG_CONTROLMBUF", + "MSG_CTRUNC", + "MSG_DONTROUTE", + "MSG_DONTWAIT", + "MSG_EOF", + "MSG_EOR", + "MSG_ERRQUEUE", + "MSG_FASTOPEN", + "MSG_FIN", + "MSG_FLUSH", + "MSG_HAVEMORE", + "MSG_HOLD", + "MSG_IOVUSRSPACE", + "MSG_LENUSRSPACE", + "MSG_MCAST", + "MSG_MORE", + "MSG_NAMEMBUF", + "MSG_NBIO", + "MSG_NEEDSA", + "MSG_NOSIGNAL", + "MSG_NOTIFICATION", + "MSG_OOB", + "MSG_PEEK", + "MSG_PROXY", + "MSG_RCVMORE", + "MSG_RST", + "MSG_SEND", + "MSG_SYN", + "MSG_TRUNC", + "MSG_TRYHARD", + "MSG_USERFLAGS", + "MSG_WAITALL", + "MSG_WAITFORONE", + "MSG_WAITSTREAM", + "MS_ACTIVE", + "MS_ASYNC", + "MS_BIND", + "MS_DEACTIVATE", + "MS_DIRSYNC", + "MS_INVALIDATE", + "MS_I_VERSION", + "MS_KERNMOUNT", + "MS_KILLPAGES", + "MS_MANDLOCK", + "MS_MGC_MSK", + "MS_MGC_VAL", + "MS_MOVE", + "MS_NOATIME", + "MS_NODEV", + "MS_NODIRATIME", + "MS_NOEXEC", + "MS_NOSUID", + "MS_NOUSER", + "MS_POSIXACL", + "MS_PRIVATE", + "MS_RDONLY", + "MS_REC", + "MS_RELATIME", + "MS_REMOUNT", + "MS_RMT_MASK", + "MS_SHARED", + "MS_SILENT", + "MS_SLAVE", + "MS_STRICTATIME", + "MS_SYNC", + "MS_SYNCHRONOUS", + "MS_UNBINDABLE", + "Madvise", + "MapViewOfFile", + "MaxTokenInfoClass", + "Mclpool", + "MibIfRow", + "Mkdir", + "Mkdirat", + "Mkfifo", + "Mknod", + "Mknodat", + "Mlock", + "Mlockall", + "Mmap", + "Mount", + "MoveFile", + "Mprotect", + "Msghdr", + "Munlock", + "Munlockall", + "Munmap", + "MustLoadDLL", + "NAME_MAX", + "NETLINK_ADD_MEMBERSHIP", + "NETLINK_AUDIT", + "NETLINK_BROADCAST_ERROR", + "NETLINK_CONNECTOR", + "NETLINK_DNRTMSG", + "NETLINK_DROP_MEMBERSHIP", + "NETLINK_ECRYPTFS", + "NETLINK_FIB_LOOKUP", + "NETLINK_FIREWALL", + "NETLINK_GENERIC", + "NETLINK_INET_DIAG", + "NETLINK_IP6_FW", + "NETLINK_ISCSI", + "NETLINK_KOBJECT_UEVENT", + "NETLINK_NETFILTER", + "NETLINK_NFLOG", + "NETLINK_NO_ENOBUFS", + "NETLINK_PKTINFO", + "NETLINK_RDMA", + "NETLINK_ROUTE", + "NETLINK_SCSITRANSPORT", + "NETLINK_SELINUX", + "NETLINK_UNUSED", + "NETLINK_USERSOCK", + "NETLINK_XFRM", + "NET_RT_DUMP", + "NET_RT_DUMP2", + "NET_RT_FLAGS", + "NET_RT_IFLIST", + "NET_RT_IFLIST2", + "NET_RT_IFLISTL", + "NET_RT_IFMALIST", + "NET_RT_MAXID", + "NET_RT_OIFLIST", + "NET_RT_OOIFLIST", + "NET_RT_STAT", + "NET_RT_STATS", + "NET_RT_TABLE", + 
"NET_RT_TRASH", + "NLA_ALIGNTO", + "NLA_F_NESTED", + "NLA_F_NET_BYTEORDER", + "NLA_HDRLEN", + "NLMSG_ALIGNTO", + "NLMSG_DONE", + "NLMSG_ERROR", + "NLMSG_HDRLEN", + "NLMSG_MIN_TYPE", + "NLMSG_NOOP", + "NLMSG_OVERRUN", + "NLM_F_ACK", + "NLM_F_APPEND", + "NLM_F_ATOMIC", + "NLM_F_CREATE", + "NLM_F_DUMP", + "NLM_F_ECHO", + "NLM_F_EXCL", + "NLM_F_MATCH", + "NLM_F_MULTI", + "NLM_F_REPLACE", + "NLM_F_REQUEST", + "NLM_F_ROOT", + "NOFLSH", + "NOTE_ABSOLUTE", + "NOTE_ATTRIB", + "NOTE_CHILD", + "NOTE_DELETE", + "NOTE_EOF", + "NOTE_EXEC", + "NOTE_EXIT", + "NOTE_EXITSTATUS", + "NOTE_EXTEND", + "NOTE_FFAND", + "NOTE_FFCOPY", + "NOTE_FFCTRLMASK", + "NOTE_FFLAGSMASK", + "NOTE_FFNOP", + "NOTE_FFOR", + "NOTE_FORK", + "NOTE_LINK", + "NOTE_LOWAT", + "NOTE_NONE", + "NOTE_NSECONDS", + "NOTE_PCTRLMASK", + "NOTE_PDATAMASK", + "NOTE_REAP", + "NOTE_RENAME", + "NOTE_RESOURCEEND", + "NOTE_REVOKE", + "NOTE_SECONDS", + "NOTE_SIGNAL", + "NOTE_TRACK", + "NOTE_TRACKERR", + "NOTE_TRIGGER", + "NOTE_TRUNCATE", + "NOTE_USECONDS", + "NOTE_VM_ERROR", + "NOTE_VM_PRESSURE", + "NOTE_VM_PRESSURE_SUDDEN_TERMINATE", + "NOTE_VM_PRESSURE_TERMINATE", + "NOTE_WRITE", + "NameCanonical", + "NameCanonicalEx", + "NameDisplay", + "NameDnsDomain", + "NameFullyQualifiedDN", + "NameSamCompatible", + "NameServicePrincipal", + "NameUniqueId", + "NameUnknown", + "NameUserPrincipal", + "Nanosleep", + "NetApiBufferFree", + "NetGetJoinInformation", + "NetSetupDomainName", + "NetSetupUnjoined", + "NetSetupUnknownStatus", + "NetSetupWorkgroupName", + "NetUserGetInfo", + "NetlinkMessage", + "NetlinkRIB", + "NetlinkRouteAttr", + "NetlinkRouteRequest", + "NewCallback", + "NewCallbackCDecl", + "NewLazyDLL", + "NlAttr", + "NlMsgerr", + "NlMsghdr", + "NsecToFiletime", + "NsecToTimespec", + "NsecToTimeval", + "Ntohs", + "OCRNL", + "OFDEL", + "OFILL", + "OFIOGETBMAP", + "OID_PKIX_KP_SERVER_AUTH", + "OID_SERVER_GATED_CRYPTO", + "OID_SGC_NETSCAPE", + "OLCUC", + "ONLCR", + "ONLRET", + "ONOCR", + "ONOEOT", + "OPEN_ALWAYS", + "OPEN_EXISTING", + "OPOST", + "O_ACCMODE", + "O_ALERT", + "O_ALT_IO", + "O_APPEND", + "O_ASYNC", + "O_CLOEXEC", + "O_CREAT", + "O_DIRECT", + "O_DIRECTORY", + "O_DSYNC", + "O_EVTONLY", + "O_EXCL", + "O_EXEC", + "O_EXLOCK", + "O_FSYNC", + "O_LARGEFILE", + "O_NDELAY", + "O_NOATIME", + "O_NOCTTY", + "O_NOFOLLOW", + "O_NONBLOCK", + "O_NOSIGPIPE", + "O_POPUP", + "O_RDONLY", + "O_RDWR", + "O_RSYNC", + "O_SHLOCK", + "O_SYMLINK", + "O_SYNC", + "O_TRUNC", + "O_TTY_INIT", + "O_WRONLY", + "Open", + "OpenCurrentProcessToken", + "OpenProcess", + "OpenProcessToken", + "Openat", + "Overlapped", + "PACKET_ADD_MEMBERSHIP", + "PACKET_BROADCAST", + "PACKET_DROP_MEMBERSHIP", + "PACKET_FASTROUTE", + "PACKET_HOST", + "PACKET_LOOPBACK", + "PACKET_MR_ALLMULTI", + "PACKET_MR_MULTICAST", + "PACKET_MR_PROMISC", + "PACKET_MULTICAST", + "PACKET_OTHERHOST", + "PACKET_OUTGOING", + "PACKET_RECV_OUTPUT", + "PACKET_RX_RING", + "PACKET_STATISTICS", + "PAGE_EXECUTE_READ", + "PAGE_EXECUTE_READWRITE", + "PAGE_EXECUTE_WRITECOPY", + "PAGE_READONLY", + "PAGE_READWRITE", + "PAGE_WRITECOPY", + "PARENB", + "PARMRK", + "PARODD", + "PENDIN", + "PFL_HIDDEN", + "PFL_MATCHES_PROTOCOL_ZERO", + "PFL_MULTIPLE_PROTO_ENTRIES", + "PFL_NETWORKDIRECT_PROVIDER", + "PFL_RECOMMENDED_PROTO_ENTRY", + "PF_FLUSH", + "PKCS_7_ASN_ENCODING", + "PMC5_PIPELINE_FLUSH", + "PRIO_PGRP", + "PRIO_PROCESS", + "PRIO_USER", + "PRI_IOFLUSH", + "PROCESS_QUERY_INFORMATION", + "PROCESS_TERMINATE", + "PROT_EXEC", + "PROT_GROWSDOWN", + "PROT_GROWSUP", + "PROT_NONE", + "PROT_READ", + "PROT_WRITE", + "PROV_DH_SCHANNEL", + 
"PROV_DSS", + "PROV_DSS_DH", + "PROV_EC_ECDSA_FULL", + "PROV_EC_ECDSA_SIG", + "PROV_EC_ECNRA_FULL", + "PROV_EC_ECNRA_SIG", + "PROV_FORTEZZA", + "PROV_INTEL_SEC", + "PROV_MS_EXCHANGE", + "PROV_REPLACE_OWF", + "PROV_RNG", + "PROV_RSA_AES", + "PROV_RSA_FULL", + "PROV_RSA_SCHANNEL", + "PROV_RSA_SIG", + "PROV_SPYRUS_LYNKS", + "PROV_SSL", + "PR_CAPBSET_DROP", + "PR_CAPBSET_READ", + "PR_CLEAR_SECCOMP_FILTER", + "PR_ENDIAN_BIG", + "PR_ENDIAN_LITTLE", + "PR_ENDIAN_PPC_LITTLE", + "PR_FPEMU_NOPRINT", + "PR_FPEMU_SIGFPE", + "PR_FP_EXC_ASYNC", + "PR_FP_EXC_DISABLED", + "PR_FP_EXC_DIV", + "PR_FP_EXC_INV", + "PR_FP_EXC_NONRECOV", + "PR_FP_EXC_OVF", + "PR_FP_EXC_PRECISE", + "PR_FP_EXC_RES", + "PR_FP_EXC_SW_ENABLE", + "PR_FP_EXC_UND", + "PR_GET_DUMPABLE", + "PR_GET_ENDIAN", + "PR_GET_FPEMU", + "PR_GET_FPEXC", + "PR_GET_KEEPCAPS", + "PR_GET_NAME", + "PR_GET_PDEATHSIG", + "PR_GET_SECCOMP", + "PR_GET_SECCOMP_FILTER", + "PR_GET_SECUREBITS", + "PR_GET_TIMERSLACK", + "PR_GET_TIMING", + "PR_GET_TSC", + "PR_GET_UNALIGN", + "PR_MCE_KILL", + "PR_MCE_KILL_CLEAR", + "PR_MCE_KILL_DEFAULT", + "PR_MCE_KILL_EARLY", + "PR_MCE_KILL_GET", + "PR_MCE_KILL_LATE", + "PR_MCE_KILL_SET", + "PR_SECCOMP_FILTER_EVENT", + "PR_SECCOMP_FILTER_SYSCALL", + "PR_SET_DUMPABLE", + "PR_SET_ENDIAN", + "PR_SET_FPEMU", + "PR_SET_FPEXC", + "PR_SET_KEEPCAPS", + "PR_SET_NAME", + "PR_SET_PDEATHSIG", + "PR_SET_PTRACER", + "PR_SET_SECCOMP", + "PR_SET_SECCOMP_FILTER", + "PR_SET_SECUREBITS", + "PR_SET_TIMERSLACK", + "PR_SET_TIMING", + "PR_SET_TSC", + "PR_SET_UNALIGN", + "PR_TASK_PERF_EVENTS_DISABLE", + "PR_TASK_PERF_EVENTS_ENABLE", + "PR_TIMING_STATISTICAL", + "PR_TIMING_TIMESTAMP", + "PR_TSC_ENABLE", + "PR_TSC_SIGSEGV", + "PR_UNALIGN_NOPRINT", + "PR_UNALIGN_SIGBUS", + "PTRACE_ARCH_PRCTL", + "PTRACE_ATTACH", + "PTRACE_CONT", + "PTRACE_DETACH", + "PTRACE_EVENT_CLONE", + "PTRACE_EVENT_EXEC", + "PTRACE_EVENT_EXIT", + "PTRACE_EVENT_FORK", + "PTRACE_EVENT_VFORK", + "PTRACE_EVENT_VFORK_DONE", + "PTRACE_GETCRUNCHREGS", + "PTRACE_GETEVENTMSG", + "PTRACE_GETFPREGS", + "PTRACE_GETFPXREGS", + "PTRACE_GETHBPREGS", + "PTRACE_GETREGS", + "PTRACE_GETREGSET", + "PTRACE_GETSIGINFO", + "PTRACE_GETVFPREGS", + "PTRACE_GETWMMXREGS", + "PTRACE_GET_THREAD_AREA", + "PTRACE_KILL", + "PTRACE_OLDSETOPTIONS", + "PTRACE_O_MASK", + "PTRACE_O_TRACECLONE", + "PTRACE_O_TRACEEXEC", + "PTRACE_O_TRACEEXIT", + "PTRACE_O_TRACEFORK", + "PTRACE_O_TRACESYSGOOD", + "PTRACE_O_TRACEVFORK", + "PTRACE_O_TRACEVFORKDONE", + "PTRACE_PEEKDATA", + "PTRACE_PEEKTEXT", + "PTRACE_PEEKUSR", + "PTRACE_POKEDATA", + "PTRACE_POKETEXT", + "PTRACE_POKEUSR", + "PTRACE_SETCRUNCHREGS", + "PTRACE_SETFPREGS", + "PTRACE_SETFPXREGS", + "PTRACE_SETHBPREGS", + "PTRACE_SETOPTIONS", + "PTRACE_SETREGS", + "PTRACE_SETREGSET", + "PTRACE_SETSIGINFO", + "PTRACE_SETVFPREGS", + "PTRACE_SETWMMXREGS", + "PTRACE_SET_SYSCALL", + "PTRACE_SET_THREAD_AREA", + "PTRACE_SINGLEBLOCK", + "PTRACE_SINGLESTEP", + "PTRACE_SYSCALL", + "PTRACE_SYSEMU", + "PTRACE_SYSEMU_SINGLESTEP", + "PTRACE_TRACEME", + "PT_ATTACH", + "PT_ATTACHEXC", + "PT_CONTINUE", + "PT_DATA_ADDR", + "PT_DENY_ATTACH", + "PT_DETACH", + "PT_FIRSTMACH", + "PT_FORCEQUOTA", + "PT_KILL", + "PT_MASK", + "PT_READ_D", + "PT_READ_I", + "PT_READ_U", + "PT_SIGEXC", + "PT_STEP", + "PT_TEXT_ADDR", + "PT_TEXT_END_ADDR", + "PT_THUPDATE", + "PT_TRACE_ME", + "PT_WRITE_D", + "PT_WRITE_I", + "PT_WRITE_U", + "ParseDirent", + "ParseNetlinkMessage", + "ParseNetlinkRouteAttr", + "ParseRoutingMessage", + "ParseRoutingSockaddr", + "ParseSocketControlMessage", + "ParseUnixCredentials", + "ParseUnixRights", + 
"PathMax", + "Pathconf", + "Pause", + "Pipe", + "Pipe2", + "PivotRoot", + "Pointer", + "PostQueuedCompletionStatus", + "Pread", + "Proc", + "ProcAttr", + "Process32First", + "Process32Next", + "ProcessEntry32", + "ProcessInformation", + "Protoent", + "PtraceAttach", + "PtraceCont", + "PtraceDetach", + "PtraceGetEventMsg", + "PtraceGetRegs", + "PtracePeekData", + "PtracePeekText", + "PtracePokeData", + "PtracePokeText", + "PtraceRegs", + "PtraceSetOptions", + "PtraceSetRegs", + "PtraceSingleStep", + "PtraceSyscall", + "Pwrite", + "REG_BINARY", + "REG_DWORD", + "REG_DWORD_BIG_ENDIAN", + "REG_DWORD_LITTLE_ENDIAN", + "REG_EXPAND_SZ", + "REG_FULL_RESOURCE_DESCRIPTOR", + "REG_LINK", + "REG_MULTI_SZ", + "REG_NONE", + "REG_QWORD", + "REG_QWORD_LITTLE_ENDIAN", + "REG_RESOURCE_LIST", + "REG_RESOURCE_REQUIREMENTS_LIST", + "REG_SZ", + "RLIMIT_AS", + "RLIMIT_CORE", + "RLIMIT_CPU", + "RLIMIT_DATA", + "RLIMIT_FSIZE", + "RLIMIT_NOFILE", + "RLIMIT_STACK", + "RLIM_INFINITY", + "RTAX_ADVMSS", + "RTAX_AUTHOR", + "RTAX_BRD", + "RTAX_CWND", + "RTAX_DST", + "RTAX_FEATURES", + "RTAX_FEATURE_ALLFRAG", + "RTAX_FEATURE_ECN", + "RTAX_FEATURE_SACK", + "RTAX_FEATURE_TIMESTAMP", + "RTAX_GATEWAY", + "RTAX_GENMASK", + "RTAX_HOPLIMIT", + "RTAX_IFA", + "RTAX_IFP", + "RTAX_INITCWND", + "RTAX_INITRWND", + "RTAX_LABEL", + "RTAX_LOCK", + "RTAX_MAX", + "RTAX_MTU", + "RTAX_NETMASK", + "RTAX_REORDERING", + "RTAX_RTO_MIN", + "RTAX_RTT", + "RTAX_RTTVAR", + "RTAX_SRC", + "RTAX_SRCMASK", + "RTAX_SSTHRESH", + "RTAX_TAG", + "RTAX_UNSPEC", + "RTAX_WINDOW", + "RTA_ALIGNTO", + "RTA_AUTHOR", + "RTA_BRD", + "RTA_CACHEINFO", + "RTA_DST", + "RTA_FLOW", + "RTA_GATEWAY", + "RTA_GENMASK", + "RTA_IFA", + "RTA_IFP", + "RTA_IIF", + "RTA_LABEL", + "RTA_MAX", + "RTA_METRICS", + "RTA_MULTIPATH", + "RTA_NETMASK", + "RTA_OIF", + "RTA_PREFSRC", + "RTA_PRIORITY", + "RTA_SRC", + "RTA_SRCMASK", + "RTA_TABLE", + "RTA_TAG", + "RTA_UNSPEC", + "RTCF_DIRECTSRC", + "RTCF_DOREDIRECT", + "RTCF_LOG", + "RTCF_MASQ", + "RTCF_NAT", + "RTCF_VALVE", + "RTF_ADDRCLASSMASK", + "RTF_ADDRCONF", + "RTF_ALLONLINK", + "RTF_ANNOUNCE", + "RTF_BLACKHOLE", + "RTF_BROADCAST", + "RTF_CACHE", + "RTF_CLONED", + "RTF_CLONING", + "RTF_CONDEMNED", + "RTF_DEFAULT", + "RTF_DELCLONE", + "RTF_DONE", + "RTF_DYNAMIC", + "RTF_FLOW", + "RTF_FMASK", + "RTF_GATEWAY", + "RTF_GWFLAG_COMPAT", + "RTF_HOST", + "RTF_IFREF", + "RTF_IFSCOPE", + "RTF_INTERFACE", + "RTF_IRTT", + "RTF_LINKRT", + "RTF_LLDATA", + "RTF_LLINFO", + "RTF_LOCAL", + "RTF_MASK", + "RTF_MODIFIED", + "RTF_MPATH", + "RTF_MPLS", + "RTF_MSS", + "RTF_MTU", + "RTF_MULTICAST", + "RTF_NAT", + "RTF_NOFORWARD", + "RTF_NONEXTHOP", + "RTF_NOPMTUDISC", + "RTF_PERMANENT_ARP", + "RTF_PINNED", + "RTF_POLICY", + "RTF_PRCLONING", + "RTF_PROTO1", + "RTF_PROTO2", + "RTF_PROTO3", + "RTF_REINSTATE", + "RTF_REJECT", + "RTF_RNH_LOCKED", + "RTF_SOURCE", + "RTF_SRC", + "RTF_STATIC", + "RTF_STICKY", + "RTF_THROW", + "RTF_TUNNEL", + "RTF_UP", + "RTF_USETRAILERS", + "RTF_WASCLONED", + "RTF_WINDOW", + "RTF_XRESOLVE", + "RTM_ADD", + "RTM_BASE", + "RTM_CHANGE", + "RTM_CHGADDR", + "RTM_DELACTION", + "RTM_DELADDR", + "RTM_DELADDRLABEL", + "RTM_DELETE", + "RTM_DELLINK", + "RTM_DELMADDR", + "RTM_DELNEIGH", + "RTM_DELQDISC", + "RTM_DELROUTE", + "RTM_DELRULE", + "RTM_DELTCLASS", + "RTM_DELTFILTER", + "RTM_DESYNC", + "RTM_F_CLONED", + "RTM_F_EQUALIZE", + "RTM_F_NOTIFY", + "RTM_F_PREFIX", + "RTM_GET", + "RTM_GET2", + "RTM_GETACTION", + "RTM_GETADDR", + "RTM_GETADDRLABEL", + "RTM_GETANYCAST", + "RTM_GETDCB", + "RTM_GETLINK", + "RTM_GETMULTICAST", + "RTM_GETNEIGH", + 
"RTM_GETNEIGHTBL", + "RTM_GETQDISC", + "RTM_GETROUTE", + "RTM_GETRULE", + "RTM_GETTCLASS", + "RTM_GETTFILTER", + "RTM_IEEE80211", + "RTM_IFANNOUNCE", + "RTM_IFINFO", + "RTM_IFINFO2", + "RTM_LLINFO_UPD", + "RTM_LOCK", + "RTM_LOSING", + "RTM_MAX", + "RTM_MAXSIZE", + "RTM_MISS", + "RTM_NEWACTION", + "RTM_NEWADDR", + "RTM_NEWADDRLABEL", + "RTM_NEWLINK", + "RTM_NEWMADDR", + "RTM_NEWMADDR2", + "RTM_NEWNDUSEROPT", + "RTM_NEWNEIGH", + "RTM_NEWNEIGHTBL", + "RTM_NEWPREFIX", + "RTM_NEWQDISC", + "RTM_NEWROUTE", + "RTM_NEWRULE", + "RTM_NEWTCLASS", + "RTM_NEWTFILTER", + "RTM_NR_FAMILIES", + "RTM_NR_MSGTYPES", + "RTM_OIFINFO", + "RTM_OLDADD", + "RTM_OLDDEL", + "RTM_OOIFINFO", + "RTM_REDIRECT", + "RTM_RESOLVE", + "RTM_RTTUNIT", + "RTM_SETDCB", + "RTM_SETGATE", + "RTM_SETLINK", + "RTM_SETNEIGHTBL", + "RTM_VERSION", + "RTNH_ALIGNTO", + "RTNH_F_DEAD", + "RTNH_F_ONLINK", + "RTNH_F_PERVASIVE", + "RTNLGRP_IPV4_IFADDR", + "RTNLGRP_IPV4_MROUTE", + "RTNLGRP_IPV4_ROUTE", + "RTNLGRP_IPV4_RULE", + "RTNLGRP_IPV6_IFADDR", + "RTNLGRP_IPV6_IFINFO", + "RTNLGRP_IPV6_MROUTE", + "RTNLGRP_IPV6_PREFIX", + "RTNLGRP_IPV6_ROUTE", + "RTNLGRP_IPV6_RULE", + "RTNLGRP_LINK", + "RTNLGRP_ND_USEROPT", + "RTNLGRP_NEIGH", + "RTNLGRP_NONE", + "RTNLGRP_NOTIFY", + "RTNLGRP_TC", + "RTN_ANYCAST", + "RTN_BLACKHOLE", + "RTN_BROADCAST", + "RTN_LOCAL", + "RTN_MAX", + "RTN_MULTICAST", + "RTN_NAT", + "RTN_PROHIBIT", + "RTN_THROW", + "RTN_UNICAST", + "RTN_UNREACHABLE", + "RTN_UNSPEC", + "RTN_XRESOLVE", + "RTPROT_BIRD", + "RTPROT_BOOT", + "RTPROT_DHCP", + "RTPROT_DNROUTED", + "RTPROT_GATED", + "RTPROT_KERNEL", + "RTPROT_MRT", + "RTPROT_NTK", + "RTPROT_RA", + "RTPROT_REDIRECT", + "RTPROT_STATIC", + "RTPROT_UNSPEC", + "RTPROT_XORP", + "RTPROT_ZEBRA", + "RTV_EXPIRE", + "RTV_HOPCOUNT", + "RTV_MTU", + "RTV_RPIPE", + "RTV_RTT", + "RTV_RTTVAR", + "RTV_SPIPE", + "RTV_SSTHRESH", + "RTV_WEIGHT", + "RT_CACHING_CONTEXT", + "RT_CLASS_DEFAULT", + "RT_CLASS_LOCAL", + "RT_CLASS_MAIN", + "RT_CLASS_MAX", + "RT_CLASS_UNSPEC", + "RT_DEFAULT_FIB", + "RT_NORTREF", + "RT_SCOPE_HOST", + "RT_SCOPE_LINK", + "RT_SCOPE_NOWHERE", + "RT_SCOPE_SITE", + "RT_SCOPE_UNIVERSE", + "RT_TABLEID_MAX", + "RT_TABLE_COMPAT", + "RT_TABLE_DEFAULT", + "RT_TABLE_LOCAL", + "RT_TABLE_MAIN", + "RT_TABLE_MAX", + "RT_TABLE_UNSPEC", + "RUSAGE_CHILDREN", + "RUSAGE_SELF", + "RUSAGE_THREAD", + "Radvisory_t", + "RawConn", + "RawSockaddr", + "RawSockaddrAny", + "RawSockaddrDatalink", + "RawSockaddrInet4", + "RawSockaddrInet6", + "RawSockaddrLinklayer", + "RawSockaddrNetlink", + "RawSockaddrUnix", + "RawSyscall", + "RawSyscall6", + "Read", + "ReadConsole", + "ReadDirectoryChanges", + "ReadDirent", + "ReadFile", + "Readlink", + "Reboot", + "Recvfrom", + "Recvmsg", + "RegCloseKey", + "RegEnumKeyEx", + "RegOpenKeyEx", + "RegQueryInfoKey", + "RegQueryValueEx", + "RemoveDirectory", + "Removexattr", + "Rename", + "Renameat", + "Revoke", + "Rlimit", + "Rmdir", + "RouteMessage", + "RouteRIB", + "RoutingMessage", + "RtAttr", + "RtGenmsg", + "RtMetrics", + "RtMsg", + "RtMsghdr", + "RtNexthop", + "Rusage", + "SCM_BINTIME", + "SCM_CREDENTIALS", + "SCM_CREDS", + "SCM_RIGHTS", + "SCM_TIMESTAMP", + "SCM_TIMESTAMPING", + "SCM_TIMESTAMPNS", + "SCM_TIMESTAMP_MONOTONIC", + "SHUT_RD", + "SHUT_RDWR", + "SHUT_WR", + "SID", + "SIDAndAttributes", + "SIGABRT", + "SIGALRM", + "SIGBUS", + "SIGCHLD", + "SIGCLD", + "SIGCONT", + "SIGEMT", + "SIGFPE", + "SIGHUP", + "SIGILL", + "SIGINFO", + "SIGINT", + "SIGIO", + "SIGIOT", + "SIGKILL", + "SIGLIBRT", + "SIGLWP", + "SIGPIPE", + "SIGPOLL", + "SIGPROF", + "SIGPWR", + "SIGQUIT", + "SIGSEGV", + 
"SIGSTKFLT", + "SIGSTOP", + "SIGSYS", + "SIGTERM", + "SIGTHR", + "SIGTRAP", + "SIGTSTP", + "SIGTTIN", + "SIGTTOU", + "SIGUNUSED", + "SIGURG", + "SIGUSR1", + "SIGUSR2", + "SIGVTALRM", + "SIGWINCH", + "SIGXCPU", + "SIGXFSZ", + "SIOCADDDLCI", + "SIOCADDMULTI", + "SIOCADDRT", + "SIOCAIFADDR", + "SIOCAIFGROUP", + "SIOCALIFADDR", + "SIOCARPIPLL", + "SIOCATMARK", + "SIOCAUTOADDR", + "SIOCAUTONETMASK", + "SIOCBRDGADD", + "SIOCBRDGADDS", + "SIOCBRDGARL", + "SIOCBRDGDADDR", + "SIOCBRDGDEL", + "SIOCBRDGDELS", + "SIOCBRDGFLUSH", + "SIOCBRDGFRL", + "SIOCBRDGGCACHE", + "SIOCBRDGGFD", + "SIOCBRDGGHT", + "SIOCBRDGGIFFLGS", + "SIOCBRDGGMA", + "SIOCBRDGGPARAM", + "SIOCBRDGGPRI", + "SIOCBRDGGRL", + "SIOCBRDGGSIFS", + "SIOCBRDGGTO", + "SIOCBRDGIFS", + "SIOCBRDGRTS", + "SIOCBRDGSADDR", + "SIOCBRDGSCACHE", + "SIOCBRDGSFD", + "SIOCBRDGSHT", + "SIOCBRDGSIFCOST", + "SIOCBRDGSIFFLGS", + "SIOCBRDGSIFPRIO", + "SIOCBRDGSMA", + "SIOCBRDGSPRI", + "SIOCBRDGSPROTO", + "SIOCBRDGSTO", + "SIOCBRDGSTXHC", + "SIOCDARP", + "SIOCDELDLCI", + "SIOCDELMULTI", + "SIOCDELRT", + "SIOCDEVPRIVATE", + "SIOCDIFADDR", + "SIOCDIFGROUP", + "SIOCDIFPHYADDR", + "SIOCDLIFADDR", + "SIOCDRARP", + "SIOCGARP", + "SIOCGDRVSPEC", + "SIOCGETKALIVE", + "SIOCGETLABEL", + "SIOCGETPFLOW", + "SIOCGETPFSYNC", + "SIOCGETSGCNT", + "SIOCGETVIFCNT", + "SIOCGETVLAN", + "SIOCGHIWAT", + "SIOCGIFADDR", + "SIOCGIFADDRPREF", + "SIOCGIFALIAS", + "SIOCGIFALTMTU", + "SIOCGIFASYNCMAP", + "SIOCGIFBOND", + "SIOCGIFBR", + "SIOCGIFBRDADDR", + "SIOCGIFCAP", + "SIOCGIFCONF", + "SIOCGIFCOUNT", + "SIOCGIFDATA", + "SIOCGIFDESCR", + "SIOCGIFDEVMTU", + "SIOCGIFDLT", + "SIOCGIFDSTADDR", + "SIOCGIFENCAP", + "SIOCGIFFIB", + "SIOCGIFFLAGS", + "SIOCGIFGATTR", + "SIOCGIFGENERIC", + "SIOCGIFGMEMB", + "SIOCGIFGROUP", + "SIOCGIFHARDMTU", + "SIOCGIFHWADDR", + "SIOCGIFINDEX", + "SIOCGIFKPI", + "SIOCGIFMAC", + "SIOCGIFMAP", + "SIOCGIFMEDIA", + "SIOCGIFMEM", + "SIOCGIFMETRIC", + "SIOCGIFMTU", + "SIOCGIFNAME", + "SIOCGIFNETMASK", + "SIOCGIFPDSTADDR", + "SIOCGIFPFLAGS", + "SIOCGIFPHYS", + "SIOCGIFPRIORITY", + "SIOCGIFPSRCADDR", + "SIOCGIFRDOMAIN", + "SIOCGIFRTLABEL", + "SIOCGIFSLAVE", + "SIOCGIFSTATUS", + "SIOCGIFTIMESLOT", + "SIOCGIFTXQLEN", + "SIOCGIFVLAN", + "SIOCGIFWAKEFLAGS", + "SIOCGIFXFLAGS", + "SIOCGLIFADDR", + "SIOCGLIFPHYADDR", + "SIOCGLIFPHYRTABLE", + "SIOCGLIFPHYTTL", + "SIOCGLINKSTR", + "SIOCGLOWAT", + "SIOCGPGRP", + "SIOCGPRIVATE_0", + "SIOCGPRIVATE_1", + "SIOCGRARP", + "SIOCGSPPPPARAMS", + "SIOCGSTAMP", + "SIOCGSTAMPNS", + "SIOCGVH", + "SIOCGVNETID", + "SIOCIFCREATE", + "SIOCIFCREATE2", + "SIOCIFDESTROY", + "SIOCIFGCLONERS", + "SIOCINITIFADDR", + "SIOCPROTOPRIVATE", + "SIOCRSLVMULTI", + "SIOCRTMSG", + "SIOCSARP", + "SIOCSDRVSPEC", + "SIOCSETKALIVE", + "SIOCSETLABEL", + "SIOCSETPFLOW", + "SIOCSETPFSYNC", + "SIOCSETVLAN", + "SIOCSHIWAT", + "SIOCSIFADDR", + "SIOCSIFADDRPREF", + "SIOCSIFALTMTU", + "SIOCSIFASYNCMAP", + "SIOCSIFBOND", + "SIOCSIFBR", + "SIOCSIFBRDADDR", + "SIOCSIFCAP", + "SIOCSIFDESCR", + "SIOCSIFDSTADDR", + "SIOCSIFENCAP", + "SIOCSIFFIB", + "SIOCSIFFLAGS", + "SIOCSIFGATTR", + "SIOCSIFGENERIC", + "SIOCSIFHWADDR", + "SIOCSIFHWBROADCAST", + "SIOCSIFKPI", + "SIOCSIFLINK", + "SIOCSIFLLADDR", + "SIOCSIFMAC", + "SIOCSIFMAP", + "SIOCSIFMEDIA", + "SIOCSIFMEM", + "SIOCSIFMETRIC", + "SIOCSIFMTU", + "SIOCSIFNAME", + "SIOCSIFNETMASK", + "SIOCSIFPFLAGS", + "SIOCSIFPHYADDR", + "SIOCSIFPHYS", + "SIOCSIFPRIORITY", + "SIOCSIFRDOMAIN", + "SIOCSIFRTLABEL", + "SIOCSIFRVNET", + "SIOCSIFSLAVE", + "SIOCSIFTIMESLOT", + "SIOCSIFTXQLEN", + "SIOCSIFVLAN", + "SIOCSIFVNET", + "SIOCSIFXFLAGS", + 
"SIOCSLIFPHYADDR", + "SIOCSLIFPHYRTABLE", + "SIOCSLIFPHYTTL", + "SIOCSLINKSTR", + "SIOCSLOWAT", + "SIOCSPGRP", + "SIOCSRARP", + "SIOCSSPPPPARAMS", + "SIOCSVH", + "SIOCSVNETID", + "SIOCZIFDATA", + "SIO_GET_EXTENSION_FUNCTION_POINTER", + "SIO_GET_INTERFACE_LIST", + "SIO_KEEPALIVE_VALS", + "SIO_UDP_CONNRESET", + "SOCK_CLOEXEC", + "SOCK_DCCP", + "SOCK_DGRAM", + "SOCK_FLAGS_MASK", + "SOCK_MAXADDRLEN", + "SOCK_NONBLOCK", + "SOCK_NOSIGPIPE", + "SOCK_PACKET", + "SOCK_RAW", + "SOCK_RDM", + "SOCK_SEQPACKET", + "SOCK_STREAM", + "SOL_AAL", + "SOL_ATM", + "SOL_DECNET", + "SOL_ICMPV6", + "SOL_IP", + "SOL_IPV6", + "SOL_IRDA", + "SOL_PACKET", + "SOL_RAW", + "SOL_SOCKET", + "SOL_TCP", + "SOL_X25", + "SOMAXCONN", + "SO_ACCEPTCONN", + "SO_ACCEPTFILTER", + "SO_ATTACH_FILTER", + "SO_BINDANY", + "SO_BINDTODEVICE", + "SO_BINTIME", + "SO_BROADCAST", + "SO_BSDCOMPAT", + "SO_DEBUG", + "SO_DETACH_FILTER", + "SO_DOMAIN", + "SO_DONTROUTE", + "SO_DONTTRUNC", + "SO_ERROR", + "SO_KEEPALIVE", + "SO_LABEL", + "SO_LINGER", + "SO_LINGER_SEC", + "SO_LISTENINCQLEN", + "SO_LISTENQLEN", + "SO_LISTENQLIMIT", + "SO_MARK", + "SO_NETPROC", + "SO_NKE", + "SO_NOADDRERR", + "SO_NOHEADER", + "SO_NOSIGPIPE", + "SO_NOTIFYCONFLICT", + "SO_NO_CHECK", + "SO_NO_DDP", + "SO_NO_OFFLOAD", + "SO_NP_EXTENSIONS", + "SO_NREAD", + "SO_NWRITE", + "SO_OOBINLINE", + "SO_OVERFLOWED", + "SO_PASSCRED", + "SO_PASSSEC", + "SO_PEERCRED", + "SO_PEERLABEL", + "SO_PEERNAME", + "SO_PEERSEC", + "SO_PRIORITY", + "SO_PROTOCOL", + "SO_PROTOTYPE", + "SO_RANDOMPORT", + "SO_RCVBUF", + "SO_RCVBUFFORCE", + "SO_RCVLOWAT", + "SO_RCVTIMEO", + "SO_RESTRICTIONS", + "SO_RESTRICT_DENYIN", + "SO_RESTRICT_DENYOUT", + "SO_RESTRICT_DENYSET", + "SO_REUSEADDR", + "SO_REUSEPORT", + "SO_REUSESHAREUID", + "SO_RTABLE", + "SO_RXQ_OVFL", + "SO_SECURITY_AUTHENTICATION", + "SO_SECURITY_ENCRYPTION_NETWORK", + "SO_SECURITY_ENCRYPTION_TRANSPORT", + "SO_SETFIB", + "SO_SNDBUF", + "SO_SNDBUFFORCE", + "SO_SNDLOWAT", + "SO_SNDTIMEO", + "SO_SPLICE", + "SO_TIMESTAMP", + "SO_TIMESTAMPING", + "SO_TIMESTAMPNS", + "SO_TIMESTAMP_MONOTONIC", + "SO_TYPE", + "SO_UPCALLCLOSEWAIT", + "SO_UPDATE_ACCEPT_CONTEXT", + "SO_UPDATE_CONNECT_CONTEXT", + "SO_USELOOPBACK", + "SO_USER_COOKIE", + "SO_VENDOR", + "SO_WANTMORE", + "SO_WANTOOBFLAG", + "SSLExtraCertChainPolicyPara", + "STANDARD_RIGHTS_ALL", + "STANDARD_RIGHTS_EXECUTE", + "STANDARD_RIGHTS_READ", + "STANDARD_RIGHTS_REQUIRED", + "STANDARD_RIGHTS_WRITE", + "STARTF_USESHOWWINDOW", + "STARTF_USESTDHANDLES", + "STD_ERROR_HANDLE", + "STD_INPUT_HANDLE", + "STD_OUTPUT_HANDLE", + "SUBLANG_ENGLISH_US", + "SW_FORCEMINIMIZE", + "SW_HIDE", + "SW_MAXIMIZE", + "SW_MINIMIZE", + "SW_NORMAL", + "SW_RESTORE", + "SW_SHOW", + "SW_SHOWDEFAULT", + "SW_SHOWMAXIMIZED", + "SW_SHOWMINIMIZED", + "SW_SHOWMINNOACTIVE", + "SW_SHOWNA", + "SW_SHOWNOACTIVATE", + "SW_SHOWNORMAL", + "SYMBOLIC_LINK_FLAG_DIRECTORY", + "SYNCHRONIZE", + "SYSCTL_VERSION", + "SYSCTL_VERS_0", + "SYSCTL_VERS_1", + "SYSCTL_VERS_MASK", + "SYS_ABORT2", + "SYS_ACCEPT", + "SYS_ACCEPT4", + "SYS_ACCEPT_NOCANCEL", + "SYS_ACCESS", + "SYS_ACCESS_EXTENDED", + "SYS_ACCT", + "SYS_ADD_KEY", + "SYS_ADD_PROFIL", + "SYS_ADJFREQ", + "SYS_ADJTIME", + "SYS_ADJTIMEX", + "SYS_AFS_SYSCALL", + "SYS_AIO_CANCEL", + "SYS_AIO_ERROR", + "SYS_AIO_FSYNC", + "SYS_AIO_READ", + "SYS_AIO_RETURN", + "SYS_AIO_SUSPEND", + "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WRITE", + "SYS_ALARM", + "SYS_ARCH_PRCTL", + "SYS_ARM_FADVISE64_64", + "SYS_ARM_SYNC_FILE_RANGE", + "SYS_ATGETMSG", + "SYS_ATPGETREQ", + "SYS_ATPGETRSP", + "SYS_ATPSNDREQ", + "SYS_ATPSNDRSP", + 
"SYS_ATPUTMSG", + "SYS_ATSOCKET", + "SYS_AUDIT", + "SYS_AUDITCTL", + "SYS_AUDITON", + "SYS_AUDIT_SESSION_JOIN", + "SYS_AUDIT_SESSION_PORT", + "SYS_AUDIT_SESSION_SELF", + "SYS_BDFLUSH", + "SYS_BIND", + "SYS_BINDAT", + "SYS_BREAK", + "SYS_BRK", + "SYS_BSDTHREAD_CREATE", + "SYS_BSDTHREAD_REGISTER", + "SYS_BSDTHREAD_TERMINATE", + "SYS_CAPGET", + "SYS_CAPSET", + "SYS_CAP_ENTER", + "SYS_CAP_FCNTLS_GET", + "SYS_CAP_FCNTLS_LIMIT", + "SYS_CAP_GETMODE", + "SYS_CAP_GETRIGHTS", + "SYS_CAP_IOCTLS_GET", + "SYS_CAP_IOCTLS_LIMIT", + "SYS_CAP_NEW", + "SYS_CAP_RIGHTS_GET", + "SYS_CAP_RIGHTS_LIMIT", + "SYS_CHDIR", + "SYS_CHFLAGS", + "SYS_CHFLAGSAT", + "SYS_CHMOD", + "SYS_CHMOD_EXTENDED", + "SYS_CHOWN", + "SYS_CHOWN32", + "SYS_CHROOT", + "SYS_CHUD", + "SYS_CLOCK_ADJTIME", + "SYS_CLOCK_GETCPUCLOCKID2", + "SYS_CLOCK_GETRES", + "SYS_CLOCK_GETTIME", + "SYS_CLOCK_NANOSLEEP", + "SYS_CLOCK_SETTIME", + "SYS_CLONE", + "SYS_CLOSE", + "SYS_CLOSEFROM", + "SYS_CLOSE_NOCANCEL", + "SYS_CONNECT", + "SYS_CONNECTAT", + "SYS_CONNECT_NOCANCEL", + "SYS_COPYFILE", + "SYS_CPUSET", + "SYS_CPUSET_GETAFFINITY", + "SYS_CPUSET_GETID", + "SYS_CPUSET_SETAFFINITY", + "SYS_CPUSET_SETID", + "SYS_CREAT", + "SYS_CREATE_MODULE", + "SYS_CSOPS", + "SYS_DELETE", + "SYS_DELETE_MODULE", + "SYS_DUP", + "SYS_DUP2", + "SYS_DUP3", + "SYS_EACCESS", + "SYS_EPOLL_CREATE", + "SYS_EPOLL_CREATE1", + "SYS_EPOLL_CTL", + "SYS_EPOLL_CTL_OLD", + "SYS_EPOLL_PWAIT", + "SYS_EPOLL_WAIT", + "SYS_EPOLL_WAIT_OLD", + "SYS_EVENTFD", + "SYS_EVENTFD2", + "SYS_EXCHANGEDATA", + "SYS_EXECVE", + "SYS_EXIT", + "SYS_EXIT_GROUP", + "SYS_EXTATTRCTL", + "SYS_EXTATTR_DELETE_FD", + "SYS_EXTATTR_DELETE_FILE", + "SYS_EXTATTR_DELETE_LINK", + "SYS_EXTATTR_GET_FD", + "SYS_EXTATTR_GET_FILE", + "SYS_EXTATTR_GET_LINK", + "SYS_EXTATTR_LIST_FD", + "SYS_EXTATTR_LIST_FILE", + "SYS_EXTATTR_LIST_LINK", + "SYS_EXTATTR_SET_FD", + "SYS_EXTATTR_SET_FILE", + "SYS_EXTATTR_SET_LINK", + "SYS_FACCESSAT", + "SYS_FADVISE64", + "SYS_FADVISE64_64", + "SYS_FALLOCATE", + "SYS_FANOTIFY_INIT", + "SYS_FANOTIFY_MARK", + "SYS_FCHDIR", + "SYS_FCHFLAGS", + "SYS_FCHMOD", + "SYS_FCHMODAT", + "SYS_FCHMOD_EXTENDED", + "SYS_FCHOWN", + "SYS_FCHOWN32", + "SYS_FCHOWNAT", + "SYS_FCHROOT", + "SYS_FCNTL", + "SYS_FCNTL64", + "SYS_FCNTL_NOCANCEL", + "SYS_FDATASYNC", + "SYS_FEXECVE", + "SYS_FFCLOCK_GETCOUNTER", + "SYS_FFCLOCK_GETESTIMATE", + "SYS_FFCLOCK_SETESTIMATE", + "SYS_FFSCTL", + "SYS_FGETATTRLIST", + "SYS_FGETXATTR", + "SYS_FHOPEN", + "SYS_FHSTAT", + "SYS_FHSTATFS", + "SYS_FILEPORT_MAKEFD", + "SYS_FILEPORT_MAKEPORT", + "SYS_FKTRACE", + "SYS_FLISTXATTR", + "SYS_FLOCK", + "SYS_FORK", + "SYS_FPATHCONF", + "SYS_FREEBSD6_FTRUNCATE", + "SYS_FREEBSD6_LSEEK", + "SYS_FREEBSD6_MMAP", + "SYS_FREEBSD6_PREAD", + "SYS_FREEBSD6_PWRITE", + "SYS_FREEBSD6_TRUNCATE", + "SYS_FREMOVEXATTR", + "SYS_FSCTL", + "SYS_FSETATTRLIST", + "SYS_FSETXATTR", + "SYS_FSGETPATH", + "SYS_FSTAT", + "SYS_FSTAT64", + "SYS_FSTAT64_EXTENDED", + "SYS_FSTATAT", + "SYS_FSTATAT64", + "SYS_FSTATFS", + "SYS_FSTATFS64", + "SYS_FSTATV", + "SYS_FSTATVFS1", + "SYS_FSTAT_EXTENDED", + "SYS_FSYNC", + "SYS_FSYNC_NOCANCEL", + "SYS_FSYNC_RANGE", + "SYS_FTIME", + "SYS_FTRUNCATE", + "SYS_FTRUNCATE64", + "SYS_FUTEX", + "SYS_FUTIMENS", + "SYS_FUTIMES", + "SYS_FUTIMESAT", + "SYS_GETATTRLIST", + "SYS_GETAUDIT", + "SYS_GETAUDIT_ADDR", + "SYS_GETAUID", + "SYS_GETCONTEXT", + "SYS_GETCPU", + "SYS_GETCWD", + "SYS_GETDENTS", + "SYS_GETDENTS64", + "SYS_GETDIRENTRIES", + "SYS_GETDIRENTRIES64", + "SYS_GETDIRENTRIESATTR", + "SYS_GETDTABLECOUNT", + "SYS_GETDTABLESIZE", + "SYS_GETEGID", + 
"SYS_GETEGID32", + "SYS_GETEUID", + "SYS_GETEUID32", + "SYS_GETFH", + "SYS_GETFSSTAT", + "SYS_GETFSSTAT64", + "SYS_GETGID", + "SYS_GETGID32", + "SYS_GETGROUPS", + "SYS_GETGROUPS32", + "SYS_GETHOSTUUID", + "SYS_GETITIMER", + "SYS_GETLCID", + "SYS_GETLOGIN", + "SYS_GETLOGINCLASS", + "SYS_GETPEERNAME", + "SYS_GETPGID", + "SYS_GETPGRP", + "SYS_GETPID", + "SYS_GETPMSG", + "SYS_GETPPID", + "SYS_GETPRIORITY", + "SYS_GETRESGID", + "SYS_GETRESGID32", + "SYS_GETRESUID", + "SYS_GETRESUID32", + "SYS_GETRLIMIT", + "SYS_GETRTABLE", + "SYS_GETRUSAGE", + "SYS_GETSGROUPS", + "SYS_GETSID", + "SYS_GETSOCKNAME", + "SYS_GETSOCKOPT", + "SYS_GETTHRID", + "SYS_GETTID", + "SYS_GETTIMEOFDAY", + "SYS_GETUID", + "SYS_GETUID32", + "SYS_GETVFSSTAT", + "SYS_GETWGROUPS", + "SYS_GETXATTR", + "SYS_GET_KERNEL_SYMS", + "SYS_GET_MEMPOLICY", + "SYS_GET_ROBUST_LIST", + "SYS_GET_THREAD_AREA", + "SYS_GTTY", + "SYS_IDENTITYSVC", + "SYS_IDLE", + "SYS_INITGROUPS", + "SYS_INIT_MODULE", + "SYS_INOTIFY_ADD_WATCH", + "SYS_INOTIFY_INIT", + "SYS_INOTIFY_INIT1", + "SYS_INOTIFY_RM_WATCH", + "SYS_IOCTL", + "SYS_IOPERM", + "SYS_IOPL", + "SYS_IOPOLICYSYS", + "SYS_IOPRIO_GET", + "SYS_IOPRIO_SET", + "SYS_IO_CANCEL", + "SYS_IO_DESTROY", + "SYS_IO_GETEVENTS", + "SYS_IO_SETUP", + "SYS_IO_SUBMIT", + "SYS_IPC", + "SYS_ISSETUGID", + "SYS_JAIL", + "SYS_JAIL_ATTACH", + "SYS_JAIL_GET", + "SYS_JAIL_REMOVE", + "SYS_JAIL_SET", + "SYS_KDEBUG_TRACE", + "SYS_KENV", + "SYS_KEVENT", + "SYS_KEVENT64", + "SYS_KEXEC_LOAD", + "SYS_KEYCTL", + "SYS_KILL", + "SYS_KLDFIND", + "SYS_KLDFIRSTMOD", + "SYS_KLDLOAD", + "SYS_KLDNEXT", + "SYS_KLDSTAT", + "SYS_KLDSYM", + "SYS_KLDUNLOAD", + "SYS_KLDUNLOADF", + "SYS_KQUEUE", + "SYS_KQUEUE1", + "SYS_KTIMER_CREATE", + "SYS_KTIMER_DELETE", + "SYS_KTIMER_GETOVERRUN", + "SYS_KTIMER_GETTIME", + "SYS_KTIMER_SETTIME", + "SYS_KTRACE", + "SYS_LCHFLAGS", + "SYS_LCHMOD", + "SYS_LCHOWN", + "SYS_LCHOWN32", + "SYS_LGETFH", + "SYS_LGETXATTR", + "SYS_LINK", + "SYS_LINKAT", + "SYS_LIO_LISTIO", + "SYS_LISTEN", + "SYS_LISTXATTR", + "SYS_LLISTXATTR", + "SYS_LOCK", + "SYS_LOOKUP_DCOOKIE", + "SYS_LPATHCONF", + "SYS_LREMOVEXATTR", + "SYS_LSEEK", + "SYS_LSETXATTR", + "SYS_LSTAT", + "SYS_LSTAT64", + "SYS_LSTAT64_EXTENDED", + "SYS_LSTATV", + "SYS_LSTAT_EXTENDED", + "SYS_LUTIMES", + "SYS_MAC_SYSCALL", + "SYS_MADVISE", + "SYS_MADVISE1", + "SYS_MAXSYSCALL", + "SYS_MBIND", + "SYS_MIGRATE_PAGES", + "SYS_MINCORE", + "SYS_MINHERIT", + "SYS_MKCOMPLEX", + "SYS_MKDIR", + "SYS_MKDIRAT", + "SYS_MKDIR_EXTENDED", + "SYS_MKFIFO", + "SYS_MKFIFOAT", + "SYS_MKFIFO_EXTENDED", + "SYS_MKNOD", + "SYS_MKNODAT", + "SYS_MLOCK", + "SYS_MLOCKALL", + "SYS_MMAP", + "SYS_MMAP2", + "SYS_MODCTL", + "SYS_MODFIND", + "SYS_MODFNEXT", + "SYS_MODIFY_LDT", + "SYS_MODNEXT", + "SYS_MODSTAT", + "SYS_MODWATCH", + "SYS_MOUNT", + "SYS_MOVE_PAGES", + "SYS_MPROTECT", + "SYS_MPX", + "SYS_MQUERY", + "SYS_MQ_GETSETATTR", + "SYS_MQ_NOTIFY", + "SYS_MQ_OPEN", + "SYS_MQ_TIMEDRECEIVE", + "SYS_MQ_TIMEDSEND", + "SYS_MQ_UNLINK", + "SYS_MREMAP", + "SYS_MSGCTL", + "SYS_MSGGET", + "SYS_MSGRCV", + "SYS_MSGRCV_NOCANCEL", + "SYS_MSGSND", + "SYS_MSGSND_NOCANCEL", + "SYS_MSGSYS", + "SYS_MSYNC", + "SYS_MSYNC_NOCANCEL", + "SYS_MUNLOCK", + "SYS_MUNLOCKALL", + "SYS_MUNMAP", + "SYS_NAME_TO_HANDLE_AT", + "SYS_NANOSLEEP", + "SYS_NEWFSTATAT", + "SYS_NFSCLNT", + "SYS_NFSSERVCTL", + "SYS_NFSSVC", + "SYS_NFSTAT", + "SYS_NICE", + "SYS_NLSTAT", + "SYS_NMOUNT", + "SYS_NSTAT", + "SYS_NTP_ADJTIME", + "SYS_NTP_GETTIME", + "SYS_OABI_SYSCALL_BASE", + "SYS_OBREAK", + "SYS_OLDFSTAT", + "SYS_OLDLSTAT", + "SYS_OLDOLDUNAME", + "SYS_OLDSTAT", + 
"SYS_OLDUNAME", + "SYS_OPEN", + "SYS_OPENAT", + "SYS_OPENBSD_POLL", + "SYS_OPEN_BY_HANDLE_AT", + "SYS_OPEN_EXTENDED", + "SYS_OPEN_NOCANCEL", + "SYS_OVADVISE", + "SYS_PACCEPT", + "SYS_PATHCONF", + "SYS_PAUSE", + "SYS_PCICONFIG_IOBASE", + "SYS_PCICONFIG_READ", + "SYS_PCICONFIG_WRITE", + "SYS_PDFORK", + "SYS_PDGETPID", + "SYS_PDKILL", + "SYS_PERF_EVENT_OPEN", + "SYS_PERSONALITY", + "SYS_PID_HIBERNATE", + "SYS_PID_RESUME", + "SYS_PID_SHUTDOWN_SOCKETS", + "SYS_PID_SUSPEND", + "SYS_PIPE", + "SYS_PIPE2", + "SYS_PIVOT_ROOT", + "SYS_PMC_CONTROL", + "SYS_PMC_GET_INFO", + "SYS_POLL", + "SYS_POLLTS", + "SYS_POLL_NOCANCEL", + "SYS_POSIX_FADVISE", + "SYS_POSIX_FALLOCATE", + "SYS_POSIX_OPENPT", + "SYS_POSIX_SPAWN", + "SYS_PPOLL", + "SYS_PRCTL", + "SYS_PREAD", + "SYS_PREAD64", + "SYS_PREADV", + "SYS_PREAD_NOCANCEL", + "SYS_PRLIMIT64", + "SYS_PROCCTL", + "SYS_PROCESS_POLICY", + "SYS_PROCESS_VM_READV", + "SYS_PROCESS_VM_WRITEV", + "SYS_PROC_INFO", + "SYS_PROF", + "SYS_PROFIL", + "SYS_PSELECT", + "SYS_PSELECT6", + "SYS_PSET_ASSIGN", + "SYS_PSET_CREATE", + "SYS_PSET_DESTROY", + "SYS_PSYNCH_CVBROAD", + "SYS_PSYNCH_CVCLRPREPOST", + "SYS_PSYNCH_CVSIGNAL", + "SYS_PSYNCH_CVWAIT", + "SYS_PSYNCH_MUTEXDROP", + "SYS_PSYNCH_MUTEXWAIT", + "SYS_PSYNCH_RW_DOWNGRADE", + "SYS_PSYNCH_RW_LONGRDLOCK", + "SYS_PSYNCH_RW_RDLOCK", + "SYS_PSYNCH_RW_UNLOCK", + "SYS_PSYNCH_RW_UNLOCK2", + "SYS_PSYNCH_RW_UPGRADE", + "SYS_PSYNCH_RW_WRLOCK", + "SYS_PSYNCH_RW_YIELDWRLOCK", + "SYS_PTRACE", + "SYS_PUTPMSG", + "SYS_PWRITE", + "SYS_PWRITE64", + "SYS_PWRITEV", + "SYS_PWRITE_NOCANCEL", + "SYS_QUERY_MODULE", + "SYS_QUOTACTL", + "SYS_RASCTL", + "SYS_RCTL_ADD_RULE", + "SYS_RCTL_GET_LIMITS", + "SYS_RCTL_GET_RACCT", + "SYS_RCTL_GET_RULES", + "SYS_RCTL_REMOVE_RULE", + "SYS_READ", + "SYS_READAHEAD", + "SYS_READDIR", + "SYS_READLINK", + "SYS_READLINKAT", + "SYS_READV", + "SYS_READV_NOCANCEL", + "SYS_READ_NOCANCEL", + "SYS_REBOOT", + "SYS_RECV", + "SYS_RECVFROM", + "SYS_RECVFROM_NOCANCEL", + "SYS_RECVMMSG", + "SYS_RECVMSG", + "SYS_RECVMSG_NOCANCEL", + "SYS_REMAP_FILE_PAGES", + "SYS_REMOVEXATTR", + "SYS_RENAME", + "SYS_RENAMEAT", + "SYS_REQUEST_KEY", + "SYS_RESTART_SYSCALL", + "SYS_REVOKE", + "SYS_RFORK", + "SYS_RMDIR", + "SYS_RTPRIO", + "SYS_RTPRIO_THREAD", + "SYS_RT_SIGACTION", + "SYS_RT_SIGPENDING", + "SYS_RT_SIGPROCMASK", + "SYS_RT_SIGQUEUEINFO", + "SYS_RT_SIGRETURN", + "SYS_RT_SIGSUSPEND", + "SYS_RT_SIGTIMEDWAIT", + "SYS_RT_TGSIGQUEUEINFO", + "SYS_SBRK", + "SYS_SCHED_GETAFFINITY", + "SYS_SCHED_GETPARAM", + "SYS_SCHED_GETSCHEDULER", + "SYS_SCHED_GET_PRIORITY_MAX", + "SYS_SCHED_GET_PRIORITY_MIN", + "SYS_SCHED_RR_GET_INTERVAL", + "SYS_SCHED_SETAFFINITY", + "SYS_SCHED_SETPARAM", + "SYS_SCHED_SETSCHEDULER", + "SYS_SCHED_YIELD", + "SYS_SCTP_GENERIC_RECVMSG", + "SYS_SCTP_GENERIC_SENDMSG", + "SYS_SCTP_GENERIC_SENDMSG_IOV", + "SYS_SCTP_PEELOFF", + "SYS_SEARCHFS", + "SYS_SECURITY", + "SYS_SELECT", + "SYS_SELECT_NOCANCEL", + "SYS_SEMCONFIG", + "SYS_SEMCTL", + "SYS_SEMGET", + "SYS_SEMOP", + "SYS_SEMSYS", + "SYS_SEMTIMEDOP", + "SYS_SEM_CLOSE", + "SYS_SEM_DESTROY", + "SYS_SEM_GETVALUE", + "SYS_SEM_INIT", + "SYS_SEM_OPEN", + "SYS_SEM_POST", + "SYS_SEM_TRYWAIT", + "SYS_SEM_UNLINK", + "SYS_SEM_WAIT", + "SYS_SEM_WAIT_NOCANCEL", + "SYS_SEND", + "SYS_SENDFILE", + "SYS_SENDFILE64", + "SYS_SENDMMSG", + "SYS_SENDMSG", + "SYS_SENDMSG_NOCANCEL", + "SYS_SENDTO", + "SYS_SENDTO_NOCANCEL", + "SYS_SETATTRLIST", + "SYS_SETAUDIT", + "SYS_SETAUDIT_ADDR", + "SYS_SETAUID", + "SYS_SETCONTEXT", + "SYS_SETDOMAINNAME", + "SYS_SETEGID", + "SYS_SETEUID", + "SYS_SETFIB", + "SYS_SETFSGID", + 
"SYS_SETFSGID32", + "SYS_SETFSUID", + "SYS_SETFSUID32", + "SYS_SETGID", + "SYS_SETGID32", + "SYS_SETGROUPS", + "SYS_SETGROUPS32", + "SYS_SETHOSTNAME", + "SYS_SETITIMER", + "SYS_SETLCID", + "SYS_SETLOGIN", + "SYS_SETLOGINCLASS", + "SYS_SETNS", + "SYS_SETPGID", + "SYS_SETPRIORITY", + "SYS_SETPRIVEXEC", + "SYS_SETREGID", + "SYS_SETREGID32", + "SYS_SETRESGID", + "SYS_SETRESGID32", + "SYS_SETRESUID", + "SYS_SETRESUID32", + "SYS_SETREUID", + "SYS_SETREUID32", + "SYS_SETRLIMIT", + "SYS_SETRTABLE", + "SYS_SETSGROUPS", + "SYS_SETSID", + "SYS_SETSOCKOPT", + "SYS_SETTID", + "SYS_SETTID_WITH_PID", + "SYS_SETTIMEOFDAY", + "SYS_SETUID", + "SYS_SETUID32", + "SYS_SETWGROUPS", + "SYS_SETXATTR", + "SYS_SET_MEMPOLICY", + "SYS_SET_ROBUST_LIST", + "SYS_SET_THREAD_AREA", + "SYS_SET_TID_ADDRESS", + "SYS_SGETMASK", + "SYS_SHARED_REGION_CHECK_NP", + "SYS_SHARED_REGION_MAP_AND_SLIDE_NP", + "SYS_SHMAT", + "SYS_SHMCTL", + "SYS_SHMDT", + "SYS_SHMGET", + "SYS_SHMSYS", + "SYS_SHM_OPEN", + "SYS_SHM_UNLINK", + "SYS_SHUTDOWN", + "SYS_SIGACTION", + "SYS_SIGALTSTACK", + "SYS_SIGNAL", + "SYS_SIGNALFD", + "SYS_SIGNALFD4", + "SYS_SIGPENDING", + "SYS_SIGPROCMASK", + "SYS_SIGQUEUE", + "SYS_SIGQUEUEINFO", + "SYS_SIGRETURN", + "SYS_SIGSUSPEND", + "SYS_SIGSUSPEND_NOCANCEL", + "SYS_SIGTIMEDWAIT", + "SYS_SIGWAIT", + "SYS_SIGWAITINFO", + "SYS_SOCKET", + "SYS_SOCKETCALL", + "SYS_SOCKETPAIR", + "SYS_SPLICE", + "SYS_SSETMASK", + "SYS_SSTK", + "SYS_STACK_SNAPSHOT", + "SYS_STAT", + "SYS_STAT64", + "SYS_STAT64_EXTENDED", + "SYS_STATFS", + "SYS_STATFS64", + "SYS_STATV", + "SYS_STATVFS1", + "SYS_STAT_EXTENDED", + "SYS_STIME", + "SYS_STTY", + "SYS_SWAPCONTEXT", + "SYS_SWAPCTL", + "SYS_SWAPOFF", + "SYS_SWAPON", + "SYS_SYMLINK", + "SYS_SYMLINKAT", + "SYS_SYNC", + "SYS_SYNCFS", + "SYS_SYNC_FILE_RANGE", + "SYS_SYSARCH", + "SYS_SYSCALL", + "SYS_SYSCALL_BASE", + "SYS_SYSFS", + "SYS_SYSINFO", + "SYS_SYSLOG", + "SYS_TEE", + "SYS_TGKILL", + "SYS_THREAD_SELFID", + "SYS_THR_CREATE", + "SYS_THR_EXIT", + "SYS_THR_KILL", + "SYS_THR_KILL2", + "SYS_THR_NEW", + "SYS_THR_SELF", + "SYS_THR_SET_NAME", + "SYS_THR_SUSPEND", + "SYS_THR_WAKE", + "SYS_TIME", + "SYS_TIMERFD_CREATE", + "SYS_TIMERFD_GETTIME", + "SYS_TIMERFD_SETTIME", + "SYS_TIMER_CREATE", + "SYS_TIMER_DELETE", + "SYS_TIMER_GETOVERRUN", + "SYS_TIMER_GETTIME", + "SYS_TIMER_SETTIME", + "SYS_TIMES", + "SYS_TKILL", + "SYS_TRUNCATE", + "SYS_TRUNCATE64", + "SYS_TUXCALL", + "SYS_UGETRLIMIT", + "SYS_ULIMIT", + "SYS_UMASK", + "SYS_UMASK_EXTENDED", + "SYS_UMOUNT", + "SYS_UMOUNT2", + "SYS_UNAME", + "SYS_UNDELETE", + "SYS_UNLINK", + "SYS_UNLINKAT", + "SYS_UNMOUNT", + "SYS_UNSHARE", + "SYS_USELIB", + "SYS_USTAT", + "SYS_UTIME", + "SYS_UTIMENSAT", + "SYS_UTIMES", + "SYS_UTRACE", + "SYS_UUIDGEN", + "SYS_VADVISE", + "SYS_VFORK", + "SYS_VHANGUP", + "SYS_VM86", + "SYS_VM86OLD", + "SYS_VMSPLICE", + "SYS_VM_PRESSURE_MONITOR", + "SYS_VSERVER", + "SYS_WAIT4", + "SYS_WAIT4_NOCANCEL", + "SYS_WAIT6", + "SYS_WAITEVENT", + "SYS_WAITID", + "SYS_WAITID_NOCANCEL", + "SYS_WAITPID", + "SYS_WATCHEVENT", + "SYS_WORKQ_KERNRETURN", + "SYS_WORKQ_OPEN", + "SYS_WRITE", + "SYS_WRITEV", + "SYS_WRITEV_NOCANCEL", + "SYS_WRITE_NOCANCEL", + "SYS_YIELD", + "SYS__LLSEEK", + "SYS__LWP_CONTINUE", + "SYS__LWP_CREATE", + "SYS__LWP_CTL", + "SYS__LWP_DETACH", + "SYS__LWP_EXIT", + "SYS__LWP_GETNAME", + "SYS__LWP_GETPRIVATE", + "SYS__LWP_KILL", + "SYS__LWP_PARK", + "SYS__LWP_SELF", + "SYS__LWP_SETNAME", + "SYS__LWP_SETPRIVATE", + "SYS__LWP_SUSPEND", + "SYS__LWP_UNPARK", + "SYS__LWP_UNPARK_ALL", + "SYS__LWP_WAIT", + "SYS__LWP_WAKEUP", + "SYS__NEWSELECT", + 
"SYS__PSET_BIND", + "SYS__SCHED_GETAFFINITY", + "SYS__SCHED_GETPARAM", + "SYS__SCHED_SETAFFINITY", + "SYS__SCHED_SETPARAM", + "SYS__SYSCTL", + "SYS__UMTX_LOCK", + "SYS__UMTX_OP", + "SYS__UMTX_UNLOCK", + "SYS___ACL_ACLCHECK_FD", + "SYS___ACL_ACLCHECK_FILE", + "SYS___ACL_ACLCHECK_LINK", + "SYS___ACL_DELETE_FD", + "SYS___ACL_DELETE_FILE", + "SYS___ACL_DELETE_LINK", + "SYS___ACL_GET_FD", + "SYS___ACL_GET_FILE", + "SYS___ACL_GET_LINK", + "SYS___ACL_SET_FD", + "SYS___ACL_SET_FILE", + "SYS___ACL_SET_LINK", + "SYS___CLONE", + "SYS___DISABLE_THREADSIGNAL", + "SYS___GETCWD", + "SYS___GETLOGIN", + "SYS___GET_TCB", + "SYS___MAC_EXECVE", + "SYS___MAC_GETFSSTAT", + "SYS___MAC_GET_FD", + "SYS___MAC_GET_FILE", + "SYS___MAC_GET_LCID", + "SYS___MAC_GET_LCTX", + "SYS___MAC_GET_LINK", + "SYS___MAC_GET_MOUNT", + "SYS___MAC_GET_PID", + "SYS___MAC_GET_PROC", + "SYS___MAC_MOUNT", + "SYS___MAC_SET_FD", + "SYS___MAC_SET_FILE", + "SYS___MAC_SET_LCTX", + "SYS___MAC_SET_LINK", + "SYS___MAC_SET_PROC", + "SYS___MAC_SYSCALL", + "SYS___OLD_SEMWAIT_SIGNAL", + "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", + "SYS___POSIX_CHOWN", + "SYS___POSIX_FCHOWN", + "SYS___POSIX_LCHOWN", + "SYS___POSIX_RENAME", + "SYS___PTHREAD_CANCELED", + "SYS___PTHREAD_CHDIR", + "SYS___PTHREAD_FCHDIR", + "SYS___PTHREAD_KILL", + "SYS___PTHREAD_MARKCANCEL", + "SYS___PTHREAD_SIGMASK", + "SYS___QUOTACTL", + "SYS___SEMCTL", + "SYS___SEMWAIT_SIGNAL", + "SYS___SEMWAIT_SIGNAL_NOCANCEL", + "SYS___SETLOGIN", + "SYS___SETUGID", + "SYS___SET_TCB", + "SYS___SIGACTION_SIGTRAMP", + "SYS___SIGTIMEDWAIT", + "SYS___SIGWAIT", + "SYS___SIGWAIT_NOCANCEL", + "SYS___SYSCTL", + "SYS___TFORK", + "SYS___THREXIT", + "SYS___THRSIGDIVERT", + "SYS___THRSLEEP", + "SYS___THRWAKEUP", + "S_ARCH1", + "S_ARCH2", + "S_BLKSIZE", + "S_IEXEC", + "S_IFBLK", + "S_IFCHR", + "S_IFDIR", + "S_IFIFO", + "S_IFLNK", + "S_IFMT", + "S_IFREG", + "S_IFSOCK", + "S_IFWHT", + "S_IREAD", + "S_IRGRP", + "S_IROTH", + "S_IRUSR", + "S_IRWXG", + "S_IRWXO", + "S_IRWXU", + "S_ISGID", + "S_ISTXT", + "S_ISUID", + "S_ISVTX", + "S_IWGRP", + "S_IWOTH", + "S_IWRITE", + "S_IWUSR", + "S_IXGRP", + "S_IXOTH", + "S_IXUSR", + "S_LOGIN_SET", + "SecurityAttributes", + "Seek", + "Select", + "Sendfile", + "Sendmsg", + "SendmsgN", + "Sendto", + "Servent", + "SetBpf", + "SetBpfBuflen", + "SetBpfDatalink", + "SetBpfHeadercmpl", + "SetBpfImmediate", + "SetBpfInterface", + "SetBpfPromisc", + "SetBpfTimeout", + "SetCurrentDirectory", + "SetEndOfFile", + "SetEnvironmentVariable", + "SetFileAttributes", + "SetFileCompletionNotificationModes", + "SetFilePointer", + "SetFileTime", + "SetHandleInformation", + "SetKevent", + "SetLsfPromisc", + "SetNonblock", + "Setdomainname", + "Setegid", + "Setenv", + "Seteuid", + "Setfsgid", + "Setfsuid", + "Setgid", + "Setgroups", + "Sethostname", + "Setlogin", + "Setpgid", + "Setpriority", + "Setprivexec", + "Setregid", + "Setresgid", + "Setresuid", + "Setreuid", + "Setrlimit", + "Setsid", + "Setsockopt", + "SetsockoptByte", + "SetsockoptICMPv6Filter", + "SetsockoptIPMreq", + "SetsockoptIPMreqn", + "SetsockoptIPv6Mreq", + "SetsockoptInet4Addr", + "SetsockoptInt", + "SetsockoptLinger", + "SetsockoptString", + "SetsockoptTimeval", + "Settimeofday", + "Setuid", + "Setxattr", + "Shutdown", + "SidTypeAlias", + "SidTypeComputer", + "SidTypeDeletedAccount", + "SidTypeDomain", + "SidTypeGroup", + "SidTypeInvalid", + "SidTypeLabel", + "SidTypeUnknown", + "SidTypeUser", + "SidTypeWellKnownGroup", + "Signal", + "SizeofBpfHdr", + "SizeofBpfInsn", + "SizeofBpfProgram", + "SizeofBpfStat", + "SizeofBpfVersion", + 
"SizeofBpfZbuf", + "SizeofBpfZbufHeader", + "SizeofCmsghdr", + "SizeofICMPv6Filter", + "SizeofIPMreq", + "SizeofIPMreqn", + "SizeofIPv6MTUInfo", + "SizeofIPv6Mreq", + "SizeofIfAddrmsg", + "SizeofIfAnnounceMsghdr", + "SizeofIfData", + "SizeofIfInfomsg", + "SizeofIfMsghdr", + "SizeofIfaMsghdr", + "SizeofIfmaMsghdr", + "SizeofIfmaMsghdr2", + "SizeofInet4Pktinfo", + "SizeofInet6Pktinfo", + "SizeofInotifyEvent", + "SizeofLinger", + "SizeofMsghdr", + "SizeofNlAttr", + "SizeofNlMsgerr", + "SizeofNlMsghdr", + "SizeofRtAttr", + "SizeofRtGenmsg", + "SizeofRtMetrics", + "SizeofRtMsg", + "SizeofRtMsghdr", + "SizeofRtNexthop", + "SizeofSockFilter", + "SizeofSockFprog", + "SizeofSockaddrAny", + "SizeofSockaddrDatalink", + "SizeofSockaddrInet4", + "SizeofSockaddrInet6", + "SizeofSockaddrLinklayer", + "SizeofSockaddrNetlink", + "SizeofSockaddrUnix", + "SizeofTCPInfo", + "SizeofUcred", + "SlicePtrFromStrings", + "SockFilter", + "SockFprog", + "Sockaddr", + "SockaddrDatalink", + "SockaddrGen", + "SockaddrInet4", + "SockaddrInet6", + "SockaddrLinklayer", + "SockaddrNetlink", + "SockaddrUnix", + "Socket", + "SocketControlMessage", + "SocketDisableIPv6", + "Socketpair", + "Splice", + "StartProcess", + "StartupInfo", + "Stat", + "Stat_t", + "Statfs", + "Statfs_t", + "Stderr", + "Stdin", + "Stdout", + "StringBytePtr", + "StringByteSlice", + "StringSlicePtr", + "StringToSid", + "StringToUTF16", + "StringToUTF16Ptr", + "Symlink", + "Sync", + "SyncFileRange", + "SysProcAttr", + "SysProcIDMap", + "Syscall", + "Syscall12", + "Syscall15", + "Syscall18", + "Syscall6", + "Syscall9", + "Sysctl", + "SysctlUint32", + "Sysctlnode", + "Sysinfo", + "Sysinfo_t", + "Systemtime", + "TCGETS", + "TCIFLUSH", + "TCIOFLUSH", + "TCOFLUSH", + "TCPInfo", + "TCPKeepalive", + "TCP_CA_NAME_MAX", + "TCP_CONGCTL", + "TCP_CONGESTION", + "TCP_CONNECTIONTIMEOUT", + "TCP_CORK", + "TCP_DEFER_ACCEPT", + "TCP_INFO", + "TCP_KEEPALIVE", + "TCP_KEEPCNT", + "TCP_KEEPIDLE", + "TCP_KEEPINIT", + "TCP_KEEPINTVL", + "TCP_LINGER2", + "TCP_MAXBURST", + "TCP_MAXHLEN", + "TCP_MAXOLEN", + "TCP_MAXSEG", + "TCP_MAXWIN", + "TCP_MAX_SACK", + "TCP_MAX_WINSHIFT", + "TCP_MD5SIG", + "TCP_MD5SIG_MAXKEYLEN", + "TCP_MINMSS", + "TCP_MINMSSOVERLOAD", + "TCP_MSS", + "TCP_NODELAY", + "TCP_NOOPT", + "TCP_NOPUSH", + "TCP_NSTATES", + "TCP_QUICKACK", + "TCP_RXT_CONNDROPTIME", + "TCP_RXT_FINDROP", + "TCP_SACK_ENABLE", + "TCP_SYNCNT", + "TCP_VENDOR", + "TCP_WINDOW_CLAMP", + "TCSAFLUSH", + "TCSETS", + "TF_DISCONNECT", + "TF_REUSE_SOCKET", + "TF_USE_DEFAULT_WORKER", + "TF_USE_KERNEL_APC", + "TF_USE_SYSTEM_THREAD", + "TF_WRITE_BEHIND", + "TH32CS_INHERIT", + "TH32CS_SNAPALL", + "TH32CS_SNAPHEAPLIST", + "TH32CS_SNAPMODULE", + "TH32CS_SNAPMODULE32", + "TH32CS_SNAPPROCESS", + "TH32CS_SNAPTHREAD", + "TIME_ZONE_ID_DAYLIGHT", + "TIME_ZONE_ID_STANDARD", + "TIME_ZONE_ID_UNKNOWN", + "TIOCCBRK", + "TIOCCDTR", + "TIOCCONS", + "TIOCDCDTIMESTAMP", + "TIOCDRAIN", + "TIOCDSIMICROCODE", + "TIOCEXCL", + "TIOCEXT", + "TIOCFLAG_CDTRCTS", + "TIOCFLAG_CLOCAL", + "TIOCFLAG_CRTSCTS", + "TIOCFLAG_MDMBUF", + "TIOCFLAG_PPS", + "TIOCFLAG_SOFTCAR", + "TIOCFLUSH", + "TIOCGDEV", + "TIOCGDRAINWAIT", + "TIOCGETA", + "TIOCGETD", + "TIOCGFLAGS", + "TIOCGICOUNT", + "TIOCGLCKTRMIOS", + "TIOCGLINED", + "TIOCGPGRP", + "TIOCGPTN", + "TIOCGQSIZE", + "TIOCGRANTPT", + "TIOCGRS485", + "TIOCGSERIAL", + "TIOCGSID", + "TIOCGSIZE", + "TIOCGSOFTCAR", + "TIOCGTSTAMP", + "TIOCGWINSZ", + "TIOCINQ", + "TIOCIXOFF", + "TIOCIXON", + "TIOCLINUX", + "TIOCMBIC", + "TIOCMBIS", + "TIOCMGDTRWAIT", + "TIOCMGET", + "TIOCMIWAIT", + "TIOCMODG", + 
"TIOCMODS", + "TIOCMSDTRWAIT", + "TIOCMSET", + "TIOCM_CAR", + "TIOCM_CD", + "TIOCM_CTS", + "TIOCM_DCD", + "TIOCM_DSR", + "TIOCM_DTR", + "TIOCM_LE", + "TIOCM_RI", + "TIOCM_RNG", + "TIOCM_RTS", + "TIOCM_SR", + "TIOCM_ST", + "TIOCNOTTY", + "TIOCNXCL", + "TIOCOUTQ", + "TIOCPKT", + "TIOCPKT_DATA", + "TIOCPKT_DOSTOP", + "TIOCPKT_FLUSHREAD", + "TIOCPKT_FLUSHWRITE", + "TIOCPKT_IOCTL", + "TIOCPKT_NOSTOP", + "TIOCPKT_START", + "TIOCPKT_STOP", + "TIOCPTMASTER", + "TIOCPTMGET", + "TIOCPTSNAME", + "TIOCPTYGNAME", + "TIOCPTYGRANT", + "TIOCPTYUNLK", + "TIOCRCVFRAME", + "TIOCREMOTE", + "TIOCSBRK", + "TIOCSCONS", + "TIOCSCTTY", + "TIOCSDRAINWAIT", + "TIOCSDTR", + "TIOCSERCONFIG", + "TIOCSERGETLSR", + "TIOCSERGETMULTI", + "TIOCSERGSTRUCT", + "TIOCSERGWILD", + "TIOCSERSETMULTI", + "TIOCSERSWILD", + "TIOCSER_TEMT", + "TIOCSETA", + "TIOCSETAF", + "TIOCSETAW", + "TIOCSETD", + "TIOCSFLAGS", + "TIOCSIG", + "TIOCSLCKTRMIOS", + "TIOCSLINED", + "TIOCSPGRP", + "TIOCSPTLCK", + "TIOCSQSIZE", + "TIOCSRS485", + "TIOCSSERIAL", + "TIOCSSIZE", + "TIOCSSOFTCAR", + "TIOCSTART", + "TIOCSTAT", + "TIOCSTI", + "TIOCSTOP", + "TIOCSTSTAMP", + "TIOCSWINSZ", + "TIOCTIMESTAMP", + "TIOCUCNTL", + "TIOCVHANGUP", + "TIOCXMTFRAME", + "TOKEN_ADJUST_DEFAULT", + "TOKEN_ADJUST_GROUPS", + "TOKEN_ADJUST_PRIVILEGES", + "TOKEN_ADJUST_SESSIONID", + "TOKEN_ALL_ACCESS", + "TOKEN_ASSIGN_PRIMARY", + "TOKEN_DUPLICATE", + "TOKEN_EXECUTE", + "TOKEN_IMPERSONATE", + "TOKEN_QUERY", + "TOKEN_QUERY_SOURCE", + "TOKEN_READ", + "TOKEN_WRITE", + "TOSTOP", + "TRUNCATE_EXISTING", + "TUNATTACHFILTER", + "TUNDETACHFILTER", + "TUNGETFEATURES", + "TUNGETIFF", + "TUNGETSNDBUF", + "TUNGETVNETHDRSZ", + "TUNSETDEBUG", + "TUNSETGROUP", + "TUNSETIFF", + "TUNSETLINK", + "TUNSETNOCSUM", + "TUNSETOFFLOAD", + "TUNSETOWNER", + "TUNSETPERSIST", + "TUNSETSNDBUF", + "TUNSETTXFILTER", + "TUNSETVNETHDRSZ", + "Tee", + "TerminateProcess", + "Termios", + "Tgkill", + "Time", + "Time_t", + "Times", + "Timespec", + "TimespecToNsec", + "Timeval", + "Timeval32", + "TimevalToNsec", + "Timex", + "Timezoneinformation", + "Tms", + "Token", + "TokenAccessInformation", + "TokenAuditPolicy", + "TokenDefaultDacl", + "TokenElevation", + "TokenElevationType", + "TokenGroups", + "TokenGroupsAndPrivileges", + "TokenHasRestrictions", + "TokenImpersonationLevel", + "TokenIntegrityLevel", + "TokenLinkedToken", + "TokenLogonSid", + "TokenMandatoryPolicy", + "TokenOrigin", + "TokenOwner", + "TokenPrimaryGroup", + "TokenPrivileges", + "TokenRestrictedSids", + "TokenSandBoxInert", + "TokenSessionId", + "TokenSessionReference", + "TokenSource", + "TokenStatistics", + "TokenType", + "TokenUIAccess", + "TokenUser", + "TokenVirtualizationAllowed", + "TokenVirtualizationEnabled", + "Tokenprimarygroup", + "Tokenuser", + "TranslateAccountName", + "TranslateName", + "TransmitFile", + "TransmitFileBuffers", + "Truncate", + "UNIX_PATH_MAX", + "USAGE_MATCH_TYPE_AND", + "USAGE_MATCH_TYPE_OR", + "UTF16FromString", + "UTF16PtrFromString", + "UTF16ToString", + "Ucred", + "Umask", + "Uname", + "Undelete", + "UnixCredentials", + "UnixRights", + "Unlink", + "Unlinkat", + "UnmapViewOfFile", + "Unmount", + "Unsetenv", + "Unshare", + "UserInfo10", + "Ustat", + "Ustat_t", + "Utimbuf", + "Utime", + "Utimes", + "UtimesNano", + "Utsname", + "VDISCARD", + "VDSUSP", + "VEOF", + "VEOL", + "VEOL2", + "VERASE", + "VERASE2", + "VINTR", + "VKILL", + "VLNEXT", + "VMIN", + "VQUIT", + "VREPRINT", + "VSTART", + "VSTATUS", + "VSTOP", + "VSUSP", + "VSWTC", + "VT0", + "VT1", + "VTDLY", + "VTIME", + "VWERASE", + "VirtualLock", + "VirtualUnlock", + 
"WAIT_ABANDONED", + "WAIT_FAILED", + "WAIT_OBJECT_0", + "WAIT_TIMEOUT", + "WALL", + "WALLSIG", + "WALTSIG", + "WCLONE", + "WCONTINUED", + "WCOREFLAG", + "WEXITED", + "WLINUXCLONE", + "WNOHANG", + "WNOTHREAD", + "WNOWAIT", + "WNOZOMBIE", + "WOPTSCHECKED", + "WORDSIZE", + "WSABuf", + "WSACleanup", + "WSADESCRIPTION_LEN", + "WSAData", + "WSAEACCES", + "WSAECONNABORTED", + "WSAECONNRESET", + "WSAEnumProtocols", + "WSAID_CONNECTEX", + "WSAIoctl", + "WSAPROTOCOL_LEN", + "WSAProtocolChain", + "WSAProtocolInfo", + "WSARecv", + "WSARecvFrom", + "WSASYS_STATUS_LEN", + "WSASend", + "WSASendTo", + "WSASendto", + "WSAStartup", + "WSTOPPED", + "WTRAPPED", + "WUNTRACED", + "Wait4", + "WaitForSingleObject", + "WaitStatus", + "Win32FileAttributeData", + "Win32finddata", + "Write", + "WriteConsole", + "WriteFile", + "X509_ASN_ENCODING", + "XCASE", + "XP1_CONNECTIONLESS", + "XP1_CONNECT_DATA", + "XP1_DISCONNECT_DATA", + "XP1_EXPEDITED_DATA", + "XP1_GRACEFUL_CLOSE", + "XP1_GUARANTEED_DELIVERY", + "XP1_GUARANTEED_ORDER", + "XP1_IFS_HANDLES", + "XP1_MESSAGE_ORIENTED", + "XP1_MULTIPOINT_CONTROL_PLANE", + "XP1_MULTIPOINT_DATA_PLANE", + "XP1_PARTIAL_MESSAGE", + "XP1_PSEUDO_STREAM", + "XP1_QOS_SUPPORTED", + "XP1_SAN_SUPPORT_SDP", + "XP1_SUPPORT_BROADCAST", + "XP1_SUPPORT_MULTIPOINT", + "XP1_UNI_RECV", + "XP1_UNI_SEND", + }, + "syscall/js": []string{ + "CopyBytesToGo", + "CopyBytesToJS", + "Error", + "Func", + "FuncOf", + "Global", + "Null", + "Type", + "TypeBoolean", + "TypeFunction", + "TypeNull", + "TypeNumber", + "TypeObject", + "TypeString", + "TypeSymbol", + "TypeUndefined", + "Undefined", + "Value", + "ValueError", + "ValueOf", + "Wrapper", + }, + "testing": []string{ + "AllocsPerRun", + "B", + "Benchmark", + "BenchmarkResult", + "Cover", + "CoverBlock", + "CoverMode", + "Coverage", + "Init", + "InternalBenchmark", + "InternalExample", + "InternalTest", + "M", + "Main", + "MainStart", + "PB", + "RegisterCover", + "RunBenchmarks", + "RunExamples", + "RunTests", + "Short", + "T", + "TB", + "Verbose", + }, + "testing/fstest": []string{ + "MapFS", + "MapFile", + "TestFS", + }, + "testing/iotest": []string{ + "DataErrReader", + "ErrReader", + "ErrTimeout", + "HalfReader", + "NewReadLogger", + "NewWriteLogger", + "OneByteReader", + "TestReader", + "TimeoutReader", + "TruncateWriter", + }, + "testing/quick": []string{ + "Check", + "CheckEqual", + "CheckEqualError", + "CheckError", + "Config", + "Generator", + "SetupError", + "Value", + }, + "text/scanner": []string{ + "Char", + "Comment", + "EOF", + "Float", + "GoTokens", + "GoWhitespace", + "Ident", + "Int", + "Position", + "RawString", + "ScanChars", + "ScanComments", + "ScanFloats", + "ScanIdents", + "ScanInts", + "ScanRawStrings", + "ScanStrings", + "Scanner", + "SkipComments", + "String", + "TokenString", + }, + "text/tabwriter": []string{ + "AlignRight", + "Debug", + "DiscardEmptyColumns", + "Escape", + "FilterHTML", + "NewWriter", + "StripEscape", + "TabIndent", + "Writer", + }, + "text/template": []string{ + "ExecError", + "FuncMap", + "HTMLEscape", + "HTMLEscapeString", + "HTMLEscaper", + "IsTrue", + "JSEscape", + "JSEscapeString", + "JSEscaper", + "Must", + "New", + "ParseFS", + "ParseFiles", + "ParseGlob", + "Template", + "URLQueryEscaper", + }, + "text/template/parse": []string{ + "ActionNode", + "BoolNode", + "BranchNode", + "ChainNode", + "CommandNode", + "CommentNode", + "DotNode", + "FieldNode", + "IdentifierNode", + "IfNode", + "IsEmptyTree", + "ListNode", + "Mode", + "New", + "NewIdentifier", + "NilNode", + "Node", + "NodeAction", + "NodeBool", + 
"NodeChain", + "NodeCommand", + "NodeComment", + "NodeDot", + "NodeField", + "NodeIdentifier", + "NodeIf", + "NodeList", + "NodeNil", + "NodeNumber", + "NodePipe", + "NodeRange", + "NodeString", + "NodeTemplate", + "NodeText", + "NodeType", + "NodeVariable", + "NodeWith", + "NumberNode", + "Parse", + "ParseComments", + "PipeNode", + "Pos", + "RangeNode", + "StringNode", + "TemplateNode", + "TextNode", + "Tree", + "VariableNode", + "WithNode", + }, + "time": []string{ + "ANSIC", + "After", + "AfterFunc", + "April", + "August", + "Date", + "December", + "Duration", + "February", + "FixedZone", + "Friday", + "Hour", + "January", + "July", + "June", + "Kitchen", + "LoadLocation", + "LoadLocationFromTZData", + "Local", + "Location", + "March", + "May", + "Microsecond", + "Millisecond", + "Minute", + "Monday", + "Month", + "Nanosecond", + "NewTicker", + "NewTimer", + "November", + "Now", + "October", + "Parse", + "ParseDuration", + "ParseError", + "ParseInLocation", + "RFC1123", + "RFC1123Z", + "RFC3339", + "RFC3339Nano", + "RFC822", + "RFC822Z", + "RFC850", + "RubyDate", + "Saturday", + "Second", + "September", + "Since", + "Sleep", + "Stamp", + "StampMicro", + "StampMilli", + "StampNano", + "Sunday", + "Thursday", + "Tick", + "Ticker", + "Time", + "Timer", + "Tuesday", + "UTC", + "Unix", + "UnixDate", + "Until", + "Wednesday", + "Weekday", + }, + "unicode": []string{ + "ASCII_Hex_Digit", + "Adlam", + "Ahom", + "Anatolian_Hieroglyphs", + "Arabic", + "Armenian", + "Avestan", + "AzeriCase", + "Balinese", + "Bamum", + "Bassa_Vah", + "Batak", + "Bengali", + "Bhaiksuki", + "Bidi_Control", + "Bopomofo", + "Brahmi", + "Braille", + "Buginese", + "Buhid", + "C", + "Canadian_Aboriginal", + "Carian", + "CaseRange", + "CaseRanges", + "Categories", + "Caucasian_Albanian", + "Cc", + "Cf", + "Chakma", + "Cham", + "Cherokee", + "Chorasmian", + "Co", + "Common", + "Coptic", + "Cs", + "Cuneiform", + "Cypriot", + "Cyrillic", + "Dash", + "Deprecated", + "Deseret", + "Devanagari", + "Diacritic", + "Digit", + "Dives_Akuru", + "Dogra", + "Duployan", + "Egyptian_Hieroglyphs", + "Elbasan", + "Elymaic", + "Ethiopic", + "Extender", + "FoldCategory", + "FoldScript", + "Georgian", + "Glagolitic", + "Gothic", + "Grantha", + "GraphicRanges", + "Greek", + "Gujarati", + "Gunjala_Gondi", + "Gurmukhi", + "Han", + "Hangul", + "Hanifi_Rohingya", + "Hanunoo", + "Hatran", + "Hebrew", + "Hex_Digit", + "Hiragana", + "Hyphen", + "IDS_Binary_Operator", + "IDS_Trinary_Operator", + "Ideographic", + "Imperial_Aramaic", + "In", + "Inherited", + "Inscriptional_Pahlavi", + "Inscriptional_Parthian", + "Is", + "IsControl", + "IsDigit", + "IsGraphic", + "IsLetter", + "IsLower", + "IsMark", + "IsNumber", + "IsOneOf", + "IsPrint", + "IsPunct", + "IsSpace", + "IsSymbol", + "IsTitle", + "IsUpper", + "Javanese", + "Join_Control", + "Kaithi", + "Kannada", + "Katakana", + "Kayah_Li", + "Kharoshthi", + "Khitan_Small_Script", + "Khmer", + "Khojki", + "Khudawadi", + "L", + "Lao", + "Latin", + "Lepcha", + "Letter", + "Limbu", + "Linear_A", + "Linear_B", + "Lisu", + "Ll", + "Lm", + "Lo", + "Logical_Order_Exception", + "Lower", + "LowerCase", + "Lt", + "Lu", + "Lycian", + "Lydian", + "M", + "Mahajani", + "Makasar", + "Malayalam", + "Mandaic", + "Manichaean", + "Marchen", + "Mark", + "Masaram_Gondi", + "MaxASCII", + "MaxCase", + "MaxLatin1", + "MaxRune", + "Mc", + "Me", + "Medefaidrin", + "Meetei_Mayek", + "Mende_Kikakui", + "Meroitic_Cursive", + "Meroitic_Hieroglyphs", + "Miao", + "Mn", + "Modi", + "Mongolian", + "Mro", + "Multani", + "Myanmar", + "N", + 
"Nabataean", + "Nandinagari", + "Nd", + "New_Tai_Lue", + "Newa", + "Nko", + "Nl", + "No", + "Noncharacter_Code_Point", + "Number", + "Nushu", + "Nyiakeng_Puachue_Hmong", + "Ogham", + "Ol_Chiki", + "Old_Hungarian", + "Old_Italic", + "Old_North_Arabian", + "Old_Permic", + "Old_Persian", + "Old_Sogdian", + "Old_South_Arabian", + "Old_Turkic", + "Oriya", + "Osage", + "Osmanya", + "Other", + "Other_Alphabetic", + "Other_Default_Ignorable_Code_Point", + "Other_Grapheme_Extend", + "Other_ID_Continue", + "Other_ID_Start", + "Other_Lowercase", + "Other_Math", + "Other_Uppercase", + "P", + "Pahawh_Hmong", + "Palmyrene", + "Pattern_Syntax", + "Pattern_White_Space", + "Pau_Cin_Hau", + "Pc", + "Pd", + "Pe", + "Pf", + "Phags_Pa", + "Phoenician", + "Pi", + "Po", + "Prepended_Concatenation_Mark", + "PrintRanges", + "Properties", + "Ps", + "Psalter_Pahlavi", + "Punct", + "Quotation_Mark", + "Radical", + "Range16", + "Range32", + "RangeTable", + "Regional_Indicator", + "Rejang", + "ReplacementChar", + "Runic", + "S", + "STerm", + "Samaritan", + "Saurashtra", + "Sc", + "Scripts", + "Sentence_Terminal", + "Sharada", + "Shavian", + "Siddham", + "SignWriting", + "SimpleFold", + "Sinhala", + "Sk", + "Sm", + "So", + "Soft_Dotted", + "Sogdian", + "Sora_Sompeng", + "Soyombo", + "Space", + "SpecialCase", + "Sundanese", + "Syloti_Nagri", + "Symbol", + "Syriac", + "Tagalog", + "Tagbanwa", + "Tai_Le", + "Tai_Tham", + "Tai_Viet", + "Takri", + "Tamil", + "Tangut", + "Telugu", + "Terminal_Punctuation", + "Thaana", + "Thai", + "Tibetan", + "Tifinagh", + "Tirhuta", + "Title", + "TitleCase", + "To", + "ToLower", + "ToTitle", + "ToUpper", + "TurkishCase", + "Ugaritic", + "Unified_Ideograph", + "Upper", + "UpperCase", + "UpperLower", + "Vai", + "Variation_Selector", + "Version", + "Wancho", + "Warang_Citi", + "White_Space", + "Yezidi", + "Yi", + "Z", + "Zanabazar_Square", + "Zl", + "Zp", + "Zs", + }, + "unicode/utf16": []string{ + "Decode", + "DecodeRune", + "Encode", + "EncodeRune", + "IsSurrogate", + }, + "unicode/utf8": []string{ + "DecodeLastRune", + "DecodeLastRuneInString", + "DecodeRune", + "DecodeRuneInString", + "EncodeRune", + "FullRune", + "FullRuneInString", + "MaxRune", + "RuneCount", + "RuneCountInString", + "RuneError", + "RuneLen", + "RuneSelf", + "RuneStart", + "UTFMax", + "Valid", + "ValidRune", + "ValidString", + }, + "unsafe": []string{ + "Alignof", + "ArbitraryType", + "Offsetof", + "Pointer", + "Sizeof", + }, +} diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go new file mode 100644 index 000000000..ac377035e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzzy + +import ( + "unicode" +) + +// RuneRole specifies the role of a rune in the context of an input. +type RuneRole byte + +const ( + // RNone specifies a rune without any role in the input (i.e., whitespace/non-ASCII). + RNone RuneRole = iota + // RSep specifies a rune with the role of segment separator. + RSep + // RTail specifies a rune which is a lower-case tail in a word in the input. + RTail + // RUCTail specifies a rune which is an upper-case tail in a word in the input. + RUCTail + // RHead specifies a rune which is the first character in a word in the input. 
+ RHead +) + +// RuneRoles detects the roles of each byte rune in an input string and stores it in the output +// slice. The rune role depends on the input type. Stops when it parsed all the runes in the string +// or when it filled the output. If output is nil, then it gets created. +func RuneRoles(str string, reuse []RuneRole) []RuneRole { + var output []RuneRole + if cap(reuse) < len(str) { + output = make([]RuneRole, 0, len(str)) + } else { + output = reuse[:0] + } + + prev, prev2 := rtNone, rtNone + for i := 0; i < len(str); i++ { + r := rune(str[i]) + + role := RNone + + curr := rtLower + if str[i] <= unicode.MaxASCII { + curr = runeType(rt[str[i]] - '0') + } + + if curr == rtLower { + if prev == rtNone || prev == rtPunct { + role = RHead + } else { + role = RTail + } + } else if curr == rtUpper { + role = RHead + + if prev == rtUpper { + // This and previous characters are both upper case. + + if i+1 == len(str) { + // This is last character, previous was also uppercase -> this is UCTail + // i.e., (current char is C): aBC / BC / ABC + role = RUCTail + } + } + } else if curr == rtPunct { + switch r { + case '.', ':': + role = RSep + } + } + if curr != rtLower { + if i > 1 && output[i-1] == RHead && prev2 == rtUpper && (output[i-2] == RHead || output[i-2] == RUCTail) { + // The previous two characters were uppercase. The current one is not a lower case, so the + // previous one can't be a HEAD. Make it a UCTail. + // i.e., (last char is current char - B must be a UCTail): ABC / ZABC / AB. + output[i-1] = RUCTail + } + } + + output = append(output, role) + prev2 = prev + prev = curr + } + return output +} + +type runeType byte + +const ( + rtNone runeType = iota + rtPunct + rtLower + rtUpper +) + +const rt = "00000000000000000000000000000000000000000000001122222222221000000333333333333333333333333330000002222222222222222222222222200000" + +// LastSegment returns the substring representing the last segment from the input, where each +// byte has an associated RuneRole in the roles slice. This makes sense only for inputs of Symbol +// or Filename type. +func LastSegment(input string, roles []RuneRole) string { + // Exclude ending separators. + end := len(input) - 1 + for end >= 0 && roles[end] == RSep { + end-- + } + if end < 0 { + return "" + } + + start := end - 1 + for start >= 0 && roles[start] != RSep { + start-- + } + + return input[start+1 : end+1] +} + +// ToLower transforms the input string to lower case, which is stored in the output byte slice. +// The lower casing considers only ASCII values - non ASCII values are left unmodified. +// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets +// created. +func ToLower(input string, reuse []byte) []byte { + output := reuse + if cap(reuse) < len(input) { + output = make([]byte, len(input)) + } + + for i := 0; i < len(input); i++ { + r := rune(input[i]) + if r <= unicode.MaxASCII { + if 'A' <= r && r <= 'Z' { + r += 'a' - 'A' + } + } + output[i] = byte(r) + } + return output[:len(input)] +} + +// WordConsumer defines a consumer for a word delimited by the [start,end) byte offsets in an input +// (start is inclusive, end is exclusive). +type WordConsumer func(start, end int) + +// Words find word delimiters in an input based on its bytes' mappings to rune roles. The offset +// delimiters for each word are fed to the provided consumer function. 
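
To make the RuneRoles/Words pair concrete, here is a minimal sketch written as a hypothetical in-package test; it is not part of the vendored upstream file and relies only on the signatures shown in this hunk. RuneRoles classifies every byte of the input once, and Words then reports the [start, end) offsets of the camel-case words it finds.

package fuzzy

import "testing"

// TestWordsCamelCase is a hypothetical in-package test (not part of the
// vendored upstream file). It shows how RuneRoles and Words cooperate:
// RuneRoles assigns a role to each byte, and Words walks those roles and
// reports [start, end) offsets for each camel-case word.
func TestWordsCamelCase(t *testing.T) {
    input := "fooBarBaz"
    roles := RuneRoles(input, nil)

    var words []string
    Words(roles, func(start, end int) {
        words = append(words, input[start:end])
    })

    want := []string{"foo", "Bar", "Baz"}
    if len(words) != len(want) {
        t.Fatalf("got %d words %v, want %v", len(words), words, want)
    }
    for i := range want {
        if words[i] != want[i] {
            t.Errorf("word %d = %q, want %q", i, words[i], want[i])
        }
    }
}

The Score implementation of the matcher appears further below in this patch.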
+func Words(roles []RuneRole, consume WordConsumer) { + var wordStart int + for i, r := range roles { + switch r { + case RUCTail, RTail: + case RHead, RNone, RSep: + if i != wordStart { + consume(wordStart, i) + } + wordStart = i + if r != RHead { + // Skip this character. + wordStart = i + 1 + } + } + } + if wordStart != len(roles) { + consume(wordStart, len(roles)) + } +} diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go new file mode 100644 index 000000000..16a643097 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go @@ -0,0 +1,398 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fuzzy implements a fuzzy matching algorithm. +package fuzzy + +import ( + "bytes" + "fmt" +) + +const ( + // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs + // will be truncated to this size. + MaxInputSize = 127 + // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer + // inputs are truncated to this size. + MaxPatternSize = 63 +) + +type scoreVal int + +func (s scoreVal) val() int { + return int(s) >> 1 +} + +func (s scoreVal) prevK() int { + return int(s) & 1 +} + +func score(val int, prevK int /*0 or 1*/) scoreVal { + return scoreVal(val<<1 + prevK) +} + +// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern. +// The matcher does not support parallel usage. +type Matcher struct { + pattern string + patternLower []byte // lower-case version of the pattern + patternShort []byte // first characters of the pattern + caseSensitive bool // set if the pattern is mix-cased + + patternRoles []RuneRole // the role of each character in the pattern + roles []RuneRole // the role of each character in the tested string + + scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal + + scoreScale float32 + + lastCandidateLen int // in bytes + lastCandidateMatched bool + + // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for + // performance reasons, so the slice is not reallocated for every candidate. + lowerBuf [MaxInputSize]byte + rolesBuf [MaxInputSize]RuneRole +} + +func (m *Matcher) bestK(i, j int) int { + if m.scores[i][j][0].val() < m.scores[i][j][1].val() { + return 1 + } + return 0 +} + +// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern. +func NewMatcher(pattern string) *Matcher { + if len(pattern) > MaxPatternSize { + pattern = pattern[:MaxPatternSize] + } + + m := &Matcher{ + pattern: pattern, + patternLower: ToLower(pattern, nil), + } + + for i, c := range m.patternLower { + if pattern[i] != c { + m.caseSensitive = true + break + } + } + + if len(pattern) > 3 { + m.patternShort = m.patternLower[:3] + } else { + m.patternShort = m.patternLower + } + + m.patternRoles = RuneRoles(pattern, nil) + + if len(pattern) > 0 { + maxCharScore := 4 + m.scoreScale = 1 / float32(maxCharScore*len(pattern)) + } + + return m +} + +// Score returns the score returned by matching the candidate to the pattern. +// This is not designed for parallel use. Multiple candidates must be scored sequentially. +// Returns a score between 0 and 1 (0 - no match, 1 - perfect match). 
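
Before the Score implementation that follows, a minimal usage sketch may help. It is a hypothetical in-package test, not part of the vendored upstream file, and relies only on the NewMatcher, Score, and MatchedRanges signatures shown here: a Matcher is built once per pattern and candidates are scored one at a time, since the type is documented as unsafe for parallel use.

package fuzzy

import "testing"

// TestMatcherSketch is a hypothetical in-package test (not part of the
// vendored upstream file) showing the intended call sequence: build one
// Matcher per pattern, then score candidates sequentially. Score reports
// 0 for no match and at most 1 for a perfect match; MatchedRanges returns
// flattened [begin, end) byte offsets for the last scored candidate.
func TestMatcherSketch(t *testing.T) {
    m := NewMatcher("nwm")

    // "xyz" does not contain the pattern as a subsequence, so it cannot match.
    if got := m.Score("xyz"); got != 0 {
        t.Errorf("Score(%q) = %v, want 0 (no match)", "xyz", got)
    }

    // "NewMatcher" contains n, w, m in order and should score above zero.
    if got := m.Score("NewMatcher"); got <= 0 {
        t.Errorf("Score(%q) = %v, want > 0", "NewMatcher", got)
    }
    ranges := m.MatchedRanges()
    if len(ranges)%2 != 0 {
        t.Errorf("MatchedRanges() = %v, want an even number of offsets", ranges)
    }
}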
+func (m *Matcher) Score(candidate string) float32 { + if len(candidate) > MaxInputSize { + candidate = candidate[:MaxInputSize] + } + lower := ToLower(candidate, m.lowerBuf[:]) + m.lastCandidateLen = len(candidate) + + if len(m.pattern) == 0 { + // Empty patterns perfectly match candidates. + return 1 + } + + if m.match(candidate, lower) { + sc := m.computeScore(candidate, lower) + if sc > minScore/2 && !m.poorMatch() { + m.lastCandidateMatched = true + if len(m.pattern) == len(candidate) { + // Perfect match. + return 1 + } + + if sc < 0 { + sc = 0 + } + normalizedScore := float32(sc) * m.scoreScale + if normalizedScore > 1 { + normalizedScore = 1 + } + + return normalizedScore + } + } + + m.lastCandidateMatched = false + return 0 +} + +const minScore = -10000 + +// MatchedRanges returns matches ranges for the last scored string as a flattened array of +// [begin, end) byte offset pairs. +func (m *Matcher) MatchedRanges() []int { + if len(m.pattern) == 0 || !m.lastCandidateMatched { + return nil + } + i, j := m.lastCandidateLen, len(m.pattern) + if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 { + return nil + } + + var ret []int + k := m.bestK(i, j) + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + if len(ret) == 0 || ret[len(ret)-1] != i { + ret = append(ret, i) + ret = append(ret, i-1) + } else { + ret[len(ret)-1] = i - 1 + } + j-- + } + i-- + } + // Reverse slice. + for i := 0; i < len(ret)/2; i++ { + ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i] + } + return ret +} + +func (m *Matcher) match(candidate string, candidateLower []byte) bool { + i, j := 0, 0 + for ; i < len(candidateLower) && j < len(m.patternLower); i++ { + if candidateLower[i] == m.patternLower[j] { + j++ + } + } + if j != len(m.patternLower) { + return false + } + + // The input passes the simple test against pattern, so it is time to classify its characters. + // Character roles are used below to find the last segment. + m.roles = RuneRoles(candidate, m.rolesBuf[:]) + + return true +} + +func (m *Matcher) computeScore(candidate string, candidateLower []byte) int { + pattLen, candLen := len(m.pattern), len(candidate) + + for j := 0; j <= len(m.pattern); j++ { + m.scores[0][j][0] = minScore << 1 + m.scores[0][j][1] = minScore << 1 + } + m.scores[0][0][0] = score(0, 0) // Start with 0. + + segmentsLeft, lastSegStart := 1, 0 + for i := 0; i < candLen; i++ { + if m.roles[i] == RSep { + segmentsLeft++ + lastSegStart = i + 1 + } + } + + // A per-character bonus for a consecutive match. + consecutiveBonus := 2 + wordIdx := 0 // Word count within segment. + for i := 1; i <= candLen; i++ { + + role := m.roles[i-1] + isHead := role == RHead + + if isHead { + wordIdx++ + } else if role == RSep && segmentsLeft > 1 { + wordIdx = 0 + segmentsLeft-- + } + + var skipPenalty int + if i == 1 || (i-1) == lastSegStart { + // Skipping the start of first or last segment. + skipPenalty++ + } + + for j := 0; j <= pattLen; j++ { + // By default, we don't have a match. Fill in the skip data. + m.scores[i][j][1] = minScore << 1 + + // Compute the skip score. + k := 0 + if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() { + k = 1 + } + + skipScore := m.scores[i-1][j][k].val() + // Do not penalize missing characters after the last matched segment. + if j != pattLen { + skipScore -= skipPenalty + } + m.scores[i][j][0] = score(skipScore, k) + + if j == 0 || candidateLower[i-1] != m.patternLower[j-1] { + // Not a match. 
+ continue + } + pRole := m.patternRoles[j-1] + + if role == RTail && pRole == RHead { + if j > 1 { + // Not a match: a head in the pattern matches a tail character in the candidate. + continue + } + // Special treatment for the first character of the pattern. We allow + // matches in the middle of a word if they are long enough, at least + // min(3, pattern.length) characters. + if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) { + continue + } + } + + // Compute the char score. + var charScore int + // Bonus 1: the char is in the candidate's last segment. + if segmentsLeft <= 1 { + charScore++ + } + // Bonus 2: Case match or a Head in the pattern aligns with one in the word. + // Single-case patterns lack segmentation signals and we assume any character + // can be a head of a segment. + if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) { + charScore++ + } + + // Penalty 1: pattern char is Head, candidate char is Tail. + if role == RTail && pRole == RHead { + charScore-- + } + // Penalty 2: first pattern character matched in the middle of a word. + if j == 1 && role == RTail { + charScore -= 4 + } + + // Third dimension encodes whether there is a gap between the previous match and the current + // one. + for k := 0; k < 2; k++ { + sc := m.scores[i-1][j-1][k].val() + charScore + + isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart + if isConsecutive { + // Bonus 3: a consecutive match. First character match also gets a bonus to + // ensure prefix final match score normalizes to 1.0. + // Logically, this is a part of charScore, but we have to compute it here because it + // only applies for consecutive matches (k == 1). + sc += consecutiveBonus + } + if k == 0 { + // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack + // of alignment. + if role == RTail || role == RUCTail { + sc -= 3 + } + } + + if sc > m.scores[i][j][1].val() { + m.scores[i][j][1] = score(sc, k) + } + } + } + } + + result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val() + + return result +} + +// ScoreTable returns the score table computed for the provided candidate. Used only for debugging. 
+func (m *Matcher) ScoreTable(candidate string) string { + var buf bytes.Buffer + + var line1, line2, separator bytes.Buffer + line1.WriteString("\t") + line2.WriteString("\t") + for j := 0; j < len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j])) + separator.WriteString("----------------") + } + + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + + for i := 1; i <= len(candidate); i++ { + line1.Reset() + line2.Reset() + + line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1])) + line2.WriteString("\t") + + for j := 1; j <= len(m.pattern); j++ { + line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK()))) + line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK()))) + } + buf.WriteString(line1.String()) + buf.WriteString("\n") + buf.WriteString(line2.String()) + buf.WriteString("\n") + buf.WriteString(separator.String()) + buf.WriteString("\n") + } + + return buf.String() +} + +func dir(prevK int) rune { + if prevK == 0 { + return 'M' + } + return 'H' +} + +func (m *Matcher) poorMatch() bool { + if len(m.pattern) < 2 { + return false + } + + i, j := m.lastCandidateLen, len(m.pattern) + k := m.bestK(i, j) + + var counter, len int + for i > 0 { + take := (k == 1) + k = m.scores[i][j][k].prevK() + if take { + len++ + if k == 0 && len < 3 && m.roles[i-1] == RTail { + // Short match in the middle of a word + counter++ + if counter > 1 { + return true + } + } + j-- + } else { + len = 0 + } + i-- + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 000000000..9702094c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/typeparams/doc.go b/vendor/golang.org/x/tools/internal/typeparams/doc.go new file mode 100644 index 000000000..5583947e2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/doc.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams provides functions to work indirectly with type parameter +// data stored in go/ast and go/types objects, while these API are guarded by a +// build constraint. 
+// +// This package exists to make it easier for tools to work with generic code, +// while also compiling against older Go versions. +package typeparams diff --git a/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go new file mode 100644 index 000000000..3a0abc7c1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/notypeparams.go @@ -0,0 +1,90 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !typeparams || !go1.17 +// +build !typeparams !go1.17 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with typeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false + +// UnpackIndex extracts all index expressions from e. For non-generic code this +// is always one expression: e.Index, but may be more than one expression for +// generic type instantiation. +func UnpackIndex(e *ast.IndexExpr) []ast.Expr { + return []ast.Expr{e.Index} +} + +// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type +// introduced to hold type arguments for generic type instantiation. +func IsListExpr(n ast.Node) bool { + return false +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. +func ForTypeDecl(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(*ast.FuncDecl) *ast.FieldList { + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(*types.Signature) []*types.TypeName { + return nil +} + +// HasTypeSet reports if iface has a type set. +func HasTypeSet(*types.Interface) bool { + return false +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(*types.Interface) bool { + return false +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(*types.Interface) bool { + return false +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(*types.Named) []*types.TypeName { + return nil +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(*types.Named) []types.Type { + return nil +} + +// InitInferred initializes info to record inferred type information. +func InitInferred(*types.Info) { +} + +// GetInferred extracts inferred type information from info for e. +// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. 
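
As a minimal sketch of how this shim is meant to be consumed (a hypothetical in-package test, not part of the vendored upstream files): callers go through ForFuncDecl and the Enabled constant instead of touching the guarded go/ast fields, so the same code compiles whether or not type parameters are available in the build.

package typeparams

import (
    "go/ast"
    "go/parser"
    "go/token"
    "testing"
)

// TestForFuncDeclSketch is a hypothetical in-package test (not part of the
// vendored upstream files). It parses a plain function declaration and asks
// the shim for its type-parameter list; when type parameters are disabled
// (Enabled == false, the notypeparams.go build), the result must be nil.
func TestForFuncDeclSketch(t *testing.T) {
    src := "package p\n\nfunc F(x int) int { return x }\n"
    fset := token.NewFileSet()
    file, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        t.Fatal(err)
    }
    fn := file.Decls[0].(*ast.FuncDecl)

    tparams := ForFuncDecl(fn)
    if !Enabled && tparams != nil {
        t.Errorf("ForFuncDecl returned %v with type parameters disabled, want nil", tparams)
    }
}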
+func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) { + return nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go new file mode 100644 index 000000000..6b7958af0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build typeparams && go1.17 +// +build typeparams,go1.17 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// NOTE: doc comments must be kept in sync with notypeparams.go. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true + +// UnpackIndex extracts all index expressions from e. For non-generic code this +// is always one expression: e.Index, but may be more than one expression for +// generic type instantiation. +func UnpackIndex(e *ast.IndexExpr) []ast.Expr { + if x, _ := e.Index.(*ast.ListExpr); x != nil { + return x.ElemList + } + if e.Index != nil { + return []ast.Expr{e.Index} + } + return nil +} + +// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type +// introduced to hold type arguments for generic type instantiation. +func IsListExpr(n ast.Node) bool { + _, ok := n.(*ast.ListExpr) + return ok +} + +// ForTypeDecl extracts the (possibly nil) type parameter node list from n. +func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList { + return n.TParams +} + +// ForFuncDecl extracts the (possibly nil) type parameter node list from n. +func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList { + if n.Type != nil { + return n.Type.TParams + } + return nil +} + +// ForSignature extracts the (possibly empty) type parameter object list from +// sig. +func ForSignature(sig *types.Signature) []*types.TypeName { + return sig.TParams() +} + +// HasTypeSet reports if iface has a type set. +func HasTypeSet(iface *types.Interface) bool { + return iface.HasTypeList() +} + +// IsComparable reports if iface is the comparable interface. +func IsComparable(iface *types.Interface) bool { + return iface.IsComparable() +} + +// IsConstraint reports whether iface may only be used as a type parameter +// constraint (i.e. has a type set or is the comparable interface). +func IsConstraint(iface *types.Interface) bool { + return iface.IsConstraint() +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(named *types.Named) []*types.TypeName { + return named.TParams() +} + +// NamedTArgs extracts the (possibly empty) type argument list from named. +func NamedTArgs(named *types.Named) []types.Type { + return named.TArgs() +} + +// InitInferred initializes info to record inferred type information. +func InitInferred(info *types.Info) { + info.Inferred = make(map[ast.Expr]types.Inferred) +} + +// GetInferred extracts inferred type information from info for e. +// +// The expression e may have an inferred type if it is an *ast.IndexExpr +// representing partial instantiation of a generic function type for which type +// arguments have been inferred using constraint type inference, or if it is an +// *ast.CallExpr for which type type arguments have be inferred using both +// constraint type inference and function argument inference. 
+func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) { + if info.Inferred == nil { + return nil, nil + } + inf := info.Inferred[e] + return inf.TArgs, inf.Sig +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 000000000..fa2834e2a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1368 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. + // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. 
+ // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNil occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNil + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. + // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. + // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. 
+ // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. + // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." + // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." + // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. + // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. 
+ // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. + // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. 
+ // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. + // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. 
+ // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. + // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // information on the type requirements for the copy built-in. 
+ // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. + // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T. + // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. + // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. 
" + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. + // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. 
+ // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. + // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. 
+ // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo +) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 000000000..3e5842a5f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,153 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNil-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ 
= x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] +} + +const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGo" + +var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 
1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903} + +func (i ErrorCode) String() string { + i -= 1 + if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { + return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 000000000..c3e1a397d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(terr) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..f34a38e4e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,201 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..9d4213ebc --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..cd03f8c76 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. 
If you do + want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `make all` to test everything, OR + - `make vet` to catch vet errors + - `make test` to run the tests + - `make testrace` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 000000000..d6ff26747 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 000000000..093c82b3a --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,27 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. 
+ +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..1f0722f16 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,48 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + proto \ + test \ + testappengine \ + testappenginedeps \ + testrace \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..0e6ae69a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. + +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" +``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. 
+ +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ```sh + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. *.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. 
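As a concrete illustration of the keepalive-induced shutdowns discussed in the FAQ above, the sketch below configures the server-side keepalive parameters; it is not part of the upstream README, and the durations are placeholders chosen for illustration, not recommended values.

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	// MaxConnectionAge forces periodic reconnects (e.g. to re-trigger DNS
	// lookups); MaxConnectionAgeGrace gives in-flight RPCs time to finish
	// before the aged connection is closed, which helps avoid spurious
	// "transport is closing" failures for long-running calls.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
	// Register services here, then serve.
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```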
+ +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..3220d87be --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) 
+ } + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..542594f5c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..0787d0b50 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..ab531f4c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,388 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. 
+type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. 
+ FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. 
+ Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..c883efa0b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,270 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]subConnInfo), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type subConnInfo struct { + subConn balancer.SubConn + attrs *attributes.Attributes +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. 
+ b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + // Strip attributes from addresses before using them as map keys. So + // that when two addresses only differ in attributes pointers (but with + // the same attribute content), they are considered the same address. + // + // Note that this doesn't handle the case where the attribute content is + // different. So if users want to set different attributes to create + // duplicate connections to the same backend, it doesn't work. This is + // fine for now, because duplicate is done by setting Metadata today. + // + // TODO: read attributes to handle duplicate connections. + aNoAttrs := a + aNoAttrs.Attributes = nil + addrsSet[aNoAttrs] = struct{}{} + if scInfo, ok := b.subConns[aNoAttrs]; !ok { + // a is a new address (not existing in b.subConns). + // + // When creating SubConn, the original address with attributes is + // passed through. So that connection configurations in attributes + // (like creds) will be used. + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.scStates[sc] = connectivity.Idle + sc.Connect() + } else { + // Always update the subconn's address in case the attributes + // changed. + // + // The SubConn does a reflect.DeepEqual of the new and old + // addresses. So this is a noop if the current address is the same + // as the old one (including attributes). + scInfo.attrs = a.Attributes + b.subConns[aNoAttrs] = scInfo + b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) + } + } + for a, scInfo := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(scInfo.subConn) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. 
+func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for addr, scInfo := range b.subConns { + if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { + addr.Attributes = scInfo.attrs + readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..e31d76e33 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..a24264a34 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. 
+type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..43c2a1537 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. 
+ subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000..dd8397963 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,267 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + updateCh *buffer.Unbounded + closed *grpcsync.Event + done *grpcsync.Event + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { + break + } + switch u := t.(type) { + case *scStateUpdate: + ccb.balancerMu.Lock() + ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) + ccb.balancerMu.Unlock() + case *acBalancerWrapper: + ccb.mu.Lock() + if ccb.subConns != nil { + delete(ccb.subConns, u) + ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) + } + ccb.mu.Unlock() + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + } + case <-ccb.closed.Done(): + } + + if ccb.closed.HasFired() { + ccb.balancerMu.Lock() + ccb.balancer.Close() + ccb.balancerMu.Unlock() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + ccb.done.Fire() + // Fire done before removing the addr conns. 
We can safely unblock + // ccb.close and allow the removeAddrConns to happen + // asynchronously. + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + return + } + } +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.updateCh.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + return ccb.balancer.UpdateClientConnState(*ccs) +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock + // during switchBalancer() if the old balancer calls RemoveSubConn() in its + // Close(). + ccb.updateCh.Put(sc) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. 
+type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..ed75290cd --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1187 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
+type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. 
+var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identitiy.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port> .
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..9e20e4d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..5cef39295 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1601 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. 
+ errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtInfo, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.Info(logger, cc.channelzID, "Channel Created") + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. 
+ cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) + channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) + } + } + + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { + cc.authority = "localhost" + } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { + cc.authority = "localhost" + cc.parsedTarget.Endpoint + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) 
+ } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. 
+ close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + safeConfigSelector iresolver.SafeConfigSelector + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) + } + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? 
+ } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. 
+ cc.mu.Unlock() + cc.balancerWrapper.close() + cc.mu.Lock() + } + + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtInfo, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. 
+// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
+ cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. 
+func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + ac.state = s + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. 
+ dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + // restart connecting - the top of the loop will set state to + // CONNECTING. This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. + // + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. + } +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +// first successful one. It returns the transport, the address and a Event in +// the successful case. The Event fires when the returned transport disconnects. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return nil, resolver.Address{}, nil, firstConnErr +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. 
The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + // addr.ServerName takes precedent over ClientConn authority, if present. + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority + } + + once := sync.Once{} + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) + return nil, nil, err + } + + select { + case <-time.After(time.Until(connectDeadline)): + // We didn't get the preface in time. + newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. 
+func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. 
+ // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + if channelz.IsOn() { + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(ac.channelzID) + } + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. 
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/LICENSE b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md new file mode 100644 index 000000000..4758125de --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/README.md @@ -0,0 +1,21 @@ +# protoc-gen-go-grpc + +This tool generates Go language bindings of `service`s in protobuf definition +files for gRPC. For usage information, please see our [quick start +guide](https://grpc.io/docs/languages/go/quickstart/). + +## Future-proofing services + +By default, to register services using the methods generated by this tool, the +service implementations must embed the corresponding +`UnimplementedServer` for future compatibility. This is a behavior +change from the grpc code generator previously included with `protoc-gen-go`. +To restore this behavior, set the option `require_unimplemented_servers=false`. +E.g.: + +``` + protoc --go-grpc_out=require_unimplemented_servers=false[,other options...]:. \ +``` + +Note that this is not recommended, and the option is only provided to restore +backward compatibility with previously-generated code. 
diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod new file mode 100644 index 000000000..d0cfd8ebf --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.mod @@ -0,0 +1,5 @@ +module google.golang.org/grpc/cmd/protoc-gen-go-grpc + +go 1.9 + +require google.golang.org/protobuf v1.23.0 diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum new file mode 100644 index 000000000..92baf2631 --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/go.sum @@ -0,0 +1,18 @@ +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go new file mode 100644 index 000000000..1e787344e --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/grpc.go @@ -0,0 +1,430 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package main + +import ( + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/compiler/protogen" + + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + contextPackage = protogen.GoImportPath("context") + grpcPackage = protogen.GoImportPath("google.golang.org/grpc") + codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes") + statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") +) + +// generateFile generates a _grpc.pb.go file containing gRPC service definitions. +func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Services) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + generateFileContent(gen, file, g) + return g +} + +// generateFileContent generates the gRPC service definitions, excluding the package statement. +func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Services) == 0 { + return + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("// Requires gRPC-Go v1.32.0 or later.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion7")) // When changing, update version number above. + g.P() + for _, service := range file.Services { + genService(gen, file, g, service) + } +} + +func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + clientName := service.GoName + "Client" + + g.P("// ", clientName, " is the client API for ", service.GoName, " service.") + g.P("//") + g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.") + + // Client interface. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(clientName, service.Location) + g.P("type ", clientName, " interface {") + for _, method := range service.Methods { + g.Annotate(clientName+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + clientSignature(g, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() + + // NewClient factory. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") + g.P("return &", unexport(clientName), "{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + // Client method implementations. + for _, method := range service.Methods { + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + // Unary RPC method + genClientMethod(gen, file, g, method, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + genClientMethod(gen, file, g, method, streamIndex) + streamIndex++ + } + } + + mustOrShould := "must" + if !*requireUnimplemented { + mustOrShould = "should" + } + + // Server interface. 
+ serverType := service.GoName + "Server" + g.P("// ", serverType, " is the server API for ", service.GoName, " service.") + g.P("// All implementations ", mustOrShould, " embed Unimplemented", serverType) + g.P("// for forward compatibility") + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(serverType, service.Location) + g.P("type ", serverType, " interface {") + for _, method := range service.Methods { + g.Annotate(serverType+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + serverSignature(g, method)) + } + if *requireUnimplemented { + g.P("mustEmbedUnimplemented", serverType, "()") + } + g.P("}") + g.P() + + // Server Unimplemented struct for forward compatibility. + g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + if *requireUnimplemented { + g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") + } + g.P() + + // Unsafe Server interface to opt-out of forward compatibility. + g.P("// Unsafe", serverType, " may be embedded to opt out of forward compatibility for this service.") + g.P("// Use of this interface is not recommended, as added methods to ", serverType, " will") + g.P("// result in compilation errors.") + g.P("type Unsafe", serverType, " interface {") + g.P("mustEmbedUnimplemented", serverType, "()") + g.P("}") + + // Server registration. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + serviceDescVar := service.GoName + "_ServiceDesc" + g.P("func Register", service.GoName, "Server(s ", grpcPackage.Ident("ServiceRegistrar"), ", srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
+ g.P("// ", serviceDescVar, " is the ", grpcPackage.Ident("ServiceDesc"), " for ", service.GoName, " service.") + g.P("// It's only intended for direct use with ", grpcPackage.Ident("RegisterService"), ",") + g.P("// and not to be introspected or modified (even as a copy)") + g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context")) + if !method.Desc.IsStreamingClient() { + s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent) + } + s += ", opts ..." + g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") (" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + s += "*" + g.QualifiedGoIdent(method.Output.GoIdent) + } else { + s += method.Parent.GoName + "_" + method.GoName + "Client" + } + s += ", error)" + return s +} + +func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { + service := method.Parent + sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + g.P("out := new(", method.Output.GoIdent, ")") + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(service.GoName) + method.GoName + "Client" + serviceDescVar := service.GoName + "_ServiceDesc" + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.Desc.IsStreamingClient() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingClient() + genRecv := method.Desc.IsStreamingServer() + genCloseAndRecv := !method.Desc.IsStreamingServer() + + // Stream auxiliary types and methods. 
+ g.P("type ", service.GoName, "_", method.GoName, "Client interface {") + if genSend { + g.P("Send(*", method.Input.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Output.GoIdent, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + var reqArgs []string + ret := "error" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context"))) + ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)" + } + if !method.Desc.IsStreamingClient() { + reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent)) + } + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + } + return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { + service := method.Parent + hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) + + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("in := new(", method.Input.GoIdent, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") + g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name())), ",") + g.P("}") + g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(service.GoName) + method.GoName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + if !method.Desc.IsStreamingClient() { + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", 
service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingServer() + genSendAndClose := !method.Desc.IsStreamingServer() + genRecv := method.Desc.IsStreamingClient() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Server interface {") + if genSend { + g.P("Send(*", method.Output.GoIdent, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", method.Output.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Input.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {") + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} + +const deprecationComment = "// Deprecated: Do not use." + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go new file mode 100644 index 000000000..7f104da7d --- /dev/null +++ b/vendor/google.golang.org/grpc/cmd/protoc-gen-go-grpc/main.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// protoc-gen-go-grpc is a plugin for the Google protocol buffer compiler to +// generate Go code. Install it by building this program and making it +// accessible within your PATH with the name: +// protoc-gen-go-grpc +// +// The 'go-grpc' suffix becomes part of the argument for the protocol compiler, +// such that it can be invoked as: +// protoc --go-grpc_out=. path/to/file.proto +// +// This generates Go service definitions for the protocol buffer defined by +// file.proto. 
With that input, the output will be written to: +// path/to/file_grpc.pb.go +package main + +import ( + "flag" + "fmt" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/types/pluginpb" +) + +const version = "1.1.0" + +var requireUnimplemented *bool + +func main() { + showVersion := flag.Bool("version", false, "print the version and exit") + flag.Parse() + if *showVersion { + fmt.Printf("protoc-gen-go-grpc %v\n", version) + return + } + + var flags flag.FlagSet + requireUnimplemented = flags.Bool("require_unimplemented_servers", true, "set to false to match legacy behavior") + + protogen.Options{ + ParamFunc: flags.Set, + }.Run(func(gen *protogen.Plugin) error { + gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + for _, f := range gen.Files { + if !f.Generate { + continue + } + generateFile(gen, f) + } + return nil + }) +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 000000000..4cdc6ba7c --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. 
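The deprecated Codec interface shown above has been superseded by encoding.Codec, which additionally carries a registry name. As a minimal sketch (the JSON codec and its name are illustrative and not part of this patch), a custom codec can be registered and then selected per call with grpc.CallContentSubtype:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec is an illustrative encoding.Codec backed by encoding/json.
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }

func init() {
	// Registering the codec makes it selectable on a per-call basis via
	// grpc.CallContentSubtype("json").
	encoding.RegisterCodec(codec{})
}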
+# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..11b106182 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. 
Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. 
+ FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..010156261 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. 
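As a usage sketch, not code added by this patch: servers usually surface these canonical codes through the status package, clients read them back the same way, and UnmarshalJSON above accepts either the numeric value or the SCREAMING_SNAKE_CASE name. The findUser handler below is hypothetical.

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// findUser is a hypothetical handler body that returns canonical codes.
func findUser(ctx context.Context, id string) error {
	if id == "" {
		return status.Error(codes.InvalidArgument, "id must not be empty")
	}
	return status.Errorf(codes.NotFound, "user %q not found", id)
}

func main() {
	err := findUser(context.Background(), "42")
	// status.Code extracts the canonical code on the caller side.
	fmt.Println(status.Code(err) == codes.NotFound) // true

	// UnmarshalJSON also accepts the string form of a code.
	var c codes.Code
	_ = json.Unmarshal([]byte(`"NOT_FOUND"`), &c)
	fmt.Println(c) // NotFound
}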
+ Shutdown +) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..7eee7e4ec --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,272 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. 
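A minimal PerRPCCredentials implementation might look like the following sketch; the header name, token and dial helper are made up for illustration and are not part of this patch.

package authexample

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// staticTokenCreds is a hypothetical PerRPCCredentials that attaches a fixed
// bearer token to every outgoing RPC.
type staticTokenCreds struct {
	token string
}

func (c staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity restricts these credentials to secure connections.
func (c staticTokenCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = staticTokenCreds{}

// dial shows how the credentials would be attached to a connection.
func dial(target string, tc credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(tc),
		grpc.WithPerRPCCredentials(staticTokenCreds{token: "example-token"}),
	)
}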
+// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. 
+ Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. 
+ if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go new file mode 100644 index 000000000..ccbf35b33 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go12.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. +func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 000000000..8ee7124f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,233 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
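On the server side, CheckSecurityLevel is typically paired with the peer package to inspect the AuthInfo of the current connection. The interceptor below is an illustrative sketch under that assumption, not code introduced by this patch.

package securityexample

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
)

// requirePrivacy is a hypothetical unary interceptor that rejects calls whose
// transport does not provide both privacy and integrity (e.g. plaintext).
func requirePrivacy(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return nil, status.Error(codes.Unauthenticated, "no transport security")
	}
	if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
		return nil, status.Error(codes.PermissionDenied, err.Error())
	}
	return handler(ctx, req)
}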
+ * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "net/url" + + credinternal "google.golang.org/grpc/internal/credentials" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. 
+func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
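A brief usage sketch for the constructors above; the certificate paths and the dial target are placeholders only.

package tlsexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialWithTLS builds a client connection that verifies the server against the
// given CA bundle.
func dialWithTLS() (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		return nil, err
	}
	return grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
}

// newTLSServer builds a server that presents the given certificate/key pair.
func newTLSServer() (*grpc.Server, error) {
	creds, err := credentials.NewServerTLSFromFile("server.pem", "server.key")
	if err != nil {
		return nil, err
	}
	return grpc.NewServer(grpc.Creds(creds)), nil
}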
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..7a497237b --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,622 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. 
+type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. 
Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the dialer to use the provided ConnectParams. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. 
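Following the suggestion in the comment above, a caller might start from backoff.DefaultConfig and override only the maximum delay; the concrete durations below are arbitrary examples, not recommended values.

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

// connectParams starts from the default backoff configuration and only raises
// MaxDelay, leaving the other parameters untouched.
func connectParams() grpc.DialOption {
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second
	return grpc.WithConnectParams(grpc.ConnectParams{
		Backoff:           bc,
		MinConnectTimeout: 10 * time.Second,
	})
}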
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. 
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. 
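As a sketch of the non-deprecated dialer hook described above, a custom dialer can route connections over a unix domain socket; the socket path is a placeholder.

package dialerexample

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// withUnixDialer returns a DialOption that connects over a unix domain socket
// instead of TCP; the address passed by gRPC is ignored here.
func withUnixDialer(socketPath string) grpc.DialOption {
	return grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
		var d net.Dialer
		// The context carries the dial deadline, so cancellation behaves as
		// the WithContextDialer comment above describes.
		return d.DialContext(ctx, "unix", socketPath)
	})
}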
+func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. 
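+//
+// A sketch of pinning the client to a locally supplied config while ignoring
+// whatever the resolver returns; the JSON is a standard round_robin service
+// config and purely illustrative (see WithDefaultServiceConfig below):
+//
+//  conn, err := grpc.Dial(target,
+//      grpc.WithInsecure(),
+//      grpc.WithDisableServiceConfig(),
+//      grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
+//  )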
+func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used. +// 2. Resolver does not return a service config or if the resolver returns an +// invalid service config. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + disableRetry: !envconfig.Retry, + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + UseProxy: true, + }, + } +} + +// withGetMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
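+//
+// A minimal sketch, assuming exampleBuilder implements resolver.Builder for
+// the "example" scheme (both names are illustrative):
+//
+//  conn, err := grpc.Dial("example:///my-service",
+//      grpc.WithInsecure(),
+//      grpc.WithResolvers(&exampleBuilder{}),
+//  )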
+func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..0022859ad --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..6d84f74c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string + // If a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. 
+ // + // Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 000000000..3009b35af --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +func (codec) Marshal(v interface{}) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (codec) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod new file mode 100644 index 000000000..6eed9370b --- /dev/null +++ b/vendor/google.golang.org/grpc/go.mod @@ -0,0 +1,17 @@ +module google.golang.org/grpc + +go 1.11 + +require ( + github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 + github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.0 + github.com/google/uuid v1.1.2 + golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 + google.golang.org/protobuf v1.25.0 +) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum new file mode 100644 index 000000000..51fd1436e --- /dev/null +++ b/vendor/google.golang.org/grpc/go.sum @@ -0,0 +1,122 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0 h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd 
h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..8358dd6e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) 
+} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..c8bb2be34 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( + "os" + + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + grpclog.Logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + grpclog.Logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + grpclog.Logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + grpclog.Logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + grpclog.Logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + grpclog.Logger.Errorln(args...) 
+} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + grpclog.Logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..ef06a4822 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) 
+} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..4ee33171e --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,221 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" + + "google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. 
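+//
+// A typical use, assuming it runs before any other gRPC activity (the choice
+// of writers is illustrative):
+//
+//  func init() {
+//      grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr))
+//  }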
+func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
+} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh new file mode 100644 index 000000000..15ff9facd --- /dev/null +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +TMP=$(mktemp -d /tmp/sdk.XXX) \ +&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ +&& unzip -q $TMP.zip -d $TMP \ +&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..668e0adcf --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. 
req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. 
+ IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 000000000..5fc0ee3da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + grpcbackoff "google.golang.org/grpc/backoff" + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. 
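+	// As a rough worked example: with the default config (BaseDelay 1s,
+	// Multiplier 1.6, Jitter 0.2, MaxDelay 120s), retries=3 gives
+	// 1s * 1.6^3 = 4.096s before jitter, and the line below spreads that
+	// uniformly over roughly [3.28s, 4.92s].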
+ backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 000000000..3a905d966 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). +func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 000000000..5cc3aeddb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" +) + +// Logger is the global binary logger. It can be used to get binary logger for +// each method. +type Logger interface { + getMethodLogger(methodName string) *MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a methodLogger for each individual method. +var binLogger Logger + +var grpclogLogger = grpclog.Component("binarylog") + +// SetLogger sets the binarg logger. +// +// Only call this at init time. 
+func SetLogger(l Logger) { + binLogger = l +} + +// GetMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// New methodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New methodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. 
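+//
+// For example (illustrative only), with the config "Foo/*{h},Foo/Bar{m:256}"
+// a lookup of "/Foo/Bar" uses the per-method rule (messages truncated to 256
+// bytes), while "/Foo/Baz" falls back to the service rule (headers only).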
+func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 000000000..1ee00a39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 000000000..d8f4e7602 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// NewLoggerFromConfigString reads the string and build a logger. 
It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". 
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..0cdb41831 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,422 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: DefaultSink, // TODO(blog): make it plugable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. 
+ clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. + PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. 
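+//
+// A minimal logging sketch (illustrative only; ml is assumed to be a
+// *MethodLogger obtained from GetMethodLogger("/Foo/Bar") and reqProto a
+// request proto.Message): each RPC event is wrapped in its LogEntryConfig
+// and handed to Log, which fills in the timestamp, call id and sequence id
+// before writing to the sink.
+//
+//	ml.Log(&ClientHeader{
+//		OnClientSide: true,
+//		MethodName:   "/Foo/Bar",
+//		Authority:    "example.com",
+//		Header:       metadata.Pairs("custom-key", "v"),
+//	})
+//	ml.Log(&ClientMessage{OnClientSide: true, Message: reqProto})
+//	ml.Log(&ClientHalfClose{OnClientSide: true})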
+type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. 
+ break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 000000000..7d7a3056b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +) + +var ( + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) Sink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + + writeStartOnce sync.Once + writeTicker *time.Ticker +} + +func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { + // Start the write loop when Write is called. 
+ fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + fs.mu.Unlock() + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..9f6a0c120 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. 
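+//
+// A minimal consumer sketch (illustrative only; handle is a placeholder for
+// whatever processes the value): read from the channel returned by Get and
+// call Load after every read so the next buffered item, if any, is pushed
+// onto the channel.
+//
+//	b := NewUnbounded()
+//	go func() {
+//		for v := range b.Get() {
+//			handle(v) // placeholder handler
+//			b.Load()  // required after every successful read
+//		}
+//	}()
+//	b.Put("some update")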
+func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000..f73141393 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,737 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. 
+type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// This function returns a cleanup function to wait for all channelz state to be reset by the +// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +// to remove some entity just register by the new test, since the id space is the same. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. +func NewChannelzStorage() (cleanup func() error) { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() + return func() error { + var err error + cm := db.get() + if cm == nil { + return nil + } + for i := 0; i < 1000; i++ { + cm.mu.Lock() + if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { + cm.mu.Unlock() + // all things stored in the channelz map have been cleared. + return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. 
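+//
+// A minimal paging sketch (illustrative only; svrID is assumed to be the
+// channelz id of a registered server): fetch the server's sockets in batches,
+// advancing startID past the last returned id until end is reported.
+//
+//	var startID int64
+//	for {
+//		socks, end := GetServerSockets(svrID, startID, 0) // 0 means EntryPerPage
+//		for _, s := range socks {
+//			startID = s.ID + 1
+//			_ = s.SocketData // inspect the socket metrics
+//		}
+//		if end {
+//			break
+//		}
+//	}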
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + logger.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + logger.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. 
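+//
+// A minimal registration sketch (illustrative only; ch, sc and skt are
+// assumed to implement the Channel and Socket interfaces): entities are
+// registered top-down, each child pointing at its parent's channelz id, and
+// removed again with RemoveEntry when the corresponding object closes.
+//
+//	chID := RegisterChannel(ch, 0, "example-target")     // top-level channel (pid 0)
+//	scID := RegisterSubChannel(sc, chID, "10.0.0.1:443") // child of the channel
+//	sktID := RegisterNormalSocket(skt, scID, "socket-1") // child of the subchannel
+//	defer RemoveEntry(sktID)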
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + logger.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +// to the channel trace. +// The Parent field is optional. It is used for event that will be recorded in the entity's parent +// trace also. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d.Desc) + case CtWarning: + l.WarningDepth(depth+1, d.Desc) + case CtError: + l.ErrorDepth(depth+1, d.Desc) + } + } + if getMaxTraceEntry() == 0 { + return + } + db.get().traceEvent(id, desc) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. 
+func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: 
copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..b0013f9c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, args...) + } +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, msg) + } +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, args...) + } +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, msg) + } +} + +// Error logs and adds a trace event if channelz is on. 
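+//
+// A minimal usage sketch (illustrative only; l is assumed to be a component
+// logger and id a previously registered channelz id): when channelz is on the
+// message is also recorded as a trace event on the entity identified by id,
+// otherwise it is only logged.
+//
+//	var l = grpclog.Component("transport")
+//	channelz.Errorf(l, id, "connection error: %v", err)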
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + l.ErrorDepth(1, args...) + } +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + l.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000..3c595d154 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,701 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). 
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. 
+ CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. 
This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). 
+ ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. + LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. 
+type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..692dd6181 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..19c2fc521 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,42 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
+// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 000000000..fdf409d55 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,39 @@ +// +build linux,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 000000000..8864a0811 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,26 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..32c9b5903 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..be70b6cdf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,77 @@ +// +build !appengine + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. 
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go new file mode 100644 index 000000000..af6f57719 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go @@ -0,0 +1,31 @@ +// +build appengine + +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" + "net/url" +) + +// SPIFFEIDFromState is a no-op for appengine builds. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 000000000..f499a614c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,60 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. 
(Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go new file mode 100644 index 000000000..a6144cd66 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..55664fa46 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. 
+func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..73931a94b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..e6f975cbf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. 
+func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. 
+ FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..82af70e96 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..7bc3583b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.Intn(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 000000000..fbe697c37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. 
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..4e7475060 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..8833021da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. skipUnixColonParsing indicates that the parse should +// not parse "unix:[path]" cases. This should be true in cases where a custom +// dialer is present, to prevent a behavior change. +// +// If target is not a valid scheme://authority/endpoint as specified in +// https://github.com/grpc/grpc/blob/master/doc/naming.md, +// it returns {Endpoint: target}. +func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { + var ok bool + if strings.HasPrefix(target, "unix-abstract:") { + if strings.HasPrefix(target, "unix-abstract://") { + // Maybe, with Authority specified, try to parse it + var remain string + ret.Scheme, remain, _ = split2(target, "://") + ret.Authority, ret.Endpoint, ok = split2(remain, "/") + if !ok { + // No Authority, add the "//" back + ret.Endpoint = "//" + remain + } else { + // Found Authority, add the "/" back + ret.Endpoint = "/" + ret.Endpoint + } + } else { + // Without Authority specified, split target on ":" + ret.Scheme, ret.Endpoint, _ = split2(target, ":") + } + return ret + } + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { + // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, + // because splitting on :// only handles the + // "unix://[/absolute/path]" case. Only handle if the dialer is nil, + // to avoid a behavior change with custom dialers. + return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} + } + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + if ret.Scheme == "unix" { + // Add the "/" back in the unix case, so the unix resolver receives the + // actual endpoint in the "unix://[/absolute/path]" case. + ret.Endpoint = "/" + ret.Endpoint + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..1b596bf35 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,88 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // ParseServiceConfigForTesting is for creating a fake + // ClientConn for resolver testing only + ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). 
+// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..302262613 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(metadata.MD) + return md +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(mdKey, md) + return addr +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..5e7f36703 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,164 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. 
+package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. 
+ // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is unimplementable; do not use. +type ServerInterceptor interface { + notDefined() +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValues(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..03825bbe7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,463 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
+package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint, defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). 
+ ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. + d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. 
+// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 000000000..8783a8cf8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. + return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..520d9229e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..0d5a811dd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + addr := resolver.Address{Addr: target.Endpoint} + if b.scheme == unixAbstractScheme { + // prepend "\x00" to address for unix-abstract + addr.Addr = "\x00" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..c0634d152 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. 
+ MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..710223b8d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. 
+func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{e: s.Proto()} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} + +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.e, tse.e) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 000000000..4b2964f2a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,114 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + logger.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 000000000..7913ef1db --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,76 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once +var logger = grpclog.Component("core") + +func log() { + once.Do(func() { + logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +// It always returns 0 under non-linux or appengine environment. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux or appengine environment. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux or appengine environment. +func GetRusage() *Rusage { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux or appengine environment. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +// a negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 000000000..070680edb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows will be increased + // to. TCP typically limits this to 4MB, but some systems go up to 16MB. + // Since this is only a limit, it is safe to make it optimistic. + bdpLimit = (1 << 20) * 16 + // alpha is a constant factor used to keep a moving average + // of RTTs. 
+ alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 000000000..45532f8ae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,980 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. 
+ cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + streamID uint32 + contentSubtype string + status *status.Status +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. 
+// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + select { + case <-ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + c.trfChan.Store(make(chan struct{})) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. 
+ for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. 
+// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter.run returning. %v", err) + } + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. 
+ str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. + return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. 
+ if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: "200"}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. 
+ // As an optimization to keep wire traffic low, data from d is copied to h to make it as big as the
+ // maximum possible HTTP2 frame size.
+
+ if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ // Client sends out empty data frame with endStream = true
+ if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+ return false, err
+ }
+ str.itl.dequeue() // remove the empty data item from stream
+ if str.itl.isEmpty() {
+ str.state = empty
+ } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+ if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+ return false, err
+ }
+ if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+ return false, nil
+ }
+ } else {
+ l.activeStreams.enqueue(str)
+ }
+ return false, nil
+ }
+ var (
+ buf []byte
+ )
+ // Figure out the maximum size we can send
+ maxSize := http2MaxFrameLen
+ if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+ str.state = waitingOnStreamQuota
+ return false, nil
+ } else if maxSize > strQuota {
+ maxSize = strQuota
+ }
+ if maxSize > int(l.sendQuota) { // connection-level flow control.
+ maxSize = int(l.sendQuota)
+ }
+ // Compute how much of the header and data we can send within quota and max frame length
+ hSize := min(maxSize, len(dataItem.h))
+ dSize := min(maxSize-hSize, len(dataItem.d))
+ if hSize != 0 {
+ if dSize == 0 {
+ buf = dataItem.h
+ } else {
+ // We can add some data to grpc message header to distribute bytes more equally across frames.
+ // Copy on the stack to avoid generating garbage
+ var localBuf [http2MaxFrameLen]byte
+ copy(localBuf[:hSize], dataItem.h)
+ copy(localBuf[hSize:], dataItem.d[:dSize])
+ buf = localBuf[:hSize+dSize]
+ }
+ } else {
+ buf = dataItem.d
+ }
+
+ size := hSize + dSize
+
+ // Now that outgoing flow controls are checked we can replenish str's write quota
+ str.wq.replenish(size)
+ var endStream bool
+ // If this is the last data message on this stream and all of it can be written in this iteration.
+ if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ endStream = true
+ }
+ if dataItem.onEachWrite != nil {
+ dataItem.onEachWrite()
+ }
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ return false, err
+ }
+ str.bytesOutStanding += size
+ l.sendQuota -= uint32(size)
+ dataItem.h = dataItem.h[hSize:]
+ dataItem.d = dataItem.d[dSize:]
+
+ if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ str.itl.dequeue()
+ }
+ if str.itl.isEmpty() {
+ str.state = empty
+ } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+ if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+ return false, err
+ }
+ if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+ return false, err
+ }
+ } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+ str.state = waitingOnStreamQuota
+ } else { // Otherwise add it back to the list of active streams.
+ l.activeStreams.enqueue(str) + } + return false, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..9fa306b2e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 000000000..f262edd8e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,217 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. 
+ // It is implemented as a field so that it can be updated + // by tests. + replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. 
+ if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..1c3459c2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,462 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() { + ht.closeOnce.Do(ht.closeCloseChanOnce) +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. 
+ // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 000000000..d10b6aacb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1648 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string
+ md metadata.MD
+ conn net.Conn // underlying communication channel
+ loopy *loopyWriter
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+
+ readerDone chan struct{} // sync point to enable testing.
+ writerDone chan struct{} // sync point to enable testing.
+ // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+ // that the server sent GoAway on this transport.
+ goAway chan struct{}
+
+ framer *framer
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *controlBuffer
+ fc *trInFlow
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ isSecure bool
+
+ perRPCCreds []credentials.PerRPCCredentials
+
+ kp keepalive.ClientParameters
+ keepaliveEnabled bool
+
+ statsHandler stats.Handler
+
+ initialWindowSize int32
+
+ // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+ maxSendHeaderListSize *uint32
+
+ bdpEst *bdpEstimator
+ // onPrefaceReceipt is a callback that client transport calls upon
+ // receiving server preface to signal that a successful HTTP2
+ // connection was established.
+ onPrefaceReceipt func()
+
+ maxConcurrentStreams uint32
+ streamQuota int64
+ streamsQuotaAvailable chan struct{}
+ waitingStreams uint32
+ nextID uint32
+
+ mu sync.Mutex // guard the following variables
+ state transportState
+ activeStreams map[uint32]*Stream
+ // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
+ prevGoAwayID uint32
+ // goAwayReason records the http2.ErrCode and debug data received with the
+ // GoAway frame.
+ goAwayReason GoAwayReason
+ // goAwayDebugMessage contains a detailed human readable string about a
+ // GoAway frame, useful for error messages.
+ goAwayDebugMessage string
+ // A condition variable used to signal when the keepalive goroutine should
+ // go dormant. The condition for dormancy is based on the number of active
+ // streams and the `PermitWithoutStream` keepalive client parameter. And
+ // since the number of active streams is guarded by the above mutex, we use
+ // the same for this condition variable as well.
+ kpDormancyCond *sync.Cond
+ // A boolean to track whether the keepalive goroutine is dormant or not.
+ // This is checked before attempting to signal the above condition
+ // variable.
+ kpDormant bool
+
+ // Fields below are for channelz metric collection.
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+
+ onGoAway func(GoAwayReason)
+ onClose func()
+
+ bufferPool *bufferPool
+
+ connectionID uint64
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+ address := addr.Addr
+ networkType, ok := networktype.Get(addr)
+ if fn != nil {
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+ // For backward compatibility, if the user dialed "unix:///path",
+ // the passthrough resolver would be used and the user's custom
+ // dialer would see "unix:///path". Since the unix resolver is used
+ // and the address is now "/path", prepend "unix://" so the user's
+ // custom dialer sees the same address.
+ return fn(ctx, "unix://"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. 
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. 
+ n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. 
+ s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
+ timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = strings.ToLower(v) + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
+ if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close(err error) { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(ErrConnClosing) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. 
+func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. 
+ s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. 
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+ if logger.V(logLevel) {
+ logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 == 0 {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+ return
+ }
+ // A client can receive multiple GoAways from the server (see
+ // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+ // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+ // sent after an RTT delay with the ID of the last stream the server will
+ // process.
+ //
+ // Therefore, when we get the first GoAway we don't necessarily close any
+ // streams. In case of a second GoAway we close all streams created after
+ // the GoAwayId. This way streams that were in-flight while the GoAway from
+ // server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ t.controlBuf.put(&incomingGoAway{})
+ // Notify the clientconn about the GOAWAY before we set the state to
+ // draining, to allow the client to stop attempting to create streams
+ // before disallowing new streams on this connection.
+ t.onGoAway(t.goAwayReason)
+ t.state = draining
+ }
+ // All streams with IDs greater than the GoAwayId
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ }
+ }
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) +} + +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason, t.goAwayDebugMessage +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr string + grpcMessage string + statusGen *status.Status + + httpStatus string + rawStatus string + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", hf.Value) + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case "grpc-status": + rawStatus = hf.Value + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + httpStatus = hf.Value + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC { + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + httpStatusCode int + ) + + if httpStatus != "" { + c, err := strconv.ParseInt(httpStatus, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + httpStatusCode = int(c) + + var ok bool + code, ok = HTTPStatusConvTab[httpStatusCode] + if !ok { + code = codes.Unknown + } + } + + // Verify the HTTP response is a 200. + se := status.New(code, constructHTTPErrMsg(&httpStatusCode, contentTypeErr)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + if len(mdata) > 0 { + s.header = mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. 
+ s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + if statusGen == nil { + rawStatusCode := codes.Unknown + if rawStatus != "" { + code, err := strconv.ParseInt(rawStatus, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + } + statusGen = status.New(rawStatusCode, grpcMessage) + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame, false)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+ }
+}
+
+func minTime(a, b time.Duration) time.Duration {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+ p := &ping{data: [8]byte{}}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ timeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we go block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ timer := time.NewTimer(t.kp.Time)
+ for {
+ select {
+ case <-timer.C:
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were here.
+ outstandingPing = false
+ // Next timer should fire at kp.Time seconds from lastRead time.
+ timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
+ continue
+ }
+ if outstandingPing && timeoutLeft <= 0 {
+ t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+ return
+ }
+ t.mu.Lock()
+ if t.state == closing {
+ // If the transport is closing, we should exit from the
+ // keepalive goroutine here. If not, we could have a race
+ // between the call to Signal() from Close() and the call to
+ // Wait() here, whereby the keepalive goroutine ends up
+ // blocking on the condition variable which will never be
+ // signalled again.
+ t.mu.Unlock()
+ return
+ }
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // If a ping was sent out previously (because there were active
+ // streams at that point) which wasn't acked and its timeout
+ // hadn't fired, but we got here and are about to go dormant,
+ // we should make sure that we unconditionally send a ping once
+ // we awaken.
+ outstandingPing = false
+ t.kpDormant = true
+ t.kpDormancyCond.Wait()
+ }
+ t.kpDormant = false
+ t.mu.Unlock()
+
+ // We get here either because we were dormant and a new stream was
+ // created which unblocked the Wait() call, or because the
+ // keepalive timer expired. In both cases, we need to send a ping.
+ if !outstandingPing {
+ if channelz.IsOn() {
+ atomic.AddInt64(&t.czData.kpCount, 1)
+ }
+ t.controlBuf.put(p)
+ timeoutLeft = t.kp.Timeout
+ outstandingPing = true
+ }
+ // The amount of time to sleep here is the minimum of kp.Time and
+ // timeoutLeft. This will ensure that we wait only for kp.Time
+ // before sending out the next ping (for cases where the ping is
+ // acked).
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..e3799d50a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1347 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when Drain() is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. 
+ // When the connection is busy, this value is set to 0.
+ idle time.Time
+
+ // Fields below are for channelz metric collection.
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+ bufferPool *bufferPool
+
+ connectionID uint64
+}
+
+// NewServerTransport creates an http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+ writeBufSize := config.WriteBufferSize
+ readBufSize := config.ReadBufferSize
+ maxHeaderListSize := defaultServerMaxHeaderListSize
+ if config.MaxHeaderListSize != nil {
+ maxHeaderListSize = *config.MaxHeaderListSize
+ }
+ framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+ // Send initial settings as connection preface to client.
+ isettings := []http2.Setting{{
+ ID: http2.SettingMaxFrameSize,
+ Val: http2MaxFrameLen,
+ }}
+ // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+ // permitted in the HTTP2 spec.
+ maxStreams := config.MaxStreams
+ if maxStreams == 0 {
+ maxStreams = math.MaxUint32
+ } else {
+ isettings = append(isettings, http2.Setting{
+ ID: http2.SettingMaxConcurrentStreams,
+ Val: maxStreams,
+ })
+ }
+ dynamicWindow := true
+ iwz := int32(initialWindowSize)
+ if config.InitialWindowSize >= defaultWindowSize {
+ iwz = config.InitialWindowSize
+ dynamicWindow = false
+ }
+ icwz := int32(initialWindowSize)
+ if config.InitialConnWindowSize >= defaultWindowSize {
+ icwz = config.InitialConnWindowSize
+ dynamicWindow = false
+ }
+ if iwz != defaultWindowSize {
+ isettings = append(isettings, http2.Setting{
+ ID: http2.SettingInitialWindowSize,
+ Val: uint32(iwz)})
+ }
+ if config.MaxHeaderListSize != nil {
+ isettings = append(isettings, http2.Setting{
+ ID: http2.SettingMaxHeaderListSize,
+ Val: *config.MaxHeaderListSize,
+ })
+ }
+ if config.HeaderTableSize != nil {
+ isettings = append(isettings, http2.Setting{
+ ID: http2.SettingHeaderTableSize,
+ Val: *config.HeaderTableSize,
+ })
+ }
+ if err := framer.fr.WriteSettings(isettings...); err != nil {
+ return nil, connectionErrorf(false, err, "transport: %v", err)
+ }
+ // Adjust the connection flow control window if needed.
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+ if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
+ return nil, connectionErrorf(false, err, "transport: %v", err)
+ }
+ }
+ kp := config.KeepaliveParams
+ if kp.MaxConnectionIdle == 0 {
+ kp.MaxConnectionIdle = defaultMaxConnectionIdle
+ }
+ if kp.MaxConnectionAge == 0 {
+ kp.MaxConnectionAge = defaultMaxConnectionAge
+ }
+ // Add a jitter to MaxConnectionAge.
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + done := make(chan struct{}) + t := &http2Server{ + ctx: context.Background(), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Skipping the error here will help + // reduce log clutter. + if err == io.EOF { + return nil, nil + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
+ if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + s.cancel() + return true + } + t.maxStreamID = streamID + if httpMethod != http.MethodPost { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + } + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + s.cancel() + return false + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + }) + return false + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. 
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } + return false + } + } + return true +} + +// WriteHeader sends the header metadata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + if t.stats != nil { + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } + t.stats.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. +// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + if s.getState() == streamDone { + return nil + } + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. 
+ rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + if t.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return status.Errorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + } + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. 
+ t.Drain() + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain() + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. 
+ return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 000000000..15d775fca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,444 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } + logger = grpclog.Component("transport") +) + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. 
+func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. +func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string { + var errMsgs []string + + if httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(httpStatus)), *httpStatus)) + } + + if contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. 
+// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. 
+ f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..7bb53cff1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValues(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 000000000..a662bf39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,142 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. 
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..141981264 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,804 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
+ return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. + copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. 
+ r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. 
+func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + if !s.headerValid { + return nil, s.status.Err() + } + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. 
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int + ChannelzParentID int64 + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client + // connection. Only one of TransportCredentials and CredsBundle is non-nil. + TransportCredentials credentials.TransportCredentials + // CredsBundle is the credentials bundle to be used. Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". 
The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. 
All the pending streams and their + // handlers will be terminated asynchronously. + Close() + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. 
+ streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 000000000..3677c3f04 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 000000000..34d31b5e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. 
+package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. 
+} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..8d9686375 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,240 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "context" + "fmt" + "strings" +) + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. 
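Returning briefly to the keepalive package vendored just above: its ClientParameters, ServerParameters, and EnforcementPolicy values are plain option structs passed through grpc.WithKeepaliveParams on the client and grpc.KeepaliveParams / grpc.KeepaliveEnforcementPolicy on the server. A minimal sketch with arbitrary, illustrative durations; real values need to be coordinated between both sides, as the package comments warn:

package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// Illustrative values only. The client Time respects the 10s minimum noted in
// ClientParameters, and MinTime on the server is kept below the client's Time
// so the pings are not treated as abusive.
var clientDialOpts = []grpc.DialOption{
	grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                30 * time.Second, // ping after 30s without activity
		Timeout:             10 * time.Second, // wait 10s for the ping ack
		PermitWithoutStream: true,             // also ping while there are no active RPCs
	}),
}

var serverOpts = []grpc.ServerOption{
	grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle: 5 * time.Minute,
		Time:              2 * time.Minute,
		Timeout:           20 * time.Second,
	}),
	grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
		MinTime:             20 * time.Second, // reject pings arriving more often than this
		PermitWithoutStream: true,
	}),
}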
+func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. 
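Taken together, the helpers above form the whole client and server workflow for custom metadata: attach outgoing pairs to the context before issuing an RPC, and read the incoming map inside a handler or interceptor. A short sketch; the header key and logging are illustrative, and the peer lookup uses the peer package vendored further down in this patch:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// Client side: attach outgoing metadata to the context used for the RPC.
func withAuth(ctx context.Context, token string) context.Context {
	ctx = metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer "+token)
	// Equivalent bulk form, which overwrites anything appended earlier:
	// ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("authorization", "Bearer "+token))
	return ctx
}

// Server side: read the incoming metadata and the caller's address inside a
// handler or interceptor.
func logCaller(ctx context.Context) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		log.Printf("user-agent: %v", md.Get("user-agent"))
	}
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("peer: %v", p.Addr)
	}
}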
+func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..e01d219ff --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000..a58174b6f --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,177 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + return &pickerWrapper{blockingCh: make(chan struct{})} +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return + } + pw.picker = p + // pw.blockingCh should never be nil. + close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + var lastPickErr error + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + return nil, nil, err + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. 
+ if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..b858c2a5e --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,136 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) + } + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + } + if b.sc != sc { + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: s.ConnectionError}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000..0a1e975ad --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
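pickfirstBalancer registers itself under PickFirstBalancerName in an init function and is the policy gRPC falls back to by default; any other registered policy can be selected by name through the service config. A sketch under the assumption that the illustrative target below resolves via DNS; it selects the round_robin policy that ships with gRPC, and a custom balancer would be chosen the same way by its Name():

package example

import (
	"log"

	"google.golang.org/grpc"
)

func dialRoundRobin() (*grpc.ClientConn, error) {
	// Custom balancers follow the same Register-in-init pattern shown above
	// for pick_first and are selected here by the name their Builder reports.
	conn, err := grpc.Dial(
		"dns:///service.example.internal:443", // illustrative target
		grpc.WithInsecure(),                   // no TLS, for brevity only
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`),
	)
	if err != nil {
		log.Printf("dial failed: %v", err)
	}
	return conn, err
}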
+ * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 000000000..dfd3226a1 --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) 
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + -I${WORKDIR}/istio \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + -I${WORKDIR}/istio \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +# grpc/testing does not have a go_package option. 
+mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 000000000..6a9d234a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "net" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. + GRPCLB +) + +// Address represents a server the client connects to. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // If Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata interface{} +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. 
This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. 
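The Builder and Resolver interfaces above are all that is needed for a custom name-resolution scheme: register a Builder in an init function and its Scheme() becomes usable in Dial targets. A minimal, static sketch; the scheme name and addresses are invented for illustration:

package example

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder resolves any "static:///..." target to a fixed address list.
type staticBuilder struct {
	addrs []resolver.Address
}

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push the full address list once; a real resolver would push again
	// whenever the backing data changes.
	if err := cc.UpdateState(resolver.State{Addresses: b.addrs}); err != nil {
		return nil, err
	}
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

// ResolveNow is only a hint; the static list never changes, so it is a no-op.
func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func init() {
	resolver.Register(&staticBuilder{addrs: []resolver.Address{
		{Addr: "10.0.0.10:50051"},
		{Addr: "10.0.0.11:50051"},
	}})
}

A client would then dial a target such as "static:///anything", and the fixed address list above is handed to the balancer through UpdateState.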
+// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000..2c47cd54f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. 
+ ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return nil + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtInfo, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..1831a73e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,914 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. 
+ Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. 
+type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. 
See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. 
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. 
If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. 
+func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. 
+const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..d90f3fcd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1868 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// serviceInfo wraps information about a service. 
It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData + + serverWorkerChannels []chan *serverWorkerData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. 
+// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) 
+ }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. 
+func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. 
+ Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of <package>.<service>. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil.
+ <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // In deployments where a gRPC server runs behind a cloud load + // balancer which performs regular TCP level health checks, the + // connection is closed immediately by the latter. Skipping the + // error here will help reduce log clutter. + if err != io.EOF { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + } + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(lisAddr, st) + }() +} + +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + + var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. 
+// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. 
+ interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. + defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. 
If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. 
+ binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + sh := s.opts.statsHandler + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. 
+ // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + conns := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for _, cs := range conns { + for st := range cs { + st.Close() + } + } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. 
+func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..22c4240cf --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancerName will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. 
+ rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
+type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfigForTesting = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring 
retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + if rp.MaxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.MaxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..73a2f9266 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
+type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..dc03731e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..63e476ee7 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,312 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. 
+ Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. 
+func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. 
+func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..54d187186 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,129 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representing err if it was produced by this +// package or has a method `GRPCStatus() *Status`. +// If err is nil, a Status is returned with codes.OK and no message. +// Otherwise, ok is false and a Status is returned with codes.Unknown and +// the original error message. 
+func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return nil + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..1f3e70d2c --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1600 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. 
+type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. 
+ // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) 
+ } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. 
+ newAttempt.finish(retErr) + } + }() + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + + ctx := cs.ctx + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. 
+type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return nil + } + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. 
+ if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. 
+ cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? 
+ return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + cs.mu.Unlock() + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. + if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. 
+ } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.cs.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.cs.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here? + return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. + c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. 
+ ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. 
+ return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. 
+ err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. 
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..dbf34e6bb --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. 
If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..07a2d26b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. 
a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..3dd146afb --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.39.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 000000000..5eaa8b05d --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version + +if [[ "$1" = "-install" ]]; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=3.14.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/runner/go + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif not which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Ensure all source files contain a copyright message. 
+not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + +# - Ensure all ptypes proto packages are renamed when importing. +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +# - Check imports that are illegal in appengine (until Go 1.11). +# TODO: Remove when we drop Go 1.10 support +go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go + +misspell -error . + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + + go mod tidy + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +done + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. 
+not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +BuildVersion is deprecated +balancer.ErrTransientFailure +balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.CustomCodec +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithBalancerName +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated +resolver.Backend +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. + if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS new file mode 100644 index 000000000..2b00ddba0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1fbd3e976 --- /dev/null +++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE new file mode 100644 index 000000000..49ea0f928 --- /dev/null +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/google.golang.org/protobuf/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go new file mode 100644 index 000000000..369df13da --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal_gengo + +import ( + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/encoding/protowire" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type fileInfo struct { + *protogen.File + + allEnums []*enumInfo + allMessages []*messageInfo + allExtensions []*extensionInfo + + allEnumsByPtr map[*enumInfo]int // value is index into allEnums + allMessagesByPtr map[*messageInfo]int // value is index into allMessages + allMessageFieldsByPtr map[*messageInfo]*structFields + + // needRawDesc specifies whether the generator should emit logic to provide + // the legacy raw descriptor in GZIP'd form. + // This is updated by enum and message generation logic as necessary, + // and checked at the end of file generation. + needRawDesc bool +} + +type structFields struct { + count int + unexported map[int]string +} + +func (sf *structFields) append(name string) { + if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) { + if sf.unexported == nil { + sf.unexported = make(map[int]string) + } + sf.unexported[sf.count] = name + } + sf.count++ +} + +func newFileInfo(file *protogen.File) *fileInfo { + f := &fileInfo{File: file} + + // Collect all enums, messages, and extensions in "flattened ordering". + // See filetype.TypeBuilder. + var walkMessages func([]*protogen.Message, func(*protogen.Message)) + walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) { + for _, m := range messages { + f(m) + walkMessages(m.Messages, f) + } + } + initEnumInfos := func(enums []*protogen.Enum) { + for _, enum := range enums { + f.allEnums = append(f.allEnums, newEnumInfo(f, enum)) + } + } + initMessageInfos := func(messages []*protogen.Message) { + for _, message := range messages { + f.allMessages = append(f.allMessages, newMessageInfo(f, message)) + } + } + initExtensionInfos := func(extensions []*protogen.Extension) { + for _, extension := range extensions { + f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension)) + } + } + initEnumInfos(f.Enums) + initMessageInfos(f.Messages) + initExtensionInfos(f.Extensions) + walkMessages(f.Messages, func(m *protogen.Message) { + initEnumInfos(m.Enums) + initMessageInfos(m.Messages) + initExtensionInfos(m.Extensions) + }) + + // Derive a reverse mapping of enum and message pointers to their index + // in allEnums and allMessages. 
+ if len(f.allEnums) > 0 { + f.allEnumsByPtr = make(map[*enumInfo]int) + for i, e := range f.allEnums { + f.allEnumsByPtr[e] = i + } + } + if len(f.allMessages) > 0 { + f.allMessagesByPtr = make(map[*messageInfo]int) + f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields) + for i, m := range f.allMessages { + f.allMessagesByPtr[m] = i + f.allMessageFieldsByPtr[m] = new(structFields) + } + } + + return f +} + +type enumInfo struct { + *protogen.Enum + + genJSONMethod bool + genRawDescMethod bool +} + +func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo { + e := &enumInfo{Enum: enum} + e.genJSONMethod = true + e.genRawDescMethod = true + return e +} + +type messageInfo struct { + *protogen.Message + + genRawDescMethod bool + genExtRangeMethod bool + + isTracked bool + hasWeak bool +} + +func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo { + m := &messageInfo{Message: message} + m.genRawDescMethod = true + m.genExtRangeMethod = true + m.isTracked = isTrackedMessage(m) + for _, field := range m.Fields { + m.hasWeak = m.hasWeak || field.Desc.IsWeak() + } + return m +} + +// isTrackedMessage reports whether field tracking is enabled on the message. +func isTrackedMessage(m *messageInfo) (tracked bool) { + const trackFieldUse_fieldNumber = 37383685 + + // Decode the option from unknown fields to avoid a dependency on the + // annotation proto from protoc-gen-go. + b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown() + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + if num == trackFieldUse_fieldNumber && typ == protowire.VarintType { + v, _ := protowire.ConsumeVarint(b) + tracked = protowire.DecodeBool(v) + } + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + return tracked +} + +type extensionInfo struct { + *protogen.Extension +} + +func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo { + x := &extensionInfo{Extension: extension} + return x +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go new file mode 100644 index 000000000..d34efa9b1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -0,0 +1,884 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal_gengo is internal to the protobuf module. +package internal_gengo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/version" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +// SupportedFeatures reports the set of supported protobuf language features. +var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + +// GenerateVersionMarkers specifies whether to generate version markers. +var GenerateVersionMarkers = true + +// Standard library dependencies. 
+const ( + base64Package = protogen.GoImportPath("encoding/base64") + mathPackage = protogen.GoImportPath("math") + reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") + syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") +) + +// Protobuf library dependencies. +// +// These are declared as an interface type so that they can be more easily +// patched to support unique build environments that impose restrictions +// on the dependencies of generated source code. +var ( + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") +) + +type goImportPath interface { + String() string + Ident(string) protogen.GoIdent +} + +// GenerateFile generates the contents of a .pb.go file. +func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + filename := file.GeneratedFilenamePrefix + ".pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + f := newFileInfo(file) + + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) + genGeneratedHeader(gen, g, f) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) + g.P() + + // Emit a static check that enforces a minimum version of the proto package. + if GenerateVersionMarkers { + g.P("const (") + g.P("// Verify that this generated code is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")") + g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")") + g.P(")") + g.P() + } + + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + genImport(gen, g, f, imps.Get(i)) + } + for _, enum := range f.allEnums { + genEnum(g, f, enum) + } + for _, message := range f.allMessages { + genMessage(g, f, message) + } + genExtensions(g, f) + + genReflectFileDescriptor(gen, g, f) + + return g +} + +// genStandaloneComments prints all leading comments for a FileDescriptorProto +// location identified by the field number n. +func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) { + loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n}) + for _, s := range loc.LeadingDetachedComments { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.LeadingComments; s != "" { + g.P(protogen.Comments(s)) + g.P() + } +} + +func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("// Code generated by protoc-gen-go. 
DO NOT EDIT.") + + if GenerateVersionMarkers { + g.P("// versions:") + protocGenGoVersion := version.String() + protocVersion := "(unknown)" + if v := gen.Request.GetCompilerVersion(); v != nil { + protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + if s := v.GetSuffix(); s != "" { + protocVersion += "-" + s + } + } + g.P("// \tprotoc-gen-go ", protocGenGoVersion) + g.P("// \tprotoc ", protocVersion) + } + + if f.Proto.GetOptions().GetDeprecated() { + g.P("// ", f.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", f.Desc.Path()) + } + g.P() +} + +func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) { + impFile, ok := gen.FilesByPath[imp.Path()] + if !ok { + return + } + if impFile.GoImportPath == f.GoImportPath { + // Don't generate imports or aliases for types in the same Go package. + return + } + // Generate imports for all non-weak dependencies, even if they are not + // referenced, because other code and tools depend on having the + // full transitive closure of protocol buffer types in the binary. + if !imp.IsWeak { + g.Import(impFile.GoImportPath) + } + if !imp.IsPublic { + return + } + + // Generate public imports by generating the imported file, parsing it, + // and extracting every symbol that should receive a forwarding declaration. + impGen := GenerateFile(gen, impFile) + impGen.Skip() + b, err := impGen.Content() + if err != nil { + gen.Error(err) + return + } + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments) + if err != nil { + gen.Error(err) + return + } + genForward := func(tok token.Token, name string, expr ast.Expr) { + // Don't import unexported symbols. + r, _ := utf8.DecodeRuneInString(name) + if !unicode.IsUpper(r) { + return + } + // Don't import the FileDescriptor. + if name == impFile.GoDescriptorIdent.GoName { + return + } + // Don't import decls referencing a symbol defined in another package. + // i.e., don't import decls which are themselves public imports: + // + // type T = somepackage.T + if _, ok := expr.(*ast.SelectorExpr); ok { + return + } + g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name)) + } + g.P("// Symbols defined in public import of ", imp.Path(), ".") + g.P() + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + genForward(decl.Tok, spec.Name.Name, spec.Type) + case *ast.ValueSpec: + for i, name := range spec.Names { + var expr ast.Expr + if i < len(spec.Values) { + expr = spec.Values[i] + } + genForward(decl.Tok, name.Name, expr) + } + case *ast.ImportSpec: + default: + panic(fmt.Sprintf("can't generate forward for spec type %T", spec)) + } + } + } + } + g.P() +} + +func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + // Enum type declaration. + g.Annotate(e.GoIdent.GoName, e.Location) + leadingComments := appendDeprecationSuffix(e.Comments.Leading, + e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated()) + g.P(leadingComments, + "type ", e.GoIdent, " int32") + + // Enum value constants. 
+ g.P("const (") + for _, value := range e.Values { + g.Annotate(value.GoIdent.GoName, value.Location) + leadingComments := appendDeprecationSuffix(value.Comments.Leading, + value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated()) + g.P(leadingComments, + value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(), + trailingComment(value.Comments.Trailing)) + } + g.P(")") + g.P() + + // Enum value maps. + g.P("// Enum value maps for ", e.GoIdent, ".") + g.P("var (") + g.P(e.GoIdent.GoName+"_name", " = map[int32]string{") + for _, value := range e.Values { + duplicate := "" + if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) { + duplicate = "// Duplicate value: " + } + g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",") + } + g.P("}") + g.P(e.GoIdent.GoName+"_value", " = map[string]int32{") + for _, value := range e.Values { + g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",") + } + g.P("}") + g.P(")") + g.P() + + // Enum method. + // + // NOTE: A pointer value is needed to represent presence in proto2. + // Since a proto2 message can reference a proto3 enum, it is useful to + // always generate this method (even on proto3 enums) to support that case. + g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {") + g.P("p := new(", e.GoIdent, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + + // String method. + g.P("func (x ", e.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))") + g.P("}") + g.P() + + genEnumReflectMethods(g, f, e) + + // UnmarshalJSON method. + if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 { + g.P("// Deprecated: Do not use.") + g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {") + g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)") + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", e.GoIdent, "(num)") + g.P("return nil") + g.P("}") + g.P() + } + + // EnumDescriptor method. + if e.genRawDescMethod { + var indexes []string + for i := 1; i < len(e.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i]))) + } + g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.") + g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + if m.Desc.IsMapEntry() { + return + } + + // Message type declaration. 
+ g.Annotate(m.GoIdent.GoName, m.Location) + leadingComments := appendDeprecationSuffix(m.Comments.Leading, + m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated()) + g.P(leadingComments, + "type ", m.GoIdent, " struct {") + genMessageFields(g, f, m) + g.P("}") + g.P() + + genMessageKnownFunctions(g, f, m) + genMessageDefaultDecls(g, f, m) + genMessageMethods(g, f, m) + genMessageOneofWrapperTypes(g, f, m) +} + +func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + sf := f.allMessageFieldsByPtr[m] + genMessageInternalFields(g, f, m, sf) + for _, field := range m.Fields { + genMessageField(g, f, m, field, sf) + } +} + +func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) + if m.hasWeak { + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) + } + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) + if m.Desc.ExtensionRanges().Len() > 0 { + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + sf.append(genid.ExtensionFields_goname) + } + if sf.count > 0 { + g.P() + } +} + +func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) { + if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() { + // It would be a bit simpler to iterate over the oneofs below, + // but generating the field here keeps the contents of the Go + // struct in the same order as the contents of the source + // .proto file. + if oneof.Fields[0] != field { + return // only generate for first appearance + } + + tags := structTags{ + {"protobuf_oneof", string(oneof.Desc.Name())}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + + g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location) + leadingComments := oneof.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)} + for _, field := range oneof.Fields { + ss = append(ss, "\t*"+field.GoIdent.GoName+"\n") + } + leadingComments += protogen.Comments(strings.Join(ss, "")) + g.P(leadingComments, + oneof.GoName, " ", oneofInterfaceName(oneof), tags) + sf.append(oneof.GoName) + return + } + goType, pointer := fieldGoType(g, f, field) + if pointer { + goType = "*" + goType + } + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + {"json", fieldJSONTagValue(field)}, + } + if field.Desc.IsMap() { + key := field.Message.Fields[0] + val := field.Message.Fields[1] + tags = append(tags, structTags{ + {"protobuf_key", fieldProtobufTagValue(key)}, + {"protobuf_val", fieldProtobufTagValue(val)}, + }...) + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + + name := field.GoName + if field.Desc.IsWeak() { + name = genid.WeakFieldPrefix_goname + name + } + g.Annotate(m.GoIdent.GoName+"."+name, field.Location) + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + name, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + sf.append(field.GoName) +} + +// genMessageDefaultDecls generates consts and vars holding the default +// values of fields. +func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + var consts, vars []string + for _, field := range m.Fields { + if !field.Desc.HasDefault() { + continue + } + name := "Default_" + m.GoIdent.GoName + "_" + field.GoName + goType, _ := fieldGoType(g, f, field) + defVal := field.Desc.Default() + switch field.Desc.Kind() { + case protoreflect.StringKind: + consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String())) + case protoreflect.BytesKind: + vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes())) + case protoreflect.EnumKind: + idx := field.Desc.DefaultEnumValue().Index() + val := field.Enum.Values[idx] + if val.GoIdent.GoImportPath == f.GoImportPath { + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. + consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s", + name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent))) + } + case protoreflect.FloatKind, protoreflect.DoubleKind: + if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { + var fn, arg string + switch f := defVal.Float(); { + case math.IsInf(f, -1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1" + case math.IsInf(f, +1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1" + case math.IsNaN(f): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), "" + } + vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg)) + } else { + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f)) + } + default: + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface())) + } + } + if len(consts) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("const (") + for _, s := range consts { + g.P(s) + } + g.P(")") + } + if len(vars) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("var (") + for _, s := range vars { + g.P(s) + } + g.P(")") + } + g.P() +} + +func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + genMessageBaseMethods(g, f, m) + genMessageGetterMethods(g, f, m) + genMessageSetterMethods(g, f, m) +} + +func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + // Reset method. + g.P("func (x *", m.GoIdent, ") Reset() {") + g.P("*x = ", m.GoIdent, "{}") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {") + g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("}") + g.P() + + // String method. 
+ g.P("func (x *", m.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)") + g.P("}") + g.P() + + // ProtoMessage method. + g.P("func (*", m.GoIdent, ") ProtoMessage() {}") + g.P() + + // ProtoReflect method. + genMessageReflectMethods(g, f, m) + + // Descriptor method. + if m.genRawDescMethod { + var indexes []string + for i := 1; i < len(m.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i]))) + } + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.") + g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + genNoInterfacePragma(g, m.isTracked) + + // Getter for parent oneof. + if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() { + g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location) + g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {") + g.P("if m != nil {") + g.P("return m.", oneof.GoName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + } + + // Getter for message field. + goType, pointer := fieldGoType(g, f, field) + defaultValue := fieldDefaultValue(g, f, m, field) + g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + switch { + case field.Desc.IsWeak(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") + g.P("var w ", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") + g.P("}") + case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {") + g.P("return x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + default: + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + if !field.Desc.HasPresence() || defaultValue == "nil" { + g.P("if x != nil {") + } else { + g.P("if x != nil && x.", field.GoName, " != nil {") + } + star := "" + if pointer { + star = "*" + } + g.P("return ", star, " x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + } + g.P() + } +} + +func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + if !field.Desc.IsWeak() { + continue + } + + genNoInterfacePragma(g, m.isTracked) + + g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") + g.P("var w *", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = &x.", genid.WeakFields_goname) + if 
m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") + g.P("}") + g.P() + } +} + +// fieldGoType returns the Go type used for a field. +// +// If it returns pointer=true, the struct field is a pointer to the type. +func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) { + if field.Desc.IsWeak() { + return "struct{}", false + } + + pointer = field.Desc.HasPresence() + switch field.Desc.Kind() { + case protoreflect.BoolKind: + goType = "bool" + case protoreflect.EnumKind: + goType = g.QualifiedGoIdent(field.Enum.GoIdent) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + goType = "int32" + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + goType = "uint32" + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + goType = "int64" + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + goType = "uint64" + case protoreflect.FloatKind: + goType = "float32" + case protoreflect.DoubleKind: + goType = "float64" + case protoreflect.StringKind: + goType = "string" + case protoreflect.BytesKind: + goType = "[]byte" + pointer = false // rely on nullability of slices for presence + case protoreflect.MessageKind, protoreflect.GroupKind: + goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent) + pointer = false // pointer captured as part of the type + } + switch { + case field.Desc.IsList(): + return "[]" + goType, false + case field.Desc.IsMap(): + keyType, _ := fieldGoType(g, f, field.Message.Fields[0]) + valType, _ := fieldGoType(g, f, field.Message.Fields[1]) + return fmt.Sprintf("map[%v]%v", keyType, valType), false + } + return goType, pointer +} + +func fieldProtobufTagValue(field *protogen.Field) string { + var enumName string + if field.Desc.Kind() == protoreflect.EnumKind { + enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc) + } + return tag.Marshal(field.Desc, enumName) +} + +func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string { + if field.Desc.IsList() { + return "nil" + } + if field.Desc.HasDefault() { + defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName + if field.Desc.Kind() == protoreflect.BytesKind { + return "append([]byte(nil), " + defVarName + "...)" + } + return defVarName + } + switch field.Desc.Kind() { + case protoreflect.BoolKind: + return "false" + case protoreflect.StringKind: + return `""` + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: + return "nil" + case protoreflect.EnumKind: + val := field.Enum.Values[0] + if val.GoIdent.GoImportPath == f.GoImportPath { + return g.QualifiedGoIdent(val.GoIdent) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. 
+ return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")" + } + default: + return "0" + } +} + +func fieldJSONTagValue(field *protogen.Field) string { + return string(field.Desc.Name()) + ",omitempty" +} + +func genExtensions(g *protogen.GeneratedFile, f *fileInfo) { + if len(f.allExtensions) == 0 { + return + } + + g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{") + for _, x := range f.allExtensions { + g.P("{") + g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),") + goType, pointer := fieldGoType(g, f, x.Extension) + if pointer { + goType = "*" + goType + } + g.P("ExtensionType: (", goType, ")(nil),") + g.P("Field: ", x.Desc.Number(), ",") + g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",") + g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",") + g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",") + g.P("},") + } + g.P("}") + g.P() + + // Group extensions by the target message. + var orderedTargets []protogen.GoIdent + allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo) + allExtensionsByPtr := make(map[*extensionInfo]int) + for i, x := range f.allExtensions { + target := x.Extendee.GoIdent + if len(allExtensionsByTarget[target]) == 0 { + orderedTargets = append(orderedTargets, target) + } + allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x) + allExtensionsByPtr[x] = i + } + for _, target := range orderedTargets { + g.P("// Extension fields to ", target, ".") + g.P("var (") + for _, x := range allExtensionsByTarget[target] { + xd := x.Desc + typeName := xd.Kind().String() + switch xd.Kind() { + case protoreflect.EnumKind: + typeName = string(xd.Enum().FullName()) + case protoreflect.MessageKind, protoreflect.GroupKind: + typeName = string(xd.Message().FullName()) + } + fieldName := string(xd.Name()) + + leadingComments := x.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n", + xd.Cardinality(), typeName, fieldName, xd.Number())) + leadingComments = appendDeprecationSuffix(leadingComments, + x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]", + trailingComment(x.Comments.Trailing)) + } + g.P(")") + g.P() + } +} + +// genMessageOneofWrapperTypes generates the oneof wrapper types and +// associates the types with the parent message type. +func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, oneof := range m.Oneofs { + if oneof.Desc.IsSynthetic() { + continue + } + ifName := oneofInterfaceName(oneof) + g.P("type ", ifName, " interface {") + g.P(ifName, "()") + g.P("}") + g.P() + for _, field := range oneof.Fields { + g.Annotate(field.GoIdent.GoName, field.Location) + g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location) + g.P("type ", field.GoIdent, " struct {") + goType, _ := fieldGoType(g, f, field) + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + field.GoName, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + g.P("}") + g.P() + } + for _, field := range oneof.Fields { + g.P("func (*", field.GoIdent, ") ", ifName, "() {}") + g.P() + } + } +} + +// oneofInterfaceName returns the name of the interface type implemented by +// the oneof field value types. +func oneofInterfaceName(oneof *protogen.Oneof) string { + return "is" + oneof.GoIdent.GoName +} + +// genNoInterfacePragma generates a standalone "nointerface" pragma to +// decorate methods with field-tracking support. +func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) { + if tracked { + g.P("//go:nointerface") + g.P() + } +} + +var gotrackTags = structTags{{"go", "track"}} + +// structTags is a data structure for build idiomatic Go struct tags. +// Each [2]string is a key-value pair, where value is the unescaped string. +// +// Example: structTags{{"key", "value"}}.String() -> `key:"value"` +type structTags [][2]string + +func (tags structTags) String() string { + if len(tags) == 0 { + return "" + } + var ss []string + for _, tag := range tags { + // NOTE: When quoting the value, we need to make sure the backtick + // character does not appear. Convert all cases to the escaped hex form. + key := tag[0] + val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1) + ss = append(ss, fmt.Sprintf("%s:%s", key, val)) + } + return "`" + strings.Join(ss, " ") + "`" +} + +// appendDeprecationSuffix optionally appends a deprecation notice as a suffix. +func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments { + if !deprecated { + return prefix + } + if prefix != "" { + prefix += "\n" + } + return prefix + " Deprecated: Do not use.\n" +} + +// trailingComment is like protogen.Comments, but lacks a trailing newline. +type trailingComment protogen.Comments + +func (c trailingComment) String() string { + s := strings.TrimSuffix(protogen.Comments(c).String(), "\n") + if strings.Contains(s, "\n") { + // We don't support multi-lined trailing comments as it is unclear + // how to best render them in the generated code. + return "" + } + return s +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go new file mode 100644 index 000000000..1319a1267 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -0,0 +1,351 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal_gengo + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor")) + g.P() + + genFileDescriptor(gen, g, f) + if len(f.allEnums) > 0 { + g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")") + } + if len(f.allMessages) > 0 { + g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")") + } + + // Generate a unique list of Go types for all declarations and dependencies, + // and the associated index into the type list for all dependencies. + var goTypes []string + var depIdxs []string + seen := map[protoreflect.FullName]int{} + genDep := func(name protoreflect.FullName, depSource string) { + if depSource != "" { + line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name) + depIdxs = append(depIdxs, line) + } + } + genEnum := func(e *protogen.Enum, depSource string) { + if e != nil { + name := e.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name) + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + genMessage := func(m *protogen.Message, depSource string) { + if m != nil { + name := m.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name) + if m.Desc.IsMapEntry() { + // Map entry messages have no associated Go type. + line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name) + } + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + + // This ordering is significant. + // See filetype.TypeBuilder.DependencyIndexes. 
+ type offsetEntry struct { + start int + name string + } + var depOffsets []offsetEntry + for _, enum := range f.allEnums { + genEnum(enum.Enum, "") + } + for _, message := range f.allMessages { + genMessage(message.Message, "") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"}) + for _, message := range f.allMessages { + for _, field := range message.Fields { + if field.Desc.IsWeak() { + continue + } + source := string(field.Desc.FullName()) + genEnum(field.Enum, source+":type_name") + genMessage(field.Message, source+":type_name") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genMessage(extension.Extendee, source+":extendee") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genEnum(extension.Enum, source+":type_name") + genMessage(extension.Message, source+":type_name") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Input, source+":input_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Output, source+":output_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""}) + for i := len(depOffsets) - 2; i >= 0; i-- { + curr, next := depOffsets[i], depOffsets[i+1] + depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s", + curr.start, curr.start, next.start, curr.name)) + } + if len(depIdxs) > math.MaxInt32 { + panic("too many dependencies") // sanity check + } + + g.P("var ", goTypesVarName(f), " = []interface{}{") + for _, s := range goTypes { + g.P(s) + } + g.P("}") + + g.P("var ", depIdxsVarName(f), " = []int32{") + for _, s := range depIdxs { + g.P(s) + } + g.P("}") + + g.P("func init() { ", initFuncName(f.File), "() }") + + g.P("func ", initFuncName(f.File), "() {") + g.P("if ", f.GoDescriptorIdent, " != nil {") + g.P("return") + g.P("}") + + // Ensure that initialization functions for different files in the same Go + // package run in the correct order: Call the init funcs for every .proto file + // imported by this one that is in the same Go package. + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + impFile := gen.FilesByPath[imps.Get(i).Path()] + if impFile.GoImportPath != f.GoImportPath { + continue + } + g.P(initFuncName(impFile), "()") + } + + if len(f.allMessages) > 0 { + // Populate MessageInfo.Exporters. + g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {") + for _, message := range f.allMessages { + if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P("switch v := v.(*", message.GoIdent, "); i {") + for i := 0; i < sf.count; i++ { + if name := sf.unexported[i]; name != "" { + g.P("case ", i, ": return &v.", name) + } + } + g.P("default: return nil") + g.P("}") + g.P("}") + } + } + g.P("}") + + // Populate MessageInfo.OneofWrappers. 
+ for _, message := range f.allMessages { + if len(message.Oneofs) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + // Associate the wrapper types by directly passing them to the MessageInfo. + g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + for _, oneof := range message.Oneofs { + if !oneof.Desc.IsSynthetic() { + for _, field := range oneof.Fields { + g.P("(*", field.GoIdent, ")(nil),") + } + } + } + g.P("}") + } + } + } + + g.P("type x struct{}") + g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{") + g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{") + g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),") + g.P("RawDescriptor: ", rawDescVarName(f), ",") + g.P("NumEnums: ", len(f.allEnums), ",") + g.P("NumMessages: ", len(f.allMessages), ",") + g.P("NumExtensions: ", len(f.allExtensions), ",") + g.P("NumServices: ", len(f.Services), ",") + g.P("},") + g.P("GoTypes: ", goTypesVarName(f), ",") + g.P("DependencyIndexes: ", depIdxsVarName(f), ",") + if len(f.allEnums) > 0 { + g.P("EnumInfos: ", enumTypesVarName(f), ",") + } + if len(f.allMessages) > 0 { + g.P("MessageInfos: ", messageTypesVarName(f), ",") + } + if len(f.allExtensions) > 0 { + g.P("ExtensionInfos: ", extensionTypesVarName(f), ",") + } + g.P("}.Build()") + g.P(f.GoDescriptorIdent, " = out.File") + + // Set inputs to nil to allow GC to reclaim resources. + g.P(rawDescVarName(f), " = nil") + g.P(goTypesVarName(f), " = nil") + g.P(depIdxsVarName(f), " = nil") + g.P("}") +} + +func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto) + descProto.SourceCodeInfo = nil // drop source code information + + b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto) + if err != nil { + gen.Error(err) + return + } + + g.P("var ", rawDescVarName(f), " = []byte{") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") + g.P() + + if f.needRawDesc { + onceVar := rawDescVarName(f) + "Once" + dataVar := rawDescVarName(f) + "Data" + g.P("var (") + g.P(onceVar, " ", syncPackage.Ident("Once")) + g.P(dataVar, " = ", rawDescVarName(f)) + g.P(")") + g.P() + + g.P("func ", rawDescVarName(f), "GZIP() []byte {") + g.P(onceVar, ".Do(func() {") + g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")") + g.P("})") + g.P("return ", dataVar) + g.P("}") + g.P() + } +} + +func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + idx := f.allEnumsByPtr[e] + typesVar := enumTypesVarName(f) + + // Descriptor method. + g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {") + g.P("return ", typesVar, "[", idx, "].Descriptor()") + g.P("}") + g.P() + + // Type method. + g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {") + g.P("return &", typesVar, "[", idx, "]") + g.P("}") + g.P() + + // Number method. + g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {") + g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)") + g.P("}") + g.P() +} + +func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + idx := f.allMessagesByPtr[m] + typesVar := messageTypesVarName(f) + + // ProtoReflect method. 
+ g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {") + g.P("mi := &", typesVar, "[", idx, "]") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("if ms.LoadMessageInfo() == nil {") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("return ms") + g.P("}") + g.P("return mi.MessageOf(x)") + g.P("}") + g.P() +} + +func fileVarName(f *protogen.File, suffix string) string { + prefix := f.GoDescriptorIdent.GoName + _, n := utf8.DecodeRuneInString(prefix) + prefix = strings.ToLower(prefix[:n]) + prefix[n:] + return prefix + "_" + suffix +} +func rawDescVarName(f *fileInfo) string { + return fileVarName(f.File, "rawDesc") +} +func goTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "goTypes") +} +func depIdxsVarName(f *fileInfo) string { + return fileVarName(f.File, "depIdxs") +} +func enumTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "enumTypes") +} +func messageTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "msgTypes") +} +func extensionTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "extTypes") +} +func initFuncName(f *protogen.File) string { + return fileVarName(f, "init") +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 000000000..dbaa529ca --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1080 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. + + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... 
// make use of m + + If the exact concrete type is not known, then the UnmarshalNew method can be + used to unmarshal the contents into a new instance of the remote message type: + + m, err := any.UnmarshalNew() + if err != nil { + ... // handle error + } + ... // make use of m + + UnmarshalNew uses the global type registry to resolve the message type and + construct a new instance of that message to unmarshal into. In order for a + message type to appear in the global registry, the Go type representing that + protobuf message type must be linked into the Go binary. For messages + generated by protoc-gen-go, this is achieved through an import of the + generated Go package representing a .proto file. + + A common pattern with UnmarshalNew is to use a type switch with the resulting + proto.Message value: + + switch m := m.(type) { + case *foopb.MyMessage: + ... // make use of m as a *foopb.MyMessage + case *barpb.OtherMessage: + ... // make use of m as a *barpb.OtherMessage + case *bazpb.SomeMessage: + ... // make use of m as a *bazpb.SomeMessage + } + + This pattern ensures that the generated packages containing the message types + listed in the case clauses are linked into the Go binary and therefore also + registered in the global registry. + + + Type checking an Any + + In order to type check whether an Any message represents some other message, + then use the MessageIs method: + + if any.MessageIs((*foopb.MyMessage)(nil)) { + ... // make use of any, knowing that it contains a foopb.MyMessage + } + + The MessageIs method can also be used with an allocated instance of the target + message type if the intention is to unmarshal into it if the type matches: + + m := new(foopb.MyMessage) + if any.MessageIs(m) { + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + } + +` + case genid.File_google_protobuf_timestamp_proto: + return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `. + + The Timestamp message represents a timestamp, + an instant in time since the Unix epoch (January 1st, 1970). + + + Conversion to a Go Time + + The AsTime method can be used to convert a Timestamp message to a + standard Go time.Time value in UTC: + + t := ts.AsTime() + ... // make use of t as a time.Time + + Converting to a time.Time is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsTime method performs the conversion on a best-effort basis. Timestamps + with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) + are normalized during the conversion to a time.Time. To manually check for + invalid Timestamps per the documented limitations in timestamp.proto, + additionally call the CheckValid method: + + if err := ts.CheckValid(); err != nil { + ... // handle error + } + + + Conversion from a Go Time + + The timestamppb.New function can be used to construct a Timestamp message + from a standard Go time.Time value: + + ts := timestamppb.New(t) + ... // make use of ts as a *timestamppb.Timestamp + + In order to construct a Timestamp representing the current time, use Now: + + ts := timestamppb.Now() + ... // make use of ts as a *timestamppb.Timestamp + +` + case genid.File_google_protobuf_duration_proto: + return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `. + + The Duration message represents a signed span of time. 
+ + + Conversion to a Go Duration + + The AsDuration method can be used to convert a Duration message to a + standard Go time.Duration value: + + d := dur.AsDuration() + ... // make use of d as a time.Duration + + Converting to a time.Duration is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsDuration method performs the conversion on a best-effort basis. + Durations with denormal values (e.g., nanoseconds beyond -99999999 and + +99999999, inclusive; or seconds and nanoseconds with opposite signs) + are normalized during the conversion to a time.Duration. To manually check for + invalid Duration per the documented limitations in duration.proto, + additionally call the CheckValid method: + + if err := dur.CheckValid(); err != nil { + ... // handle error + } + + Note that the documented limitations in duration.proto does not protect a + Duration from overflowing the representable range of a time.Duration in Go. + The AsDuration method uses saturation arithmetic such that an overflow clamps + the resulting value to the closest representable value (e.g., math.MaxInt64 + for positive overflow and math.MinInt64 for negative overflow). + + + Conversion from a Go Duration + + The durationpb.New function can be used to construct a Duration message + from a standard Go time.Duration value: + + dur := durationpb.New(d) + ... // make use of d as a *durationpb.Duration + +` + case genid.File_google_protobuf_struct_proto: + return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `. + + The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are + used to represent arbitrary JSON. The Value message represents a JSON value, + the Struct message represents a JSON object, and the ListValue message + represents a JSON array. See https://json.org for more information. + + The Value, Struct, and ListValue types have generated MarshalJSON and + UnmarshalJSON methods such that they serialize JSON equivalent to what the + messages themselves represent. Use of these types with the + "google.golang.org/protobuf/encoding/protojson" package + ensures that they will be serialized as their JSON equivalent. + + + Conversion to and from a Go interface + + The standard Go "encoding/json" package has functionality to serialize + arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and + ListValue.AsSlice methods can convert the protobuf message representation into + a form represented by interface{}, map[string]interface{}, and []interface{}. + This form can be used with other packages that operate on such data structures + and also directly with the standard json package. + + In order to convert the interface{}, map[string]interface{}, and []interface{} + forms back as Value, Struct, and ListValue messages, use the NewStruct, + NewList, and NewValue constructor functions. 
+ + + Example usage + + Consider the following example JSON object: + + { + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [ + { + "type": "home", + "number": "212 555-1234" + }, + { + "type": "office", + "number": "646 555-4567" + } + ], + "children": [], + "spouse": null + } + + To construct a Value message representing the above JSON object: + + m, err := structpb.NewValue(map[string]interface{}{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": map[string]interface{}{ + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100", + }, + "phoneNumbers": []interface{}{ + map[string]interface{}{ + "type": "home", + "number": "212 555-1234", + }, + map[string]interface{}{ + "type": "office", + "number": "646 555-4567", + }, + }, + "children": []interface{}{}, + "spouse": nil, + }) + if err != nil { + ... // handle error + } + ... // make use of m as a *structpb.Value + +` + case genid.File_google_protobuf_field_mask_proto: + return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `. + + The FieldMask message represents a set of symbolic field paths. + The paths are specific to some target message type, + which is not stored within the FieldMask message itself. + + + Constructing a FieldMask + + The New function is used construct a FieldMask: + + var messageType *descriptorpb.DescriptorProto + fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") + if err != nil { + ... // handle error + } + ... // make use of fm + + The "field.name" and "field.number" paths are valid paths according to the + google.protobuf.DescriptorProto message. Use of a path that does not correlate + to valid fields reachable from DescriptorProto would result in an error. + + Once a FieldMask message has been constructed, + the Append method can be used to insert additional paths to the path set: + + var messageType *descriptorpb.DescriptorProto + if err := fm.Append(messageType, "options"); err != nil { + ... // handle error + } + + + Type checking a FieldMask + + In order to verify that a FieldMask represents a set of fields that are + reachable from some target message type, use the IsValid method: + + var messageType *descriptorpb.DescriptorProto + if fm.IsValid(messageType) { + ... // make use of fm + } + + IsValid needs to be passed the target message type as an input since the + FieldMask message itself does not store the message type that the set of paths + are for. 
+` + default: + return "" + } +} + +func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + switch m.Desc.FullName() { + case genid.Any_message_fullname: + g.P("// New marshals src into a new Any instance.") + g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {") + g.P(" dst := new(Any)") + g.P(" if err := dst.MarshalFrom(src); err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return dst, nil") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals src into dst as the underlying message") + g.P("// using the provided marshal options.") + g.P("//") + g.P("// If no options are specified, call dst.MarshalFrom instead.") + g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {") + g.P(" const urlPrefix = \"type.googleapis.com/\"") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" b, err := opts.Marshal(src)") + g.P(" if err != nil {") + g.P(" return err") + g.P(" }") + g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())") + g.P(" dst.Value = b") + g.P(" return nil") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the underlying message from src into dst") + g.P("// using the provided unmarshal options.") + g.P("// It reports an error if dst is not of the right message type.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalTo instead.") + g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" if !src.MessageIs(dst) {") + g.P(" got := dst.ProtoReflect().Descriptor().FullName()") + g.P(" want := src.MessageName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)") + g.P(" }") + g.P(" return opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the underlying message from src into dst,") + g.P("// which is newly created message using a type resolved from the type URL.") + g.P("// The message type is resolved according to opt.Resolver,") + g.P("// which should implement protoregistry.MessageTypeResolver.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalNew instead.") + g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {") + g.P(" if src.GetTypeUrl() == \"\" {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")") + g.P(" }") + g.P(" if opts.Resolver == nil {") + g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes")) + g.P(" }") + g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")") + g.P(" if !ok {") + g.P(" return nil, ", protoregistryPackage.Ident("NotFound")) + g.P(" }") + g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())") + g.P(" if err != nil {") + g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {") + g.P(" return nil, err") + g.P(" }") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)") + g.P(" }") + g.P(" dst = mt.New().Interface()") + 
g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", 
protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration 
(%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" vs := make(map[string]interface{})") + g.P(" for k, v := range x.GetFields() {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vs := make([]interface{}, len(x.GetValues()))") + g.P(" 
for i, v := range x.GetValues() {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number 
Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 
= normalizePaths(append(ss1[:0], in...))") + g.P(" ss2 = normalizePaths(append(ss2[:0], out...))") + g.P(" out = out[:0]") + g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {") + g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {") + g.P(" case hasPathPrefix(s1, s2):") + g.P(" out = append(out, s1)") + g.P(" i1++") + g.P(" case hasPathPrefix(s2, s1):") + g.P(" out = append(out, s2)") + g.P(" i2++") + g.P(" case lessPath(s1, s2):") + g.P(" i1++") + g.P(" case lessPath(s2, s1):") + g.P(" i2++") + g.P(" }") + g.P(" }") + g.P(" return out") + g.P(" }") + g.P() + g.P(" out := Union(mx, my, ms...).GetPaths()") + g.P(" out = intersect(out, mx.GetPaths())") + g.P(" out = intersect(out, my.GetPaths())") + g.P(" for _, m := range ms {") + g.P(" out = intersect(out, m.GetPaths())") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// IsValid reports whether all the paths are syntactically valid and") + g.P("// refer to known fields in the specified message type.") + g.P("// It reports false for a nil FieldMask.") + g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" paths := x.GetPaths()") + g.P(" return x != nil && numValidPaths(m, paths) == len(paths)") + g.P("}") + g.P() + + g.P("// Append appends a list of paths to the mask and verifies that each one") + g.P("// is valid according to the specified message type.") + g.P("// An invalid path is not appended and breaks insertion of subsequent paths.") + g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {") + g.P(" numValid := numValidPaths(m, paths)") + g.P(" x.Paths = append(x.Paths, paths[:numValid]...)") + g.P(" paths = paths[numValid:]") + g.P(" if len(paths) > 0 {") + g.P(" name := m.ProtoReflect().Descriptor().FullName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {") + g.P(" md0 := m.ProtoReflect().Descriptor()") + g.P(" for i, path := range paths {") + g.P(" md := md0") + g.P(" if !rangeFields(path, func(field string) bool {") + g.P(" // Search the field within the message.") + g.P(" if md == nil {") + g.P(" return false // not within a message") + g.P(" }") + g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))") + g.P(" // The real field name of a group is the message name.") + g.P(" if fd == nil {") + g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))") + g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {") + g.P(" fd = gd") + g.P(" }") + g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {") + g.P(" fd = nil") + g.P(" }") + g.P(" if fd == nil {") + g.P(" return false // message has does not have this field") + g.P(" }") + g.P() + g.P(" // Identify the next message to search within.") + g.P(" md = fd.Message() // may be nil") + g.P() + g.P(" // Repeated fields are only allowed at the last postion.") + g.P(" if fd.IsList() || fd.IsMap() {") + g.P(" md = nil") + g.P(" }") + g.P() + g.P(" return true") + g.P(" }) {") + g.P(" return i") + g.P(" }") + g.P(" }") + g.P(" return len(paths)") + g.P("}") + g.P() + + g.P("// Normalize converts the mask to its canonical form where all paths are 
sorted") + g.P("// and redundant paths are removed.") + g.P("func (x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact matche or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling a iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go new file mode 100644 index 000000000..0559ee330 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoc-gen-go binary is a protoc plugin to generate Go code for +// both proto2 and proto3 versions of the protocol buffer language. 
+// +// For more information about the usage of this plugin, see: +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +package main + +import ( + "errors" + "flag" + "fmt" + "os" + "path/filepath" + + gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo" + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/version" +) + +const genGoDocURL = "https://developers.google.com/protocol-buffers/docs/reference/go-generated" +const grpcDocURL = "https://grpc.io/docs/languages/go/quickstart/#regenerate-grpc-code" + +func main() { + if len(os.Args) == 2 && os.Args[1] == "--version" { + fmt.Fprintf(os.Stdout, "%v %v\n", filepath.Base(os.Args[0]), version.String()) + os.Exit(0) + } + if len(os.Args) == 2 && os.Args[1] == "--help" { + fmt.Fprintf(os.Stdout, "See "+genGoDocURL+" for usage information.\n") + os.Exit(0) + } + + var ( + flags flag.FlagSet + plugins = flags.String("plugins", "", "deprecated option") + ) + protogen.Options{ + ParamFunc: flags.Set, + }.Run(func(gen *protogen.Plugin) error { + if *plugins != "" { + return errors.New("protoc-gen-go: plugins are not supported; use 'protoc --go-grpc_out=...' to generate gRPC\n\n" + + "See " + grpcDocURL + " for more information.") + } + for _, f := range gen.Files { + if f.Generate { + gengo.GenerateFile(gen, f) + } + } + gen.SupportedFeatures = gengo.SupportedFeatures + return nil + }) +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go new file mode 100644 index 000000000..2ee676fbb --- /dev/null +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -0,0 +1,1261 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protogen provides support for writing protoc plugins. +// +// Plugins for protoc, the Protocol Buffer compiler, +// are programs which read a CodeGeneratorRequest message from standard input +// and write a CodeGeneratorResponse message to standard output. +// This package provides support for writing plugins which generate Go code. +package protogen + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const goPackageDocURL = "https://developers.google.com/protocol-buffers/docs/reference/go-generated#package" + +// Run executes a function as a protoc plugin. +// +// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin +// function, and writes a CodeGeneratorResponse message to os.Stdout. +// +// If a failure occurs while reading or writing, Run prints an error to +// os.Stderr and calls os.Exit(1). 
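To make the flow described above concrete, a minimal plugin built on this package might look like the following sketch (the plugin name, output filename suffix, and emitted comments are hypothetical):

package main

import (
	"google.golang.org/protobuf/compiler/protogen"
)

func main() {
	protogen.Options{}.Run(func(gen *protogen.Plugin) error {
		for _, f := range gen.Files {
			if !f.Generate {
				continue
			}
			// One output file per input .proto, named after its GeneratedFilenamePrefix.
			g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+"_echo.pb.go", f.GoImportPath)
			g.P("// Code generated by protoc-gen-echo. DO NOT EDIT.")
			g.P()
			g.P("package ", f.GoPackageName)
			g.P()
			for _, m := range f.Messages {
				g.P("// ", m.GoIdent.GoName, " is declared in ", f.Desc.Path())
			}
		}
		return nil
	})
}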
+func (opts Options) Run(f func(*Plugin) error) { + if err := run(opts, f); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err) + os.Exit(1) + } +} + +func run(opts Options, f func(*Plugin) error) error { + if len(os.Args) > 1 { + return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1]) + } + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + req := &pluginpb.CodeGeneratorRequest{} + if err := proto.Unmarshal(in, req); err != nil { + return err + } + gen, err := opts.New(req) + if err != nil { + return err + } + if err := f(gen); err != nil { + // Errors from the plugin function are reported by setting the + // error field in the CodeGeneratorResponse. + // + // In contrast, errors that indicate a problem in protoc + // itself (unparsable input, I/O errors, etc.) are reported + // to stderr. + gen.Error(err) + } + resp := gen.Response() + out, err := proto.Marshal(resp) + if err != nil { + return err + } + if _, err := os.Stdout.Write(out); err != nil { + return err + } + return nil +} + +// A Plugin is a protoc plugin invocation. +type Plugin struct { + // Request is the CodeGeneratorRequest provided by protoc. + Request *pluginpb.CodeGeneratorRequest + + // Files is the set of files to generate and everything they import. + // Files appear in topological order, so each file appears before any + // file that imports it. + Files []*File + FilesByPath map[string]*File + + // SupportedFeatures is the set of protobuf language features supported by + // this generator plugin. See the documentation for + // google.protobuf.CodeGeneratorResponse.supported_features for details. + SupportedFeatures uint64 + + fileReg *protoregistry.Files + enumsByName map[protoreflect.FullName]*Enum + messagesByName map[protoreflect.FullName]*Message + annotateCode bool + pathType pathType + module string + genFiles []*GeneratedFile + opts Options + err error +} + +type Options struct { + // If ParamFunc is non-nil, it will be called with each unknown + // generator parameter. + // + // Plugins for protoc can accept parameters from the command line, + // passed in the --_out protoc, separated from the output + // directory with a colon; e.g., + // + // --go_out==,=: + // + // Parameters passed in this fashion as a comma-separated list of + // key=value pairs will be passed to the ParamFunc. + // + // The (flag.FlagSet).Set method matches this function signature, + // so parameters can be converted into flags as in the following: + // + // var flags flag.FlagSet + // value := flags.Bool("param", false, "") + // opts := &protogen.Options{ + // ParamFunc: flags.Set, + // } + // protogen.Run(opts, func(p *protogen.Plugin) error { + // if *value { ... } + // }) + ParamFunc func(name, value string) error + + // ImportRewriteFunc is called with the import path of each package + // imported by a generated file. It returns the import path to use + // for this package. + ImportRewriteFunc func(GoImportPath) GoImportPath +} + +// New returns a new Plugin. 
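The parameter machinery above can be made concrete with a short sketch: a hypothetical plugin that surfaces one option through ParamFunc (via a flag.FlagSet, as suggested in the Options documentation) and advertises proto3 optional support through SupportedFeatures.

package main

import (
	"flag"

	"google.golang.org/protobuf/compiler/protogen"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	var flags flag.FlagSet
	// Hypothetical option, passed on the protoc command line as, e.g.,
	//   --demo_out=emit_comments=true:.
	emitComments := flags.Bool("emit_comments", false, "also emit comment stubs")

	opts := protogen.Options{
		// Unknown key=value parameters are forwarded here; (flag.FlagSet).Set
		// has the matching signature.
		ParamFunc: flags.Set,
	}
	opts.Run(func(gen *protogen.Plugin) error {
		// Mirror what protoc-gen-go does with gengo.SupportedFeatures:
		// declare support for proto3 optional fields.
		gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)
		_ = *emitComments // consult the parsed parameter while generating
		return nil
	})
}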
+func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { + gen := &Plugin{ + Request: req, + FilesByPath: make(map[string]*File), + fileReg: new(protoregistry.Files), + enumsByName: make(map[protoreflect.FullName]*Enum), + messagesByName: make(map[protoreflect.FullName]*Message), + opts: opts, + } + + packageNames := make(map[string]GoPackageName) // filename -> package name + importPaths := make(map[string]GoImportPath) // filename -> import path + for _, param := range strings.Split(req.GetParameter(), ",") { + var value string + if i := strings.Index(param, "="); i >= 0 { + value = param[i+1:] + param = param[0:i] + } + switch param { + case "": + // Ignore. + case "module": + gen.module = value + case "paths": + switch value { + case "import": + gen.pathType = pathTypeImport + case "source_relative": + gen.pathType = pathTypeSourceRelative + default: + return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value) + } + case "annotate_code": + switch value { + case "true", "": + gen.annotateCode = true + case "false": + default: + return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param) + } + default: + if param[0] == 'M' { + impPath, pkgName := splitImportPathAndPackageName(value) + if pkgName != "" { + packageNames[param[1:]] = pkgName + } + if impPath != "" { + importPaths[param[1:]] = impPath + } + continue + } + if opts.ParamFunc != nil { + if err := opts.ParamFunc(param, value); err != nil { + return nil, err + } + } + } + } + // When the module= option is provided, we strip the module name + // prefix from generated files. This only makes sense if generated + // filenames are based on the import path. + if gen.module != "" && gen.pathType == pathTypeSourceRelative { + return nil, fmt.Errorf("cannot use module= with paths=source_relative") + } + + // Figure out the import path and package name for each file. + // + // The rules here are complicated and have grown organically over time. + // Interactions between different ways of specifying package information + // may be surprising. + // + // The recommended approach is to include a go_package option in every + // .proto source file specifying the full import path of the Go package + // associated with this file. + // + // option go_package = "google.golang.org/protobuf/types/known/anypb"; + // + // Alternatively, build systems which want to exert full control over + // import paths may specify M= flags. + for _, fdesc := range gen.Request.ProtoFile { + // The "M" command-line flags take precedence over + // the "go_package" option in the .proto source file. + filename := fdesc.GetName() + impPath, pkgName := splitImportPathAndPackageName(fdesc.GetOptions().GetGoPackage()) + if importPaths[filename] == "" && impPath != "" { + importPaths[filename] = impPath + } + if packageNames[filename] == "" && pkgName != "" { + packageNames[filename] = pkgName + } + switch { + case importPaths[filename] == "": + // The import path must be specified one way or another. 
+ return nil, fmt.Errorf( + "unable to determine Go import path for %q\n\n"+ + "Please specify either:\n"+ + "\t• a \"go_package\" option in the .proto source file, or\n"+ + "\t• a \"M\" argument on the command line.\n\n"+ + "See %v for more information.\n", + fdesc.GetName(), goPackageDocURL) + case !strings.Contains(string(importPaths[filename]), ".") && + !strings.Contains(string(importPaths[filename]), "/"): + // Check that import paths contain at least a dot or slash to avoid + // a common mistake where import path is confused with package name. + return nil, fmt.Errorf( + "invalid Go import path %q for %q\n\n"+ + "The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+ + "See %v for more information.\n", + string(importPaths[filename]), fdesc.GetName(), goPackageDocURL) + case packageNames[filename] == "": + // If the package name is not explicitly specified, + // then derive a reasonable package name from the import path. + // + // NOTE: The package name is derived first from the import path in + // the "go_package" option (if present) before trying the "M" flag. + // The inverted order for this is because the primary use of the "M" + // flag is by build systems that have full control over the + // import paths all packages, where it is generally expected that + // the Go package name still be identical for the Go toolchain and + // for custom build systems like Bazel. + if impPath == "" { + impPath = importPaths[filename] + } + packageNames[filename] = cleanPackageName(path.Base(string(impPath))) + } + } + + // Consistency check: Every file with the same Go import path should have + // the same Go package name. + packageFiles := make(map[GoImportPath][]string) + for filename, importPath := range importPaths { + if _, ok := packageNames[filename]; !ok { + // Skip files mentioned in a M= parameter + // but which do not appear in the CodeGeneratorRequest. + continue + } + packageFiles[importPath] = append(packageFiles[importPath], filename) + } + for importPath, filenames := range packageFiles { + for i := 1; i < len(filenames); i++ { + if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b { + return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)", + importPath, a, filenames[0], b, filenames[i]) + } + } + } + + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + if gen.FilesByPath[filename] != nil { + return nil, fmt.Errorf("duplicate file name: %q", filename) + } + f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename]) + if err != nil { + return nil, err + } + gen.Files = append(gen.Files, f) + gen.FilesByPath[filename] = f + } + for _, filename := range gen.Request.FileToGenerate { + f, ok := gen.FilesByPath[filename] + if !ok { + return nil, fmt.Errorf("no descriptor for generated file: %v", filename) + } + f.Generate = true + } + return gen, nil +} + +// Error records an error in code generation. The generator will report the +// error back to protoc and will not produce output. +func (gen *Plugin) Error(err error) { + if gen.err == nil { + gen.err = err + } +} + +// Response returns the generator output. 
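To tie the import-path rules above to a concrete invocation, a go:generate directive using the comma-separated, colon-terminated parameter form described for Options might look like this (module path, file name, and import path are hypothetical):

// Package gen pins the protoc invocation used to regenerate this package's
// protobuf code. The parameters before the colon are the comma-separated
// key=value list parsed by Options.New: module= strips the module prefix from
// generated filenames, and M maps a .proto file to its Go import path, so
// api/foo.proto lands at gen/foopb/foo.pb.go under the output directory.
package gen

//go:generate protoc --go_out=module=example.com/demo,Mapi/foo.proto=example.com/demo/gen/foopb:. api/foo.proto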
+func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse { + resp := &pluginpb.CodeGeneratorResponse{} + if gen.err != nil { + resp.Error = proto.String(gen.err.Error()) + return resp + } + for _, g := range gen.genFiles { + if g.skip { + continue + } + content, err := g.Content() + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + filename := g.filename + if gen.module != "" { + trim := gen.module + "/" + if !strings.HasPrefix(filename, trim) { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)), + } + } + filename = strings.TrimPrefix(filename, trim) + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename), + Content: proto.String(string(content)), + }) + if gen.annotateCode && strings.HasSuffix(g.filename, ".go") { + meta, err := g.metaFile(content) + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename + ".meta"), + Content: proto.String(meta), + }) + } + } + if gen.SupportedFeatures > 0 { + resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures) + } + return resp +} + +// A File describes a .proto source file. +type File struct { + Desc protoreflect.FileDescriptor + Proto *descriptorpb.FileDescriptorProto + + GoDescriptorIdent GoIdent // name of Go variable for the file descriptor + GoPackageName GoPackageName // name of this file's Go package + GoImportPath GoImportPath // import path of this file's Go package + + Enums []*Enum // top-level enum declarations + Messages []*Message // top-level message declarations + Extensions []*Extension // top-level extension declarations + Services []*Service // top-level service declarations + + Generate bool // true if we should generate code for this file + + // GeneratedFilenamePrefix is used to construct filenames for generated + // files associated with this source file. + // + // For example, the source file "dir/foo.proto" might have a filename prefix + // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go". + GeneratedFilenamePrefix string + + location Location +} + +func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) { + desc, err := protodesc.NewFile(p, gen.fileReg) + if err != nil { + return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err) + } + if err := gen.fileReg.RegisterFile(desc); err != nil { + return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err) + } + f := &File{ + Desc: desc, + Proto: p, + GoPackageName: packageName, + GoImportPath: importPath, + location: Location{SourceFile: desc.Path()}, + } + + // Determine the prefix for generated Go files. + prefix := p.GetName() + if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" { + prefix = prefix[:len(prefix)-len(ext)] + } + switch gen.pathType { + case pathTypeImport: + // If paths=import, the output filename is derived from the Go import path. + prefix = path.Join(string(f.GoImportPath), path.Base(prefix)) + case pathTypeSourceRelative: + // If paths=source_relative, the output filename is derived from + // the input filename. 
+ } + f.GoDescriptorIdent = GoIdent{ + GoName: "File_" + strs.GoSanitized(p.GetName()), + GoImportPath: f.GoImportPath, + } + f.GeneratedFilenamePrefix = prefix + + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i))) + } + for i, sds := 0, desc.Services(); i < sds.Len(); i++ { + f.Services = append(f.Services, newService(gen, f, sds.Get(i))) + } + for _, message := range f.Messages { + if err := message.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, extension := range f.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, service := range f.Services { + for _, method := range service.Methods { + if err := method.resolveDependencies(gen); err != nil { + return nil, err + } + } + } + return f, nil +} + +// splitImportPathAndPackageName splits off the optional Go package name +// from the Go import path when seperated by a ';' delimiter. +func splitImportPathAndPackageName(s string) (GoImportPath, GoPackageName) { + if i := strings.Index(s, ";"); i >= 0 { + return GoImportPath(s[:i]), GoPackageName(s[i+1:]) + } + return GoImportPath(s), "" +} + +// An Enum describes an enum. +type Enum struct { + Desc protoreflect.EnumDescriptor + + GoIdent GoIdent // name of the generated Go type + + Values []*EnumValue // enum value declarations + + Location Location // location of this enum + Comments CommentSet // comments associated with this enum +} + +func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_EnumType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_EnumType_field_number, desc.Index()) + } + enum := &Enum{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.enumsByName[desc.FullName()] = enum + for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ { + enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i))) + } + return enum +} + +// An EnumValue describes an enum value. +type EnumValue struct { + Desc protoreflect.EnumValueDescriptor + + GoIdent GoIdent // name of the generated Go declaration + + Parent *Enum // enum in which this value is declared + + Location Location // location of this enum value + Comments CommentSet // comments associated with this enum value +} + +func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue { + // A top-level enum value's name is: EnumName_ValueName + // An enum value contained in a message is: MessageName_ValueName + // + // For historical reasons, enum value names are not camel-cased. 
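// As a concrete (hypothetical) instance of the rule above: given
//
//	enum Color { COLOR_RED = 0; }
//
// a top-level Color yields the Go constant Color_COLOR_RED, while the same enum
// declared inside "message Widget" yields Widget_COLOR_RED.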
+ parentIdent := enum.GoIdent + if message != nil { + parentIdent = message.GoIdent + } + name := parentIdent.GoName + "_" + string(desc.Name()) + loc := enum.Location.appendPath(genid.EnumDescriptorProto_Value_field_number, desc.Index()) + return &EnumValue{ + Desc: desc, + GoIdent: f.GoImportPath.Ident(name), + Parent: enum, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// A Message describes a message. +type Message struct { + Desc protoreflect.MessageDescriptor + + GoIdent GoIdent // name of the generated Go type + + Fields []*Field // message field declarations + Oneofs []*Oneof // message oneof declarations + + Enums []*Enum // nested enum declarations + Messages []*Message // nested message declarations + Extensions []*Extension // nested extension declarations + + Location Location // location of this message + Comments CommentSet // comments associated with this message +} + +func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_NestedType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_MessageType_field_number, desc.Index()) + } + message := &Message{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.messagesByName[desc.FullName()] = message + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i))) + } + for i, fds := 0, desc.Fields(); i < fds.Len(); i++ { + message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i))) + } + for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ { + message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i))) + } + + // Resolve local references between fields and oneofs. + for _, field := range message.Fields { + if od := field.Desc.ContainingOneof(); od != nil { + oneof := message.Oneofs[od.Index()] + field.Oneof = oneof + oneof.Fields = append(oneof.Fields, field) + } + } + + // Field name conflict resolution. + // + // We assume well-known method names that may be attached to a generated + // message type, as well as a 'Get*' method for each field. For each + // field in turn, we add _s to its name until there are no conflicts. + // + // Any change to the following set of method names is a potential + // incompatible API change because it may change generated field names. + // + // TODO: If we ever support a 'go_name' option to set the Go name of a + // field, we should consider dropping this entirely. The conflict + // resolution algorithm is subtle and surprising (changing the order + // in which fields appear in the .proto source file can change the + // names of fields in generated code), and does not adapt well to + // adding new per-field methods such as setters. 
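// As a concrete (hypothetical) illustration of the loop below: a field named
// "descriptor" camel-cases to GoName "Descriptor", which collides with the
// reserved Descriptor method, so it becomes "Descriptor_" (and "GetDescriptor_"
// is reserved for its getter); a second field that also camel-cases to
// "Descriptor" would become "Descriptor__", and so on.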
+ usedNames := map[string]bool{ + "Reset": true, + "String": true, + "ProtoMessage": true, + "Marshal": true, + "Unmarshal": true, + "ExtensionRangeArray": true, + "ExtensionMap": true, + "Descriptor": true, + } + makeNameUnique := func(name string, hasGetter bool) string { + for usedNames[name] || (hasGetter && usedNames["Get"+name]) { + name += "_" + } + usedNames[name] = true + usedNames["Get"+name] = hasGetter + return name + } + for _, field := range message.Fields { + field.GoName = makeNameUnique(field.GoName, true) + field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName + if field.Oneof != nil && field.Oneof.Fields[0] == field { + // Make the name for a oneof unique as well. For historical reasons, + // this assumes that a getter method is not generated for oneofs. + // This is incorrect, but fixing it breaks existing code. + field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false) + field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName + } + } + + // Oneof field name conflict resolution. + // + // This conflict resolution is incomplete as it does not consider collisions + // with other oneof field types, but fixing it breaks existing code. + for _, field := range message.Fields { + if field.Oneof != nil { + Loop: + for { + for _, nestedMessage := range message.Messages { + if nestedMessage.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + for _, nestedEnum := range message.Enums { + if nestedEnum.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + break Loop + } + } + } + + return message +} + +func (message *Message) resolveDependencies(gen *Plugin) error { + for _, field := range message.Fields { + if err := field.resolveDependencies(gen); err != nil { + return err + } + } + for _, message := range message.Messages { + if err := message.resolveDependencies(gen); err != nil { + return err + } + } + for _, extension := range message.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return err + } + } + return nil +} + +// A Field describes a message field. +type Field struct { + Desc protoreflect.FieldDescriptor + + // GoName is the base name of this field's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "FieldName" + + // GoIdent is the base name of a top-level declaration for this field. + // For code generated by protoc-gen-go, this means a wrapper type named + // '{{GoIdent}}' for members fields of a oneof, and a variable named + // 'E_{{GoIdent}}' for extension fields. 
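+ //
+ // Illustrative example: a oneof member field named bar in message Foo gets
+ // the wrapper type Foo_Bar, and a top-level extension named ext gets the
+ // variable E_Ext.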
+ GoIdent GoIdent // e.g., "MessageName_FieldName" + + Parent *Message // message in which this field is declared; nil if top-level extension + Oneof *Oneof // containing oneof; nil if not part of a oneof + Extendee *Message // extended message for extension fields; nil otherwise + + Enum *Enum // type for enum fields; nil otherwise + Message *Message // type for message or group fields; nil otherwise + + Location Location // location of this field + Comments CommentSet // comments associated with this field +} + +func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field { + var loc Location + switch { + case desc.IsExtension() && message == nil: + loc = f.location.appendPath(genid.FileDescriptorProto_Extension_field_number, desc.Index()) + case desc.IsExtension() && message != nil: + loc = message.Location.appendPath(genid.DescriptorProto_Extension_field_number, desc.Index()) + default: + loc = message.Location.appendPath(genid.DescriptorProto_Field_field_number, desc.Index()) + } + camelCased := strs.GoCamelCase(string(desc.Name())) + var parentPrefix string + if message != nil { + parentPrefix = message.GoIdent.GoName + "_" + } + field := &Field{ + Desc: desc, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Parent: message, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return field +} + +func (field *Field) resolveDependencies(gen *Plugin) error { + desc := field.Desc + switch desc.Kind() { + case protoreflect.EnumKind: + name := field.Desc.Enum().FullName() + enum, ok := gen.enumsByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name) + } + field.Enum = enum + case protoreflect.MessageKind, protoreflect.GroupKind: + name := desc.Message().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Message = message + } + if desc.IsExtension() { + name := desc.ContainingMessage().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Extendee = message + } + return nil +} + +// A Oneof describes a message oneof. +type Oneof struct { + Desc protoreflect.OneofDescriptor + + // GoName is the base name of this oneof's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "OneofName" + + // GoIdent is the base name of a top-level declaration for this oneof. 
+ GoIdent GoIdent // e.g., "MessageName_OneofName" + + Parent *Message // message in which this oneof is declared + + Fields []*Field // fields that are part of this oneof + + Location Location // location of this oneof + Comments CommentSet // comments associated with this oneof +} + +func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { + loc := message.Location.appendPath(genid.DescriptorProto_OneofDecl_field_number, desc.Index()) + camelCased := strs.GoCamelCase(string(desc.Name())) + parentPrefix := message.GoIdent.GoName + "_" + return &Oneof{ + Desc: desc, + Parent: message, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// Extension is an alias of Field for documentation. +type Extension = Field + +// A Service describes a service. +type Service struct { + Desc protoreflect.ServiceDescriptor + + GoName string + + Methods []*Method // service method declarations + + Location Location // location of this service + Comments CommentSet // comments associated with this service +} + +func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { + loc := f.location.appendPath(genid.FileDescriptorProto_Service_field_number, desc.Index()) + service := &Service{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + for i, mds := 0, desc.Methods(); i < mds.Len(); i++ { + service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i))) + } + return service +} + +// A Method describes a method in a service. +type Method struct { + Desc protoreflect.MethodDescriptor + + GoName string + + Parent *Service // service in which this method is declared + + Input *Message + Output *Message + + Location Location // location of this method + Comments CommentSet // comments associated with this method +} + +func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { + loc := service.Location.appendPath(genid.ServiceDescriptorProto_Method_field_number, desc.Index()) + method := &Method{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Parent: service, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return method +} + +func (method *Method) resolveDependencies(gen *Plugin) error { + desc := method.Desc + + inName := desc.Input().FullName() + in, ok := gen.messagesByName[inName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName) + } + method.Input = in + + outName := desc.Output().FullName() + out, ok := gen.messagesByName[outName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName) + } + method.Output = out + + return nil +} + +// A GeneratedFile is a generated file. +type GeneratedFile struct { + gen *Plugin + skip bool + filename string + goImportPath GoImportPath + buf bytes.Buffer + packageNames map[GoImportPath]GoPackageName + usedPackageNames map[GoPackageName]bool + manualImports map[GoImportPath]bool + annotations map[string][]Location +} + +// NewGeneratedFile creates a new generated file with the given filename +// and import path. 
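+//
+// A minimal, illustrative sketch (not part of the upstream documentation;
+// the plugin value gen, the file value file, and the generated names are
+// assumed):
+//
+//	g := gen.NewGeneratedFile(file.GeneratedFilenamePrefix+"_example.pb.go", file.GoImportPath)
+//	g.P("// Code generated by protoc-gen-example. DO NOT EDIT.")
+//	g.P("package ", file.GoPackageName)
+//	g.P("var _ = ", GoImportPath("fmt").Ident("Sprintf"), " // referenced identifiers are imported automatically")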
+func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile { + g := &GeneratedFile{ + gen: gen, + filename: filename, + goImportPath: goImportPath, + packageNames: make(map[GoImportPath]GoPackageName), + usedPackageNames: make(map[GoPackageName]bool), + manualImports: make(map[GoImportPath]bool), + annotations: make(map[string][]Location), + } + + // All predeclared identifiers in Go are already used. + for _, s := range types.Universe.Names() { + g.usedPackageNames[GoPackageName(s)] = true + } + + gen.genFiles = append(gen.genFiles, g) + return g +} + +// P prints a line to the generated output. It converts each parameter to a +// string following the same rules as fmt.Print. It never inserts spaces +// between parameters. +func (g *GeneratedFile) P(v ...interface{}) { + for _, x := range v { + switch x := x.(type) { + case GoIdent: + fmt.Fprint(&g.buf, g.QualifiedGoIdent(x)) + default: + fmt.Fprint(&g.buf, x) + } + } + fmt.Fprintln(&g.buf) +} + +// QualifiedGoIdent returns the string to use for a Go identifier. +// +// If the identifier is from a different Go package than the generated file, +// the returned name will be qualified (package.name) and an import statement +// for the identifier's package will be included in the file. +func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string { + if ident.GoImportPath == g.goImportPath { + return ident.GoName + } + if packageName, ok := g.packageNames[ident.GoImportPath]; ok { + return string(packageName) + "." + ident.GoName + } + packageName := cleanPackageName(path.Base(string(ident.GoImportPath))) + for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ { + packageName = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[ident.GoImportPath] = packageName + g.usedPackageNames[packageName] = true + return string(packageName) + "." + ident.GoName +} + +// Import ensures a package is imported by the generated file. +// +// Packages referenced by QualifiedGoIdent are automatically imported. +// Explicitly importing a package with Import is generally only necessary +// when the import will be blank (import _ "package"). +func (g *GeneratedFile) Import(importPath GoImportPath) { + g.manualImports[importPath] = true +} + +// Write implements io.Writer. +func (g *GeneratedFile) Write(p []byte) (n int, err error) { + return g.buf.Write(p) +} + +// Skip removes the generated file from the plugin output. +func (g *GeneratedFile) Skip() { + g.skip = true +} + +// Unskip reverts a previous call to Skip, re-including the generated file in +// the plugin output. +func (g *GeneratedFile) Unskip() { + g.skip = false +} + +// Annotate associates a symbol in a generated Go file with a location in a +// source .proto file. +// +// The symbol may refer to a type, constant, variable, function, method, or +// struct field. The "T.sel" syntax is used to identify the method or field +// 'sel' on type 'T'. +func (g *GeneratedFile) Annotate(symbol string, loc Location) { + g.annotations[symbol] = append(g.annotations[symbol], loc) +} + +// Content returns the contents of the generated file. +func (g *GeneratedFile) Content() ([]byte, error) { + if !strings.HasSuffix(g.filename, ".go") { + return g.buf.Bytes(), nil + } + + // Reformat generated code. + original := g.buf.Bytes() + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. 
+ // This should never happen in practice, but it can while changing generated code + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String()) + } + + // Collect a sorted list of all imports. + var importPaths [][2]string + rewriteImport := func(importPath string) string { + if f := g.gen.opts.ImportRewriteFunc; f != nil { + return string(f(GoImportPath(importPath))) + } + return importPath + } + for importPath := range g.packageNames { + pkgName := string(g.packageNames[GoImportPath(importPath)]) + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{pkgName, pkgPath}) + } + for importPath := range g.manualImports { + if _, ok := g.packageNames[importPath]; !ok { + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{"_", pkgPath}) + } + } + sort.Slice(importPaths, func(i, j int) bool { + return importPaths[i][1] < importPaths[j][1] + }) + + // Modify the AST to include a new import block. + if len(importPaths) > 0 { + // Insert block after package statement or + // possible comment attached to the end of the package statement. + pos := file.Package + tokFile := fset.File(file.Package) + pkgLine := tokFile.Line(file.Package) + for _, c := range file.Comments { + if tokFile.Line(c.Pos()) > pkgLine { + break + } + pos = c.End() + } + + // Construct the import block. + impDecl := &ast.GenDecl{ + Tok: token.IMPORT, + TokPos: pos, + Lparen: pos, + Rparen: pos, + } + for _, importPath := range importPaths { + impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{ + Name: &ast.Ident{ + Name: importPath[0], + NamePos: pos, + }, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(importPath[1]), + ValuePos: pos, + }, + EndPos: pos, + }) + } + file.Decls = append([]ast.Decl{impDecl}, file.Decls...) + } + + var out bytes.Buffer + if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil { + return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err) + } + return out.Bytes(), nil +} + +// metaFile returns the contents of the file's metadata file, which is a +// text formatted string of the google.protobuf.GeneratedCodeInfo. 
+func (g *GeneratedFile) metaFile(content []byte) (string, error) { + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", content, 0) + if err != nil { + return "", err + } + info := &descriptorpb.GeneratedCodeInfo{} + + seenAnnotations := make(map[string]bool) + annotate := func(s string, ident *ast.Ident) { + seenAnnotations[s] = true + for _, loc := range g.annotations[s] { + info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{ + SourceFile: proto.String(loc.SourceFile), + Path: loc.Path, + Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)), + End: proto.Int32(int32(fset.Position(ident.End()).Offset)), + }) + } + } + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + annotate(spec.Name.Name, spec.Name) + switch st := spec.Type.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + case *ast.InterfaceType: + for _, field := range st.Methods.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + } + case *ast.ValueSpec: + for _, name := range spec.Names { + annotate(name.Name, name) + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil { + annotate(decl.Name.Name, decl.Name) + } else { + recv := decl.Recv.List[0].Type + if s, ok := recv.(*ast.StarExpr); ok { + recv = s.X + } + if id, ok := recv.(*ast.Ident); ok { + annotate(id.Name+"."+decl.Name.Name, decl.Name) + } + } + } + } + for a := range g.annotations { + if !seenAnnotations[a] { + return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a) + } + } + + b, err := prototext.Marshal(info) + if err != nil { + return "", err + } + return string(b), nil +} + +// A GoIdent is a Go identifier, consisting of a name and import path. +// The name is a single identifier and may not be a dot-qualified selector. +type GoIdent struct { + GoName string + GoImportPath GoImportPath +} + +func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) } + +// newGoIdent returns the Go identifier for a descriptor. +func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent { + name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".") + return GoIdent{ + GoName: strs.GoCamelCase(name), + GoImportPath: f.GoImportPath, + } +} + +// A GoImportPath is the import path of a Go package. +// For example: "google.golang.org/protobuf/compiler/protogen" +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// Ident returns a GoIdent with s as the GoName and p as the GoImportPath. +func (p GoImportPath) Ident(s string) GoIdent { + return GoIdent{GoName: s, GoImportPath: p} +} + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// cleanPackageName converts a string to a valid Go package name. +func cleanPackageName(name string) GoPackageName { + return GoPackageName(strs.GoSanitized(name)) +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// A Location is a location in a .proto source file. +// +// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto +// for details. 
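+//
+// Illustrative example: the path [4, 0, 2, 1] addresses the second field of
+// the first message in a file (4 is the message_type field of
+// FileDescriptorProto and 2 is the field field of DescriptorProto).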
+type Location struct { + SourceFile string + Path protoreflect.SourcePath +} + +// appendPath add elements to a Location's path, returning a new Location. +func (loc Location) appendPath(num protoreflect.FieldNumber, idx int) Location { + loc.Path = append(protoreflect.SourcePath(nil), loc.Path...) // make copy + loc.Path = append(loc.Path, int32(num), int32(idx)) + return loc +} + +// CommentSet is a set of leading and trailing comments associated +// with a .proto descriptor declaration. +type CommentSet struct { + LeadingDetached []Comments + Leading Comments + Trailing Comments +} + +func makeCommentSet(loc protoreflect.SourceLocation) CommentSet { + var leadingDetached []Comments + for _, s := range loc.LeadingDetachedComments { + leadingDetached = append(leadingDetached, Comments(s)) + } + return CommentSet{ + LeadingDetached: leadingDetached, + Leading: Comments(loc.LeadingComments), + Trailing: Comments(loc.TrailingComments), + } +} + +// Comments is a comments string as provided by protoc. +type Comments string + +// String formats the comments by inserting // to the start of each line, +// ensuring that there is a trailing newline. +// An empty comment is formatted as an empty string. +func (c Comments) String() string { + if c == "" { + return "" + } + var b []byte + for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") { + b = append(b, "//"...) + b = append(b, line...) + b = append(b, "\n"...) + } + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 000000000..179d6e8fc --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,770 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. 
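+ //
+ // Illustrative use (the input b and message m are assumed):
+ //
+ //	err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)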
+ DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == genid.Any_message_fullname { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. + case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name pref.Name + var fd pref.FieldDescriptor + var xt pref.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = pref.Name(tok.IdentName()) + fd = fieldDescs.ByTextName(string(name)) + + case text.TypeName: + // Handle extensions only. This code path is not for Any. 
+ xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := pref.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. 
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + if tok.Kind() != text.Scalar { + return pref.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if b, ok := tok.Bool(); ok { + return pref.ValueOfBool(b), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return pref.ValueOfInt32(n), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return pref.ValueOfInt64(n), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return pref.ValueOfUint32(n), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return pref.ValueOfUint64(n), nil + } + + case pref.FloatKind: + if n, ok := tok.Float32(); ok { + return pref.ValueOfFloat32(n), nil + } + + case pref.DoubleKind: + if n, ok := tok.Float64(); ok { + return pref.ValueOfFloat64(n), nil + } + + case pref.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return pref.ValueOfString(s), nil + } + + case pref.BytesKind: + if b, ok := tok.String(); ok { + return pref.ValueOfBytes([]byte(b)), nil + } + + case pref.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return pref.ValueOfEnum(pref.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. +func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. 
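+//
+// Illustrative example: a map<string, int32> field named "counts" may appear
+// either as repeated single entries or as a list of entries:
+//
+//	counts: {key: "a" value: 1}
+//	counts: [{key: "a" value: 1}, {key: "b" value: 2}]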
+func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return pref.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { + var key pref.MapKey + var pval pref.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. + case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case genid.MapEntry_Value_field_name: + if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. 
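+//
+// Illustrative example (field and type URL assumed); both spellings denote
+// the same Any value:
+//
+//	any_field: {type_url: "type.googleapis.com/example.Msg" value: "..."}
+//	any_field: {[type.googleapis.com/example.Msg]: {name: "example"}}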
+func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool + var seenValue bool + var isExpanded bool + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) + } + seenTypeUrl = true + + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) + } + bValue = []byte(s) + seenValue = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + + case text.TypeName: + if isExpanded { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if seenTypeUrl { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + isExpanded = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. 
+ Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. + switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + return d.skipMessageValue() + default: + // Skip items. This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 000000000..162b4f98a --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 000000000..8d5304dc5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,371 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. 
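+//
+// Illustrative use (the proto.Message value m is assumed):
+//
+//	fmt.Println(Format(m)) // multiline, for debugging
+//	b, err := Marshal(m)   // single-line form, still not guaranteed stable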
+func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. + if m == nil { + return []byte{}, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. + if messageDesc.FullName() == genid.Any_message_fullname { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false + } + return true + }) + if err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case pref.Int32Kind, pref.Int64Kind, + pref.Sint32Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + e.WriteUint(val.Uint()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(string(val.Bytes())) + + case pref.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. 
+ e.WriteInt(int64(num)) + } + + case pref.MessageKind, pref.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. +func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. +func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName(string(genid.MapEntry_Key_field_name)) + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName(string(genid.MapEntry_Value_field_name)) + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalUnknown parses the given []byte and marshals fields out. +// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any pref.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(genid.Any_Value_field_number) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. If marshaling fails, reset encoder output + // back to this position. + pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. 
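+ // (For an expanded Any this written name is the type URL in brackets, e.g.
+ // [type.googleapis.com/example.Msg]; the URL shown is illustrative.)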
+ e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 000000000..a427f8b70 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,538 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://developers.google.com/protocol-buffers/docs/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the "google.golang.org/protobuf/proto" package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 +) + +// IsValid reports whether the field number is semantically valid. +// +// Note that while numbers within the reserved range are semantically invalid, +// they are syntactically valid in the wire format. +// Implementations may treat records with reserved field numbers as unknown. +func (n Number) IsValid() bool { + return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see ParseError). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. +// This assumes that the field Number and wire Type have already been parsed. +// This returns a negative length upon an error (see ParseError). 
+// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. +func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = ConsumeFieldValue(num2, typ2, b) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. +func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). 
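+//
+// Illustrative example: 300 is encoded as the two bytes 0xAC 0x02, so
+//
+//	v, n := ConsumeVarint([]byte{0xac, 0x02})
+//	// v == 300, n == 2; a negative n reports an error (see ParseError).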
+func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. + // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). 
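As a companion sketch (again illustrative, not vendored code), the fixed-width and length-prefixed helpers documented above compose into a small decode loop. The input is built in the same program, so error handling is kept minimal.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	var b []byte
	b = protowire.AppendTag(b, 2, protowire.BytesType) // field 2: length-prefixed
	b = protowire.AppendBytes(b, []byte("hi"))
	b = protowire.AppendTag(b, 3, protowire.Fixed32Type) // field 3: fixed32
	b = protowire.AppendFixed32(b, 0xdeadbeef)

	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		b = b[n:]
		switch typ {
		case protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			fmt.Println(num, string(v)) // 2 hi
			b = b[m:]
		case protowire.Fixed32Type:
			v, m := protowire.ConsumeFixed32(b)
			fmt.Printf("%d %#x\n", num, v) // 3 0xdeadbeef
			b = b[m:]
		default:
			panic("unexpected wire type in this example")
		}
	}
}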
+func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see ParseError). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. + // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field Number and wire Type into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. 
+// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 000000000..360c63329 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,318 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. +package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case pref.Names: + name = "Names" + case pref.FieldNumbers: + name = "FieldNumbers" + case pref.FieldRanges: + name = "FieldRanges" + case pref.EnumRanges: + name = "EnumRanges" + case pref.FileImports: + name = "FileImports" + case pref.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case pref.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case pref.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case pref.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(pref.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
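descfmt is an internal package, but it backs the fmt verbs on descriptor values. A hedged sketch follows (illustrative only; it assumes the concrete descriptor types route their Format methods through this package, and uses the well-known durationpb type purely as a convenient source of a descriptor):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fmt.Printf("%v\n", md)  // single-line summary, e.g. MessageDescriptor{...}
	fmt.Printf("%+v\n", md) // multi-line form produced by the allowMulti path above
}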
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +} + +func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(pref.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, "Path", "Package", "IsPlaceholder") + } else { + rs.Append(rv, "FullName", "IsPlaceholder") + } + } else { + switch { + case isFile: + rs.Append(rv, "Syntax") + case isRoot: + rs.Append(rv, "Syntax", "FullName") + default: + rs.Append(rv, "Name") + } + switch t := t.(type) { + case pref.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case pref.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + case pref.MessageKind, pref.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case pref.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + default: + rs.Append(rv, descriptorAccessors[rt]...) 
+ } + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool +} + +func (rs *records) Append(v reflect.Value, accessors ...string) { + for _, a := range accessors { + var rv reflect.Value + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } + if _, ok := rv.Interface().(pref.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: + s = string(v.(pref.Descriptor).Name()) + case pref.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. + var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 000000000..8401be8c8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 000000000..49c8676d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. +func Bool() bool { + return randSeed%2 == 1 +} + +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 000000000..fdd9b13f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package defval marshals and unmarshals textual forms of default values. +// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + errors "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + switch s { + case "1": + return pref.ValueOfBool(true), nil, nil + case "0": + return pref.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return pref.ValueOfBool(true), nil, nil + case "false": + return pref.ValueOfBool(false), nil, nil + } + } + case pref.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(pref.Name(s)) + if ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return pref.ValueOfInt32(int32(v)), nil, nil + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return pref.ValueOfInt64(int64(v)), nil, nil + } + case pref.Uint32Kind, pref.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return pref.ValueOfUint32(uint32(v)), nil, nil + } + case pref.Uint64Kind, pref.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return pref.ValueOfUint64(uint64(v)), nil, nil + } + case pref.FloatKind, pref.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == pref.FloatKind { + return pref.ValueOfFloat32(float32(v)), nil, nil + } else { + return pref.ValueOfFloat64(float64(v)), nil, nil + } + } + case pref.StringKind: + // String values are already unescaped and can be used as is. + return pref.ValueOfString(s), nil, nil + case pref.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return pref.ValueOfBytes(b), nil, nil + } + } + return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. 
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case pref.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case pref.FloatKind, pref.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == pref.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case pref.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case pref.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 000000000..c1866f3c1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,241 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// The MessageSet wire format is equivalent to a message defined as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. 
+// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. +func IsMessageSet(md pref.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field properly extends a MessageSet. +func IsMessageSetExtension(fd pref.FieldDescriptor) bool { + switch { + case fd.Name() != ExtensionName: + return false + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): + return false + } + return true +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. +func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. 
+ message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. + // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. 
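Since this package is internal, the item layout it describes can be illustrated with the public protowire helpers instead. The sketch below is an editorial example with a made-up type ID and payload; it produces the group/type_id/message shape shown in the comment at the top of this file.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const typeID = 12345                         // hypothetical extension field number
	payload := []byte{0x0a, 0x03, 'f', 'o', 'o'} // some serialized extension message

	var b []byte
	b = protowire.AppendTag(b, 1, protowire.StartGroupType) // Item group start
	b = protowire.AppendTag(b, 2, protowire.VarintType)     // type_id
	b = protowire.AppendVarint(b, typeID)
	b = protowire.AppendTag(b, 3, protowire.BytesType) // message
	b = protowire.AppendBytes(b, payload)
	b = protowire.AppendTag(b, 1, protowire.EndGroupType) // Item group end
	fmt.Printf("% x\n", b)
}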
+func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 000000000..38f1931c6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. +package tag + +import ( + "reflect" + "strconv" + "strings" + + defval "google.golang.org/protobuf/internal/encoding/defval" + fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. 
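The tag strings this package parses are the ones historical protoc-gen-go places on generated struct fields. The following standalone sketch (illustrative, with a hypothetical struct) shows what such a tag looks like and the comma-separated items Unmarshal walks through.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// example mimics the shape of a field emitted by historical protoc-gen-go.
type example struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")
	fmt.Println(tag)                     // bytes,1,opt,name=name,proto3
	fmt.Println(strings.Split(tag, ",")) // [bytes 1 opt name=name proto3]
}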
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { + f := new(fdesc.Field) + f.L0.ParentFile = fdesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = pref.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = pref.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = pref.Optional + case s == "req": + f.L1.Cardinality = pref.Required + case s == "rep": + f.L1.Cardinality = pref.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = pref.BoolKind + case reflect.Int32: + f.L1.Kind = pref.Int32Kind + case reflect.Int64: + f.L1.Kind = pref.Int64Kind + case reflect.Uint32: + f.L1.Kind = pref.Uint32Kind + case reflect.Uint64: + f.L1.Kind = pref.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = pref.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = pref.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = pref.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = pref.Fixed32Kind + case reflect.Float32: + f.L1.Kind = pref.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = pref.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = pref.Fixed64Kind + case reflect.Float64: + f.L1.Kind = pref.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = pref.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = pref.BytesKind + default: + f.L1.Kind = pref.MessageKind + } + case s == "group": + f.L1.Kind = pref.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = pref.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.StringName.InitJSON(jsonName) + } + case s == "packed": + f.L1.HasPacked = true + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = fdesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = fdesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == pref.GroupKind { + f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. 
+func Marshal(fd pref.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + tag = append(tag, "varint") + case pref.Sint32Kind: + tag = append(tag, "zigzag32") + case pref.Sint64Kind: + tag = append(tag, "zigzag64") + case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + tag = append(tag, "fixed32") + case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + tag = append(tag, "fixed64") + case pref.StringKind, pref.BytesKind, pref.MessageKind: + tag = append(tag, "bytes") + case pref.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case pref.Optional: + tag = append(tag, "opt") + case pref.Required: + tag = append(tag, "req") + case pref.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == pref.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == pref.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. + if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 000000000..eb10ea102 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. 
+ openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. +func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. 
+ s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. +// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. 
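Application code does not use this tokenizer directly; it is driven by the public prototext package vendored in the same module. A minimal end-to-end sketch (illustrative, using the well-known Duration type as a convenient target):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{}
	if err := prototext.Unmarshal([]byte(`seconds: 90 nanos: 0`), d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30s
}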
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 000000000..f2d90b789 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { + strSize = last + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: string(d.in[:strSize]), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. 
+type number struct { + kind uint8 + neg bool + size int +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + size++ + if len(s) == 0 { + return number{} + } + } + + // C++ allows for whitespace and comments in between the negative sign and + // the rest of the number. This logic currently does not but is consistent + // with v1. + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. + kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 000000000..d4d349023 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. + return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. 
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
+ if n > 2 {
+ n = 2
+ }
+ v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
+ }
+ in, out = in[2+n:], append(out, byte(v))
+ case 'u', 'U':
+ // Four or eight hexadecimal characters
+ n := 6
+ if r == 'U' {
+ n = 10
+ }
+ if len(in) < n {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
+ if utf8.MaxRune < v || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
+ }
+ in = in[n:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
+
+// UnmarshalString returns an unescaped string given a textproto string value.
+// String value needs to contain single or double quotes. This is only used by
+// internal/encoding/defval package for unmarshaling bytes.
+func UnmarshalString(s string) (string, error) {
+ d := NewDecoder([]byte(s))
+ return d.parseString()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 000000000..83d2b0d5a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+ Invalid Kind = iota
+ EOF
+ Name // Name indicates the field name.
+ Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+ MessageOpen
+ MessageClose
+ ListOpen
+ ListClose
+
+ // comma and semi-colon are only for parsing in between values and should not be exposed.
+ comma
+ semicolon
+
+ // bof indicates beginning of file, which is the default token
+ // kind at the beginning of parsing.
+ bof = Invalid
+)
+
+func (t Kind) String() string {
+ switch t {
+ case Invalid:
+ return "<invalid>"
+ case EOF:
+ return "eof"
+ case Scalar:
+ return "scalar"
+ case Name:
+ return "name"
+ case MessageOpen:
+ return "{"
+ case MessageClose:
+ return "}"
+ case ListOpen:
+ return "["
+ case ListClose:
+ return "]"
+ case comma:
+ return ","
+ case semicolon:
+ return ";"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+ IdentName NameKind = iota + 1
+ TypeName
+ FieldNumber
+)
+
+func (t NameKind) String() string {
+ switch t {
+ case IdentName:
+ return "IdentName"
+ case TypeName:
+ return "TypeName"
+ case FieldNumber:
+ return "FieldNumber"
+ default:
+ return fmt.Sprintf("<invalid:%v>", uint8(t))
+ }
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+ numberValue = iota + 1
+ stringValue
+ literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is a negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+ // Kind of the Token object.
+ kind Kind
+ // attrs contains metadata for the following Kinds:
+ // Name: hasSeparator bit and one of NameKind.
+ // Scalar: one of numberValue, stringValue, literalValue.
+ attrs uint8
+ // numAttrs contains metadata for numberValue:
+ // - highest bit is whether negative or positive.
+ // - lower bits indicate one of numDec, numHex, numOct, numFloat.
+ numAttrs uint8
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // str contains parsed string for the following:
+ // - stringValue of Scalar kind
+ // - numberValue of Scalar kind
+ // - TypeName of Name kind
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the read value in string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// NameKind returns IdentName, TypeName or FieldNumber.
+// It panics if type is not Name.
+func (t Token) NameKind() NameKind {
+ if t.kind == Name {
+ return NameKind(t.attrs &^ hasSeparator)
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// HasSeparator returns true if the field name is followed by the separator char
+// ':', else false. It panics if type is not Name.
+func (t Token) HasSeparator() bool {
+ if t.kind == Name {
+ return t.attrs&hasSeparator != 0
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// IdentName returns the value for IdentName type.
+func (t Token) IdentName() string {
+ if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
+ return string(t.raw)
+ }
+ panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// TypeName returns the value for TypeName type.
+func (t Token) TypeName() string {
+ if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// FieldNumber returns the value for FieldNumber type. It returns a
+// non-negative int32 value. Caller will still need to validate for the correct
+// field number range.
+func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. +func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. 
+func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 000000000..0ce8d6fb8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. 
+// +// The text format is almost a superset of JSON except: +// * message keys are not quoted strings, but identifiers +// * the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 000000000..da289ccce --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,270 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. +func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) 
+ for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. + switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. 
+ if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 000000000..20c17b35e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func New(f string, x ...interface{}) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...interface{}) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...interface{}) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) 
+} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 000000000..f90e909b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 000000000..dc05f4191 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 000000000..b293b6947 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,158 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder construct a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. + NumExtensions int32 + // NumServices is the total number of services declared in the file. 
+ NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + preg.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File pref.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. + Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Service is all service descriptors in "flattened ordering". + Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = preg.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = preg.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. 
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case genid.FileDescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.FileDescriptorProto_Extension_field_number: + db.NumExtensions++ + case genid.FileDescriptorProto_Service_field_number: + db.NumServices++ + } + } else { + switch num { + case genid.DescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.DescriptorProto_NestedType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.DescriptorProto_Extension_field_number: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 000000000..98ab142ae --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,631 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. +// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. 
+ +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax pref.Syntax + Path string + Package pref.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + } + FileL2 struct { + Options func() pref.ProtoMessage + Imports FileImports + Locations SourceLocations + } +) + +func (fd *File) ParentFile() pref.FileDescriptor { return fd } +func (fd *File) Parent() pref.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() pref.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() pref.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() pref.FullName { return fd.L1.Package } +func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(pref.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { + Options func() pref.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() pref.ProtoMessage + Number pref.EnumNumber + } +) + +func (ed *Enum) Options() pref.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() pref.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} + +func (ed *EnumValue) Options() pref.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { + Options func() pref.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() pref.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } +) + +func (md *Message) Options() pref.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +func (md 
*Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(pref.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() pref.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + switch fd.L1.Kind { + case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + default: + return true + } + } + return fd.L1.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) +} +func (fd *Field) MapValue() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } 
+func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() pref.MessageDescriptor { + return fd.L0.Parent.(pref.MessageDescriptor) +} +func (fd *Field) Enum() pref.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() pref.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(pref.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == pref.Proto3 +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() pref.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(pref.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number pref.FieldNumber + Extendee pref.MessageDescriptor + Cardinality pref.Cardinality + Kind pref.Kind + } + ExtensionL2 struct { + Options func() pref.ProtoMessage + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } +) + +func (xd *Extension) Options() pref.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() pref.FieldDescriptor { return 
nil } +func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() pref.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() pref.ProtoMessage + Input pref.MessageDescriptor + Output pref.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() pref.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() pref.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(pref.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. 
+var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName pref.FullName // must be populated + ParentFile *File // must be populated + Parent pref.Descriptor + Index int + } +) + +func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() pref.FullName { return d.L0.FullName } +func (d *Base) ParentFile() pref.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string +} + +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name +} + +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. + var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s +} + +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + +func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { + var evs pref.EnumValueDescriptors + if k == pref.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. 
+ if ed.IsPlaceholder() && pref.Name(b).IsValid() { + v := pref.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val pref.Value + enum pref.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { + // Return the zero value as the default if unpopulated. + if !dv.has { + if fd.Cardinality() == pref.Repeated { + return pref.Value{} + } + switch fd.Kind() { + case pref.BoolKind: + return pref.ValueOfBool(false) + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + return pref.ValueOfInt32(0) + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return pref.ValueOfInt64(0) + case pref.Uint32Kind, pref.Fixed32Kind: + return pref.ValueOfUint32(0) + case pref.Uint64Kind, pref.Fixed64Kind: + return pref.ValueOfUint64(0) + case pref.FloatKind: + return pref.ValueOfFloat32(0) + case pref.DoubleKind: + return pref.ValueOfFloat64(0) + case pref.StringKind: + return pref.ValueOfString("") + case pref.BytesKind: + return pref.ValueOfBytes(nil) + case pref.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return pref.ValueOfEnum(evs.Get(0).Number()) + } + return pref.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. + panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 000000000..66e1fee52 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,471 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. 
+// +// The alloc methods "allocates" slices by pulling from the capacity. +func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. +func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": + fd.L1.Syntax = pref.Proto2 + case "proto3": + fd.L1.Syntax = pref.Proto3 + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: + fd.L1.Package = pref.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + 
} + + // If syntax is missing, it is assumed to be proto2. + if fd.L1.Syntax == 0 { + fd.L1.Syntax = pref.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". + if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Name_field_number: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.EnumDescriptorProto_Value_field_number: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
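+ // For nested enums, value construction is deferred to unmarshalFull.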
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
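+ // The lists are carved out of the file-wide slices pre-sized by initDecls,
+ // so they must be reserved before recursing into nested declarations.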
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + xd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + xd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + xd.L1.Kind = pref.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_Extendee_field_number: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Name_field_number: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() interface{} { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) pref.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } + return pref.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { + return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 000000000..198451e3e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,704 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. 
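+ // Input and output types were stored as placeholders while unmarshaling;
+ // they are swapped for resolved descriptors here when resolution succeeds.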
+ for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(pref.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_PublicDependency_field_number: + fd.L2.Imports[v].IsPublic = true + case genid.FileDescriptorProto_WeakDependency_field_number: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Dependency_field_number: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.FileDescriptorProto_MessageType_field_number: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.FileDescriptorProto_Extension_field_number: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.FileDescriptorProto_Service_field_number: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case genid.FileDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: + ed.L2.ReservedNames.List = 
append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: + r[0] = pref.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: + r[1] = pref.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: + vd.L1.Number = pref.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Name_field_number: + // NOTE: Enum values are in the same scope as the enum parent. 
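+ // Hence the prefix is the scope enclosing the enum (pd.Parent()), not the enum itself.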
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case genid.EnumValueDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Field_field_number: + rawFields = append(rawFields, v) + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case genid.DescriptorProto_EnumType_field_number: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.DescriptorProto_NestedType_field_number: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.DescriptorProto_Extension_field_number: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == pref.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ReservedRange_Start_field_number: + 
r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: + r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + fd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + fd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + fd.L1.Kind = pref.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
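+ // Taking the address of the referenced oneof below is therefore safe,
+ // even though it has not been unmarshaled yet.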
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case genid.FieldDescriptorProto_Proto3Optional_field_number: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Weak_field_number: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.OneofDescriptorProto_Name_field_number: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.OneofDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Proto3Optional_field_number: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store 
as bytes; later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Method_field_number: + rawMethods = append(rawMethods, v) + case genid.ServiceDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_ClientStreaming_field_number: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case genid.MethodDescriptorProto_ServerStreaming_field_number: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.MethodDescriptorProto_InputType_field_number: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_OutputType_field_number: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. +func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) 
+} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. +func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { + if b == nil { + return nil + } + var opts pref.ProtoMessage + var once sync.Once + return func() pref.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 000000000..aa294fff9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,450 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "math" + "sort" + "sync" + + "google.golang.org/protobuf/internal/genid" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []pref.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []pref.Name + once sync.Once + has map[pref.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) pref.Name { return p.List[i] } +func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
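+ // The branch is kept unreachable via the constant false above to match that behavior.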
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]pref.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]pref.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n pref.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]pref.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]pref.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n pref.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
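+// The isMessageSet flag relaxes the upper bound on field numbers (see isValidFieldNumber below).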
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []pref.FieldNumber + once sync.Once + has map[pref.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n pref.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []pref.FieldDescriptor + once sync.Once + byName map[pref.Name]pref.FieldDescriptor // protected by once + byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once + byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } +func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + 
p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
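+ // The surrounding loop appended segments while ascending from the
+ // descriptor toward its file, so flip them into file-to-leaf order.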
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } +} +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. + p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. 
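+// Short paths with small segment values are packed into the fixed-size array;
+// anything longer or out of range falls back to the string form (see newPathKey).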
+type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 000000000..30db19fdc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,356 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p *Messages) Len() int { + return len(p.List) +} +func (p *Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s 
protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s 
protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 000000000..dbf2c605b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +func (f PlaceholderFile) Name() pref.Name { return "" } +func (f PlaceholderFile) FullName() pref.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() pref.FullName { return "" } +func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum pref.FullName + +func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. +type PlaceholderEnumValue pref.FullName + +func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. 
+type PlaceholderMessage pref.FullName + +func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 000000000..0a0dd35de --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,297 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. +package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + fdesc "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// +// Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. 
+// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File fdesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []interface{} + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. + // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. 
+ TypeRegistry interface { + RegisterMessage(pref.MessageType) error + RegisterEnum(pref.EnumType) error + RegisterExtension(pref.ExtensionType) error + } +} + +// Out is the output of the builder. +type Out struct { + File pref.FileDescriptor +} + +func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { + tb.File.FileRegistry = preg.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, + depIdxs: tb.DependencyIndexes, + fileRegistry: tb.File.FileRegistry, + } + + // Initialize registry if unpopulated. + if tb.TypeRegistry == nil { + tb.TypeRegistry = preg.GlobalTypes + } + + fbOut := tb.File.Build() + out.File = fbOut.File + + // Process enums. + enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] + if len(tb.EnumInfos) != len(fbOut.Enums) { + panic("mismatching enum lengths") + } + if len(fbOut.Enums) > 0 { + for i := range fbOut.Enums { + tb.EnumInfos[i] = pimpl.EnumInfo{ + GoReflectType: reflect.TypeOf(enumGoTypes[i]), + Desc: &fbOut.Enums[i], + } + // Register enum types. + if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { + panic(err) + } + } + } + + // Process messages. + messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] + if len(tb.MessageInfos) != len(fbOut.Messages) { + panic("mismatching message lengths") + } + if len(fbOut.Messages) > 0 { + for i := range fbOut.Messages { + if messageGoTypes[i] == nil { + continue // skip map entry + } + + tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) + tb.MessageInfos[i].Desc = &fbOut.Messages[i] + + // Register message types. + if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { + panic(err) + } + } + + // As a special-case for descriptor.proto, + // locally register concrete message type for the options. + if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": + descopts.File = messageGoTypes[i].(pref.ProtoMessage) + case "EnumOptions": + descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + case "EnumValueOptions": + descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + case "MessageOptions": + descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + case "FieldOptions": + descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + case "OneofOptions": + descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + case "ExtensionRangeOptions": + descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + case "ServiceOptions": + descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + case "MethodOptions": + descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + } + } + } + } + + // Process extensions. + if len(tb.ExtensionInfos) != len(fbOut.Extensions) { + panic("mismatching extension lengths") + } + var depIdx int32 + for i := range fbOut.Extensions { + // For enum and message kinds, determine the referent Go type so + // that we can construct their constructors. 
+ const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { + case pref.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + case pref.MessageKind, pref.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + default: + goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] + } + if fbOut.Extensions[i].IsList() { + goType = reflect.SliceOf(goType) + } + + pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) + + // Register extension types. + if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { + panic(err) + } + } + + return out +} + +var goTypeForPBKind = map[pref.Kind]reflect.Type{ + pref.BoolKind: reflect.TypeOf(bool(false)), + pref.Int32Kind: reflect.TypeOf(int32(0)), + pref.Sint32Kind: reflect.TypeOf(int32(0)), + pref.Sfixed32Kind: reflect.TypeOf(int32(0)), + pref.Int64Kind: reflect.TypeOf(int64(0)), + pref.Sint64Kind: reflect.TypeOf(int64(0)), + pref.Sfixed64Kind: reflect.TypeOf(int64(0)), + pref.Uint32Kind: reflect.TypeOf(uint32(0)), + pref.Fixed32Kind: reflect.TypeOf(uint32(0)), + pref.Uint64Kind: reflect.TypeOf(uint64(0)), + pref.Fixed64Kind: reflect.TypeOf(uint64(0)), + pref.FloatKind: reflect.TypeOf(float32(0)), + pref.DoubleKind: reflect.TypeOf(float64(0)), + pref.StringKind: reflect.TypeOf(string("")), + pref.BytesKind: reflect.TypeOf([]byte(nil)), +} + +type depIdxs []int32 + +// Get retrieves the jth element of the ith sub-list. +func (x depIdxs) Get(i, j int32) int32 { + return x[x[int32(len(x))-i-1]+j] +} + +type ( + resolverByIndex struct { + goTypes []interface{} + depIdxs depIdxs + fileRegistry + } + fileRegistry interface { + FindFileByPath(string) (pref.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +) + +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { + return &es[depIdx] + } else { + return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx]) + } +} + +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { + return &ms[depIdx-len(es)] + } else { + return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx]) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go new file mode 100644 index 000000000..58372dd34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flags provides a set of flags controlled by build tags. +package flags + +// ProtoLegacy specifies whether to enable support for legacy functionality +// such as MessageSets, weak fields, and various other obscure behavior +// that is necessary to maintain backwards compatibility with proto1 or +// the pre-release variants of proto2 and proto3. +// +// This is disabled by default unless built with the "protolegacy" tag. +// +// WARNING: The compatibility agreement covers nothing provided by this flag. 
+// As such, functionality may suddenly be removed or changed at our discretion. +const ProtoLegacy = protoLegacy + +// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. +// +// Lazy extension unmarshaling validates the contents of message-valued +// extension fields at unmarshal time, but defers creating the message +// structure until the extension is first accessed. +const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go new file mode 100644 index 000000000..a72995f02 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !protolegacy + +package flags + +const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go new file mode 100644 index 000000000..772e2f0e4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. +const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. 
+const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..e3cdf1c20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. 
+const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. 
+const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. 
+const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. 
+const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" + FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..1a38944b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..3bc710138 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 000000000..abee5f30e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = interface{} + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. +func (Export) EnumOf(e enum) pref.Enum { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. 
+func (Export) EnumTypeOf(e enum) pref.EnumType { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = interface{} + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m pref.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case piface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case pref.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case pref.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case piface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) pref.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. +func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. +func (Export) MessageTypeOf(m message) pref.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. 
+func (Export) MessageStringOf(m pref.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 000000000..b82341e57 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. +func needsInitCheck(md pref.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. + // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. 
+ has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 000000000..08d35170b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e +} + +func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. + switch xd.Kind() { + case pref.MessageKind, pref.GroupKind, pref.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == pref.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value pref.Value + b []byte + fn func() pref.Value +} + +type ExtensionField struct { + typ pref.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. 
+ value pref.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. +func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() pref.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() pref.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. 
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 000000000..cb4b482d1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,830 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. + // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. 
+ cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = 
messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize +} + +func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := 
protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v pref.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + b 
= protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized 
= o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return pref.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return pref.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv pref.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var 
coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return pref.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return pref.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, 
wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) pref.ProtoMessage { + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 000000000..1a509b63e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5637 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. 
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. 
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. 
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. 
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. 
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
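
Every consume function in this family inlines the same one- and two-byte varint fast path (the b[0] < 0x80 and b[1] < 128 branches) before falling back to protowire.ConsumeVarint, avoiding the call overhead for the very common short varints. A standalone sketch of that path; decodeVarintFast is a made-up name for illustration only:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    // decodeVarintFast mirrors the inlined fast path used by the consume*
    // functions: handle 1- and 2-byte varints directly, defer everything else.
    func decodeVarintFast(b []byte) (uint64, int) {
        if len(b) >= 1 && b[0] < 0x80 {
            return uint64(b[0]), 1 // MSB clear: single-byte varint
        }
        if len(b) >= 2 && b[1] < 128 {
            return uint64(b[0]&0x7f) + uint64(b[1])<<7, 2 // exactly two bytes
        }
        return protowire.ConsumeVarint(b) // general path (3+ bytes, or truncated input)
    }

    func main() {
        v, n := decodeVarintFast([]byte{0xAC, 0x02})
        fmt.Println(v, n) // 300 2
    }
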
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
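
The *PackedSlice size functions that follow add up the payload length n of the back-to-back varints and charge tagsize + protowire.SizeBytes(n), because a packed repeated scalar is a single length-delimited record: one tag, one length, then the raw values. A sketch of that layout with a made-up field number 1 and values 3, 270, 86942, using only public protowire helpers:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        vals := []uint32{3, 270, 86942}

        // Payload: the values as consecutive varints, no tags in between.
        var payload []byte
        for _, v := range vals {
            payload = protowire.AppendVarint(payload, uint64(v))
        }

        // Record: tag (field 1, wire type 2) + length prefix + payload.
        var b []byte
        b = protowire.AppendTag(b, 1, protowire.BytesType)
        b = protowire.AppendBytes(b, payload)
        fmt.Printf("% x\n", b) // 0a 06 03 8e 02 9e a7 05
    }
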
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
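
Alongside the pointerCoderFuncs, which read and write field memory directly through the unexported pointer type, each scalar kind also gets valueCoderFuncs that operate on protoreflect.Value, the representation used when fields are accessed reflectively (extension fields, for example). A small sketch of the Value round-trip the Uint32 value coders perform, including the narrowing back to uint32 that the generated code applies:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        v := protoreflect.ValueOfUint32(300)
        fmt.Println(v.Uint())         // 300, exposed as uint64 by the Value API
        fmt.Println(uint32(v.Uint())) // narrowed back to uint32 before encoding
    }
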
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
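
The *NoZero variants above encode nothing when the value is zero, matching proto3 implicit presence: a zero scalar is indistinguishable from an unset one and is simply left off the wire. A standalone sketch of that contract; sizeNoZeroSketch is an illustrative name, not part of this file:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    // sizeNoZeroSketch mirrors the *NoZero contract: zero values are omitted.
    func sizeNoZeroSketch(tagsize int, v int64) int {
        if v == 0 {
            return 0 // field not emitted at all
        }
        return tagsize + protowire.SizeVarint(uint64(v))
    }

    func main() {
        fmt.Println(sizeNoZeroSketch(1, 0))   // 0: field omitted
        fmt.Println(sizeNoZeroSketch(1, 150)) // 3: 1 tag byte + 2 varint bytes
    }
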
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
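
Note that consumeInt64Slice above, like the other repeated-scalar consumers in this file, accepts both wire forms: a BytesType record is parsed as a packed run of varints, while a plain VarintType record appends a single element, so the parser stays compatible with either encoding of the same field. A sketch that builds both forms for a made-up field 1 with values 1, 2, 3:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        vals := []uint64{1, 2, 3}

        // Unpacked form: one tag per element (wire type varint).
        var unpacked []byte
        for _, v := range vals {
            unpacked = protowire.AppendTag(unpacked, 1, protowire.VarintType)
            unpacked = protowire.AppendVarint(unpacked, v)
        }

        // Packed form: a single length-delimited record.
        var payload []byte
        for _, v := range vals {
            payload = protowire.AppendVarint(payload, v)
        }
        var packed []byte
        packed = protowire.AppendTag(packed, 1, protowire.BytesType)
        packed = protowire.AppendBytes(packed, payload)

        fmt.Printf("unpacked: % x\npacked:   % x\n", unpacked, packed)
        // unpacked: 08 01 08 02 08 03
        // packed:   0a 03 01 02 03
    }
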
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
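
The *Ptr coders that follow back fields with explicit presence, which are stored as pointers: the size and append helpers assume presence was already established (hence the documented panic on a nil pointer), while the consume helpers allocate on first use. A minimal sketch of that allocate-on-set pattern; setViaPtr is an illustrative name only:

    package main

    import "fmt"

    // setViaPtr mirrors the consume*Ptr pattern: allocate on first set so that
    // presence can be distinguished from the zero value.
    func setViaPtr(vp **int64, v int64) {
        if *vp == nil {
            *vp = new(int64)
        }
        **vp = v
    }

    func main() {
        var field *int64 // nil means "not present"
        setViaPtr(&field, 0)
        fmt.Println(field != nil, *field) // true 0: present, but zero
    }
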
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
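
Each append function writes f.wiretag, the field's precomputed tag varint from coderFieldInfo, followed by the value; the tag packs the field number and wire type into a single varint. A sketch with a made-up field number 2, using protowire.EncodeTag to show what that precomputed value is:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        // Tag = field number << 3 | wire type, encoded as a varint.
        wiretag := protowire.EncodeTag(2, protowire.VarintType)
        fmt.Println(wiretag) // 16

        b := protowire.AppendVarint(nil, wiretag) // what append* does with f.wiretag
        b = protowire.AppendVarint(b, 150)        // then the value itself
        fmt.Printf("% x\n", b)                    // 10 96 01
    }
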
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. 
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. 
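
Unlike the varint coders, the Sfixed32/Fixed32 size functions in this block never read the field value: a fixed32 always costs protowire.SizeFixed32() == 4 bytes, which is also why the packed variants size the payload as len(s) * 4. A sketch of that arithmetic with a made-up field number 1:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        tagsize := protowire.SizeTag(1)                 // 1 byte for field 1
        fmt.Println(tagsize + protowire.SizeFixed32())  // 5, regardless of the value
        // A packed slice of k fixed32 values costs tagsize + SizeBytes(4*k).
        fmt.Println(tagsize + protowire.SizeBytes(4*3)) // 14 for three values
    }
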
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. 
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. 
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. 
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. 
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. 
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. 
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. 
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. 
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. 
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. 
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. 
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. 
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. 
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. 
+func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.StringSlice() + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. 
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + if !utf8.Valid(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) 
+ out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.BytesSlice() + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. 
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 000000000..c1245fef4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,388 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero pref.Value + keyKind pref.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. + keyField := fd.MapKey() + valField := fd.MapValue() + keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) + valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) + keyFuncs := encoderFuncsForValue(keyField) + valFuncs := encoderFuncsForValue(valField) + conv := newMapConverter(ft, fd) + + mapi := &mapInfo{ + goType: ft, + keyWiretag: keyWiretag, + valWiretag: valWiretag, + keyFuncs: keyFuncs, + valFuncs: valFuncs, + keyZero: keyField.Default(), + keyKind: keyField.Kind(), + conv: conv, + } + if valField.Kind() == pref.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft) + if mp.Elem().IsNil() { + mp.Elem().Set(reflect.MakeMap(mapi.goType)) + } + if f.mi == nil { + return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) + } else { + return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) + } + }, + } + switch valField.Kind() { + case pref.MessageKind: + funcs.merge = mergeMapOfMessage + case pref.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap + } + if valFuncs.isInit != nil { + funcs.isInit = func(p pointer, f *coderFieldInfo) error { + return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) + } + } + return valueMessage, funcs +} + +const ( + mapKeyTagSize = 1 // field 1, tag size 1. + mapValTagSize = 1 // field 2, tag size 2. 
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case genid.MapEntry_Value_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + return f.mi.marshalAppendPointer(b, val, opts) + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, opts 
mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 000000000..2706bb67f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 000000000..1533ef600 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 000000000..cd40527ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,217 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods piface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + unknownPtrKind bool + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num pref.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == pref.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense pref.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. 
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. +func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 000000000..b7a23faf1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,123 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 000000000..90705e3ae --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + 
} + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 000000000..e89971238 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v pref.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) + isInit func(v pref.Value) error + merge func(dst, src pref.Value, opts mergeOptions) pref.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case pref.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == pref.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == pref.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case 
pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. +func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolSliceValue + case pref.EnumKind: + return coderEnumSliceValue + case pref.Int32Kind: + return coderInt32SliceValue + case pref.Sint32Kind: + return coderSint32SliceValue + case pref.Uint32Kind: + return coderUint32SliceValue + case pref.Int64Kind: + return coderInt64SliceValue + case pref.Sint64Kind: + return coderSint64SliceValue + case pref.Uint64Kind: + return coderUint64SliceValue + case pref.Sfixed32Kind: + return coderSfixed32SliceValue + case pref.Fixed32Kind: + return coderFixed32SliceValue + case pref.FloatKind: + return coderFloatSliceValue + case pref.Sfixed64Kind: + return coderSfixed64SliceValue + case pref.Fixed64Kind: + return coderFixed64SliceValue + case pref.DoubleKind: + return coderDoubleSliceValue + case pref.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue + case pref.BytesKind: + return coderBytesSliceValue + case pref.MessageKind: + return coderMessageSliceValue + case pref.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolPackedSliceValue + case pref.EnumKind: + return coderEnumPackedSliceValue + case pref.Int32Kind: + return coderInt32PackedSliceValue + case pref.Sint32Kind: + return coderSint32PackedSliceValue + case pref.Uint32Kind: + return coderUint32PackedSliceValue + case pref.Int64Kind: + return coderInt64PackedSliceValue + case pref.Sint64Kind: + return coderSint64PackedSliceValue + case pref.Uint64Kind: + return coderUint64PackedSliceValue + case pref.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case pref.Fixed32Kind: + return coderFixed32PackedSliceValue + case pref.FloatKind: + return coderFloatPackedSliceValue + case pref.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case pref.Fixed64Kind: + return coderFixed64PackedSliceValue + case pref.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case pref.BoolKind: + return coderBoolValue + case pref.EnumKind: + return coderEnumValue + case pref.Int32Kind: + return coderInt32Value + case pref.Sint32Kind: + return coderSint32Value + case pref.Uint32Kind: + return coderUint32Value + case pref.Int64Kind: + return coderInt64Value + case pref.Sint64Kind: + return coderSint64Value + case pref.Uint64Kind: + return coderUint64Value + case pref.Sfixed32Kind: + return coderSfixed32Value + case pref.Fixed32Kind: + return coderFixed32Value + case pref.FloatKind: + return coderFloatValue + case pref.Sfixed64Kind: + return coderSfixed64Value + case pref.Fixed64Kind: + return coderFixed64Value + case pref.DoubleKind: + return coderDoubleValue + case pref.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case pref.BytesKind: + return coderBytesValue + case pref.MessageKind: + return coderMessageValue + case pref.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 000000000..e118af1e2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 000000000..acd61bb50 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,496 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
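An illustrative sketch, not part of the vendored file: the coder tables above select by protoreflect Kind rather than by Go type alone because kinds that share a Go representation still encode differently on the wire. Using only the public protowire package, the difference is visible for int32, sint32, and sfixed32, which all map to Go int32:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	v := int32(-1)

	// Int32Kind: plain varint, sign-extended to 64 bits (10 bytes for -1).
	plain := protowire.AppendVarint(nil, uint64(int64(v)))

	// Sint32Kind: zigzag varint (-1 encodes as 1, a single byte).
	zigzag := protowire.AppendVarint(nil, protowire.EncodeZigZag(int64(v)))

	// Sfixed32Kind: fixed-width little-endian (always 4 bytes).
	fixed := protowire.AppendFixed32(nil, uint32(v))

	fmt.Println(len(plain), len(zigzag), len(fixed)) // 10 1 4
}

This is why the switch above distinguishes coderInt32, coderSint32, and coderSfixed32 even though the reflect.Kind check is identical for all three.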
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// unwrapper unwraps the value to the underlying value. +// This is implemented by List and Map. +type unwrapper interface { + protoUnwrap() interface{} +} + +// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. +type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. + PBValueOf(reflect.Value) pref.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. + GoValueOf(pref.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. + IsValidPB(pref.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool + + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. + New() pref.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. + Zero() pref.Value +} + +// NewConverter matches a Go type with a protobuf field and returns a Converter +// that converts between the two. Enums must be a named int32 kind that +// implements protoreflect.Enum, and messages must be pointer to a named +// struct type that implements protoreflect.ProtoMessage. +// +// This matcher deliberately supports a wider range of Go types than what +// protoc-gen-go historically generated to be able to automatically wrap some +// v1 messages generated by other forks of protoc-gen-go. +func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) + case fd.IsMap(): + return newMapConverter(t, fd) + default: + return newSingularConverter(t, fd) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +var ( + boolType = reflect.TypeOf(bool(false)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf(string("")) + bytesType = reflect.TypeOf([]byte(nil)) + byteType = reflect.TypeOf(byte(0)) +) + +var ( + boolZero = pref.ValueOfBool(false) + int32Zero = pref.ValueOfInt32(0) + int64Zero = pref.ValueOfInt64(0) + uint32Zero = pref.ValueOfUint32(0) + uint64Zero = pref.ValueOfUint64(0) + float32Zero = pref.ValueOfFloat32(0) + float64Zero = pref.ValueOfFloat64(0) + stringZero = pref.ValueOfString("") + bytesZero = pref.ValueOfBytes(nil) +) + +func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { + if fd.Cardinality() == pref.Repeated { + // Default isn't defined for repeated fields. 
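	// When the cardinality is repeated, this converter describes a single
	// element of the list, so the kind's zero value stands in as the default.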
+ return zero + } + return fd.Default() + } + switch fd.Kind() { + case pref.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case pref.Uint32Kind, pref.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case pref.Uint64Kind, pref.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case pref.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case pref.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case pref.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case pref.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case pref.EnumKind: + // Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case pref.MessageKind, pref.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() pref.Value { return c.def } +func (c *boolConverter) Zero() pref.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() pref.Value { return c.def } +func (c *int32Converter) Zero() pref.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v 
pref.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() pref.Value { return c.def } +func (c *int64Converter) Zero() pref.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() pref.Value { return c.def } +func (c *uint32Converter) Zero() pref.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() pref.Value { return c.def } +func (c *uint64Converter) Zero() pref.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() pref.Value { return c.def } +func (c *float32Converter) Zero() pref.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() pref.Value { return c.def } +func (c *float64Converter) Zero() pref.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { + if 
v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. + s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() pref.Value { return c.def } +func (c *stringConverter) Zero() pref.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() pref.Value { return c.def } +func (c *bytesConverter) Zero() pref.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def pref.Value +} + +func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { + var def pref.Value + if fd.Cardinality() == pref.Repeated { + def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfEnum(pref.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(pref.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() pref.Value { + return c.def +} + +func (c *enumConverter) Zero() pref.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return pref.ValueOfMessage(m.ProtoReflect()) + } + return pref.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + m := v.Message() + 
var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v pref.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. +func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 000000000..6fccab520 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
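A minimal sketch, not part of the vendored file: the PBValueOf/GoValueOf contract that int32Converter implements can be mirrored with the public protoreflect API alone. MyInt32 below is a hypothetical named type standing in for a generated enum or int32 field; the sketch imitates, rather than calls, the unexported converter.

package main

import (
	"fmt"
	"reflect"

	pref "google.golang.org/protobuf/reflect/protoreflect"
)

// MyInt32 is a hypothetical named type used only for illustration.
type MyInt32 int32

func main() {
	goType := reflect.TypeOf(MyInt32(0))

	// PBValueOf: Go reflect.Value -> protoreflect.Value.
	rv := reflect.ValueOf(MyInt32(42))
	pv := pref.ValueOfInt32(int32(rv.Int()))

	// GoValueOf: protoreflect.Value -> reflect.Value of the original Go type,
	// going through Convert so the named type is preserved.
	back := reflect.ValueOf(int32(pv.Int())).Convert(goType)

	fmt.Println(back.Interface().(MyInt32)) // 42
}

Every scalar converter above follows this same shape: check the dynamic type, move the value into a protoreflect.Value, and convert back via reflect.Value.Convert so named Go types survive the round trip.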
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return pref.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() pref.Value { + return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() pref.Value { + return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() pref.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) pref.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v pref.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v pref.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() pref.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() pref.Value { + return ls.conv.New() +} +func (ls *listReflect) IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() interface{} { + return ls.v.Interface() +} diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 000000000..de06b2593 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v pref.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() pref.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k pref.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k pref.MapKey) pref.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return pref.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k pref.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() pref.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() interface{} { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 000000000..949dc49a6 --- /dev/null +++ 
b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,276 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var errDecode = errors.New("cannot parse invalid wire-format data") + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == preg.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: preg.GlobalTypes, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. +func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + var flags piface.UnmarshalOutputFlags + if out.initialized { + flags |= piface.UnmarshalInitialized + } + return piface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). 
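	// A tag is the varint (field_number << 3) | wire_type. A single byte
	// covers field numbers 1-15 and two bytes cover numbers up to 2047, so
	// those common cases are decoded inline; longer tags fall back to
	// protowire.ConsumeVarint.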
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errDecode + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errDecode + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + } + if groupTag != 0 { + return out, errDecode + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == preg.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errDecode + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. 
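	// (Per the Converter contract in convert.go, New returns the scalar
	// default for scalar kinds and a fresh mutable value for message, list,
	// and map kinds.)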
+ ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 000000000..845c67d6e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,201 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. 
+func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } + } + if mi.sizecacheOffset.IsValid() { + if size > math.MaxInt32 { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + } else { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? + b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } + } + return b, nil +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 000000000..8c1eab4bf --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc pref.EnumDescriptor +} + +func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +} +func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 000000000..e904fd993 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. +// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. + // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. 
+ // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType piface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType interface{} + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. +const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() pref.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() pref.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + pref.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) 
Descriptor() pref.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 000000000..f7d7ffb51 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed pref.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. +func legacyWrapEnum(v reflect.Value) pref.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(pref.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) pref.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(pref.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. 
+ var et pref.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(pref.EnumType) + } + return et +} + +type legacyEnumType struct { + desc pref.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { + if e, ok := t.m.Load(n); ok { + return e.(pref.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num pref.EnumNumber + pbTyp pref.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() pref.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() pref.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() interface{} { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ pref.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(pref.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed pref.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. +// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. +// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. 
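	// The only information available is the Go type itself: the full name is
	// derived from its package path and type name (see AberrantDeriveFullName
	// below), and a single synthetic "<name>_UNKNOWN" value is added.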
+ ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(pref.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) pref.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return pref.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 000000000..e3fb0b578 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. +func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. +func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. 
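// For example, for an enum with a value FOO = 1, both the JSON inputs "FOO"
// and 1 decode to EnumNumber(1).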
+func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { + if b[0] == '"' { + var name pref.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num pref.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) + binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 000000000..49e723161 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,176 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent piface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. 
+ mz := reflect.Zero(t).Interface() + if mz, ok := mz.(piface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == pref.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: pref.FullName(xi.Name), + number: pref.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed pref.EnumDescriptor + var md pref.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case pref.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs pref.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. + xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = pref.FullName(xi.Name) + xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. 
+ if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name pref.FullName + number pref.FieldNumber +} + +func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +func (x placeholderExtension) Parent() pref.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +func (x placeholderExtension) FullName() pref.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +func (x placeholderExtension) Kind() pref.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 000000000..9ab091086 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 000000000..029feeefd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,565 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) pref.Message { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(t, "") + return mt.MessageOf(v.Interface()) +} + +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + var hasMarshal, hasUnmarshal bool + v := reflect.Zero(t).Interface() + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= piface.SupportMarshalDeterministic + } + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which should be a *struct kind and must not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. 
+ if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(pref.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() + if _, ok := mv.(pref.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. + b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. 
+ for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + // Obtain a list of oneof wrapper types. + var oneofWrappers []reflect.Type + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ + pref.FieldNumber(v.FieldByName("Start").Int()), + pref.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = pref.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. 
+ n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { + fd.L1.Options = func() pref.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. + if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case pref.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true + md2.L2.Options = func() pref.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var aberrantProtoMethods = &piface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + Flags: piface.SupportMarshalDeterministic, +} + +func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } + return piface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + } + return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { + return piface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return piface.MergeOutput{} + } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +// Reset implements the v1 proto.Message.Reset method. 
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + +func (m aberrantMessage) ProtoReflect() pref.Message { + return m +} + +func (m aberrantMessage) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() pref.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() pref.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return +} +func (m aberrantMessage) Has(pref.FieldDescriptor) bool { + return false +} +func (m aberrantMessage) Clear(pref.FieldDescriptor) { + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) GetUnknown() pref.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(pref.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false +} +func (m aberrantMessage) ProtoMethods() *piface.Methods { + return aberrantProtoMethods +} +func (m aberrantMessage) protoUnwrap() interface{} { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 000000000..c65bbc044 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. 
+func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return piface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return piface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv pref.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return src +} + +func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(pref.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(pref.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 000000000..8816c274d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 000000000..a104e28e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc pref.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []interface{} + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v interface{}, i int) interface{} + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + sizecacheType reflect.Type + weakOffset offset + weakType reflect.Type + unknownOffset offset + unknownType reflect.Type + extensionOffset offset + extensionType reflect.Type + + fieldsByNumber map[pref.FieldNumber]reflect.StructField + oneofsByName map[pref.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]pref.FieldNumber + oneofWrappersByNumber map[pref.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, + oneofsByName: map[pref.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, + oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genid.SizeCache_goname, genid.SizeCacheA_goname: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type + } + case genid.WeakFields_goname, genid.WeakFieldsA_goname: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type + } + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { + si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type + } + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[pref.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[pref.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = pref.FieldNumber(n) + si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 000000000..9488b7261 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,465 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[pref.FieldNumber]*fieldInfo + oneofs map[pref.Name]*oneofInfo + + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. 
+ denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) pref.RawFields + setUnknown func(pointer, pref.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. +func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[pref.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var fi fieldInfo + switch { + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Message() != nil: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[pref.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + // Introduce instability to iteration order, but keep it deterministic. + if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + return *p.Apply(mi.unknownOffset).Bytes() + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + *p.Apply(mi.unknownOffset).Bytes() = b + } + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+ mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: + mi.getUnknown = func(pointer) pref.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + if m == nil { + return false + } + xd := xt.TypeDescriptor() + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + case xd.Message() != nil: + return x.Value().Message().IsValid() + } + return true +} +func (m *extensionMap) Clear(xt pref.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) +} +func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xt.Zero() +} +func (m *extensionMap) 
Set(xt pref.ExtensionType, v pref.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xt.New() + m.Set(xt, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// +// Requirements: +// • The type M must implement protoreflect.ProtoMessage. +// • The address of m must not be nil. +// • The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ pref.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. +type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ pref.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. 
+func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} +func (m *messageIfaceWrapper) ProtoReflect() pref.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() interface{} { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd.Type() + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 000000000..343cf8721 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,543 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc pref.FieldDescriptor + + // These fields are used for protobuf reflection support. + has func(pointer) bool + clear func(pointer) + get func(pointer) pref.Value + set func(pointer, pref.Value) + mutable func(pointer) pref.Value + newMessage func() pref.Message + newField func() pref.Value +} + +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. 
+ // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + +func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. 
+ return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) pref.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? 
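
// Illustrative sketch (not part of the vendored file): the fieldInfoFor*
// constructors in this file all follow the same pattern - resolve the Go
// struct field once, then capture cheap has/get/set closures over it. A
// deliberately simplified, reflection-only analogue of that pattern (no
// unsafe, no pointer/offset machinery, no protobuf semantics):

package sketch

import "reflect"

type accessor struct {
	has func(msg reflect.Value) bool
	get func(msg reflect.Value) interface{}
	set func(msg reflect.Value, v interface{})
}

// accessorFor builds closures for the i-th field of struct type t.
// Values passed to the closures must be addressable structs of that type.
func accessorFor(t reflect.Type, i int) accessor {
	ft := t.Field(i).Type // resolved once, outside the hot path
	return accessor{
		has: func(msg reflect.Value) bool { return !msg.Field(i).IsZero() },
		get: func(msg reflect.Value) interface{} { return msg.Field(i).Interface() },
		set: func(msg reflect.Value, v interface{}) {
			msg.Field(i).Set(reflect.ValueOf(v).Convert(ft))
		},
	}
}
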
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. + nullable = false + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + 
messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) pref.Value { + lazyInit() + if p.IsNil() { + return pref.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return pref.ValueOfMessage(messageType.Zero()) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v pref.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) pref.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() pref.Message { + lazyInit() + return messageType.New() + }, + newField: func() pref.Value { + lazyInit() + return pref.ValueOfMessage(messageType.New()) + }, + } +} + +func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
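
// Illustrative sketch (not part of the vendored file): the weak-field
// accessors above resolve their message type lazily through the global
// registry. The same lookup is available via the public protoregistry API;
// the message name passed in would be something like the purely hypothetical
// "example.v1.Greeting".

package sketch

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

// newByName instantiates a registered message by its full name.
// It fails if the type is not linked into the binary.
func newByName(name protoreflect.FullName) (proto.Message, error) {
	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
	if err != nil {
		return nil, fmt.Errorf("message %v is not linked into the binary: %w", name, err)
	}
	return mt.New().Interface(), nil
}
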
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) pref.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc pref.OneofDescriptor + which func(pointer) pref.FieldNumber +} + +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. 
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 000000000..741d6e5b6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. 
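
// Illustrative sketch (not part of the vendored file): the generated methods
// that follow (Has, Clear, Get, Set, Mutable, ...) are what make dynamic,
// descriptor-driven field access work. A minimal example using only the
// public API; the field name "id" is a hypothetical placeholder.

package sketch

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// setInt64ByName sets an int64 field on any message, located by field name.
func setInt64ByName(m proto.Message, field protoreflect.Name, v int64) error {
	rm := m.ProtoReflect()
	fd := rm.Descriptor().Fields().ByName(field) // e.g. "id"
	if fd == nil {
		return fmt.Errorf("unknown field %q", field)
	}
	// Set panics if the value's type does not match fd's kind.
	rm.Set(fd, protoreflect.ValueOfInt64(v))
	return nil
}
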
+func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m 
*messageReflectWrapper) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 
000000000..9e3ed821e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,178 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer interface{} + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. 
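
// Illustrative sketch (not part of the vendored file): the purego
// implementation above addresses a field by its reflect field index, while
// the unsafe variant in pointer_unsafe.go below adds the field's raw byte
// offset to the struct's base pointer. Both roads lead to the same field,
// as this small standalone illustration shows.

package sketch

import (
	"reflect"
	"unsafe"
)

type pair struct {
	A int32
	B string
}

// fieldPointers returns the address of p.B computed both ways.
func fieldPointers(p *pair) (viaReflect, viaOffset unsafe.Pointer) {
	// Reflect-based: walk to the field by index and take its address.
	viaReflect = unsafe.Pointer(reflect.ValueOf(p).Elem().Field(1).Addr().Interface().(*string))

	// Offset-based: add the field's byte offset to the struct's base pointer.
	viaOffset = unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Offsetof(p.B))

	return viaReflect, viaOffset // both point at p.B
}
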
+func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 000000000..9ecf23a85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
+ return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. 
+ return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 000000000..08cfb6054 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,576 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. + // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. 
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = preg.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= piface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == pref.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case pref.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
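
// Illustrative sketch (not part of the vendored file): the inlined tag
// decoding that follows is a fast path for one- and two-byte varints, falling
// back to protowire.ConsumeVarint. The generic form of the same wire walk can
// be written entirely with the public protowire helpers:

package sketch

import "google.golang.org/protobuf/encoding/protowire"

// countTopLevelFields walks a wire-encoded message and counts its top-level
// fields, skipping every value. It reports an error on malformed input.
func countTopLevelFields(b []byte) (int, error) {
	var count int
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			return 0, protowire.ParseError(n)
		}
		b = b[n:]

		n = protowire.ConsumeFieldValue(num, typ, b)
		if n < 0 {
			return 0, protowire.ParseError(n)
		}
		b = b[n:]
		count++
	}
	return count, nil
}
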
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case genid.MapEntry_Key_field_number: + vi.typ = st.keyType + case genid.MapEntry_Value_field_number: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case preg.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != preg.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
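
// Illustrative sketch (not part of the vendored file): requiredMask and
// requiredBit implement required-field tracking with a single uint64 - each
// required field gets one bit, the bit is OR'd in when the field is seen on
// the wire, and a popcount at the end of the group says whether everything
// required was present. In isolation, the bookkeeping looks like this:

package sketch

import "math/bits"

// requiredTracker assigns one bit per required field (up to 64 of them).
type requiredTracker struct {
	total int    // number of required fields, capped at 64
	seen  uint64 // bit i is set once required field i has been seen
}

func (t *requiredTracker) markSeen(i int) {
	if i < 64 {
		t.seen |= uint64(1) << uint(i)
	}
}

// complete reports whether every required field was observed.
func (t *requiredTracker) complete() bool {
	return bits.OnesCount64(t.seen) == t.total
}
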
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == preg.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 000000000..009cbefd1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
+type weakFields WeakFields + +func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000..2a24953f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. 
+ IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + // Non-extension fields sort before extension fields. + if x.IsExtension() != y.IsExtension() { + return !x.IsExtension() && y.IsExtension() + } + // Extensions sorted by fullname. + if x.IsExtension() && y.IsExtension() { + return x.FullName() < y.FullName() + } + // Non-extensions sorted by declaration index. + return x.Index() < y.Index() + } +) + +// KeyOrder specifies the ordering to visit map entries. +// It is a function that reports whether x is ordered before y. +type KeyOrder func(x, y pref.MapKey) bool + +var ( + // AnyKeyOrder specifies no specific key ordering. + AnyKeyOrder KeyOrder = nil + + // GenericKeyOrder sorts false before true, numeric keys in ascending order, + // and strings in lexicographical ordering according to UTF-8 codepoints. + GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + switch x.Interface().(type) { + case bool: + return !x.Bool() && y.Bool() + case int32, int64: + return x.Int() < y.Int() + case uint32, uint64: + return x.Uint() < y.Uint() + case string: + return x.String() < y.String() + default: + panic("invalid map key type") + } + } +) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go new file mode 100644 index 000000000..c8090e0c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -0,0 +1,115 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides ordered access to messages and maps. +package order + +import ( + "sort" + "sync" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type messageField struct { + fd pref.FieldDescriptor + v pref.Value +} + +var messageFieldPool = sync.Pool{ + New: func() interface{} { return new([]messageField) }, +} + +type ( + // FieldRanger is an interface for visiting all fields in a message. + // The protoreflect.Message type implements this interface. + FieldRanger interface{ Range(VisitField) } + // VisitField is called everytime a message field is visited. + VisitField = func(pref.FieldDescriptor, pref.Value) bool +) + +// RangeFields iterates over the fields of fs according to the specified order. +func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { + if less == nil { + fs.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := messageFieldPool.Get().(*[]messageField) + fields := (*p)[:0] + defer func() { + if cap(fields) < 1024 { + *p = fields + messageFieldPool.Put(p) + } + }() + + // Collect all fields in the message and sort them. + fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fields = append(fields, messageField{fd, v}) + return true + }) + sort.Slice(fields, func(i, j int) bool { + return less(fields[i].fd, fields[j].fd) + }) + + // Visit the fields in the specified ordering. + for _, f := range fields { + if !fn(f.fd, f.v) { + return + } + } +} + +type mapEntry struct { + k pref.MapKey + v pref.Value +} + +var mapEntryPool = sync.Pool{ + New: func() interface{} { return new([]mapEntry) }, +} + +type ( + // EntryRanger is an interface for visiting all fields in a message. + // The protoreflect.Map type implements this interface. + EntryRanger interface{ Range(VisitEntry) } + // VisitEntry is called everytime a map entry is visited. 
+ VisitEntry = func(pref.MapKey, pref.Value) bool +) + +// RangeEntries iterates over the entries of es according to the specified order. +func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { + if less == nil { + es.Range(fn) + return + } + + // Obtain a pre-allocated scratch buffer. + p := mapEntryPool.Get().(*[]mapEntry) + entries := (*p)[:0] + defer func() { + if cap(entries) < 1024 { + *p = entries + mapEntryPool.Put(p) + } + }() + + // Collect all entries in the map and sort them. + es.Range(func(k pref.MapKey, v pref.Value) bool { + entries = append(entries, mapEntry{k, v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + return less(entries[i].k, entries[j].k) + }) + + // Visit the entries in the specified ordering. + for _, e := range entries { + if !fn(e.k, e.v) { + return + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go new file mode 100644 index 000000000..49dc4fcd9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pragma provides types that can be embedded into a struct to +// statically enforce or prevent certain language properties. +package pragma + +import "sync" + +// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals. +type NoUnkeyedLiterals struct{} + +// DoNotImplement can be embedded in an interface to prevent trivial +// implementations of the interface. +// +// This is useful to prevent unauthorized implementations of an interface +// so that it can be extended in the future for any protobuf language changes. +type DoNotImplement interface{ ProtoInternal(DoNotImplement) } + +// DoNotCompare can be embedded in a struct to prevent comparability. +type DoNotCompare [0]func() + +// DoNotCopy can be embedded in a struct to help prevent shallow copies. +// This does not rely on a Go language feature, but rather a special case +// within the vet checker. +// +// See https://golang.org/issues/8005. +type DoNotCopy [0]sync.Mutex diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go new file mode 100644 index 000000000..d3d7f89ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/set/ints.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides simple set data structures for uint64s. +package set + +import "math/bits" + +// int64s represents a set of integers within the range of 0..63. +type int64s uint64 + +func (bs *int64s) Len() int { + return bits.OnesCount64(uint64(*bs)) +} +func (bs *int64s) Has(n uint64) bool { + return uint64(*bs)&(uint64(1)<<n) > 0 +} +func (bs *int64s) Set(n uint64) { + *(*uint64)(bs) |= uint64(1) << n +} +func (bs *int64s) Clear(n uint64) { + *(*uint64)(bs) &^= uint64(1) << n +} + +// Ints represents a set of integers within the range of 0..math.MaxUint64. 
+type Ints struct { + lo int64s + hi map[uint64]struct{} +} + +func (bs *Ints) Len() int { + return bs.lo.Len() + len(bs.hi) +} +func (bs *Ints) Has(n uint64) bool { + if n < 64 { + return bs.lo.Has(n) + } + _, ok := bs.hi[n] + return ok +} +func (bs *Ints) Set(n uint64) { + if n < 64 { + bs.lo.Set(n) + return + } + if bs.hi == nil { + bs.hi = make(map[uint64]struct{}) + } + bs.hi[n] = struct{}{} +} +func (bs *Ints) Clear(n uint64) { + if n < 64 { + bs.lo.Clear(n) + return + } + delete(bs.hi, n) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go new file mode 100644 index 000000000..0b74e7658 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package strs provides string manipulation functionality specific to protobuf. +package strs + +import ( + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// EnforceUTF8 reports whether to enforce strict UTF-8 validation. +func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier. +func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). 
+ r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 000000000..85e074c97 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go new file mode 100644 index 000000000..2160c7019 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package strs + +import ( + "unsafe" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return pref.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. 
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 000000000..14e774fb2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// +// For our release process, we enforce the following rules: +// * Tagged releases use a tag that is identical to String. +// * Tagged releases never reference a commit where the String +// contains "devel". +// * The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// +// Steps for tagging a new release: +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 27 + Patch = 1 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 000000000..3e9a6a2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. 
+func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. +func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 000000000..49f9b8c88 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use Unmarshal instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + return o.unmarshal(in.Buf, in.Message) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return o.unmarshalMessageSet(b, m) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return errDecode + } + if num > protowire.MaxValidNumber { + return errDecode + } + + // Find the field descriptor for this field number. + fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. 
+ var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return errDecode + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, errDecode + } + if num > protowire.MaxValidNumber { + return 0, errDecode + } + b = b[n:] + err = errUnknown + switch num { + case genid.MapEntry_Key_field_number: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, errDecode + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. + if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. 
+var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 000000000..301eeb20f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + 
return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := 
protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 
0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 000000000..c52d8c4ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// +// https://developers.google.com/protocol-buffers +// +// For a tutorial on using protocol buffers with Go, see: +// +// https://developers.google.com/protocol-buffers/docs/gotutorial +// +// For a guide to generated Go protocol buffer code, see: +// +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// +// +// Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// • Size reports the size of a message in the wire format. +// +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. +// +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. +// +// +// Basic message operations +// +// • Clone makes a deep copy of a message. +// +// • Merge merges the content of a message into another. +// +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". +// +// • Reset clears the content of a message. +// +// • CheckInitialized reports whether all required fields in a message are set. +// +// +// Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. +// +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// +// Extension accessors +// +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. 
+// +// +// Related packages +// +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. +// +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. +// +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. +// +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. +// +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 000000000..d18239c23 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,319 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. + // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. + // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. 
Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// m1.OptionalBytes, _ = proto.Marshal(m2) +// where they expect the proto2 "optional_bytes" field to be populated +// if any only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use Marshal instead. +func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
+func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.marshalMessageSet(b, m) + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) 
+ return b, nil +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } + var err error + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 000000000..185dacfb4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 000000000..4dba2b969 --- /dev/null 
+++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. If either of the top-level +// messages are invalid, then Equal reports true only if both are invalid. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + return equalMessage(mx, my) +} + +// equalMessage compares two messages. +func equalMessage(mx, my pref.Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalField(fd, vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalField compares two fields. +func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + default: + return equalValue(fd, x, y) + } +} + +// equalMap compares two maps. +func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k pref.MapKey, vx pref.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +// equalList compares two lists. +func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalValue compares two singular values. 
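+// Bytes values are compared with bytes.Equal, and floating point NaN values
+// compare equal to each other (unlike Go's == operator): two messages that
+// both store math.NaN() in the same double field are reported equal.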
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: + fx := x.Float() + fy := y.Float() + if math.IsNaN(fx) || math.IsNaN(fy) { + return math.IsNaN(fx) && math.IsNaN(fy) + } + return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) + default: + return x.Interface() == y.Interface() + } +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y pref.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[pref.FieldNumber]pref.RawFields) + my := make(map[pref.FieldNumber]pref.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 000000000..5f293cda8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { + return false + } + + // As a special-case, we reports invalid or mismatching descriptors + // as always not being populated (since they aren't). + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + return false + } + + return m.ProtoReflect().Has(xt.TypeDescriptor()) +} + +// ClearExtension clears an extension field such that subsequent +// HasExtension calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. +// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) +} + +// SetExtension stores the value of an extension field. 
+// It panics if m is invalid, xt does not extend m, or if type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear. + isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 000000000..d761ab331 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the UnmarshalOptions.Merge option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to a + // untyped, read-only, empty message? What about a nil dst? + + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. 
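+//
+// A minimal usage sketch, where *pb.Example stands in for any generated
+// message type:
+//
+//	dst := proto.Clone(src).(*pb.Example) // deep copy of src
+//	proto.Merge(dst, src)                 // fields of src merged into dst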
+func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type. + if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. + src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 000000000..312d5d45c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,93 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(o.size(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalMessageSetField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := o.unmarshalMessageSetField(m, num, v) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 000000000..1f0d183b1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. +// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module. +// +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. +var Error error + +func init() { + Error = errors.Error +} + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 000000000..d8dd604f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 000000000..b103d4320 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 000000000..3d7f89436 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. +// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. 
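+//
+// A minimal sketch, where m is any non-nil generated message value:
+//
+//	proto.Reset(m)
+//	_ = proto.Size(m) // 0: no populated or unknown fields remain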
+func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 000000000..554b9c6c0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,97 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return o.size(m.ProtoReflect()) +} + +// size is a centralized function that all size operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for size that do not go through this. +func (o MarshalOptions) size(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. 
+ out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + }) + return len(out.Buf) + } + return o.sizeMessageSlow(m) +} + +func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += o.sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return o.sizeList(num, fd, value.List()) + case fd.IsMap(): + return o.sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) + } +} + +func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return protowire.SizeTag(num) + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += protowire.SizeTag(num) + size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 000000000..3cf61a824 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(o.size(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, o.size(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 000000000..653b12c3a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..e4dfb1205 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
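+
+// The wrapper helpers in proto/wrappers.go above are typically used to
+// populate optional scalar fields of generated proto2 messages, for example
+// msg.Name = proto.String("gopher") for a hypothetical generated message with
+// an optional string field Name.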
+ +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. 
When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..37efda1af --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..cebb36cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..9af1d5648 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..a7c5ceffc --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. +func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
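For reference, a minimal illustrative sketch of how a caller might use ToFileDescriptorProto; the choice of descriptorpb's own generated file as input is purely an example, any generated message would do:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protodesc"
    	"google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
    	// Every generated message exposes its FileDescriptor via reflection;
    	// descriptor.proto itself is used here purely for illustration.
    	file := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().ParentFile()

    	// Convert the descriptor back into its google.protobuf.FileDescriptorProto form.
    	fdp := protodesc.ToFileDescriptorProto(file)
    	fmt.Println(fdp.GetName())    // e.g. "google/protobuf/descriptor.proto"
    	fmt.Println(fdp.GetPackage()) // e.g. "google.protobuf"
    }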
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. 
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 000000000..6be5d16e9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,77 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 000000000..dd85915bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// +// Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. +// +// +// Go Type Descriptors +// +// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// create Go type descriptors from protobuf descriptors. +// +// +// Value Interfaces +// +// The Enum and Message interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a proto.Message to a protoreflect.Message, use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// to obtain a reflective view on older messages. +// +// +// Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An EnumType describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An EnumDescriptor describes an abstract protobuf enum type. +// +// • An Enum is a concrete enum instance. Generated enums implement Enum. 
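To make the enum relationships sketched above concrete, a small illustrative example; the generated FieldDescriptorProto_Type enum from descriptorpb is only a stand-in for any enum generated by this module:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    	"google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
    	// A generated enum value satisfies the protoreflect.Enum interface.
    	var e protoreflect.Enum = descriptorpb.FieldDescriptorProto_TYPE_BOOL

    	ed := e.Descriptor() // EnumDescriptor: the abstract protobuf enum type
    	n := e.Number()      // EnumNumber: the numeric value of this instance
    	et := e.Type()       // EnumType: the concrete Go enum type

    	fmt.Println(ed.FullName())           // google.protobuf.FieldDescriptorProto.Type
    	fmt.Println(n)                       // 8
    	fmt.Println(et.New(n).Number() == n) // true: New(n) constructs an Enum from a number
    }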
+// +// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// +// • A MessageDescriptor describes an abstract protobuf message type. +// +// • A Message is a concrete message instance. Generated messages implement +// ProtoMessage, which can convert to/from a Message. +// +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An ExtensionType describes a concrete Go implementation of an extension. +// It has an ExtensionTypeDescriptor and can convert to/from +// abstract Values and Go values. +// +// • An ExtensionTypeDescriptor is an ExtensionDescriptor +// which also has an ExtensionType. +// +// • An ExtensionDescriptor describes an abstract protobuf extension field and +// may not always be an ExtensionTypeDescriptor. +package protoreflect + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/pragma" +) + +type doNotImplement pragma.DoNotImplement + +// ProtoMessage is the top-level interface that all proto messages implement. +// This is declared in the protoreflect package to avoid a cyclic dependency; +// use the proto.Message type instead, which aliases this type. +type ProtoMessage interface{ ProtoReflect() Message } + +// Syntax is the language version of the proto file. +type Syntax syntax + +type syntax int8 // keep exact type opaque as the int type may change + +const ( + Proto2 Syntax = 2 + Proto3 Syntax = 3 +) + +// IsValid reports whether the syntax is valid. +func (s Syntax) IsValid() bool { + switch s { + case Proto2, Proto3: + return true + default: + return false + } +} + +// String returns s as a proto source identifier (e.g., "proto2"). +func (s Syntax) String() string { + switch s { + case Proto2: + return "proto2" + case Proto3: + return "proto3" + default: + return fmt.Sprintf("", s) + } +} + +// GoString returns s as a Go source identifier (e.g., "Proto2"). +func (s Syntax) GoString() string { + switch s { + case Proto2: + return "Proto2" + case Proto3: + return "Proto3" + default: + return fmt.Sprintf("Syntax(%d)", s) + } +} + +// Cardinality determines whether a field is optional, required, or repeated. +type Cardinality cardinality + +type cardinality int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Cardinality enumeration. +const ( + Optional Cardinality = 1 // appears zero or one times + Required Cardinality = 2 // appears exactly one time; invalid with Proto3 + Repeated Cardinality = 3 // appears zero or more times +) + +// IsValid reports whether the cardinality is valid. 
+func (c Cardinality) IsValid() bool { + switch c { + case Optional, Required, Repeated: + return true + default: + return false + } +} + +// String returns c as a proto source identifier (e.g., "optional"). +func (c Cardinality) String() string { + switch c { + case Optional: + return "optional" + case Required: + return "required" + case Repeated: + return "repeated" + default: + return fmt.Sprintf("", c) + } +} + +// GoString returns c as a Go source identifier (e.g., "Optional"). +func (c Cardinality) GoString() string { + switch c { + case Optional: + return "Optional" + case Required: + return "Required" + case Repeated: + return "Repeated" + default: + return fmt.Sprintf("Cardinality(%d)", c) + } +} + +// Kind indicates the basic proto kind of a field. +type Kind kind + +type kind int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Field.Kind enumeration. +const ( + BoolKind Kind = 8 + EnumKind Kind = 14 + Int32Kind Kind = 5 + Sint32Kind Kind = 17 + Uint32Kind Kind = 13 + Int64Kind Kind = 3 + Sint64Kind Kind = 18 + Uint64Kind Kind = 4 + Sfixed32Kind Kind = 15 + Fixed32Kind Kind = 7 + FloatKind Kind = 2 + Sfixed64Kind Kind = 16 + Fixed64Kind Kind = 6 + DoubleKind Kind = 1 + StringKind Kind = 9 + BytesKind Kind = 12 + MessageKind Kind = 11 + GroupKind Kind = 10 +) + +// IsValid reports whether the kind is valid. +func (k Kind) IsValid() bool { + switch k { + case BoolKind, EnumKind, + Int32Kind, Sint32Kind, Uint32Kind, + Int64Kind, Sint64Kind, Uint64Kind, + Sfixed32Kind, Fixed32Kind, FloatKind, + Sfixed64Kind, Fixed64Kind, DoubleKind, + StringKind, BytesKind, MessageKind, GroupKind: + return true + default: + return false + } +} + +// String returns k as a proto source identifier (e.g., "bool"). +func (k Kind) String() string { + switch k { + case BoolKind: + return "bool" + case EnumKind: + return "enum" + case Int32Kind: + return "int32" + case Sint32Kind: + return "sint32" + case Uint32Kind: + return "uint32" + case Int64Kind: + return "int64" + case Sint64Kind: + return "sint64" + case Uint64Kind: + return "uint64" + case Sfixed32Kind: + return "sfixed32" + case Fixed32Kind: + return "fixed32" + case FloatKind: + return "float" + case Sfixed64Kind: + return "sfixed64" + case Fixed64Kind: + return "fixed64" + case DoubleKind: + return "double" + case StringKind: + return "string" + case BytesKind: + return "bytes" + case MessageKind: + return "message" + case GroupKind: + return "group" + default: + return fmt.Sprintf("", k) + } +} + +// GoString returns k as a Go source identifier (e.g., "BoolKind"). +func (k Kind) GoString() string { + switch k { + case BoolKind: + return "BoolKind" + case EnumKind: + return "EnumKind" + case Int32Kind: + return "Int32Kind" + case Sint32Kind: + return "Sint32Kind" + case Uint32Kind: + return "Uint32Kind" + case Int64Kind: + return "Int64Kind" + case Sint64Kind: + return "Sint64Kind" + case Uint64Kind: + return "Uint64Kind" + case Sfixed32Kind: + return "Sfixed32Kind" + case Fixed32Kind: + return "Fixed32Kind" + case FloatKind: + return "FloatKind" + case Sfixed64Kind: + return "Sfixed64Kind" + case Fixed64Kind: + return "Fixed64Kind" + case DoubleKind: + return "DoubleKind" + case StringKind: + return "StringKind" + case BytesKind: + return "BytesKind" + case MessageKind: + return "MessageKind" + case GroupKind: + return "GroupKind" + default: + return fmt.Sprintf("Kind(%d)", k) + } +} + +// FieldNumber is the field number in a message. 
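Kind and Cardinality are typically read from a FieldDescriptor rather than constructed directly; a brief sketch, again borrowing a field from the generated descriptorpb package purely for illustration:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
    	md := (&descriptorpb.FieldDescriptorProto{}).ProtoReflect().Descriptor()
    	fd := md.Fields().ByName("number") // declared as: optional int32 number = 3;

    	fmt.Println(fd.Kind())           // int32
    	fmt.Println(fd.Cardinality())    // optional
    	fmt.Println(fd.Kind().IsValid()) // true
    }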
+type FieldNumber = protowire.Number + +// FieldNumbers represent a list of field numbers. +type FieldNumbers interface { + // Len reports the number of fields in the list. + Len() int + // Get returns the ith field number. It panics if out of bounds. + Get(i int) FieldNumber + // Has reports whether n is within the list of fields. + Has(n FieldNumber) bool + + doNotImplement +} + +// FieldRanges represent a list of field number ranges. +type FieldRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]FieldNumber // start inclusive; end exclusive + // Has reports whether n is within any of the ranges. + Has(n FieldNumber) bool + + doNotImplement +} + +// EnumNumber is the numeric value for an enum. +type EnumNumber int32 + +// EnumRanges represent a list of enum number ranges. +type EnumRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether s is a syntactically valid name. +// An empty name is invalid. +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list. + Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each Name. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether s is a syntactically valid full name. +// An empty full name is invalid. +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the Name itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. 
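The FullName helpers compose as documented; a short sketch using Name together with Parent and Append, which are defined immediately below:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
    	const n = protoreflect.FullName("google.protobuf.Field.Kind")

    	fmt.Println(n.IsValid()) // true
    	fmt.Println(n.Name())    // Kind
    	fmt.Println(n.Parent())  // google.protobuf.Field

    	// The documented invariant for valid names:
    	fmt.Println(n == n.Parent().Append(n.Name())) // true
    }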
+func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 000000000..121ba3a07 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,128 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "strconv" +) + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. + Get(int) SourceLocation + + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation + + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. +type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. + Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// Equal reports whether p1 equals p2. 
+func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000..b03c1223c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
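The example output documented on String above can be reproduced directly; the field numbers come from the generated mapping that follows (FileDescriptorProto.message_type is field 4, DescriptorProto.nested_type is field 3, and DescriptorProto.field is field 2):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
    	// Field numbers and repeated-field indexes alternate along the path.
    	p := protoreflect.SourcePath{4, 6, 3, 15, 2, 3}
    	fmt.Println(p.String()) // .message_type[6].nested_type[15].field[3]
    }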
+ +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = 
p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, 
"end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) 
appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 000000000..8e53c44a9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
+// +// Nested declarations: +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. +type MessageType interface { + // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. + New() Message + + // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + +// MessageDescriptors is a list of message declarations. 
+type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. + HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. + JSONName() string + + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. 
+ // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. 
+ Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. + Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(interface{}) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) interface{} + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(interface{}) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// EnumValueDescriptor. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. 
+type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. + ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: MethodDescriptor. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. +type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. 
+ ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 000000000..f31981077 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. +// +// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and +// be within the parent's extension range. +// +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. + // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. 
+ // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. + // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. 
+ IsValid() bool + + // ProtoMethods returns optional fast-path implementions of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // "google.golang.org/protobuf/runtime/protoiface".Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. + // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. + // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. 
+ // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 000000000..918e685e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v interface{}) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() interface{} { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 000000000..5a3414724 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,411 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
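The value.go hunk above defines the Message, List, and Map reflection interfaces, and the value_union.go hunk that follows defines the Value union they exchange. A minimal usage sketch, not part of the patch itself, is given here; the helper name describePopulated and the choice of durationpb as a sample message are illustrative assumptions, while the protoreflect calls it makes (ProtoReflect, Range, IsMap, IsList, Interface) are the ones documented in this diff.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

// describePopulated walks every populated field of an arbitrary message via
// the protoreflect.Message interface sketched above (hypothetical helper).
func describePopulated(msg proto.Message) {
	m := msg.ProtoReflect()
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		case fd.IsMap():
			fmt.Printf("%s: map with %d entries\n", fd.FullName(), v.Map().Len())
		case fd.IsList():
			fmt.Printf("%s: list with %d elements\n", fd.FullName(), v.List().Len())
		default:
			fmt.Printf("%s: %v\n", fd.FullName(), v.Interface())
		}
		return true // continue iterating over the remaining populated fields
	})
}

func main() {
	// durationpb is used only to have a concrete message to inspect;
	// this prints "google.protobuf.Duration.seconds: 3".
	describePopulated(durationpb.New(3 * time.Second))
}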
+ +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto Kind: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// but use different integer encoding methods. +// +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. +// For example, ValueOf("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +type Value value + +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. + +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v interface{}) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + case ProtoMessage: + panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. 
+func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. +func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. +func ValueOfMap(v Map) Value { + return valueOfIface(v) +} + +// IsValid reports whether v is populated with a value. +func (v Value) IsValid() bool { + return v.typ != nilType +} + +// Interface returns v as an interface{}. +// +// Invariant: v == ValueOf(v).Interface() +func (v Value) Interface() interface{} { + switch v.typ { + case nilType: + return nil + case boolType: + return v.Bool() + case int32Type: + return int32(v.Int()) + case int64Type: + return int64(v.Int()) + case uint32Type: + return uint32(v.Uint()) + case uint64Type: + return uint64(v.Uint()) + case float32Type: + return float32(v.Float()) + case float64Type: + return float64(v.Float()) + case stringType: + return v.String() + case bytesType: + return v.Bytes() + case enumType: + return v.Enum() + default: + return v.getIface() + } +} + +func (v Value) typeName() string { + switch v.typ { + case nilType: + return "nil" + case boolType: + return "bool" + case int32Type: + return "int32" + case int64Type: + return "int64" + case uint32Type: + return "uint32" + case uint64Type: + return "uint64" + case float32Type: + return "float32" + case float64Type: + return "float64" + case stringType: + return "string" + case bytesType: + return "bytes" + case enumType: + return "enum" + default: + switch v := v.getIface().(type) { + case Message: + return "message" + case List: + return "list" + case Map: + return "map" + default: + return fmt.Sprintf("<unknown: %T>", v) + } + } +} + +func (v Value) panicMessage(what string) string { + return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what) +} + +// Bool returns v as a bool and panics if the type is not a bool. +func (v Value) Bool() bool { + switch v.typ { + case boolType: + return v.num > 0 + default: + panic(v.panicMessage("bool")) + } +} + +// Int returns v as a int64 and panics if the type is not a int32 or int64.
+func (v Value) Int() int64 { + switch v.typ { + case int32Type, int64Type: + return int64(v.num) + default: + panic(v.panicMessage("int")) + } +} + +// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64. +func (v Value) Uint() uint64 { + switch v.typ { + case uint32Type, uint64Type: + return uint64(v.num) + default: + panic(v.panicMessage("uint")) + } +} + +// Float returns v as a float64 and panics if the type is not a float32 or float64. +func (v Value) Float() float64 { + switch v.typ { + case float32Type, float64Type: + return math.Float64frombits(uint64(v.num)) + default: + panic(v.panicMessage("float")) + } +} + +// String returns v as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (v Value) String() string { + switch v.typ { + case stringType: + return v.getString() + default: + return fmt.Sprint(v.Interface()) + } +} + +// Bytes returns v as a []byte and panics if the type is not a []byte. +func (v Value) Bytes() []byte { + switch v.typ { + case bytesType: + return v.getBytes() + default: + panic(v.panicMessage("bytes")) + } +} + +// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +func (v Value) Enum() EnumNumber { + switch v.typ { + case enumType: + return EnumNumber(v.num) + default: + panic(v.panicMessage("enum")) + } +} + +// Message returns v as a Message and panics if the type is not a Message. +func (v Value) Message() Message { + switch vi := v.getIface().(type) { + case Message: + return vi + default: + panic(v.panicMessage("message")) + } +} + +// List returns v as a List and panics if the type is not a List. +func (v Value) List() List { + switch vi := v.getIface().(type) { + case List: + return vi + default: + panic(v.panicMessage("list")) + } +} + +// Map returns v as a Map and panics if the type is not a Map. +func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a MapKey and panics for invalid MapKey types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key Kind (see MessageDescriptor.IsMapEntry). +// The following shows what Go type is used to represent each proto Kind: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a Value: +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an interface{}. 
+func (k MapKey) Interface() interface{} { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a Value. +func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go new file mode 100644 index 000000000..c45debdca --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 000000000..59f024c44 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,880 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The Files registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. +// Files only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. +// +// The Types registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "os" + "strings" + "sync" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. 
+var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]interface{} + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. +// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. 
+func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]interface{}{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; len(prev) > 0 { + r.checkGenProtoConflict(path) + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err + } + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ + return nil +} + +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
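A small sketch of the conflict behavior documented for RegisterFile. It uses a private Files registry, so the duplicate registration surfaces as an error instead of going through the GlobalFiles conflict policy; descriptor.proto is used only because its file descriptor is readily available from the descriptorpb package.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Obtain an already-built file descriptor (descriptor.proto itself).
	fd := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().ParentFile()

	files := new(protoregistry.Files)
	if err := files.RegisterFile(fd); err != nil {
		panic(err) // first registration succeeds
	}

	// Registering the same file again conflicts on its path and full names,
	// so the file is not added a second time.
	if err := files.RegisterFile(fd); err != nil {
		fmt.Println("conflict:", err)
	}
	fmt.Println("registered files:", files.NumFiles()) // 1
}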
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, NotFound) if not found. +func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, NotFound) if not found. 
+// This returns an error if multiple files have the same path. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) + } +} + +// NumFiles reports the number of registered files, +// including duplicate files with the same name. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numFiles +} + +// RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. +func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. + FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. 
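A brief usage sketch for the lookup and iteration methods above, assuming descriptorpb is imported somewhere in the program so that descriptor.proto is registered in GlobalFiles by its init function.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"

	_ "google.golang.org/protobuf/types/descriptorpb" // registers descriptor.proto in GlobalFiles
)

func main() {
	// Look up a file by path.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Package()) // google.protobuf

	// Look up a nested descriptor by its full name.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.FieldDescriptorProto.Type")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.FullName())

	// Misses return the sentinel NotFound error.
	if _, err := protoregistry.GlobalFiles.FindFileByPath("no/such/file.proto"); errors.Is(err, protoregistry.NotFound) {
		fmt.Println("not found, as expected")
	}

	// Iterate over every registered file in a proto package.
	protoregistry.GlobalFiles.RangeFilesByPackage("google.protobuf", func(f protoreflect.FileDescriptor) bool {
		fmt.Println(f.Path())
		return true
	})
}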
+ FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. +type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]interface{} + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. 
+ xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. 
+// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. + if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. +func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. 
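To make the Types registry concrete, here is an illustrative sketch that registers a message type in a private registry and looks it up by full name and by Any-style URL. It wraps an existing descriptor with dynamicpb.NewMessageType purely for convenience; in normal programs generated code performs the registration into GlobalTypes.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/dynamicpb"
)

func main() {
	// Wrap an existing descriptor in a protoreflect.MessageType.
	md := (&descriptorpb.DescriptorProto{}).ProtoReflect().Descriptor()
	mt := dynamicpb.NewMessageType(md)

	types := new(protoregistry.Types)
	if err := types.RegisterMessage(mt); err != nil {
		panic(err)
	}

	// Lookup by full name.
	if got, err := types.FindMessageByName("google.protobuf.DescriptorProto"); err == nil {
		fmt.Println(got.Descriptor().FullName())
	}

	// Lookup by URL; everything up to and including the last '/' is ignored.
	if got, err := types.FindMessageByURL("type.googleapis.com/google.protobuf.DescriptorProto"); err == nil {
		fmt.Println(got.Descriptor().FullName())
	}

	// A miss returns the sentinel NotFound error.
	_, err := types.FindEnumByName("example.NoSuchEnum")
	fmt.Println(errors.Is(err, protoregistry.NotFound)) // true
}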
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t interface{}) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr interface{}) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v interface{}) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 000000000..c58727675 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 000000000..32c04f67e --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. 
+package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. +type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. 
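Generated message implementations are what normally populate this table. Purely to illustrate the shape of the input and output structs, the sketch below fills a Methods value by delegating to the generic proto package; a real fast path would not do this, and the exampleMethods name is invented for the example.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
	"google.golang.org/protobuf/types/descriptorpb"
)

// exampleMethods shows the shape of a Methods table by delegating to the
// generic proto package (illustrative only, not a real fast path).
var exampleMethods = protoiface.Methods{
	Flags: protoiface.SupportMarshalDeterministic,
	Size: func(in protoiface.SizeInput) protoiface.SizeOutput {
		return protoiface.SizeOutput{Size: proto.Size(in.Message.Interface())}
	},
	Marshal: func(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
		// Must not fail on partial messages, hence AllowPartial.
		buf, err := proto.MarshalOptions{AllowPartial: true}.MarshalAppend(in.Buf, in.Message.Interface())
		return protoiface.MarshalOutput{Buf: buf}, err
	},
	Unmarshal: func(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
		// Must merge rather than reset the target message.
		err := proto.UnmarshalOptions{Merge: true, AllowPartial: true}.Unmarshal(in.Buf, in.Message.Interface())
		return protoiface.UnmarshalOutput{}, err
	},
}

func main() {
	m := &descriptorpb.FileDescriptorProto{Name: proto.String("example.proto")}
	out, err := exampleMethods.Marshal(protoiface.MarshalInput{Message: m.ProtoReflect()})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out.Buf), "bytes")
}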
+type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 000000000..4a1ab7fb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. + EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 000000000..ff094e1ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// * the runtime package is too old and needs to be updated OR +// * the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 000000000..abe4ab511 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,3957 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. 
+var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. +var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
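For orientation, a short sketch of how generated enum code like the above is typically used: Enum returns a pointer suitable for the optional fields of descriptor messages, while String and Number expose the enum's name and wire number.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Enum() yields a *FieldDescriptorProto_Type for the optional Type field.
	field := &descriptorpb.FieldDescriptorProto{
		Name:   proto.String("title"),
		Number: proto.Int32(1),
		Type:   descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
		Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
	}

	t := field.GetType()
	fmt.Println(t.String())       // TYPE_STRING
	fmt.Println(t.Number())       // 9
	fmt.Println(field.GetLabel()) // LABEL_OPTIONAL
}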
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. +var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. 
+var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
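A hedged sketch of the most common use of these descriptor messages: parsing a FileDescriptorSet produced by protoc --descriptor_set_out and resolving it into a protoregistry.Files for lookups. The file names example.pb and example.proto are hypothetical.

package main

import (
	"fmt"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Produced by: protoc --include_imports --descriptor_set_out=example.pb example.proto
	// (hypothetical file names)
	raw, err := os.ReadFile("example.pb")
	if err != nil {
		panic(err)
	}

	fds := &descriptorpb.FileDescriptorSet{}
	if err := proto.Unmarshal(raw, fds); err != nil {
		panic(err)
	}

	// Resolve the set into a registry of protoreflect.FileDescriptors.
	files, err := protodesc.NewFiles(fds)
	if err != nil {
		panic(err)
	}
	fmt.Println("files in set:", files.NumFiles())

	for _, f := range fds.GetFile() {
		fmt.Println(f.GetName(), "package", f.GetPackage())
	}
}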
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
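The DescriptorProto getters above expose a message's fields, its recursively nested types, and the reserved names and tag ranges documented on the struct. A short sketch (same caveats as the previous one) that walks that tree:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

// printTree lists message names, reserved tag ranges and reserved names,
// recursing into nested message types.
func printTree(indent string, msgs []*descriptorpb.DescriptorProto) {
	for _, m := range msgs {
		fmt.Println(indent + m.GetName())
		for _, r := range m.GetReservedRange() {
			// Start is inclusive, End exclusive for message reserved ranges.
			fmt.Printf("%s  reserved tags [%d, %d)\n", indent, r.GetStart(), r.GetEnd())
		}
		for _, n := range m.GetReservedName() {
			fmt.Printf("%s  reserved name %q\n", indent, n)
		}
		printTree(indent+"  ", m.GetNestedType())
	}
}

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
	if err != nil {
		log.Fatal(err)
	}
	printTree("", protodesc.ToFileDescriptorProto(fd).GetMessageType())
}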
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. 
However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
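The proto3_optional comment above is the subtle part of FieldDescriptorProto: a proto3 "optional" field is wrapped in a synthetic oneof and marks itself via proto3_optional. A helper sketch (descriptorutil and describeFields are illustrative names, not part of this package) that surfaces this through the getters:

package descriptorutil

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

// describeFields prints one line per field of a message descriptor.
func describeFields(m *descriptorpb.DescriptorProto) {
	for _, f := range m.GetField() {
		fmt.Printf("%s: number=%d type=%s label=%s",
			f.GetName(), f.GetNumber(), f.GetType(), f.GetLabel())
		if f.OneofIndex != nil {
			// OneofIndex points into the containing message's oneof_decl list.
			// Proto3 "optional" fields sit in a synthetic oneof and report
			// GetProto3Optional() == true; members of real oneofs report false.
			oneof := m.GetOneofDecl()[f.GetOneofIndex()]
			fmt.Printf(" oneof=%s proto3_optional=%v", oneof.GetName(), f.GetProto3Optional())
		}
		fmt.Println()
	}
}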
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
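ClientStreaming and ServerStreaming carry explicit defaults (def=0), so the getters defined below read as false when the fields are unset. In the same illustrative descriptorutil package as the earlier sketch (imports omitted), a unary RPC can therefore be detected without nil checks:

// isUnary reports whether a method descriptor describes a plain unary RPC;
// unset streaming flags fall back to the declared default, false.
func isUnary(m *descriptorpb.MethodDescriptorProto) bool {
	return !m.GetClientStreaming() && !m.GetServerStreaming()
}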
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
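Several FileOptions fields declare defaults (the Default_FileOptions_* constants above), and the getters that follow return those defaults when an option is unset, even on a nil *FileOptions. A sketch in the same illustrative descriptorutil package (imports omitted):

// printFileOptions shows how declared defaults surface through the getters.
func printFileOptions(fdp *descriptorpb.FileDescriptorProto) {
	opts := fdp.GetOptions() // may be nil if the file sets no options at all
	fmt.Println("go_package:      ", opts.GetGoPackage())
	fmt.Println("optimize_for:    ", opts.GetOptimizeFor())    // FileOptions_SPEED when unset
	fmt.Println("cc_enable_arenas:", opts.GetCcEnableArenas()) // true when unset (def=1)
	fmt.Println("deprecated:      ", opts.GetDeprecated())     // false when unset
}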
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func (x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type 
MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
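The map_entry option documented above is how map fields are modelled: the compiler synthesizes a nested *Entry message with map_entry = true, and the GetMapEntry accessor below reads it. Because every getter is nil-safe, the check chains through a possibly nil *MessageOptions without guards; a sketch in the same illustrative descriptorutil package (imports omitted):

// mapEntryTypes returns the nested message types synthesized for map fields,
// i.e. those whose MessageOptions set map_entry = true.
func mapEntryTypes(m *descriptorpb.DescriptorProto) []*descriptorpb.DescriptorProto {
	var entries []*descriptorpb.DescriptorProto
	for _, nested := range m.GetNestedType() {
		if nested.GetOptions().GetMapEntry() { // nil-safe even when Options is nil
			entries = append(entries, nested)
		}
	}
	return entries
}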
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. 
The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
+func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. +func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. 
+const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. 
+func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. +const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
+type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. +} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). 
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: 
google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, 
+ }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 000000000..8c10797b9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,498 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... 
// make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... // make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// +package anypb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + strings "strings" + sync "sync" +) + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... 
+// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead.
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. 
+func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []interface{}{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the 
sub-list for field type_name +} + +func init() { file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: file_google_protobuf_any_proto_depIdxs, + MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 000000000..a583ca2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,379 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... 
// make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 000000000..c9ae92132 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,390 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 000000000..e511ad6f7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,653 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. +var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. 
+func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. 
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." 
components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. 
+ GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 
0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 
0x10, 0x01, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go index f95606f90..b96d172cf 100644 --- a/vendor/gopkg.in/ini.v1/file.go +++ b/vendor/gopkg.in/ini.v1/file.go @@ -55,6 +55,9 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File { if len(opts.KeyValueDelimiterOnWrite) == 0 { opts.KeyValueDelimiterOnWrite = "=" } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." 
+ } return &File{ BlockMode: true, @@ -82,7 +85,7 @@ func (f *File) NewSection(name string) (*Section, error) { return nil, errors.New("empty section name") } - if f.options.Insensitive && name != DefaultSection { + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { name = strings.ToLower(name) } @@ -144,7 +147,7 @@ func (f *File) SectionsByName(name string) ([]*Section, error) { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -236,7 +239,7 @@ func (f *File) DeleteSectionWithIndex(name string, index int) error { if len(name) == 0 { name = DefaultSection } - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(name) } @@ -299,6 +302,9 @@ func (f *File) Reload() (err error) { } return err } + if f.options.ShortCircuit { + return nil + } } return nil } @@ -347,7 +353,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } } - if i > 0 || DefaultHeader { + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return nil, err } @@ -451,6 +457,8 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { val = `"""` + val + `"""` } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` } if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { return nil, err @@ -494,7 +502,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { // SaveToIndent writes content to file system with given value indention. func (f *File) SaveToIndent(filename, indent string) error { // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. + // so it's safer to save to a temporary file location and rename after done. buf, err := f.writeToBuffer(indent) if err != nil { return err diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 2961543f9..23f07422e 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -71,12 +71,18 @@ type LoadOptions struct { Loose bool // Insensitive indicates whether the parser forces all section and key names to lowercase. Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool // IgnoreContinuation indicates whether to ignore continuation lines while parsing. IgnoreContinuation bool // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. IgnoreInlineComment bool // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. 
AllowBooleanKeys bool @@ -107,8 +113,10 @@ type LoadOptions struct { UnparseableSections []string // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". KeyValueDelimiters string - // KeyValueDelimiters is the delimiter that are used to separate key and value output. By default, it is "=". + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). PreserveSurroundedQuote bool // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index ea6c08b02..65147166f 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -377,7 +377,7 @@ func (f *File) parse(reader io.Reader) (err error) { // Ignore error because default section name is never empty string. name := DefaultSection - if f.options.Insensitive { + if f.options.Insensitive || f.options.InsensitiveSections { name = strings.ToLower(DefaultSection) } section, _ := f.NewSection(name) @@ -469,7 +469,7 @@ func (f *File) parse(reader io.Reader) (err error) { inUnparseableSection = false for i := range f.options.UnparseableSections { if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) { + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { inUnparseableSection = true continue } diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go index 6ba5ac290..afaa97c97 100644 --- a/vendor/gopkg.in/ini.v1/section.go +++ b/vendor/gopkg.in/ini.v1/section.go @@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) { func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } @@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) { if s.f.BlockMode { s.f.lock.RLock() } - if s.f.options.Insensitive { + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { name = strings.ToLower(name) } key := s.keys[name] @@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) { // Check if it is a child-section. sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key { var parentKeys []*Key sname := s.name for { - if i := strings.LastIndex(sname, "."); i > -1 { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { sname = sname[:i] sec, err := s.f.GetSection(sname) if err != nil { @@ -245,7 +245,7 @@ func (s *Section) DeleteKey(name string) { // For example, "[parent.child1]" and "[parent.child12]" are child sections // of section "[parent]". func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
+ prefix := s.name + s.f.options.ChildSectionDelimiter children := make([]*Section, 0, 3) for _, name := range s.f.sectionList { if strings.HasPrefix(name, prefix) { diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go index 9be40a920..a486b2fe0 100644 --- a/vendor/gopkg.in/ini.v1/struct.go +++ b/vendor/gopkg.in/ini.v1/struct.go @@ -263,24 +263,21 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri return nil } -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) { - opts := strings.SplitN(tag, ",", 4) +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - if len(opts) > 3 { - allowNonUnique = opts[3] == "nonunique" - } - return rawName, omitEmpty, allowShadow, allowNonUnique + return rawName, omitEmpty, allowShadow, allowNonUnique, extends } // mapToField maps the given value to the matching field of the given section. // The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. -func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error { +func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { if val.Kind() == reflect.Ptr { val = val.Elem() } @@ -295,7 +292,7 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) continue } - rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue @@ -303,12 +300,26 @@ func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) isStruct := tpField.Type.Kind() == reflect.Struct isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymous { + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { field.Set(reflect.New(tpField.Type.Elem())) } - if isAnonymous || isStruct || isStructPtr { + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { if secs, err := s.f.SectionsByName(fieldName); err == nil { if len(secs) <= sectionIndex { return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) @@ -318,7 +329,7 @@ func (s *Section) mapToField(val 
reflect.Value, isStrict bool, sectionIndex int) if isStructPtr && field.IsNil() { field.Set(reflect.New(tpField.Type.Elem())) } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil { + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { return fmt.Errorf("map to field %q: %v", fieldName, err) } continue @@ -357,7 +368,7 @@ func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) ( typ := val.Type().Elem() for i, sec := range secs { elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i); err != nil { + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) } @@ -387,7 +398,7 @@ func (s *Section) mapTo(v interface{}, isStrict bool) error { return nil } - return s.mapToField(val, isStrict, 0) + return s.mapToField(val, isStrict, 0, s.name) } // MapTo maps section to given struct. @@ -479,7 +490,7 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all _ = keyWithShadows.AddShadow(val) } } - key = keyWithShadows + *key = *keyWithShadows return nil } @@ -581,7 +592,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag) + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) if omitEmpty && isEmptyValue(field) { continue } @@ -595,7 +606,14 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 055480b9e..7348c50c0 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 1f7e87e67..acf71402c 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } +var disableLineWrapping = false + // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -87,6 +89,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } + if disableLineWrapping { + emitter.best_width = -1 + } } // Destroy an emitter object. 
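Note on the yaml.v2 bump above: the apic.go hunk gates the emitter's best_width on a new package-level disableLineWrapping flag, and the yaml.go hunk further down adds FutureLineWrap(), which sets that flag. The following is only an illustrative sketch (not part of this patch) of how a caller of the vendored gopkg.in/yaml.v2 (v2.4.0+) would opt in; the map key and value are hypothetical.

package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Opt in once, before encoding: long scalar strings are then emitted on a
	// single line instead of being wrapped at the emitter's default width,
	// matching the default behaviour of yaml.v3.
	yaml.FutureLineWrap()

	doc := map[string]string{
		// Hypothetical value, long enough that v2 would otherwise wrap it.
		"token": strings.Repeat("a", 120),
	}

	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}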
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod index 1934e8769..2cbb85aea 100644 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ b/vendor/gopkg.in/yaml.v2/go.mod @@ -1,5 +1,5 @@ -module "gopkg.in/yaml.v2" +module gopkg.in/yaml.v2 -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) +go 1.15 + +require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 89650e293..30813884c 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool { } return false } + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml deleted file mode 100644 index 04d4dae09..000000000 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "tip" - -go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go index 65846e674..ae7d049f1 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -108,6 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index be63169b7..df36e3a30 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -35,6 +35,7 @@ type parser struct { doc *Node anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -108,14 +109,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ 
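A minimal sketch of calling the FutureLineWrap helper added to yaml.v2 in the hunk above. It flips a package-level switch, so a single call affects every encoder created afterwards; the document being marshalled here is only an example.

// Illustrative sketch, not part of the patch.
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Disable line wrapping of long scalars, matching the v3 default as
	// described in the function's doc comment.
	yaml.FutureLineWrap()

	doc := map[string]string{"note": strings.Repeat("lorem ipsum ", 20)}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	// The long value is emitted on a single line instead of being folded
	// across multiple lines.
	fmt.Printf("%s", out)
}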
-169,17 +174,20 @@ func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { } else if kind == ScalarNode { tag, _ = resolve("", value) } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) } + return n } func (p *parser) parseChild(parent *Node) *Node { @@ -497,8 +505,13 @@ func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { good = d.mapping(n, out) case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } @@ -533,6 +546,17 @@ func resetMap(out reflect.Value) { } } +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} @@ -550,14 +574,7 @@ func (d *decoder) scalar(n *Node, out reflect.Value) bool { } } if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. @@ -791,8 +808,10 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) + mapIsNew = true } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { @@ -809,7 +828,7 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { failf("invalid map key: %#v", k.Interface()) } e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { out.SetMapIndex(k, e) } } diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go index ab2a06619..0f47c9ca8 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -235,10 +235,13 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. 
+ emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) } } return true @@ -725,16 +728,9 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } - if emitter.indent > original+2 { - emitter.indent -= 2 - } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] @@ -785,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -810,6 +813,27 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false @@ -823,6 +847,10 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return true } +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + // Expect a node. 
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { @@ -1866,7 +1894,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } //emitter.indention = true @@ -1903,10 +1931,10 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } + //emitter.indention = true emitter.whitespace = true diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go index 1f37271ce..de9e72a3e 100644 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -119,6 +119,14 @@ func (e *encoder) marshal(tag string, in reflect.Value) { case *Node: e.nodev(in) return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return case time.Time: e.timev(tag, in) return @@ -422,18 +430,23 @@ func (e *encoder) nodev(in reflect.Value) { } func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + // If the tag was not explicitly requested, and dropping it won't change the // implicit tag of the value, don't include it in the presentation. var tag = node.Tag var stag = shortTag(tag) - var rtag string var forceQuoting bool if tag != "" && node.Style&TaggedStyle == 0 { if node.Kind == ScalarNode { if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { tag = "" } else { - rtag, _ = resolve("", node.Value) + rtag, _ := resolve("", node.Value) if rtag == stag { tag = "" } else if stag == strTag { @@ -442,6 +455,7 @@ func (e *encoder) node(node *Node, tail string) { } } } else { + var rtag string switch node.Kind { case MappingNode: rtag = mapTag @@ -471,7 +485,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_SEQUENCE_STYLE } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) e.event.head_comment = []byte(node.HeadComment) e.emit() for _, node := range node.Content { @@ -487,7 +501,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_MAPPING_STYLE } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) e.event.tail_comment = []byte(tail) e.event.head_comment = []byte(node.HeadComment) e.emit() @@ -528,11 +542,11 @@ func (e *encoder) node(node *Node, tail string) { case ScalarNode: value := node.Value if !utf8.ValidString(value) { - if tag == binaryTag { + if stag == binaryTag { failf("explicitly tagged !!binary data must be base64-encoded") } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) } // It can't be encoded directly as YAML so use a binary tag // and encode it as base64. 
@@ -557,5 +571,7 @@ func (e *encoder) node(node *Node, tail string) { } e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) } } diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index aea9050b8..ac66fccc0 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -648,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -694,25 +698,13 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark - prior_head := len(parser.head_comment) + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) @@ -754,7 +746,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -780,6 +774,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) 
+ } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index 57e954ca5..ca0070108 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -749,6 +749,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { if !ok { return } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } if !yaml_parser_scan_line_comment(parser, comment_mark) { ok = false return @@ -2255,10 +2260,9 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l } } if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -2856,13 +2860,12 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t return false } skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark } + text = read(parser, text) + } else { skip(parser) } } @@ -2888,6 +2891,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo var token_mark = token.start_mark var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } var recent_empty = false var first_empty = parser.newlines <= 1 @@ -2919,15 +2926,18 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { // This is the first empty line and there were no empty lines before, // so this initial part of the comment is a foot of the prior token // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. if len(text) > 0 { - if start_mark.column-1 < parser.indent { + if start_mark.column-1 < next_indent { // If dedented it's unrelated to the prior token. token_mark = start_mark } @@ -2958,7 +2968,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } - if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { // The comment at the different indentation is a foot of the // preceding data rather than a head of the upcoming one. 
parser.comments = append(parser.comments, yaml_comment_t{ @@ -2999,10 +3009,9 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo return false } skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) } else { - if parser.mark.index >= seen { - text = append(text, parser.buffer[parser.buffer_pos]) - } skip(parser) } } @@ -3010,6 +3019,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo peek = 0 column = 0 line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } } if len(text) > 0 { diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index b5d35a50d..8cec6da48 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -89,7 +89,7 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { parser *parser knownFields bool @@ -194,7 +194,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -252,6 +252,24 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + // SetIndent changes the used indentation used when encoding. func (e *Encoder) SetIndent(spaces int) { if spaces < 0 { @@ -328,6 +346,12 @@ const ( // and maps, Node is an intermediate representation that allows detailed // control over the content being decoded or encoded. // +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// // Values that make use of the Node type interact with the yaml package in the // same way any other type would do, by encoding and decoding yaml data // directly or indirectly into them. @@ -391,6 +415,13 @@ type Node struct { Column int } +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. 
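A short sketch, based on the yaml.v3 yaml.go hunk above, of encoding a Go value into an intermediate Node via the new Node.Encode method and of the zero-Node behaviour added by IsZero/ShortTag. The map contents and the comment text are hypothetical.

// Illustrative sketch, not part of the patch.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Encode an arbitrary Go value into a Node so it can be inspected or
	// decorated (for example with comments) before marshalling.
	var n yaml.Node
	if err := n.Encode(map[string]int{"replicas": 3}); err != nil {
		panic(err)
	}
	n.HeadComment = "generated example"

	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	// A zero Node is now treated as a null value when encoded.
	var zero yaml.Node
	fmt.Println(zero.IsZero(), zero.ShortTag()) // true !!null
}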
@@ -418,6 +449,11 @@ func (n *Node) ShortTag() string { case ScalarNode: tag, _ := resolve("", n.Value) return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } } return "" } diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 2719cfbb0..7c6d00770 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -787,6 +787,8 @@ type yaml_emitter_t struct { foot_comment []byte tail_comment []byte + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/honnef.co/go/tools/LICENSE b/vendor/honnef.co/go/tools/LICENSE new file mode 100644 index 000000000..dfd031454 --- /dev/null +++ b/vendor/honnef.co/go/tools/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY new file mode 100644 index 000000000..f2c0fa93a --- /dev/null +++ b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY @@ -0,0 +1,121 @@ +Staticcheck and its related tools make use of third party projects, +either by reusing their code, or by statically linking them into +resulting binaries. These projects are: + +* The Go Programming Language - https://golang.org/ + golang.org/x/mod - https://github.com/golang/mod + golang.org/x/tools - https://github.com/golang/tools + golang.org/x/sys - https://github.com/golang/sys + golang.org/x/xerrors - https://github.com/golang/xerrors + + Copyright (c) 2009 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml + + The MIT License (MIT) + + Copyright (c) 2013 TOML authors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +* gogrep - https://github.com/mvdan/gogrep + + Copyright (c) 2017, Daniel Martí. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +* gosmith - https://github.com/dvyukov/gosmith + + Copyright (c) 2014 Dmitry Vyukov. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * The name of Dmitry Vyukov may be used to endorse or promote + products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/honnef.co/go/tools/analysis/code/code.go b/vendor/honnef.co/go/tools/analysis/code/code.go new file mode 100644 index 000000000..27b01c4ec --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/code/code.go @@ -0,0 +1,294 @@ +// Package code answers structural and type questions about Go code. +package code + +import ( + "flag" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "honnef.co/go/tools/analysis/facts" + "honnef.co/go/tools/go/ast/astutil" + "honnef.co/go/tools/go/types/typeutil" + + "golang.org/x/tools/go/analysis" +) + +type Positioner interface { + Pos() token.Pos +} + +func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { + return typeutil.IsType(pass.TypesInfo.TypeOf(expr), name) +} + +func IsInTest(pass *analysis.Pass, node Positioner) bool { + // FIXME(dh): this doesn't work for global variables with + // initializers + f := pass.Fset.File(node.Pos()) + return f != nil && strings.HasSuffix(f.Name(), "_test.go") +} + +// IsMain reports whether the package being processed is a package +// main. +func IsMain(pass *analysis.Pass) bool { + return pass.Pkg.Name() == "main" +} + +// IsMainLike reports whether the package being processed is a +// main-like package. A main-like package is a package that is +// package main, or that is intended to be used by a tool framework +// such as cobra to implement a command. +// +// Note that this function errs on the side of false positives; it may +// return true for packages that aren't main-like. IsMainLike is +// intended for analyses that wish to suppress diagnostics for +// main-like packages to avoid false positives. 
+func IsMainLike(pass *analysis.Pass) bool { + if pass.Pkg.Name() == "main" { + return true + } + for _, imp := range pass.Pkg.Imports() { + if imp.Path() == "github.com/spf13/cobra" { + return true + } + } + return false +} + +func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { + info := pass.TypesInfo + sel := info.Selections[expr] + if sel == nil { + if x, ok := expr.X.(*ast.Ident); ok { + pkg, ok := info.ObjectOf(x).(*types.PkgName) + if !ok { + // This shouldn't happen + return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) + } + return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) + } + panic(fmt.Sprintf("unsupported selector: %v", expr)) + } + return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) +} + +func IsNil(pass *analysis.Pass, expr ast.Expr) bool { + return pass.TypesInfo.Types[expr].IsNil() +} + +func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { + val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() + return constant.BoolVal(val) +} + +func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { + // We explicitly don't support typed bools because more often than + // not, custom bool types are used as binary enums and the + // explicit comparison is desired. + + ident, ok := expr.(*ast.Ident) + if !ok { + return false + } + obj := pass.TypesInfo.ObjectOf(ident) + c, ok := obj.(*types.Const) + if !ok { + return false + } + basic, ok := c.Type().(*types.Basic) + if !ok { + return false + } + if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { + return false + } + return true +} + +func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { + tv := pass.TypesInfo.Types[expr] + if tv.Value == nil { + return 0, false + } + if tv.Value.Kind() != constant.Int { + return 0, false + } + return constant.Int64Val(tv.Value) +} + +func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { + val := pass.TypesInfo.Types[expr].Value + if val == nil { + return "", false + } + if val.Kind() != constant.String { + return "", false + } + return constant.StringVal(val), true +} + +func CallName(pass *analysis.Pass, call *ast.CallExpr) string { + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.SelectorExpr: + fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) + if !ok { + return "" + } + return typeutil.FuncName(fn) + case *ast.Ident: + obj := pass.TypesInfo.ObjectOf(fun) + switch obj := obj.(type) { + case *types.Func: + return typeutil.FuncName(obj) + case *types.Builtin: + return obj.Name() + default: + return "" + } + default: + return "" + } +} + +func IsCallTo(pass *analysis.Pass, node ast.Node, name string) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return false + } + return CallName(pass, call) == name +} + +func IsCallToAny(pass *analysis.Pass, node ast.Node, names ...string) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return false + } + q := CallName(pass, call) + for _, name := range names { + if q == name { + return true + } + } + return false +} + +func File(pass *analysis.Pass, node Positioner) *ast.File { + m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File) + return m[pass.Fset.File(node.Pos())] +} + +// IsGenerated reports whether pos is in a generated file, It ignores +// //line directives. +func IsGenerated(pass *analysis.Pass, pos token.Pos) bool { + _, ok := Generator(pass, pos) + return ok +} + +// Generator returns the generator that generated the file containing +// pos. It ignores //line directives. 
+func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { + file := pass.Fset.PositionFor(pos, false).Filename + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + g, ok := m[file] + return g, ok +} + +// MayHaveSideEffects reports whether expr may have side effects. If +// the purity argument is nil, this function implements a purely +// syntactic check, meaning that any function call may have side +// effects, regardless of the called function's body. Otherwise, +// purity will be consulted to determine the purity of function calls. +func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityResult) bool { + switch expr := expr.(type) { + case *ast.BadExpr: + return true + case *ast.Ellipsis: + return MayHaveSideEffects(pass, expr.Elt, purity) + case *ast.FuncLit: + // the literal itself cannot have side effects, only calling it + // might, which is handled by CallExpr. + return false + case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: + // types cannot have side effects + return false + case *ast.BasicLit: + return false + case *ast.BinaryExpr: + return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Y, purity) + case *ast.CallExpr: + if purity == nil { + return true + } + switch obj := typeutil.Callee(pass.TypesInfo, expr).(type) { + case *types.Func: + if _, ok := purity[obj]; !ok { + return true + } + case *types.Builtin: + switch obj.Name() { + case "len", "cap": + default: + return true + } + default: + return true + } + for _, arg := range expr.Args { + if MayHaveSideEffects(pass, arg, purity) { + return true + } + } + return false + case *ast.CompositeLit: + if MayHaveSideEffects(pass, expr.Type, purity) { + return true + } + for _, elt := range expr.Elts { + if MayHaveSideEffects(pass, elt, purity) { + return true + } + } + return false + case *ast.Ident: + return false + case *ast.IndexExpr: + return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity) + case *ast.KeyValueExpr: + return MayHaveSideEffects(pass, expr.Key, purity) || MayHaveSideEffects(pass, expr.Value, purity) + case *ast.SelectorExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.SliceExpr: + return MayHaveSideEffects(pass, expr.X, purity) || + MayHaveSideEffects(pass, expr.Low, purity) || + MayHaveSideEffects(pass, expr.High, purity) || + MayHaveSideEffects(pass, expr.Max, purity) + case *ast.StarExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.TypeAssertExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.UnaryExpr: + if MayHaveSideEffects(pass, expr.X, purity) { + return true + } + return expr.Op == token.ARROW + case *ast.ParenExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case nil: + return false + default: + panic(fmt.Sprintf("internal error: unhandled type %T", expr)) + } +} + +func IsGoVersion(pass *analysis.Pass, minor int) bool { + f, ok := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter) + if !ok { + panic("requested Go version, but analyzer has no version flag") + } + version := f.Get().(int) + return version >= minor +} diff --git a/vendor/honnef.co/go/tools/analysis/code/visit.go b/vendor/honnef.co/go/tools/analysis/code/visit.go new file mode 100644 index 000000000..f8bf2d169 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/code/visit.go @@ -0,0 +1,51 @@ +package code + +import ( + "bytes" + "go/ast" + "go/format" + + "honnef.co/go/tools/pattern" + + 
"golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +func Preorder(pass *analysis.Pass, fn func(ast.Node), types ...ast.Node) { + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder(types, fn) +} + +func PreorderStack(pass *analysis.Pass, fn func(ast.Node, []ast.Node), types ...ast.Node) { + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).WithStack(types, func(n ast.Node, push bool, stack []ast.Node) (proceed bool) { + if push { + fn(n, stack) + } + return true + }) +} + +func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) { + // Note that we ignore q.Relevant – callers of Match usually use + // AST inspectors that already filter on nodes we're interested + // in. + m := &pattern.Matcher{TypesInfo: pass.TypesInfo} + ok := m.Match(q.Root, node) + return m, ok +} + +func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) { + m, ok := Match(pass, before, node) + if !ok { + return m, nil, false + } + r := pattern.NodeToAST(after.Root, m.State) + buf := &bytes.Buffer{} + format.Node(buf, pass.Fset, r) + edit := []analysis.TextEdit{{ + Pos: node.Pos(), + End: node.End(), + NewText: buf.Bytes(), + }} + return m, edit, true +} diff --git a/vendor/honnef.co/go/tools/analysis/edit/edit.go b/vendor/honnef.co/go/tools/analysis/edit/edit.go new file mode 100644 index 000000000..90bc5f8cc --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/edit/edit.go @@ -0,0 +1,74 @@ +package edit + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/pattern" +) + +type Ranger interface { + Pos() token.Pos + End() token.Pos +} + +type Range [2]token.Pos + +func (r Range) Pos() token.Pos { return r[0] } +func (r Range) End() token.Pos { return r[1] } + +func ReplaceWithString(fset *token.FileSet, old Ranger, new string) analysis.TextEdit { + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: []byte(new), + } +} + +func ReplaceWithNode(fset *token.FileSet, old Ranger, new ast.Node) analysis.TextEdit { + buf := &bytes.Buffer{} + if err := format.Node(buf, fset, new); err != nil { + panic("internal error: " + err.Error()) + } + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: buf.Bytes(), + } +} + +func ReplaceWithPattern(pass *analysis.Pass, after pattern.Pattern, state pattern.State, node Ranger) analysis.TextEdit { + r := pattern.NodeToAST(after.Root, state) + buf := &bytes.Buffer{} + format.Node(buf, pass.Fset, r) + return analysis.TextEdit{ + Pos: node.Pos(), + End: node.End(), + NewText: buf.Bytes(), + } +} + +func Delete(old Ranger) analysis.TextEdit { + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: nil, + } +} + +func Fix(msg string, edits ...analysis.TextEdit) analysis.SuggestedFix { + return analysis.SuggestedFix{ + Message: msg, + TextEdits: edits, + } +} + +func Selector(x, sel string) *ast.SelectorExpr { + return &ast.SelectorExpr{ + X: &ast.Ident{Name: x}, + Sel: &ast.Ident{Name: sel}, + } +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/deprecated.go b/vendor/honnef.co/go/tools/analysis/facts/deprecated.go new file mode 100644 index 000000000..dbc5ede5c --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/deprecated.go @@ -0,0 +1,145 @@ +package facts + +import ( + "go/ast" + "go/token" + "go/types" + "reflect" + "strings" + + 
"golang.org/x/tools/go/analysis" +) + +type IsDeprecated struct{ Msg string } + +func (*IsDeprecated) AFact() {} +func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg } + +type DeprecatedResult struct { + Objects map[types.Object]*IsDeprecated + Packages map[*types.Package]*IsDeprecated +} + +var Deprecated = &analysis.Analyzer{ + Name: "fact_deprecated", + Doc: "Mark deprecated objects", + Run: deprecated, + FactTypes: []analysis.Fact{(*IsDeprecated)(nil)}, + ResultType: reflect.TypeOf(DeprecatedResult{}), +} + +func deprecated(pass *analysis.Pass) (interface{}, error) { + var names []*ast.Ident + + extractDeprecatedMessage := func(docs []*ast.CommentGroup) string { + for _, doc := range docs { + if doc == nil { + continue + } + parts := strings.Split(doc.Text(), "\n\n") + for _, part := range parts { + if !strings.HasPrefix(part, "Deprecated: ") { + continue + } + alt := part[len("Deprecated: "):] + alt = strings.Replace(alt, "\n", " ", -1) + return alt + } + } + return "" + } + doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) { + alt := extractDeprecatedMessage(docs) + if alt == "" { + return + } + + for _, name := range names { + obj := pass.TypesInfo.ObjectOf(name) + pass.ExportObjectFact(obj, &IsDeprecated{alt}) + } + } + + var docs []*ast.CommentGroup + for _, f := range pass.Files { + docs = append(docs, f.Doc) + } + if alt := extractDeprecatedMessage(docs); alt != "" { + // Don't mark package syscall as deprecated, even though + // it is. A lot of people still use it for simple + // constants like SIGKILL, and I am not comfortable + // telling them to use x/sys for that. + if pass.Pkg.Path() != "syscall" { + pass.ExportPackageFact(&IsDeprecated{alt}) + } + } + + docs = docs[:0] + for _, f := range pass.Files { + fn := func(node ast.Node) bool { + if node == nil { + return true + } + var ret bool + switch node := node.(type) { + case *ast.GenDecl: + switch node.Tok { + case token.TYPE, token.CONST, token.VAR: + docs = append(docs, node.Doc) + return true + default: + return false + } + case *ast.FuncDecl: + docs = append(docs, node.Doc) + names = []*ast.Ident{node.Name} + ret = false + case *ast.TypeSpec: + docs = append(docs, node.Doc) + names = []*ast.Ident{node.Name} + ret = true + case *ast.ValueSpec: + docs = append(docs, node.Doc) + names = node.Names + ret = false + case *ast.File: + return true + case *ast.StructType: + for _, field := range node.Fields.List { + doDocs(field.Names, []*ast.CommentGroup{field.Doc}) + } + return false + case *ast.InterfaceType: + for _, field := range node.Methods.List { + doDocs(field.Names, []*ast.CommentGroup{field.Doc}) + } + return false + default: + return false + } + if len(names) == 0 || len(docs) == 0 { + return ret + } + doDocs(names, docs) + + docs = docs[:0] + names = nil + return ret + } + ast.Inspect(f, fn) + } + + out := DeprecatedResult{ + Objects: map[types.Object]*IsDeprecated{}, + Packages: map[*types.Package]*IsDeprecated{}, + } + + for _, fact := range pass.AllObjectFacts() { + out.Objects[fact.Object] = fact.Fact.(*IsDeprecated) + } + for _, fact := range pass.AllPackageFacts() { + out.Packages[fact.Package] = fact.Fact.(*IsDeprecated) + } + + return out, nil +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/directives.go b/vendor/honnef.co/go/tools/analysis/facts/directives.go new file mode 100644 index 000000000..800fce2e0 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/directives.go @@ -0,0 +1,20 @@ +package facts + +import ( + "reflect" + + 
"golang.org/x/tools/go/analysis" + "honnef.co/go/tools/analysis/lint" +) + +func directives(pass *analysis.Pass) (interface{}, error) { + return lint.ParseDirectives(pass.Files, pass.Fset), nil +} + +var Directives = &analysis.Analyzer{ + Name: "directives", + Doc: "extracts linter directives", + Run: directives, + RunDespiteErrors: true, + ResultType: reflect.TypeOf([]lint.Directive{}), +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/generated.go b/vendor/honnef.co/go/tools/analysis/facts/generated.go new file mode 100644 index 000000000..058fd8922 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/generated.go @@ -0,0 +1,97 @@ +package facts + +import ( + "bufio" + "bytes" + "io" + "os" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type Generator int + +// A list of known generators we can detect +const ( + Unknown Generator = iota + Goyacc + Cgo + Stringer + ProtocGenGo +) + +var ( + // used by cgo before Go 1.11 + oldCgo = []byte("// Created by cgo - DO NOT EDIT") + prefix = []byte("// Code generated ") + suffix = []byte(" DO NOT EDIT.") + nl = []byte("\n") + crnl = []byte("\r\n") +) + +func isGenerated(path string) (Generator, bool) { + f, err := os.Open(path) + if err != nil { + return 0, false + } + defer f.Close() + br := bufio.NewReader(f) + for { + s, err := br.ReadBytes('\n') + if err != nil && err != io.EOF { + return 0, false + } + s = bytes.TrimSuffix(s, crnl) + s = bytes.TrimSuffix(s, nl) + if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) { + if len(s)-len(suffix) < len(prefix) { + return Unknown, true + } + + text := string(s[len(prefix) : len(s)-len(suffix)]) + switch text { + case "by goyacc.": + return Goyacc, true + case "by cmd/cgo;": + return Cgo, true + case "by protoc-gen-go.": + return ProtocGenGo, true + } + if strings.HasPrefix(text, `by "stringer `) { + return Stringer, true + } + if strings.HasPrefix(text, `by goyacc `) { + return Goyacc, true + } + + return Unknown, true + } + if bytes.Equal(s, oldCgo) { + return Cgo, true + } + if err == io.EOF { + break + } + } + return 0, false +} + +var Generated = &analysis.Analyzer{ + Name: "isgenerated", + Doc: "annotate file names that have been code generated", + Run: func(pass *analysis.Pass) (interface{}, error) { + m := map[string]Generator{} + for _, f := range pass.Files { + path := pass.Fset.PositionFor(f.Pos(), false).Filename + g, ok := isGenerated(path) + if ok { + m[path] = g + } + } + return m, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(map[string]Generator{}), +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go new file mode 100644 index 000000000..4d49d1327 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go @@ -0,0 +1,242 @@ +package nilness + +import ( + "fmt" + "go/token" + "go/types" + "reflect" + + "honnef.co/go/tools/go/ir" + "honnef.co/go/tools/go/types/typeutil" + "honnef.co/go/tools/internal/passes/buildir" + + "golang.org/x/tools/go/analysis" +) + +// neverReturnsNilFact denotes that a function's return value will never +// be nil (typed or untyped). The analysis errs on the side of false +// negatives. 
+type neverReturnsNilFact struct { + Rets []neverNilness +} + +func (*neverReturnsNilFact) AFact() {} +func (fact *neverReturnsNilFact) String() string { + return fmt.Sprintf("never returns nil: %v", fact.Rets) +} + +type Result struct { + m map[*types.Func][]neverNilness +} + +var Analysis = &analysis.Analyzer{ + Name: "nilness", + Doc: "Annotates return values that will never be nil (typed or untyped)", + Run: run, + Requires: []*analysis.Analyzer{buildir.Analyzer}, + FactTypes: []analysis.Fact{(*neverReturnsNilFact)(nil)}, + ResultType: reflect.TypeOf((*Result)(nil)), +} + +// MayReturnNil reports whether the ret's return value of fn might be +// a typed or untyped nil value. The value of ret is zero-based. When +// globalOnly is true, the only possible nil values are global +// variables. +// +// The analysis has false positives: MayReturnNil can incorrectly +// report true, but never incorrectly reports false. +func (r *Result) MayReturnNil(fn *types.Func, ret int) (yes bool, globalOnly bool) { + if !typeutil.IsPointerLike(fn.Type().(*types.Signature).Results().At(ret).Type()) { + return false, false + } + if len(r.m[fn]) == 0 { + return true, false + } + + v := r.m[fn][ret] + return v != neverNil, v == onlyGlobal +} + +func run(pass *analysis.Pass) (interface{}, error) { + seen := map[*ir.Function]struct{}{} + out := &Result{ + m: map[*types.Func][]neverNilness{}, + } + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + impl(pass, fn, seen) + } + + for _, fact := range pass.AllObjectFacts() { + out.m[fact.Object.(*types.Func)] = fact.Fact.(*neverReturnsNilFact).Rets + } + + return out, nil +} + +type neverNilness uint8 + +const ( + neverNil neverNilness = 1 + onlyGlobal neverNilness = 2 + nilly neverNilness = 3 +) + +func (n neverNilness) String() string { + switch n { + case neverNil: + return "never" + case onlyGlobal: + return "global" + case nilly: + return "nil" + default: + return "BUG" + } +} + +func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{}) []neverNilness { + if fn.Object() == nil { + // TODO(dh): support closures + return nil + } + if fact := new(neverReturnsNilFact); pass.ImportObjectFact(fn.Object(), fact) { + return fact.Rets + } + if fn.Pkg != pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg { + return nil + } + if fn.Blocks == nil { + return nil + } + if _, ok := seenFns[fn]; ok { + // break recursion + return nil + } + + seenFns[fn] = struct{}{} + + seen := map[ir.Value]struct{}{} + + var mightReturnNil func(v ir.Value) neverNilness + mightReturnNil = func(v ir.Value) neverNilness { + if _, ok := seen[v]; ok { + // break cycle + return nilly + } + if !typeutil.IsPointerLike(v.Type()) { + return neverNil + } + seen[v] = struct{}{} + switch v := v.(type) { + case *ir.MakeInterface: + return mightReturnNil(v.X) + case *ir.Convert: + return mightReturnNil(v.X) + case *ir.Slice: + return mightReturnNil(v.X) + case *ir.Phi: + ret := neverNil + for _, e := range v.Edges { + if n := mightReturnNil(e); n > ret { + ret = n + } + } + return ret + case *ir.Extract: + switch d := v.Tuple.(type) { + case *ir.Call: + if callee := d.Call.StaticCallee(); callee != nil { + ret := impl(pass, callee, seenFns) + if len(ret) == 0 { + return nilly + } + return ret[v.Index] + } else { + return nilly + } + case *ir.TypeAssert, *ir.Next, *ir.Select, *ir.MapLookup, *ir.TypeSwitch, *ir.Recv: + // we don't need to look at the Extract's index + // because we've already checked its type. 
+ return nilly + default: + panic(fmt.Sprintf("internal error: unhandled type %T", d)) + } + case *ir.Call: + if callee := v.Call.StaticCallee(); callee != nil { + ret := impl(pass, callee, seenFns) + if len(ret) == 0 { + return nilly + } + return ret[0] + } else { + return nilly + } + case *ir.BinOp, *ir.UnOp, *ir.Alloc, *ir.FieldAddr, *ir.IndexAddr, *ir.Global, *ir.MakeSlice, *ir.MakeClosure, *ir.Function, *ir.MakeMap, *ir.MakeChan: + return neverNil + case *ir.Sigma: + iff, ok := v.From.Control().(*ir.If) + if !ok { + return nilly + } + binop, ok := iff.Cond.(*ir.BinOp) + if !ok { + return nilly + } + isNil := func(v ir.Value) bool { + k, ok := v.(*ir.Const) + if !ok { + return false + } + return k.Value == nil + } + if binop.X == v.X && isNil(binop.Y) || binop.Y == v.X && isNil(binop.X) { + op := binop.Op + if v.From.Succs[0] != v.Block() { + // we're in the false branch, negate op + switch op { + case token.EQL: + op = token.NEQ + case token.NEQ: + op = token.EQL + default: + panic(fmt.Sprintf("internal error: unhandled token %v", op)) + } + } + switch op { + case token.EQL: + return nilly + case token.NEQ: + return neverNil + default: + panic(fmt.Sprintf("internal error: unhandled token %v", op)) + } + } + return nilly + case *ir.ChangeType: + return mightReturnNil(v.X) + case *ir.Load: + if _, ok := v.X.(*ir.Global); ok { + return onlyGlobal + } + return nilly + case *ir.TypeAssert, *ir.ChangeInterface, *ir.Field, *ir.Const, *ir.Index, *ir.MapLookup, *ir.Parameter, *ir.Recv, *ir.TypeSwitch: + return nilly + default: + panic(fmt.Sprintf("internal error: unhandled type %T", v)) + } + } + ret := fn.Exit.Control().(*ir.Return) + out := make([]neverNilness, len(ret.Results)) + export := false + for i, v := range ret.Results { + v := mightReturnNil(v) + out[i] = v + if v != nilly && typeutil.IsPointerLike(fn.Signature.Results().At(i).Type()) { + export = true + } + } + if export { + pass.ExportObjectFact(fn.Object(), &neverReturnsNilFact{out}) + } + return out +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/purity.go b/vendor/honnef.co/go/tools/analysis/facts/purity.go new file mode 100644 index 000000000..582b6209e --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/purity.go @@ -0,0 +1,178 @@ +package facts + +import ( + "go/types" + "reflect" + + "honnef.co/go/tools/go/ir" + "honnef.co/go/tools/go/ir/irutil" + "honnef.co/go/tools/internal/passes/buildir" + + "golang.org/x/tools/go/analysis" +) + +type IsPure struct{} + +func (*IsPure) AFact() {} +func (d *IsPure) String() string { return "is pure" } + +type PurityResult map[*types.Func]*IsPure + +var Purity = &analysis.Analyzer{ + Name: "fact_purity", + Doc: "Mark pure functions", + Run: purity, + Requires: []*analysis.Analyzer{buildir.Analyzer}, + FactTypes: []analysis.Fact{(*IsPure)(nil)}, + ResultType: reflect.TypeOf(PurityResult{}), +} + +var pureStdlib = map[string]struct{}{ + "errors.New": {}, + "fmt.Errorf": {}, + "fmt.Sprintf": {}, + "fmt.Sprint": {}, + "sort.Reverse": {}, + "strings.Map": {}, + "strings.Repeat": {}, + "strings.Replace": {}, + "strings.Title": {}, + "strings.ToLower": {}, + "strings.ToLowerSpecial": {}, + "strings.ToTitle": {}, + "strings.ToTitleSpecial": {}, + "strings.ToUpper": {}, + "strings.ToUpperSpecial": {}, + "strings.Trim": {}, + "strings.TrimFunc": {}, + "strings.TrimLeft": {}, + "strings.TrimLeftFunc": {}, + "strings.TrimPrefix": {}, + "strings.TrimRight": {}, + "strings.TrimRightFunc": {}, + "strings.TrimSpace": {}, + "strings.TrimSuffix": {}, + "(*net/http.Request).WithContext": 
{}, +} + +func purity(pass *analysis.Pass) (interface{}, error) { + seen := map[*ir.Function]struct{}{} + irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg + var check func(fn *ir.Function) (ret bool) + check = func(fn *ir.Function) (ret bool) { + if fn.Object() == nil { + // TODO(dh): support closures + return false + } + if pass.ImportObjectFact(fn.Object(), new(IsPure)) { + return true + } + if fn.Pkg != irpkg { + // Function is in another package but wasn't marked as + // pure, ergo it isn't pure + return false + } + // Break recursion + if _, ok := seen[fn]; ok { + return false + } + + seen[fn] = struct{}{} + defer func() { + if ret { + pass.ExportObjectFact(fn.Object(), &IsPure{}) + } + }() + + if irutil.IsStub(fn) { + return false + } + + if _, ok := pureStdlib[fn.Object().(*types.Func).FullName()]; ok { + return true + } + + if fn.Signature.Results().Len() == 0 { + // A function with no return values is empty or is doing some + // work we cannot see (for example because of build tags); + // don't consider it pure. + return false + } + + for _, param := range fn.Params { + // TODO(dh): this may not be strictly correct. pure code + // can, to an extent, operate on non-basic types. + if _, ok := param.Type().Underlying().(*types.Basic); !ok { + return false + } + } + + // Don't consider external functions pure. + if fn.Blocks == nil { + return false + } + checkCall := func(common *ir.CallCommon) bool { + if common.IsInvoke() { + return false + } + builtin, ok := common.Value.(*ir.Builtin) + if !ok { + if common.StaticCallee() != fn { + if common.StaticCallee() == nil { + return false + } + if !check(common.StaticCallee()) { + return false + } + } + } else { + switch builtin.Name() { + case "len", "cap": + default: + return false + } + } + return true + } + for _, b := range fn.Blocks { + for _, ins := range b.Instrs { + switch ins := ins.(type) { + case *ir.Call: + if !checkCall(ins.Common()) { + return false + } + case *ir.Defer: + if !checkCall(&ins.Call) { + return false + } + case *ir.Select: + return false + case *ir.Send: + return false + case *ir.Go: + return false + case *ir.Panic: + return false + case *ir.Store: + return false + case *ir.FieldAddr: + return false + case *ir.Alloc: + return false + case *ir.Load: + return false + } + } + } + return true + } + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + check(fn) + } + + out := PurityResult{} + for _, fact := range pass.AllObjectFacts() { + out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure) + } + return out, nil +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/token.go b/vendor/honnef.co/go/tools/analysis/facts/token.go new file mode 100644 index 000000000..26e76ff73 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/facts/token.go @@ -0,0 +1,24 @@ +package facts + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +var TokenFile = &analysis.Analyzer{ + Name: "tokenfileanalyzer", + Doc: "creates a mapping of *token.File to *ast.File", + Run: func(pass *analysis.Pass) (interface{}, error) { + m := map[*token.File]*ast.File{} + for _, af := range pass.Files { + tf := pass.Fset.File(af.Pos()) + m[tf] = af + } + return m, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(map[*token.File]*ast.File{}), +} diff --git a/vendor/honnef.co/go/tools/analysis/facts/typedness/typedness.go b/vendor/honnef.co/go/tools/analysis/facts/typedness/typedness.go new file mode 100644 index 000000000..5e5644711 --- /dev/null +++ 
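// Illustrative sketch of the property the purity fact above approximates: a
// pure function's result depends only on its (basic-typed) arguments and it
// performs no observable side effects, while an impure one does. The names
// double, bump and counter are invented for this example only.
package main

import "fmt"

var counter int

// double is pure in the sense used above: same input, same output, no stores.
func double(x int) int { return x * 2 }

// bump writes to a package-level variable, so it would not be marked pure.
func bump(x int) int {
	counter++
	return x + counter
}

func main() {
	fmt.Println(double(21), double(21)) // 42 42
	fmt.Println(bump(1), bump(1))       // 2 3
}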
b/vendor/honnef.co/go/tools/analysis/facts/typedness/typedness.go @@ -0,0 +1,242 @@ +package typedness + +import ( + "fmt" + "go/token" + "go/types" + "reflect" + + "honnef.co/go/tools/go/ir" + "honnef.co/go/tools/go/ir/irutil" + "honnef.co/go/tools/internal/passes/buildir" + + "golang.org/x/tools/go/analysis" +) + +// alwaysTypedFact denotes that a function's return value will never +// be untyped nil. The analysis errs on the side of false negatives. +type alwaysTypedFact struct { + Rets uint8 +} + +func (*alwaysTypedFact) AFact() {} +func (fact *alwaysTypedFact) String() string { + return fmt.Sprintf("always typed: %08b", fact.Rets) +} + +type Result struct { + m map[*types.Func]uint8 +} + +var Analysis = &analysis.Analyzer{ + Name: "typedness", + Doc: "Annotates return values that are always typed values", + Run: run, + Requires: []*analysis.Analyzer{buildir.Analyzer}, + FactTypes: []analysis.Fact{(*alwaysTypedFact)(nil)}, + ResultType: reflect.TypeOf((*Result)(nil)), +} + +// MustReturnTyped reports whether the ret's return value of fn must +// be a typed value, i.e. an interface value containing a concrete +// type or trivially a concrete type. The value of ret is zero-based. +// +// The analysis has false negatives: MustReturnTyped may incorrectly +// report false, but never incorrectly reports true. +func (r *Result) MustReturnTyped(fn *types.Func, ret int) bool { + if _, ok := fn.Type().(*types.Signature).Results().At(ret).Type().Underlying().(*types.Interface); !ok { + return true + } + return (r.m[fn] & (1 << ret)) != 0 +} + +func run(pass *analysis.Pass) (interface{}, error) { + seen := map[*ir.Function]struct{}{} + out := &Result{ + m: map[*types.Func]uint8{}, + } + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + impl(pass, fn, seen) + } + + for _, fact := range pass.AllObjectFacts() { + out.m[fact.Object.(*types.Func)] = fact.Fact.(*alwaysTypedFact).Rets + } + + return out, nil +} + +func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{}) (out uint8) { + if fn.Signature.Results().Len() > 8 { + return 0 + } + if fn.Object() == nil { + // TODO(dh): support closures + return 0 + } + if fact := new(alwaysTypedFact); pass.ImportObjectFact(fn.Object(), fact) { + return fact.Rets + } + if fn.Pkg != pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg { + return 0 + } + if fn.Blocks == nil { + return 0 + } + if irutil.IsStub(fn) { + return 0 + } + if _, ok := seenFns[fn]; ok { + // break recursion + return 0 + } + + seenFns[fn] = struct{}{} + defer func() { + for i := 0; i < fn.Signature.Results().Len(); i++ { + if _, ok := fn.Signature.Results().At(i).Type().Underlying().(*types.Interface); !ok { + // we don't need facts to know that non-interface + // types can't be untyped nil. zeroing out those bits + // may result in all bits being zero, in which case we + // don't have to save any fact. 
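// Illustrative sketch of the uint8 bitmask stored in alwaysTypedFact.Rets:
// bit i is set when the i-th (zero-based) return value is always a typed
// value, which is what the (r.m[fn] & (1 << ret)) != 0 test above checks.
// The values below are arbitrary example data.
package main

import "fmt"

func main() {
	var rets uint8
	rets |= 1 << 0 // result 0: always typed
	rets |= 1 << 2 // result 2: always typed

	for i := 0; i < 3; i++ {
		fmt.Printf("result %d always typed: %v\n", i, rets&(1<<i) != 0)
	}
}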
+ out &= ^(1 << i) + } + } + if out > 0 { + pass.ExportObjectFact(fn.Object(), &alwaysTypedFact{out}) + } + }() + + isUntypedNil := func(v ir.Value) bool { + k, ok := v.(*ir.Const) + if !ok { + return false + } + if _, ok := k.Type().Underlying().(*types.Interface); !ok { + return false + } + return k.Value == nil + } + + var do func(v ir.Value, seen map[ir.Value]struct{}) bool + do = func(v ir.Value, seen map[ir.Value]struct{}) bool { + if _, ok := seen[v]; ok { + // break cycle + return false + } + seen[v] = struct{}{} + switch v := v.(type) { + case *ir.Const: + // can't be a typed nil, because then we'd be returning the + // result of MakeInterface. + return false + case *ir.ChangeInterface: + return do(v.X, seen) + case *ir.Extract: + call, ok := v.Tuple.(*ir.Call) + if !ok { + // We only care about extracts of function results. For + // everything else (e.g. channel receives and map + // lookups), we can either not deduce any information, or + // will see a MakeInterface. + return false + } + if callee := call.Call.StaticCallee(); callee != nil { + return impl(pass, callee, seenFns)&(1<interface conversions, which + // don't tell us anything about the nilness. + return false + case *ir.MapLookup, *ir.Index, *ir.Recv, *ir.Parameter, *ir.Load, *ir.Field: + // All other instructions that tell us nothing about the + // typedness of interface values. + return false + default: + panic(fmt.Sprintf("internal error: unhandled type %T", v)) + } + } + + ret := fn.Exit.Control().(*ir.Return) + for i, v := range ret.Results { + if _, ok := fn.Signature.Results().At(i).Type().Underlying().(*types.Interface); ok { + if do(v, map[ir.Value]struct{}{}) { + out |= 1 << i + } + } + } + return out +} diff --git a/vendor/honnef.co/go/tools/analysis/lint/lint.go b/vendor/honnef.co/go/tools/analysis/lint/lint.go new file mode 100644 index 000000000..fdc4cb5dc --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/lint/lint.go @@ -0,0 +1,198 @@ +// Package lint provides abstractions on top of go/analysis. +package lint + +import ( + "flag" + "fmt" + "go/ast" + "go/build" + "go/token" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" +) + +type Analyzer struct { + // The analyzer's documentation. Unlike go/analysis.Analyzer.Doc, + // this field is structured, providing access to severity, options + // etc. 
+ Doc *Documentation + Analyzer *analysis.Analyzer +} + +func (a *Analyzer) initialize() { + a.Analyzer.Doc = a.Doc.String() + if a.Analyzer.Flags.Usage == nil { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.Var(newVersionFlag(), "go", "Target Go version") + a.Analyzer.Flags = *fs + } +} + +func InitializeAnalyzers(docs map[string]*Documentation, analyzers map[string]*analysis.Analyzer) []*Analyzer { + out := make([]*Analyzer, 0, len(analyzers)) + for k, v := range analyzers { + v.Name = k + a := &Analyzer{ + Doc: docs[k], + Analyzer: v, + } + a.initialize() + out = append(out, a) + } + return out +} + +type Severity int + +const ( + SeverityNone Severity = iota + SeverityError + SeverityDeprecated + SeverityWarning + SeverityInfo + SeverityHint +) + +type Documentation struct { + Title string + Text string + Since string + NonDefault bool + Options []string + Severity Severity +} + +func Markdownify(m map[string]*Documentation) map[string]*Documentation { + for _, v := range m { + v.Title = toMarkdown(v.Title) + v.Text = toMarkdown(v.Text) + } + return m +} + +func toMarkdown(s string) string { + return strings.ReplaceAll(s, `\'`, "`") +} + +func (doc *Documentation) String() string { + if doc == nil { + return "Error: No documentation." + } + + b := &strings.Builder{} + fmt.Fprintf(b, "%s\n\n", doc.Title) + if doc.Text != "" { + fmt.Fprintf(b, "%s\n\n", doc.Text) + } + fmt.Fprint(b, "Available since\n ") + if doc.Since == "" { + fmt.Fprint(b, "unreleased") + } else { + fmt.Fprintf(b, "%s", doc.Since) + } + if doc.NonDefault { + fmt.Fprint(b, ", non-default") + } + fmt.Fprint(b, "\n") + if len(doc.Options) > 0 { + fmt.Fprintf(b, "\nOptions\n") + for _, opt := range doc.Options { + fmt.Fprintf(b, " %s", opt) + } + fmt.Fprint(b, "\n") + } + return b.String() +} + +func newVersionFlag() flag.Getter { + tags := build.Default.ReleaseTags + v := tags[len(tags)-1][2:] + version := new(VersionFlag) + if err := version.Set(v); err != nil { + panic(fmt.Sprintf("internal error: %s", err)) + } + return version +} + +type VersionFlag int + +func (v *VersionFlag) String() string { + return fmt.Sprintf("1.%d", *v) +} + +func (v *VersionFlag) Set(s string) error { + if len(s) < 3 { + return fmt.Errorf("invalid Go version: %q", s) + } + if s[0] != '1' { + return fmt.Errorf("invalid Go version: %q", s) + } + if s[1] != '.' { + return fmt.Errorf("invalid Go version: %q", s) + } + i, err := strconv.Atoi(s[2:]) + if err != nil { + return fmt.Errorf("invalid Go version: %q", s) + } + *v = VersionFlag(i) + return nil +} + +func (v *VersionFlag) Get() interface{} { + return int(*v) +} + +// ExhaustiveTypeSwitch panics when called. It can be used to ensure +// that type switches are exhaustive. +func ExhaustiveTypeSwitch(v interface{}) { + panic(fmt.Sprintf("internal error: unhandled case %T", v)) +} + +// A directive is a comment of the form '//lint: +// [arguments...]'. It represents instructions to the static analysis +// tool. 
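// Illustrative sketch of the directive syntax the Directive type below
// models: a comment of the form "//lint:<command> [arguments...]", where the
// first field after the prefix is the command and the remaining fields are
// its arguments (mirroring parseDirective). The concrete comment text used
// here is made up for the example.
package main

import (
	"fmt"
	"strings"
)

func main() {
	c := "//lint:ignore SA4006 this assignment is intentional"
	rest := strings.TrimPrefix(c, "//lint:")
	fields := strings.Split(rest, " ")
	fmt.Println("command:  ", fields[0])  // ignore
	fmt.Println("arguments:", fields[1:]) // [SA4006 this assignment is intentional]
}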
+type Directive struct { + Command string + Arguments []string + Directive *ast.Comment + Node ast.Node +} + +func parseDirective(s string) (cmd string, args []string) { + if !strings.HasPrefix(s, "//lint:") { + return "", nil + } + s = strings.TrimPrefix(s, "//lint:") + fields := strings.Split(s, " ") + return fields[0], fields[1:] +} + +func ParseDirectives(files []*ast.File, fset *token.FileSet) []Directive { + var dirs []Directive + for _, f := range files { + // OPT(dh): in our old code, we skip all the comment map work if we + // couldn't find any directives, benchmark if that's actually + // worth doing + cm := ast.NewCommentMap(fset, f, f.Comments) + for node, cgs := range cm { + for _, cg := range cgs { + for _, c := range cg.List { + if !strings.HasPrefix(c.Text, "//lint:") { + continue + } + cmd, args := parseDirective(c.Text) + d := Directive{ + Command: cmd, + Arguments: args, + Directive: c, + Node: node, + } + dirs = append(dirs, d) + } + } + } + } + return dirs +} diff --git a/vendor/honnef.co/go/tools/analysis/report/report.go b/vendor/honnef.co/go/tools/analysis/report/report.go new file mode 100644 index 000000000..f7cd64ab2 --- /dev/null +++ b/vendor/honnef.co/go/tools/analysis/report/report.go @@ -0,0 +1,247 @@ +package report + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "path/filepath" + "strconv" + "strings" + + "honnef.co/go/tools/analysis/facts" + "honnef.co/go/tools/go/ast/astutil" + + "golang.org/x/tools/go/analysis" +) + +type Options struct { + ShortRange bool + FilterGenerated bool + Fixes []analysis.SuggestedFix + Related []analysis.RelatedInformation +} + +type Option func(*Options) + +func ShortRange() Option { + return func(opts *Options) { + opts.ShortRange = true + } +} + +func FilterGenerated() Option { + return func(opts *Options) { + opts.FilterGenerated = true + } +} + +func Fixes(fixes ...analysis.SuggestedFix) Option { + return func(opts *Options) { + opts.Fixes = append(opts.Fixes, fixes...) + } +} + +func Related(node Positioner, message string) Option { + return func(opts *Options) { + pos, end, ok := getRange(node, opts.ShortRange) + if !ok { + return + } + r := analysis.RelatedInformation{ + Pos: pos, + End: end, + Message: message, + } + opts.Related = append(opts.Related, r) + } +} + +type Positioner interface { + Pos() token.Pos +} + +type fullPositioner interface { + Pos() token.Pos + End() token.Pos +} + +type sourcer interface { + Source() ast.Node +} + +// shortRange returns the position and end of the main component of an +// AST node. For nodes that have no body, the short range is identical +// to the node's Pos and End. For nodes that do have a body, the short +// range excludes the body. 
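// Illustrative sketch of the "short range" described above: for a function
// declaration the interesting span ends at the signature, excluding the
// body. The tiny source snippet is invented for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package demo\n\nfunc Add(a, b int) int {\n\treturn a + b\n}\n"
	af, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	fd := af.Decls[0].(*ast.FuncDecl)

	fmt.Println("full range ends on line", fset.Position(fd.End()).Line)       // 5: closing brace
	fmt.Println("short range ends on line", fset.Position(fd.Type.End()).Line) // 3: end of signature
}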
+func shortRange(node ast.Node) (pos, end token.Pos) { + switch node := node.(type) { + case *ast.File: + return node.Pos(), node.Name.End() + case *ast.CaseClause: + return node.Pos(), node.Colon + 1 + case *ast.CommClause: + return node.Pos(), node.Colon + 1 + case *ast.DeferStmt: + return node.Pos(), node.Defer + token.Pos(len("defer")) + case *ast.ExprStmt: + return shortRange(node.X) + case *ast.ForStmt: + if node.Post != nil { + return node.For, node.Post.End() + } else if node.Cond != nil { + return node.For, node.Cond.End() + } else if node.Init != nil { + // +1 to catch the semicolon, for gofmt'ed code + return node.Pos(), node.Init.End() + 1 + } else { + return node.Pos(), node.For + token.Pos(len("for")) + } + case *ast.FuncDecl: + return node.Pos(), node.Type.End() + case *ast.FuncLit: + return node.Pos(), node.Type.End() + case *ast.GoStmt: + if _, ok := astutil.Unparen(node.Call.Fun).(*ast.FuncLit); ok { + return node.Pos(), node.Go + token.Pos(len("go")) + } else { + return node.Pos(), node.End() + } + case *ast.IfStmt: + return node.Pos(), node.Cond.End() + case *ast.RangeStmt: + return node.Pos(), node.X.End() + case *ast.SelectStmt: + return node.Pos(), node.Pos() + token.Pos(len("select")) + case *ast.SwitchStmt: + if node.Tag != nil { + return node.Pos(), node.Tag.End() + } else if node.Init != nil { + // +1 to catch the semicolon, for gofmt'ed code + return node.Pos(), node.Init.End() + 1 + } else { + return node.Pos(), node.Pos() + token.Pos(len("switch")) + } + case *ast.TypeSwitchStmt: + return node.Pos(), node.Assign.End() + default: + return node.Pos(), node.End() + } +} + +func HasRange(node Positioner) bool { + // we don't know if getRange will be called with shortRange set to + // true, so make sure that both work. + _, _, ok := getRange(node, false) + if !ok { + return false + } + _, _, ok = getRange(node, true) + return ok +} + +func getRange(node Positioner, short bool) (pos, end token.Pos, ok bool) { + switch n := node.(type) { + case sourcer: + s := n.Source() + if s == nil { + return 0, 0, false + } + if short { + p, e := shortRange(s) + return p, e, true + } + return s.Pos(), s.End(), true + case fullPositioner: + if short { + p, e := shortRange(n) + return p, e, true + } + return n.Pos(), n.End(), true + default: + return n.Pos(), token.NoPos, true + } +} + +func Report(pass *analysis.Pass, node Positioner, message string, opts ...Option) { + cfg := &Options{} + for _, opt := range opts { + opt(cfg) + } + + file := DisplayPosition(pass.Fset, node.Pos()).Filename + if cfg.FilterGenerated { + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + if _, ok := m[file]; ok { + return + } + } + + pos, end, ok := getRange(node, cfg.ShortRange) + if !ok { + panic(fmt.Sprintf("no valid position for reporting node %v", node)) + } + d := analysis.Diagnostic{ + Pos: pos, + End: end, + Message: message, + SuggestedFixes: cfg.Fixes, + Related: cfg.Related, + } + pass.Report(d) +} + +func Render(pass *analysis.Pass, x interface{}) string { + var buf bytes.Buffer + if err := format.Node(&buf, pass.Fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func RenderArgs(pass *analysis.Pass, args []ast.Expr) string { + var ss []string + for _, arg := range args { + ss = append(ss, Render(pass, arg)) + } + return strings.Join(ss, ", ") +} + +func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { + if p == token.NoPos { + return token.Position{} + } + + // Only use the adjusted position if it points to another Go file. 
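// Illustrative sketch of adjusted vs. unadjusted positions as used by
// DisplayPosition: a //line directive makes the adjusted position point into
// another (possibly non-Go) file, which is why the adjusted result is only
// trusted when it still ends in ".go". The file names here are made up.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package demo\n\n//line gen.y:100\nfunc F() {}\n"
	af, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	pos := af.Decls[0].Pos()
	fmt.Println(fset.PositionFor(pos, false)) // a position in demo.go (line 4)
	fmt.Println(fset.PositionFor(pos, true))  // a position in gen.y (line 100)
}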
+ // This means we'll point to the original file for cgo files, but + // we won't point to a YACC grammar file. + pos := fset.PositionFor(p, false) + adjPos := fset.PositionFor(p, true) + + if filepath.Ext(adjPos.Filename) == ".go" { + return adjPos + } + + return pos +} + +func Ordinal(n int) string { + suffix := "th" + if n < 10 || n > 20 { + switch n % 10 { + case 0: + suffix = "th" + case 1: + suffix = "st" + case 2: + suffix = "nd" + case 3: + suffix = "rd" + default: + suffix = "th" + } + } + + return strconv.Itoa(n) + suffix +} diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go new file mode 100644 index 000000000..14de76f4c --- /dev/null +++ b/vendor/honnef.co/go/tools/config/config.go @@ -0,0 +1,245 @@ +package config + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/BurntSushi/toml" + "golang.org/x/tools/go/analysis" +) + +// Dir looks at a list of absolute file names, which should make up a +// single package, and returns the path of the directory that may +// contain a staticcheck.conf file. It returns the empty string if no +// such directory could be determined, for example because all files +// were located in Go's build cache. +func Dir(files []string) string { + if len(files) == 0 { + return "" + } + cache, err := os.UserCacheDir() + if err != nil { + cache = "" + } + var path string + for _, p := range files { + // FIXME(dh): using strings.HasPrefix isn't technically + // correct, but it should be good enough for now. + if cache != "" && strings.HasPrefix(p, cache) { + // File in the build cache of the standard Go build system + continue + } + path = p + break + } + + if path == "" { + // The package only consists of generated files. + return "" + } + + dir := filepath.Dir(path) + return dir +} + +func dirAST(files []*ast.File, fset *token.FileSet) string { + names := make([]string, len(files)) + for i, f := range files { + names[i] = fset.PositionFor(f.Pos(), true).Filename + } + return Dir(names) +} + +var Analyzer = &analysis.Analyzer{ + Name: "config", + Doc: "loads configuration for the current package tree", + Run: func(pass *analysis.Pass) (interface{}, error) { + dir := dirAST(pass.Files, pass.Fset) + if dir == "" { + cfg := DefaultConfig + return &cfg, nil + } + cfg, err := Load(dir) + if err != nil { + return nil, fmt.Errorf("error loading staticcheck.conf: %s", err) + } + return &cfg, nil + }, + RunDespiteErrors: true, + ResultType: reflect.TypeOf((*Config)(nil)), +} + +func For(pass *analysis.Pass) *Config { + return pass.ResultOf[Analyzer].(*Config) +} + +func mergeLists(a, b []string) []string { + out := make([]string, 0, len(a)+len(b)) + for _, el := range b { + if el == "inherit" { + out = append(out, a...) 
+ } else { + out = append(out, el) + } + } + + return out +} + +func normalizeList(list []string) []string { + if len(list) > 1 { + nlist := make([]string, 0, len(list)) + nlist = append(nlist, list[0]) + for i, el := range list[1:] { + if el != list[i] { + nlist = append(nlist, el) + } + } + list = nlist + } + + for _, el := range list { + if el == "inherit" { + // This should never happen, because the default config + // should not use "inherit" + panic(`unresolved "inherit"`) + } + } + + return list +} + +func (cfg Config) Merge(ocfg Config) Config { + if ocfg.Checks != nil { + cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks) + } + if ocfg.Initialisms != nil { + cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms) + } + if ocfg.DotImportWhitelist != nil { + cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist) + } + if ocfg.HTTPStatusCodeWhitelist != nil { + cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist) + } + return cfg +} + +type Config struct { + // TODO(dh): this implementation makes it impossible for external + // clients to add their own checkers with configuration. At the + // moment, we don't really care about that; we don't encourage + // that people use this package. In the future, we may. The + // obvious solution would be using map[string]interface{}, but + // that's obviously subpar. + + Checks []string `toml:"checks"` + Initialisms []string `toml:"initialisms"` + DotImportWhitelist []string `toml:"dot_import_whitelist"` + HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"` +} + +func (c Config) String() string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "Checks: %#v\n", c.Checks) + fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms) + fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist) + fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist) + + return buf.String() +} + +var DefaultConfig = Config{ + Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"}, + Initialisms: []string{ + "ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS", "SIP", "RTP", "AMQP", "DB", "TS", + }, + DotImportWhitelist: []string{}, + HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"}, +} + +const ConfigName = "staticcheck.conf" + +func parseConfigs(dir string) ([]Config, error) { + var out []Config + + // TODO(dh): consider stopping at the GOPATH/module boundary + for dir != "" { + f, err := os.Open(filepath.Join(dir, ConfigName)) + if os.IsNotExist(err) { + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + continue + } + if err != nil { + return nil, err + } + var cfg Config + _, err = toml.DecodeReader(f, &cfg) + f.Close() + if err != nil { + return nil, err + } + out = append(out, cfg) + ndir := filepath.Dir(dir) + if ndir == dir { + break + } + dir = ndir + } + out = append(out, DefaultConfig) + if len(out) < 2 { + return out, nil + } + for i := 0; i < len(out)/2; i++ { + out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i] + } + return out, nil +} + +func mergeConfigs(confs []Config) Config { + if len(confs) == 0 { + // This shouldn't happen because we always have at least a + // default config. 
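// Illustrative sketch of the "inherit" rule implemented by mergeLists above:
// a child configuration replaces the parent's list, except that the literal
// entry "inherit" expands to the parent's entries in place. The two example
// lists are invented.
package main

import "fmt"

func merge(parent, child []string) []string {
	out := make([]string, 0, len(parent)+len(child))
	for _, el := range child {
		if el == "inherit" {
			out = append(out, parent...)
		} else {
			out = append(out, el)
		}
	}
	return out
}

func main() {
	parent := []string{"all", "-ST1000"}
	child := []string{"inherit", "-SA1019"}
	fmt.Println(merge(parent, child)) // [all -ST1000 -SA1019]
}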
+ panic("trying to merge zero configs") + } + if len(confs) == 1 { + return confs[0] + } + conf := confs[0] + for _, oconf := range confs[1:] { + conf = conf.Merge(oconf) + } + return conf +} + +func Load(dir string) (Config, error) { + confs, err := parseConfigs(dir) + if err != nil { + return Config{}, err + } + conf := mergeConfigs(confs) + + conf.Checks = normalizeList(conf.Checks) + conf.Initialisms = normalizeList(conf.Initialisms) + conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist) + conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist) + + return conf, nil +} diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf new file mode 100644 index 000000000..106da5bcb --- /dev/null +++ b/vendor/honnef.co/go/tools/config/example.conf @@ -0,0 +1,10 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"] +initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", + "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", + "IP", "JSON", "QPS", "RAM", "RPC", "SLA", + "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", + "UDP", "UI", "GID", "UID", "UUID", "URI", + "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", + "XSS", "SIP", "RTP", "AMQP", "DB", "TS"] +dot_import_whitelist = [] +http_status_code_whitelist = ["200", "400", "404", "500"] diff --git a/vendor/honnef.co/go/tools/go/ast/astutil/upstream.go b/vendor/honnef.co/go/tools/go/ast/astutil/upstream.go new file mode 100644 index 000000000..fc647c4d3 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ast/astutil/upstream.go @@ -0,0 +1,20 @@ +package astutil + +import ( + "go/ast" + "go/token" + _ "unsafe" + + "golang.org/x/tools/go/ast/astutil" +) + +type Cursor = astutil.Cursor +type ApplyFunc = astutil.ApplyFunc + +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + return astutil.Apply(root, pre, post) +} + +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + return astutil.PathEnclosingInterval(root, start, end) +} diff --git a/vendor/honnef.co/go/tools/go/ast/astutil/util.go b/vendor/honnef.co/go/tools/go/ast/astutil/util.go new file mode 100644 index 000000000..fac33780d --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ast/astutil/util.go @@ -0,0 +1,299 @@ +package astutil + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + "strings" +) + +func IsIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. 
+func IsBlank(id ast.Expr) bool { + ident, _ := id.(*ast.Ident) + return ident != nil && ident.Name == "_" +} + +func IsIntLiteral(expr ast.Expr, literal string) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == literal +} + +// Deprecated: use IsIntLiteral instead +func IsZero(expr ast.Expr) bool { + return IsIntLiteral(expr, "0") +} + +func Preamble(f *ast.File) string { + cutoff := f.Package + if f.Doc != nil { + cutoff = f.Doc.Pos() + } + var out []string + for _, cmt := range f.Comments { + if cmt.Pos() >= cutoff { + break + } + out = append(out, cmt.Text()) + } + return strings.Join(out, "\n") +} + +func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec { + if len(specs) == 0 { + return nil + } + groups := make([][]ast.Spec, 1) + groups[0] = append(groups[0], specs[0]) + + for _, spec := range specs[1:] { + g := groups[len(groups)-1] + if fset.PositionFor(spec.Pos(), false).Line-1 != + fset.PositionFor(g[len(g)-1].End(), false).Line { + + groups = append(groups, nil) + } + + groups[len(groups)-1] = append(groups[len(groups)-1], spec) + } + + return groups +} + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} + +func CopyExpr(node ast.Expr) ast.Expr { + switch node := node.(type) { + case *ast.BasicLit: + cp := *node + return &cp + case *ast.BinaryExpr: + cp := *node + cp.X = CopyExpr(cp.X) + cp.Y = CopyExpr(cp.Y) + return &cp + case *ast.CallExpr: + cp := *node + cp.Fun = CopyExpr(cp.Fun) + cp.Args = make([]ast.Expr, len(node.Args)) + for i, v := range node.Args { + cp.Args[i] = CopyExpr(v) + } + return &cp + case *ast.CompositeLit: + cp := *node + cp.Type = CopyExpr(cp.Type) + cp.Elts = make([]ast.Expr, len(node.Elts)) + for i, v := range node.Elts { + cp.Elts[i] = CopyExpr(v) + } + return &cp + case *ast.Ident: + cp := *node + return &cp + case *ast.IndexExpr: + cp := *node + cp.X = CopyExpr(cp.X) + cp.Index = CopyExpr(cp.Index) + return &cp + case *ast.KeyValueExpr: + cp := *node + cp.Key = CopyExpr(cp.Key) + cp.Value = CopyExpr(cp.Value) + return &cp + case *ast.ParenExpr: + cp := *node + cp.X = CopyExpr(cp.X) + return &cp + case *ast.SelectorExpr: + cp := *node + cp.X = CopyExpr(cp.X) + cp.Sel = CopyExpr(cp.Sel).(*ast.Ident) + return &cp + case *ast.SliceExpr: + cp := *node + cp.X = CopyExpr(cp.X) + cp.Low = CopyExpr(cp.Low) + cp.High = CopyExpr(cp.High) + cp.Max = CopyExpr(cp.Max) + return &cp + case *ast.StarExpr: + cp := *node + cp.X = CopyExpr(cp.X) + return &cp + case *ast.TypeAssertExpr: + cp := *node + cp.X = CopyExpr(cp.X) + cp.Type = CopyExpr(cp.Type) + return &cp + case *ast.UnaryExpr: + cp := *node + cp.X = CopyExpr(cp.X) + return &cp + case *ast.MapType: + cp := *node + cp.Key = CopyExpr(cp.Key) + cp.Value = CopyExpr(cp.Value) + return &cp + case *ast.ArrayType: + cp := *node + cp.Len = CopyExpr(cp.Len) + cp.Elt = CopyExpr(cp.Elt) + return &cp + case *ast.Ellipsis: + cp := *node + cp.Elt = CopyExpr(cp.Elt) + return &cp + case *ast.InterfaceType: + cp := *node + return &cp + case *ast.StructType: + cp := *node + return &cp + case *ast.FuncLit: + // TODO(dh): implement copying of function literals. 
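// Illustrative sketch of what Unparen above does: it strips any number of
// enclosing parentheses from an expression. The expression string is made up
// for the example, and unparen here simply mirrors the vendored helper.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func unparen(e ast.Expr) ast.Expr {
	for {
		p, ok := e.(*ast.ParenExpr)
		if !ok {
			return e
		}
		e = p.X
	}
}

func main() {
	e, err := parser.ParseExpr("((x + 1))")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", e)          // *ast.ParenExpr
	fmt.Printf("%T\n", unparen(e)) // *ast.BinaryExpr
}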
+ return nil + case *ast.ChanType: + cp := *node + cp.Value = CopyExpr(cp.Value) + return &cp + case nil: + return nil + default: + panic(fmt.Sprintf("unreachable: %T", node)) + } +} + +func Equal(a, b ast.Node) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false + } + + switch a := a.(type) { + case *ast.BasicLit: + b := b.(*ast.BasicLit) + return a.Kind == b.Kind && a.Value == b.Value + case *ast.BinaryExpr: + b := b.(*ast.BinaryExpr) + return Equal(a.X, b.X) && a.Op == b.Op && Equal(a.Y, b.Y) + case *ast.CallExpr: + b := b.(*ast.CallExpr) + if len(a.Args) != len(b.Args) { + return false + } + for i, arg := range a.Args { + if !Equal(arg, b.Args[i]) { + return false + } + } + return Equal(a.Fun, b.Fun) && + (a.Ellipsis == token.NoPos && b.Ellipsis == token.NoPos || a.Ellipsis != token.NoPos && b.Ellipsis != token.NoPos) + case *ast.CompositeLit: + b := b.(*ast.CompositeLit) + if len(a.Elts) != len(b.Elts) { + return false + } + for i, elt := range b.Elts { + if !Equal(elt, b.Elts[i]) { + return false + } + } + return Equal(a.Type, b.Type) && a.Incomplete == b.Incomplete + case *ast.Ident: + b := b.(*ast.Ident) + return a.Name == b.Name + case *ast.IndexExpr: + b := b.(*ast.IndexExpr) + return Equal(a.X, b.X) && Equal(a.Index, b.Index) + case *ast.KeyValueExpr: + b := b.(*ast.KeyValueExpr) + return Equal(a.Key, b.Key) && Equal(a.Value, b.Value) + case *ast.ParenExpr: + b := b.(*ast.ParenExpr) + return Equal(a.X, b.X) + case *ast.SelectorExpr: + b := b.(*ast.SelectorExpr) + return Equal(a.X, b.X) && Equal(a.Sel, b.Sel) + case *ast.SliceExpr: + b := b.(*ast.SliceExpr) + return Equal(a.X, b.X) && Equal(a.Low, b.Low) && Equal(a.High, b.High) && Equal(a.Max, b.Max) && a.Slice3 == b.Slice3 + case *ast.StarExpr: + b := b.(*ast.StarExpr) + return Equal(a.X, b.X) + case *ast.TypeAssertExpr: + b := b.(*ast.TypeAssertExpr) + return Equal(a.X, b.X) && Equal(a.Type, b.Type) + case *ast.UnaryExpr: + b := b.(*ast.UnaryExpr) + return a.Op == b.Op && Equal(a.X, b.X) + case *ast.MapType: + b := b.(*ast.MapType) + return Equal(a.Key, b.Key) && Equal(a.Value, b.Value) + case *ast.ArrayType: + b := b.(*ast.ArrayType) + return Equal(a.Len, b.Len) && Equal(a.Elt, b.Elt) + case *ast.Ellipsis: + b := b.(*ast.Ellipsis) + return Equal(a.Elt, b.Elt) + case *ast.InterfaceType: + b := b.(*ast.InterfaceType) + return a.Incomplete == b.Incomplete && Equal(a.Methods, b.Methods) + case *ast.StructType: + b := b.(*ast.StructType) + return a.Incomplete == b.Incomplete && Equal(a.Fields, b.Fields) + case *ast.FuncLit: + // TODO(dh): support function literals + return false + case *ast.ChanType: + b := b.(*ast.ChanType) + return a.Dir == b.Dir && (a.Arrow == token.NoPos && b.Arrow == token.NoPos || a.Arrow != token.NoPos && b.Arrow != token.NoPos) + case *ast.FieldList: + b := b.(*ast.FieldList) + if len(a.List) != len(b.List) { + return false + } + for i, fieldA := range a.List { + if !Equal(fieldA, b.List[i]) { + return false + } + } + return true + case *ast.Field: + b := b.(*ast.Field) + if len(a.Names) != len(b.Names) { + return false + } + for j, name := range a.Names { + if !Equal(name, b.Names[j]) { + return false + } + } + if !Equal(a.Type, b.Type) || !Equal(a.Tag, b.Tag) { + return false + } + return true + default: + panic(fmt.Sprintf("unreachable: %T", a)) + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/LICENSE b/vendor/honnef.co/go/tools/go/ir/LICENSE new file mode 100644 index 000000000..aee48041e --- /dev/null +++ 
b/vendor/honnef.co/go/tools/go/ir/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/honnef.co/go/tools/go/ir/UPSTREAM b/vendor/honnef.co/go/tools/go/ir/UPSTREAM new file mode 100644 index 000000000..b12782503 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/UPSTREAM @@ -0,0 +1,9 @@ +This package started as a copy of golang.org/x/tools/go/ssa, imported from an unknown commit in 2016. +It has since been heavily modified to match our own needs in an IR. +The changes are too many to list here, and it is best to consider this package independent of go/ssa. + +Upstream changes still get applied when they address bugs in portions of code we have inherited. + +The last upstream commit we've looked at was: +640c1dea83015e5271a001c99370762fc63dc280 + diff --git a/vendor/honnef.co/go/tools/go/ir/blockopt.go b/vendor/honnef.co/go/tools/go/ir/blockopt.go new file mode 100644 index 000000000..d7a0e3567 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/blockopt.go @@ -0,0 +1,209 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// Simple block optimizations to simplify the control flow graph. + +// TODO(adonovan): opt: instead of creating several "unreachable" blocks +// per function in the Builder, reuse a single one (e.g. at Blocks[1]) +// to reduce garbage. + +import ( + "fmt" + "os" +) + +// If true, perform sanity checking and show progress at each +// successive iteration of optimizeBlocks. Very verbose. +const debugBlockOpt = false + +// markReachable sets Index=-1 for all blocks reachable from b. +func markReachable(b *BasicBlock) { + b.gaps = -1 + for _, succ := range b.Succs { + if succ.gaps == 0 { + markReachable(succ) + } + } +} + +// deleteUnreachableBlocks marks all reachable blocks of f and +// eliminates (nils) all others, including possibly cyclic subgraphs. 
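// Illustrative sketch of the reachability marking performed by markReachable
// above (and relied on by deleteUnreachableBlocks): a depth-first walk from
// the entry marks every block it can reach, and unmarked blocks are dead.
// The toy graph uses plain ints instead of *BasicBlock.
package main

import "fmt"

func mark(succs map[int][]int, b int, seen map[int]bool) {
	seen[b] = true
	for _, s := range succs[b] {
		if !seen[s] {
			mark(succs, s, seen)
		}
	}
}

func main() {
	// Entry 0 reaches 1 and 2; block 3 is unreachable from the entry.
	succs := map[int][]int{0: {1}, 1: {2}, 3: {1}}
	seen := map[int]bool{}
	mark(succs, 0, seen)
	for b := 0; b < 4; b++ {
		fmt.Printf("block %d reachable: %v\n", b, seen[b])
	}
}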
+// +func deleteUnreachableBlocks(f *Function) { + const white, black = 0, -1 + // We borrow b.gaps temporarily as the mark bit. + for _, b := range f.Blocks { + b.gaps = white + } + markReachable(f.Blocks[0]) + // In SSI form, we need the exit to be reachable for correct + // post-dominance information. In original form, however, we + // cannot unconditionally mark it reachable because we won't + // be adding fake edges, and this breaks the calculation of + // dominance information. + markReachable(f.Exit) + for i, b := range f.Blocks { + if b.gaps == white { + for _, c := range b.Succs { + if c.gaps == black { + c.removePred(b) // delete white->black edge + } + } + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "unreachable", b) + } + f.Blocks[i] = nil // delete b + } + } + f.removeNilBlocks() +} + +// jumpThreading attempts to apply simple jump-threading to block b, +// in which a->b->c become a->c if b is just a Jump. +// The result is true if the optimization was applied. +// +func jumpThreading(f *Function, b *BasicBlock) bool { + if b.Index == 0 { + return false // don't apply to entry block + } + if b.Instrs == nil { + return false + } + for _, pred := range b.Preds { + switch pred.Control().(type) { + case *ConstantSwitch: + // don't optimize away the head blocks of switch statements + return false + } + } + if _, ok := b.Instrs[0].(*Jump); !ok { + return false // not just a jump + } + c := b.Succs[0] + if c == b { + return false // don't apply to degenerate jump-to-self. + } + if c.hasPhi() { + return false // not sound without more effort + } + for j, a := range b.Preds { + a.replaceSucc(b, c) + + // If a now has two edges to c, replace its degenerate If by Jump. + if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { + jump := new(Jump) + jump.setBlock(a) + a.Instrs[len(a.Instrs)-1] = jump + a.Succs = a.Succs[:1] + c.removePred(b) + } else { + if j == 0 { + c.replacePred(b, a) + } else { + c.Preds = append(c.Preds, a) + } + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) + } + } + f.Blocks[b.Index] = nil // delete b + return true +} + +// fuseBlocks attempts to apply the block fusion optimization to block +// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. +// The result is true if the optimization was applied. +// +func fuseBlocks(f *Function, a *BasicBlock) bool { + if len(a.Succs) != 1 { + return false + } + if a.Succs[0] == f.Exit { + return false + } + b := a.Succs[0] + if len(b.Preds) != 1 { + return false + } + if _, ok := a.Instrs[len(a.Instrs)-1].(*Panic); ok { + // panics aren't simple jumps, they have side effects. + return false + } + + // Degenerate &&/|| ops may result in a straight-line CFG + // containing φ-nodes. (Ideally we'd replace such them with + // their sole operand but that requires Referrers, built later.) + if b.hasPhi() { + return false // not sound without further effort + } + + // Eliminate jump at end of A, then copy all of B across. + a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) + for _, instr := range b.Instrs { + instr.setBlock(a) + } + + // A inherits B's successors + a.Succs = append(a.succs2[:0], b.Succs...) + + // Fix up Preds links of all successors of B. 
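// Illustrative sketch of the jumpThreading rewrite above: when block b holds
// nothing but a jump to c, each predecessor of b is re-pointed directly at c
// and b disappears. The block type here is a stand-in for *BasicBlock.
package main

import "fmt"

type block struct {
	name  string
	succs []*block
}

func main() {
	a := &block{name: "a"}
	b := &block{name: "b"}
	c := &block{name: "c"}
	a.succs = []*block{b}
	b.succs = []*block{c} // b is just a jump

	// Thread the jump: a -> b -> c becomes a -> c.
	for i, s := range a.succs {
		if s == b {
			a.succs[i] = c
		}
	}
	fmt.Println(a.name, "->", a.succs[0].name) // a -> c
}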
+ for _, c := range b.Succs { + c.replacePred(b, a) + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) + } + + f.Blocks[b.Index] = nil // delete b + return true +} + +// optimizeBlocks() performs some simple block optimizations on a +// completed function: dead block elimination, block fusion, jump +// threading. +// +func optimizeBlocks(f *Function) { + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + + deleteUnreachableBlocks(f) + + // Loop until no further progress. + changed := true + for changed { + changed = false + + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + + for _, b := range f.Blocks { + // f.Blocks will temporarily contain nils to indicate + // deleted blocks; we remove them at the end. + if b == nil { + continue + } + + // Fuse blocks. b->c becomes bc. + if fuseBlocks(f, b) { + changed = true + } + + // a->b->c becomes a->c if b contains only a Jump. + if jumpThreading(f, b) { + changed = true + continue // (b was disconnected) + } + } + } + f.removeNilBlocks() +} diff --git a/vendor/honnef.co/go/tools/go/ir/builder.go b/vendor/honnef.co/go/tools/go/ir/builder.go new file mode 100644 index 000000000..0cc1bedcd --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/builder.go @@ -0,0 +1,2479 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file implements the BUILD phase of IR construction. +// +// IR construction has two phases, CREATE and BUILD. In the CREATE phase +// (create.go), all packages are constructed and type-checked and +// definitions of all package members are created, method-sets are +// computed, and wrapper methods are synthesized. +// ir.Packages are created in arbitrary order. +// +// In the BUILD phase (builder.go), the builder traverses the AST of +// each Go source function and generates IR instructions for the +// function body. Initializer expressions for package-level variables +// are emitted to the package's init() function in the order specified +// by go/types.Info.InitOrder, then code for each function in the +// package is generated in lexical order. +// +// The builder's and Program's indices (maps) are populated and +// mutated during the CREATE phase, but during the BUILD phase they +// remain constant. The sole exception is Prog.methodSets and its +// related maps, which are protected by a dedicated mutex. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "os" +) + +type opaqueType struct { + types.Type + name string +} + +func (t *opaqueType) String() string { return t.name } + +var ( + varOk = newVar("ok", tBool) + varIndex = newVar("index", tInt) + + // Type constants. + tBool = types.Typ[types.Bool] + tByte = types.Typ[types.Byte] + tInt = types.Typ[types.Int] + tInvalid = types.Typ[types.Invalid] + tString = types.Typ[types.String] + tUntypedNil = types.Typ[types.UntypedNil] + tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators + tEface = types.NewInterfaceType(nil, nil).Complete() +) + +// builder holds state associated with the package currently being built. +// Its methods contain all the logic for AST-to-IR conversion. +type builder struct { + printFunc string + + blocksets [5]BlockSet +} + +// cond emits to fn code to evaluate boolean condition e and jump +// to t or f depending on its value, performing various simplifications. +// +// Postcondition: fn.currentBlock is nil. 
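// Illustrative sketch of the short-circuit lowering that cond performs for
// token.LAND below: "x && y" becomes a branch on x whose true edge goes on
// to evaluate y, while the false edge jumps straight to the false target.
// branch is a made-up helper standing in for the emitted conditional jump.
package main

import "fmt"

func branch(cond bool, t, f func()) {
	if cond {
		t()
	} else {
		f()
	}
}

func main() {
	x, y := true, false
	then := func() { fmt.Println("then") }
	els := func() { fmt.Println("else") }

	// cond(x && y, then, els), lowered:
	branch(x, func() { branch(y, then, els) }, els) // prints "else"
}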
+// +func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { + switch e := e.(type) { + case *ast.ParenExpr: + return b.cond(fn, e.X, t, f) + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND: + ltrue := fn.newBasicBlock("cond.true") + b.cond(fn, e.X, ltrue, f) + fn.currentBlock = ltrue + return b.cond(fn, e.Y, t, f) + + case token.LOR: + lfalse := fn.newBasicBlock("cond.false") + b.cond(fn, e.X, t, lfalse) + fn.currentBlock = lfalse + return b.cond(fn, e.Y, t, f) + } + + case *ast.UnaryExpr: + if e.Op == token.NOT { + return b.cond(fn, e.X, f, t) + } + } + + // A traditional compiler would simplify "if false" (etc) here + // but we do not, for better fidelity to the source code. + // + // The value of a constant condition may be platform-specific, + // and may cause blocks that are reachable in some configuration + // to be hidden from subsequent analyses such as bug-finding tools. + return emitIf(fn, b.expr(fn, e), t, f, e) +} + +// logicalBinop emits code to fn to evaluate e, a &&- or +// ||-expression whose reified boolean value is wanted. +// The value is returned. +// +func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { + rhs := fn.newBasicBlock("binop.rhs") + done := fn.newBasicBlock("binop.done") + + // T(e) = T(e.X) = T(e.Y) after untyped constants have been + // eliminated. + // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. + t := fn.Pkg.typeOf(e) + + var short Value // value of the short-circuit path + switch e.Op { + case token.LAND: + b.cond(fn, e.X, rhs, done) + short = emitConst(fn, NewConst(constant.MakeBool(false), t)) + + case token.LOR: + b.cond(fn, e.X, done, rhs) + short = emitConst(fn, NewConst(constant.MakeBool(true), t)) + } + + // Is rhs unreachable? + if rhs.Preds == nil { + // Simplify false&&y to false, true||y to true. + fn.currentBlock = done + return short + } + + // Is done unreachable? + if done.Preds == nil { + // Simplify true&&y (or false||y) to y. + fn.currentBlock = rhs + return b.expr(fn, e.Y) + } + + // All edges from e.X to done carry the short-circuit value. + var edges []Value + for range done.Preds { + edges = append(edges, short) + } + + // The edge from e.Y to done carries the value of e.Y. + fn.currentBlock = rhs + edges = append(edges, b.expr(fn, e.Y)) + emitJump(fn, done, e) + fn.currentBlock = done + + phi := &Phi{Edges: edges} + phi.typ = t + return done.emit(phi, e) +} + +// exprN lowers a multi-result expression e to IR form, emitting code +// to fn and returning a single Value whose type is a *types.Tuple. +// The caller must access the components via Extract. +// +// Multi-result expressions include CallExprs in a multi-value +// assignment or return statement, and "value,ok" uses of +// TypeAssertExpr, IndexExpr (when X is a map), and Recv. +// +func (b *builder) exprN(fn *Function, e ast.Expr) Value { + typ := fn.Pkg.typeOf(e).(*types.Tuple) + switch e := e.(type) { + case *ast.ParenExpr: + return b.exprN(fn, e.X) + + case *ast.CallExpr: + // Currently, no built-in function nor type conversion + // has multiple results, so we can avoid some of the + // cases for single-valued CallExpr. 
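// Illustrative sketch of the multi-result ("value, ok") expressions that
// exprN lowers to tuple-typed values here: map lookups, type assertions and
// channel receives. All values are arbitrary example data.
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}
	v, ok := m["b"] // map lookup, comma-ok form
	fmt.Println(v, ok) // 0 false

	var i interface{} = "hello"
	s, ok := i.(string) // type assertion, comma-ok form
	fmt.Println(s, ok) // hello true

	ch := make(chan int, 1)
	ch <- 7
	n, ok := <-ch // channel receive, comma-ok form
	fmt.Println(n, ok) // 7 true
}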
+ var c Call + b.setCall(fn, e, &c.Call) + c.typ = typ + return fn.emit(&c, e) + + case *ast.IndexExpr: + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + lookup := &MapLookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e), + CommaOk: true, + } + lookup.setType(typ) + return fn.emit(lookup, e) + + case *ast.TypeAssertExpr: + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e) + + case *ast.UnaryExpr: // must be receive <- + return emitRecv(fn, b.expr(fn, e.X), true, typ, e) + } + panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) +} + +// builtin emits to fn IR instructions to implement a call to the +// built-in function obj with the specified arguments +// and return type. It returns the value defined by the result. +// +// The result is nil if no special handling was required; in this case +// the caller should treat this like an ordinary library function +// call. +// +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value { + switch obj.Name() { + case "make": + switch typ.Underlying().(type) { + case *types.Slice: + n := b.expr(fn, args[1]) + m := n + if len(args) == 3 { + m = b.expr(fn, args[2]) + } + if m, ok := m.(*Const); ok { + // treat make([]T, n, m) as new([m]T)[:n] + cap := m.Int64() + at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) + alloc := emitNew(fn, at, source) + v := &Slice{ + X: alloc, + High: n, + } + v.setType(typ) + return fn.emit(v, source) + } + v := &MakeSlice{ + Len: n, + Cap: m, + } + v.setType(typ) + return fn.emit(v, source) + + case *types.Map: + var res Value + if len(args) == 2 { + res = b.expr(fn, args[1]) + } + v := &MakeMap{Reserve: res} + v.setType(typ) + return fn.emit(v, source) + + case *types.Chan: + var sz Value = emitConst(fn, intConst(0)) + if len(args) == 2 { + sz = b.expr(fn, args[1]) + } + v := &MakeChan{Size: sz} + v.setType(typ) + return fn.emit(v, source) + } + + case "new": + alloc := emitNew(fn, deref(typ), source) + return alloc + + case "len", "cap": + // Special case: len or cap of an array or *array is + // based on the type, not the value which may be nil. + // We must still evaluate the value, though. (If it + // was side-effect free, the whole call would have + // been constant-folded.) + t := deref(fn.Pkg.typeOf(args[0])).Underlying() + if at, ok := t.(*types.Array); ok { + b.expr(fn, args[0]) // for effects only + return emitConst(fn, intConst(at.Len())) + } + // Otherwise treat as normal. + + case "panic": + fn.emit(&Panic{ + X: emitConv(fn, b.expr(fn, args[0]), tEface, source), + }, source) + addEdge(fn.currentBlock, fn.Exit) + fn.currentBlock = fn.newBasicBlock("unreachable") + return emitConst(fn, NewConst(constant.MakeBool(true), tBool)) // any non-nil Value will do + } + return nil // treat all others as a regular function call +} + +// addr lowers a single-result addressable expression e to IR form, +// emitting code to fn and returning the location (an lvalue) defined +// by the expression. +// +// If escaping is true, addr marks the base variable of the +// addressable expression e as being a potentially escaping pointer +// value. For example, in this code: +// +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c +// +// the application of & causes a.b[0].c to have its address taken, +// which means that ultimately the local variable a must be +// heap-allocated. This is a simple but very conservative escape +// analysis. 
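// Illustrative sketch of the lowering noted in builtin above: when the
// capacity argument of make is a constant, make([]T, n, 4) behaves like
// new([4]T)[:n], i.e. slicing a freshly allocated array.
package main

import "fmt"

func main() {
	n := 2

	viaMake := make([]int, n, 4)
	viaArray := new([4]int)[:n]

	fmt.Println(len(viaMake), cap(viaMake))   // 2 4
	fmt.Println(len(viaArray), cap(viaArray)) // 2 4
}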
+// +// Operations forming potentially escaping pointers include: +// - &x, including when implicit in method call or composite literals. +// - a[:] iff a is an array (not *array) +// - references to variables in lexically enclosing functions. +// +func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { + switch e := e.(type) { + case *ast.Ident: + if isBlankIdent(e) { + return blank{} + } + obj := fn.Pkg.objectOf(e) + v := fn.Prog.packageLevelValue(obj) // var (address) + if v == nil { + v = fn.lookup(obj, escaping) + } + return &address{addr: v, expr: e} + + case *ast.CompositeLit: + t := deref(fn.Pkg.typeOf(e)) + var v *Alloc + if escaping { + v = emitNew(fn, t, e) + } else { + v = fn.addLocal(t, e) + } + var sb storebuf + b.compLit(fn, v, e, true, &sb) + sb.emit(fn) + return &address{addr: v, expr: e} + + case *ast.ParenExpr: + return b.addr(fn, e.X, escaping) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // qualified identifier + return b.addr(fn, e.Sel, escaping) + } + if sel.Kind() != types.FieldVal { + panic(sel) + } + wantAddr := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) + last := len(sel.Index()) - 1 + return &address{ + addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), + expr: e.Sel, + } + + case *ast.IndexExpr: + var x Value + var et types.Type + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + x = b.addr(fn, e.X, escaping).address(fn) + et = types.NewPointer(t.Elem()) + case *types.Pointer: // *array + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()) + case *types.Slice: + x = b.expr(fn, e.X) + et = types.NewPointer(t.Elem()) + case *types.Map: + return &element{ + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), t.Key(), e.Index), + t: t.Elem(), + } + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + v := &IndexAddr{ + X: x, + Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), + } + v.setType(et) + return &address{addr: fn.emit(v, e), expr: e} + + case *ast.StarExpr: + return &address{addr: b.expr(fn, e.X), expr: e} + } + + panic(fmt.Sprintf("unexpected address expression: %T", e)) +} + +type store struct { + lhs lvalue + rhs Value + source ast.Node +} + +type storebuf struct{ stores []store } + +func (sb *storebuf) store(lhs lvalue, rhs Value, source ast.Node) { + sb.stores = append(sb.stores, store{lhs, rhs, source}) +} + +func (sb *storebuf) emit(fn *Function) { + for _, s := range sb.stores { + s.lhs.store(fn, s.rhs, s.source) + } +} + +// assign emits to fn code to initialize the lvalue loc with the value +// of expression e. If isZero is true, assign assumes that loc holds +// the zero value for its type. +// +// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate +// better code in some cases, e.g., for composite literals in an +// addressable location. +// +// If sb is not nil, assign generates code to evaluate expression e, but +// not to update loc. Instead, the necessary stores are appended to the +// storebuf sb so that they can be executed later. This allows correct +// in-place update of existing variables when the RHS is a composite +// literal that may reference parts of the LHS. +// +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) { + // Can we initialize it in place? 
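// Illustrative sketch of the escape scenario described in addr's
// documentation above: taking the address of a component of a local value
// forces the whole local to be treated as escaping. The types A and B mirror
// the example in that comment and exist only for this sketch.
package main

import "fmt"

type B struct{ c int }
type A struct{ b [1]B }

func leak() *int {
	a := A{b: [1]B{{c: 1}}}
	return &a.b[0].c // a's address escapes, so a is (conceptually) heap-allocated
}

func main() {
	fmt.Println(*leak()) // 1
}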
+ if e, ok := unparen(e).(*ast.CompositeLit); ok { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + if _, ok := loc.(blank); !ok { // avoid calling blank.typ() + if isPointer(loc.typ()) { + ptr := b.addr(fn, e, true).address(fn) + // copy address + if sb != nil { + sb.store(loc, ptr, source) + } else { + loc.store(fn, ptr, source) + } + return + } + } + + if _, ok := loc.(*address); ok { + if isInterface(loc.typ()) { + // e.g. var x interface{} = T{...} + // Can't in-place initialize an interface value. + // Fall back to copying. + } else { + // x = T{...} or x := T{...} + addr := loc.address(fn) + if sb != nil { + b.compLit(fn, addr, e, isZero, sb) + } else { + var sb storebuf + b.compLit(fn, addr, e, isZero, &sb) + sb.emit(fn) + } + + // Subtle: emit debug ref for aggregate types only; + // slice and map are handled by store ops in compLit. + switch loc.typ().Underlying().(type) { + case *types.Struct, *types.Array: + emitDebugRef(fn, e, addr, true) + } + + return + } + } + } + + // simple case: just copy + rhs := b.expr(fn, e) + if sb != nil { + sb.store(loc, rhs, source) + } else { + loc.store(fn, rhs, source) + } +} + +// expr lowers a single-result expression e to IR form, emitting code +// to fn and returning the Value defined by the expression. +// +func (b *builder) expr(fn *Function, e ast.Expr) Value { + e = unparen(e) + + tv := fn.Pkg.info.Types[e] + + // Is expression a constant? + if tv.Value != nil { + return emitConst(fn, NewConst(tv.Value, tv.Type)) + } + + var v Value + if tv.Addressable() { + // Prefer pointer arithmetic ({Index,Field}Addr) followed + // by Load over subelement extraction (e.g. Index, Field), + // to avoid large copies. + v = b.addr(fn, e, false).load(fn, e) + } else { + v = b.expr0(fn, e, tv) + } + if fn.debugInfo() { + emitDebugRef(fn, e, v, false) + } + return v +} + +func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { + switch e := e.(type) { + case *ast.BasicLit: + panic("non-constant BasicLit") // unreachable + + case *ast.FuncLit: + fn2 := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), + parent: fn, + Pkg: fn.Pkg, + Prog: fn.Prog, + functionBody: new(functionBody), + } + fn2.source = e + fn.AnonFuncs = append(fn.AnonFuncs, fn2) + fn2.initHTML(b.printFunc) + b.buildFunction(fn2) + if fn2.FreeVars == nil { + return fn2 + } + v := &MakeClosure{Fn: fn2} + v.setType(tv.Type) + for _, fv := range fn2.FreeVars { + v.Bindings = append(v.Bindings, fv.outer) + fv.outer = nil + } + return fn.emit(v, e) + + case *ast.TypeAssertExpr: // single-result form only + return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e) + + case *ast.CallExpr: + if fn.Pkg.info.Types[e.Fun].IsType() { + // Explicit type conversion, e.g. string(x) or big.Int(x) + x := b.expr(fn, e.Args[0]) + y := emitConv(fn, x, tv.Type, e) + return y + } + // Call to "intrinsic" built-ins, e.g. new, make, panic. + if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { + if v := b.builtin(fn, obj, e.Args, tv.Type, e); v != nil { + return v + } + } + } + // Regular function call. + var v Call + b.setCall(fn, e, &v.Call) + v.setType(tv.Type) + return fn.emit(&v, e) + + case *ast.UnaryExpr: + switch e.Op { + case token.AND: // &X --- potentially escaping. 
+ addr := b.addr(fn, e.X, true) + if _, ok := unparen(e.X).(*ast.StarExpr); ok { + // &*p must panic if p is nil (http://golang.org/s/go12nil). + // For simplicity, we'll just (suboptimally) rely + // on the side effects of a load. + // TODO(adonovan): emit dedicated nilcheck. + addr.load(fn, e) + } + return addr.address(fn) + case token.ADD: + return b.expr(fn, e.X) + case token.NOT, token.SUB, token.XOR: // ! <- - ^ + v := &UnOp{ + Op: e.Op, + X: b.expr(fn, e.X), + } + v.setType(tv.Type) + return fn.emit(v, e) + case token.ARROW: + return emitRecv(fn, b.expr(fn, e.X), false, tv.Type, e) + default: + panic(e.Op) + } + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND, token.LOR: + return b.logicalBinop(fn, e) + case token.SHL, token.SHR: + fallthrough + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e) + + case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e) + // The type of x==y may be UntypedBool. + return emitConv(fn, cmp, types.Default(tv.Type), e) + default: + panic("illegal op in BinaryExpr: " + e.Op.String()) + } + + case *ast.SliceExpr: + var low, high, max Value + var x Value + switch fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Potentially escaping. + x = b.addr(fn, e.X, true).address(fn) + case *types.Basic, *types.Slice, *types.Pointer: // *array + x = b.expr(fn, e.X) + default: + panic("unreachable") + } + if e.High != nil { + high = b.expr(fn, e.High) + } + if e.Low != nil { + low = b.expr(fn, e.Low) + } + if e.Slice3 { + max = b.expr(fn, e.Max) + } + v := &Slice{ + X: x, + Low: low, + High: high, + Max: max, + } + v.setType(tv.Type) + return fn.emit(v, e) + + case *ast.Ident: + obj := fn.Pkg.info.Uses[e] + // Universal built-in or nil? + switch obj := obj.(type) { + case *types.Builtin: + return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} + case *types.Nil: + return emitConst(fn, nilConst(tv.Type)) + } + // Package-level func or var? + if v := fn.Prog.packageLevelValue(obj); v != nil { + if _, ok := obj.(*types.Var); ok { + return emitLoad(fn, v, e) // var (address) + } + return v // (func) + } + // Local var. + return emitLoad(fn, fn.lookup(obj, false), e) // var (address) + + case *ast.SelectorExpr: + sel, ok := fn.Pkg.info.Selections[e] + if !ok { + // builtin unsafe.{Add,Slice} + if obj, ok := fn.Pkg.info.Uses[e.Sel].(*types.Builtin); ok { + return &Builtin{name: "Unsafe" + obj.Name(), sig: tv.Type.(*types.Signature)} + } + // qualified identifier + return b.expr(fn, e.Sel) + } + switch sel.Kind() { + case types.MethodExpr: + // (*T).f or T.f, the method f from the method-set of type T. + // The result is a "thunk". + return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type, e) + + case types.MethodVal: + // e.f where e is an expression and f is a method. + // The result is a "bound". + obj := sel.Obj().(*types.Func) + rt := recvType(obj) + wantAddr := isPointer(rt) + escaping := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) + if isInterface(rt) { + // If v has interface type I, + // we must emit a check that v is non-nil. + // We use: typeassert v.(I). 
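The MethodExpr ("thunk") and MethodVal ("bound") cases handled here differ only in when the receiver is supplied; a small source-level sketch (Counter is an invented type):

package main

import "fmt"

type Counter struct{ n int }

func (c *Counter) Add(d int) { c.n += d }

func main() {
	c := &Counter{}

	// Method expression: selected from the type; the receiver becomes an
	// ordinary first argument of the resulting function value.
	add := (*Counter).Add
	add(c, 2)

	// Method value: the receiver is evaluated now and captured, i.e. a
	// closure bound to c.
	inc := c.Add
	inc(3)

	fmt.Println(c.n) // 5
}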
+ emitTypeAssert(fn, v, rt, e) + } + c := &MakeClosure{ + Fn: makeBound(fn.Prog, obj), + Bindings: []Value{v}, + } + c.source = e.Sel + c.setType(tv.Type) + return fn.emit(c, e) + + case types.FieldVal: + indices := sel.Index() + last := len(indices) - 1 + v := b.expr(fn, e.X) + v = emitImplicitSelections(fn, v, indices[:last], e) + v = emitFieldSelection(fn, v, indices[last], false, e.Sel) + return v + } + + panic("unexpected expression-relative selector") + + case *ast.IndexExpr: + switch t := fn.Pkg.typeOf(e.X).Underlying().(type) { + case *types.Array: + // Non-addressable array (in a register). + v := &Index{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), + } + v.setType(t.Elem()) + return fn.emit(v, e) + + case *types.Map: + // Maps are not addressable. + mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) + v := &MapLookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e.Index), + } + v.setType(mapt.Elem()) + return fn.emit(v, e) + + case *types.Basic: // => string + // Strings are not addressable. + v := &StringLookup{ + X: b.expr(fn, e.X), + Index: b.expr(fn, e.Index), + } + v.setType(tByte) + return fn.emit(v, e) + + case *types.Slice, *types.Pointer: // *array + // Addressable slice/array; use IndexAddr and Load. + return b.addr(fn, e, false).load(fn, e) + + default: + panic("unexpected container type in IndexExpr: " + t.String()) + } + + case *ast.CompositeLit, *ast.StarExpr: + // Addressable types (lvalues) + return b.addr(fn, e, false).load(fn, e) + } + + panic(fmt.Sprintf("unexpected expr: %T", e)) +} + +// stmtList emits to fn code for all statements in list. +func (b *builder) stmtList(fn *Function, list []ast.Stmt) { + for _, s := range list { + b.stmt(fn, s) + } +} + +// receiver emits to fn code for expression e in the "receiver" +// position of selection e.f (where f may be a field or a method) and +// returns the effective receiver after applying the implicit field +// selections of sel. +// +// wantAddr requests that the result is an an address. If +// !sel.Indirect(), this may require that e be built in addr() mode; it +// must thus be addressable. +// +// escaping is defined as per builder.addr(). +// +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value { + var v Value + if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { + v = b.addr(fn, e, escaping).address(fn) + } else { + v = b.expr(fn, e) + } + + last := len(sel.Index()) - 1 + v = emitImplicitSelections(fn, v, sel.Index()[:last], source) + if !wantAddr && isPointer(v.Type()) { + v = emitLoad(fn, v, e) + } + return v +} + +// setCallFunc populates the function parts of a CallCommon structure +// (Func, Method, Recv, Args[0]) based on the kind of invocation +// occurring in e. +// +func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { + // Is this a method call? + if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + sel, ok := fn.Pkg.info.Selections[selector] + if ok && sel.Kind() == types.MethodVal { + obj := sel.Obj().(*types.Func) + recv := recvType(obj) + wantAddr := isPointer(recv) + escaping := true + v := b.receiver(fn, selector.X, wantAddr, escaping, sel, selector) + if isInterface(recv) { + // Invoke-mode call. + c.Value = v + c.Method = obj + } else { + // "Call"-mode call. 
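The two branches here separate dynamically dispatched interface calls from statically dispatched concrete calls. In source terms (Name and the interface are invented):

package main

import "fmt"

type Stringer interface{ String() string }

type Name string

func (n Name) String() string { return string(n) }

func main() {
	n := Name("ir")

	// Concrete receiver: the callee is known statically and the receiver
	// is passed as the first argument ("call" mode).
	fmt.Println(n.String())

	// Interface receiver: only the abstract method is known; dispatch
	// happens at run time ("invoke" mode).
	var s Stringer = n
	fmt.Println(s.String())
}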
+ c.Value = fn.Prog.declaredFunc(obj) + c.Args = append(c.Args, v) + } + return + } + + // sel.Kind()==MethodExpr indicates T.f() or (*T).f(): + // a statically dispatched call to the method f in the + // method-set of T or *T. T may be an interface. + // + // e.Fun would evaluate to a concrete method, interface + // wrapper function, or promotion wrapper. + // + // For now, we evaluate it in the usual way. + // + // TODO(adonovan): opt: inline expr() here, to make the + // call static and to avoid generation of wrappers. + // It's somewhat tricky as it may consume the first + // actual parameter if the call is "invoke" mode. + // + // Examples: + // type T struct{}; func (T) f() {} // "call" mode + // type T interface { f() } // "invoke" mode + // + // type S struct{ T } + // + // var s S + // S.f(s) + // (*S).f(&s) + // + // Suggested approach: + // - consume the first actual parameter expression + // and build it with b.expr(). + // - apply implicit field selections. + // - use MethodVal logic to populate fields of c. + } + + // Evaluate the function operand in the usual way. + c.Value = b.expr(fn, e.Fun) +} + +// emitCallArgs emits to f code for the actual parameters of call e to +// a (possibly built-in) function of effective type sig. +// The argument values are appended to args, which is then returned. +// +func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { + // f(x, y, z...): pass slice z straight through. + if e.Ellipsis != 0 { + for i, arg := range e.Args { + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type(), arg) + args = append(args, v) + } + return args + } + + offset := len(args) // 1 if call has receiver, 0 otherwise + + // Evaluate actual parameter expressions. + // + // If this is a chained call of the form f(g()) where g has + // multiple return values (MRV), they are flattened out into + // args; a suffix of them may end up in a varargs slice. + for _, arg := range e.Args { + v := b.expr(fn, arg) + if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain + for i, n := 0, ttuple.Len(); i < n; i++ { + args = append(args, emitExtract(fn, v, i, arg)) + } + } else { + args = append(args, v) + } + } + + // Actual->formal assignability conversions for normal parameters. + np := sig.Params().Len() // number of normal parameters + if sig.Variadic() { + np-- + } + for i := 0; i < np; i++ { + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type(), args[offset+i].Source()) + } + + // Actual->formal assignability conversions for variadic parameter, + // and construction of slice. + if sig.Variadic() { + varargs := args[offset+np:] + st := sig.Params().At(np).Type().(*types.Slice) + vt := st.Elem() + if len(varargs) == 0 { + args = append(args, emitConst(fn, nilConst(st))) + } else { + // Replace a suffix of args with a slice containing it. + at := types.NewArray(vt, int64(len(varargs))) + a := emitNew(fn, at, e) + a.source = e + for i, arg := range varargs { + iaddr := &IndexAddr{ + X: a, + Index: emitConst(fn, intConst(int64(i))), + } + iaddr.setType(types.NewPointer(vt)) + fn.emit(iaddr, e) + emitStore(fn, iaddr, arg, arg.Source()) + } + s := &Slice{X: a} + s.setType(st) + args[offset+np] = fn.emit(s, args[offset+np].Source()) + args = args[:offset+np+1] + } + } + return args +} + +// setCall emits to fn code to evaluate all the parameters of a function +// call e, and populates *c with those values. 
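The three argument-lowering paths in emitCallArgs map onto ordinary call forms like these (pair and sum are invented helpers):

package main

import "fmt"

func pair() (int, int) { return 1, 2 }

func sum(xs ...int) int {
	t := 0
	for _, x := range xs {
		t += x
	}
	return t
}

func main() {
	// Multi-return chain f(g()): the tuple from pair is flattened into the
	// argument list before the call.
	fmt.Println(sum(pair())) // 3

	// Ordinary variadic call: the arguments are packed into a fresh
	// backing array and sliced.
	fmt.Println(sum(1, 2, 3)) // 6

	// Ellipsis form: the existing slice is passed straight through.
	xs := []int{4, 5}
	fmt.Println(sum(xs...)) // 9
}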
+// +func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { + // First deal with the f(...) part and optional receiver. + b.setCallFunc(fn, e, c) + + // Then append the other actual parameters. + sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature) + if sig == nil { + panic(fmt.Sprintf("no signature for call of %s", e.Fun)) + } + c.Args = b.emitCallArgs(fn, sig, e, c.Args) +} + +// assignOp emits to fn code to perform loc = val. +func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) { + oldv := loc.load(fn, source) + loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source) +} + +// localValueSpec emits to fn code to define all of the vars in the +// function-local ValueSpec, spec. +// +func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { + switch { + case len(spec.Values) == len(spec.Names): + // e.g. var x, y = 0, 1 + // 1:1 assignment + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + } + lval := b.addr(fn, id, false) // non-escaping + b.assign(fn, lval, spec.Values[i], true, nil, spec) + } + + case len(spec.Values) == 0: + // e.g. var x, y int + // Locals are implicitly zero-initialized. + for _, id := range spec.Names { + if !isBlankIdent(id) { + lhs := fn.addLocalForIdent(id) + if fn.debugInfo() { + emitDebugRef(fn, id, lhs, true) + } + } + } + + default: + // e.g. var x, y = pos() + tuple := b.exprN(fn, spec.Values[0]) + for i, id := range spec.Names { + if !isBlankIdent(id) { + fn.addLocalForIdent(id) + lhs := b.addr(fn, id, false) // non-escaping + lhs.store(fn, emitExtract(fn, tuple, i, id), id) + } + } + } +} + +// assignStmt emits code to fn for a parallel assignment of rhss to lhss. +// isDef is true if this is a short variable declaration (:=). +// +// Note the similarity with localValueSpec. +// +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) { + // Side effects of all LHSs and RHSs must occur in left-to-right order. + lvals := make([]lvalue, len(lhss)) + isZero := make([]bool, len(lhss)) + for i, lhs := range lhss { + var lval lvalue = blank{} + if !isBlankIdent(lhs) { + if isDef { + if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { + fn.addNamedLocal(obj, lhs) + isZero[i] = true + } + } + lval = b.addr(fn, lhs, false) // non-escaping + } + lvals[i] = lval + } + if len(lhss) == len(rhss) { + // Simple assignment: x = f() (!isDef) + // Parallel assignment: x, y = f(), g() (!isDef) + // or short var decl: x, y := f(), g() (isDef) + // + // In all cases, the RHSs may refer to the LHSs, + // so we need a storebuf. + var sb storebuf + for i := range rhss { + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb, source) + } + sb.emit(fn) + } else { + // e.g. x, y = pos() + tuple := b.exprN(fn, rhss[0]) + emitDebugRef(fn, rhss[0], tuple, false) + for i, lval := range lvals { + lval.store(fn, emitExtract(fn, tuple, i, source), source) + } + } +} + +// arrayLen returns the length of the array whose composite literal elements are elts. +func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { + var max int64 = -1 + var i int64 = -1 + for _, e := range elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + i = b.expr(fn, kv.Key).(*Const).Int64() + } else { + i++ + } + if i > max { + max = i + } + } + return max + 1 +} + +// compLit emits to fn code to initialize a composite literal e at +// address addr with type typ. 
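At the source level, the two shapes that assignStmt distinguishes look like the sketch below (divmod is invented); the 1:1 form is the one that needs the store buffer, since the right-hand sides may read the left-hand sides.

package main

import "fmt"

func divmod(a, b int) (int, int) { return a / b, a % b }

func main() {
	x, y := 1, 2

	// 1:1 parallel assignment: every RHS is evaluated before any LHS is
	// updated, which the buffered stores guarantee.
	x, y = y, x
	fmt.Println(x, y) // 2 1

	// n:1 assignment: a single multi-result call fans out to the targets.
	q, r := divmod(7, 2)
	fmt.Println(q, r) // 3 1
}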
+// +// Nested composite literals are recursively initialized in place +// where possible. If isZero is true, compLit assumes that addr +// holds the zero value for typ. +// +// Because the elements of a composite literal may refer to the +// variables being updated, as in the second line below, +// x := T{a: 1} +// x = T{a: x.a} +// all the reads must occur before all the writes. Thus all stores to +// loc are emitted to the storebuf sb for later execution. +// +// A CompositeLit may have pointer type only in the recursive (nested) +// case when the type name is implicit. e.g. in []*T{{}}, the inner +// literal has type *T behaves like &T{}. +// In that case, addr must hold a T, not a *T. +// +func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { + typ := deref(fn.Pkg.typeOf(e)) + switch t := typ.Underlying().(type) { + case *types.Struct: + if !isZero && len(e.Elts) != t.NumFields() { + // memclear + sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e) + isZero = true + } + for i, e := range e.Elts { + fieldIndex := i + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + e = kv.Value + break + } + } + } + sf := t.Field(fieldIndex) + faddr := &FieldAddr{ + X: addr, + Field: fieldIndex, + } + faddr.setType(types.NewPointer(sf.Type())) + fn.emit(faddr, e) + b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e) + } + + case *types.Array, *types.Slice: + var at *types.Array + var array Value + switch t := t.(type) { + case *types.Slice: + at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) + alloc := emitNew(fn, at, e) + array = alloc + case *types.Array: + at = t + array = addr + + if !isZero && int64(len(e.Elts)) != at.Len() { + // memclear + sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) + } + } + + var idx *Const + for _, e := range e.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = emitConst(fn, intConst(idxval)) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr, e) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) + } else { + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) + } + } + + if t != at { // slice + s := &Slice{X: array} + s.setType(typ) + sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e) + } + + case *types.Map: + m := &MakeMap{Reserve: emitConst(fn, intConst(int64(len(e.Elts))))} + m.setType(typ) + fn.emit(m, e) + for _, e := range e.Elts { + e := e.(*ast.KeyValueExpr) + + // If a key expression in a map literal is itself a + // composite literal, the type may be omitted. + // For example: + // map[*struct{}]bool{{}: true} + // An &-operation may be implied: + // map[*struct{}]bool{&struct{}{}: true} + var key Value + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. 
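The implied &-operation described in the surrounding comments shows up in source like the following (P is an invented type); both the slice elements and the map key elide the pointer-typed literal:

package main

import "fmt"

type P struct{ X int }

func main() {
	// Nested literal with an implicit type: each element has type *P, so
	// an &-operation is implied and a fresh P is allocated per element.
	ps := []*P{{X: 1}, {X: 2}}

	// The same rule applies to pointer-typed map keys.
	m := map[*P]bool{{X: 3}: true}

	fmt.Println(ps[0].X, ps[1].X, len(m)) // 1 2 1
}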
+ key = b.addr(fn, e.Key, true).address(fn) + } else { + key = b.expr(fn, e.Key) + } + + loc := element{ + m: m, + k: emitConv(fn, key, t.Key(), e), + t: t.Elem(), + } + + // We call assign() only because it takes care + // of any &-operation required in the recursive + // case, e.g., + // map[int]*struct{}{0: {}} implies &struct{}{}. + // In-place update is of course impossible, + // and no storebuf is needed. + b.assign(fn, &loc, e.Value, true, nil, e) + } + sb.store(&address{addr: addr, expr: e}, m, e) + + default: + panic("unexpected CompositeLit type: " + t.String()) + } +} + +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + if s.Tag == nil { + b.switchStmtDynamic(fn, s, label) + return + } + dynamic := false + for _, iclause := range s.Body.List { + clause := iclause.(*ast.CaseClause) + for _, cond := range clause.List { + if fn.Pkg.info.Types[unparen(cond)].Value == nil { + dynamic = true + break + } + } + } + + if dynamic { + b.switchStmtDynamic(fn, s, label) + return + } + + if s.Init != nil { + b.stmt(fn, s.Init) + } + + entry := fn.currentBlock + tag := b.expr(fn, s.Tag) + + heads := make([]*BasicBlock, 0, len(s.Body.List)) + bodies := make([]*BasicBlock, len(s.Body.List)) + conds := make([]Value, 0, len(s.Body.List)) + + hasDefault := false + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + for i, stmt := range s.Body.List { + body := fn.newBasicBlock(fmt.Sprintf("switch.body.%d", i)) + bodies[i] = body + cas := stmt.(*ast.CaseClause) + if cas.List == nil { + // default branch + hasDefault = true + head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d", i)) + conds = append(conds, nil) + heads = append(heads, head) + fn.currentBlock = head + emitJump(fn, body, cas) + } + for j, cond := range stmt.(*ast.CaseClause).List { + fn.currentBlock = entry + head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d.%d", i, j)) + conds = append(conds, b.expr(fn, cond)) + heads = append(heads, head) + fn.currentBlock = head + emitJump(fn, body, cond) + } + } + + for i, stmt := range s.Body.List { + clause := stmt.(*ast.CaseClause) + body := bodies[i] + fn.currentBlock = body + fallthru := done + if i+1 < len(bodies) { + fallthru = bodies[i+1] + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, stmt) + } + + if !hasDefault { + head := fn.newBasicBlock("switch.head.implicit-default") + body := fn.newBasicBlock("switch.body.implicit-default") + fn.currentBlock = head + emitJump(fn, body, s) + fn.currentBlock = body + emitJump(fn, done, s) + heads = append(heads, head) + conds = append(conds, nil) + } + + if len(heads) != len(conds) { + panic(fmt.Sprintf("internal error: %d heads for %d conds", len(heads), len(conds))) + } + for _, head := range heads { + addEdge(entry, head) + } + fn.currentBlock = entry + entry.emit(&ConstantSwitch{ + Tag: tag, + Conds: conds, + }, s) + fn.currentBlock = done +} + +// switchStmt emits to fn code for the switch statement s, optionally +// labelled by label. +// +func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) { + // We treat SwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by irutil.Switches() + // to those cases that are free of side effects. 
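Whether switchStmt can emit a single multi-way dispatch depends on every case expression being constant; a source-level contrast (classify and limit are invented):

package main

import "fmt"

func classify(n, limit int) string {
	// All cases are constants: eligible for a single constant switch on n.
	switch n {
	case 0:
		return "zero"
	case 1, 2:
		return "small"
	}

	// A non-constant case expression forces the sequential if-else style
	// lowering, one comparison block per case.
	switch n {
	case limit:
		return "at limit"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(1, 10), classify(10, 10)) // small at limit
}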
+ if s.Init != nil { + b.stmt(fn, s.Init) + } + kTrue := emitConst(fn, NewConst(constant.MakeBool(true), tBool)) + + var tagv Value = kTrue + var tagSource ast.Node = s + if s.Tag != nil { + tagv = b.expr(fn, s.Tag) + tagSource = s.Tag + } + // lifting only considers loads and stores, but we want different + // sigma nodes for the different comparisons. use a temporary and + // load it in every branch. + tag := fn.addLocal(tagv.Type(), tagSource) + emitStore(fn, tag, tagv, tagSource) + + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. + // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var dfltBody *[]ast.Stmt + var dfltFallthrough *BasicBlock + var fallthru, dfltBlock *BasicBlock + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = fn.newBasicBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = fn.newBasicBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + dfltBody = &cc.Body + dfltFallthrough = fallthru + dfltBlock = body + continue + } + + var nextCond *BasicBlock + for _, cond := range cc.List { + nextCond = fn.newBasicBlock("switch.next") + if tagv == kTrue { + // emit a proper if/else chain instead of a comparison + // of a value against true. + // + // NOTE(dh): adonovan had a todo saying "don't forget + // conversions though". As far as I can tell, there + // aren't any conversions that we need to take care of + // here. `case bool(a) && bool(b)` as well as `case + // bool(a && b)` are being taken care of by b.cond, + // and `case a` where a is not of type bool is + // invalid. + b.cond(fn, cond, body, nextCond) + } else { + cond := emitCompare(fn, token.EQL, emitLoad(fn, tag, cond), b.expr(fn, cond), cond) + emitIf(fn, cond, body, nextCond, cond.Source()) + } + + fn.currentBlock = nextCond + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, s) + fn.currentBlock = nextCond + } + if dfltBlock != nil { + // The lack of a Source for the jump doesn't matter, block + // fusing will get rid of the jump later. 
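The block juggling described above is what keeps source-order fallthrough correct even though the default clause is dispatched last; for example (two is deliberately non-constant so this lowering applies):

package main

import "fmt"

func main() {
	two := 2

	// default appears first in source order but is only reached if no case
	// matches; fallthrough from case 1 still jumps to the body of the next
	// case in source order, case two.
	switch n := 1; n {
	default:
		fmt.Println("default")
	case 1:
		fmt.Println("one")
		fallthrough
	case two:
		fmt.Println("two")
	}
	// Prints "one", then "two".
}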
+ + emitJump(fn, dfltBlock, s) + fn.currentBlock = dfltBlock + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: dfltFallthrough, + } + b.stmtList(fn, *dfltBody) + fn.targets = fn.targets.tail + } + emitJump(fn, done, s) + fn.currentBlock = done +} + +func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { + if s.Init != nil { + b.stmt(fn, s.Init) + } + + var tag Value + switch e := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + tag = b.expr(fn, unparen(e.X).(*ast.TypeAssertExpr).X) + case *ast.AssignStmt: // y := x.(type) + tag = b.expr(fn, unparen(e.Rhs[0]).(*ast.TypeAssertExpr).X) + default: + panic("unreachable") + } + tagPtr := fn.addLocal(tag.Type(), tag.Source()) + emitStore(fn, tagPtr, tag, tag.Source()) + + // +1 in case there's no explicit default case + heads := make([]*BasicBlock, 0, len(s.Body.List)+1) + + entry := fn.currentBlock + done := fn.newBasicBlock("done") + if label != nil { + label._break = done + } + + // set up type switch and constant switch, populate their conditions + tswtch := &TypeSwitch{ + Tag: emitLoad(fn, tagPtr, tag.Source()), + Conds: make([]types.Type, 0, len(s.Body.List)+1), + } + cswtch := &ConstantSwitch{ + Conds: make([]Value, 0, len(s.Body.List)+1), + } + + rets := make([]types.Type, 0, len(s.Body.List)+1) + index := 0 + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + fn.addNamedLocal(obj, cc) + } + if cc.List == nil { + // default case + default_ = cc + } else { + for _, expr := range cc.List { + tswtch.Conds = append(tswtch.Conds, fn.Pkg.typeOf(expr)) + cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(index)))) + index++ + } + if len(cc.List) == 1 { + rets = append(rets, fn.Pkg.typeOf(cc.List[0])) + } else { + for range cc.List { + rets = append(rets, tag.Type()) + } + } + } + } + + // default branch + rets = append(rets, tag.Type()) + + var vars []*types.Var + vars = append(vars, varIndex) + for _, typ := range rets { + vars = append(vars, anonVar(typ)) + } + tswtch.setType(types.NewTuple(vars...)) + // default branch + fn.currentBlock = entry + fn.emit(tswtch, s) + cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(-1)))) + // in theory we should add a local and stores/loads for tswtch, to + // generate sigma nodes in the branches. however, there isn't any + // useful information we could possibly attach to it. + cswtch.Tag = emitExtract(fn, tswtch, 0, s) + fn.emit(cswtch, s) + + // build heads and bodies + index = 0 + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + continue + } + + body := fn.newBasicBlock("typeswitch.body") + for _, expr := range cc.List { + head := fn.newBasicBlock("typeswitch.head") + heads = append(heads, head) + fn.currentBlock = head + + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. 
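The implicit object handled here is the per-clause variable of a y := x.(type) switch; in source terms (describe is an invented function):

package main

import "fmt"

func describe(x interface{}) string {
	// Each clause declares its own y: in the single-type clause y has that
	// type, in the multi-type and default clauses it keeps the type of x.
	switch y := x.(type) {
	case int:
		return fmt.Sprintf("int %d", y) // y is an int here
	case string, error:
		return fmt.Sprintf("stringish %v", y) // y is interface{} here
	default:
		return fmt.Sprintf("other %T", y)
	}
}

func main() {
	fmt.Println(describe(7), describe("hi"), describe(3.5))
}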
+ + l := fn.objects[obj] + if rets[index] == tUntypedNil { + emitStore(fn, l, emitConst(fn, nilConst(tswtch.Tag.Type())), s.Assign) + } else { + x := emitExtract(fn, tswtch, index+1, s.Assign) + emitStore(fn, l, x, nil) + } + } + + emitJump(fn, body, expr) + index++ + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, clause) + } + + if default_ == nil { + // implicit default + heads = append(heads, done) + } else { + body := fn.newBasicBlock("typeswitch.default") + heads = append(heads, body) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + if obj := fn.Pkg.info.Implicits[default_]; obj != nil { + l := fn.objects[obj] + x := emitExtract(fn, tswtch, index+1, s.Assign) + emitStore(fn, l, x, s) + } + b.stmtList(fn, default_.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, s) + } + + fn.currentBlock = entry + for _, head := range heads { + addEdge(entry, head) + } + fn.currentBlock = done +} + +// selectStmt emits to fn code for the select statement s, optionally +// labelled by label. +// +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) { + if len(s.Body.List) == 0 { + instr := &Select{Blocking: true} + instr.setType(types.NewTuple(varIndex, varOk)) + fn.emit(instr, s) + fn.emit(new(Unreachable), s) + addEdge(fn.currentBlock, fn.Exit) + return true + } + + // A blocking select of a single case degenerates to a + // simple send or receive. + // TODO(adonovan): opt: is this optimization worth its weight? + if len(s.Body.List) == 1 { + clause := s.Body.List[0].(*ast.CommClause) + if clause.Comm != nil { + b.stmt(fn, clause.Comm) + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, clause) + fn.currentBlock = done + return false + } + } + + // First evaluate all channels in all cases, and find + // the directions of each state. + var states []*SelectState + blocking := true + debugInfo := fn.debugInfo() + for _, clause := range s.Body.List { + var st *SelectState + switch comm := clause.(*ast.CommClause).Comm.(type) { + case nil: // default case + blocking = false + continue + + case *ast.SendStmt: // ch<- i + ch := b.expr(fn, comm.Chan) + st = &SelectState{ + Dir: types.SendOnly, + Chan: ch, + Send: emitConv(fn, b.expr(fn, comm.Value), + ch.Type().Underlying().(*types.Chan).Elem(), comm), + Pos: comm.Arrow, + } + if debugInfo { + st.DebugNode = comm + } + + case *ast.AssignStmt: // x := <-ch + recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + + case *ast.ExprStmt: // <-ch + recv := unparen(comm.X).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + } + states = append(states, st) + } + + // We dispatch on the (fair) result of Select using a + // switch on the returned index. 
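A source-level reading of that dispatch (channel and values invented): the whole select is reduced to one instruction producing an index plus the received values, and the index then routes control to the chosen case body.

package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	ch <- 42

	// Conceptually: one Select yields (index, ok, received values...), and
	// a switch on index picks the case body to run.
	select {
	case v, ok := <-ch:
		fmt.Println("recv", v, ok) // recv 42 true
	case ch <- 7:
		fmt.Println("sent")
	default:
		fmt.Println("nothing ready")
	}
}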
+ sel := &Select{ + States: states, + Blocking: blocking, + } + sel.source = s + var vars []*types.Var + vars = append(vars, varIndex, varOk) + for _, st := range states { + if st.Dir == types.RecvOnly { + tElem := st.Chan.Type().Underlying().(*types.Chan).Elem() + vars = append(vars, anonVar(tElem)) + } + } + sel.setType(types.NewTuple(vars...)) + fn.emit(sel, s) + idx := emitExtract(fn, sel, 0, s) + + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + + entry := fn.currentBlock + swtch := &ConstantSwitch{ + Tag: idx, + // one condition per case + Conds: make([]Value, 0, len(s.Body.List)+1), + } + // note that we don't need heads; a select case can only have a single condition + var bodies []*BasicBlock + + state := 0 + r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + body := fn.newBasicBlock("select.default") + fn.currentBlock = body + bodies = append(bodies, body) + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + emitJump(fn, done, s) + fn.targets = fn.targets.tail + swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(-1))) + continue + } + swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(int64(state)))) + body := fn.newBasicBlock("select.body") + fn.currentBlock = body + bodies = append(bodies, body) + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + if debugInfo { + v := emitExtract(fn, sel, r, comm) + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + r++ + + case *ast.AssignStmt: // x := <-states[state].Chan + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + } + x := b.addr(fn, comm.Lhs[0], false) // non-escaping + v := emitExtract(fn, sel, r, comm) + if debugInfo { + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + x.store(fn, v, comm) + + if len(comm.Lhs) == 2 { // x, ok := ... + if comm.Tok == token.DEFINE { + fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + } + ok := b.addr(fn, comm.Lhs[1], false) // non-escaping + ok.store(fn, emitExtract(fn, sel, 1, comm), comm) + } + r++ + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, s) + state++ + } + fn.currentBlock = entry + fn.emit(swtch, s) + for _, body := range bodies { + addEdge(entry, body) + } + fn.currentBlock = done + return false +} + +// forStmt emits to fn code for the for statement s, optionally +// labelled by label. +// +func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... 
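The block names in the layout sketched above line up with an ordinary three-clause loop; continue targets the post block and break the done block:

package main

import "fmt"

func main() {
	// cond is evaluated in "for.loop", the body runs in "for.body", the
	// increment in "for.post" (the target of continue), and break lands
	// in "for.done".
	for i := 0; i < 3; i++ {
		if i == 1 {
			continue
		}
		fmt.Println(i) // prints 0 and 2
	}
}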
+ // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(fn, s.Init) + } + body := fn.newBasicBlock("for.body") + done := fn.newBasicBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = fn.newBasicBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = fn.newBasicBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + emitJump(fn, loop, s) + fn.currentBlock = loop + if loop != body { + b.cond(fn, s.Cond, body, done) + fn.currentBlock = body + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: cont, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, cont, s) + + if s.Post != nil { + fn.currentBlock = cont + b.stmt(fn, s.Post) + emitJump(fn, loop, s) // back-edge + } + fn.currentBlock = done +} + +// rangeIndexed emits to fn the header for an integer-indexed loop +// over array, *array or slice value x. +// The v result is defined only if tv is non-nil. +// forPos is the position of the "for" token. +// +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { + // + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done + // body: + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) + + // Determine number of iterations. + var length Value + if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + // For array or *array, the number of iterations is + // known statically thanks to the type. We avoid a + // data dependence upon x, permitting later dead-code + // elimination if x is pure, static unrolling, etc. + // Ranging over a nil *array may have >0 iterations. + // We still generate code for x, in case it has effects. + length = emitConst(fn, intConst(arr.Len())) + } else { + // length = len(x). + var c Call + c.Call.Value = makeLen(x.Type()) + c.Call.Args = []Value{x} + c.setType(tInt) + length = fn.emit(&c, source) + } + + index := fn.addLocal(tInt, source) + emitStore(fn, index, emitConst(fn, intConst(-1)), source) + + loop = fn.newBasicBlock("rangeindex.loop") + emitJump(fn, loop, source) + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, index, source), + Y: emitConst(fn, intConst(1)), + } + incr.setType(tInt) + emitStore(fn, index, fn.emit(incr, source), source) + + body := fn.newBasicBlock("rangeindex.body") + done = fn.newBasicBlock("rangeindex.done") + emitIf(fn, emitCompare(fn, token.LSS, incr, length, source), body, done, source) + fn.currentBlock = body + + k = emitLoad(fn, index, source) + if tv != nil { + switch t := x.Type().Underlying().(type) { + case *types.Array: + instr := &Index{ + X: x, + Index: k, + } + instr.setType(t.Elem()) + v = fn.emit(instr, source) + + case *types.Pointer: // *array + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) + v = emitLoad(fn, fn.emit(instr, source), source) + + case *types.Slice: + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem())) + v = emitLoad(fn, fn.emit(instr, source), source) + + default: + panic("rangeIndexed x:" + t.String()) + } + } + return +} + +// rangeIter emits to fn the header for a loop using +// Range/Next/Extract to iterate over map or string value x. 
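The header that rangeIndexed emits corresponds to the hand-written loop below (xs is an invented slice); starting the index at -1 and incrementing at the top of the loop is what makes continue re-enter at the right point:

package main

import "fmt"

func main() {
	xs := []string{"a", "b", "c"}

	// Source form.
	for i, v := range xs {
		fmt.Println(i, v)
	}

	// Roughly the emitted shape.
	length := len(xs)
	for index := -1; ; {
		index++
		if index >= length {
			break
		}
		i, v := index, xs[index]
		fmt.Println(i, v)
	}
}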
+// tk and tv are the types of the key/value results k and v, or nil +// if the respective component is not wanted. +// +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { + // + // it = range x + // loop: (target of continue) + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done + // body: + // k = extract okv #1 + // v = extract okv #2 + // ...body... + // jump loop + // done: (target of break) + // + + if tk == nil { + tk = tInvalid + } + if tv == nil { + tv = tInvalid + } + + rng := &Range{X: x} + rng.setType(tRangeIter) + it := fn.emit(rng, source) + + loop = fn.newBasicBlock("rangeiter.loop") + emitJump(fn, loop, source) + fn.currentBlock = loop + + _, isString := x.Type().Underlying().(*types.Basic) + + okv := &Next{ + Iter: it, + IsString: isString, + } + okv.setType(types.NewTuple( + varOk, + newVar("k", tk), + newVar("v", tv), + )) + fn.emit(okv, source) + + body := fn.newBasicBlock("rangeiter.body") + done = fn.newBasicBlock("rangeiter.done") + emitIf(fn, emitExtract(fn, okv, 0, source), body, done, source) + fn.currentBlock = body + + if tk != tInvalid { + k = emitExtract(fn, okv, 1, source) + } + if tv != tInvalid { + v = emitExtract(fn, okv, 2, source) + } + return +} + +// rangeChan emits to fn the header for a loop that receives from +// channel x until it fails. +// tk is the channel's element type, or nil if the k result is +// not wanted +// pos is the position of the '=' or ':=' token. +// +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) { + // + // loop: (target of continue) + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done + // body: + // k = extract ko #0 + // ... + // goto loop + // done: (target of break) + + loop = fn.newBasicBlock("rangechan.loop") + emitJump(fn, loop, source) + fn.currentBlock = loop + retv := emitRecv(fn, x, true, types.NewTuple(newVar("k", x.Type().Underlying().(*types.Chan).Elem()), varOk), source) + body := fn.newBasicBlock("rangechan.body") + done = fn.newBasicBlock("rangechan.done") + emitIf(fn, emitExtract(fn, retv, 1, source), body, done, source) + fn.currentBlock = body + if tk != nil { + k = emitExtract(fn, retv, 0, source) + } + return +} + +// rangeStmt emits to fn code for the range statement s, optionally +// labelled by label. +// +func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) { + var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.Pkg.typeOf(s.Key) + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.Pkg.typeOf(s.Value) + } + + // If iteration variables are defined (:=), this + // occurs once outside the loop. + // + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. 
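The channel form sketched in rangeChan is the familiar receive-until-closed pattern; both loops below (invented channels) print the same values:

package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	close(ch)

	// Source form.
	for k := range ch {
		fmt.Println(k)
	}

	// Roughly the emitted shape: a comma-ok receive per iteration, leaving
	// the loop once the channel is closed and drained.
	ch2 := make(chan int, 2)
	ch2 <- 1
	ch2 <- 2
	close(ch2)
	for {
		k, ok := <-ch2
		if !ok {
			break
		}
		fmt.Println(k)
	}
}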
+ if s.Tok == token.DEFINE { + if tk != nil { + fn.addLocalForIdent(s.Key.(*ast.Ident)) + } + if tv != nil { + fn.addLocalForIdent(s.Value.(*ast.Ident)) + } + } + + x := b.expr(fn, s.X) + + var k, v Value + var loop, done *BasicBlock + switch rt := x.Type().Underlying().(type) { + case *types.Slice, *types.Array, *types.Pointer: // *array + k, v, loop, done = b.rangeIndexed(fn, x, tv, source) + + case *types.Chan: + k, loop, done = b.rangeChan(fn, x, tk, source) + + case *types.Map, *types.Basic: // string + k, v, loop, done = b.rangeIter(fn, x, tk, tv, source) + + default: + panic("Cannot range over: " + rt.String()) + } + + // Evaluate both LHS expressions before we update either. + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k, s) + } + if tv != nil { + vl.store(fn, v, s) + } + + if label != nil { + label._break = done + label._continue = loop + } + + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: loop, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, loop, source) // back-edge + fn.currentBlock = done +} + +// stmt lowers statement s to IR form, emitting code to fn. +func (b *builder) stmt(fn *Function, _s ast.Stmt) { + // The label of the current statement. If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.EmptyStmt: + // ignore. (Usually removed by gofmt.) + + case *ast.DeclStmt: // Con, Var or Typ + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if vs, ok := spec.(*ast.ValueSpec); ok { + b.localValueSpec(fn, vs) + } + } + } + + case *ast.LabeledStmt: + label = fn.labelledBlock(s.Label) + emitJump(fn, label._goto, s) + fn.currentBlock = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(fn, s.Stmt, label) + + case *ast.ExprStmt: + b.expr(fn, s.X) + + case *ast.SendStmt: + instr := &Send{ + Chan: b.expr(fn, s.Chan), + X: emitConv(fn, b.expr(fn, s.Value), + fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem(), s), + } + fn.emit(instr, s) + + case *ast.IncDecStmt: + op := token.ADD + if s.Tok == token.DEC { + op = token.SUB + } + loc := b.addr(fn, s.X, false) + b.assignOp(fn, loc, emitConst(fn, NewConst(constant.MakeInt64(1), loc.typ())), op, s) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE, _s) + + default: // +=, etc. + op := s.Tok + token.ADD - token.ADD_ASSIGN + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s) + } + + case *ast.GoStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Go{} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v, s) + + case *ast.DeferStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Defer{} + b.setCall(fn, s.Call, &v.Call) + fn.hasDefer = true + fn.emit(&v, s) + + case *ast.ReturnStmt: + // TODO(dh): we could emit tighter position information by + // using the ith returned expression + + var results []Value + if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { + // Return of one expression in a multi-valued function. 
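assignOp implements the load/arith/store pattern behind augmented assignments and ++/--; the one-time evaluation of the left-hand location is observable when it has side effects (next is an invented helper):

package main

import "fmt"

func main() {
	x := 10
	x += 5 // load x, add, store back
	x++    // same pattern with a constant 1

	// The LHS location is computed once, so next() runs exactly once here.
	xs := []int{1, 2, 3}
	i := 0
	next := func() int { i++; return i }
	xs[next()] *= 10 // xs becomes [1 20 3]

	fmt.Println(x, xs) // 16 [1 20 3]
}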
+ tuple := b.exprN(fn, s.Results[0]) + ttuple := tuple.Type().(*types.Tuple) + for i, n := 0, ttuple.Len(); i < n; i++ { + results = append(results, + emitConv(fn, emitExtract(fn, tuple, i, s), + fn.Signature.Results().At(i).Type(), s)) + } + } else { + // 1:1 return, or no-arg return in non-void function. + for i, r := range s.Results { + v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type(), s) + results = append(results, v) + } + } + + ret := fn.results() + for i, r := range results { + emitStore(fn, ret[i], r, s) + } + + emitJump(fn, fn.Exit, s) + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BranchStmt: + var block *BasicBlock + switch s.Tok { + case token.BREAK: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._break + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._break + } + } + + case token.CONTINUE: + if s.Label != nil { + block = fn.labelledBlock(s.Label)._continue + } else { + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._continue + } + } + + case token.FALLTHROUGH: + for t := fn.targets; t != nil && block == nil; t = t.tail { + block = t._fallthrough + } + + case token.GOTO: + block = fn.labelledBlock(s.Label)._goto + } + j := emitJump(fn, block, s) + j.Comment = s.Tok.String() + fn.currentBlock = fn.newBasicBlock("unreachable") + + case *ast.BlockStmt: + b.stmtList(fn, s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(fn, s.Init) + } + then := fn.newBasicBlock("if.then") + done := fn.newBasicBlock("if.done") + els := done + if s.Else != nil { + els = fn.newBasicBlock("if.else") + } + instr := b.cond(fn, s.Cond, then, els) + instr.source = s + fn.currentBlock = then + b.stmt(fn, s.Body) + emitJump(fn, done, s) + + if s.Else != nil { + fn.currentBlock = els + b.stmt(fn, s.Else) + emitJump(fn, done, s) + } + + fn.currentBlock = done + + case *ast.SwitchStmt: + b.switchStmt(fn, s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(fn, s, label) + + case *ast.SelectStmt: + if b.selectStmt(fn, s, label) { + // the select has no cases, it blocks forever + fn.currentBlock = fn.newBasicBlock("unreachable") + } + + case *ast.ForStmt: + b.forStmt(fn, s, label) + + case *ast.RangeStmt: + b.rangeStmt(fn, s, label, s) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +// buildFunction builds IR code for the body of function fn. Idempotent. +func (b *builder) buildFunction(fn *Function) { + if fn.Blocks != nil { + return // building already started + } + + var recvField *ast.FieldList + var body *ast.BlockStmt + var functype *ast.FuncType + switch n := fn.source.(type) { + case nil: + return // not a Go source function. (Synthetic, or from object file.) + case *ast.FuncDecl: + functype = n.Type + recvField = n.Recv + body = n.Body + case *ast.FuncLit: + functype = n.Type + body = n.Body + default: + panic(n) + } + + if fn.Package().Pkg.Path() == "syscall" && fn.Name() == "Exit" { + // syscall.Exit is a stub and the way os.Exit terminates the + // process. Note that there are other functions in the runtime + // that also terminate or unwind that we cannot analyze. + // However, they aren't stubs, so buildExits ends up getting + // called on them, so that's where we handle those special + // cases. + fn.NoReturn = AlwaysExits + } + + if body == nil { + // External function. + if fn.Params == nil { + // This condition ensures we add a non-empty + // params list once only, but we may attempt + // the degenerate empty case repeatedly. 
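The multi-valued branch above handles returns like the one in bounds below (invented names), where a single call supplies all of the function's results:

package main

import "fmt"

func minmax(a, b int) (int, int) {
	if a < b {
		return a, b
	}
	return b, a
}

// The tuple from minmax is split apart, converted to the declared result
// types, stored into the result slots, and control jumps to the exit block.
func bounds(a, b int) (lo, hi int) { return minmax(a, b) }

func main() {
	fmt.Println(bounds(9, 4)) // 4 9
}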
+ // TODO(adonovan): opt: don't do that. + + // We set Function.Params even though there is no body + // code to reference them. This simplifies clients. + if recv := fn.Signature.Recv(); recv != nil { + // XXX synthesize an ast.Node + fn.addParamObj(recv, nil) + } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + // XXX synthesize an ast.Node + fn.addParamObj(params.At(i), nil) + } + } + return + } + if fn.Prog.mode&LogSource != 0 { + defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.Pos()))() + } + fn.blocksets = b.blocksets + fn.Blocks = make([]*BasicBlock, 0, avgBlocks) + fn.startBody() + fn.createSyntacticParams(recvField, functype) + fn.exitBlock() + b.stmt(fn, body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // + // Block optimizations eliminate the current block, if + // unreachable. It is a builder invariant that + // if this no-arg return is ill-typed for + // fn.Signature.Results, this block must be + // unreachable. The sanity checker checks this. + // fn.emit(new(RunDefers)) + // fn.emit(new(Return)) + emitJump(fn, fn.Exit, nil) + } + optimizeBlocks(fn) + buildFakeExits(fn) + b.buildExits(fn) + b.addUnreachables(fn) + fn.finishBody() + b.blocksets = fn.blocksets + fn.functionBody = nil +} + +// buildFuncDecl builds IR code for the function or method declared +// by decl in package pkg. +// +func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { + id := decl.Name + if isBlankIdent(id) { + return // discard + } + fn := pkg.values[pkg.info.Defs[id]].(*Function) + if decl.Recv == nil && id.Name == "init" { + var v Call + v.Call.Value = fn + v.setType(types.NewTuple()) + pkg.init.emit(&v, decl) + } + fn.source = decl + b.buildFunction(fn) +} + +// Build calls Package.Build for each package in prog. +// +// Build is intended for whole-program analysis; a typical compiler +// need only build a single package. +// +// Build is idempotent and thread-safe. +// +func (prog *Program) Build() { + for _, p := range prog.packages { + p.Build() + } +} + +// Build builds IR code for all functions and vars in package p. +// +// Precondition: CreatePackage must have been called for all of p's +// direct imports (and hence its direct imports must have been +// error-free). +// +// Build is idempotent and thread-safe. +// +func (p *Package) Build() { p.buildOnce.Do(p.build) } + +func (p *Package) build() { + if p.info == nil { + return // synthetic package, e.g. "testmain" + } + + // Ensure we have runtime type info for all exported members. + // TODO(adonovan): ideally belongs in memberFromObject, but + // that would require package creation in topological order. + for name, mem := range p.Members { + if ast.IsExported(name) { + p.Prog.needMethodsOf(mem.Type()) + } + } + if p.Prog.mode&LogSource != 0 { + defer logStack("build %s", p)() + } + init := p.init + init.startBody() + init.exitBlock() + + var done *BasicBlock + + // Make init() skip if package is already initialized. + initguard := p.Var("init$guard") + doinit := init.newBasicBlock("init.start") + done = init.Exit + emitIf(init, emitLoad(init, initguard, nil), done, doinit, nil) + init.currentBlock = doinit + emitStore(init, initguard, emitConst(init, NewConst(constant.MakeBool(true), tBool)), nil) + + // Call the init() function of each package we import. 
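What this code and the lines that follow synthesize is, roughly, the package initializer sketched below in ordinary Go (initGuard, dependencyInit, userInit, computeX and globalX are invented stand-ins):

package main

import "fmt"

var initGuard bool
var globalX int

func packageInit() {
	if initGuard { // the init$guard test emitted above
		return
	}
	initGuard = true
	dependencyInit()     // one call per imported package's initializer
	globalX = computeX() // package-level vars, in types.Info.InitOrder
	userInit()           // declared init functions, in source order
}

func computeX() int   { return 42 }
func dependencyInit() {}
func userInit()       {}

func main() {
	packageInit()
	fmt.Println(globalX) // 42
}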
+ for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) + } + var v Call + v.Call.Value = prereq.init + v.setType(types.NewTuple()) + init.emit(&v, nil) + } + + b := builder{ + printFunc: p.printFunc, + } + + // Initialize package-level vars in correct order. + for _, varinit := range p.info.InitOrder { + if init.Prog.mode&LogSource != 0 { + fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", + varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) + } + if len(varinit.Lhs) == 1 { + // 1:1 initialization: var x, y = a(), b() + var lval lvalue + if v := varinit.Lhs[0]; v.Name() != "_" { + lval = &address{addr: p.values[v].(*Global)} + } else { + lval = blank{} + } + // TODO(dh): do emit position information + b.assign(init, lval, varinit.Rhs, true, nil, nil) + } else { + // n:1 initialization: var x, y := f() + tuple := b.exprN(init, varinit.Rhs) + for i, v := range varinit.Lhs { + if v.Name() == "_" { + continue + } + emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i, nil), nil) + } + } + } + + // Build all package-level functions, init functions + // and methods, including unreachable/blank ones. + // We build them in source order, but it's not significant. + for _, file := range p.files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + b.buildFuncDecl(p, decl) + } + } + } + + // Finish up init(). + emitJump(init, done, nil) + init.finishBody() + + p.info = nil // We no longer need ASTs or go/types deductions. + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// Like ObjectOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) objectOf(id *ast.Ident) types.Object { + if o := p.info.ObjectOf(id); o != nil { + return o + } + panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", + id.Name, p.Prog.Fset.Position(id.Pos()))) +} + +// Like TypeOf, but panics instead of returning nil. +// Only valid during p's create and build phases. +func (p *Package) typeOf(e ast.Expr) types.Type { + if T := p.info.TypeOf(e); T != nil { + return T + } + panic(fmt.Sprintf("no type for %T @ %s", + e, p.Prog.Fset.Position(e.Pos()))) +} diff --git a/vendor/honnef.co/go/tools/go/ir/const.go b/vendor/honnef.co/go/tools/go/ir/const.go new file mode 100644 index 000000000..7cdf006e8 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/const.go @@ -0,0 +1,153 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines the Const SSA value type. + +import ( + "fmt" + "go/constant" + "go/types" + "strconv" +) + +// NewConst returns a new constant of the specified value and type. +// val must be valid according to the specification of Const.Value. +// +func NewConst(val constant.Value, typ types.Type) *Const { + return &Const{ + register: register{ + typ: typ, + }, + Value: val, + } +} + +// intConst returns an 'int' constant that evaluates to i. +// (i is an int64 in case the host is narrower than the target.) +func intConst(i int64) *Const { + return NewConst(constant.MakeInt64(i), tInt) +} + +// nilConst returns a nil constant of the specified type, which may +// be any reference type, including interfaces. 
+// +func nilConst(typ types.Type) *Const { + return NewConst(nil, typ) +} + +// stringConst returns a 'string' constant that evaluates to s. +func stringConst(s string) *Const { + return NewConst(constant.MakeString(s), tString) +} + +// zeroConst returns a new "zero" constant of the specified type, +// which must not be an array or struct type: the zero values of +// aggregates are well-defined but cannot be represented by Const. +// +func zeroConst(t types.Type) *Const { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return NewConst(constant.MakeBool(false), t) + case t.Info()&types.IsNumeric != 0: + return NewConst(constant.MakeInt64(0), t) + case t.Info()&types.IsString != 0: + return NewConst(constant.MakeString(""), t) + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return nilConst(t) + default: + panic(fmt.Sprint("zeroConst for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return nilConst(t) + case *types.Named: + return NewConst(zeroConst(t.Underlying()).Value, t) + case *types.Array, *types.Struct, *types.Tuple: + panic(fmt.Sprint("zeroConst applied to aggregate:", t)) + } + panic(fmt.Sprint("zeroConst: unexpected ", t)) +} + +func (c *Const) RelString(from *types.Package) string { + var p string + if c.Value == nil { + p = "nil" + } else if c.Value.Kind() == constant.String { + v := constant.StringVal(c.Value) + const max = 20 + // TODO(adonovan): don't cut a rune in half. + if len(v) > max { + v = v[:max-3] + "..." // abbreviate + } + p = strconv.Quote(v) + } else { + p = c.Value.String() + } + return fmt.Sprintf("Const <%s> {%s}", relType(c.Type(), from), p) +} + +func (c *Const) String() string { + return c.RelString(c.Parent().pkg()) +} + +// IsNil returns true if this constant represents a typed or untyped nil value. +func (c *Const) IsNil() bool { + return c.Value == nil +} + +// Int64 returns the numeric value of this constant truncated to fit +// a signed 64-bit integer. +// +func (c *Const) Int64() int64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if i, ok := constant.Int64Val(x); ok { + return i + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return int64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Uint64 returns the numeric value of this constant truncated to fit +// an unsigned 64-bit integer. +// +func (c *Const) Uint64() uint64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if u, ok := constant.Uint64Val(x); ok { + return u + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return uint64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Float64 returns the numeric value of this constant truncated to fit +// a float64. +// +func (c *Const) Float64() float64 { + f, _ := constant.Float64Val(c.Value) + return f +} + +// Complex128 returns the complex value of this constant truncated to +// fit a complex128. 
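These accessors are thin wrappers over go/constant; used directly, outside this package, the relevant calls look like this:

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	a := constant.MakeInt64(40)
	b := constant.BinaryOp(a, token.ADD, constant.MakeInt64(2))

	// The Int64 accessor effectively does ToInt followed by Int64Val.
	i, exact := constant.Int64Val(constant.ToInt(b))
	fmt.Println(i, exact) // 42 true

	s := constant.MakeString("hello")
	fmt.Println(constant.StringVal(s), constant.BoolVal(constant.MakeBool(true)))
}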
+// +func (c *Const) Complex128() complex128 { + re, _ := constant.Float64Val(constant.Real(c.Value)) + im, _ := constant.Float64Val(constant.Imag(c.Value)) + return complex(re, im) +} diff --git a/vendor/honnef.co/go/tools/go/ir/create.go b/vendor/honnef.co/go/tools/go/ir/create.go new file mode 100644 index 000000000..5e7f6ed94 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/create.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file implements the CREATE phase of IR construction. +// See builder.go for explanation. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "sync" + + "honnef.co/go/tools/go/types/typeutil" +) + +// measured on the standard library and rounded up to powers of two, +// on average there are 8 blocks and 16 instructions per block in a +// function. +const avgBlocks = 8 +const avgInstructionsPerBlock = 16 + +// NewProgram returns a new IR Program. +// +// mode controls diagnostics and checking during IR construction. +// +func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { + prog := &Program{ + Fset: fset, + imported: make(map[string]*Package), + packages: make(map[*types.Package]*Package), + thunks: make(map[selectionKey]*Function), + bounds: make(map[*types.Func]*Function), + mode: mode, + } + + h := typeutil.MakeHasher() // protected by methodsMu, in effect + prog.methodSets.SetHasher(h) + prog.canon.SetHasher(h) + + return prog +} + +// memberFromObject populates package pkg with a member for the +// typechecker object obj. +// +// For objects from Go source code, syntax is the associated syntax +// tree (for funcs and vars only); it will be used during the build +// phase. +// +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { + name := obj.Name() + switch obj := obj.(type) { + case *types.Builtin: + if pkg.Pkg != types.Unsafe { + panic("unexpected builtin object: " + obj.String()) + } + + case *types.TypeName: + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } + + case *types.Const: + c := &NamedConst{ + object: obj, + Value: NewConst(obj.Val(), obj.Type()), + pkg: pkg, + } + pkg.values[obj] = c.Value + pkg.Members[name] = c + + case *types.Var: + g := &Global{ + Pkg: pkg, + name: name, + object: obj, + typ: types.NewPointer(obj.Type()), // address + } + pkg.values[obj] = g + pkg.Members[name] = g + + case *types.Func: + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil && name == "init" { + pkg.ninit++ + name = fmt.Sprintf("init#%d", pkg.ninit) + } + fn := &Function{ + name: name, + object: obj, + Signature: sig, + Pkg: pkg, + Prog: pkg.Prog, + } + + fn.source = syntax + fn.initHTML(pkg.printFunc) + if syntax == nil { + fn.Synthetic = SyntheticLoadedFromExportData + } else { + // Note: we initialize fn.Blocks in + // (*builder).buildFunction and not here because Blocks + // being nil is used to indicate that building of the + // function hasn't started yet. + + fn.functionBody = &functionBody{ + scratchInstructions: make([]Instruction, avgBlocks*avgInstructionsPerBlock), + } + } + + pkg.values[obj] = fn + pkg.Functions = append(pkg.Functions, fn) + if sig.Recv() == nil { + pkg.Members[name] = fn // package-level function + } + + default: // (incl. 
*types.Package) + panic("unexpected Object type: " + obj.String()) + } +} + +// membersFromDecl populates package pkg with members for each +// typechecker object (var, func, const or type) associated with the +// specified decl. +// +func membersFromDecl(pkg *Package, decl ast.Decl) { + switch decl := decl.(type) { + case *ast.GenDecl: // import, const, type or var + switch decl.Tok { + case token.CONST: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case token.VAR: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], spec) + } + } + } + + case token.TYPE: + for _, spec := range decl.Specs { + id := spec.(*ast.TypeSpec).Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], nil) + } + } + } + + case *ast.FuncDecl: + id := decl.Name + if !isBlankIdent(id) { + memberFromObject(pkg, pkg.info.Defs[id], decl) + } + } +} + +// CreatePackage constructs and returns an IR Package from the +// specified type-checked, error-free file ASTs, and populates its +// Members mapping. +// +// importable determines whether this package should be returned by a +// subsequent call to ImportedPackage(pkg.Path()). +// +// The real work of building IR form for each function is not done +// until a subsequent call to Package.Build(). +// +func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + p := &Package{ + Prog: prog, + Members: make(map[string]Member), + values: make(map[types.Object]Value), + Pkg: pkg, + info: info, // transient (CREATE and BUILD phases) + files: files, // transient (CREATE and BUILD phases) + printFunc: prog.PrintFunc, + } + + // Add init() function. + p.init = &Function{ + name: "init", + Signature: new(types.Signature), + Synthetic: SyntheticPackageInitializer, + Pkg: p, + Prog: prog, + functionBody: new(functionBody), + } + p.init.initHTML(prog.PrintFunc) + p.Members[p.init.name] = p.init + p.Functions = append(p.Functions, p.init) + + // CREATE phase. + // Allocate all package members: vars, funcs, consts and types. + if len(files) > 0 { + // Go source package. + for _, file := range files { + for _, decl := range file.Decls { + membersFromDecl(p, decl) + } + } + } else { + // GC-compiled binary package (or "unsafe") + // No code. + // No position information. + scope := p.Pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + memberFromObject(p, obj, nil) + if obj, ok := obj.(*types.TypeName); ok { + if named, ok := obj.Type().(*types.Named); ok { + for i, n := 0, named.NumMethods(); i < n; i++ { + memberFromObject(p, named.Method(i), nil) + } + } + } + } + } + + // Add initializer guard variable. + initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), + } + p.Members[initguard.Name()] = initguard + + if prog.mode&GlobalDebug != 0 { + p.SetDebugMode(true) + } + + if prog.mode&PrintPackages != 0 { + printMu.Lock() + p.WriteTo(os.Stdout) + printMu.Unlock() + } + + if importable { + prog.imported[p.Pkg.Path()] = p + } + prog.packages[p.Pkg] = p + + return p +} + +// printMu serializes printing of Packages/Functions to stdout. +var printMu sync.Mutex + +// AllPackages returns a new slice containing all packages in the +// program prog in unspecified order. 
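A rough sketch of how a caller might drive the CREATE phase described above. The srcPackage bundling type is hypothetical, and Package.Build is only referenced by the comments above, not shown here:

package example // sketch only, not part of the patch

import (
	"go/ast"
	"go/token"
	"go/types"

	"honnef.co/go/tools/go/ir"
)

// srcPackage bundles the inputs CreatePackage needs for one type-checked package.
type srcPackage struct {
	Pkg   *types.Package
	Files []*ast.File
	Info  *types.Info
}

func buildIR(fset *token.FileSet, pkgs []srcPackage) *ir.Program {
	prog := ir.NewProgram(fset, ir.SanityCheckFunctions)
	created := make([]*ir.Package, 0, len(pkgs))
	for _, p := range pkgs {
		// CREATE phase: allocate members; no function bodies yet.
		created = append(created, prog.CreatePackage(p.Pkg, p.Files, p.Info, true))
	}
	for _, p := range created {
		p.Build() // BUILD phase, as the CreatePackage comment notes
	}
	return prog
}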
+// +func (prog *Program) AllPackages() []*Package { + pkgs := make([]*Package, 0, len(prog.packages)) + for _, pkg := range prog.packages { + pkgs = append(pkgs, pkg) + } + return pkgs +} + +// ImportedPackage returns the importable Package whose PkgPath +// is path, or nil if no such Package has been created. +// +// A parameter to CreatePackage determines whether a package should be +// considered importable. For example, no import declaration can resolve +// to the ad-hoc main package created by 'go build foo.go'. +// +// TODO(adonovan): rethink this function and the "importable" concept; +// most packages are importable. This function assumes that all +// types.Package.Path values are unique within the ir.Program, which is +// false---yet this function remains very convenient. +// Clients should use (*Program).Package instead where possible. +// IR doesn't really need a string-keyed map of packages. +// +func (prog *Program) ImportedPackage(path string) *Package { + return prog.imported[path] +} diff --git a/vendor/honnef.co/go/tools/go/ir/doc.go b/vendor/honnef.co/go/tools/go/ir/doc.go new file mode 100644 index 000000000..0765d439e --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/doc.go @@ -0,0 +1,129 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ir defines a representation of the elements of Go programs +// (packages, types, functions, variables and constants) using a +// static single-information (SSI) form intermediate representation +// (IR) for the bodies of functions. +// +// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. +// +// For an introduction to SSA form, upon which SSI builds, see +// http://en.wikipedia.org/wiki/Static_single_assignment_form. +// This page provides a broader reading list: +// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. +// +// For an introduction to SSI form, see The static single information +// form by C. Scott Ananian. +// +// The level of abstraction of the IR form is intentionally close to +// the source language to facilitate construction of source analysis +// tools. It is not intended for machine code generation. +// +// The simplest way to create the IR of a package is +// to load typed syntax trees using golang.org/x/tools/go/packages, then +// invoke the irutil.Packages helper function. See ExampleLoadPackages +// and ExampleWholeProgram for examples. +// The resulting ir.Program contains all the packages and their +// members, but IR code is not created for function bodies until a +// subsequent call to (*Package).Build or (*Program).Build. +// +// The builder initially builds a naive IR form in which all local +// variables are addresses of stack locations with explicit loads and +// stores. Registerization of eligible locals and φ-node insertion +// using dominance and dataflow are then performed as a second pass +// called "lifting" to improve the accuracy and performance of +// subsequent analyses; this pass can be skipped by setting the +// NaiveForm builder flag. +// +// The primary interfaces of this package are: +// +// - Member: a named member of a Go package. +// - Value: an expression that yields a value. +// - Instruction: a statement that consumes values and performs computation. +// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) +// +// A computation that yields a result implements both the Value and +// Instruction interfaces. 
The following table shows for each +// concrete type which of these interfaces it implements. +// +// Value? Instruction? Member? +// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *BlankStore ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Load ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapLookup ✔ ✔ +// *MapUpdate ✔ ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Recv ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ ✔ +// *Sigma ✔ ✔ +// *Slice ✔ ✔ +// *Store ✔ ✔ +// *StringLookup ✔ ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// *Unreachable ✔ +// +// Other key types in this package include: Program, Package, Function +// and BasicBlock. +// +// The program representation constructed by this package is fully +// resolved internally, i.e. it does not rely on the names of Values, +// Packages, Functions, Types or BasicBlocks for the correct +// interpretation of the program. Only the identities of objects and +// the topology of the IR and type graphs are semantically +// significant. (There is one exception: Ids, used to identify field +// and method names, contain strings.) Avoidance of name-based +// operations simplifies the implementation of subsequent passes and +// can make them very efficient. Many objects are nonetheless named +// to aid in debugging, but it is not essential that the names be +// either accurate or unambiguous. The public API exposes a number of +// name-based maps for client convenience. +// +// The ir/irutil package provides various utilities that depend only +// on the public API of this package. +// +// TODO(adonovan): Consider the exceptional control-flow implications +// of defer and recover(). +// +// TODO(adonovan): write a how-to document for all the various cases +// of trying to determine corresponding elements across the four +// domains of source locations, ast.Nodes, types.Objects, +// ir.Values/Instructions. +// +package ir diff --git a/vendor/honnef.co/go/tools/go/ir/dom.go b/vendor/honnef.co/go/tools/go/ir/dom.go new file mode 100644 index 000000000..13ecd47cb --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/dom.go @@ -0,0 +1,469 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines algorithms related to dominance. + +// Dominator tree construction ---------------------------------------- +// +// We use the algorithm described in Lengauer & Tarjan. 1979. A fast +// algorithm for finding dominators in a flowgraph. +// http://doi.acm.org/10.1145/357062.357071 +// +// We also apply the optimizations to SLT described in Georgiadis et +// al, Finding Dominators in Practice, JGAA 2006, +// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf +// to avoid the need for buckets of size > 1. + +import ( + "bytes" + "fmt" + "io" + "math/big" + "os" + "sort" +) + +// Idom returns the block that immediately dominates b: +// its parent in the dominator tree, if any. +// The entry node (b.Index==0) does not have a parent. 
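To make the Value/Instruction duality in the table above concrete, here is a small sketch of the traversal a client usually performs over a built function (names are hypothetical):

package example // sketch only, not part of the patch

import "honnef.co/go/tools/go/ir"

// countCalls walks fn's blocks and counts ordinary call instructions. A
// *Call is both an Instruction (it appears in b.Instrs) and a Value (its
// result can be referred to), as the table above indicates.
func countCalls(fn *ir.Function) int {
	n := 0
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			call, ok := instr.(*ir.Call)
			if !ok {
				continue
			}
			var _ ir.Value = call // a Call also implements Value
			n++
		}
	}
	return n
}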
+// +func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } + +// Dominees returns the list of blocks that b immediately dominates: +// its children in the dominator tree. +// +func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } + +// Dominates reports whether b dominates c. +func (b *BasicBlock) Dominates(c *BasicBlock) bool { + return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post +} + +type byDomPreorder []*BasicBlock + +func (a byDomPreorder) Len() int { return len(a) } +func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre } + +// DomPreorder returns a new slice containing the blocks of f in +// dominator tree preorder. +// +func (f *Function) DomPreorder() []*BasicBlock { + n := len(f.Blocks) + order := make(byDomPreorder, n) + copy(order, f.Blocks) + sort.Sort(order) + return order +} + +// domInfo contains a BasicBlock's dominance information. +type domInfo struct { + idom *BasicBlock // immediate dominator (parent in domtree) + children []*BasicBlock // nodes immediately dominated by this one + pre, post int32 // pre- and post-order numbering within domtree +} + +// buildDomTree computes the dominator tree of f using the LT algorithm. +// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). +// +func buildDomTree(fn *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range fn.Blocks { + b.dom = domInfo{} + } + + idoms := make([]*BasicBlock, len(fn.Blocks)) + + order := make([]*BasicBlock, 0, len(fn.Blocks)) + seen := fn.blockset(0) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return + } + for _, succ := range b.Succs { + dfs(succ) + } + if fn.fakeExits.Has(b) { + dfs(fn.Exit) + } + order = append(order, b) + b.post = len(order) - 1 + } + dfs(fn.Blocks[0]) + + for i := 0; i < len(order)/2; i++ { + o := len(order) - i - 1 + order[i], order[o] = order[o], order[i] + } + + idoms[fn.Blocks[0].Index] = fn.Blocks[0] + changed := true + for changed { + changed = false + // iterate over all nodes in reverse postorder, except for the + // entry node + for _, b := range order[1:] { + var newIdom *BasicBlock + do := func(p *BasicBlock) { + if idoms[p.Index] == nil { + return + } + if newIdom == nil { + newIdom = p + } else { + finger1 := p + finger2 := newIdom + for finger1 != finger2 { + for finger1.post < finger2.post { + finger1 = idoms[finger1.Index] + } + for finger2.post < finger1.post { + finger2 = idoms[finger2.Index] + } + } + newIdom = finger1 + } + } + for _, p := range b.Preds { + do(p) + } + if b == fn.Exit { + for _, p := range fn.Blocks { + if fn.fakeExits.Has(p) { + do(p) + } + } + } + + if idoms[b.Index] != newIdom { + idoms[b.Index] = newIdom + changed = true + } + } + } + + for i, b := range idoms { + fn.Blocks[i].dom.idom = b + if b == nil { + // malformed CFG + continue + } + if i == b.Index { + continue + } + b.dom.children = append(b.dom.children, fn.Blocks[i]) + } + + numberDomTree(fn.Blocks[0], 0, 0) + + // printDomTreeDot(os.Stderr, fn) // debugging + // printDomTreeText(os.Stderr, root, 0) // debugging + + if fn.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckDomTree(fn) + } +} + +// buildPostDomTree is like buildDomTree, but builds the post-dominator tree instead. 
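A short sketch of the dominance queries defined above, assuming fn is an already-built *ir.Function:

package example // sketch only, not part of the patch

import (
	"fmt"

	"honnef.co/go/tools/go/ir"
)

// dumpDomTree prints each block with its immediate dominator, visiting the
// blocks in dominator-tree preorder; Dominates answers in constant time
// thanks to the pre/post numbering computed by numberDomTree above.
func dumpDomTree(fn *ir.Function) {
	for _, b := range fn.DomPreorder() {
		if idom := b.Idom(); idom != nil && idom != b {
			fmt.Printf("b%d: idom=b%d\n", b.Index, idom.Index)
		} else {
			fmt.Printf("b%d: entry (no parent)\n", b.Index)
		}
	}
	if len(fn.Blocks) > 1 {
		fmt.Println("entry dominates b1:", fn.Blocks[0].Dominates(fn.Blocks[1]))
	}
}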
+func buildPostDomTree(fn *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range fn.Blocks { + b.pdom = domInfo{} + } + + idoms := make([]*BasicBlock, len(fn.Blocks)) + + order := make([]*BasicBlock, 0, len(fn.Blocks)) + seen := fn.blockset(0) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return + } + for _, pred := range b.Preds { + dfs(pred) + } + if b == fn.Exit { + for _, p := range fn.Blocks { + if fn.fakeExits.Has(p) { + dfs(p) + } + } + } + order = append(order, b) + b.post = len(order) - 1 + } + dfs(fn.Exit) + + for i := 0; i < len(order)/2; i++ { + o := len(order) - i - 1 + order[i], order[o] = order[o], order[i] + } + + idoms[fn.Exit.Index] = fn.Exit + changed := true + for changed { + changed = false + // iterate over all nodes in reverse postorder, except for the + // exit node + for _, b := range order[1:] { + var newIdom *BasicBlock + do := func(p *BasicBlock) { + if idoms[p.Index] == nil { + return + } + if newIdom == nil { + newIdom = p + } else { + finger1 := p + finger2 := newIdom + for finger1 != finger2 { + for finger1.post < finger2.post { + finger1 = idoms[finger1.Index] + } + for finger2.post < finger1.post { + finger2 = idoms[finger2.Index] + } + } + newIdom = finger1 + } + } + for _, p := range b.Succs { + do(p) + } + if fn.fakeExits.Has(b) { + do(fn.Exit) + } + + if idoms[b.Index] != newIdom { + idoms[b.Index] = newIdom + changed = true + } + } + } + + for i, b := range idoms { + fn.Blocks[i].pdom.idom = b + if b == nil { + // malformed CFG + continue + } + if i == b.Index { + continue + } + b.pdom.children = append(b.pdom.children, fn.Blocks[i]) + } + + numberPostDomTree(fn.Exit, 0, 0) + + // printPostDomTreeDot(os.Stderr, fn) // debugging + // printPostDomTreeText(os.Stderr, fn.Exit, 0) // debugging + + if fn.Prog.mode&SanityCheckFunctions != 0 { // XXX + sanityCheckDomTree(fn) // XXX + } +} + +// numberDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the dominator tree rooted at v. These are used to +// answer dominance queries in constant time. +// +func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.dom.pre = pre + pre++ + for _, child := range v.dom.children { + pre, post = numberDomTree(child, pre, post) + } + v.dom.post = post + post++ + return pre, post +} + +// numberPostDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the post-dominator tree rooted at v. These are used to +// answer post-dominance queries in constant time. +// +func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.pdom.pre = pre + pre++ + for _, child := range v.pdom.children { + pre, post = numberPostDomTree(child, pre, post) + } + v.pdom.post = post + post++ + return pre, post +} + +// Testing utilities ---------------------------------------- + +// sanityCheckDomTree checks the correctness of the dominator tree +// computed by the LT algorithm by comparing against the dominance +// relation computed by a naive Kildall-style forward dataflow +// analysis (Algorithm 10.16 from the "Dragon" book). +// +func sanityCheckDomTree(f *Function) { + n := len(f.Blocks) + + // D[i] is the set of blocks that dominate f.Blocks[i], + // represented as a bit-set of block indices. + D := make([]big.Int, n) + + one := big.NewInt(1) + + // all is the set of all blocks; constant. 
+ var all big.Int + all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) + + // Initialization. + for i := range f.Blocks { + if i == 0 { + // A root is dominated only by itself. + D[i].SetBit(&D[0], 0, 1) + } else { + // All other blocks are (initially) dominated + // by every block. + D[i].Set(&all) + } + } + + // Iteration until fixed point. + for changed := true; changed; { + changed = false + for i, b := range f.Blocks { + if i == 0 { + continue + } + // Compute intersection across predecessors. + var x big.Int + x.Set(&all) + for _, pred := range b.Preds { + x.And(&x, &D[pred.Index]) + } + if b == f.Exit { + for _, p := range f.Blocks { + if f.fakeExits.Has(p) { + x.And(&x, &D[p.Index]) + } + } + } + x.SetBit(&x, i, 1) // a block always dominates itself. + if D[i].Cmp(&x) != 0 { + D[i].Set(&x) + changed = true + } + } + } + + // Check the entire relation. O(n^2). + ok := true + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + b, c := f.Blocks[i], f.Blocks[j] + actual := b.Dominates(c) + expected := D[j].Bit(i) == 1 + if actual != expected { + fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) + ok = false + } + } + } + + preorder := f.DomPreorder() + for _, b := range f.Blocks { + if got := preorder[b.dom.pre]; got != b { + fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) + ok = false + } + } + + if !ok { + panic("sanityCheckDomTree failed for " + f.String()) + } + +} + +// Printing functions ---------------------------------------- + +// printDomTree prints the dominator tree as text, using indentation. +//lint:ignore U1000 used during debugging +func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.dom.children { + printDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. +//lint:ignore U1000 used during debugging +func printDomTreeDot(buf io.Writer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph domtree {") + for i, b := range f.Blocks { + v := b.dom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if i != 0 { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) + } + + if f.fakeExits.Has(b) { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0,color=red];\n", b.dom.pre, f.Exit.dom.pre) + } + } + fmt.Fprintln(buf, "}") +} + +// printDomTree prints the dominator tree as text, using indentation. +//lint:ignore U1000 used during debugging +func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.pdom.children { + printPostDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. 
+//lint:ignore U1000 used during debugging +func printPostDomTreeDot(buf io.Writer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph pdomtree {") + for _, b := range f.Blocks { + v := b.pdom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if b != f.Exit { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.pdom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.pdom.pre, v.pre) + } + + if f.fakeExits.Has(b) { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0,color=red];\n", b.dom.pre, f.Exit.dom.pre) + } + } + fmt.Fprintln(buf, "}") +} diff --git a/vendor/honnef.co/go/tools/go/ir/emit.go b/vendor/honnef.co/go/tools/go/ir/emit.go new file mode 100644 index 000000000..f7629646a --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/emit.go @@ -0,0 +1,461 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// Helpers for emitting IR instructions. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" +) + +// emitNew emits to f a new (heap Alloc) instruction allocating an +// object of type typ. pos is the optional source location. +// +func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { + v := &Alloc{Heap: true} + v.setType(types.NewPointer(typ)) + f.emit(v, source) + return v +} + +// emitLoad emits to f an instruction to load the address addr into a +// new temporary, and returns the value so defined. +// +func emitLoad(f *Function, addr Value, source ast.Node) *Load { + v := &Load{X: addr} + v.setType(deref(addr.Type())) + f.emit(v, source) + return v +} + +func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.Node) Value { + recv := &Recv{ + Chan: ch, + CommaOk: commaOk, + } + recv.setType(typ) + return f.emit(recv, source) +} + +// emitDebugRef emits to f a DebugRef pseudo-instruction associating +// expression e with value v. +// +func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { + if !f.debugInfo() { + return // debugging not enabled + } + if v == nil || e == nil { + panic("nil") + } + var obj types.Object + e = unparen(e) + if id, ok := e.(*ast.Ident); ok { + if isBlankIdent(id) { + return + } + obj = f.Pkg.objectOf(id) + switch obj.(type) { + case *types.Nil, *types.Const, *types.Builtin: + return + } + } + f.emit(&DebugRef{ + X: v, + Expr: e, + IsAddr: isAddr, + object: obj, + }, nil) +} + +// emitArith emits to f code to compute the binary operation op(x, y) +// where op is an eager shift, logical or arithmetic operation. +// (Use emitCompare() for comparisons and Builder.logicalBinop() for +// non-eager operations.) +// +func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value { + switch op { + case token.SHL, token.SHR: + x = emitConv(f, x, t, source) + // y may be signed or an 'untyped' constant. + // TODO(adonovan): whence signed values? 
+ if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { + y = emitConv(f, y, types.Typ[types.Uint64], source) + } + + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + x = emitConv(f, x, t, source) + y = emitConv(f, y, t, source) + + default: + panic("illegal op in emitArith: " + op.String()) + + } + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setType(t) + return f.emit(v, source) +} + +// emitCompare emits to f code compute the boolean result of +// comparison comparison 'x op y'. +// +func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value { + xt := x.Type().Underlying() + yt := y.Type().Underlying() + + // Special case to optimise a tagless SwitchStmt so that + // these are equivalent + // switch { case e: ...} + // switch true { case e: ... } + // if e==true { ... } + // even in the case when e's type is an interface. + // TODO(adonovan): opt: generalise to x==true, false!=y, etc. + if x, ok := x.(*Const); ok && op == token.EQL && x.Value != nil && x.Value.Kind() == constant.Bool && constant.BoolVal(x.Value) { + if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { + return y + } + } + + if types.Identical(xt, yt) { + // no conversion necessary + } else if _, ok := xt.(*types.Interface); ok { + y = emitConv(f, y, x.Type(), source) + } else if _, ok := yt.(*types.Interface); ok { + x = emitConv(f, x, y.Type(), source) + } else if _, ok := x.(*Const); ok { + x = emitConv(f, x, y.Type(), source) + } else if _, ok := y.(*Const); ok { + y = emitConv(f, y, x.Type(), source) + //lint:ignore SA9003 no-op + } else { + // other cases, e.g. channels. No-op. + } + + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setType(tBool) + return f.emit(v, source) +} + +// isValuePreserving returns true if a conversion from ut_src to +// ut_dst is value-preserving, i.e. just a change of type. +// Precondition: neither argument is a named type. +// +func isValuePreserving(ut_src, ut_dst types.Type) bool { + // Identical underlying types? + if structTypesIdentical(ut_dst, ut_src) { + return true + } + + switch ut_dst.(type) { + case *types.Chan: + // Conversion between channel types? + _, ok := ut_src.(*types.Chan) + return ok + + case *types.Pointer: + // Conversion between pointers with identical base types? + _, ok := ut_src.(*types.Pointer) + return ok + } + return false +} + +// emitConv emits to f code to convert Value val to exactly type typ, +// and returns the converted value. Implicit conversions are required +// by language assignability rules in assignments, parameter passing, +// etc. +// +func emitConv(f *Function, val Value, typ types.Type, source ast.Node) Value { + t_src := val.Type() + + // Identical types? Conversion is a no-op. + if types.Identical(t_src, typ) { + return val + } + + ut_dst := typ.Underlying() + ut_src := t_src.Underlying() + + // Just a change of type, but not value or representation? + if isValuePreserving(ut_src, ut_dst) { + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c, source) + } + + // Conversion to, or construction of a value of, an interface type? + if _, ok := ut_dst.(*types.Interface); ok { + // Assignment from one interface type to another? + if _, ok := ut_src.(*types.Interface); ok { + c := &ChangeInterface{X: val} + c.setType(typ) + return f.emit(c, source) + } + + // Untyped nil constant? Return interface-typed nil constant. 
+ if ut_src == tUntypedNil { + return emitConst(f, nilConst(typ)) + } + + // Convert (non-nil) "untyped" literals to their default type. + if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { + val = emitConv(f, val, types.Default(ut_src), source) + } + + f.Pkg.Prog.needMethodsOf(val.Type()) + mi := &MakeInterface{X: val} + mi.setType(typ) + return f.emit(mi, source) + } + + // Conversion of a compile-time constant value? + if c, ok := val.(*Const); ok { + if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() { + // Conversion of a compile-time constant to + // another constant type results in a new + // constant of the destination type and + // (initially) the same abstract value. + // We don't truncate the value yet. + return emitConst(f, NewConst(c.Value, typ)) + } + + // We're converting from constant to non-constant type, + // e.g. string -> []byte/[]rune. + } + + // Conversion from slice to array pointer? + if slice, ok := ut_src.(*types.Slice); ok { + if ptr, ok := ut_dst.(*types.Pointer); ok { + if arr, ok := ptr.Elem().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { + c := &Convert{X: val} + c.setType(ut_dst) + return f.emit(c, source) + } + } + } + + // A representation-changing conversion? + // At least one of {ut_src,ut_dst} must be *Basic. + // (The other may be []byte or []rune.) + _, ok1 := ut_src.(*types.Basic) + _, ok2 := ut_dst.(*types.Basic) + if ok1 || ok2 { + c := &Convert{X: val} + c.setType(typ) + return f.emit(c, source) + } + + panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) +} + +// emitStore emits to f an instruction to store value val at location +// addr, applying implicit conversions as required by assignability rules. +// +func emitStore(f *Function, addr, val Value, source ast.Node) *Store { + s := &Store{ + Addr: addr, + Val: emitConv(f, val, deref(addr.Type()), source), + } + // make sure we call getMem after the call to emitConv, which may + // itself update the memory state + f.emit(s, source) + return s +} + +// emitJump emits to f a jump to target, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { + b := f.currentBlock + j := new(Jump) + b.emit(j, source) + addEdge(b, target) + f.currentBlock = nil + return j +} + +// emitIf emits to f a conditional jump to tblock or fblock based on +// cond, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +// +func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If { + b := f.currentBlock + stmt := &If{Cond: cond} + b.emit(stmt, source) + addEdge(b, tblock) + addEdge(b, fblock) + f.currentBlock = nil + return stmt +} + +// emitExtract emits to f an instruction to extract the index'th +// component of tuple. It returns the extracted value. +// +func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { + e := &Extract{Tuple: tuple, Index: index} + e.setType(tuple.Type().(*types.Tuple).At(index).Type()) + return f.emit(e, source) +} + +// emitTypeAssert emits to f a type assertion value := x.(t) and +// returns the value. x.Type() must be an interface. +// +func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { + a := &TypeAssert{X: x, AssertedType: t} + a.setType(t) + return f.emit(a, source) +} + +// emitTypeTest emits to f a type test value,ok := x.(t) and returns +// a (value, ok) tuple. x.Type() must be an interface. 
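Reading emitConv above, the main conversion shapes map onto source-level cases roughly as follows; the comments are my interpretation of the branches above, not an authoritative list:

package example // sketch only, not part of the patch

type Celsius float64

func conversions(s string, f float64) {
	_ = Celsius(f)        // identical underlying types: value-preserving ChangeType
	_ = []byte(s)         // string -> []byte: representation-changing Convert (one side is *types.Basic)
	var i interface{} = 1 // untyped constant into interface: converted to its default type, then MakeInterface
	var err error = nil   // untyped nil into interface: becomes an interface-typed nil constant
	_, _ = i, err
}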
+// +func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { + a := &TypeAssert{ + X: x, + AssertedType: t, + CommaOk: true, + } + a.setType(types.NewTuple( + newVar("value", t), + varOk, + )) + return f.emit(a, source) +} + +// emitTailCall emits to f a function call in tail position. The +// caller is responsible for all fields of 'call' except its type. +// Intended for wrapper methods. +// Precondition: f does/will not use deferred procedure calls. +// Postcondition: f.currentBlock is nil. +// +func emitTailCall(f *Function, call *Call, source ast.Node) { + tresults := f.Signature.Results() + nr := tresults.Len() + if nr == 1 { + call.typ = tresults.At(0).Type() + } else { + call.typ = tresults + } + tuple := f.emit(call, source) + var ret Return + switch nr { + case 0: + // no-op + case 1: + ret.Results = []Value{tuple} + default: + for i := 0; i < nr; i++ { + v := emitExtract(f, tuple, i, source) + // TODO(adonovan): in principle, this is required: + // v = emitConv(f, o.Type, f.Signature.Results[i].Type) + // but in practice emitTailCall is only used when + // the types exactly match. + ret.Results = append(ret.Results, v) + } + } + + f.Exit = f.newBasicBlock("exit") + emitJump(f, f.Exit, source) + f.currentBlock = f.Exit + f.emit(&ret, source) + f.currentBlock = nil +} + +// emitImplicitSelections emits to f code to apply the sequence of +// implicit field selections specified by indices to base value v, and +// returns the selected value. +// +// If v is the address of a struct, the result will be the address of +// a field; if it is the value of a struct, the result will be the +// value of a field. +// +func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value { + for _, index := range indices { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr, source) + // Load the field's value iff indirectly embedded. + if isPointer(fld.Type()) { + v = emitLoad(f, v, source) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setType(fld.Type()) + v = f.emit(instr, source) + } + } + return v +} + +// emitFieldSelection emits to f code to select the index'th field of v. +// +// If wantAddr, the input must be a pointer-to-struct and the result +// will be the field's address; otherwise the result will be the +// field's value. +// Ident id is used for position and debug info. +// +func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { + fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) + if isPointer(v.Type()) { + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setSource(id) + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr, id) + // Load the field's value iff we don't want its address. + if !wantAddr { + v = emitLoad(f, v, id) + } + } else { + instr := &Field{ + X: v, + Field: index, + } + instr.setSource(id) + instr.setType(fld.Type()) + v = f.emit(instr, id) + } + emitDebugRef(f, id, v, wantAddr) + return v +} + +// zeroValue emits to f code to produce a zero value of type t, +// and returns it. 
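As a rough source-level illustration of emitImplicitSelections and emitFieldSelection above (the instruction sequences in the comments are my reading of the code, not an authoritative dump):

package example // sketch only, not part of the patch

type Inner struct{ C int }
type Outer struct{ Inner }

// With a pointer base the helpers above emit FieldAddr instructions plus a
// final Load when the value rather than the address is wanted; with a
// non-pointer base they emit Field instructions instead.
func read(p *Outer, v Outer) (int, int) {
	a := p.C // roughly: FieldAddr (promoted Inner), FieldAddr C, Load
	b := v.C // roughly: Field (promoted Inner), Field C
	return a, b
}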
+// +func zeroValue(f *Function, t types.Type, source ast.Node) Value { + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return emitLoad(f, f.addLocal(t, source), source) + default: + return emitConst(f, zeroConst(t)) + } +} + +func emitConst(f *Function, c *Const) *Const { + f.consts = append(f.consts, c) + return c +} diff --git a/vendor/honnef.co/go/tools/go/ir/exits.go b/vendor/honnef.co/go/tools/go/ir/exits.go new file mode 100644 index 000000000..0abf58089 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/exits.go @@ -0,0 +1,317 @@ +package ir + +import ( + "go/types" +) + +func (b *builder) buildExits(fn *Function) { + if obj := fn.Object(); obj != nil { + switch obj.Pkg().Path() { + case "runtime": + switch obj.Name() { + case "exit": + fn.NoReturn = AlwaysExits + return + case "throw": + fn.NoReturn = AlwaysExits + return + case "Goexit": + fn.NoReturn = AlwaysUnwinds + return + } + case "github.com/sirupsen/logrus": + switch obj.(*types.Func).FullName() { + case "(*github.com/sirupsen/logrus.Logger).Exit": + // Technically, this method does not unconditionally exit + // the process. It dynamically calls a function stored in + // the logger. If the function is nil, it defaults to + // os.Exit. + // + // The main intent of this method is to terminate the + // process, and that's what the vast majority of people + // will use it for. We'll happily accept some false + // negatives to avoid a lot of false positives. + fn.NoReturn = AlwaysExits + return + case "(*github.com/sirupsen/logrus.Logger).Panic", + "(*github.com/sirupsen/logrus.Logger).Panicf", + "(*github.com/sirupsen/logrus.Logger).Panicln": + + // These methods will always panic, but that's not + // statically known from the code alone, because they + // take a detour through the generic Log methods. + fn.NoReturn = AlwaysUnwinds + return + case "(*github.com/sirupsen/logrus.Entry).Panicf", + "(*github.com/sirupsen/logrus.Entry).Panicln": + + // Entry.Panic has an explicit panic, but Panicf and + // Panicln do not, relying fully on the generic Log + // method. + fn.NoReturn = AlwaysUnwinds + return + case "(*github.com/sirupsen/logrus.Logger).Log", + "(*github.com/sirupsen/logrus.Logger).Logf", + "(*github.com/sirupsen/logrus.Logger).Logln": + // TODO(dh): we cannot handle these cases. Whether they + // exit or unwind depends on the level, which is set + // via the first argument. We don't currently support + // call-site-specific exit information. 
+ } + case "github.com/golang/glog": + switch obj.(*types.Func).FullName() { + case "github.com/golang/glog.Exit", + "github.com/golang/glog.ExitDepth", + "github.com/golang/glog.Exitf", + "github.com/golang/glog.Exitln", + "github.com/golang/glog.Fatal", + "github.com/golang/glog.FatalDepth", + "github.com/golang/glog.Fatalf", + "github.com/golang/glog.Fatalln": + // all of these call os.Exit after logging + fn.NoReturn = AlwaysExits + } + } + } + + isRecoverCall := func(instr Instruction) bool { + if instr, ok := instr.(*Call); ok { + if builtin, ok := instr.Call.Value.(*Builtin); ok { + if builtin.Name() == "recover" { + return true + } + } + } + return false + } + + both := NewBlockSet(len(fn.Blocks)) + exits := NewBlockSet(len(fn.Blocks)) + unwinds := NewBlockSet(len(fn.Blocks)) + recovers := false + for _, u := range fn.Blocks { + for _, instr := range u.Instrs { + instrSwitch: + switch instr := instr.(type) { + case *Defer: + if recovers { + // avoid doing extra work, we already know that this function calls recover + continue + } + call := instr.Call.StaticCallee() + if call == nil { + // not a static call, so we can't be sure the + // deferred call isn't calling recover + recovers = true + break + } + if call.Package() == fn.Package() { + b.buildFunction(call) + } + if len(call.Blocks) == 0 { + // external function, we don't know what's + // happening inside it + // + // TODO(dh): this includes functions from + // imported packages, due to how go/analysis + // works. We could introduce another fact, + // like we've done for exiting and unwinding. + recovers = true + break + } + for _, y := range call.Blocks { + for _, instr2 := range y.Instrs { + if isRecoverCall(instr2) { + recovers = true + break instrSwitch + } + } + } + + case *Panic: + both.Add(u) + unwinds.Add(u) + + case CallInstruction: + switch instr.(type) { + case *Defer, *Call: + default: + continue + } + if instr.Common().IsInvoke() { + // give up + return + } + var call *Function + switch instr.Common().Value.(type) { + case *Function, *MakeClosure: + call = instr.Common().StaticCallee() + case *Builtin: + // the only builtins that affect control flow are + // panic and recover, and we've already handled + // those + continue + default: + // dynamic dispatch + return + } + // buildFunction is idempotent. if we're part of a + // (mutually) recursive call chain, then buildFunction + // will immediately return, and fn.WillExit will be false. + if call.Package() == fn.Package() { + b.buildFunction(call) + } + switch call.NoReturn { + case AlwaysExits: + both.Add(u) + exits.Add(u) + case AlwaysUnwinds: + both.Add(u) + unwinds.Add(u) + case NeverReturns: + both.Add(u) + } + } + } + } + + // depth-first search trying to find a path to the exit block that + // doesn't cross any of the blacklisted blocks + seen := NewBlockSet(len(fn.Blocks)) + var findPath func(root *BasicBlock, bl *BlockSet) bool + findPath = func(root *BasicBlock, bl *BlockSet) bool { + if root == fn.Exit { + return true + } + if seen.Has(root) { + return false + } + if bl.Has(root) { + return false + } + seen.Add(root) + for _, succ := range root.Succs { + if findPath(succ, bl) { + return true + } + } + return false + } + findPathEntry := func(root *BasicBlock, bl *BlockSet) bool { + if bl.Num() == 0 { + return true + } + seen.Clear() + return findPath(root, bl) + } + + if !findPathEntry(fn.Blocks[0], exits) { + fn.NoReturn = AlwaysExits + } else if !recovers { + // Only consider unwinding and "never returns" if we don't + // call recover. 
If we do call recover, then panics don't + // bubble up the stack. + + // TODO(dh): the position of the defer matters. If we + // unconditionally terminate before we defer a recover, then + // the recover is ineffective. + + if !findPathEntry(fn.Blocks[0], unwinds) { + fn.NoReturn = AlwaysUnwinds + } else if !findPathEntry(fn.Blocks[0], both) { + fn.NoReturn = NeverReturns + } + } +} + +func (b *builder) addUnreachables(fn *Function) { + var unreachable *BasicBlock + + for _, bb := range fn.Blocks { + instrLoop: + for i, instr := range bb.Instrs { + if instr, ok := instr.(*Call); ok { + var call *Function + switch v := instr.Common().Value.(type) { + case *Function: + call = v + case *MakeClosure: + call = v.Fn.(*Function) + } + if call == nil { + continue + } + if call.Package() == fn.Package() { + // make sure we have information on all functions in this package + b.buildFunction(call) + } + switch call.NoReturn { + case AlwaysExits: + // This call will cause the process to terminate. + // Remove remaining instructions in the block and + // replace any control flow with Unreachable. + for _, succ := range bb.Succs { + succ.removePred(bb) + } + bb.Succs = bb.Succs[:0] + + bb.Instrs = bb.Instrs[:i+1] + bb.emit(new(Unreachable), instr.Source()) + addEdge(bb, fn.Exit) + break instrLoop + + case AlwaysUnwinds: + // This call will cause the goroutine to terminate + // and defers to run (i.e. a panic or + // runtime.Goexit). Remove remaining instructions + // in the block and replace any control flow with + // an unconditional jump to the exit block. + for _, succ := range bb.Succs { + succ.removePred(bb) + } + bb.Succs = bb.Succs[:0] + + bb.Instrs = bb.Instrs[:i+1] + bb.emit(new(Jump), instr.Source()) + addEdge(bb, fn.Exit) + break instrLoop + + case NeverReturns: + // This call will either cause the goroutine to + // terminate, or the process to terminate. Remove + // remaining instructions in the block and replace + // any control flow with a conditional jump to + // either the exit block, or Unreachable. + for _, succ := range bb.Succs { + succ.removePred(bb) + } + bb.Succs = bb.Succs[:0] + + bb.Instrs = bb.Instrs[:i+1] + var c Call + c.Call.Value = &Builtin{ + name: "ir:noreturnWasPanic", + sig: types.NewSignature(nil, + types.NewTuple(), + types.NewTuple(anonVar(types.Typ[types.Bool])), + false, + ), + } + c.setType(types.Typ[types.Bool]) + + if unreachable == nil { + unreachable = fn.newBasicBlock("unreachable") + unreachable.emit(&Unreachable{}, nil) + addEdge(unreachable, fn.Exit) + } + + bb.emit(&c, instr.Source()) + bb.emit(&If{Cond: &c}, instr.Source()) + addEdge(bb, fn.Exit) + addEdge(bb, unreachable) + break instrLoop + } + } + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/func.go b/vendor/honnef.co/go/tools/go/ir/func.go new file mode 100644 index 000000000..69e381cd6 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/func.go @@ -0,0 +1,983 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file implements the Function and BasicBlock types. + +import ( + "bytes" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "io" + "os" + "strings" +) + +// addEdge adds a control-flow graph edge from from to to. +func addEdge(from, to *BasicBlock) { + from.Succs = append(from.Succs, to) + to.Preds = append(to.Preds, from) +} + +// Control returns the last instruction in the block. 
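Looking back at the exit analysis in exits.go above, two source-level shapes and the NoReturn classification I would expect them to receive (hedged; glog appears in the hard-coded list above, and cross-package exit/unwind information travels via facts, as the TODO there notes):

package example // sketch only, not part of the patch

import (
	"fmt"

	"github.com/golang/glog"
)

// die calls glog.Fatal, which the analysis above hard-codes as AlwaysExits;
// addUnreachables can therefore cut a calling block short and terminate it
// with Unreachable.
func die(msg string) {
	glog.Fatal(msg)
}

// alwaysPanics panics on every path, so buildExits should classify it as
// AlwaysUnwinds: callers' blocks are rewired to jump to the exit block so
// that deferred calls still run.
func alwaysPanics(v interface{}) string {
	panic(fmt.Sprintf("unhandled case %T", v))
}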
+func (b *BasicBlock) Control() Instruction { + if len(b.Instrs) == 0 { + return nil + } + return b.Instrs[len(b.Instrs)-1] +} + +// SigmaFor returns the sigma node for v coming from pred. +func (b *BasicBlock) SigmaFor(v Value, pred *BasicBlock) *Sigma { + for _, instr := range b.Instrs { + sigma, ok := instr.(*Sigma) + if !ok { + // no more sigmas + return nil + } + if sigma.From == pred && sigma.X == v { + return sigma + } + } + return nil +} + +// Parent returns the function that contains block b. +func (b *BasicBlock) Parent() *Function { return b.parent } + +// String returns a human-readable label of this block. +// It is not guaranteed unique within the function. +// +func (b *BasicBlock) String() string { + return fmt.Sprintf("%d", b.Index) +} + +// emit appends an instruction to the current basic block. +// If the instruction defines a Value, it is returned. +// +func (b *BasicBlock) emit(i Instruction, source ast.Node) Value { + i.setSource(source) + i.setBlock(b) + b.Instrs = append(b.Instrs, i) + v, _ := i.(Value) + return v +} + +// predIndex returns the i such that b.Preds[i] == c or panics if +// there is none. +func (b *BasicBlock) predIndex(c *BasicBlock) int { + for i, pred := range b.Preds { + if pred == c { + return i + } + } + panic(fmt.Sprintf("no edge %s -> %s", c, b)) +} + +// succIndex returns the i such that b.Succs[i] == c or -1 if there is none. +func (b *BasicBlock) succIndex(c *BasicBlock) int { + for i, succ := range b.Succs { + if succ == c { + return i + } + } + return -1 +} + +// hasPhi returns true if b.Instrs contains φ-nodes. +func (b *BasicBlock) hasPhi() bool { + _, ok := b.Instrs[0].(*Phi) + return ok +} + +func (b *BasicBlock) Phis() []Instruction { + return b.phis() +} + +// phis returns the prefix of b.Instrs containing all the block's φ-nodes. +func (b *BasicBlock) phis() []Instruction { + for i, instr := range b.Instrs { + if _, ok := instr.(*Phi); !ok { + return b.Instrs[:i] + } + } + return nil // unreachable in well-formed blocks +} + +// replacePred replaces all occurrences of p in b's predecessor list with q. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) replacePred(p, q *BasicBlock) { + for i, pred := range b.Preds { + if pred == p { + b.Preds[i] = q + } + } +} + +// replaceSucc replaces all occurrences of p in b's successor list with q. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { + for i, succ := range b.Succs { + if succ == p { + b.Succs[i] = q + } + } +} + +// removePred removes all occurrences of p in b's +// predecessor list and φ-nodes. +// Ordinarily there should be at most one. +// +func (b *BasicBlock) removePred(p *BasicBlock) { + phis := b.phis() + + // We must preserve edge order for φ-nodes. + j := 0 + for i, pred := range b.Preds { + if pred != p { + b.Preds[j] = b.Preds[i] + // Strike out φ-edge too. + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges[j] = phi.Edges[i] + } + j++ + } + } + // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. + for i := j; i < len(b.Preds); i++ { + b.Preds[i] = nil + for _, instr := range phis { + instr.(*Phi).Edges[i] = nil + } + } + b.Preds = b.Preds[:j] + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges = phi.Edges[:j] + } +} + +// Destinations associated with unlabelled for/switch/select stmts. +// We push/pop one of these as we enter/leave each construct and for +// each BranchStmt we scan for the innermost target of the right type. 
+// +type targets struct { + tail *targets // rest of stack + _break *BasicBlock + _continue *BasicBlock + _fallthrough *BasicBlock +} + +// Destinations associated with a labelled block. +// We populate these as labels are encountered in forward gotos or +// labelled statements. +// +type lblock struct { + _goto *BasicBlock + _break *BasicBlock + _continue *BasicBlock +} + +// labelledBlock returns the branch target associated with the +// specified label, creating it if needed. +// +func (f *Function) labelledBlock(label *ast.Ident) *lblock { + lb := f.lblocks[label.Obj] + if lb == nil { + lb = &lblock{_goto: f.newBasicBlock(label.Name)} + if f.lblocks == nil { + f.lblocks = make(map[*ast.Object]*lblock) + } + f.lblocks[label.Obj] = lb + } + return lb +} + +// addParam adds a (non-escaping) parameter to f.Params of the +// specified name, type and source position. +// +func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter { + var b *BasicBlock + if len(f.Blocks) > 0 { + b = f.Blocks[0] + } + v := &Parameter{ + name: name, + } + v.setBlock(b) + v.setType(typ) + v.setSource(source) + f.Params = append(f.Params, v) + if b != nil { + // There may be no blocks if this function has no body. We + // still create params, but aren't interested in the + // instruction. + f.Blocks[0].Instrs = append(f.Blocks[0].Instrs, v) + } + return v +} + +func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter { + name := obj.Name() + if name == "" { + name = fmt.Sprintf("arg%d", len(f.Params)) + } + param := f.addParam(name, obj.Type(), source) + param.object = obj + return param +} + +// addSpilledParam declares a parameter that is pre-spilled to the +// stack; the function body will load/store the spilled location. +// Subsequent lifting will eliminate spills where possible. +// +func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { + param := f.addParamObj(obj, source) + spill := &Alloc{} + spill.setType(types.NewPointer(obj.Type())) + spill.source = source + f.objects[obj] = spill + f.Locals = append(f.Locals, spill) + f.emit(spill, source) + emitStore(f, spill, param, source) + // f.emit(&Store{Addr: spill, Val: param}) +} + +// startBody initializes the function prior to generating IR code for its body. +// Precondition: f.Type() already set. +// +func (f *Function) startBody() { + entry := f.newBasicBlock("entry") + f.currentBlock = entry + f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init +} + +func (f *Function) blockset(i int) *BlockSet { + bs := &f.blocksets[i] + if len(bs.values) != len(f.Blocks) { + if cap(bs.values) >= len(f.Blocks) { + bs.values = bs.values[:len(f.Blocks)] + bs.Clear() + } else { + bs.values = make([]bool, len(f.Blocks)) + } + } else { + bs.Clear() + } + return bs +} + +func (f *Function) exitBlock() { + old := f.currentBlock + + f.Exit = f.newBasicBlock("exit") + f.currentBlock = f.Exit + + ret := f.results() + results := make([]Value, len(ret)) + // Run function calls deferred in this + // function when explicitly returning from it. + f.emit(new(RunDefers), nil) + for i, r := range ret { + results[i] = emitLoad(f, r, nil) + } + + f.emit(&Return{Results: results}, nil) + f.currentBlock = old +} + +// createSyntacticParams populates f.Params and generates code (spills +// and named result locals) for all the parameters declared in the +// syntax. In addition it populates the f.objects mapping. +// +// Preconditions: +// f.startBody() was called. 
+// Postcondition: +// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) +// +func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { + // Receiver (at most one inner iteration). + if recv != nil { + for _, field := range recv.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n], n) + } + // Anonymous receiver? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Recv(), field) + } + } + } + + // Parameters. + if functype.Params != nil { + n := len(f.Params) // 1 if has recv, 0 otherwise + for _, field := range functype.Params.List { + for _, n := range field.Names { + f.addSpilledParam(f.Pkg.info.Defs[n], n) + } + // Anonymous parameter? No need to spill. + if field.Names == nil { + f.addParamObj(f.Signature.Params().At(len(f.Params)-n), field) + } + } + } + + // Named results. + if functype.Results != nil { + for _, field := range functype.Results.List { + // Implicit "var" decl of locals for named results. + for _, n := range field.Names { + f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) + } + } + + if len(f.namedResults) == 0 { + sig := f.Signature.Results() + for i := 0; i < sig.Len(); i++ { + // XXX position information + v := f.addLocal(sig.At(i).Type(), nil) + f.implicitResults = append(f.implicitResults, v) + } + } + } +} + +func numberNodes(f *Function) { + var base ID + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if instr == nil { + continue + } + base++ + instr.setID(base) + } + } +} + +// buildReferrers populates the def/use information in all non-nil +// Value.Referrers slice. +// Precondition: all such slices are initially empty. +func buildReferrers(f *Function) { + var rands []*Value + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if r := *rand; r != nil { + if ref := r.Referrers(); ref != nil { + if len(*ref) == 0 { + // per median, each value has two referrers, so we can avoid one call into growslice + // + // Note: we experimented with allocating + // sequential scratch space, but we + // couldn't find a value that gave better + // performance than making many individual + // allocations + *ref = make([]Instruction, 1, 2) + (*ref)[0] = instr + } else { + *ref = append(*ref, instr) + } + } + } + } + } + } +} + +func (f *Function) emitConsts() { + if len(f.Blocks) == 0 { + f.consts = nil + return + } + + // TODO(dh): our deduplication only works on booleans and + // integers. other constants are represented as pointers to + // things. 
+ if len(f.consts) == 0 { + return + } else if len(f.consts) <= 32 { + f.emitConstsFew() + } else { + f.emitConstsMany() + } +} + +func (f *Function) emitConstsFew() { + dedup := make([]*Const, 0, 32) + for _, c := range f.consts { + if len(*c.Referrers()) == 0 { + continue + } + found := false + for _, d := range dedup { + if c.typ == d.typ && c.Value == d.Value { + replaceAll(c, d) + found = true + break + } + } + if !found { + dedup = append(dedup, c) + } + } + + instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup)) + for i, c := range dedup { + instrs[i] = c + c.setBlock(f.Blocks[0]) + } + copy(instrs[len(dedup):], f.Blocks[0].Instrs) + f.Blocks[0].Instrs = instrs + f.consts = nil +} + +func (f *Function) emitConstsMany() { + type constKey struct { + typ types.Type + value constant.Value + } + + m := make(map[constKey]Value, len(f.consts)) + areNil := 0 + for i, c := range f.consts { + if len(*c.Referrers()) == 0 { + f.consts[i] = nil + areNil++ + continue + } + + k := constKey{ + typ: c.typ, + value: c.Value, + } + if dup, ok := m[k]; !ok { + m[k] = c + } else { + f.consts[i] = nil + areNil++ + replaceAll(c, dup) + } + } + + instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil) + i := 0 + for _, c := range f.consts { + if c != nil { + instrs[i] = c + c.setBlock(f.Blocks[0]) + i++ + } + } + copy(instrs[i:], f.Blocks[0].Instrs) + f.Blocks[0].Instrs = instrs + f.consts = nil +} + +// buildFakeExits ensures that every block in the function is +// reachable in reverse from the Exit block. This is required to build +// a full post-dominator tree, and to ensure the exit block's +// inclusion in the dominator tree. +func buildFakeExits(fn *Function) { + // Find back-edges via forward DFS + fn.fakeExits = BlockSet{values: make([]bool, len(fn.Blocks))} + seen := fn.blockset(0) + backEdges := fn.blockset(1) + + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + backEdges.Add(b) + return + } + for _, pred := range b.Succs { + dfs(pred) + } + } + dfs(fn.Blocks[0]) +buildLoop: + for { + seen := fn.blockset(2) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return + } + for _, pred := range b.Preds { + dfs(pred) + } + if b == fn.Exit { + for _, b := range fn.Blocks { + if fn.fakeExits.Has(b) { + dfs(b) + } + } + } + } + dfs(fn.Exit) + + for _, b := range fn.Blocks { + if !seen.Has(b) && backEdges.Has(b) { + // Block b is not reachable from the exit block. Add a + // fake jump from b to exit, then try again. Note that we + // only add one fake edge at a time, as it may make + // multiple blocks reachable. + // + // We only consider those blocks that have back edges. + // Any unreachable block that doesn't have a back edge + // must flow into a loop, which by definition has a + // back edge. Thus, by looking for loops, we should + // need fewer fake edges overall. + fn.fakeExits.Add(b) + continue buildLoop + } + } + + break + } +} + +// finishBody() finalizes the function after IR code generation of its body. +func (f *Function) finishBody() { + f.objects = nil + f.currentBlock = nil + f.lblocks = nil + + // Remove from f.Locals any Allocs that escape to the heap. + j := 0 + for _, l := range f.Locals { + if !l.Heap { + f.Locals[j] = l + j++ + } + } + // Nil out f.Locals[j:] to aid GC. 
+ for i := j; i < len(f.Locals); i++ { + f.Locals[i] = nil + } + f.Locals = f.Locals[:j] + + optimizeBlocks(f) + buildFakeExits(f) + buildReferrers(f) + buildDomTree(f) + buildPostDomTree(f) + + if f.Prog.mode&NaiveForm == 0 { + lift(f) + } + + // emit constants after lifting, because lifting may produce new constants. + f.emitConsts() + + f.namedResults = nil // (used by lifting) + f.implicitResults = nil + + numberNodes(f) + + defer f.wr.Close() + f.wr.WriteFunc("start", "start", f) + + if f.Prog.mode&PrintFunctions != 0 { + printMu.Lock() + f.WriteTo(os.Stdout) + printMu.Unlock() + } + + if f.Prog.mode&SanityCheckFunctions != 0 { + mustSanityCheck(f, nil) + } +} + +func isUselessPhi(phi *Phi) (Value, bool) { + var v0 Value + for _, e := range phi.Edges { + if e == phi { + continue + } + if v0 == nil { + v0 = e + } + if v0 != e { + if v0, ok := v0.(*Const); ok { + if e, ok := e.(*Const); ok { + if v0.typ == e.typ && v0.Value == e.Value { + continue + } + } + } + return nil, false + } + } + return v0, true +} + +func (f *Function) RemoveNilBlocks() { + f.removeNilBlocks() +} + +// removeNilBlocks eliminates nils from f.Blocks and updates each +// BasicBlock.Index. Use this after any pass that may delete blocks. +// +func (f *Function) removeNilBlocks() { + j := 0 + for _, b := range f.Blocks { + if b != nil { + b.Index = j + f.Blocks[j] = b + j++ + } + } + // Nil out f.Blocks[j:] to aid GC. + for i := j; i < len(f.Blocks); i++ { + f.Blocks[i] = nil + } + f.Blocks = f.Blocks[:j] +} + +// SetDebugMode sets the debug mode for package pkg. If true, all its +// functions will include full debug info. This greatly increases the +// size of the instruction stream, and causes Functions to depend upon +// the ASTs, potentially keeping them live in memory for longer. +// +func (pkg *Package) SetDebugMode(debug bool) { + // TODO(adonovan): do we want ast.File granularity? + pkg.debug = debug +} + +// debugInfo reports whether debug info is wanted for this function. +func (f *Function) debugInfo() bool { + return f.Pkg != nil && f.Pkg.debug +} + +// addNamedLocal creates a local variable, adds it to function f and +// returns it. Its name and type are taken from obj. Subsequent +// calls to f.lookup(obj) will return the same local. +// +func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc { + l := f.addLocal(obj.Type(), source) + f.objects[obj] = l + return l +} + +func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { + return f.addNamedLocal(f.Pkg.info.Defs[id], id) +} + +// addLocal creates an anonymous local variable of type typ, adds it +// to function f and returns it. pos is the optional source location. +// +func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { + v := &Alloc{} + v.setType(types.NewPointer(typ)) + f.Locals = append(f.Locals, v) + f.emit(v, source) + return v +} + +// lookup returns the address of the named variable identified by obj +// that is local to function f or one of its enclosing functions. +// If escaping, the reference comes from a potentially escaping pointer +// expression and the referent must be heap-allocated. +// +func (f *Function) lookup(obj types.Object, escaping bool) Value { + if v, ok := f.objects[obj]; ok { + if alloc, ok := v.(*Alloc); ok && escaping { + alloc.Heap = true + } + return v // function-local var (address) + } + + // Definition must be in an enclosing function; + // plumb it through intervening closures. 
+ if f.parent == nil { + panic("no ir.Value for " + obj.String()) + } + outer := f.parent.lookup(obj, true) // escaping + v := &FreeVar{ + name: obj.Name(), + typ: outer.Type(), + outer: outer, + parent: f, + } + f.objects[obj] = v + f.FreeVars = append(f.FreeVars, v) + return v +} + +// emit emits the specified instruction to function f. +func (f *Function) emit(instr Instruction, source ast.Node) Value { + return f.currentBlock.emit(instr, source) +} + +// RelString returns the full name of this function, qualified by +// package name, receiver type, etc. +// +// The specific formatting rules are not guaranteed and may change. +// +// Examples: +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer +// +// When these functions are referred to from within the same package +// (i.e. from == f.Pkg.Object), they are rendered without the package path. +// For example: "IsNaN", "(*Buffer).Bytes", etc. +// +// All non-synthetic functions have distinct package-qualified names. +// (But two methods may have the same name "(T).f" if one is a synthetic +// wrapper promoting a non-exported method "f" from another package; in +// that case, the strings are equal but the identifiers "f" are distinct.) +// +func (f *Function) RelString(from *types.Package) string { + // Anonymous? + if f.parent != nil { + // An anonymous function's Name() looks like "parentName$1", + // but its String() should include the type/package/etc. + parent := f.parent.RelString(from) + for i, anon := range f.parent.AnonFuncs { + if anon == f { + return fmt.Sprintf("%s$%d", parent, 1+i) + } + } + + return f.name // should never happen + } + + // Method (declared or wrapper)? + if recv := f.Signature.Recv(); recv != nil { + return f.relMethod(from, recv.Type()) + } + + // Thunk? + if f.method != nil { + return f.relMethod(from, f.method.Recv()) + } + + // Bound? + if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { + return f.relMethod(from, f.FreeVars[0].Type()) + } + + // Package-level function? + // Prefix with package name for cross-package references only. + if p := f.pkg(); p != nil && p != from { + return fmt.Sprintf("%s.%s", p.Path(), f.name) + } + + // Unknown. + return f.name +} + +func (f *Function) relMethod(from *types.Package, recv types.Type) string { + return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) +} + +// writeSignature writes to buf the signature sig in declaration syntax. 
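+// For illustration, a method signature rendered relative to another
+// package looks roughly like this (receiver name and type are
+// hypothetical):
+//
+//	func (b *bytes.Buffer) Bytes() []byte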
+func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { + buf.WriteString("func ") + if recv := sig.Recv(); recv != nil { + buf.WriteString("(") + if n := params[0].Name(); n != "" { + buf.WriteString(n) + buf.WriteString(" ") + } + types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + buf.WriteString(") ") + } + buf.WriteString(name) + types.WriteSignature(buf, sig, types.RelativeTo(from)) +} + +func (f *Function) pkg() *types.Package { + if f.Pkg != nil { + return f.Pkg.Pkg + } + return nil +} + +var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer + +func (f *Function) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WriteFunction(&buf, f) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WriteFunction writes to buf a human-readable "disassembly" of f. +func WriteFunction(buf *bytes.Buffer, f *Function) { + fmt.Fprintf(buf, "# Name: %s\n", f.String()) + if f.Pkg != nil { + fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) + } + if syn := f.Synthetic; syn != 0 { + fmt.Fprintln(buf, "# Synthetic:", syn) + } + if pos := f.Pos(); pos.IsValid() { + fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) + } + + if f.parent != nil { + fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) + } + + from := f.pkg() + + if f.FreeVars != nil { + buf.WriteString("# Free variables:\n") + for i, fv := range f.FreeVars { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) + } + } + + if len(f.Locals) > 0 { + buf.WriteString("# Locals:\n") + for i, l := range f.Locals { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + } + } + writeSignature(buf, from, f.Name(), f.Signature, f.Params) + buf.WriteString(":\n") + + if f.Blocks == nil { + buf.WriteString("\t(external)\n") + } + + for _, b := range f.Blocks { + if b == nil { + // Corrupt CFG. + fmt.Fprintf(buf, ".nil:\n") + continue + } + fmt.Fprintf(buf, "b%d:", b.Index) + if len(b.Preds) > 0 { + fmt.Fprint(buf, " ←") + for _, pred := range b.Preds { + fmt.Fprintf(buf, " b%d", pred.Index) + } + } + if b.Comment != "" { + fmt.Fprintf(buf, " # %s", b.Comment) + } + buf.WriteByte('\n') + + if false { // CFG debugging + fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) + } + + buf2 := &bytes.Buffer{} + for _, instr := range b.Instrs { + buf.WriteString("\t") + switch v := instr.(type) { + case Value: + // Left-align the instruction. + if name := v.Name(); name != "" { + fmt.Fprintf(buf, "%s = ", name) + } + buf.WriteString(instr.String()) + case nil: + // Be robust against bad transforms. + buf.WriteString("") + default: + buf.WriteString(instr.String()) + } + buf.WriteString("\n") + + if f.Prog.mode&PrintSource != 0 { + if s := instr.Source(); s != nil { + buf2.Reset() + format.Node(buf2, f.Prog.Fset, s) + for { + line, err := buf2.ReadString('\n') + if len(line) == 0 { + break + } + buf.WriteString("\t\t> ") + buf.WriteString(line) + if line[len(line)-1] != '\n' { + buf.WriteString("\n") + } + if err != nil { + break + } + } + } + } + } + buf.WriteString("\n") + } +} + +// newBasicBlock adds to f a new basic block and returns it. It does +// not automatically become the current block for subsequent calls to emit. +// comment is an optional string for more readable debugging output. 
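+// To reduce allocations, the new block's Instrs slice is carved out of the
+// function's scratchInstructions buffer when one is available; the builder
+// makes a block current for emission by setting f.currentBlock.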
+// +func (f *Function) newBasicBlock(comment string) *BasicBlock { + var instrs []Instruction + if len(f.functionBody.scratchInstructions) > 0 { + instrs = f.functionBody.scratchInstructions[0:0:avgInstructionsPerBlock] + f.functionBody.scratchInstructions = f.functionBody.scratchInstructions[avgInstructionsPerBlock:] + } else { + instrs = make([]Instruction, 0, avgInstructionsPerBlock) + } + + b := &BasicBlock{ + Index: len(f.Blocks), + Comment: comment, + parent: f, + Instrs: instrs, + } + b.Succs = b.succs2[:0] + f.Blocks = append(f.Blocks, b) + return b +} + +// NewFunction returns a new synthetic Function instance belonging to +// prog, with its name and signature fields set as specified. +// +// The caller is responsible for initializing the remaining fields of +// the function object, e.g. Pkg, Params, Blocks. +// +// It is practically impossible for clients to construct well-formed +// IR functions/packages/programs directly, so we assume this is the +// job of the Builder alone. NewFunction exists to provide clients a +// little flexibility. For example, analysis tools may wish to +// construct fake Functions for the root of the callgraph, a fake +// "reflect" package, etc. +// +// TODO(adonovan): think harder about the API here. +// +func (prog *Program) NewFunction(name string, sig *types.Signature, provenance Synthetic) *Function { + return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} +} + +//lint:ignore U1000 we may make use of this for functions loaded from export data +type extentNode [2]token.Pos + +func (n extentNode) Pos() token.Pos { return n[0] } +func (n extentNode) End() token.Pos { return n[1] } + +func (f *Function) initHTML(name string) { + if name == "" { + return + } + if rel := f.RelString(nil); rel == name { + f.wr = NewHTMLWriter("ir.html", rel, "") + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/html.go b/vendor/honnef.co/go/tools/go/ir/html.go new file mode 100644 index 000000000..35b421a70 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/html.go @@ -0,0 +1,1124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2019 Dominik Honnef. All rights reserved. 
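+//
+// This file mirrors the SSA dump viewer from cmd/compile: when a function
+// is selected for printing, an HTMLWriter writes a single ir.html file in
+// the current working directory, one column per phase. A rough sketch of
+// selecting a function through the irutil front end (the option plumbing
+// is an assumption based on irutil.Packages elsewhere in this patch;
+// "initial" is a []*packages.Package and the function name is a
+// placeholder that must equal fn.RelString(nil)):
+//
+//	_, irpkgs := irutil.Packages(initial, ir.BuilderMode(0),
+//		&irutil.Options{PrintFunc: "example.com/pkg.Fibonacci"})
+//	for _, irpkg := range irpkgs {
+//		if irpkg != nil {
+//			irpkg.Build() // ir.html is written while the selected function is built
+//		}
+//	}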
+ +package ir + +import ( + "bytes" + "fmt" + "go/types" + "html" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" +) + +func live(f *Function) []bool { + max := 0 + var ops []*Value + + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if int(instr.ID()) > max { + max = int(instr.ID()) + } + } + } + + out := make([]bool, max+1) + var q []Node + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case *BlankStore, *Call, *ConstantSwitch, *Defer, *Go, *If, *Jump, *MapUpdate, *Next, *Panic, *Recv, *Return, *RunDefers, *Send, *Store, *Unreachable: + out[instr.ID()] = true + q = append(q, instr) + } + } + } + + for len(q) > 0 { + v := q[len(q)-1] + q = q[:len(q)-1] + for _, op := range v.Operands(ops) { + if *op == nil { + continue + } + if !out[(*op).ID()] { + out[(*op).ID()] = true + q = append(q, *op) + } + } + } + + return out +} + +type funcPrinter interface { + startBlock(b *BasicBlock, reachable bool) + endBlock(b *BasicBlock) + value(v Node, live bool) + startDepCycle() + endDepCycle() + named(n string, vals []Value) +} + +func namedValues(f *Function) map[types.Object][]Value { + names := map[types.Object][]Value{} + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*DebugRef); ok { + if obj := instr.object; obj != nil { + names[obj] = append(names[obj], instr.X) + } + } + } + } + // XXX deduplicate values + return names +} + +func fprintFunc(p funcPrinter, f *Function) { + // XXX does our IR form preserve unreachable blocks? + // reachable, live := findlive(f) + + l := live(f) + for _, b := range f.Blocks { + // XXX + // p.startBlock(b, reachable[b.Index]) + p.startBlock(b, true) + + end := len(b.Instrs) - 1 + if end < 0 { + end = 0 + } + for _, v := range b.Instrs[:end] { + if _, ok := v.(*DebugRef); !ok { + p.value(v, l[v.ID()]) + } + } + p.endBlock(b) + } + + names := namedValues(f) + keys := make([]types.Object, 0, len(names)) + for key := range names { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i].Pos() < keys[j].Pos() + }) + for _, key := range keys { + p.named(key.Name(), names[key]) + } +} + +func opName(v Node) string { + switch v := v.(type) { + case *Call: + if v.Common().IsInvoke() { + return "Invoke" + } + return "Call" + case *Alloc: + if v.Heap { + return "HeapAlloc" + } + return "StackAlloc" + case *Select: + if v.Blocking { + return "SelectBlocking" + } + return "SelectNonBlocking" + default: + return reflect.ValueOf(v).Type().Elem().Name() + } +} + +type HTMLWriter struct { + w io.WriteCloser + path string + dot *dotWriter +} + +func NewHTMLWriter(path string, funcname, cfgMask string) *HTMLWriter { + out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatalf("%v", err) + } + pwd, err := os.Getwd() + if err != nil { + log.Fatalf("%v", err) + } + html := HTMLWriter{w: out, path: filepath.Join(pwd, path)} + html.dot = newDotWriter() + html.start(funcname) + return &html +} + +func (w *HTMLWriter) start(name string) { + if w == nil { + return + } + w.WriteString("") + w.WriteString(` + + + + + +`) + w.WriteString("") + w.WriteString("

") + w.WriteString(html.EscapeString(name)) + w.WriteString("

") + w.WriteString(` +help +
+ +

+Click on a value or block to toggle highlighting of that value/block +and its uses. (Values and blocks are highlighted by ID, and IDs of +dead items may be reused, so not all highlights necessarily correspond +to the clicked item.) +

+ +

+Faded out values and blocks are dead code that has not been eliminated. +

+ +

+Values printed in italics have a dependency cycle. +

+ +

+CFG: Dashed edge is for unlikely branches. Blue color is for backward edges. +Edge with a dot means that this edge follows the order in which blocks were laid out. +

+ +
+`) + w.WriteString("") + w.WriteString("") +} + +func (w *HTMLWriter) Close() { + if w == nil { + return + } + io.WriteString(w.w, "") + io.WriteString(w.w, "
") + io.WriteString(w.w, "") + io.WriteString(w.w, "") + w.w.Close() + fmt.Printf("dumped IR to %v\n", w.path) +} + +// WriteFunc writes f in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. +func (w *HTMLWriter) WriteFunc(phase, title string, f *Function) { + if w == nil { + return + } + w.WriteColumn(phase, title, "", funcHTML(f, phase, w.dot)) +} + +// WriteColumn writes raw HTML in a column headed by title. +// It is intended for pre- and post-compilation log output. +func (w *HTMLWriter) WriteColumn(phase, title, class, html string) { + if w == nil { + return + } + id := strings.Replace(phase, " ", "-", -1) + // collapsed column + w.Printf("
%v
", id, phase) + + if class == "" { + w.Printf("", id) + } else { + w.Printf("", id, class) + } + w.WriteString("

" + title + "

") + w.WriteString(html) + w.WriteString("") +} + +func (w *HTMLWriter) Printf(msg string, v ...interface{}) { + if _, err := fmt.Fprintf(w.w, msg, v...); err != nil { + log.Fatalf("%v", err) + } +} + +func (w *HTMLWriter) WriteString(s string) { + if _, err := io.WriteString(w.w, s); err != nil { + log.Fatalf("%v", err) + } +} + +func valueHTML(v Node) string { + if v == nil { + return "<nil>" + } + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. + class := fmt.Sprintf("t%d", v.ID()) + var label string + switch v := v.(type) { + case *Function: + label = v.RelString(nil) + case *Builtin: + label = v.Name() + default: + label = class + } + return fmt.Sprintf("%s", class, label) +} + +func valueLongHTML(v Node) string { + // TODO: Any intra-value formatting? + // I'm wary of adding too much visual noise, + // but a little bit might be valuable. + // We already have visual noise in the form of punctuation + // maybe we could replace some of that with formatting. + s := fmt.Sprintf("", v.ID()) + + linenumber := "(?)" + if v.Pos().IsValid() { + line := v.Parent().Prog.Fset.Position(v.Pos()).Line + linenumber = fmt.Sprintf("(%d)", line, line) + } + + s += fmt.Sprintf("%s %s = %s", valueHTML(v), linenumber, opName(v)) + + if v, ok := v.(Value); ok { + s += " <" + html.EscapeString(v.Type().String()) + ">" + } + + switch v := v.(type) { + case *Parameter: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.name)) + case *BinOp: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) + case *UnOp: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) + case *Extract: + name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() + s += fmt.Sprintf(" [%d] (%s)", v.Index, name) + case *Field: + st := v.X.Type().Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + s += fmt.Sprintf(" [%d] (%s)", v.Field, name) + case *FieldAddr: + st := deref(v.X.Type()).Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + + s += fmt.Sprintf(" [%d] (%s)", v.Field, name) + case *Recv: + s += fmt.Sprintf(" {%t}", v.CommaOk) + case *Call: + if v.Common().IsInvoke() { + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Common().Method.FullName())) + } + case *Const: + if v.Value == nil { + s += " {<nil>}" + } else { + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Value.String())) + } + case *Sigma: + s += fmt.Sprintf(" [#%s]", v.From) + } + for _, a := range v.Operands(nil) { + s += fmt.Sprintf(" %s", valueHTML(*a)) + } + + // OPT(dh): we're calling namedValues many times on the same function. + allNames := namedValues(v.Parent()) + var names []string + for name, values := range allNames { + for _, value := range values { + if v == value { + names = append(names, name.Name()) + break + } + } + } + if len(names) != 0 { + s += " (" + strings.Join(names, ", ") + ")" + } + + s += "" + return s +} + +func blockHTML(b *BasicBlock) string { + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. 
+ s := html.EscapeString(b.String()) + return fmt.Sprintf("%s", s, s) +} + +func blockLongHTML(b *BasicBlock) string { + var kind string + var term Instruction + if len(b.Instrs) > 0 { + term = b.Control() + kind = opName(term) + } + // TODO: improve this for HTML? + s := fmt.Sprintf("%s", b.Index, kind) + + if term != nil { + ops := term.Operands(nil) + if len(ops) > 0 { + var ss []string + for _, op := range ops { + ss = append(ss, valueHTML(*op)) + } + s += " " + strings.Join(ss, ", ") + } + } + if len(b.Succs) > 0 { + s += " →" // right arrow + for _, c := range b.Succs { + s += " " + blockHTML(c) + } + } + return s +} + +func funcHTML(f *Function, phase string, dot *dotWriter) string { + buf := new(bytes.Buffer) + if dot != nil { + dot.writeFuncSVG(buf, phase, f) + } + fmt.Fprint(buf, "") + p := htmlFuncPrinter{w: buf} + fprintFunc(p, f) + + // fprintFunc(&buf, f) // TODO: HTML, not text,
for line breaks, etc. + fmt.Fprint(buf, "
") + return buf.String() +} + +type htmlFuncPrinter struct { + w io.Writer +} + +func (p htmlFuncPrinter) startBlock(b *BasicBlock, reachable bool) { + var dead string + if !reachable { + dead = "dead-block" + } + fmt.Fprintf(p.w, "
    ", b, dead) + fmt.Fprintf(p.w, "
  • %s:", blockHTML(b)) + if len(b.Preds) > 0 { + io.WriteString(p.w, " ←") // left arrow + for _, pred := range b.Preds { + fmt.Fprintf(p.w, " %s", blockHTML(pred)) + } + } + if len(b.Instrs) > 0 { + io.WriteString(p.w, ``) + } + io.WriteString(p.w, "
  • ") + if len(b.Instrs) > 0 { // start list of values + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "
      ") + } +} + +func (p htmlFuncPrinter) endBlock(b *BasicBlock) { + if len(b.Instrs) > 0 { // end list of values + io.WriteString(p.w, "
    ") + io.WriteString(p.w, "
  • ") + } + io.WriteString(p.w, "
  • ") + fmt.Fprint(p.w, blockLongHTML(b)) + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "
") +} + +func (p htmlFuncPrinter) value(v Node, live bool) { + var dead string + if !live { + dead = "dead-value" + } + fmt.Fprintf(p.w, "
  • ", dead) + fmt.Fprint(p.w, valueLongHTML(v)) + io.WriteString(p.w, "
  • ") +} + +func (p htmlFuncPrinter) startDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) endDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) named(n string, vals []Value) { + fmt.Fprintf(p.w, "
  • name %s: ", n) + for _, val := range vals { + fmt.Fprintf(p.w, "%s ", valueHTML(val)) + } + fmt.Fprintf(p.w, "
  • ") +} + +type dotWriter struct { + path string + broken bool +} + +// newDotWriter returns non-nil value when mask is valid. +// dotWriter will generate SVGs only for the phases specified in the mask. +// mask can contain following patterns and combinations of them: +// * - all of them; +// x-y - x through y, inclusive; +// x,y - x and y, but not the passes between. +func newDotWriter() *dotWriter { + path, err := exec.LookPath("dot") + if err != nil { + fmt.Println(err) + return nil + } + return &dotWriter{path: path} +} + +func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Function) { + if d.broken { + return + } + cmd := exec.Command(d.path, "-Tsvg") + pipe, err := cmd.StdinPipe() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + buf := new(bytes.Buffer) + cmd.Stdout = buf + bufErr := new(bytes.Buffer) + cmd.Stderr = bufErr + err = cmd.Start() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + fmt.Fprint(pipe, `digraph "" { margin=0; size="4,40"; ranksep=.2; `) + id := strings.Replace(phase, " ", "-", -1) + fmt.Fprintf(pipe, `id="g_graph_%s";`, id) + fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`) + fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`) + for _, b := range f.Blocks { + layout := "" + fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v"];`, b, b, layout, b.Control().String(), id, b) + } + indexOf := make([]int, len(f.Blocks)) + for i, b := range f.Blocks { + indexOf[b.Index] = i + } + + // XXX + /* + ponums := make([]int32, len(f.Blocks)) + _ = postorderWithNumbering(f, ponums) + isBackEdge := func(from, to int) bool { + return ponums[from] <= ponums[to] + } + */ + isBackEdge := func(from, to int) bool { return false } + + for _, b := range f.Blocks { + for i, s := range b.Succs { + style := "solid" + color := "black" + arrow := "vee" + if isBackEdge(b.Index, s.Index) { + color = "blue" + } + fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s, i, style, color, arrow) + } + } + fmt.Fprint(pipe, "}") + pipe.Close() + err = cmd.Wait() + if err != nil { + d.broken = true + fmt.Printf("dot: %v\n%v\n", err, bufErr.String()) + return + } + + svgID := "svg_graph_" + id + fmt.Fprintf(w, `
    `, svgID, svgID) + // For now, an awful hack: edit the html as it passes through + // our fingers, finding ' 0 { + fset = initial[0].Fset + } + + prog := ir.NewProgram(fset, mode) + if opts != nil { + prog.PrintFunc = opts.PrintFunc + } + + isInitial := make(map[*packages.Package]bool, len(initial)) + for _, p := range initial { + isInitial[p] = true + } + + irmap := make(map[*packages.Package]*ir.Package) + packages.Visit(initial, nil, func(p *packages.Package) { + if p.Types != nil && !p.IllTyped { + var files []*ast.File + if deps || isInitial[p] { + files = p.Syntax + } + irmap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true) + } + }) + + var irpkgs []*ir.Package + for _, p := range initial { + irpkgs = append(irpkgs, irmap[p]) // may be nil + } + return prog, irpkgs +} + +// CreateProgram returns a new program in IR form, given a program +// loaded from source. An IR package is created for each transitively +// error-free package of lprog. +// +// Code for bodies of functions is not built until Build is called +// on the result. +// +// The mode parameter controls diagnostics and checking during IR construction. +// +// Deprecated: use golang.org/x/tools/go/packages and the Packages +// function instead; see ir.ExampleLoadPackages. +// +func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { + prog := ir.NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + + return prog +} + +// BuildPackage builds an IR program with IR for a single package. +// +// It populates pkg by type-checking the specified file ASTs. All +// dependencies are loaded using the importer specified by tc, which +// typically loads compiler export data; IR code cannot be built for +// those packages. BuildPackage then constructs an ir.Program with all +// dependency packages created, and builds and returns the IR package +// corresponding to pkg. +// +// The caller must have set pkg.Path() to the import path. +// +// The operation fails if there were any type-checking or import errors. +// +// See ../ir/example_test.go for an example. +// +func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) { + if fset == nil { + panic("no token.FileSet") + } + if pkg.Path() == "" { + panic("package has no import path") + } + + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { + return nil, nil, err + } + + prog := ir.NewProgram(fset, mode) + + // Create IR packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pkg.Imports()) + + // Create and build the primary package. 
+ irpkg := prog.CreatePackage(pkg, files, info, false) + irpkg.Build() + return irpkg, info, nil +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/loops.go b/vendor/honnef.co/go/tools/go/ir/irutil/loops.go new file mode 100644 index 000000000..751cc680b --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/loops.go @@ -0,0 +1,54 @@ +package irutil + +import "honnef.co/go/tools/go/ir" + +type Loop struct{ *ir.BlockSet } + +func FindLoops(fn *ir.Function) []Loop { + if fn.Blocks == nil { + return nil + } + tree := fn.DomPreorder() + var sets []Loop + for _, h := range tree { + for _, n := range h.Preds { + if !h.Dominates(n) { + continue + } + // n is a back-edge to h + // h is the loop header + if n == h { + set := Loop{ir.NewBlockSet(len(fn.Blocks))} + set.Add(n) + sets = append(sets, set) + continue + } + set := Loop{ir.NewBlockSet(len(fn.Blocks))} + set.Add(h) + set.Add(n) + for _, b := range allPredsBut(n, h, nil) { + set.Add(b) + } + sets = append(sets, set) + } + } + return sets +} + +func allPredsBut(b, but *ir.BasicBlock, list []*ir.BasicBlock) []*ir.BasicBlock { +outer: + for _, pred := range b.Preds { + if pred == but { + continue + } + for _, p := range list { + // TODO improve big-o complexity of this function + if pred == p { + continue outer + } + } + list = append(list, pred) + list = allPredsBut(pred, but, list) + } + return list +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/stub.go b/vendor/honnef.co/go/tools/go/ir/irutil/stub.go new file mode 100644 index 000000000..4311c7dbe --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/stub.go @@ -0,0 +1,32 @@ +package irutil + +import ( + "honnef.co/go/tools/go/ir" +) + +// IsStub reports whether a function is a stub. A function is +// considered a stub if it has no instructions or if all it does is +// return a constant value. +func IsStub(fn *ir.Function) bool { + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.Const: + // const naturally has no side-effects + case *ir.Panic: + // panic is a stub if it only uses constants + case *ir.Return: + // return is a stub if it only uses constants + case *ir.DebugRef: + case *ir.Jump: + // if there are no disallowed instructions, then we're + // only jumping to the exit block (or possibly + // somewhere else that's stubby?) + default: + // all other instructions are assumed to do actual work + return false + } + } + } + return true +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/switch.go b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go new file mode 100644 index 000000000..e7654e008 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package irutil + +// This file implements discovery of switch and type-switch constructs +// from low-level control flow. +// +// Many techniques exist for compiling a high-level switch with +// constant cases to efficient machine code. The optimal choice will +// depend on the data type, the specific case values, the code in the +// body of each case, and the hardware. +// Some examples: +// - a lookup table (for a switch that maps constants to constants) +// - a computed goto +// - a binary tree +// - a perfect hash +// - a two-level switch (to partition constant strings by their first byte). 
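+//
+// A minimal sketch of driving the discovery entry point in this file
+// (fn is any *ir.Function that has already been built; the printing is
+// purely illustrative):
+//
+//	for _, sw := range Switches(fn) {
+//		if sw.ConstCases != nil {
+//			fmt.Printf("value switch on %s, %d const cases\n", sw.X.Name(), len(sw.ConstCases))
+//		} else {
+//			fmt.Printf("type switch on %s, %d type cases\n", sw.X.Name(), len(sw.TypeCases))
+//		}
+//		fmt.Println(sw.String())
+//	}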
+ +import ( + "bytes" + "fmt" + "go/token" + "go/types" + + "honnef.co/go/tools/go/ir" +) + +// A ConstCase represents a single constant comparison. +// It is part of a Switch. +type ConstCase struct { + Block *ir.BasicBlock // block performing the comparison + Body *ir.BasicBlock // body of the case + Value *ir.Const // case comparand +} + +// A TypeCase represents a single type assertion. +// It is part of a Switch. +type TypeCase struct { + Block *ir.BasicBlock // block performing the type assert + Body *ir.BasicBlock // body of the case + Type types.Type // case type + Binding ir.Value // value bound by this case +} + +// A Switch is a logical high-level control flow operation +// (a multiway branch) discovered by analysis of a CFG containing +// only if/else chains. It is not part of the ir.Instruction set. +// +// One of ConstCases and TypeCases has length >= 2; +// the other is nil. +// +// In a value switch, the list of cases may contain duplicate constants. +// A type switch may contain duplicate types, or types assignable +// to an interface type also in the list. +// TODO(adonovan): eliminate such duplicates. +// +type Switch struct { + Start *ir.BasicBlock // block containing start of if/else chain + X ir.Value // the switch operand + ConstCases []ConstCase // ordered list of constant comparisons + TypeCases []TypeCase // ordered list of type assertions + Default *ir.BasicBlock // successor if all comparisons fail +} + +func (sw *Switch) String() string { + // We represent each block by the String() of its + // first Instruction, e.g. "print(42:int)". + var buf bytes.Buffer + if sw.ConstCases != nil { + fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) + for _, c := range sw.ConstCases { + fmt.Fprintf(&buf, "case %s: %s\n", c.Value.Name(), c.Body.Instrs[0]) + } + } else { + fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) + for _, c := range sw.TypeCases { + fmt.Fprintf(&buf, "case %s %s: %s\n", + c.Binding.Name(), c.Type, c.Body.Instrs[0]) + } + } + if sw.Default != nil { + fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) + } + fmt.Fprintf(&buf, "}") + return buf.String() +} + +// Switches examines the control-flow graph of fn and returns the +// set of inferred value and type switches. A value switch tests an +// ir.Value for equality against two or more compile-time constant +// values. Switches involving link-time constants (addresses) are +// ignored. A type switch type-asserts an ir.Value against two or +// more types. +// +// The switches are returned in dominance order. +// +// The resulting switches do not necessarily correspond to uses of the +// 'switch' keyword in the source: for example, a single source-level +// switch statement with non-constant cases may result in zero, one or +// many Switches, one per plural sequence of constant cases. +// Switches may even be inferred from if/else- or goto-based control flow. +// (In general, the control flow constructs of the source program +// cannot be faithfully reproduced from the IR.) +// +func Switches(fn *ir.Function) []Switch { + // Traverse the CFG in dominance order, so we don't + // enter an if/else-chain in the middle. + var switches []Switch + seen := make(map[*ir.BasicBlock]bool) // TODO(adonovan): opt: use ir.blockSet + for _, b := range fn.DomPreorder() { + if x, k := isComparisonBlock(b); x != nil { + // Block b starts a switch. 
+ sw := Switch{Start: b, X: x} + valueSwitch(&sw, k, seen) + if len(sw.ConstCases) > 1 { + switches = append(switches, sw) + } + } + + if y, x, T := isTypeAssertBlock(b); y != nil { + // Block b starts a type switch. + sw := Switch{Start: b, X: x} + typeSwitch(&sw, y, T, seen) + if len(sw.TypeCases) > 1 { + switches = append(switches, sw) + } + } + } + return switches +} + +func isSameX(x1 ir.Value, x2 ir.Value) bool { + if x1 == x2 { + return true + } + if x2, ok := x2.(*ir.Sigma); ok { + return isSameX(x1, x2.X) + } + return false +} + +func valueSwitch(sw *Switch, k *ir.Const, seen map[*ir.BasicBlock]bool) { + b := sw.Start + x := sw.X + for isSameX(sw.X, x) { + if seen[b] { + break + } + seen[b] = true + + sw.ConstCases = append(sw.ConstCases, ConstCase{ + Block: b, + Body: b.Succs[0], + Value: k, + }) + b = b.Succs[1] + n := 0 + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.If, *ir.BinOp: + n++ + case *ir.Sigma, *ir.Phi, *ir.DebugRef: + default: + n += 1000 + } + } + if n != 2 { + // Block b contains not just 'if x == k' and σ/ϕ nodes, + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + x, k = isComparisonBlock(b) + } + sw.Default = b +} + +func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bool) { + b := sw.Start + x := sw.X + for isSameX(sw.X, x) { + if seen[b] { + break + } + seen[b] = true + + sw.TypeCases = append(sw.TypeCases, TypeCase{ + Block: b, + Body: b.Succs[0], + Type: T, + Binding: y, + }) + b = b.Succs[1] + n := 0 + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.TypeAssert, *ir.Extract, *ir.If: + n++ + case *ir.Sigma, *ir.Phi: + default: + n += 1000 + } + } + if n != 4 { + // Block b contains not just + // {TypeAssert; Extract #0; Extract #1; If} + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + y, x, T = isTypeAssertBlock(b) + } + sw.Default = b +} + +// isComparisonBlock returns the operands (v, k) if a block ends with +// a comparison v==k, where k is a compile-time constant. +// +func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { + if n := len(b.Instrs); n >= 2 { + if i, ok := b.Instrs[n-1].(*ir.If); ok { + if binop, ok := i.Cond.(*ir.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { + if k, ok := binop.Y.(*ir.Const); ok { + return binop.X, k + } + if k, ok := binop.X.(*ir.Const); ok { + return binop.Y, k + } + } + } + } + return +} + +// isTypeAssertBlock returns the operands (y, x, T) if a block ends with +// a type assertion "if y, ok := x.(T); ok {". +// +func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) { + if n := len(b.Instrs); n >= 4 { + if i, ok := b.Instrs[n-1].(*ir.If); ok { + if ext1, ok := i.Cond.(*ir.Extract); ok && ext1.Block() == b && ext1.Index == 1 { + if ta, ok := ext1.Tuple.(*ir.TypeAssert); ok && ta.Block() == b { + // hack: relies upon instruction ordering. 
+ if ext0, ok := b.Instrs[n-3].(*ir.Extract); ok { + return ext0, ta.X, ta.AssertedType + } + } + } + } + } + return +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/terminates.go b/vendor/honnef.co/go/tools/go/ir/irutil/terminates.go new file mode 100644 index 000000000..84e7503bb --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/terminates.go @@ -0,0 +1,70 @@ +package irutil + +import ( + "go/types" + + "honnef.co/go/tools/go/ir" +) + +// Terminates reports whether fn is supposed to return, that is if it +// has at least one theoretic path that returns from the function. +// Explicit panics do not count as terminating. +func Terminates(fn *ir.Function) bool { + if fn.Blocks == nil { + // assuming that a function terminates is the conservative + // choice + return true + } + + for _, block := range fn.Blocks { + if _, ok := block.Control().(*ir.Return); ok { + if len(block.Preds) == 0 { + return true + } + for _, pred := range block.Preds { + switch ctrl := pred.Control().(type) { + case *ir.Panic: + // explicit panics do not count as terminating + case *ir.If: + // Check if we got here by receiving from a closed + // time.Tick channel – this cannot happen at + // runtime and thus doesn't constitute termination + iff := ctrl + if !ok { + return true + } + ex, ok := iff.Cond.(*ir.Extract) + if !ok { + return true + } + if ex.Index != 1 { + return true + } + recv, ok := ex.Tuple.(*ir.Recv) + if !ok { + return true + } + call, ok := recv.Chan.(*ir.Call) + if !ok { + return true + } + fn, ok := call.Common().Value.(*ir.Function) + if !ok { + return true + } + fn2, ok := fn.Object().(*types.Func) + if !ok { + return true + } + if fn2.FullName() != "time.Tick" { + return true + } + default: + // we've reached the exit block + return true + } + } + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/util.go b/vendor/honnef.co/go/tools/go/ir/irutil/util.go new file mode 100644 index 000000000..49b978a96 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/util.go @@ -0,0 +1,165 @@ +package irutil + +import ( + "go/types" + "strings" + + "honnef.co/go/tools/go/ir" + "honnef.co/go/tools/go/types/typeutil" +) + +func Reachable(from, to *ir.BasicBlock) bool { + if from == to { + return true + } + if from.Dominates(to) { + return true + } + + found := false + Walk(from, func(b *ir.BasicBlock) bool { + if b == to { + found = true + return false + } + return true + }) + return found +} + +func Walk(b *ir.BasicBlock, fn func(*ir.BasicBlock) bool) { + seen := map[*ir.BasicBlock]bool{} + wl := []*ir.BasicBlock{b} + for len(wl) > 0 { + b := wl[len(wl)-1] + wl = wl[:len(wl)-1] + if seen[b] { + continue + } + seen[b] = true + if !fn(b) { + continue + } + wl = append(wl, b.Succs...) 
+ } +} + +func Vararg(x *ir.Slice) ([]ir.Value, bool) { + var out []ir.Value + slice, ok := x.X.(*ir.Alloc) + if !ok { + return nil, false + } + for _, ref := range *slice.Referrers() { + if ref == x { + continue + } + if ref.Block() != x.Block() { + return nil, false + } + idx, ok := ref.(*ir.IndexAddr) + if !ok { + return nil, false + } + if len(*idx.Referrers()) != 1 { + return nil, false + } + store, ok := (*idx.Referrers())[0].(*ir.Store) + if !ok { + return nil, false + } + out = append(out, store.Val) + } + return out, true +} + +func CallName(call *ir.CallCommon) string { + if call.IsInvoke() { + return "" + } + switch v := call.Value.(type) { + case *ir.Function: + fn, ok := v.Object().(*types.Func) + if !ok { + return "" + } + return typeutil.FuncName(fn) + case *ir.Builtin: + return v.Name() + } + return "" +} + +func IsCallTo(call *ir.CallCommon, name string) bool { return CallName(call) == name } + +func IsCallToAny(call *ir.CallCommon, names ...string) bool { + q := CallName(call) + for _, name := range names { + if q == name { + return true + } + } + return false +} + +func FilterDebug(instr []ir.Instruction) []ir.Instruction { + var out []ir.Instruction + for _, ins := range instr { + if _, ok := ins.(*ir.DebugRef); !ok { + out = append(out, ins) + } + } + return out +} + +func IsExample(fn *ir.Function) bool { + if !strings.HasPrefix(fn.Name(), "Example") { + return false + } + f := fn.Prog.Fset.File(fn.Pos()) + if f == nil { + return false + } + return strings.HasSuffix(f.Name(), "_test.go") +} + +// Flatten recursively returns the underlying value of an ir.Sigma or +// ir.Phi node. If all edges in an ir.Phi node are the same (after +// flattening), the flattened edge will get returned. If flattening is +// not possible, nil is returned. +func Flatten(v ir.Value) ir.Value { + failed := false + seen := map[ir.Value]struct{}{} + var out ir.Value + var dfs func(v ir.Value) + dfs = func(v ir.Value) { + if failed { + return + } + if _, ok := seen[v]; ok { + return + } + seen[v] = struct{}{} + + switch v := v.(type) { + case *ir.Sigma: + dfs(v.X) + case *ir.Phi: + for _, e := range v.Edges { + dfs(e) + } + default: + if out == nil { + out = v + } else if out != v { + failed = true + } + } + } + dfs(v) + + if failed { + return nil + } + return out +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/visit.go b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go new file mode 100644 index 000000000..f6d0503dd --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package irutil + +import "honnef.co/go/tools/go/ir" + +// This file defines utilities for visiting the IR of +// a Program. +// +// TODO(adonovan): test coverage. + +// AllFunctions finds and returns the set of functions potentially +// needed by program prog, as determined by a simple linker-style +// reachability algorithm starting from the members and method-sets of +// each package. The result may include anonymous functions and +// synthetic wrappers. +// +// Precondition: all packages are built. 
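+//
+// A minimal usage sketch; loading through golang.org/x/tools/go/packages
+// and building through the Packages helper of this package are assumptions
+// of the sketch, not requirements of AllFunctions beyond the precondition
+// above:
+//
+//	initial, _ := packages.Load(&packages.Config{Mode: packages.LoadAllSyntax}, "./...")
+//	prog, irpkgs := Packages(initial, ir.BuilderMode(0), nil)
+//	for _, p := range irpkgs {
+//		if p != nil {
+//			p.Build()
+//		}
+//	}
+//	for fn := range AllFunctions(prog) {
+//		fmt.Println(fn.RelString(nil))
+//	}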
+// +func AllFunctions(prog *ir.Program) map[*ir.Function]bool { + visit := visitor{ + prog: prog, + seen: make(map[*ir.Function]bool), + } + visit.program() + return visit.seen +} + +type visitor struct { + prog *ir.Program + seen map[*ir.Function]bool +} + +func (visit *visitor) program() { + for _, pkg := range visit.prog.AllPackages() { + for _, mem := range pkg.Members { + if fn, ok := mem.(*ir.Function); ok { + visit.function(fn) + } + } + } + for _, T := range visit.prog.RuntimeTypes() { + mset := visit.prog.MethodSets.MethodSet(T) + for i, n := 0, mset.Len(); i < n; i++ { + visit.function(visit.prog.MethodValue(mset.At(i))) + } + } +} + +func (visit *visitor) function(fn *ir.Function) { + if !visit.seen[fn] { + visit.seen[fn] = true + var buf [10]*ir.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ir.Function); ok { + visit.function(fn) + } + } + } + } + } +} + +// MainPackages returns the subset of the specified packages +// named "main" that define a main function. +// The result may include synthetic "testmain" packages. +func MainPackages(pkgs []*ir.Package) []*ir.Package { + var mains []*ir.Package + for _, pkg := range pkgs { + if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { + mains = append(mains, pkg) + } + } + return mains +} diff --git a/vendor/honnef.co/go/tools/go/ir/lift.go b/vendor/honnef.co/go/tools/go/ir/lift.go new file mode 100644 index 000000000..336470464 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/lift.go @@ -0,0 +1,1075 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines the lifting pass which tries to "lift" Alloc +// cells (new/local variables) into SSA registers, replacing loads +// with the dominating stored value, eliminating loads and stores, and +// inserting φ- and σ-nodes as needed. + +// Cited papers and resources: +// +// Ron Cytron et al. 1991. Efficiently computing SSA form... +// http://doi.acm.org/10.1145/115372.115320 +// +// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. +// Software Practice and Experience 2001, 4:1-10. +// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf +// +// Daniel Berlin, llvmdev mailing list, 2012. +// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html +// (Be sure to expand the whole thread.) +// +// C. Scott Ananian. 1997. The static single information form. +// +// Jeremy Singer. 2006. Static program analysis based on virtual register renaming. + +// TODO(adonovan): opt: there are many optimizations worth evaluating, and +// the conventional wisdom for SSA construction is that a simple +// algorithm well engineered often beats those of better asymptotic +// complexity on all but the most egregious inputs. +// +// Danny Berlin suggests that the Cooper et al. algorithm for +// computing the dominance frontier is superior to Cytron et al. +// Furthermore he recommends that rather than computing the DF for the +// whole function then renaming all alloc cells, it may be cheaper to +// compute the DF for each alloc cell separately and throw it away. +// +// Consider exploiting liveness information to avoid creating dead +// φ-nodes which we then immediately remove. +// +// Also see many other "TODO: opt" suggestions in the code. 
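+//
+// As a small schematic illustration (not exact printer output): for
+//
+//	x := 0
+//	if cond {
+//		x = 1
+//	}
+//	return x
+//
+// the unlifted form allocates a cell for x and funnels every definition
+// and use through Store and Load instructions; after lifting, the Alloc,
+// Stores and Loads are gone and the join point selects between the two
+// reaching definitions with a φ-node, roughly
+//
+//	t3 = Phi <int> 0:int 1:int (x)
+//	Return t3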
+ +import ( + "fmt" + "go/types" + "os" +) + +// If true, show diagnostic information at each step of lifting. +// Very verbose. +const debugLifting = false + +// domFrontier maps each block to the set of blocks in its dominance +// frontier. The outer slice is conceptually a map keyed by +// Block.Index. The inner slice is conceptually a set, possibly +// containing duplicates. +// +// TODO(adonovan): opt: measure impact of dups; consider a packed bit +// representation, e.g. big.Int, and bitwise parallel operations for +// the union step in the Children loop. +// +// domFrontier's methods mutate the slice's elements but not its +// length, so their receivers needn't be pointers. +// +type domFrontier [][]*BasicBlock + +func (df domFrontier) add(u, v *BasicBlock) { + df[u.Index] = append(df[u.Index], v) +} + +// build builds the dominance frontier df for the dominator tree of +// fn, using the algorithm found in A Simple, Fast Dominance +// Algorithm, Figure 5. +// +// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA +// by pruning the entire IDF computation, rather than merely pruning +// the DF -> IDF step. +func (df domFrontier) build(fn *Function) { + for _, b := range fn.Blocks { + preds := b.Preds[0:len(b.Preds):len(b.Preds)] + if b == fn.Exit { + for i, v := range fn.fakeExits.values { + if v { + preds = append(preds, fn.Blocks[i]) + } + } + } + if len(preds) >= 2 { + for _, p := range preds { + runner := p + for runner != b.dom.idom { + df.add(runner, b) + runner = runner.dom.idom + } + } + } + } +} + +func buildDomFrontier(fn *Function) domFrontier { + df := make(domFrontier, len(fn.Blocks)) + df.build(fn) + return df +} + +type postDomFrontier [][]*BasicBlock + +func (rdf postDomFrontier) add(u, v *BasicBlock) { + rdf[u.Index] = append(rdf[u.Index], v) +} + +func (rdf postDomFrontier) build(fn *Function) { + for _, b := range fn.Blocks { + succs := b.Succs[0:len(b.Succs):len(b.Succs)] + if fn.fakeExits.Has(b) { + succs = append(succs, fn.Exit) + } + if len(succs) >= 2 { + for _, s := range succs { + runner := s + for runner != b.pdom.idom { + rdf.add(runner, b) + runner = runner.pdom.idom + } + } + } + } +} + +func buildPostDomFrontier(fn *Function) postDomFrontier { + rdf := make(postDomFrontier, len(fn.Blocks)) + rdf.build(fn) + return rdf +} + +func removeInstr(refs []Instruction, instr Instruction) []Instruction { + i := 0 + for _, ref := range refs { + if ref == instr { + continue + } + refs[i] = ref + i++ + } + for j := i; j != len(refs); j++ { + refs[j] = nil // aid GC + } + return refs[:i] +} + +func clearInstrs(instrs []Instruction) { + for i := range instrs { + instrs[i] = nil + } +} + +// lift replaces local and new Allocs accessed only with +// load/store by IR registers, inserting φ- and σ-nodes where necessary. +// The result is a program in pruned SSI form. +// +// Preconditions: +// - fn has no dead blocks (blockopt has run). +// - Def/use info (Operands and Referrers) is up-to-date. +// - The dominator tree is up-to-date. +// +func lift(fn *Function) { + // TODO(adonovan): opt: lots of little optimizations may be + // worthwhile here, especially if they cause us to avoid + // buildDomFrontier. For example: + // + // - Alloc never loaded? Eliminate. + // - Alloc never stored? Replace all loads with a zero constant. + // - Alloc stored once? Replace loads with dominating store; + // don't forget that an Alloc is itself an effective store + // of zero. + // - Alloc used only within a single block? + // Use degenerate algorithm avoiding φ-nodes. 
+ // - Consider synergy with scalar replacement of aggregates (SRA). + // e.g. *(&x.f) where x is an Alloc. + // Perhaps we'd get better results if we generated this as x.f + // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). + // Unclear. + // + // But we will start with the simplest correct code. + var df domFrontier + var rdf postDomFrontier + var closure *closure + var newPhis newPhiMap + var newSigmas newSigmaMap + + // During this pass we will replace some BasicBlock.Instrs + // (allocs, loads and stores) with nil, keeping a count in + // BasicBlock.gaps. At the end we will reset Instrs to the + // concatenation of all non-dead newPhis and non-nil Instrs + // for the block, reusing the original array if space permits. + + // While we're here, we also eliminate 'rundefers' + // instructions in functions that contain no 'defer' + // instructions. + usesDefer := false + + // Determine which allocs we can lift and number them densely. + // The renaming phase uses this numbering for compact maps. + numAllocs := 0 + for _, b := range fn.Blocks { + b.gaps = 0 + b.rundefers = 0 + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *Alloc: + if !liftable(instr) { + instr.index = -1 + continue + } + index := -1 + if numAllocs == 0 { + df = buildDomFrontier(fn) + rdf = buildPostDomFrontier(fn) + if len(fn.Blocks) > 2 { + closure = transitiveClosure(fn) + } + newPhis = make(newPhiMap, len(fn.Blocks)) + newSigmas = make(newSigmaMap, len(fn.Blocks)) + + if debugLifting { + title := false + for i, blocks := range df { + if blocks != nil { + if !title { + fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) + title = true + } + fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) + } + } + } + } + liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) + index = numAllocs + numAllocs++ + instr.index = index + case *Defer: + usesDefer = true + case *RunDefers: + b.rundefers++ + } + } + } + + if numAllocs > 0 { + // renaming maps an alloc (keyed by index) to its replacement + // value. Initially the renaming contains nil, signifying the + // zero constant of the appropriate type; we construct the + // Const lazily at most once on each path through the domtree. + // TODO(adonovan): opt: cache per-function not per subtree. + renaming := make([]Value, numAllocs) + + // Renaming. + rename(fn.Blocks[0], renaming, newPhis, newSigmas) + + simplifyPhis(newPhis) + + // Eliminate dead φ- and σ-nodes. + markLiveNodes(fn.Blocks, newPhis, newSigmas) + } + + // Prepend remaining live φ-nodes to each block and possibly kill rundefers. 
+ for _, b := range fn.Blocks { + var head []Instruction + if numAllocs > 0 { + nps := newPhis[b.Index] + head = make([]Instruction, 0, len(nps)) + for _, pred := range b.Preds { + nss := newSigmas[pred.Index] + idx := pred.succIndex(b) + for _, newSigma := range nss { + if sigma := newSigma.sigmas[idx]; sigma != nil && sigma.live { + head = append(head, sigma) + + // we didn't populate referrers before, as most + // sigma nodes will be killed + if refs := sigma.X.Referrers(); refs != nil { + *refs = append(*refs, sigma) + } + } else if sigma != nil { + sigma.block = nil + } + } + } + for _, np := range nps { + if np.phi.live { + head = append(head, np.phi) + } else { + for _, edge := range np.phi.Edges { + if refs := edge.Referrers(); refs != nil { + *refs = removeInstr(*refs, np.phi) + } + } + np.phi.block = nil + } + } + } + + rundefersToKill := b.rundefers + if usesDefer { + rundefersToKill = 0 + } + + j := len(head) + if j+b.gaps+rundefersToKill == 0 { + continue // fast path: no new phis or gaps + } + + // We could do straight copies instead of element-wise copies + // when both b.gaps and rundefersToKill are zero. However, + // that seems to only be the case ~1% of the time, which + // doesn't seem worth the extra branch. + + // Remove dead instructions, add phis and sigmas + ns := len(b.Instrs) + j - b.gaps - rundefersToKill + if ns <= cap(b.Instrs) { + // b.Instrs has enough capacity to store all instructions + + // OPT(dh): check cap vs the actually required space; if + // there is a big enough difference, it may be worth + // allocating a new slice, to avoid pinning memory. + dst := b.Instrs[:cap(b.Instrs)] + i := len(dst) - 1 + for n := len(b.Instrs) - 1; n >= 0; n-- { + instr := dst[n] + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[i] = instr + i-- + } + off := i + 1 - len(head) + // aid GC + clearInstrs(dst[:off]) + dst = dst[off:] + copy(dst, head) + b.Instrs = dst + } else { + // not enough space, so allocate a new slice and copy + // over. + dst := make([]Instruction, ns) + copy(dst, head) + + for _, instr := range b.Instrs { + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[j] = instr + j++ + } + b.Instrs = dst + } + } + + // Remove any fn.Locals that were lifted. + j := 0 + for _, l := range fn.Locals { + if l.index < 0 { + fn.Locals[j] = l + j++ + } + } + // Nil out fn.Locals[j:] to aid GC. + for i := j; i < len(fn.Locals); i++ { + fn.Locals[i] = nil + } + fn.Locals = fn.Locals[:j] +} + +func hasDirectReferrer(instr Instruction) bool { + for _, instr := range *instr.Referrers() { + switch instr.(type) { + case *Phi, *Sigma: + // ignore + default: + return true + } + } + return false +} + +func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) { + // Phi and sigma nodes are considered live if a non-phi, non-sigma + // node uses them. Once we find a node that is live, we mark all + // of its operands as used, too. + for _, npList := range newPhis { + for _, np := range npList { + phi := np.phi + if !phi.live && hasDirectReferrer(phi) { + markLivePhi(phi) + } + } + } + for _, npList := range newSigmas { + for _, np := range npList { + for _, sigma := range np.sigmas { + if sigma != nil && !sigma.live && hasDirectReferrer(sigma) { + markLiveSigma(sigma) + } + } + } + } + // Existing φ-nodes due to && and || operators + // are all considered live (see Go issue 19622). 
+ for _, b := range blocks { + for _, phi := range b.phis() { + markLivePhi(phi.(*Phi)) + } + } +} + +func markLivePhi(phi *Phi) { + phi.live = true + for _, rand := range phi.Edges { + switch rand := rand.(type) { + case *Phi: + if !rand.live { + markLivePhi(rand) + } + case *Sigma: + if !rand.live { + markLiveSigma(rand) + } + } + } +} + +func markLiveSigma(sigma *Sigma) { + sigma.live = true + switch rand := sigma.X.(type) { + case *Phi: + if !rand.live { + markLivePhi(rand) + } + case *Sigma: + if !rand.live { + markLiveSigma(rand) + } + } +} + +// simplifyPhis replaces trivial phis with non-phi alternatives. Phi +// nodes where all edges are identical, or consist of only the phi +// itself and one other value, may be replaced with the value. +func simplifyPhis(newPhis newPhiMap) { + // find all phis that are trivial and can be replaced with a + // non-phi value. run until we reach a fixpoint, because replacing + // a phi may make other phis trivial. + for changed := true; changed; { + changed = false + for _, npList := range newPhis { + for _, np := range npList { + if np.phi.live { + // we're reusing 'live' to mean 'dead' in the context of simplifyPhis + continue + } + if r, ok := isUselessPhi(np.phi); ok { + // useless phi, replace its uses with the + // replacement value. the dead phi pass will clean + // up the phi afterwards. + replaceAll(np.phi, r) + np.phi.live = true + changed = true + } + } + } + } + + for _, npList := range newPhis { + for _, np := range npList { + np.phi.live = false + } + } +} + +type BlockSet struct { + idx int + values []bool + count int +} + +func NewBlockSet(size int) *BlockSet { + return &BlockSet{values: make([]bool, size)} +} + +func (s *BlockSet) Set(s2 *BlockSet) { + copy(s.values, s2.values) + s.count = 0 + for _, v := range s.values { + if v { + s.count++ + } + } +} + +func (s *BlockSet) Num() int { + return s.count +} + +func (s *BlockSet) Has(b *BasicBlock) bool { + if b.Index >= len(s.values) { + return false + } + return s.values[b.Index] +} + +// add adds b to the set and returns true if the set changed. +func (s *BlockSet) Add(b *BasicBlock) bool { + if s.values[b.Index] { + return false + } + s.count++ + s.values[b.Index] = true + s.idx = b.Index + + return true +} + +func (s *BlockSet) Clear() { + for j := range s.values { + s.values[j] = false + } + s.count = 0 +} + +// take removes an arbitrary element from a set s and +// returns its index, or returns -1 if empty. 
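+//
+// A small sketch of the intended worklist pattern (fn is any built
+// *Function; a second set records what has been seen so blocks are not
+// reprocessed):
+//
+//	work := NewBlockSet(len(fn.Blocks))
+//	seen := NewBlockSet(len(fn.Blocks))
+//	work.Add(fn.Blocks[0])
+//	seen.Add(fn.Blocks[0])
+//	for {
+//		i := work.Take()
+//		if i < 0 {
+//			break
+//		}
+//		for _, succ := range fn.Blocks[i].Succs {
+//			if seen.Add(succ) { // Add reports whether succ was newly inserted
+//				work.Add(succ)
+//			}
+//		}
+//	}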
+func (s *BlockSet) Take() int { + // [i, end] + for i := s.idx; i < len(s.values); i++ { + if s.values[i] { + s.values[i] = false + s.idx = i + s.count-- + return i + } + } + + // [start, i) + for i := 0; i < s.idx; i++ { + if s.values[i] { + s.values[i] = false + s.idx = i + s.count-- + return i + } + } + + return -1 +} + +type closure struct { + span []uint32 + reachables []interval +} + +type interval uint32 + +const ( + flagMask = 1 << 31 + numBits = 20 + lengthBits = 32 - numBits - 1 + lengthMask = (1<>numBits + } else { + // large interval + i++ + start = uint32(inv & numMask) + end = uint32(r[i]) + } + if idx >= start && idx <= end { + return true + } + } + return false +} + +func (c closure) reachable(id int) []interval { + return c.reachables[c.span[id]:c.span[id+1]] +} + +func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { + visited[b.Index] = true + for _, succ := range b.Succs { + if visited[succ.Index] { + continue + } + visited[succ.Index] = true + c.walk(current, succ, visited) + } +} + +func transitiveClosure(fn *Function) *closure { + reachable := make([]bool, len(fn.Blocks)) + c := &closure{} + c.span = make([]uint32, len(fn.Blocks)+1) + + addInterval := func(start, end uint32) { + if l := end - start; l <= 1<= 0 { // store of zero to Alloc cell + // Replace dominated loads by the zero value. + renaming[instr.index] = nil + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) + } + // Delete the Alloc. + u.Instrs[i] = nil + u.gaps++ + } + + case *Store: + if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell + // Replace dominated loads by the stored value. + renaming[alloc.index] = instr.Val + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", + instr, instr.Val.Name()) + } + if refs := instr.Addr.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + if refs := instr.Val.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + // Delete the Store. + u.Instrs[i] = nil + u.gaps++ + } + + case *Load: + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell + // In theory, we wouldn't be able to replace loads + // directly, because a loaded value could be used in + // different branches, in which case it should be + // replaced with different sigma nodes. But we can't + // simply defer replacement, either, because then + // later stores might incorrectly affect this load. + // + // To avoid doing renaming on _all_ values (instead of + // just loads and stores like we're doing), we make + // sure during code generation that each load is only + // used in one block. For example, in constant switch + // statements, where the tag is only evaluated once, + // we store it in a temporary and load it for each + // comparison, so that we have individual loads to + // replace. + newval := renamed(u.Parent(), renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", + instr.Name(), instr, newval) + } + replaceAll(instr, newval) + u.Instrs[i] = nil + u.gaps++ + } + + case *DebugRef: + if x, ok := instr.X.(*Alloc); ok && x.index >= 0 { + if instr.IsAddr { + instr.X = renamed(u.Parent(), renaming, x) + instr.IsAddr = false + + // Add DebugRef to instr.X's referrers. + if refs := instr.X.Referrers(); refs != nil { + *refs = append(*refs, instr) + } + } else { + // A source expression denotes the address + // of an Alloc that was optimized away. + instr.X = nil + + // Delete the DebugRef. 
+ u.Instrs[i] = nil + u.gaps++ + } + } + } + } + + // update all outgoing sigma nodes with the dominating store + for _, sigmas := range newSigmas[u.Index] { + for _, sigma := range sigmas.sigmas { + if sigma == nil { + continue + } + sigma.X = renamed(u.Parent(), renaming, sigmas.alloc) + } + } + + // For each φ-node in a CFG successor, rename the edge. + for succi, v := range u.Succs { + phis := newPhis[v.Index] + if len(phis) == 0 { + continue + } + i := v.predIndex(u) + for _, np := range phis { + phi := np.phi + alloc := np.alloc + // if there's a sigma node, use it, else use the dominating value + var newval Value + for _, sigmas := range newSigmas[u.Index] { + if sigmas.alloc == alloc && sigmas.sigmas[succi] != nil { + newval = sigmas.sigmas[succi] + break + } + } + if newval == nil { + newval = renamed(u.Parent(), renaming, alloc) + } + if debugLifting { + fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", + phi.Name(), u, v, i, alloc.Name(), newval.Name()) + } + phi.Edges[i] = newval + if prefs := newval.Referrers(); prefs != nil { + *prefs = append(*prefs, phi) + } + } + } + + // Continue depth-first recursion over domtree, pushing a + // fresh copy of the renaming map for each subtree. + r := make([]Value, len(renaming)) + for _, v := range u.dom.children { + // XXX add debugging + copy(r, renaming) + + // on entry to a block, the incoming sigma nodes become the new values for their alloc + if idx := u.succIndex(v); idx != -1 { + for _, sigma := range newSigmas[u.Index] { + if sigma.sigmas[idx] != nil { + r[sigma.alloc.index] = sigma.sigmas[idx] + } + } + } + rename(v, r, newPhis, newSigmas) + } + +} diff --git a/vendor/honnef.co/go/tools/go/ir/lvalue.go b/vendor/honnef.co/go/tools/go/ir/lvalue.go new file mode 100644 index 000000000..f676a1f7a --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/lvalue.go @@ -0,0 +1,116 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// lvalues are the union of addressable expressions and map-index +// expressions. + +import ( + "go/ast" + "go/types" +) + +// An lvalue represents an assignable location that may appear on the +// left-hand side of an assignment. This is a generalization of a +// pointer to permit updates to elements of maps. +// +type lvalue interface { + store(fn *Function, v Value, source ast.Node) // stores v into the location + load(fn *Function, source ast.Node) Value // loads the contents of the location + address(fn *Function) Value // address of the location + typ() types.Type // returns the type of the location +} + +// An address is an lvalue represented by a true pointer. +type address struct { + addr Value + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (a *address) load(fn *Function, source ast.Node) Value { + return emitLoad(fn, a.addr, source) +} + +func (a *address) store(fn *Function, v Value, source ast.Node) { + store := emitStore(fn, a.addr, v, source) + if a.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, a.expr, store.Val, false) + } +} + +func (a *address) address(fn *Function) Value { + if a.expr != nil { + emitDebugRef(fn, a.expr, a.addr, true) + } + return a.addr +} + +func (a *address) typ() types.Type { + return deref(a.addr.Type()) +} + +// An element is an lvalue represented by m[k], the location of an +// element of a map. 
These locations are not addressable +// since pointers cannot be formed from them, but they do support +// load() and store(). +// +type element struct { + m, k Value // map + t types.Type // map element type +} + +func (e *element) load(fn *Function, source ast.Node) Value { + l := &MapLookup{ + X: e.m, + Index: e.k, + } + l.setType(e.t) + return fn.emit(l, source) +} + +func (e *element) store(fn *Function, v Value, source ast.Node) { + up := &MapUpdate{ + Map: e.m, + Key: e.k, + Value: emitConv(fn, v, e.t, source), + } + fn.emit(up, source) +} + +func (e *element) address(fn *Function) Value { + panic("map elements are not addressable") +} + +func (e *element) typ() types.Type { + return e.t +} + +// A blank is a dummy variable whose name is "_". +// It is not reified: loads are illegal and stores are ignored. +// +type blank struct{} + +func (bl blank) load(fn *Function, source ast.Node) Value { + panic("blank.load is illegal") +} + +func (bl blank) store(fn *Function, v Value, source ast.Node) { + s := &BlankStore{ + Val: v, + } + fn.emit(s, source) +} + +func (bl blank) address(fn *Function) Value { + panic("blank var is not addressable") +} + +func (bl blank) typ() types.Type { + // This should be the type of the blank Ident; the typechecker + // doesn't provide this yet, but fortunately, we don't need it + // yet either. + panic("blank.typ is unimplemented") +} diff --git a/vendor/honnef.co/go/tools/go/ir/methods.go b/vendor/honnef.co/go/tools/go/ir/methods.go new file mode 100644 index 000000000..517f448b8 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/methods.go @@ -0,0 +1,239 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines utilities for population of method sets. + +import ( + "fmt" + "go/types" +) + +// MethodValue returns the Function implementing method sel, building +// wrapper methods on demand. It returns nil if sel denotes an +// abstract (interface) method. +// +// Precondition: sel.Kind() == MethodVal. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) MethodValue(sel *types.Selection) *Function { + if sel.Kind() != types.MethodVal { + panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) + } + T := sel.Recv() + if isInterface(T) { + return nil // abstract method + } + if prog.mode&LogSource != 0 { + defer logStack("MethodValue %s %v", T, sel)() + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + return prog.addMethod(prog.createMethodSet(T), sel) +} + +// LookupMethod returns the implementation of the method of type T +// identified by (pkg, name). It returns nil if the method exists but +// is abstract, and panics if T has no such method. +// +func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { + sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) + if sel == nil { + panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) + } + return prog.MethodValue(sel) +} + +// methodSet contains the (concrete) methods of a non-interface type. +type methodSet struct { + mapping map[string]*Function // populated lazily + complete bool // mapping contains all methods +} + +// Precondition: !isInterface(T). 
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) createMethodSet(T types.Type) *methodSet { + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + return mset +} + +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function { + if sel.Kind() == types.MethodExpr { + panic(sel) + } + id := sel.Obj().Id() + fn := mset.mapping[id] + if fn == nil { + obj := sel.Obj().(*types.Func) + + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) + if needsPromotion || needsIndirection { + fn = makeWrapper(prog, sel) + } else { + fn = prog.declaredFunc(obj) + } + if fn.Signature.Recv() == nil { + panic(fn) // missing receiver + } + mset.mapping[id] = fn + } + return fn +} + +// RuntimeTypes returns a new unordered slice containing all +// concrete types in the program for which a complete (non-empty) +// method set is required at run-time. +// +// Thread-safe. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) RuntimeTypes() []types.Type { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + var res []types.Type + prog.methodSets.Iterate(func(T types.Type, v interface{}) { + if v.(*methodSet).complete { + res = append(res, T) + } + }) + return res +} + +// declaredFunc returns the concrete function/method denoted by obj. +// Panic ensues if there is none. +// +func (prog *Program) declaredFunc(obj *types.Func) *Function { + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Function) + } + panic("no concrete method: " + obj.String()) +} + +// needMethodsOf ensures that runtime type information (including the +// complete method set) is available for the specified type T and all +// its subcomponents. +// +// needMethodsOf must be called for at least every type that is an +// operand of some MakeInterface instruction, and for the type of +// every exported package member. +// +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// +// Thread-safe. (Called via emitConv from multiple builder goroutines.) +// +// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. +// +// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// +func (prog *Program) needMethodsOf(T types.Type) { + prog.methodsMu.Lock() + prog.needMethods(T, false) + prog.methodsMu.Unlock() +} + +// Precondition: T is not a method signature (*Signature with Recv()!=nil). +// Recursive case: skip => don't create methods for T. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func (prog *Program) needMethods(T types.Type, skip bool) { + // Each package maintains its own set of types it has visited. + if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { + // needMethods(T) was previously called + if !prevSkip || skip { + return // already seen, with same or false 'skip' value + } + } + prog.runtimeTypes.Set(T, skip) + + tmset := prog.MethodSets.MethodSet(T) + + if !skip && !isInterface(T) && tmset.Len() > 0 { + // Create methods of T. + mset := prog.createMethodSet(T) + if !mset.complete { + mset.complete = true + n := tmset.Len() + for i := 0; i < n; i++ { + prog.addMethod(mset, tmset.At(i)) + } + } + } + + // Recursion over signatures of each method. 
+ for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + prog.needMethods(sig.Params(), false) + prog.needMethods(sig.Results(), false) + } + + switch t := T.(type) { + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + prog.needMethods(t.Elem(), false) + + case *types.Slice: + prog.needMethods(t.Elem(), false) + + case *types.Chan: + prog.needMethods(t.Elem(), false) + + case *types.Map: + prog.needMethods(t.Key(), false) + prog.needMethods(t.Elem(), false) + + case *types.Signature: + if t.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) + } + prog.needMethods(t.Params(), false) + prog.needMethods(t.Results(), false) + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + prog.needMethods(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + prog.needMethods(t.Underlying(), true) + + case *types.Array: + prog.needMethods(t.Elem(), false) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + prog.needMethods(t.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, t.Len(); i < n; i++ { + prog.needMethods(t.At(i).Type(), false) + } + + default: + panic(T) + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/mode.go b/vendor/honnef.co/go/tools/go/ir/mode.go new file mode 100644 index 000000000..da548fdbb --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/mode.go @@ -0,0 +1,98 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines the BuilderMode type and its command-line flag. + +import ( + "bytes" + "fmt" +) + +// BuilderMode is a bitmask of options for diagnostics and checking. +// +// *BuilderMode satisfies the flag.Value interface. Example: +// +// var mode = ir.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } +// +type BuilderMode uint + +const ( + PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout + PrintFunctions // Print function IR code to stdout + PrintSource // Print source code when printing function IR + LogSource // Log source locations as IR builder progresses + SanityCheckFunctions // Perform sanity checking of function bodies + NaiveForm // Build naïve IR form: don't replace local loads/stores with registers + GlobalDebug // Enable debug info for all packages +) + +const BuilderModeDoc = `Options controlling the IR builder. +The value is a sequence of zero or more of these letters: +C perform sanity [C]hecking of the IR form. +D include [D]ebug info for every function. +P print [P]ackage inventory. +F print [F]unction IR code. +A print [A]ST nodes responsible for IR instructions +S log [S]ource locations as IR builder progresses. +N build [N]aive IR form: don't replace local loads/stores with registers. 
+` + +func (m BuilderMode) String() string { + var buf bytes.Buffer + if m&GlobalDebug != 0 { + buf.WriteByte('D') + } + if m&PrintPackages != 0 { + buf.WriteByte('P') + } + if m&PrintFunctions != 0 { + buf.WriteByte('F') + } + if m&PrintSource != 0 { + buf.WriteByte('A') + } + if m&LogSource != 0 { + buf.WriteByte('S') + } + if m&SanityCheckFunctions != 0 { + buf.WriteByte('C') + } + if m&NaiveForm != 0 { + buf.WriteByte('N') + } + return buf.String() +} + +// Set parses the flag characters in s and updates *m. +func (m *BuilderMode) Set(s string) error { + var mode BuilderMode + for _, c := range s { + switch c { + case 'D': + mode |= GlobalDebug + case 'P': + mode |= PrintPackages + case 'F': + mode |= PrintFunctions + case 'A': + mode |= PrintSource + case 'S': + mode |= LogSource + case 'C': + mode |= SanityCheckFunctions + case 'N': + mode |= NaiveForm + default: + return fmt.Errorf("unknown BuilderMode option: %q", c) + } + } + *m = mode + return nil +} + +// Get returns m. +func (m BuilderMode) Get() interface{} { return m } diff --git a/vendor/honnef.co/go/tools/go/ir/print.go b/vendor/honnef.co/go/tools/go/ir/print.go new file mode 100644 index 000000000..f6ed431b6 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/print.go @@ -0,0 +1,472 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file implements the String() methods for all Value and +// Instruction types. + +import ( + "bytes" + "fmt" + "go/types" + "io" + "reflect" + "sort" + + "honnef.co/go/tools/go/types/typeutil" +) + +// relName returns the name of v relative to i. +// In most cases, this is identical to v.Name(), but references to +// Functions (including methods) and Globals use RelString and +// all types are displayed with relType, so that only cross-package +// references are package-qualified. +// +func relName(v Value, i Instruction) string { + if v == nil { + return "" + } + var from *types.Package + if i != nil { + from = i.Parent().pkg() + } + switch v := v.(type) { + case Member: // *Function or *Global + return v.RelString(from) + } + return v.Name() +} + +func relType(t types.Type, from *types.Package) string { + return types.TypeString(t, types.RelativeTo(from)) +} + +func relString(m Member, from *types.Package) string { + // NB: not all globals have an Object (e.g. init$guard), + // so use Package().Object not Object.Package(). + if pkg := m.Package().Pkg; pkg != nil && pkg != from { + return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) + } + return m.Name() +} + +// Value.String() +// +// This method is provided only for debugging. +// It never appears in disassembly, which uses Value.Name(). 
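// relType above is a thin wrapper around types.TypeString with a
// types.RelativeTo qualifier, so only cross-package references come out
// package-qualified. A hedged, self-contained illustration of that effect;
// the helper below is hypothetical and not used elsewhere in this file.
func exampleRelativePrinting() {
	p := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(0, p, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)
	q := types.NewPackage("example.com/q", "q")

	// Printed relative to its own package, the name is unqualified;
	// relative to any other package, it is path-qualified.
	fmt.Println(types.TypeString(named, types.RelativeTo(p))) // T
	fmt.Println(types.TypeString(named, types.RelativeTo(q))) // example.com/p.T
}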
+ +func (v *Parameter) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("Parameter <%s> {%s}", relType(v.Type(), from), v.name) +} + +func (v *FreeVar) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("FreeVar <%s> %s", relType(v.Type(), from), v.Name()) +} + +func (v *Builtin) String() string { + return fmt.Sprintf("Builtin %s", v.Name()) +} + +// Instruction.String() + +func (v *Alloc) String() string { + from := v.Parent().pkg() + storage := "Stack" + if v.Heap { + storage = "Heap" + } + return fmt.Sprintf("%sAlloc <%s>", storage, relType(v.Type(), from)) +} + +func (v *Sigma) String() string { + from := v.Parent().pkg() + s := fmt.Sprintf("Sigma <%s> [b%d] %s", relType(v.Type(), from), v.From.Index, v.X.Name()) + return s +} + +func (v *Phi) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "Phi <%s>", v.Type()) + for i, edge := range v.Edges { + b.WriteString(" ") + // Be robust against malformed CFG. + if v.block == nil { + b.WriteString("??") + continue + } + block := -1 + if i < len(v.block.Preds) { + block = v.block.Preds[i].Index + } + fmt.Fprintf(&b, "%d:", block) + edgeVal := "" // be robust + if edge != nil { + edgeVal = relName(edge, v) + } + b.WriteString(edgeVal) + } + return b.String() +} + +func printCall(v *CallCommon, prefix string, instr Instruction) string { + var b bytes.Buffer + if !v.IsInvoke() { + if value, ok := instr.(Value); ok { + fmt.Fprintf(&b, "%s <%s> %s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr)) + } else { + fmt.Fprintf(&b, "%s %s", prefix, relName(v.Value, instr)) + } + } else { + if value, ok := instr.(Value); ok { + fmt.Fprintf(&b, "%sInvoke <%s> %s.%s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr), v.Method.Name()) + } else { + fmt.Fprintf(&b, "%sInvoke %s.%s", prefix, relName(v.Value, instr), v.Method.Name()) + } + } + for _, arg := range v.Args { + b.WriteString(" ") + b.WriteString(relName(arg, instr)) + } + return b.String() +} + +func (c *CallCommon) String() string { + return printCall(c, "", nil) +} + +func (v *Call) String() string { + return printCall(&v.Call, "Call", v) +} + +func (v *BinOp) String() string { + return fmt.Sprintf("BinOp <%s> {%s} %s %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v), relName(v.Y, v)) +} + +func (v *UnOp) String() string { + return fmt.Sprintf("UnOp <%s> {%s} %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v)) +} + +func (v *Load) String() string { + return fmt.Sprintf("Load <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v)) +} + +func printConv(prefix string, v, x Value) string { + from := v.Parent().pkg() + return fmt.Sprintf("%s <%s> %s", + prefix, + relType(v.Type(), from), + relName(x, v.(Instruction))) +} + +func (v *ChangeType) String() string { return printConv("ChangeType", v, v.X) } +func (v *Convert) String() string { return printConv("Convert", v, v.X) } +func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) } +func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) } + +func (v *MakeClosure) String() string { + from := v.Parent().pkg() + var b bytes.Buffer + fmt.Fprintf(&b, "MakeClosure <%s> %s", relType(v.Type(), from), relName(v.Fn, v)) + if v.Bindings != nil { + for _, c := range v.Bindings { + b.WriteString(" ") + b.WriteString(relName(c, v)) + } + } + return b.String() +} + +func (v *MakeSlice) String() string { + from := v.Parent().pkg() + return 
fmt.Sprintf("MakeSlice <%s> %s %s", + relType(v.Type(), from), + relName(v.Len, v), + relName(v.Cap, v)) +} + +func (v *Slice) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("Slice <%s> %s %s %s %s", + relType(v.Type(), from), relName(v.X, v), relName(v.Low, v), relName(v.High, v), relName(v.Max, v)) +} + +func (v *MakeMap) String() string { + res := "" + if v.Reserve != nil { + res = relName(v.Reserve, v) + } + from := v.Parent().pkg() + return fmt.Sprintf("MakeMap <%s> %s", relType(v.Type(), from), res) +} + +func (v *MakeChan) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("MakeChan <%s> %s", relType(v.Type(), from), relName(v.Size, v)) +} + +func (v *FieldAddr) String() string { + from := v.Parent().pkg() + st := deref(v.X.Type()).Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + return fmt.Sprintf("FieldAddr <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) +} + +func (v *Field) String() string { + st := v.X.Type().Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + from := v.Parent().pkg() + return fmt.Sprintf("Field <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) +} + +func (v *IndexAddr) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("IndexAddr <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) +} + +func (v *Index) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("Index <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) +} + +func (v *MapLookup) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("MapLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) +} + +func (v *StringLookup) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("StringLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) +} + +func (v *Range) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("Range <%s> %s", relType(v.Type(), from), relName(v.X, v)) +} + +func (v *Next) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("Next <%s> %s", relType(v.Type(), from), relName(v.Iter, v)) +} + +func (v *TypeAssert) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("TypeAssert <%s> %s", relType(v.Type(), from), relName(v.X, v)) +} + +func (v *Extract) String() string { + from := v.Parent().pkg() + name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() + return fmt.Sprintf("Extract <%s> [%d] (%s) %s", relType(v.Type(), from), v.Index, name, relName(v.Tuple, v)) +} + +func (s *Jump) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + str := fmt.Sprintf("Jump → b%d", block) + if s.Comment != "" { + str = fmt.Sprintf("%s # %s", str, s.Comment) + } + return str +} + +func (s *Unreachable) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("Unreachable → b%d", block) +} + +func (s *If) String() string { + // Be robust against malformed CFG. 
+ tblock, fblock := -1, -1 + if s.block != nil && len(s.block.Succs) == 2 { + tblock = s.block.Succs[0].Index + fblock = s.block.Succs[1].Index + } + return fmt.Sprintf("If %s → b%d b%d", relName(s.Cond, s), tblock, fblock) +} + +func (s *ConstantSwitch) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "ConstantSwitch %s", relName(s.Tag, s)) + for _, cond := range s.Conds { + fmt.Fprintf(&b, " %s", relName(cond, s)) + } + fmt.Fprint(&b, " →") + for _, succ := range s.block.Succs { + fmt.Fprintf(&b, " b%d", succ.Index) + } + return b.String() +} + +func (s *TypeSwitch) String() string { + from := s.Parent().pkg() + var b bytes.Buffer + fmt.Fprintf(&b, "TypeSwitch <%s> %s", relType(s.typ, from), relName(s.Tag, s)) + for _, cond := range s.Conds { + fmt.Fprintf(&b, " %q", relType(cond, s.block.parent.pkg())) + } + return b.String() +} + +func (s *Go) String() string { + return printCall(&s.Call, "Go", s) +} + +func (s *Panic) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("Panic %s → b%d", relName(s.X, s), block) +} + +func (s *Return) String() string { + var b bytes.Buffer + b.WriteString("Return") + for _, r := range s.Results { + b.WriteString(" ") + b.WriteString(relName(r, s)) + } + return b.String() +} + +func (*RunDefers) String() string { + return "RunDefers" +} + +func (s *Send) String() string { + return fmt.Sprintf("Send %s %s", relName(s.Chan, s), relName(s.X, s)) +} + +func (recv *Recv) String() string { + from := recv.Parent().pkg() + return fmt.Sprintf("Recv <%s> %s", relType(recv.Type(), from), relName(recv.Chan, recv)) +} + +func (s *Defer) String() string { + return printCall(&s.Call, "Defer", s) +} + +func (s *Select) String() string { + var b bytes.Buffer + for i, st := range s.States { + if i > 0 { + b.WriteString(", ") + } + if st.Dir == types.RecvOnly { + b.WriteString("<-") + b.WriteString(relName(st.Chan, s)) + } else { + b.WriteString(relName(st.Chan, s)) + b.WriteString("<-") + b.WriteString(relName(st.Send, s)) + } + } + non := "" + if !s.Blocking { + non = "Non" + } + from := s.Parent().pkg() + return fmt.Sprintf("Select%sBlocking <%s> [%s]", non, relType(s.Type(), from), b.String()) +} + +func (s *Store) String() string { + return fmt.Sprintf("Store {%s} %s %s", + s.Val.Type(), relName(s.Addr, s), relName(s.Val, s)) +} + +func (s *BlankStore) String() string { + return fmt.Sprintf("BlankStore %s", relName(s.Val, s)) +} + +func (s *MapUpdate) String() string { + return fmt.Sprintf("MapUpdate %s %s %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) +} + +func (s *DebugRef) String() string { + p := s.Parent().Prog.Fset.Position(s.Pos()) + var descr interface{} + if s.object != nil { + descr = s.object // e.g. "var x int" + } else { + descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr" + } + var addr string + if s.IsAddr { + addr = "address of " + } + return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) +} + +func (p *Package) String() string { + return "package " + p.Pkg.Path() +} + +var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer + +func (p *Package) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WritePackage(&buf, p) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WritePackage writes to buf a human-readable summary of p. 
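// WritePackage, defined below, lines up member names with fmt's "%-*s"
// verb, computing the pad width from the longest name first. A hedged,
// minimal sketch of that formatting trick; the helper is hypothetical and
// not called by this file.
func exampleAlignedMembers(buf *bytes.Buffer, names []string) {
	maxname := 0
	for _, name := range names {
		if len(name) > maxname {
			maxname = len(name)
		}
	}
	for _, name := range names {
		// "-" left-aligns, "*" takes the field width from maxname.
		fmt.Fprintf(buf, "  func %-*s ...\n", maxname, name)
	}
}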
+func WritePackage(buf *bytes.Buffer, p *Package) { + fmt.Fprintf(buf, "%s:\n", p) + + var names []string + maxname := 0 + for name := range p.Members { + if l := len(name); l > maxname { + maxname = l + } + names = append(names, name) + } + + from := p.Pkg + sort.Strings(names) + for _, name := range names { + switch mem := p.Members[name].(type) { + case *NamedConst: + fmt.Fprintf(buf, " const %-*s %s = %s\n", + maxname, name, mem.Name(), mem.Value.RelString(from)) + + case *Function: + fmt.Fprintf(buf, " func %-*s %s\n", + maxname, name, relType(mem.Type(), from)) + + case *Type: + fmt.Fprintf(buf, " type %-*s %s\n", + maxname, name, relType(mem.Type().Underlying(), from)) + for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { + fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) + } + + case *Global: + fmt.Fprintf(buf, " var %-*s %s\n", + maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + } + } + + fmt.Fprintf(buf, "\n") +} diff --git a/vendor/honnef.co/go/tools/go/ir/sanity.go b/vendor/honnef.co/go/tools/go/ir/sanity.go new file mode 100644 index 000000000..1d4c5f74a --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/sanity.go @@ -0,0 +1,561 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// An optional pass for sanity-checking invariants of the IR representation. +// Currently it checks CFG invariants but little at the instruction level. + +import ( + "fmt" + "go/types" + "io" + "os" + "strings" +) + +type sanity struct { + reporter io.Writer + fn *Function + block *BasicBlock + instrs map[Instruction]struct{} + insane bool +} + +// sanityCheck performs integrity checking of the IR representation +// of the function fn and returns true if it was valid. Diagnostics +// are written to reporter if non-nil, os.Stderr otherwise. Some +// diagnostics are only warnings and do not imply a negative result. +// +// Sanity-checking is intended to facilitate the debugging of code +// transformation passes. +// +func sanityCheck(fn *Function, reporter io.Writer) bool { + if reporter == nil { + reporter = os.Stderr + } + return (&sanity{reporter: reporter}).checkFunction(fn) +} + +// mustSanityCheck is like sanityCheck but panics instead of returning +// a negative result. +// +func mustSanityCheck(fn *Function, reporter io.Writer) { + if !sanityCheck(fn, reporter) { + fn.WriteTo(os.Stderr) + panic("SanityCheck failed") + } +} + +func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { + fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) + if s.block != nil { + fmt.Fprintf(s.reporter, ", block %s", s.block) + } + io.WriteString(s.reporter, ": ") + fmt.Fprintf(s.reporter, format, args...) + io.WriteString(s.reporter, "\n") +} + +func (s *sanity) errorf(format string, args ...interface{}) { + s.insane = true + s.diagnostic("Error", format, args...) +} + +func (s *sanity) warnf(format string, args ...interface{}) { + s.diagnostic("Warning", format, args...) +} + +// findDuplicate returns an arbitrary basic block that appeared more +// than once in blocks, or nil if all were unique. 
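// findDuplicate, defined below, checks the common two-element case before
// falling back to a map. A hedged, generic sketch of the same fast-path /
// slow-path idea over strings; the helper is illustrative and unused by
// the checker.
func firstDuplicateString(xs []string) (string, bool) {
	if len(xs) < 2 {
		return "", false
	}
	if xs[0] == xs[1] {
		return xs[0], true // fast path: a duplicated pair
	}
	seen := make(map[string]bool, len(xs))
	for _, x := range xs {
		if seen[x] {
			return x, true
		}
		seen[x] = true
	}
	return "", false
}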
+func findDuplicate(blocks []*BasicBlock) *BasicBlock { + if len(blocks) < 2 { + return nil + } + if blocks[0] == blocks[1] { + return blocks[0] + } + // Slow path: + m := make(map[*BasicBlock]bool) + for _, b := range blocks { + if m[b] { + return b + } + m[b] = true + } + return nil +} + +func (s *sanity) checkInstr(idx int, instr Instruction) { + switch instr := instr.(type) { + case *If, *Jump, *Return, *Panic, *Unreachable, *ConstantSwitch: + s.errorf("control flow instruction not at end of block") + case *Sigma: + if idx > 0 { + prev := s.block.Instrs[idx-1] + if _, ok := prev.(*Sigma); !ok { + s.errorf("Sigma instruction follows a non-Sigma: %T", prev) + } + } + case *Phi: + if idx == 0 { + // It suffices to apply this check to just the first phi node. + if dup := findDuplicate(s.block.Preds); dup != nil { + s.errorf("phi node in block with duplicate predecessor %s", dup) + } + } else { + prev := s.block.Instrs[idx-1] + switch prev.(type) { + case *Phi, *Sigma: + default: + s.errorf("Phi instruction follows a non-Phi, non-Sigma: %T", prev) + } + } + if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { + s.errorf("phi node has %d edges but %d predecessors", ne, np) + + } else { + for i, e := range instr.Edges { + if e == nil { + s.errorf("phi node '%v' has no value for edge #%d from %s", instr, i, s.block.Preds[i]) + } + } + } + + case *Alloc: + if !instr.Heap { + found := false + for _, l := range s.fn.Locals { + if l == instr { + found = true + break + } + } + if !found { + s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) + } + } + + case *BinOp: + case *Call: + case *ChangeInterface: + case *ChangeType: + case *Convert: + if _, ok := instr.X.Type().Underlying().(*types.Slice); ok { + if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok { + if _, ok := ptr.Elem().(*types.Array); ok { + break + } + } + } + if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { + if _, ok := instr.Type().Underlying().(*types.Basic); !ok { + s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) + } + } + + case *Defer: + case *Extract: + case *Field: + case *FieldAddr: + case *Go: + case *Index: + case *IndexAddr: + case *MapLookup: + case *StringLookup: + case *MakeChan: + case *MakeClosure: + numFree := len(instr.Fn.(*Function).FreeVars) + numBind := len(instr.Bindings) + if numFree != numBind { + s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", + numBind, instr.Fn, numFree) + + } + if recv := instr.Type().(*types.Signature).Recv(); recv != nil { + s.errorf("MakeClosure's type includes receiver %s", recv.Type()) + } + + case *MakeInterface: + case *MakeMap: + case *MakeSlice: + case *MapUpdate: + case *Next: + case *Range: + case *RunDefers: + case *Select: + case *Send: + case *Slice: + case *Store: + case *TypeAssert: + case *UnOp: + case *DebugRef: + case *BlankStore: + case *Load: + case *Parameter: + case *Const: + case *Recv: + case *TypeSwitch: + default: + panic(fmt.Sprintf("Unknown instruction type: %T", instr)) + } + + if call, ok := instr.(CallInstruction); ok { + if call.Common().Signature() == nil { + s.errorf("nil signature: %s", call) + } + } + + // Check that value-defining instructions have valid types + // and a valid referrer list. + if v, ok := instr.(Value); ok { + t := v.Type() + if t == nil { + s.errorf("no type: %s = %s", v.Name(), v) + } else if t == tRangeIter { + // not a proper type; ignore. 
+ } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { + if _, ok := v.(*Const); !ok { + s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + } + } + s.checkReferrerList(v) + } + + // Untyped constants are legal as instruction Operands(), + // for example: + // _ = "foo"[0] + // or: + // if wordsize==64 {...} + + // All other non-Instruction Values can be found via their + // enclosing Function or Package. +} + +func (s *sanity) checkFinalInstr(instr Instruction) { + switch instr := instr.(type) { + case *If: + if nsuccs := len(s.block.Succs); nsuccs != 2 { + s.errorf("If-terminated block has %d successors; expected 2", nsuccs) + return + } + if s.block.Succs[0] == s.block.Succs[1] { + s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) + return + } + + case *Jump: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) + return + } + + case *Return: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Return-terminated block has %d successors; expected none", nsuccs) + return + } + if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { + s.errorf("%d-ary return in %d-ary function", na, nf) + } + + case *Panic: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Panic-terminated block has %d successors; expected one", nsuccs) + return + } + + case *Unreachable: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Unreachable-terminated block has %d successors; expected one", nsuccs) + return + } + + case *ConstantSwitch: + + default: + s.errorf("non-control flow instruction at end of block") + } +} + +func (s *sanity) checkBlock(b *BasicBlock, index int) { + s.block = b + + if b.Index != index { + s.errorf("block has incorrect Index %d", b.Index) + } + if b.parent != s.fn { + s.errorf("block has incorrect parent %s", b.parent) + } + + // Check all blocks are reachable. + // (The entry block is always implicitly reachable, the exit block may be unreachable.) + if index > 1 && len(b.Preds) == 0 { + s.warnf("unreachable block") + if b.Instrs == nil { + // Since this block is about to be pruned, + // tolerating transient problems in it + // simplifies other optimizations. + return + } + } + + // Check predecessor and successor relations are dual, + // and that all blocks in CFG belong to same function. + for _, a := range b.Preds { + found := false + for _, bb := range a.Succs { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) + } + if a.parent != s.fn { + s.errorf("predecessor %s belongs to different function %s", a, a.parent) + } + } + for _, c := range b.Succs { + found := false + for _, bb := range c.Preds { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) + } + if c.parent != s.fn { + s.errorf("successor %s belongs to different function %s", c, c.parent) + } + } + + // Check each instruction is sane. 
+ n := len(b.Instrs) + if n == 0 { + s.errorf("basic block contains no instructions") + } + var rands [10]*Value // reuse storage + for j, instr := range b.Instrs { + if instr == nil { + s.errorf("nil instruction at index %d", j) + continue + } + if b2 := instr.Block(); b2 == nil { + s.errorf("nil Block() for instruction at index %d", j) + continue + } else if b2 != b { + s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) + continue + } + if j < n-1 { + s.checkInstr(j, instr) + } else { + s.checkFinalInstr(instr) + } + + // Check Instruction.Operands. + operands: + for i, op := range instr.Operands(rands[:0]) { + if op == nil { + s.errorf("nil operand pointer %d of %s", i, instr) + continue + } + val := *op + if val == nil { + continue // a nil operand is ok + } + + // Check that "untyped" types only appear on constant operands. + if _, ok := (*op).(*Const); !ok { + if basic, ok := (*op).Type().(*types.Basic); ok { + if basic.Info()&types.IsUntyped != 0 { + s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) + } + } + } + + // Check that Operands that are also Instructions belong to same function. + // TODO(adonovan): also check their block dominates block b. + if val, ok := val.(Instruction); ok { + if val.Block() == nil { + s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) + } else if val.Parent() != s.fn { + s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) + } + } + + // Check that each function-local operand of + // instr refers back to instr. (NB: quadratic) + switch val := val.(type) { + case *Const, *Global, *Builtin: + continue // not local + case *Function: + if val.parent == nil { + continue // only anon functions are local + } + } + + // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. + + if refs := val.Referrers(); refs != nil { + for _, ref := range *refs { + if ref == instr { + continue operands + } + } + s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) + } else { + s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) + } + } + } +} + +func (s *sanity) checkReferrerList(v Value) { + refs := v.Referrers() + if refs == nil { + s.errorf("%s has missing referrer list", v.Name()) + return + } + for i, ref := range *refs { + if _, ok := s.instrs[ref]; !ok { + if val, ok := ref.(Value); ok { + s.errorf("%s.Referrers()[%d] = %s = %s is not an instruction belonging to this function", v.Name(), i, val.Name(), val) + } else { + s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + } + } + } +} + +func (s *sanity) checkFunction(fn *Function) bool { + // TODO(adonovan): check Function invariants: + // - check params match signature + // - check transient fields are nil + // - warn if any fn.Locals do not appear among block instructions. + s.fn = fn + if fn.Prog == nil { + s.errorf("nil Prog") + } + + _ = fn.String() // must not crash + _ = fn.RelString(fn.pkg()) // must not crash + + // All functions have a package, except delegates (which are + // shared across packages, or duplicated as weak symbols in a + // separate-compilation model), and error.Error. 
+ if fn.Pkg == nil { + switch fn.Synthetic { + case SyntheticWrapper, SyntheticBound, SyntheticThunk: + default: + if !strings.HasSuffix(fn.name, "Error") { + s.errorf("nil Pkg") + } + } + } + if src, syn := fn.Synthetic == 0, fn.source != nil; src != syn { + s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) + } + for i, l := range fn.Locals { + if l.Parent() != fn { + s.errorf("Local %s at index %d has wrong parent", l.Name(), i) + } + if l.Heap { + s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) + } + } + // Build the set of valid referrers. + s.instrs = make(map[Instruction]struct{}) + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + s.instrs[instr] = struct{}{} + } + } + for i, p := range fn.Params { + if p.Parent() != fn { + s.errorf("Param %s at index %d has wrong parent", p.Name(), i) + } + // Check common suffix of Signature and Params match type. + if sig := fn.Signature; sig != nil { + j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params + if j < 0 { + continue + } + if !types.Identical(p.Type(), sig.Params().At(j).Type()) { + s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type()) + + } + } + + s.checkReferrerList(p) + } + for i, fv := range fn.FreeVars { + if fv.Parent() != fn { + s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) + } + s.checkReferrerList(fv) + } + + if fn.Blocks != nil && len(fn.Blocks) == 0 { + // Function _had_ blocks (so it's not external) but + // they were "optimized" away, even the entry block. + s.errorf("Blocks slice is non-nil but empty") + } + for i, b := range fn.Blocks { + if b == nil { + s.warnf("nil *BasicBlock at f.Blocks[%d]", i) + continue + } + s.checkBlock(b, i) + } + + s.block = nil + for i, anon := range fn.AnonFuncs { + if anon.Parent() != fn { + s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) + } + } + s.fn = nil + return !s.insane +} + +// sanityCheckPackage checks invariants of packages upon creation. +// It does not require that the package is built. +// Unlike sanityCheck (for functions), it just panics at the first error. +func sanityCheckPackage(pkg *Package) { + if pkg.Pkg == nil { + panic(fmt.Sprintf("Package %s has no Object", pkg)) + } + _ = pkg.String() // must not crash + + for name, mem := range pkg.Members { + if name != mem.Name() { + panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", + pkg.Pkg.Path(), mem, mem.Name(), name)) + } + obj := mem.Object() + if obj == nil { + // This check is sound because fields + // {Global,Function}.object have type + // types.Object. (If they were declared as + // *types.{Var,Func}, we'd have a non-empty + // interface containing a nil pointer.) + + continue // not all members have typechecker objects + } + if obj.Name() != name { + if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { + // Ok. The name of a declared init function varies between + // its types.Func ("init") and its ir.Function ("init#%d"). + } else { + panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", + pkg.Pkg.Path(), mem, obj.Name(), name)) + } + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/source.go b/vendor/honnef.co/go/tools/go/ir/source.go new file mode 100644 index 000000000..93d1ccbd2 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/source.go @@ -0,0 +1,270 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines utilities for working with source positions +// or source-level named entities ("objects"). + +// TODO(adonovan): test that {Value,Instruction}.Pos() positions match +// the originating syntax, as specified. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// EnclosingFunction returns the function that contains the syntax +// node denoted by path. +// +// Syntax associated with package-level variable specifications is +// enclosed by the package's init() function. +// +// Returns nil if not found; reasons might include: +// - the node is not enclosed by any function. +// - the node is within an anonymous function (FuncLit) and +// its IR function has not been created yet +// (pkg.Build() has not yet been called). +// +func EnclosingFunction(pkg *Package, path []ast.Node) *Function { + // Start with package-level function... + fn := findEnclosingPackageLevelFunction(pkg, path) + if fn == nil { + return nil // not in any function + } + + // ...then walk down the nested anonymous functions. + n := len(path) +outer: + for i := range path { + if lit, ok := path[n-1-i].(*ast.FuncLit); ok { + for _, anon := range fn.AnonFuncs { + if anon.Pos() == lit.Type.Func { + fn = anon + continue outer + } + } + // IR function not found: + // - package not yet built, or maybe + // - builder skipped FuncLit in dead block + // (in principle; but currently the Builder + // generates even dead FuncLits). + return nil + } + } + return fn +} + +// HasEnclosingFunction returns true if the AST node denoted by path +// is contained within the declaration of some function or +// package-level variable. +// +// Unlike EnclosingFunction, the behaviour of this function does not +// depend on whether IR code for pkg has been built, so it can be +// used to quickly reject check inputs that will cause +// EnclosingFunction to fail, prior to IR building. +// +func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { + return findEnclosingPackageLevelFunction(pkg, path) != nil +} + +// findEnclosingPackageLevelFunction returns the Function +// corresponding to the package-level function enclosing path. +// +func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { + if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] + switch decl := path[n-2].(type) { + case *ast.GenDecl: + if decl.Tok == token.VAR && n >= 3 { + // Package-level 'var' initializer. + return pkg.init + } + + case *ast.FuncDecl: + // Declared function/method. + fn := findNamedFunc(pkg, decl.Pos()) + if fn == nil && decl.Recv == nil && decl.Name.Name == "init" { + // Hack: return non-nil when IR is not yet + // built so that HasEnclosingFunction works. + return pkg.init + } + return fn + } + } + return nil // not in any function +} + +// findNamedFunc returns the named function whose FuncDecl.Ident is at +// position pos. +// +func findNamedFunc(pkg *Package, pos token.Pos) *Function { + for _, fn := range pkg.Functions { + if fn.Pos() == pos { + return fn + } + } + return nil +} + +// ValueForExpr returns the IR Value that corresponds to non-constant +// expression e. +// +// It returns nil if no value was found, e.g. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) 
+// - e is a reference to nil or a built-in function. +// - the value was optimised away. +// +// If e is an addressable expression used in an lvalue context, +// value is the address denoted by e, and isAddr is true. +// +// The types of e (or &e, if isAddr) and the result are equal +// (modulo "untyped" bools resulting from comparisons). +// +// (Tip: to find the ir.Value given a source position, use +// astutil.PathEnclosingInterval to locate the ast.Node, then +// EnclosingFunction to locate the Function, then ValueForExpr to find +// the ir.Value.) +// +func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { + if f.debugInfo() { // (opt) + e = unparen(e) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if ref, ok := instr.(*DebugRef); ok { + if ref.Expr == e { + return ref.X, ref.IsAddr + } + } + } + } + } + return +} + +// --- Lookup functions for source-level named entities (types.Objects) --- + +// Package returns the IR Package corresponding to the specified +// type-checker package object. +// It returns nil if no such IR package has been created. +// +func (prog *Program) Package(obj *types.Package) *Package { + return prog.packages[obj] +} + +// packageLevelValue returns the package-level value corresponding to +// the specified named object, which may be a package-level const +// (*Const), var (*Global) or func (*Function) of some package in +// prog. It returns nil if the object is not found. +// +func (prog *Program) packageLevelValue(obj types.Object) Value { + if pkg, ok := prog.packages[obj.Pkg()]; ok { + return pkg.values[obj] + } + return nil +} + +// FuncValue returns the concrete Function denoted by the source-level +// named function obj, or nil if obj denotes an interface method. +// +// TODO(adonovan): check the invariant that obj.Type() matches the +// result's Signature, both in the params/results and in the receiver. +// +func (prog *Program) FuncValue(obj *types.Func) *Function { + fn, _ := prog.packageLevelValue(obj).(*Function) + return fn +} + +// ConstValue returns the IR Value denoted by the source-level named +// constant obj. +// +func (prog *Program) ConstValue(obj *types.Const) *Const { + // TODO(adonovan): opt: share (don't reallocate) + // Consts for const objects and constant ast.Exprs. + + // Universal constant? {true,false,nil} + if obj.Parent() == types.Universe { + return NewConst(obj.Val(), obj.Type()) + } + // Package-level named constant? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Const) + } + return NewConst(obj.Val(), obj.Type()) +} + +// VarValue returns the IR Value that corresponds to a specific +// identifier denoting the source-level named variable obj. +// +// VarValue returns nil if a local variable was not found, perhaps +// because its package was not built, the debug information was not +// requested during IR construction, or the value was optimized away. +// +// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), +// and that ident must resolve to obj. +// +// pkg is the package enclosing the reference. (A reference to a var +// always occurs within a function, so we need to know where to find it.) +// +// If the identifier is a field selector and its base expression is +// non-addressable, then VarValue returns the value of that field. +// For example: +// func f() struct {x int} +// f().x // VarValue(x) returns a *Field instruction of type int +// +// All other identifiers denote addressable locations (variables). 
+// For them, VarValue may return either the variable's address or its +// value, even when the expression is evaluated only for its value; the +// situation is reported by isAddr, the second component of the result. +// +// If !isAddr, the returned value is the one associated with the +// specific identifier. For example, +// var x int // VarValue(x) returns Const 0 here +// x = 1 // VarValue(x) returns Const 1 here +// +// It is not specified whether the value or the address is returned in +// any particular case, as it may depend upon optimizations performed +// during IR code generation, such as registerization, constant +// folding, avoidance of materialization of subexpressions, etc. +// +func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { + // All references to a var are local to some function, possibly init. + fn := EnclosingFunction(pkg, ref) + if fn == nil { + return // e.g. def of struct field; IR not built? + } + + id := ref[0].(*ast.Ident) + + // Defining ident of a parameter? + if id.Pos() == obj.Pos() { + for _, param := range fn.Params { + if param.Object() == obj { + return param, false + } + } + } + + // Other ident? + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if dr, ok := instr.(*DebugRef); ok { + if dr.Pos() == id.Pos() { + return dr.X, dr.IsAddr + } + } + } + } + + // Defining ident of package-level var? + if v := prog.packageLevelValue(obj); v != nil { + return v.(*Global), true + } + + return // e.g. debug info not requested, or var optimized away +} diff --git a/vendor/honnef.co/go/tools/go/ir/ssa.go b/vendor/honnef.co/go/tools/go/ir/ssa.go new file mode 100644 index 000000000..6dfdfcd80 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/ssa.go @@ -0,0 +1,1898 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This package defines a high-level intermediate representation for +// Go programs using static single-information (SSI) form. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "sync" + + "honnef.co/go/tools/go/types/typeutil" +) + +type ID int + +// A Program is a partial or complete Go program converted to IR form. +type Program struct { + Fset *token.FileSet // position information for the files of this Program + PrintFunc string // create ir.html for function specified in PrintFunc + imported map[string]*Package // all importable Packages, keyed by import path + packages map[*types.Package]*Package // all loaded Packages, keyed by object + mode BuilderMode // set of mode bits for IR construction + MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets + + methodsMu sync.Mutex // guards the following maps: + methodSets typeutil.Map // maps type to its concrete methodSet + runtimeTypes typeutil.Map // types for which rtypes are needed + canon typeutil.Map // type canonicalization map + bounds map[*types.Func]*Function // bounds for curried x.Method closures + thunks map[selectionKey]*Function // thunks for T.Method expressions +} + +// A Package is a single analyzed Go package containing Members for +// all package-level functions, variables, constants and types it +// declares. These may be accessed directly via Members, or via the +// type-specific accessor methods Func, Type, Var and Const. 
+// +// Members also contains entries for "init" (the synthetic package +// initializer) and "init#%d", the nth declared init function, +// and unspecified other things too. +// +type Package struct { + Prog *Program // the owning program + Pkg *types.Package // the corresponding go/types.Package + Members map[string]Member // all package members keyed by name (incl. init and init#%d) + Functions []*Function // all functions, excluding anonymous ones + values map[types.Object]Value // package members (incl. types and methods), keyed by object + init *Function // Func("init"); the package's init function + debug bool // include full debug info in this package + printFunc string // which function to print in HTML form + + // The following fields are set transiently, then cleared + // after building. + buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs +} + +// A Member is a member of a Go package, implemented by *NamedConst, +// *Global, *Function, or *Type; they are created by package-level +// const, var, func and type declarations respectively. +// +type Member interface { + Name() string // declared name of the package member + String() string // package-qualified name of the package member + RelString(*types.Package) string // like String, but relative refs are unqualified + Object() types.Object // typechecker's object for this member, if any + Type() types.Type // type of the package member + Token() token.Token // token.{VAR,FUNC,CONST,TYPE} + Package() *Package // the containing package +} + +// A Type is a Member of a Package representing a package-level named type. +type Type struct { + object *types.TypeName + pkg *Package +} + +// A NamedConst is a Member of a Package representing a package-level +// named constant. +// +// Pos() returns the position of the declaring ast.ValueSpec.Names[*] +// identifier. +// +// NB: a NamedConst is not a Value; it contains a constant Value, which +// it augments with the name and position of its 'const' declaration. +// +type NamedConst struct { + object *types.Const + Value *Const + pkg *Package +} + +// A Value is an IR value that can be referenced by an instruction. +type Value interface { + setID(ID) + + // Name returns the name of this value, and determines how + // this Value appears when used as an operand of an + // Instruction. + // + // This is the same as the source name for Parameters, + // Builtins, Functions, FreeVars, Globals. + // For constants, it is a representation of the constant's value + // and type. For all other Values this is the name of the + // virtual register defined by the instruction. + // + // The name of an IR Value is not semantically significant, + // and may not even be unique within a function. + Name() string + + // ID returns the ID of this value. IDs are unique within a single + // function and are densely numbered, but may contain gaps. + // Values and other Instructions share the same ID space. + // Globally, values are identified by their addresses. However, + // IDs exist to facilitate efficient storage of mappings between + // values and data when analysing functions. + // + // NB: IDs are allocated late in the IR construction process and + // are not available to early stages of said process. 
+ ID() ID + + // If this value is an Instruction, String returns its + // disassembled form; otherwise it returns unspecified + // human-readable information about the Value, such as its + // kind, name and type. + String() string + + // Type returns the type of this value. Many instructions + // (e.g. IndexAddr) change their behaviour depending on the + // types of their operands. + Type() types.Type + + // Parent returns the function to which this Value belongs. + // It returns nil for named Functions, Builtin and Global. + Parent() *Function + + // Referrers returns the list of instructions that have this + // value as one of their operands; it may contain duplicates + // if an instruction has a repeated operand. + // + // Referrers actually returns a pointer through which the + // caller may perform mutations to the object's state. + // + // Referrers is currently only defined if Parent()!=nil, + // i.e. for the function-local values FreeVar, Parameter, + // Functions (iff anonymous) and all value-defining instructions. + // It returns nil for named Functions, Builtin and Global. + // + // Instruction.Operands contains the inverse of this relation. + Referrers() *[]Instruction + + Operands(rands []*Value) []*Value // nil for non-Instructions + + // Source returns the AST node responsible for creating this + // value. A single AST node may be responsible for more than one + // value, and not all values have an associated AST node. + // + // Do not use this method to find a Value given an ast.Expr; use + // ValueForExpr instead. + Source() ast.Node + + // Pos returns Source().Pos() if Source is not nil, else it + // returns token.NoPos. + Pos() token.Pos +} + +// An Instruction is an IR instruction that computes a new Value or +// has some effect. +// +// An Instruction that defines a value (e.g. BinOp) also implements +// the Value interface; an Instruction that only has an effect (e.g. Store) +// does not. +// +type Instruction interface { + setSource(ast.Node) + setID(ID) + + // String returns the disassembled form of this value. + // + // Examples of Instructions that are Values: + // "BinOp {+} t1 t2" (BinOp) + // "Call len t1" (Call) + // Note that the name of the Value is not printed. + // + // Examples of Instructions that are not Values: + // "Return t1" (Return) + // "Store {int} t2 t1" (Store) + // + // (The separation of Value.Name() from Value.String() is useful + // for some analyses which distinguish the operation from the + // value it defines, e.g., 'y = local int' is both an allocation + // of memory 'local int' and a definition of a pointer y.) + String() string + + // ID returns the ID of this instruction. IDs are unique within a single + // function and are densely numbered, but may contain gaps. + // Globally, instructions are identified by their addresses. However, + // IDs exist to facilitate efficient storage of mappings between + // instructions and data when analysing functions. + // + // NB: IDs are allocated late in the IR construction process and + // are not available to early stages of said process. + ID() ID + + // Parent returns the function to which this instruction + // belongs. + Parent() *Function + + // Block returns the basic block to which this instruction + // belongs. + Block() *BasicBlock + + // setBlock sets the basic block to which this instruction belongs. + setBlock(*BasicBlock) + + // Operands returns the operands of this instruction: the + // set of Values it references. 
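+	//
+	// An illustrative sketch (not upstream documentation) of walking the
+	// operands of an instruction instr together with their referrers:
+	//
+	//	var rands []*Value
+	//	for _, rand := range instr.Operands(rands[:0]) {
+	//		if v := *rand; v != nil {
+	//			if refs := v.Referrers(); refs != nil {
+	//				_ = *refs // instructions that use v
+	//			}
+	//		}
+	//	}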
+ // + // Specifically, it appends their addresses to rands, a + // user-provided slice, and returns the resulting slice, + // permitting avoidance of memory allocation. + // + // The operands are appended in undefined order, but the order + // is consistent for a given Instruction; the addresses are + // always non-nil but may point to a nil Value. Clients may + // store through the pointers, e.g. to effect a value + // renaming. + // + // Value.Referrers is a subset of the inverse of this + // relation. (Referrers are not tracked for all types of + // Values.) + Operands(rands []*Value) []*Value + + Referrers() *[]Instruction // nil for non-Values + + // Source returns the AST node responsible for creating this + // instruction. A single AST node may be responsible for more than + // one instruction, and not all instructions have an associated + // AST node. + Source() ast.Node + + // Pos returns Source().Pos() if Source is not nil, else it + // returns token.NoPos. + Pos() token.Pos +} + +// A Node is a node in the IR value graph. Every concrete type that +// implements Node is also either a Value, an Instruction, or both. +// +// Node contains the methods common to Value and Instruction, plus the +// Operands and Referrers methods generalized to return nil for +// non-Instructions and non-Values, respectively. +// +// Node is provided to simplify IR graph algorithms. Clients should +// use the more specific and informative Value or Instruction +// interfaces where appropriate. +// +type Node interface { + setID(ID) + + // Common methods: + ID() ID + String() string + Source() ast.Node + Pos() token.Pos + Parent() *Function + + // Partial methods: + Operands(rands []*Value) []*Value // nil for non-Instructions + Referrers() *[]Instruction // nil for non-Values +} + +type Synthetic int + +const ( + SyntheticLoadedFromExportData Synthetic = iota + 1 + SyntheticPackageInitializer + SyntheticThunk + SyntheticWrapper + SyntheticBound +) + +func (syn Synthetic) String() string { + switch syn { + case SyntheticLoadedFromExportData: + return "loaded from export data" + case SyntheticPackageInitializer: + return "package initializer" + case SyntheticThunk: + return "thunk" + case SyntheticWrapper: + return "wrapper" + case SyntheticBound: + return "bound" + default: + return fmt.Sprintf("Synthetic(%d)", syn) + } +} + +// Function represents the parameters, results, and code of a function +// or method. +// +// If Blocks is nil, this indicates an external function for which no +// Go source code is available. In this case, FreeVars and Locals +// are nil too. Clients performing whole-program analysis must +// handle external functions specially. +// +// Blocks contains the function's control-flow graph (CFG). +// Blocks[0] is the function entry point; block order is not otherwise +// semantically significant, though it may affect the readability of +// the disassembly. +// To iterate over the blocks in dominance order, use DomPreorder(). +// +// A nested function (Parent()!=nil) that refers to one or more +// lexically enclosing local variables ("free variables") has FreeVars. +// Such functions cannot be called directly but require a +// value created by MakeClosure which, via its Bindings, supplies +// values for these parameters. +// +// If the function is a method (Signature.Recv() != nil) then the first +// element of Params is the receiver parameter. +// +// A Go package may declare many functions called "init". 
+// For each one, Object().Name() returns "init" but Name() returns +// "init#1", etc, in declaration order. +// +// Pos() returns the declaring ast.FuncLit.Type.Func or the position +// of the ast.FuncDecl.Name, if the function was explicit in the +// source. Synthetic wrappers, for which Synthetic != "", may share +// the same position as the function they wrap. +// Syntax.Pos() always returns the position of the declaring "func" token. +// +// Type() returns the function's Signature. +// +type Function struct { + node + + name string + object types.Object // a declared *types.Func or one of its wrappers + method *types.Selection // info about provenance of synthetic methods + Signature *types.Signature + + Synthetic Synthetic + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + Params []*Parameter // function parameters; for methods, includes receiver + FreeVars []*FreeVar // free variables whose values must be supplied by closure + Locals []*Alloc // local variables of this function + Blocks []*BasicBlock // basic blocks of the function; nil => external + Exit *BasicBlock // The function's exit block + AnonFuncs []*Function // anonymous functions directly beneath this one + referrers []Instruction // referring instructions (iff Parent() != nil) + NoReturn NoReturn // Calling this function will always terminate control flow. + + *functionBody +} + +type NoReturn uint8 + +const ( + Returns NoReturn = iota + AlwaysExits + AlwaysUnwinds + NeverReturns +) + +type functionBody struct { + // The following fields are set transiently during building, + // then cleared. + currentBlock *BasicBlock // where to emit code + objects map[types.Object]Value // addresses of local variables + namedResults []*Alloc // tuple of named results + implicitResults []*Alloc // tuple of results + targets *targets // linked stack of branch targets + lblocks map[*ast.Object]*lblock // labelled blocks + consts []*Const + wr *HTMLWriter + fakeExits BlockSet + blocksets [5]BlockSet + hasDefer bool + + // a contiguous block of instructions that will be used by blocks, + // to avoid making multiple allocations. + scratchInstructions []Instruction +} + +func (fn *Function) results() []*Alloc { + if len(fn.namedResults) > 0 { + return fn.namedResults + } + return fn.implicitResults +} + +// BasicBlock represents an IR basic block. +// +// The final element of Instrs is always an explicit transfer of +// control (If, Jump, Return, Panic, or Unreachable). +// +// A block may contain no Instructions only if it is unreachable, +// i.e., Preds is nil. Empty blocks are typically pruned. +// +// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) +// graph independent of the IR Value graph: the control-flow graph or +// CFG. It is illegal for multiple edges to exist between the same +// pair of blocks. +// +// Each BasicBlock is also a node in the dominator tree of the CFG. +// The tree may be navigated using Idom()/Dominees() and queried using +// Dominates(). +// +// The order of Preds and Succs is significant (to Phi and If +// instructions, respectively). 
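+//
+// An illustrative sketch (not upstream documentation): printing a Function's
+// CFG using only the exported fields of Function and BasicBlock:
+//
+//	func printCFG(fn *Function) {
+//		for _, b := range fn.Blocks {
+//			fmt.Printf("b%d (%s):", b.Index, b.Comment)
+//			for _, succ := range b.Succs {
+//				fmt.Printf(" ->b%d", succ.Index)
+//			}
+//			fmt.Println()
+//		}
+//	}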
+// +type BasicBlock struct { + Index int // index of this block within Parent().Blocks + Comment string // optional label; no semantic significance + parent *Function // parent function + Instrs []Instruction // instructions in order + Preds, Succs []*BasicBlock // predecessors and successors + succs2 [2]*BasicBlock // initial space for Succs + dom domInfo // dominator tree info + pdom domInfo // post-dominator tree info + post int + gaps int // number of nil Instrs (transient) + rundefers int // number of rundefers (transient) +} + +// Pure values ---------------------------------------- + +// A FreeVar represents a free variable of the function to which it +// belongs. +// +// FreeVars are used to implement anonymous functions, whose free +// variables are lexically captured in a closure formed by +// MakeClosure. The value of such a free var is an Alloc or another +// FreeVar and is considered a potentially escaping heap address, with +// pointer type. +// +// FreeVars are also used to implement bound method closures. Such a +// free var represents the receiver value and may be of any type that +// has concrete methods. +// +// Pos() returns the position of the value that was captured, which +// belongs to an enclosing function. +// +type FreeVar struct { + node + + name string + typ types.Type + parent *Function + referrers []Instruction + + // Transiently needed during building. + outer Value // the Value captured from the enclosing context. +} + +// A Parameter represents an input parameter of a function. +// +type Parameter struct { + register + + name string + object types.Object // a *types.Var; nil for non-source locals +} + +// A Const represents the value of a constant expression. +// +// The underlying type of a constant may be any boolean, numeric, or +// string type. In addition, a Const may represent the nil value of +// any reference type---interface, map, channel, pointer, slice, or +// function---but not "untyped nil". +// +// All source-level constant expressions are represented by a Const +// of the same type and value. +// +// Value holds the exact value of the constant, independent of its +// Type(), using the same representation as package go/constant uses for +// constants, or nil for a typed nil value. +// +// Pos() returns token.NoPos. +// +// Example printed form: +// Const {42} +// Const {"test"} +// Const {(3 + 4i)} +// +type Const struct { + register + + Value constant.Value +} + +// A Global is a named Value holding the address of a package-level +// variable. +// +// Pos() returns the position of the ast.ValueSpec.Names[*] +// identifier. +// +type Global struct { + node + + name string + object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard + typ types.Type + + Pkg *Package +} + +// A Builtin represents a specific use of a built-in function, e.g. len. +// +// Builtins are immutable values. Builtins do not have addresses. +// Builtins can only appear in CallCommon.Func. +// +// Name() indicates the function: one of the built-in functions from the +// Go spec (excluding "make" and "new") or one of these ir-defined +// intrinsics: +// +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T +// +// // noreturnWasPanic returns true if the previously called +// // function panicked, false if it exited the process. 
+// func ir:noreturnWasPanic() bool +// +// Object() returns a *types.Builtin for built-ins defined by the spec, +// nil for others. +// +// Type() returns a *types.Signature representing the effective +// signature of the built-in for this call. +// +type Builtin struct { + node + + name string + sig *types.Signature +} + +// Value-defining instructions ---------------------------------------- + +// The Alloc instruction reserves space for a variable of the given type, +// zero-initializes it, and yields its address. +// +// Alloc values are always addresses, and have pointer types, so the +// type of the allocated variable is actually +// Type().Underlying().(*types.Pointer).Elem(). +// +// If Heap is false, Alloc allocates space in the function's +// activation record (frame); we refer to an Alloc(Heap=false) as a +// "stack" alloc. Each stack Alloc returns the same address each time +// it is executed within the same activation; the space is +// re-initialized to zero. +// +// If Heap is true, Alloc allocates space in the heap; we +// refer to an Alloc(Heap=true) as a "heap" alloc. Each heap Alloc +// returns a different address each time it is executed. +// +// When Alloc is applied to a channel, map or slice type, it returns +// the address of an uninitialized (nil) reference of that kind; store +// the result of MakeSlice, MakeMap or MakeChan in that location to +// instantiate these types. +// +// Pos() returns the ast.CompositeLit.Lbrace for a composite literal, +// or the ast.CallExpr.Rparen for a call to new() or for a call that +// allocates a varargs slice. +// +// Example printed form: +// t1 = StackAlloc <*int> +// t2 = HeapAlloc <*int> (new) +// +type Alloc struct { + register + Heap bool + index int // dense numbering; for lifting +} + +var _ Instruction = (*Sigma)(nil) +var _ Value = (*Sigma)(nil) + +// The Sigma instruction represents an SSI σ-node, which splits values +// at branches in the control flow. +// +// Conceptually, σ-nodes exist at the end of blocks that branch and +// constitute parallel assignments to one value per destination block. +// However, such a representation would be awkward to work with, so +// instead we place σ-nodes at the beginning of branch targets. The +// From field denotes to which incoming edge the node applies. +// +// Within a block, all σ-nodes must appear before all non-σ nodes. +// +// Example printed form: +// t2 = Sigma [#0] t1 (x) +// +type Sigma struct { + register + From *BasicBlock + X Value + + live bool // used during lifting +} + +// The Phi instruction represents an SSA φ-node, which combines values +// that differ across incoming control-flow edges and yields a new +// value. Within a block, all φ-nodes must appear before all non-φ, non-σ +// nodes. +// +// Pos() returns the position of the && or || for short-circuit +// control-flow joins, or that of the *Alloc for φ-nodes inserted +// during SSA renaming. +// +// Example printed form: +// t3 = Phi 2:t1 4:t2 (x) +// +type Phi struct { + register + Edges []Value // Edges[i] is value for Block().Preds[i] + + live bool // used during lifting +} + +// The Call instruction represents a function or method call. +// +// The Call instruction yields the function result if there is exactly +// one. Otherwise it returns a tuple, the components of which are +// accessed via Extract. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.CallExpr.Lparen, if explicit in the source. 
+// +// Example printed form: +// t3 = Call <()> println t1 t2 +// t4 = Call <()> foo$1 +// t6 = Invoke t5.String +// +type Call struct { + register + Call CallCommon +} + +// The BinOp instruction yields the result of binary operation X Op Y. +// +// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. +// +// Example printed form: +// t3 = BinOp {+} t2 t1 +// +type BinOp struct { + register + // One of: + // ADD SUB MUL QUO REM + - * / % + // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^ + // EQL NEQ LSS LEQ GTR GEQ == != < <= < >= + Op token.Token + X, Y Value +} + +// The UnOp instruction yields the result of Op X. +// XOR is bitwise complement. +// SUB is negation. +// NOT is logical negation. +// +// +// Example printed form: +// t2 = UnOp {^} t1 +// +type UnOp struct { + register + Op token.Token // One of: NOT SUB XOR ! - ^ + X Value +} + +// The Load instruction loads a value from a memory address. +// +// For implicit memory loads, Pos() returns the position of the +// most closely associated source-level construct; the details are not +// specified. +// +// Example printed form: +// t2 = Load t1 +// +type Load struct { + register + X Value +} + +// The ChangeType instruction applies to X a value-preserving type +// change to Type(). +// +// Type changes are permitted: +// - between a named type and its underlying type. +// - between two named types of the same underlying type. +// - between (possibly named) pointers to identical base types. +// - from a bidirectional channel to a read- or write-channel, +// optionally adding/removing a name. +// +// This operation cannot fail dynamically. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t2 = ChangeType <*T> t1 +// +type ChangeType struct { + register + X Value +} + +// The Convert instruction yields the conversion of value X to type +// Type(). One or both of those types is basic (but possibly named). +// +// A conversion may change the value and representation of its operand. +// Conversions are permitted: +// - between real numeric types. +// - between complex numeric types. +// - between string and []byte or []rune. +// - between pointers and unsafe.Pointer. +// - between unsafe.Pointer and uintptr. +// - from (Unicode) integer to (UTF-8) string. +// - from slice to array pointer. +// A conversion may imply a type name change also. +// +// Conversions of untyped string/number/bool constants to a specific +// representation are eliminated during IR construction. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t2 = Convert <[]byte> t1 +// +type Convert struct { + register + X Value +} + +// ChangeInterface constructs a value of one interface type from a +// value of another interface type known to be assignable to it. +// This operation cannot fail. +// +// Pos() returns the ast.CallExpr.Lparen if the instruction arose from +// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the +// instruction arose from an explicit e.(T) operation; or token.NoPos +// otherwise. +// +// Example printed form: +// t2 = ChangeInterface t1 +// +type ChangeInterface struct { + register + X Value +} + +// MakeInterface constructs an instance of an interface type from a +// value of a concrete type. 
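+//
+// An illustrative sketch (not upstream documentation; myError is a made-up
+// type): in Go source such as
+//
+//	var err error = myError{}
+//
+// the conversion of the concrete myError value to the error interface type is
+// represented by a MakeInterface instruction whose operand X is the myError
+// value.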
+// +// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set +// of X, and Program.MethodValue(m) to find the implementation of a method. +// +// To construct the zero value of an interface type T, use: +// NewConst(constant.MakeNil(), T, pos) +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// t2 = MakeInterface t1 +// +type MakeInterface struct { + register + X Value +} + +// The MakeClosure instruction yields a closure value whose code is +// Fn and whose free variables' values are supplied by Bindings. +// +// Type() returns a (possibly named) *types.Signature. +// +// Pos() returns the ast.FuncLit.Type.Func for a function literal +// closure or the ast.SelectorExpr.Sel for a bound method closure. +// +// Example printed form: +// t1 = MakeClosure foo$1 t1 t2 +// t5 = MakeClosure (T).foo$bound t4 +// +type MakeClosure struct { + register + Fn Value // always a *Function + Bindings []Value // values for each free variable in Fn.FreeVars +} + +// The MakeMap instruction creates a new hash-table-based map object +// and yields a value of kind map. +// +// Type() returns a (possibly named) *types.Map. +// +// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or +// the ast.CompositeLit.Lbrack if created by a literal. +// +// Example printed form: +// t1 = MakeMap +// t2 = MakeMap t1 +// +type MakeMap struct { + register + Reserve Value // initial space reservation; nil => default +} + +// The MakeChan instruction creates a new channel object and yields a +// value of kind chan. +// +// Type() returns a (possibly named) *types.Chan. +// +// Pos() returns the ast.CallExpr.Lparen for the make(chan) that +// created it. +// +// Example printed form: +// t3 = MakeChan t1 +// t4 = MakeChan t2 +// +type MakeChan struct { + register + Size Value // int; size of buffer; zero => synchronous. +} + +// The MakeSlice instruction yields a slice of length Len backed by a +// newly allocated array of length Cap. +// +// Both Len and Cap must be non-nil Values of integer type. +// +// (Alloc(types.Array) followed by Slice will not suffice because +// Alloc can only create arrays of constant length.) +// +// Type() returns a (possibly named) *types.Slice. +// +// Pos() returns the ast.CallExpr.Lparen for the make([]T) that +// created it. +// +// Example printed form: +// t3 = MakeSlice <[]string> t1 t2 +// t4 = MakeSlice t1 t2 +// +type MakeSlice struct { + register + Len Value + Cap Value +} + +// The Slice instruction yields a slice of an existing string, slice +// or *array X between optional integer bounds Low and High. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns string if the type of X was string, otherwise a +// *types.Slice with the same element type as X. +// +// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice +// operation, the ast.CompositeLit.Lbrace if created by a literal, or +// NoPos if not explicit in the source (e.g. a variadic argument slice). +// +// Example printed form: +// t4 = Slice <[]int> t3 t2 t1 +// +type Slice struct { + register + X Value // slice, string, or *array + Low, High, Max Value // each may be nil +} + +// The FieldAddr instruction yields the address of Field of *struct X. +// +// The field is identified by its index within the field list of the +// struct type of X. +// +// Dynamically, this instruction panics if X evaluates to a nil +// pointer. 
+// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t2 = FieldAddr <*int> [0] (X) t1 +// +type FieldAddr struct { + register + X Value // *struct + Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field) +} + +// The Field instruction yields the Field of struct X. +// +// The field is identified by its index within the field list of the +// struct type of X; by using numeric indices we avoid ambiguity of +// package-local identifiers and permit compact representations. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. +// +// Example printed form: +// t2 = FieldAddr [0] (X) t1 +// +type Field struct { + register + X Value // struct + Field int // index into X.Type().(*types.Struct).Fields +} + +// The IndexAddr instruction yields the address of the element at +// index Index of collection X. Index is an integer expression. +// +// The elements of maps and strings are not addressable; use StringLookup, MapLookup or +// MapUpdate instead. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if +// explicit in the source. +// +// Example printed form: +// t3 = IndexAddr <*int> t2 t1 +// +type IndexAddr struct { + register + X Value // slice or *array, + Index Value // numeric index +} + +// The Index instruction yields element Index of array X. +// +// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if +// explicit in the source. +// +// Example printed form: +// t3 = Index t2 t1 +// +type Index struct { + register + X Value // array + Index Value // integer index +} + +// The MapLookup instruction yields element Index of collection X, a map. +// +// If CommaOk, the result is a 2-tuple of the value above and a +// boolean indicating the result of a map membership test for the key. +// The components of the tuple are accessed using Extract. +// +// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. +// +// Example printed form: +// t4 = MapLookup t3 t1 +// t6 = MapLookup <(string, bool)> t3 t2 +// +type MapLookup struct { + register + X Value // map + Index Value // key-typed index + CommaOk bool // return a value,ok pair +} + +// The StringLookup instruction yields element Index of collection X, a string. +// Index is an integer expression. +// +// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. +// +// Example printed form: +// t3 = StringLookup t2 t1 +// +type StringLookup struct { + register + X Value // string + Index Value // numeric index +} + +// SelectState is a helper for Select. +// It represents one goal state and its corresponding communication. +// +type SelectState struct { + Dir types.ChanDir // direction of case (SendOnly or RecvOnly) + Chan Value // channel to use (for send or receive) + Send Value // value to send (for send) + Pos token.Pos // position of token.ARROW + DebugNode ast.Node // ast.SendStmt or ast.UnaryExpr(<-) [debug mode] +} + +// The Select instruction tests whether (or blocks until) one +// of the specified sent or received states is entered. +// +// Let n be the number of States for which Dir==RECV and Tᵢ (0 ≤ i < n) +// be the element type of each such state's Chan. 
+// Select returns an n+2-tuple +// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) +// The tuple's components, described below, must be accessed via the +// Extract instruction. +// +// If Blocking, select waits until exactly one state holds, i.e. a +// channel becomes ready for the designated operation of sending or +// receiving; select chooses one among the ready states +// pseudorandomly, performs the send or receive operation, and sets +// 'index' to the index of the chosen channel. +// +// If !Blocking, select doesn't block if no states hold; instead it +// returns immediately with index equal to -1. +// +// If the chosen channel was used for a receive, the rᵢ component is +// set to the received value, where i is the index of that state among +// all n receive states; otherwise rᵢ has the zero value of type Tᵢ. +// Note that the receive index i is not the same as the state +// index index. +// +// The second component of the triple, recvOk, is a boolean whose value +// is true iff the selected operation was a receive and the receive +// successfully yielded a value. +// +// Pos() returns the ast.SelectStmt.Select. +// +// Example printed form: +// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] +// t11 = SelectBlocking <(index int, ok bool)> [] +// +type Select struct { + register + States []*SelectState + Blocking bool +} + +// The Range instruction yields an iterator over the domain and range +// of X, which must be a string or map. +// +// Elements are accessed via Next. +// +// Type() returns an opaque and degenerate "rangeIter" type. +// +// Pos() returns the ast.RangeStmt.For. +// +// Example printed form: +// t2 = Range t1 +// +type Range struct { + register + X Value // string or map +} + +// The Next instruction reads and advances the (map or string) +// iterator Iter and returns a 3-tuple value (ok, k, v). If the +// iterator is not exhausted, ok is true and k and v are the next +// elements of the domain and range, respectively. Otherwise ok is +// false and k and v are undefined. +// +// Components of the tuple are accessed using Extract. +// +// The IsString field distinguishes iterators over strings from those +// over maps, as the Type() alone is insufficient: consider +// map[int]rune. +// +// Type() returns a *types.Tuple for the triple (ok, k, v). +// The types of k and/or v may be types.Invalid. +// +// Example printed form: +// t5 = Next <(ok bool, k int, v rune)> t2 +// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 +// +type Next struct { + register + Iter Value + IsString bool // true => string iterator; false => map iterator. +} + +// The TypeAssert instruction tests whether interface value X has type +// AssertedType. +// +// If !CommaOk, on success it returns v, the result of the conversion +// (defined below); on failure it panics. +// +// If CommaOk: on success it returns a pair (v, true) where v is the +// result of the conversion; on failure it returns (z, false) where z +// is AssertedType's zero value. The components of the pair must be +// accessed using the Extract instruction. +// +// If AssertedType is a concrete type, TypeAssert checks whether the +// dynamic type in interface X is equal to it, and if so, the result +// of the conversion is a copy of the value in the interface. +// +// If AssertedType is an interface, TypeAssert checks whether the +// dynamic type of the interface is assignable to it, and if so, the +// result of the conversion is a copy of the interface value X. 
+// If AssertedType is a superinterface of X.Type(), the operation will +// fail iff the operand is nil. (Contrast with ChangeInterface, which +// performs no nil-check.) +// +// Type() reflects the actual type of the result, possibly a +// 2-types.Tuple; AssertedType is the asserted type. +// +// Pos() returns the ast.CallExpr.Lparen if the instruction arose from +// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the +// instruction arose from an explicit e.(T) operation; or the +// ast.CaseClause.Case if the instruction arose from a case of a +// type-switch statement. +// +// Example printed form: +// t2 = TypeAssert t1 +// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 +// +type TypeAssert struct { + register + X Value + AssertedType types.Type + CommaOk bool +} + +// The Extract instruction yields component Index of Tuple. +// +// This is used to access the results of instructions with multiple +// return values, such as Call, TypeAssert, Next, Recv, +// MapLookup and others. +// +// Example printed form: +// t7 = Extract [1] (ok) t4 +// +type Extract struct { + register + Tuple Value + Index int +} + +// Instructions executed for effect. They do not yield a value. -------------------- + +// The Jump instruction transfers control to the sole successor of its +// owning block. +// +// A Jump must be the last instruction of its containing BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// Jump → b1 +// +type Jump struct { + anInstruction + Comment string +} + +// The Unreachable pseudo-instruction signals that execution cannot +// continue after the preceding function call because it terminates +// the process. +// +// The instruction acts as a control instruction, jumping to the exit +// block. However, this jump will never execute. +// +// An Unreachable instruction must be the last instruction of its +// containing BasicBlock. +// +// Example printed form: +// Unreachable → b1 +// +type Unreachable struct { + anInstruction +} + +// The If instruction transfers control to one of the two successors +// of its owning block, depending on the boolean Cond: the first if +// true, the second if false. +// +// An If instruction must be the last instruction of its containing +// BasicBlock. +// +// Pos() returns the *ast.IfStmt, if explicit in the source. +// +// Example printed form: +// If t2 → b1 b2 +// +type If struct { + anInstruction + Cond Value +} + +type ConstantSwitch struct { + anInstruction + Tag Value + // Constant branch conditions. A nil Value denotes the (implicit + // or explicit) default branch. + Conds []Value +} + +type TypeSwitch struct { + register + Tag Value + Conds []types.Type +} + +// The Return instruction returns values and control back to the calling +// function. +// +// len(Results) is always equal to the number of results in the +// function's signature. +// +// If len(Results) > 1, Return returns a tuple value with the specified +// components which the caller must access using Extract instructions. +// +// There is no instruction to return a ready-made tuple like those +// returned by a "value,ok"-mode TypeAssert, MapLookup or Recv or +// a tail-call to a function with multiple result parameters. +// +// Return must be the last instruction of its containing BasicBlock. +// Such a block has no successors. +// +// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. 
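+//
+// An illustrative sketch (not upstream documentation; pair is a made-up
+// function): for Go source such as
+//
+//	func pair() (int, bool) { return 1, true }
+//	a, ok := pair()
+//
+// the Return in pair carries both results as one tuple-typed value; at the
+// call site the Call yields that tuple, and a and ok are obtained from it by
+// Extract instructions with indices 0 and 1.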
+// +// Example printed form: +// Return +// Return t1 t2 +// +type Return struct { + anInstruction + Results []Value +} + +// The RunDefers instruction pops and invokes the entire stack of +// procedure calls pushed by Defer instructions in this function. +// +// It is legal to encounter multiple 'rundefers' instructions in a +// single control-flow path through a function; this is useful in +// the combined init() function, for example. +// +// Pos() returns NoPos. +// +// Example printed form: +// RunDefers +// +type RunDefers struct { + anInstruction +} + +// The Panic instruction initiates a panic with value X. +// +// A Panic instruction must be the last instruction of its containing +// BasicBlock, which must have one successor, the exit block. +// +// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; +// they are treated as calls to a built-in function. +// +// Pos() returns the ast.CallExpr.Lparen if this panic was explicit +// in the source. +// +// Example printed form: +// Panic t1 +// +type Panic struct { + anInstruction + X Value // an interface{} +} + +// The Go instruction creates a new goroutine and calls the specified +// function within it. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.GoStmt.Go. +// +// Example printed form: +// Go println t1 +// Go t3 +// GoInvoke t4.Bar t2 +// +type Go struct { + anInstruction + Call CallCommon +} + +// The Defer instruction pushes the specified call onto a stack of +// functions to be called by a RunDefers instruction or by a panic. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.DeferStmt.Defer. +// +// Example printed form: +// Defer println t1 +// Defer t3 +// DeferInvoke t4.Bar t2 +// +type Defer struct { + anInstruction + Call CallCommon +} + +// The Send instruction sends X on channel Chan. +// +// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. +// +// Example printed form: +// Send t2 t1 +// +type Send struct { + anInstruction + Chan, X Value +} + +// The Recv instruction receives from channel Chan. +// +// If CommaOk, the result is a 2-tuple of the value above +// and a boolean indicating the success of the receive. The +// components of the tuple are accessed using Extract. +// +// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. +// For receive operations implicit in ranging over a channel, +// Pos() returns the ast.RangeStmt.For. +// +// Example printed form: +// t2 = Recv t1 +// t3 = Recv <(int, bool)> t1 +type Recv struct { + register + Chan Value + CommaOk bool +} + +// The Store instruction stores Val at address Addr. +// Stores can be of arbitrary types. +// +// Pos() returns the position of the source-level construct most closely +// associated with the memory store operation. +// Since implicit memory stores are numerous and varied and depend upon +// implementation choices, the details are not specified. +// +// Example printed form: +// Store {int} t2 t1 +// +type Store struct { + anInstruction + Addr Value + Val Value +} + +// The BlankStore instruction is emitted for assignments to the blank +// identifier. +// +// BlankStore is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns NoPos. +// +// Example printed form: +// BlankStore t1 +// +type BlankStore struct { + anInstruction + Val Value +} + +// The MapUpdate instruction updates the association of Map[Key] to +// Value. 
+// +// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, +// if explicit in the source. +// +// Example printed form: +// MapUpdate t3 t1 t2 +// +type MapUpdate struct { + anInstruction + Map Value + Key Value + Value Value +} + +// A DebugRef instruction maps a source-level expression Expr to the +// IR value X that represents the value (!IsAddr) or address (IsAddr) +// of that expression. +// +// DebugRef is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns Expr.Pos(), the start position of the source-level +// expression. This is not the same as the "designated" token as +// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the +// position of the ("designated") Lparen token. +// +// DebugRefs are generated only for functions built with debugging +// enabled; see Package.SetDebugMode() and the GlobalDebug builder +// mode flag. +// +// DebugRefs are not emitted for ast.Idents referring to constants or +// predeclared identifiers, since they are trivial and numerous. +// Nor are they emitted for ast.ParenExprs. +// +// (By representing these as instructions, rather than out-of-band, +// consistency is maintained during transformation passes by the +// ordinary SSA renaming machinery.) +// +// Example printed form: +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 +// +type DebugRef struct { + anInstruction + Expr ast.Expr // the referring expression (never *ast.ParenExpr) + object types.Object // the identity of the source var/func + IsAddr bool // Expr is addressable and X is the address it denotes + X Value // the value or address of Expr +} + +// Embeddable mix-ins and helpers for common parts of other structs. ----------- + +// register is a mix-in embedded by all IR values that are also +// instructions, i.e. virtual registers, and provides a uniform +// implementation of most of the Value interface: Value.Name() is a +// numbered register (e.g. "t0"); the other methods are field accessors. +// +// Temporary names are automatically assigned to each register on +// completion of building a function in IR form. +// +type register struct { + anInstruction + typ types.Type // type of virtual register + referrers []Instruction +} + +type node struct { + source ast.Node + id ID +} + +func (n *node) setID(id ID) { n.id = id } +func (n node) ID() ID { return n.id } + +func (n *node) setSource(source ast.Node) { n.source = source } +func (n *node) Source() ast.Node { return n.source } + +func (n *node) Pos() token.Pos { + if n.source != nil { + return n.source.Pos() + } + return token.NoPos +} + +// anInstruction is a mix-in embedded by all Instructions. +// It provides the implementations of the Block and setBlock methods. +type anInstruction struct { + node + block *BasicBlock // the basic block of this instruction +} + +// CallCommon is contained by Go, Defer and Call to hold the +// common parts of a function or method call. +// +// Each CallCommon exists in one of two modes, function call and +// interface method invocation, or "call" and "invoke" for short. +// +// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon +// represents an ordinary function call of the value in Value, +// which may be a *Builtin, a *Function or any other value of kind +// 'func'. +// +// Value may be one of: +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. 
+// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. +// StaticCallee returns the identity of the callee in cases +// (a) and (b), nil otherwise. +// +// Args contains the arguments to the call. If Value is a method, +// Args[0] contains the receiver parameter. +// +// Example printed form: +// t3 = Call <()> println t1 t2 +// Go t3 +// Defer t3 +// +// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon +// represents a dynamically dispatched call to an interface method. +// In this mode, Value is the interface value and Method is the +// interface's abstract method. Note: an abstract method may be +// shared by multiple interfaces due to embedding; Value.Type() +// provides the specific interface used for this call. +// +// Value is implicitly supplied to the concrete method implementation +// as the receiver parameter; in other words, Args[0] holds not the +// receiver but the first true argument. +// +// Example printed form: +// t6 = Invoke t5.String +// GoInvoke t4.Bar t2 +// DeferInvoke t4.Bar t2 +// +// For all calls to variadic functions (Signature().Variadic()), +// the last element of Args is a slice. +// +type CallCommon struct { + Value Value // receiver (invoke mode) or func value (call mode) + Method *types.Func // abstract method (invoke mode) + Args []Value // actual parameters (in static method call, includes receiver) + Results Value +} + +// IsInvoke returns true if this call has "invoke" (not "call") mode. +func (c *CallCommon) IsInvoke() bool { + return c.Method != nil +} + +// Signature returns the signature of the called function. +// +// For an "invoke"-mode call, the signature of the interface method is +// returned. +// +// In either "call" or "invoke" mode, if the callee is a method, its +// receiver is represented by sig.Recv, not sig.Params().At(0). +// +func (c *CallCommon) Signature() *types.Signature { + if c.Method != nil { + return c.Method.Type().(*types.Signature) + } + return c.Value.Type().Underlying().(*types.Signature) +} + +// StaticCallee returns the callee if this is a trivially static +// "call"-mode call to a function. +func (c *CallCommon) StaticCallee() *Function { + switch fn := c.Value.(type) { + case *Function: + return fn + case *MakeClosure: + return fn.Fn.(*Function) + } + return nil +} + +// Description returns a description of the mode of this call suitable +// for a user interface, e.g., "static method call". +func (c *CallCommon) Description() string { + switch fn := c.Value.(type) { + case *Builtin: + return "built-in function call" + case *MakeClosure: + return "static function closure call" + case *Function: + if fn.Signature.Recv() != nil { + return "static method call" + } + return "static function call" + } + if c.IsInvoke() { + return "dynamic method call" // ("invoke" mode) + } + return "dynamic function call" +} + +// The CallInstruction interface, implemented by *Go, *Defer and *Call, +// exposes the common parts of function-calling instructions, +// yet provides a way back to the Value defined by *Call alone. 
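+//
+// An illustrative sketch (not upstream documentation) of handling all three
+// call-site instructions uniformly:
+//
+//	func describeCall(instr CallInstruction) string {
+//		common := instr.Common()
+//		if common.IsInvoke() {
+//			return "dynamic call of method " + common.Method.Name()
+//		}
+//		if callee := common.StaticCallee(); callee != nil {
+//			return "static call of " + callee.Name()
+//		}
+//		return common.Description()
+//	}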
+// +type CallInstruction interface { + Instruction + Common() *CallCommon // returns the common parts of the call + Value() *Call +} + +func (s *Call) Common() *CallCommon { return &s.Call } +func (s *Defer) Common() *CallCommon { return &s.Call } +func (s *Go) Common() *CallCommon { return &s.Call } + +func (s *Call) Value() *Call { return s } +func (s *Defer) Value() *Call { return nil } +func (s *Go) Value() *Call { return nil } + +func (v *Builtin) Type() types.Type { return v.sig } +func (v *Builtin) Name() string { return v.name } +func (*Builtin) Referrers() *[]Instruction { return nil } +func (v *Builtin) Pos() token.Pos { return token.NoPos } +func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } +func (v *Builtin) Parent() *Function { return nil } + +func (v *FreeVar) Type() types.Type { return v.typ } +func (v *FreeVar) Name() string { return v.name } +func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } +func (v *FreeVar) Parent() *Function { return v.parent } + +func (v *Global) Type() types.Type { return v.typ } +func (v *Global) Name() string { return v.name } +func (v *Global) Parent() *Function { return nil } +func (v *Global) Referrers() *[]Instruction { return nil } +func (v *Global) Token() token.Token { return token.VAR } +func (v *Global) Object() types.Object { return v.object } +func (v *Global) String() string { return v.RelString(nil) } +func (v *Global) Package() *Package { return v.Pkg } +func (v *Global) RelString(from *types.Package) string { return relString(v, from) } + +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { return v.object } +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Referrers() *[]Instruction { + if v.parent != nil { + return &v.referrers + } + return nil +} + +func (v *Parameter) Object() types.Object { return v.object } + +func (v *Alloc) Type() types.Type { return v.typ } +func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } + +func (v *register) Type() types.Type { return v.typ } +func (v *register) setType(typ types.Type) { v.typ = typ } +func (v *register) Name() string { return fmt.Sprintf("t%d", v.id) } +func (v *register) Referrers() *[]Instruction { return &v.referrers } + +func (v *anInstruction) Parent() *Function { return v.block.parent } +func (v *anInstruction) Block() *BasicBlock { return v.block } +func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } +func (v *anInstruction) Referrers() *[]Instruction { return nil } + +func (t *Type) Name() string { return t.object.Name() } +func (t *Type) Pos() token.Pos { return t.object.Pos() } +func (t *Type) Type() types.Type { return t.object.Type() } +func (t *Type) Token() token.Token { return token.TYPE } +func (t *Type) Object() types.Object { return t.object } +func (t *Type) String() string { return t.RelString(nil) } +func (t *Type) Package() *Package { return t.pkg } +func (t *Type) RelString(from *types.Package) string { return relString(t, from) } + +func (c *NamedConst) Name() string { return c.object.Name() } +func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } +func (c *NamedConst) String() string { return c.RelString(nil) } +func (c *NamedConst) Type() types.Type { 
return c.object.Type() } +func (c *NamedConst) Token() token.Token { return token.CONST } +func (c *NamedConst) Object() types.Object { return c.object } +func (c *NamedConst) Package() *Package { return c.pkg } +func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } + +// Func returns the package-level function of the specified name, +// or nil if not found. +// +func (p *Package) Func(name string) (f *Function) { + f, _ = p.Members[name].(*Function) + return +} + +// Var returns the package-level variable of the specified name, +// or nil if not found. +// +func (p *Package) Var(name string) (g *Global) { + g, _ = p.Members[name].(*Global) + return +} + +// Const returns the package-level constant of the specified name, +// or nil if not found. +// +func (p *Package) Const(name string) (c *NamedConst) { + c, _ = p.Members[name].(*NamedConst) + return +} + +// Type returns the package-level type of the specified name, +// or nil if not found. +// +func (p *Package) Type(name string) (t *Type) { + t, _ = p.Members[name].(*Type) + return +} + +func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } + +// Operands. + +func (v *Alloc) Operands(rands []*Value) []*Value { + return rands +} + +func (v *BinOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Y) +} + +func (c *CallCommon) Operands(rands []*Value) []*Value { + rands = append(rands, &c.Value) + for i := range c.Args { + rands = append(rands, &c.Args[i]) + } + return rands +} + +func (s *Go) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Call) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Defer) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (v *ChangeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *ChangeType) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Convert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *DebugRef) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Extract) Operands(rands []*Value) []*Value { + return append(rands, &v.Tuple) +} + +func (v *Field) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *FieldAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *If) Operands(rands []*Value) []*Value { + return append(rands, &s.Cond) +} + +func (s *ConstantSwitch) Operands(rands []*Value) []*Value { + rands = append(rands, &s.Tag) + for i := range s.Conds { + rands = append(rands, &s.Conds[i]) + } + return rands +} + +func (s *TypeSwitch) Operands(rands []*Value) []*Value { + rands = append(rands, &s.Tag) + return rands +} + +func (v *Index) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *IndexAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (*Jump) Operands(rands []*Value) []*Value { + return rands +} + +func (*Unreachable) Operands(rands []*Value) []*Value { + return rands +} + +func (v *MapLookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *StringLookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *MakeChan) Operands(rands []*Value) []*Value { + return append(rands, &v.Size) +} + +func (v *MakeClosure) Operands(rands []*Value) []*Value { + rands = append(rands, &v.Fn) + for i := range 
v.Bindings { + rands = append(rands, &v.Bindings[i]) + } + return rands +} + +func (v *MakeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *MakeMap) Operands(rands []*Value) []*Value { + return append(rands, &v.Reserve) +} + +func (v *MakeSlice) Operands(rands []*Value) []*Value { + return append(rands, &v.Len, &v.Cap) +} + +func (v *MapUpdate) Operands(rands []*Value) []*Value { + return append(rands, &v.Map, &v.Key, &v.Value) +} + +func (v *Next) Operands(rands []*Value) []*Value { + return append(rands, &v.Iter) +} + +func (s *Panic) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Sigma) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Phi) Operands(rands []*Value) []*Value { + for i := range v.Edges { + rands = append(rands, &v.Edges[i]) + } + return rands +} + +func (v *Range) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *Return) Operands(rands []*Value) []*Value { + for i := range s.Results { + rands = append(rands, &s.Results[i]) + } + return rands +} + +func (*RunDefers) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Select) Operands(rands []*Value) []*Value { + for i := range v.States { + rands = append(rands, &v.States[i].Chan, &v.States[i].Send) + } + return rands +} + +func (s *Send) Operands(rands []*Value) []*Value { + return append(rands, &s.Chan, &s.X) +} + +func (recv *Recv) Operands(rands []*Value) []*Value { + return append(rands, &recv.Chan) +} + +func (v *Slice) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Low, &v.High, &v.Max) +} + +func (s *Store) Operands(rands []*Value) []*Value { + return append(rands, &s.Addr, &s.Val) +} + +func (s *BlankStore) Operands(rands []*Value) []*Value { + return append(rands, &s.Val) +} + +func (v *TypeAssert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *UnOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Load) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +// Non-Instruction Values: +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/go/ir/staticcheck.conf b/vendor/honnef.co/go/tools/go/ir/staticcheck.conf new file mode 100644 index 000000000..d7b38bc35 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/staticcheck.conf @@ -0,0 +1,3 @@ +# ssa/... is mostly imported from upstream and we don't want to +# deviate from it too much, hence disabling SA1019 +checks = ["inherit", "-SA1019"] diff --git a/vendor/honnef.co/go/tools/go/ir/util.go b/vendor/honnef.co/go/tools/go/ir/util.go new file mode 100644 index 000000000..343a6320a --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/util.go @@ -0,0 +1,89 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines a number of miscellaneous utility functions. 
+ +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + + "honnef.co/go/tools/go/ast/astutil" +) + +//// AST utilities + +func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } + +// isBlankIdent returns true iff e is an Ident with name "_". +// They have no associated types.Object, and thus no type. +// +func isBlankIdent(e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && id.Name == "_" +} + +//// Type utilities. Some of these belong in go/types. + +// isPointer returns true for types whose underlying type is a pointer. +func isPointer(typ types.Type) bool { + _, ok := typ.Underlying().(*types.Pointer) + return ok +} + +func isInterface(T types.Type) bool { return types.IsInterface(T) } + +// deref returns a pointer's element type; otherwise it returns typ. +func deref(typ types.Type) types.Type { + if p, ok := typ.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return typ +} + +// recvType returns the receiver type of method obj. +func recvType(obj *types.Func) types.Type { + return obj.Type().(*types.Signature).Recv().Type() +} + +// logStack prints the formatted "start" message to stderr and +// returns a closure that prints the corresponding "end" message. +// Call using 'defer logStack(...)()' to show builder stack on panic. +// Don't forget trailing parens! +// +func logStack(format string, args ...interface{}) func() { + msg := fmt.Sprintf(format, args...) + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, "\n") + return func() { + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, " end\n") + } +} + +// newVar creates a 'var' for use in a types.Tuple. +func newVar(name string, typ types.Type) *types.Var { + return types.NewParam(token.NoPos, nil, name, typ) +} + +// anonVar creates an anonymous 'var' for use in a types.Tuple. +func anonVar(typ types.Type) *types.Var { + return newVar("", typ) +} + +var lenResults = types.NewTuple(anonVar(tInt)) + +// makeLen returns the len builtin specialized to type func(T)int. +func makeLen(T types.Type) *Builtin { + lenParams := types.NewTuple(anonVar(T)) + return &Builtin{ + name: "len", + sig: types.NewSignature(nil, lenParams, lenResults, false), + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/wrappers.go b/vendor/honnef.co/go/tools/go/ir/wrappers.go new file mode 100644 index 000000000..1d51b5dc9 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/wrappers.go @@ -0,0 +1,290 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines synthesis of Functions that delegate to declared +// methods; they come in three kinds: +// +// (1) wrappers: methods that wrap declared methods, performing +// implicit pointer indirections and embedded field selections. +// +// (2) thunks: funcs that wrap declared methods. Like wrappers, +// thunks perform indirections and field selections. The thunk's +// first parameter is used as the receiver for the method call. +// +// (3) bounds: funcs that wrap declared methods. The bound's sole +// free variable, supplied by a closure, is used as the receiver +// for the method call. No indirections or field selections are +// performed since they can be done before the call. 
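+//
+// An illustrative sketch (not upstream documentation) of source-level forms
+// that give rise to each kind, for a type T with a method meth and a value t:
+//
+//	type U struct{ T }	// (1) U's promoted meth is implemented by a wrapper that selects the embedded T
+//	f := T.meth		// (2) a method expression yields a thunk; the receiver becomes f's first parameter
+//	g := t.meth		// (3) a method value yields a bound; the receiver t is captured by a closure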
+ +import ( + "fmt" + + "go/types" +) + +// -- wrappers ----------------------------------------------------------- + +// makeWrapper returns a synthetic method that delegates to the +// declared method denoted by meth.Obj(), first performing any +// necessary pointer indirections or field selections implied by meth. +// +// The resulting method's receiver type is meth.Recv(). +// +// This function is versatile but quite subtle! Consider the +// following axes of variation when making changes: +// - optional receiver indirection +// - optional implicit field selections +// - meth.Obj() may denote a concrete or an interface method +// - the result may be a thunk or a wrapper. +// +// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) +// +func makeWrapper(prog *Program, sel *types.Selection) *Function { + obj := sel.Obj().(*types.Func) // the declared function + sig := sel.Type().(*types.Signature) // type of this wrapper + + var recv *types.Var // wrapper's receiver or thunk's params[0] + name := obj.Name() + var description Synthetic + var start int // first regular param + if sel.Kind() == types.MethodExpr { + name += "$thunk" + description = SyntheticThunk + recv = sig.Params().At(0) + start = 1 + } else { + description = SyntheticWrapper + recv = sig.Recv() + } + + if prog.mode&LogSource != 0 { + defer logStack("make %s to (%s)", description, recv.Type())() + } + fn := &Function{ + name: name, + method: sel, + object: obj, + Signature: sig, + Synthetic: description, + Prog: prog, + functionBody: new(functionBody), + } + fn.initHTML(prog.PrintFunc) + fn.startBody() + fn.addSpilledParam(recv, nil) + createParams(fn, start) + + indices := sel.Index() + + var v Value = fn.Locals[0] // spilled receiver + if isPointer(sel.Recv()) { + v = emitLoad(fn, v, nil) + + // For simple indirection wrappers, perform an informative nil-check: + // "value method (T).f called using nil *T pointer" + if len(indices) == 1 && !isPointer(recvType(obj)) { + var c Call + c.Call.Value = &Builtin{ + name: "ir:wrapnilchk", + sig: types.NewSignature(nil, + types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(sel.Recv())), false), + } + c.Call.Args = []Value{ + v, + emitConst(fn, stringConst(deref(sel.Recv()).String())), + emitConst(fn, stringConst(sel.Obj().Name())), + } + c.setType(v.Type()) + v = fn.emit(&c, nil) + } + } + + // Invariant: v is a pointer, either + // value of *A receiver param, or + // address of A spilled receiver. + + // We use pointer arithmetic (FieldAddr possibly followed by + // Load) in preference to value extraction (Field possibly + // preceded by Load). + + v = emitImplicitSelections(fn, v, indices[:len(indices)-1], nil) + + // Invariant: v is a pointer, either + // value of implicit *C field, or + // address of implicit C field. + + var c Call + if r := recvType(obj); !isInterface(r) { // concrete method + if !isPointer(r) { + v = emitLoad(fn, v, nil) + } + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = append(c.Call.Args, v) + } else { + c.Call.Method = obj + c.Call.Value = emitLoad(fn, v, nil) + } + for _, arg := range fn.Params[1:] { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c, nil) + fn.finishBody() + return fn +} + +// createParams creates parameters for wrapper method fn based on its +// Signature.Params, which do not include the receiver. +// start is the index of the first regular parameter to use. 
+// +func createParams(fn *Function, start int) { + tparams := fn.Signature.Params() + for i, n := start, tparams.Len(); i < n; i++ { + fn.addParamObj(tparams.At(i), nil) + } +} + +// -- bounds ----------------------------------------------------------- + +// makeBound returns a bound method wrapper (or "bound"), a synthetic +// function that delegates to a concrete or interface method denoted +// by obj. The resulting function has no receiver, but has one free +// variable which will be used as the method's receiver in the +// tail-call. +// +// Use MakeClosure with such a wrapper to construct a bound method +// closure. e.g.: +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() +// +// f is a closure of a synthetic wrapper defined as if by: +// +// f := func() { return t.meth() } +// +// Unlike makeWrapper, makeBound need perform no indirection or field +// selections because that can be done before the closure is +// constructed. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeBound(prog *Program, obj *types.Func) *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + fn, ok := prog.bounds[obj] + if !ok { + if prog.mode&LogSource != 0 { + defer logStack("%s", SyntheticBound)() + } + fn = &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: SyntheticBound, + Prog: prog, + functionBody: new(functionBody), + } + fn.initHTML(prog.PrintFunc) + + fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} + fn.FreeVars = []*FreeVar{fv} + fn.startBody() + createParams(fn, 0) + var c Call + + if !isInterface(recvType(obj)) { // concrete + c.Call.Value = prog.declaredFunc(obj) + c.Call.Args = []Value{fv} + } else { + c.Call.Value = fv + c.Call.Method = obj + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c, nil) + fn.finishBody() + + prog.bounds[obj] = fn + } + return fn +} + +// -- thunks ----------------------------------------------------------- + +// makeThunk returns a thunk, a synthetic function that delegates to a +// concrete or interface method denoted by sel.Obj(). The resulting +// function has no receiver, but has an additional (first) regular +// parameter. +// +// Precondition: sel.Kind() == types.MethodExpr. +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() +// +// f is a synthetic wrapper defined as if by: +// +// f := func(t T) { return t.meth() } +// +// TODO(adonovan): opt: currently the stub is created even when used +// directly in a function call: C.f(i, 0). This is less efficient +// than inlining the stub. +// +// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) +// +func makeThunk(prog *Program, sel *types.Selection) *Function { + if sel.Kind() != types.MethodExpr { + panic(sel) + } + + key := selectionKey{ + kind: sel.Kind(), + recv: sel.Recv(), + obj: sel.Obj(), + index: fmt.Sprint(sel.Index()), + indirect: sel.Indirect(), + } + + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Canonicalize key.recv to avoid constructing duplicate thunks. 
+ canonRecv, ok := prog.canon.At(key.recv).(types.Type) + if !ok { + canonRecv = key.recv + prog.canon.Set(key.recv, canonRecv) + } + key.recv = canonRecv + + fn, ok := prog.thunks[key] + if !ok { + fn = makeWrapper(prog, sel) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver + } + prog.thunks[key] = fn + } + return fn +} + +func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { + return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) +} + +// selectionKey is like types.Selection but a usable map key. +type selectionKey struct { + kind types.SelectionKind + recv types.Type // canonicalized via Program.canon + obj types.Object + index string + indirect bool +} diff --git a/vendor/honnef.co/go/tools/go/ir/write.go b/vendor/honnef.co/go/tools/go/ir/write.go new file mode 100644 index 000000000..b936bc985 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/ir/write.go @@ -0,0 +1,5 @@ +package ir + +func NewJump(parent *BasicBlock) *Jump { + return &Jump{anInstruction{block: parent}, ""} +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go new file mode 100644 index 000000000..d35d08e00 --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go @@ -0,0 +1,25 @@ +package typeutil + +import ( + "go/ast" + "go/types" + _ "unsafe" + + "golang.org/x/tools/go/types/typeutil" +) + +type MethodSetCache = typeutil.MethodSetCache +type Map = typeutil.Map +type Hasher = typeutil.Hasher + +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + return typeutil.Callee(info, call) +} + +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + return typeutil.IntuitiveMethodSet(T, msets) +} + +func MakeHasher() Hasher { + return typeutil.MakeHasher() +} diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/util.go b/vendor/honnef.co/go/tools/go/types/typeutil/util.go new file mode 100644 index 000000000..b0aca16bd --- /dev/null +++ b/vendor/honnef.co/go/tools/go/types/typeutil/util.go @@ -0,0 +1,131 @@ +package typeutil + +import ( + "bytes" + "go/types" + "sync" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buf := bytes.NewBuffer(nil) + buf.Grow(64) + return buf + }, +} + +func FuncName(f *types.Func) string { + buf := bufferPool.Get().(*bytes.Buffer) + buf.Reset() + if f.Type() != nil { + sig := f.Type().(*types.Signature) + if recv := sig.Recv(); recv != nil { + buf.WriteByte('(') + if _, ok := recv.Type().(*types.Interface); ok { + // gcimporter creates abstract methods of + // named interfaces using the interface type + // (not the named type) as the receiver. + // Don't print it in full. + buf.WriteString("interface") + } else { + types.WriteType(buf, recv.Type(), nil) + } + buf.WriteByte(')') + buf.WriteByte('.') + } else if f.Pkg() != nil { + writePackage(buf, f.Pkg()) + } + } + buf.WriteString(f.Name()) + s := buf.String() + bufferPool.Put(buf) + return s +} + +func writePackage(buf *bytes.Buffer, pkg *types.Package) { + if pkg == nil { + return + } + s := pkg.Path() + if s != "" { + buf.WriteString(s) + buf.WriteByte('.') + } +} + +// Dereference returns a pointer's element type; otherwise it returns +// T. +func Dereference(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return T +} + +// DereferenceR returns a pointer's element type; otherwise it returns +// T. 
If the element type is itself a pointer, DereferenceR will be +// applied recursively. +func DereferenceR(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return DereferenceR(p.Elem()) + } + return T +} + +func IsObject(obj types.Object, name string) bool { + var path string + if pkg := obj.Pkg(); pkg != nil { + path = pkg.Path() + "." + } + return path+obj.Name() == name +} + +// OPT(dh): IsType is kind of expensive; should we really use it? +func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } + +func IsPointerLike(T types.Type) bool { + switch T := T.Underlying().(type) { + case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer, *types.Slice: + return true + case *types.Basic: + return T.Kind() == types.UnsafePointer + } + return false +} + +type Field struct { + Var *types.Var + Tag string + Path []int +} + +// FlattenFields recursively flattens T and embedded structs, +// returning a list of fields. If multiple fields with the same name +// exist, all will be returned. +func FlattenFields(T *types.Struct) []Field { + return flattenFields(T, nil, nil) +} + +func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { + if seen == nil { + seen = map[types.Type]bool{} + } + if seen[T] { + return nil + } + seen[T] = true + var out []Field + for i := 0; i < T.NumFields(); i++ { + field := T.Field(i) + tag := T.Tag(i) + np := append(path[:len(path):len(path)], i) + if field.Anonymous() { + if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { + out = append(out, flattenFields(s, np, seen)...) + } + } else { + out = append(out, Field{field, tag, np}) + } + } + return out +} diff --git a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go new file mode 100644 index 000000000..51dfaef53 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go @@ -0,0 +1,107 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildir defines an Analyzer that constructs the IR +// of an error-free package and returns the set of all +// functions within it. It does not report any diagnostics itself but +// may be used as an input to other analyzers. +// +// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. +package buildir + +import ( + "go/ast" + "go/types" + "reflect" + + "honnef.co/go/tools/go/ir" + + "golang.org/x/tools/go/analysis" +) + +type noReturn struct { + Kind ir.NoReturn +} + +func (*noReturn) AFact() {} + +var Analyzer = &analysis.Analyzer{ + Name: "buildir", + Doc: "build IR for later passes", + Run: run, + ResultType: reflect.TypeOf(new(IR)), + FactTypes: []analysis.Fact{new(noReturn)}, +} + +// IR provides intermediate representation for all the +// non-blank source functions in the current package. +type IR struct { + Pkg *ir.Package + SrcFuncs []*ir.Function +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Plundered from ssautil.BuildPackage. + + // We must create a new Program for each Package because the + // analysis API provides no place to hang a Program shared by + // all Packages. Consequently, IR Packages and Functions do not + // have a canonical representation across an analysis session of + // multiple packages. 
This is unlikely to be a problem in + // practice because the analysis API essentially forces all + // packages to be analysed independently, so any given call to + // Analysis.Run on a package will see only IR objects belonging + // to a single Program. + + mode := ir.GlobalDebug + + prog := ir.NewProgram(pass.Fset, mode) + + // Create IR packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + irpkg := prog.CreatePackage(p, nil, nil, true) + for _, fn := range irpkg.Functions { + if ast.IsExported(fn.Name()) { + var noRet noReturn + if pass.ImportObjectFact(fn.Object(), &noRet) { + fn.NoReturn = noRet.Kind + } + } + } + createAll(p.Imports()) + } + } + } + createAll(pass.Pkg.Imports()) + + // Create and build the primary package. + irpkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) + irpkg.Build() + + // Compute list of source functions, including literals, + // in source order. + var addAnons func(f *ir.Function) + funcs := make([]*ir.Function, len(irpkg.Functions)) + copy(funcs, irpkg.Functions) + addAnons = func(f *ir.Function) { + for _, anon := range f.AnonFuncs { + funcs = append(funcs, anon) + addAnons(anon) + } + } + for _, fn := range irpkg.Functions { + addAnons(fn) + if fn.NoReturn > 0 { + pass.ExportObjectFact(fn.Object(), &noReturn{fn.NoReturn}) + } + } + + return &IR{Pkg: irpkg, SrcFuncs: funcs}, nil +} diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go new file mode 100644 index 000000000..2e839614a --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go @@ -0,0 +1,206 @@ +package sharedcheck + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "honnef.co/go/tools/analysis/code" + "honnef.co/go/tools/analysis/edit" + "honnef.co/go/tools/analysis/facts" + "honnef.co/go/tools/analysis/report" + "honnef.co/go/tools/go/ast/astutil" + "honnef.co/go/tools/go/ir" + "honnef.co/go/tools/go/ir/irutil" + "honnef.co/go/tools/internal/passes/buildir" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" +) + +func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + cb := func(node ast.Node) bool { + rng, ok := node.(*ast.RangeStmt) + if !ok || !astutil.IsBlank(rng.Key) { + return true + } + + v, _ := fn.ValueForExpr(rng.X) + + // Check that we're converting from string to []rune + val, _ := v.(*ir.Convert) + if val == nil { + return true + } + Tsrc, ok := val.X.Type().Underlying().(*types.Basic) + if !ok || Tsrc.Kind() != types.String { + return true + } + Tdst, ok := val.Type().(*types.Slice) + if !ok { + return true + } + TdstElem, ok := Tdst.Elem().(*types.Basic) + if !ok || TdstElem.Kind() != types.Int32 { + return true + } + + // Check that the result of the conversion is only used to + // range over + refs := val.Referrers() + if refs == nil { + return true + } + + // Expect two refs: one for obtaining the length of the slice, + // one for accessing the elements + if len(irutil.FilterDebug(*refs)) != 2 { + // TODO(dh): right now, we check that only one place + // refers to our slice. This will miss cases such as + // ranging over the slice twice. 
Ideally, we'd ensure that + // the slice is only used for ranging over (without + // accessing the key), but that is harder to do because in + // IR form, ranging over a slice looks like an ordinary + // loop with index increments and slice accesses. We'd + // have to look at the associated AST node to check that + // it's a range statement. + return true + } + + pass.Reportf(rng.Pos(), "should range over string, not []rune(string)") + + return true + } + if source := fn.Source(); source != nil { + ast.Inspect(source, cb) + } + } + return nil, nil +} + +// RedundantTypeInDeclarationChecker returns a checker that flags variable declarations with redundantly specified types. +// That is, it flags 'var v T = e' where e's type is identical to T and 'var v = e' (or 'v := e') would have the same effect. +// +// It does not flag variables under the following conditions, to reduce the number of false positives: +// - global variables – these often specify types to aid godoc +// - files that use cgo – cgo code generation and pointer checking emits redundant types +// +// It does not flag variables under the following conditions, unless flagHelpfulTypes is true, to reduce the number of noisy positives: +// - packages that import syscall or unsafe – these sometimes use this form of assignment to make sure types are as expected +// - variables named the blank identifier – a pattern used to confirm the types of variables +// - named untyped constants on the rhs – the explicitness might aid readability +func RedundantTypeInDeclarationChecker(verb string, flagHelpfulTypes bool) *analysis.Analyzer { + fn := func(pass *analysis.Pass) (interface{}, error) { + eval := func(expr ast.Expr) (types.TypeAndValue, error) { + info := &types.Info{ + Types: map[ast.Expr]types.TypeAndValue{}, + } + err := types.CheckExpr(pass.Fset, pass.Pkg, expr.Pos(), expr, info) + return info.Types[expr], err + } + + if !flagHelpfulTypes { + // Don't look at code in low-level packages + for _, imp := range pass.Pkg.Imports() { + if imp.Path() == "syscall" || imp.Path() == "unsafe" { + return nil, nil + } + } + } + + fn := func(node ast.Node) { + decl := node.(*ast.GenDecl) + if decl.Tok != token.VAR { + return + } + + gen, _ := code.Generator(pass, decl.Pos()) + if gen == facts.Cgo { + // TODO(dh): remove this exception once we can use UsesCgo + return + } + + // Delay looking up parent AST nodes until we have to + checkedDecl := false + + specLoop: + for _, spec := range decl.Specs { + spec := spec.(*ast.ValueSpec) + if spec.Type == nil { + continue + } + if len(spec.Names) != len(spec.Values) { + continue + } + Tlhs := pass.TypesInfo.TypeOf(spec.Type) + for i, v := range spec.Values { + if !flagHelpfulTypes && spec.Names[i].Name == "_" { + continue specLoop + } + Trhs := pass.TypesInfo.TypeOf(v) + if !types.Identical(Tlhs, Trhs) { + continue specLoop + } + + // Some expressions are untyped and get converted to the lhs type implicitly. + // This applies to untyped constants, shift operations with an untyped lhs, and possibly others. + // + // Check if the type is truly redundant, i.e. if the type on the lhs doesn't match the default type of the untyped constant. + tv, err := eval(v) + if err != nil { + panic(err) + } + if b, ok := tv.Type.(*types.Basic); ok && (b.Info()&types.IsUntyped) != 0 { + switch v := v.(type) { + case *ast.Ident: + // Only flag named constant rhs if it's a predeclared identifier. + // Don't flag other named constants, as the explicit type may aid readability. 
+ if pass.TypesInfo.ObjectOf(v).Pkg() != nil && !flagHelpfulTypes { + continue specLoop + } + case *ast.SelectorExpr: + // Constant selector expressions can only refer to named constants that arent predeclared. + if !flagHelpfulTypes { + continue specLoop + } + default: + // don't skip if the type on the lhs matches the default type of the constant + if Tlhs != types.Default(b) { + continue specLoop + } + } + } + } + + if !checkedDecl { + // Don't flag global variables. These often have explicit types for godoc's sake. + path, _ := astutil.PathEnclosingInterval(code.File(pass, decl), decl.Pos(), decl.Pos()) + pathLoop: + for _, el := range path { + switch el.(type) { + case *ast.FuncDecl, *ast.FuncLit: + checkedDecl = true + break pathLoop + } + } + if !checkedDecl { + // decl is not inside a function + break specLoop + } + } + + report.Report(pass, spec.Type, fmt.Sprintf("%s omit type %s from declaration; it will be inferred from the right-hand side", verb, report.Render(pass, spec.Type)), report.FilterGenerated(), + report.Fixes(edit.Fix("Remove redundant type", edit.Delete(spec.Type)))) + } + } + code.Preorder(pass, fn, (*ast.GenDecl)(nil)) + return nil, nil + } + + return &analysis.Analyzer{ + Run: fn, + Requires: []*analysis.Analyzer{facts.Generated, inspect.Analyzer, facts.TokenFile, facts.Generated}, + } +} diff --git a/vendor/honnef.co/go/tools/knowledge/arg.go b/vendor/honnef.co/go/tools/knowledge/arg.go new file mode 100644 index 000000000..7ac0b358d --- /dev/null +++ b/vendor/honnef.co/go/tools/knowledge/arg.go @@ -0,0 +1,64 @@ +package knowledge + +var args = map[string]int{ + "(*encoding/json.Decoder).Decode.v": 0, + "(*encoding/json.Encoder).Encode.v": 0, + "(*encoding/xml.Decoder).Decode.v": 0, + "(*encoding/xml.Encoder).Encode.v": 0, + "(*sync.Pool).Put.x": 0, + "(*text/template.Template).Parse.text": 0, + "(io.Seeker).Seek.offset": 0, + "(time.Time).Sub.u": 0, + "append.elems": 1, + "append.slice": 0, + "bytes.Equal.a": 0, + "bytes.Equal.b": 1, + "encoding/binary.Write.data": 2, + "errors.New.text": 0, + "fmt.Fprintf.format": 1, + "fmt.Printf.format": 0, + "fmt.Sprintf.a[0]": 1, + "fmt.Sprintf.format": 0, + "json.Marshal.v": 0, + "json.Unmarshal.v": 1, + "len.v": 0, + "make.size[0]": 1, + "make.size[1]": 2, + "make.t": 0, + "net/url.Parse.rawurl": 0, + "os.OpenFile.flag": 1, + "os/exec.Command.name": 0, + "os/signal.Notify.c": 0, + "regexp.Compile.expr": 0, + "runtime.SetFinalizer.finalizer": 1, + "runtime.SetFinalizer.obj": 0, + "sort.Sort.data": 0, + "strconv.AppendFloat.bitSize": 4, + "strconv.AppendFloat.fmt": 2, + "strconv.AppendInt.base": 2, + "strconv.AppendUint.base": 2, + "strconv.FormatComplex.bitSize": 3, + "strconv.FormatComplex.fmt": 1, + "strconv.FormatFloat.bitSize": 3, + "strconv.FormatFloat.fmt": 1, + "strconv.FormatInt.base": 1, + "strconv.FormatUint.base": 1, + "strconv.ParseComplex.bitSize": 1, + "strconv.ParseFloat.bitSize": 1, + "strconv.ParseInt.base": 1, + "strconv.ParseInt.bitSize": 2, + "strconv.ParseUint.base": 1, + "strconv.ParseUint.bitSize": 2, + "time.Parse.layout": 0, + "time.Sleep.d": 0, + "xml.Marshal.v": 0, + "xml.Unmarshal.v": 1, +} + +func Arg(name string) int { + n, ok := args[name] + if !ok { + panic("unknown argument " + name) + } + return n +} diff --git a/vendor/honnef.co/go/tools/knowledge/deprecated.go b/vendor/honnef.co/go/tools/knowledge/deprecated.go new file mode 100644 index 000000000..8fa84fd44 --- /dev/null +++ b/vendor/honnef.co/go/tools/knowledge/deprecated.go @@ -0,0 +1,217 @@ +package knowledge + +const ( + 
DeprecatedNeverUse = -1 + DeprecatedUseNoLonger = -2 +) + +type Deprecation struct { + DeprecatedSince int + AlternativeAvailableSince int +} + +// go/importer.ForCompiler contains "Deprecated:", but it refers to a single argument, not the whole function. +// Luckily, the notice starts in the middle of a paragraph, and as such isn't detected by us. + +var StdlibDeprecations = map[string]Deprecation{ + // FIXME(dh): AllowBinary isn't being detected as deprecated + // because the comment has a newline right after "Deprecated:" + "go/build.AllowBinary": {7, 7}, + "(archive/zip.FileHeader).CompressedSize": {1, 1}, + "(archive/zip.FileHeader).UncompressedSize": {1, 1}, + "(archive/zip.FileHeader).ModifiedTime": {10, 10}, + "(archive/zip.FileHeader).ModifiedDate": {10, 10}, + "(*archive/zip.FileHeader).ModTime": {10, 10}, + "(*archive/zip.FileHeader).SetModTime": {10, 10}, + "(go/doc.Package).Bugs": {1, 1}, + "os.SEEK_SET": {7, 7}, + "os.SEEK_CUR": {7, 7}, + "os.SEEK_END": {7, 7}, + "(net.Dialer).Cancel": {7, 7}, + "runtime.CPUProfile": {9, 0}, + "compress/flate.ReadError": {6, DeprecatedUseNoLonger}, + "compress/flate.WriteError": {6, DeprecatedUseNoLonger}, + "path/filepath.HasPrefix": {0, DeprecatedNeverUse}, + "(net/http.Transport).Dial": {7, 7}, + "(net/http.Transport).DialTLS": {14, 14}, + "(*net/http.Transport).CancelRequest": {6, 5}, + "net/http.ErrWriteAfterFlush": {7, DeprecatedUseNoLonger}, + "net/http.ErrHeaderTooLong": {8, DeprecatedUseNoLonger}, + "net/http.ErrShortBody": {8, DeprecatedUseNoLonger}, + "net/http.ErrMissingContentLength": {8, DeprecatedUseNoLonger}, + "net/http/httputil.ErrPersistEOF": {0, DeprecatedUseNoLonger}, + "net/http/httputil.ErrClosed": {0, DeprecatedUseNoLonger}, + "net/http/httputil.ErrPipeline": {0, DeprecatedUseNoLonger}, + "net/http/httputil.ServerConn": {0, 0}, + "net/http/httputil.NewServerConn": {0, 0}, + "net/http/httputil.ClientConn": {0, 0}, + "net/http/httputil.NewClientConn": {0, 0}, + "net/http/httputil.NewProxyClientConn": {0, 0}, + "(net/http.Request).Cancel": {7, 7}, + "(text/template/parse.PipeNode).Line": {1, DeprecatedUseNoLonger}, + "(text/template/parse.ActionNode).Line": {1, DeprecatedUseNoLonger}, + "(text/template/parse.BranchNode).Line": {1, DeprecatedUseNoLonger}, + "(text/template/parse.TemplateNode).Line": {1, DeprecatedUseNoLonger}, + "database/sql/driver.ColumnConverter": {9, 9}, + "database/sql/driver.Execer": {8, 8}, + "database/sql/driver.Queryer": {8, 8}, + "(database/sql/driver.Conn).Begin": {8, 8}, + "(database/sql/driver.Stmt).Exec": {8, 8}, + "(database/sql/driver.Stmt).Query": {8, 8}, + "syscall.StringByteSlice": {1, 1}, + "syscall.StringBytePtr": {1, 1}, + "syscall.StringSlicePtr": {1, 1}, + "syscall.StringToUTF16": {1, 1}, + "syscall.StringToUTF16Ptr": {1, 1}, + "(*regexp.Regexp).Copy": {12, DeprecatedUseNoLonger}, + "(archive/tar.Header).Xattrs": {10, 10}, + "archive/tar.TypeRegA": {11, 1}, + "go/types.NewInterface": {11, 11}, + "(*go/types.Interface).Embedded": {11, 11}, + "go/importer.For": {12, 12}, + "encoding/json.InvalidUTF8Error": {2, DeprecatedUseNoLonger}, + "encoding/json.UnmarshalFieldError": {2, DeprecatedUseNoLonger}, + "encoding/csv.ErrTrailingComma": {2, DeprecatedUseNoLonger}, + "(encoding/csv.Reader).TrailingComma": {2, DeprecatedUseNoLonger}, + "(net.Dialer).DualStack": {12, 12}, + "net/http.ErrUnexpectedTrailer": {12, DeprecatedUseNoLonger}, + "net/http.CloseNotifier": {11, 7}, + // This is hairy. 
The notice says "Not all errors in the http package related to protocol errors are of type ProtocolError", but doesn't that imply that some errors do? + "net/http.ProtocolError": {8, DeprecatedUseNoLonger}, + "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + + // These functions have no direct alternative, but they are insecure and should no longer be used. + "crypto/x509.IsEncryptedPEMBlock": {16, DeprecatedNeverUse}, + "crypto/x509.DecryptPEMBlock": {16, DeprecatedNeverUse}, + "crypto/x509.EncryptPEMBlock": {16, DeprecatedNeverUse}, + "crypto/dsa": {16, DeprecatedNeverUse}, + + // This function has no alternative, but also no purpose. + "(*crypto/rc4.Cipher).Reset": {12, DeprecatedNeverUse}, + "(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7}, + "image.ZP": {13, 0}, + "image.ZR": {13, 0}, + "(*debug/gosym.LineTable).LineToPC": {2, 2}, + "(*debug/gosym.LineTable).PCToLine": {2, 2}, + "crypto/tls.VersionSSL30": {13, DeprecatedNeverUse}, + "(crypto/tls.Config).NameToCertificate": {14, DeprecatedUseNoLonger}, + "(*crypto/tls.Config).BuildNameToCertificate": {14, DeprecatedUseNoLonger}, + "(crypto/tls.Config).SessionTicketKey": {16, 5}, + // No alternative, no use + "(crypto/tls.ConnectionState).NegotiatedProtocolIsMutual": {16, DeprecatedNeverUse}, + // No alternative, but insecure + "(crypto/tls.ConnectionState).TLSUnique": {16, DeprecatedNeverUse}, + "image/jpeg.Reader": {4, DeprecatedNeverUse}, + + // All of these have been deprecated in favour of external libraries + "syscall.AttachLsf": {7, 0}, + "syscall.DetachLsf": {7, 0}, + "syscall.LsfSocket": {7, 0}, + "syscall.SetLsfPromisc": {7, 0}, + "syscall.LsfJump": {7, 0}, + "syscall.LsfStmt": {7, 0}, + "syscall.BpfStmt": {7, 0}, + "syscall.BpfJump": {7, 0}, + "syscall.BpfBuflen": {7, 0}, + "syscall.SetBpfBuflen": {7, 0}, + "syscall.BpfDatalink": {7, 0}, + "syscall.SetBpfDatalink": {7, 0}, + "syscall.SetBpfPromisc": {7, 0}, + "syscall.FlushBpf": {7, 0}, + "syscall.BpfInterface": {7, 0}, + "syscall.SetBpfInterface": {7, 0}, + "syscall.BpfTimeout": {7, 0}, + "syscall.SetBpfTimeout": {7, 0}, + "syscall.BpfStats": {7, 0}, + "syscall.SetBpfImmediate": {7, 0}, + "syscall.SetBpf": {7, 0}, + "syscall.CheckBpfVersion": {7, 0}, + "syscall.BpfHeadercmpl": {7, 0}, + "syscall.SetBpfHeadercmpl": {7, 0}, + "syscall.RouteRIB": {8, 0}, + "syscall.RoutingMessage": {8, 0}, + "syscall.RouteMessage": {8, 0}, + "syscall.InterfaceMessage": {8, 0}, + "syscall.InterfaceAddrMessage": {8, 0}, + "syscall.ParseRoutingMessage": {8, 0}, + "syscall.ParseRoutingSockaddr": {8, 0}, + "syscall.InterfaceAnnounceMessage": {7, 0}, + "syscall.InterfaceMulticastAddrMessage": {7, 0}, + "syscall.FormatMessage": {5, 0}, + "syscall.PostQueuedCompletionStatus": {17, 0}, + "syscall.GetQueuedCompletionStatus": {17, 0}, + "syscall.CreateIoCompletionPort": {17, 0}, + + // Not marked as deprecated with a recognizable header, but deprecated nonetheless. 
+ "io/ioutil": {16, 16}, +} + +// Last imported from Go at 32b73ae18026e8a9dc4c5aa49999b1ea445bc68c with the following numbers of deprecations: +// +// chulak go@master ./src $ rg -c "Deprecated: " +// vendor/golang.org/x/crypto/curve25519/curve25519.go:1 +// vendor/golang.org/x/text/transform/transform.go:1 +// cmd/compile/internal/types/sym.go:2 +// syscall/route_netbsd.go:1 +// syscall/route_bsd.go:7 +// syscall/lsf_linux.go:6 +// syscall/exec_unix.go:1 +// syscall/route_darwin.go:1 +// syscall/route_freebsd.go:2 +// syscall/route_dragonfly.go:2 +// syscall/bpf_darwin.go:18 +// syscall/route_openbsd.go:1 +// syscall/syscall_windows.go:6 +// syscall/syscall.go:3 +// syscall/bpf_bsd.go:18 +// cmd/compile/internal/types2/type.go:2 +// syscall/exec_plan9.go:1 +// cmd/internal/obj/textflag.go:1 +// cmd/internal/obj/link.go:5 +// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go:1 +// cmd/vendor/golang.org/x/mod/semver/semver.go:1 +// cmd/vendor/golang.org/x/sys/windows/syscall_windows.go:2 +// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go:1 +// cmd/vendor/golang.org/x/mod/modfile/rule.go:2 +// cmd/vendor/golang.org/x/sys/windows/security_windows.go:1 +// cmd/go/internal/modcmd/edit.go:1 +// cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt:2 +// cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt:2 +// cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt:2 +// encoding/csv/reader.go:2 +// encoding/json/decode.go:1 +// encoding/json/encode.go:1 +// cmd/go/testdata/script/mod_list_deprecated.txt:2 +// cmd/go/testdata/script/mod_deprecate_message.txt:4 +// cmd/go/testdata/script/mod_list_deprecated_replace.txt:1 +// runtime/cpuprof.go:1 +// cmd/go/testdata/script/mod_edit.txt:1 +// crypto/tls/common.go:6 +// crypto/rc4/rc4.go:1 +// crypto/dsa/dsa.go:1 +// path/filepath/path_unix.go:1 +// path/filepath/path_windows.go:1 +// path/filepath/path_plan9.go:1 +// regexp/regexp.go:1 +// crypto/x509/pem_decrypt.go:3 +// crypto/x509/x509.go:1 +// archive/zip/struct.go:6 +// archive/tar/common.go:2 +// debug/gosym/pclntab.go:2 +// compress/flate/inflate.go:2 +// image/geom.go:2 +// image/jpeg/reader.go:1 +// os/file.go:1 +// net/dial.go:2 +// net/http/server.go:2 +// net/http/socks_bundle.go:1 +// net/http/httputil/persist.go:8 +// net/http/request.go:6 +// net/http/transport.go:3 +// net/http/httptest/recorder.go:1 +// go/doc/doc.go:1 +// go/types/errorcodes.go:1 +// go/types/type.go:2 +// database/sql/driver/driver.go:6 +// text/template/parse/node.go:5 +// go/importer/importer.go:2 diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go new file mode 100644 index 000000000..dfcd1560d --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/convert.go @@ -0,0 +1,242 @@ +package pattern + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +var astTypes = map[string]reflect.Type{ + "Ellipsis": reflect.TypeOf(ast.Ellipsis{}), + "RangeStmt": reflect.TypeOf(ast.RangeStmt{}), + "AssignStmt": reflect.TypeOf(ast.AssignStmt{}), + "IndexExpr": reflect.TypeOf(ast.IndexExpr{}), + "Ident": reflect.TypeOf(ast.Ident{}), + "ValueSpec": reflect.TypeOf(ast.ValueSpec{}), + "GenDecl": reflect.TypeOf(ast.GenDecl{}), + "BinaryExpr": reflect.TypeOf(ast.BinaryExpr{}), + "ForStmt": reflect.TypeOf(ast.ForStmt{}), + "ArrayType": reflect.TypeOf(ast.ArrayType{}), + "DeferStmt": reflect.TypeOf(ast.DeferStmt{}), + "MapType": reflect.TypeOf(ast.MapType{}), + "ReturnStmt": reflect.TypeOf(ast.ReturnStmt{}), + "SliceExpr": 
reflect.TypeOf(ast.SliceExpr{}), + "StarExpr": reflect.TypeOf(ast.StarExpr{}), + "UnaryExpr": reflect.TypeOf(ast.UnaryExpr{}), + "SendStmt": reflect.TypeOf(ast.SendStmt{}), + "SelectStmt": reflect.TypeOf(ast.SelectStmt{}), + "ImportSpec": reflect.TypeOf(ast.ImportSpec{}), + "IfStmt": reflect.TypeOf(ast.IfStmt{}), + "GoStmt": reflect.TypeOf(ast.GoStmt{}), + "Field": reflect.TypeOf(ast.Field{}), + "SelectorExpr": reflect.TypeOf(ast.SelectorExpr{}), + "StructType": reflect.TypeOf(ast.StructType{}), + "KeyValueExpr": reflect.TypeOf(ast.KeyValueExpr{}), + "FuncType": reflect.TypeOf(ast.FuncType{}), + "FuncLit": reflect.TypeOf(ast.FuncLit{}), + "FuncDecl": reflect.TypeOf(ast.FuncDecl{}), + "ChanType": reflect.TypeOf(ast.ChanType{}), + "CallExpr": reflect.TypeOf(ast.CallExpr{}), + "CaseClause": reflect.TypeOf(ast.CaseClause{}), + "CommClause": reflect.TypeOf(ast.CommClause{}), + "CompositeLit": reflect.TypeOf(ast.CompositeLit{}), + "EmptyStmt": reflect.TypeOf(ast.EmptyStmt{}), + "SwitchStmt": reflect.TypeOf(ast.SwitchStmt{}), + "TypeSwitchStmt": reflect.TypeOf(ast.TypeSwitchStmt{}), + "TypeAssertExpr": reflect.TypeOf(ast.TypeAssertExpr{}), + "TypeSpec": reflect.TypeOf(ast.TypeSpec{}), + "InterfaceType": reflect.TypeOf(ast.InterfaceType{}), + "BranchStmt": reflect.TypeOf(ast.BranchStmt{}), + "IncDecStmt": reflect.TypeOf(ast.IncDecStmt{}), + "BasicLit": reflect.TypeOf(ast.BasicLit{}), +} + +func ASTToNode(node interface{}) Node { + switch node := node.(type) { + case *ast.File: + panic("cannot convert *ast.File to Node") + case nil: + return Nil{} + case string: + return String(node) + case token.Token: + return Token(node) + case *ast.ExprStmt: + return ASTToNode(node.X) + case *ast.BlockStmt: + if node == nil { + return Nil{} + } + return ASTToNode(node.List) + case *ast.FieldList: + if node == nil { + return Nil{} + } + return ASTToNode(node.List) + case *ast.BasicLit: + if node == nil { + return Nil{} + } + case *ast.ParenExpr: + return ASTToNode(node.X) + } + + if node, ok := node.(ast.Node); ok { + name := reflect.TypeOf(node).Elem().Name() + T, ok := structNodes[name] + if !ok { + panic(fmt.Sprintf("internal error: unhandled type %T", node)) + } + + if reflect.ValueOf(node).IsNil() { + return Nil{} + } + v := reflect.ValueOf(node).Elem() + objs := make([]Node, T.NumField()) + for i := 0; i < T.NumField(); i++ { + f := v.FieldByName(T.Field(i).Name) + objs[i] = ASTToNode(f.Interface()) + } + + n, err := populateNode(name, objs, false) + if err != nil { + panic(fmt.Sprintf("internal error: %s", err)) + } + return n + } + + s := reflect.ValueOf(node) + if s.Kind() == reflect.Slice { + if s.Len() == 0 { + return List{} + } + if s.Len() == 1 { + return ASTToNode(s.Index(0).Interface()) + } + + tail := List{} + for i := s.Len() - 1; i >= 0; i-- { + head := ASTToNode(s.Index(i).Interface()) + l := List{ + Head: head, + Tail: tail, + } + tail = l + } + return tail + } + + panic(fmt.Sprintf("internal error: unhandled type %T", node)) +} + +func NodeToAST(node Node, state State) interface{} { + switch node := node.(type) { + case Binding: + v, ok := state[node.Name] + if !ok { + // really we want to return an error here + panic("XXX") + } + switch v := v.(type) { + case types.Object: + return &ast.Ident{Name: v.Name()} + default: + return v + } + case Builtin, Any, Object, Function, Not, Or: + panic("XXX") + case List: + if (node == List{}) { + return []ast.Node{} + } + x := []ast.Node{NodeToAST(node.Head, state).(ast.Node)} + x = append(x, NodeToAST(node.Tail, state).([]ast.Node)...) 
+ return x + case Token: + return token.Token(node) + case String: + return string(node) + case Nil: + return nil + } + + name := reflect.TypeOf(node).Name() + T, ok := astTypes[name] + if !ok { + panic(fmt.Sprintf("internal error: unhandled type %T", node)) + } + v := reflect.ValueOf(node) + out := reflect.New(T) + for i := 0; i < T.NumField(); i++ { + fNode := v.FieldByName(T.Field(i).Name) + if (fNode == reflect.Value{}) { + continue + } + fAST := out.Elem().FieldByName(T.Field(i).Name) + switch fAST.Type().Kind() { + case reflect.Slice: + c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) + if c.Kind() != reflect.Slice { + // it's a single node in the pattern, we have to wrap + // it in a slice + slice := reflect.MakeSlice(fAST.Type(), 1, 1) + slice.Index(0).Set(c) + c = slice + } + switch fAST.Interface().(type) { + case []ast.Node: + switch cc := c.Interface().(type) { + case []ast.Node: + fAST.Set(c) + case []ast.Expr: + var slice []ast.Node + for _, el := range cc { + slice = append(slice, el) + } + fAST.Set(reflect.ValueOf(slice)) + default: + panic("XXX") + } + case []ast.Expr: + switch cc := c.Interface().(type) { + case []ast.Node: + var slice []ast.Expr + for _, el := range cc { + slice = append(slice, el.(ast.Expr)) + } + fAST.Set(reflect.ValueOf(slice)) + case []ast.Expr: + fAST.Set(c) + default: + panic("XXX") + } + default: + panic("XXX") + } + case reflect.Int: + c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) + switch c.Kind() { + case reflect.String: + tok, ok := tokensByString[c.Interface().(string)] + if !ok { + // really we want to return an error here + panic("XXX") + } + fAST.SetInt(int64(tok)) + case reflect.Int: + fAST.Set(c) + default: + panic(fmt.Sprintf("internal error: unexpected kind %s", c.Kind())) + } + default: + r := NodeToAST(fNode.Interface().(Node), state) + if r != nil { + fAST.Set(reflect.ValueOf(r)) + } + } + } + + return out.Interface().(ast.Node) +} diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go new file mode 100644 index 000000000..974617543 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/doc.go @@ -0,0 +1,273 @@ +/* +Package pattern implements a simple language for pattern matching Go ASTs. + +Design decisions and trade-offs + +The language is designed specifically for the task of filtering ASTs +to simplify the implementation of analyses in staticcheck. +It is also intended to be trivial to parse and execute. + +To that end, we make certain decisions that make the language more +suited to its task, while making certain queries infeasible. + +Furthermore, it is fully expected that the majority of analyses will still require ordinary Go code +to further process the filtered AST, to make use of type information and to enforce complex invariants. +It is not our goal to design a scripting language for writing entire checks in. + +The language + +At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching. +Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node. +There is a fixed set of nodes, identified by name, and with the exception of the Or node, all nodes have a fixed number of arguments. +In addition to nodes, there are atoms, which represent basic units such as strings or the nil value. + +Pattern matching is implemented via bindings, represented by the Binding node. 
+A Binding can match nodes and associate them with names, to later recall the nodes. +This allows for expressing "this node must be equal to that node" constraints. + +To simplify writing and reading patterns, a small amount of additional syntax exists on top of nodes and atoms. +This additional syntax doesn't add any new features of its own, it simply provides shortcuts to creating nodes and atoms. + +To show an example of a pattern, first consider this snippet of Go code: + + if x := fn(); x != nil { + for _, v := range x { + println(v, x) + } + } + +The corresponding AST expressed as an idiomatic pattern would look as follows: + + (IfStmt + (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) + (BinaryExpr (Ident "x") "!=" (Ident "nil")) + (RangeStmt + (Ident "_") (Ident "v") ":=" (Ident "x") + (CallExpr (Ident "println") [(Ident "v") (Ident "x")])) + nil) + +Two things are worth noting about this representation. +First, the [el1 el2 ...] syntax is a short-hand for creating lists. +It is a short-hand for el1:el2:[], which itself is a short-hand for (List el1 (List el2 (List nil nil)). +Second, note the absence of a lot of lists in places that normally accept lists. +For example, assignment assigns a number of right-hands to a number of left-hands, yet our AssignStmt is lacking any form of list. +This is due to the fact that a single node can match a list of exactly one element. +Thus, the two following forms have identical matching behavior: + + (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) + (AssignStmt [(Ident "x")] ":=" [(CallExpr (Ident "fn") [])]) + +This section serves as an overview of the language's syntax. +More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections. + +Pattern matching + +TODO write about pattern matching + +- inspired by haskell syntax, but much, much simpler and naive + +Node types + +The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic. + +Nodes that map directly to AST nodes are named identically to the types in the go/ast package. +What follows is an exhaustive list of these nodes: + + (ArrayType len elt) + (AssignStmt lhs tok rhs) + (BasicLit kind value) + (BinaryExpr x op y) + (BranchStmt tok label) + (CallExpr fun args) + (CaseClause list body) + (ChanType dir value) + (CommClause comm body) + (CompositeLit type elts) + (DeferStmt call) + (Ellipsis elt) + (EmptyStmt) + (Field names type tag) + (ForStmt init cond post body) + (FuncDecl recv name type body) + (FuncLit type body) + (FuncType params results) + (GenDecl specs) + (GoStmt call) + (Ident name) + (IfStmt init cond body else) + (ImportSpec name path) + (IncDecStmt x tok) + (IndexExpr x index) + (InterfaceType methods) + (KeyValueExpr key value) + (MapType key value) + (RangeStmt key value tok x body) + (ReturnStmt results) + (SelectStmt body) + (SelectorExpr x sel) + (SendStmt chan value) + (SliceExpr x low high max) + (StarExpr x) + (StructType fields) + (SwitchStmt init tag body) + (TypeAssertExpr) + (TypeSpec name type) + (TypeSwitchStmt init assign body) + (UnaryExpr op x) + (ValueSpec names type values) + +Additionally, there are the String, Token and nil atoms. +Strings are double-quoted string literals, as in (Ident "someName"). 
+Tokens are also represented as double-quoted string literals, but are converted to token.Token values in contexts that require tokens, +such as in (BinaryExpr x "<" y), where "<" is transparently converted to token.LSS during matching. +The keyword 'nil' denotes the nil value, which represents the absence of any value. + +We also define the (List head tail) node, which is used to represent sequences of elements as a singly linked list. +The head is a single element, and the tail is the remainder of the list. +For example, + + (List "foo" (List "bar" (List "baz" (List nil nil)))) + +represents a list of three elements, "foo", "bar" and "baz". There is dedicated syntax for writing lists, which looks as follows: + + ["foo" "bar" "baz"] + +This syntax is itself syntactic sugar for the following form: + + "foo":"bar":"baz":[] + +This form is of particular interest for pattern matching, as it allows matching on the head and tail. For example, + + "foo":"bar":_ + +would match any list with at least two elements, where the first two elements are "foo" and "bar". This is equivalent to writing + + (List "foo" (List "bar" _)) + +Note that it is not possible to match from the end of the list. +That is, there is no way to express a query such as "a list of any length where the last element is foo". + +Note that unlike in LISP, nil and empty lists are distinct from one another. +In patterns, with respect to lists, nil is akin to Go's untyped nil. +It will match a nil ast.Node, but it will not match a nil []ast.Expr. Nil will, however, match pointers to named types such as *ast.Ident. +Similarly, lists are akin to Go's +slices. An empty list will match both a nil and an empty []ast.Expr, but it will not match a nil ast.Node. + +Due to the difference between nil and empty lists, an empty list is represented as (List nil nil), i.e. a list with no head or tail. +Similarly, a list of one element is represented as (List el (List nil nil)). Unlike in LISP, it cannot be represented by (List el nil). + +Finally, there are nodes that implement special logic or matching behavior. + +(Any) matches any value. The underscore (_) maps to this node, making the following two forms equivalent: + + (Ident _) + (Ident (Any)) + +(Builtin name) matches a built-in identifier or function by name. +This is a type-aware variant of (Ident name). +Instead of only comparing the name, it resolves the object behind the name and makes sure it's a pre-declared identifier. + +For example, in the following piece of code + + func fn() { + println(true) + true := false + println(true) + } + +the pattern + + (Builtin "true") + +will match exactly once, on the first use of 'true' in the function. +Subsequent occurrences of 'true' no longer refer to the pre-declared identifier. + +(Object name) matches an identifier by name, but yields the +types.Object it refers to. + +(Function name) matches ast.Idents and ast.SelectorExprs that refer to a function with a given fully qualified name. +For example, "net/url.PathEscape" matches the PathEscape function in the net/url package, +and "(net/url.EscapeError).Error" refers to the Error method on the net/url.EscapeError type, +either on an instance of the type, or on the type itself. 
+ +For example, the following patterns match the following lines of code: + + (CallExpr (Function "fmt.Println") _) // pattern 1 + (CallExpr (Function "(net/url.EscapeError).Error") _) // pattern 2 + + fmt.Println("hello, world") // matches pattern 1 + var x url.EscapeError + x.Error() // matches pattern 2 + (url.EscapeError).Error(x) // also matches pattern 2 + +(Binding name node) creates or uses a binding. +Bindings work like variable assignments, allowing referring to already matched nodes. +As an example, bindings are necessary to match self-assignment of the form "x = x", +since we need to express that the right-hand side is identical to the left-hand side. + +If a binding's node is not nil, the matcher will attempt to match a node according to the pattern. +If a binding's node is nil, the binding will either recall an existing value, or match the Any node. +It is an error to provide a non-nil node to a binding that has already been bound. + +Referring back to the earlier example, the following pattern will match self-assignment of idents: + + (AssignStmt (Binding "lhs" (Ident _)) "=" (Binding "lhs" nil)) + +Because bindings are a crucial component of pattern matching, there is special syntax for creating and recalling bindings. +Lower-case names refer to bindings. If standing on its own, the name "foo" will be equivalent to (Binding "foo" nil). +If a name is followed by an at-sign (@) then it will create a binding for the node that follows. +Together, this allows us to rewrite the earlier example as follows: + + (AssignStmt lhs@(Ident _) "=" lhs) + +(Or nodes...) is a variadic node that tries matching each node until one succeeds. For example, the following pattern matches all idents of name "foo" or "bar": + + (Ident (Or "foo" "bar")) + +We could also have written + + (Or (Ident "foo") (Ident "bar")) + +and achieved the same result. We can also mix different kinds of nodes: + + (Or (Ident "foo") (CallExpr (Ident "bar") _)) + +When using bindings inside of nodes used inside Or, all or none of the bindings will be bound. +That is, partially matched nodes that ultimately failed to match will not produce any bindings observable outside of the matching attempt. +We can thus write + + (Or (Ident name) (CallExpr name)) + +and 'name' will either be a String if the first option matched, or an Ident or SelectorExpr if the second option matched. + +(Not node) + +The Not node negates a match. For example, (Not (Ident _)) will match all nodes that aren't identifiers. + +ChanDir(0) + +Automatic unnesting of AST nodes + +The Go AST has several types of nodes that wrap other nodes. +To simplify matching, we automatically unwrap some of these nodes. + +These nodes are ExprStmt (for using expressions in a statement context), +ParenExpr (for parenthesized expressions), +DeclStmt (for declarations in a statement context), +and LabeledStmt (for labeled statements). + +Thus, the query + + (FuncLit _ [(CallExpr _ _)] + +will match a function literal containing a single function call, +even though in the actual Go AST, the CallExpr is nested inside an ExprStmt, +as function bodies are made up of sequences of statements. + +On the flip-side, there is no way to specifically match these wrapper nodes. 
+For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code: + + ((x)) += 2 + +*/ +package pattern diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go new file mode 100644 index 000000000..52e7df974 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/fuzz.go @@ -0,0 +1,50 @@ +// +build gofuzz + +package pattern + +import ( + "go/ast" + goparser "go/parser" + "go/token" + "os" + "path/filepath" + "strings" +) + +var files []*ast.File + +func init() { + fset := token.NewFileSet() + filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error { + if err != nil { + // XXX error handling + panic(err) + } + if !strings.HasSuffix(path, ".go") { + return nil + } + f, err := goparser.ParseFile(fset, path, nil, 0) + if err != nil { + return nil + } + files = append(files, f) + return nil + }) +} + +func Fuzz(data []byte) int { + p := &Parser{} + pat, err := p.Parse(string(data)) + if err != nil { + if strings.Contains(err.Error(), "internal error") { + panic(err) + } + return 0 + } + _ = pat.Root.String() + + for _, f := range files { + Match(pat.Root, f) + } + return 1 +} diff --git a/vendor/honnef.co/go/tools/pattern/lexer.go b/vendor/honnef.co/go/tools/pattern/lexer.go new file mode 100644 index 000000000..fb72e392b --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/lexer.go @@ -0,0 +1,221 @@ +package pattern + +import ( + "fmt" + "go/token" + "unicode" + "unicode/utf8" +) + +type lexer struct { + f *token.File + + input string + start int + pos int + width int + items chan item +} + +type itemType int + +const eof = -1 + +const ( + itemError itemType = iota + itemLeftParen + itemRightParen + itemLeftBracket + itemRightBracket + itemTypeName + itemVariable + itemAt + itemColon + itemBlank + itemString + itemEOF +) + +func (typ itemType) String() string { + switch typ { + case itemError: + return "ERROR" + case itemLeftParen: + return "(" + case itemRightParen: + return ")" + case itemLeftBracket: + return "[" + case itemRightBracket: + return "]" + case itemTypeName: + return "TYPE" + case itemVariable: + return "VAR" + case itemAt: + return "@" + case itemColon: + return ":" + case itemBlank: + return "_" + case itemString: + return "STRING" + case itemEOF: + return "EOF" + default: + return fmt.Sprintf("itemType(%d)", typ) + } +} + +type item struct { + typ itemType + val string + pos int +} + +type stateFn func(*lexer) stateFn + +func (l *lexer) run() { + for state := lexStart; state != nil; { + state = state(l) + } + close(l.items) +} + +func (l *lexer) emitValue(t itemType, value string) { + l.items <- item{t, value, l.start} + l.start = l.pos +} + +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.input[l.start:l.pos], l.start} + l.start = l.pos +} + +func lexStart(l *lexer) stateFn { + switch r := l.next(); { + case r == eof: + l.emit(itemEOF) + return nil + case unicode.IsSpace(r): + l.ignore() + case r == '(': + l.emit(itemLeftParen) + case r == ')': + l.emit(itemRightParen) + case r == '[': + l.emit(itemLeftBracket) + case r == ']': + l.emit(itemRightBracket) + case r == '@': + l.emit(itemAt) + case r == ':': + l.emit(itemColon) + case r == '_': + l.emit(itemBlank) + case r == '"': + l.backup() + return lexString + case unicode.IsUpper(r): + l.backup() + return lexType + case unicode.IsLower(r): + l.backup() + return lexVariable + default: + return l.errorf("unexpected character %c", r) + } + return lexStart +} + +func (l *lexer) next() (r rune) { + if 
l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + + if r == '\n' { + l.f.AddLine(l.pos) + } + + l.pos += l.width + + return r +} + +func (l *lexer) ignore() { + l.start = l.pos +} + +func (l *lexer) backup() { + l.pos -= l.width +} + +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + // TODO(dh): emit position information in errors + l.items <- item{ + itemError, + fmt.Sprintf(format, args...), + l.start, + } + return nil +} + +func isAlphaNumeric(r rune) bool { + return r >= '0' && r <= '9' || + r >= 'a' && r <= 'z' || + r >= 'A' && r <= 'Z' +} + +func lexString(l *lexer) stateFn { + l.next() // skip quote + escape := false + + var runes []rune + for { + switch r := l.next(); r { + case eof: + return l.errorf("unterminated string") + case '"': + if !escape { + l.emitValue(itemString, string(runes)) + return lexStart + } else { + runes = append(runes, '"') + escape = false + } + case '\\': + if escape { + runes = append(runes, '\\') + escape = false + } else { + escape = true + } + default: + runes = append(runes, r) + } + } +} + +func lexType(l *lexer) stateFn { + l.next() + for { + if !isAlphaNumeric(l.next()) { + l.backup() + l.emit(itemTypeName) + return lexStart + } + } +} + +func lexVariable(l *lexer) stateFn { + l.next() + for { + if !isAlphaNumeric(l.next()) { + l.backup() + l.emit(itemVariable) + return lexStart + } + } +} diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go new file mode 100644 index 000000000..ebbbdd469 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/match.go @@ -0,0 +1,547 @@ +package pattern + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +var tokensByString = map[string]Token{ + "INT": Token(token.INT), + "FLOAT": Token(token.FLOAT), + "IMAG": Token(token.IMAG), + "CHAR": Token(token.CHAR), + "STRING": Token(token.STRING), + "+": Token(token.ADD), + "-": Token(token.SUB), + "*": Token(token.MUL), + "/": Token(token.QUO), + "%": Token(token.REM), + "&": Token(token.AND), + "|": Token(token.OR), + "^": Token(token.XOR), + "<<": Token(token.SHL), + ">>": Token(token.SHR), + "&^": Token(token.AND_NOT), + "+=": Token(token.ADD_ASSIGN), + "-=": Token(token.SUB_ASSIGN), + "*=": Token(token.MUL_ASSIGN), + "/=": Token(token.QUO_ASSIGN), + "%=": Token(token.REM_ASSIGN), + "&=": Token(token.AND_ASSIGN), + "|=": Token(token.OR_ASSIGN), + "^=": Token(token.XOR_ASSIGN), + "<<=": Token(token.SHL_ASSIGN), + ">>=": Token(token.SHR_ASSIGN), + "&^=": Token(token.AND_NOT_ASSIGN), + "&&": Token(token.LAND), + "||": Token(token.LOR), + "<-": Token(token.ARROW), + "++": Token(token.INC), + "--": Token(token.DEC), + "==": Token(token.EQL), + "<": Token(token.LSS), + ">": Token(token.GTR), + "=": Token(token.ASSIGN), + "!": Token(token.NOT), + "!=": Token(token.NEQ), + "<=": Token(token.LEQ), + ">=": Token(token.GEQ), + ":=": Token(token.DEFINE), + "...": Token(token.ELLIPSIS), + "IMPORT": Token(token.IMPORT), + "VAR": Token(token.VAR), + "TYPE": Token(token.TYPE), + "CONST": Token(token.CONST), + "BREAK": Token(token.BREAK), + "CONTINUE": Token(token.CONTINUE), + "GOTO": Token(token.GOTO), + "FALLTHROUGH": Token(token.FALLTHROUGH), +} + +func maybeToken(node Node) (Node, bool) { + if node, ok := node.(String); ok { + if tok, ok := tokensByString[string(node)]; ok { + return tok, true + } + return node, false + } + return node, false +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + if _, ok := 
v.(Nil); ok { + return true + } + return false +} + +type matcher interface { + Match(*Matcher, interface{}) (interface{}, bool) +} + +type State = map[string]interface{} + +type Matcher struct { + TypesInfo *types.Info + State State +} + +func (m *Matcher) fork() *Matcher { + state := make(State, len(m.State)) + for k, v := range m.State { + state[k] = v + } + return &Matcher{ + TypesInfo: m.TypesInfo, + State: state, + } +} + +func (m *Matcher) merge(mc *Matcher) { + m.State = mc.State +} + +func (m *Matcher) Match(a Node, b ast.Node) bool { + m.State = State{} + _, ok := match(m, a, b) + return ok +} + +func Match(a Node, b ast.Node) (*Matcher, bool) { + m := &Matcher{} + ret := m.Match(a, b) + return m, ret +} + +// Match two items, which may be (Node, AST) or (AST, AST) +func match(m *Matcher, l, r interface{}) (interface{}, bool) { + if _, ok := r.(Node); ok { + panic("Node mustn't be on right side of match") + } + + switch l := l.(type) { + case *ast.ParenExpr: + return match(m, l.X, r) + case *ast.ExprStmt: + return match(m, l.X, r) + case *ast.DeclStmt: + return match(m, l.Decl, r) + case *ast.LabeledStmt: + return match(m, l.Stmt, r) + case *ast.BlockStmt: + return match(m, l.List, r) + case *ast.FieldList: + return match(m, l.List, r) + } + + switch r := r.(type) { + case *ast.ParenExpr: + return match(m, l, r.X) + case *ast.ExprStmt: + return match(m, l, r.X) + case *ast.DeclStmt: + return match(m, l, r.Decl) + case *ast.LabeledStmt: + return match(m, l, r.Stmt) + case *ast.BlockStmt: + if r == nil { + return match(m, l, nil) + } + return match(m, l, r.List) + case *ast.FieldList: + if r == nil { + return match(m, l, nil) + } + return match(m, l, r.List) + case *ast.BasicLit: + if r == nil { + return match(m, l, nil) + } + } + + if l, ok := l.(matcher); ok { + return l.Match(m, r) + } + + if l, ok := l.(Node); ok { + // Matching of pattern with concrete value + return matchNodeAST(m, l, r) + } + + if l == nil || r == nil { + return nil, l == r + } + + { + ln, ok1 := l.(ast.Node) + rn, ok2 := r.(ast.Node) + if ok1 && ok2 { + return matchAST(m, ln, rn) + } + } + + { + obj, ok := l.(types.Object) + if ok { + switch r := r.(type) { + case *ast.Ident: + return obj, obj == m.TypesInfo.ObjectOf(r) + case *ast.SelectorExpr: + return obj, obj == m.TypesInfo.ObjectOf(r.Sel) + default: + return obj, false + } + } + } + + { + ln, ok1 := l.([]ast.Expr) + rn, ok2 := r.([]ast.Expr) + if ok1 || ok2 { + if ok1 && !ok2 { + rn = []ast.Expr{r.(ast.Expr)} + } else if !ok1 && ok2 { + ln = []ast.Expr{l.(ast.Expr)} + } + + if len(ln) != len(rn) { + return nil, false + } + for i, ll := range ln { + if _, ok := match(m, ll, rn[i]); !ok { + return nil, false + } + } + return r, true + } + } + + { + ln, ok1 := l.([]ast.Stmt) + rn, ok2 := r.([]ast.Stmt) + if ok1 || ok2 { + if ok1 && !ok2 { + rn = []ast.Stmt{r.(ast.Stmt)} + } else if !ok1 && ok2 { + ln = []ast.Stmt{l.(ast.Stmt)} + } + + if len(ln) != len(rn) { + return nil, false + } + for i, ll := range ln { + if _, ok := match(m, ll, rn[i]); !ok { + return nil, false + } + } + return r, true + } + } + + { + ln, ok1 := l.([]*ast.Field) + rn, ok2 := r.([]*ast.Field) + if ok1 || ok2 { + if ok1 && !ok2 { + rn = []*ast.Field{r.(*ast.Field)} + } else if !ok1 && ok2 { + ln = []*ast.Field{l.(*ast.Field)} + } + + if len(ln) != len(rn) { + return nil, false + } + for i, ll := range ln { + if _, ok := match(m, ll, rn[i]); !ok { + return nil, false + } + } + return r, true + } + } + + panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r)) +} + +// Match a Node 
with an AST node +func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { + switch b := b.(type) { + case []ast.Stmt: + // 'a' is not a List or we'd be using its Match + // implementation. + + if len(b) != 1 { + return nil, false + } + return match(m, a, b[0]) + case []ast.Expr: + // 'a' is not a List or we'd be using its Match + // implementation. + + if len(b) != 1 { + return nil, false + } + return match(m, a, b[0]) + case ast.Node: + ra := reflect.ValueOf(a) + rb := reflect.ValueOf(b).Elem() + + if ra.Type().Name() != rb.Type().Name() { + return nil, false + } + + for i := 0; i < ra.NumField(); i++ { + af := ra.Field(i) + fieldName := ra.Type().Field(i).Name + bf := rb.FieldByName(fieldName) + if (bf == reflect.Value{}) { + panic(fmt.Sprintf("internal error: could not find field %s in type %t when comparing with %T", fieldName, b, a)) + } + ai := af.Interface() + bi := bf.Interface() + if ai == nil { + return b, bi == nil + } + if _, ok := match(m, ai.(Node), bi); !ok { + return b, false + } + } + return b, true + case nil: + return nil, a == Nil{} + default: + panic(fmt.Sprintf("unhandled type %T", b)) + } +} + +// Match two AST nodes +func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) { + ra := reflect.ValueOf(a) + rb := reflect.ValueOf(b) + + if ra.Type() != rb.Type() { + return nil, false + } + if ra.IsNil() || rb.IsNil() { + return rb, ra.IsNil() == rb.IsNil() + } + + ra = ra.Elem() + rb = rb.Elem() + for i := 0; i < ra.NumField(); i++ { + af := ra.Field(i) + bf := rb.Field(i) + if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup { + continue + } + + switch af.Kind() { + case reflect.Slice: + if af.Len() != bf.Len() { + return nil, false + } + for j := 0; j < af.Len(); j++ { + if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok { + return nil, false + } + } + case reflect.String: + if af.String() != bf.String() { + return nil, false + } + case reflect.Int: + if af.Int() != bf.Int() { + return nil, false + } + case reflect.Bool: + if af.Bool() != bf.Bool() { + return nil, false + } + case reflect.Ptr, reflect.Interface: + if _, ok := match(m, af.Interface(), bf.Interface()); !ok { + return nil, false + } + default: + panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface())) + } + } + return b, true +} + +func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) { + if isNil(b.Node) { + v, ok := m.State[b.Name] + if ok { + // Recall value + return match(m, v, node) + } + // Matching anything + b.Node = Any{} + } + + // Store value + if _, ok := m.State[b.Name]; ok { + panic(fmt.Sprintf("binding already created: %s", b.Name)) + } + new, ret := match(m, b.Node, node) + if ret { + m.State[b.Name] = new + } + return new, ret +} + +func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) { + return node, true +} + +func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) { + v := reflect.ValueOf(node) + if v.Kind() == reflect.Slice { + if isNil(l.Head) { + return node, v.Len() == 0 + } + if v.Len() == 0 { + return nil, false + } + // OPT(dh): don't check the entire tail if head didn't match + _, ok1 := match(m, l.Head, v.Index(0).Interface()) + _, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface()) + return node, ok1 && ok2 + } + // Our empty list does not equal an untyped Go nil. This way, we can + // tell apart an if with no else and an if with an empty else. 
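+	//
+	// Illustrative note, not part of the upstream source: in go/ast,
+	// `if cond {}` has a nil Else field, whereas `if cond {} else {}`
+	// carries an *ast.BlockStmt whose List is empty. The empty else
+	// reaches this method as a zero-length slice and matches above; the
+	// absent else arrives as nil, is not a slice, and falls through to
+	// the failure return below.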
+ return nil, false +} + +func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) { + switch o := node.(type) { + case token.Token: + if tok, ok := maybeToken(s); ok { + return match(m, tok, node) + } + return nil, false + case string: + return o, string(s) == o + default: + return nil, false + } +} + +func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) { + o, ok := node.(token.Token) + if !ok { + return nil, false + } + return o, token.Token(tok) == o +} + +func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) { + return nil, isNil(node) || reflect.ValueOf(node).IsNil() +} + +func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) { + r, ok := match(m, Ident(builtin), node) + if !ok { + return nil, false + } + ident := r.(*ast.Ident) + obj := m.TypesInfo.ObjectOf(ident) + if obj != types.Universe.Lookup(ident.Name) { + return nil, false + } + return ident, true +} + +func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) { + r, ok := match(m, Ident(obj), node) + if !ok { + return nil, false + } + ident := r.(*ast.Ident) + + id := m.TypesInfo.ObjectOf(ident) + _, ok = match(m, obj.Name, ident.Name) + return id, ok +} + +func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) { + var name string + var obj types.Object + + r, ok := match(m, Or{Nodes: []Node{Ident{Any{}}, SelectorExpr{Any{}, Any{}}}}, node) + if !ok { + return nil, false + } + + switch r := r.(type) { + case *ast.Ident: + obj = m.TypesInfo.ObjectOf(r) + switch obj := obj.(type) { + case *types.Func: + // OPT(dh): optimize this similar to code.FuncName + name = obj.FullName() + case *types.Builtin: + name = obj.Name() + default: + return nil, false + } + case *ast.SelectorExpr: + var ok bool + obj, ok = m.TypesInfo.ObjectOf(r.Sel).(*types.Func) + if !ok { + return nil, false + } + // OPT(dh): optimize this similar to code.FuncName + name = obj.(*types.Func).FullName() + default: + panic("unreachable") + } + _, ok = match(m, fn.Name, name) + return obj, ok +} + +func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) { + for _, opt := range or.Nodes { + mc := m.fork() + if ret, ok := match(mc, opt, node); ok { + m.merge(mc) + return ret, true + } + } + return nil, false +} + +func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) { + _, ok := match(m, not.Node, node) + if ok { + return nil, false + } + return node, true +} + +var ( + // Types of fields in go/ast structs that we want to skip + rtTokPos = reflect.TypeOf(token.Pos(0)) + rtObject = reflect.TypeOf((*ast.Object)(nil)) + rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil)) +) + +var ( + _ matcher = Binding{} + _ matcher = Any{} + _ matcher = List{} + _ matcher = String("") + _ matcher = Token(0) + _ matcher = Nil{} + _ matcher = Builtin{} + _ matcher = Object{} + _ matcher = Function{} + _ matcher = Or{} + _ matcher = Not{} +) diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go new file mode 100644 index 000000000..68cc54b2e --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/parser.go @@ -0,0 +1,463 @@ +package pattern + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" +) + +type Pattern struct { + Root Node + // Relevant contains instances of ast.Node that could potentially + // initiate a successful match of the pattern. 
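+	//
+	// Illustrative note, not part of the upstream source: a caller can use
+	// this slice to pre-filter its AST traversal, visiting only node types
+	// that could begin a match instead of invoking Matcher.Match on every
+	// node in the file.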
+ Relevant []reflect.Type +} + +func MustParse(s string) Pattern { + p := &Parser{AllowTypeInfo: true} + pat, err := p.Parse(s) + if err != nil { + panic(err) + } + return pat +} + +func roots(node Node) []reflect.Type { + switch node := node.(type) { + case Or: + var out []reflect.Type + for _, el := range node.Nodes { + out = append(out, roots(el)...) + } + return out + case Not: + return roots(node.Node) + case Binding: + return roots(node.Node) + case Nil, nil: + // this branch is reached via bindings + return allTypes + default: + Ts, ok := nodeToASTTypes[reflect.TypeOf(node)] + if !ok { + panic(fmt.Sprintf("internal error: unhandled type %T", node)) + } + return Ts + } +} + +var allTypes = []reflect.Type{ + reflect.TypeOf((*ast.RangeStmt)(nil)), + reflect.TypeOf((*ast.AssignStmt)(nil)), + reflect.TypeOf((*ast.IndexExpr)(nil)), + reflect.TypeOf((*ast.Ident)(nil)), + reflect.TypeOf((*ast.ValueSpec)(nil)), + reflect.TypeOf((*ast.GenDecl)(nil)), + reflect.TypeOf((*ast.BinaryExpr)(nil)), + reflect.TypeOf((*ast.ForStmt)(nil)), + reflect.TypeOf((*ast.ArrayType)(nil)), + reflect.TypeOf((*ast.DeferStmt)(nil)), + reflect.TypeOf((*ast.MapType)(nil)), + reflect.TypeOf((*ast.ReturnStmt)(nil)), + reflect.TypeOf((*ast.SliceExpr)(nil)), + reflect.TypeOf((*ast.StarExpr)(nil)), + reflect.TypeOf((*ast.UnaryExpr)(nil)), + reflect.TypeOf((*ast.SendStmt)(nil)), + reflect.TypeOf((*ast.SelectStmt)(nil)), + reflect.TypeOf((*ast.ImportSpec)(nil)), + reflect.TypeOf((*ast.IfStmt)(nil)), + reflect.TypeOf((*ast.GoStmt)(nil)), + reflect.TypeOf((*ast.Field)(nil)), + reflect.TypeOf((*ast.SelectorExpr)(nil)), + reflect.TypeOf((*ast.StructType)(nil)), + reflect.TypeOf((*ast.KeyValueExpr)(nil)), + reflect.TypeOf((*ast.FuncType)(nil)), + reflect.TypeOf((*ast.FuncLit)(nil)), + reflect.TypeOf((*ast.FuncDecl)(nil)), + reflect.TypeOf((*ast.ChanType)(nil)), + reflect.TypeOf((*ast.CallExpr)(nil)), + reflect.TypeOf((*ast.CaseClause)(nil)), + reflect.TypeOf((*ast.CommClause)(nil)), + reflect.TypeOf((*ast.CompositeLit)(nil)), + reflect.TypeOf((*ast.EmptyStmt)(nil)), + reflect.TypeOf((*ast.SwitchStmt)(nil)), + reflect.TypeOf((*ast.TypeSwitchStmt)(nil)), + reflect.TypeOf((*ast.TypeAssertExpr)(nil)), + reflect.TypeOf((*ast.TypeSpec)(nil)), + reflect.TypeOf((*ast.InterfaceType)(nil)), + reflect.TypeOf((*ast.BranchStmt)(nil)), + reflect.TypeOf((*ast.IncDecStmt)(nil)), + reflect.TypeOf((*ast.BasicLit)(nil)), +} + +var nodeToASTTypes = map[reflect.Type][]reflect.Type{ + reflect.TypeOf(String("")): nil, + reflect.TypeOf(Token(0)): nil, + reflect.TypeOf(List{}): {reflect.TypeOf((*ast.BlockStmt)(nil)), reflect.TypeOf((*ast.FieldList)(nil))}, + reflect.TypeOf(Builtin{}): {reflect.TypeOf((*ast.Ident)(nil))}, + reflect.TypeOf(Object{}): {reflect.TypeOf((*ast.Ident)(nil))}, + reflect.TypeOf(Function{}): {reflect.TypeOf((*ast.Ident)(nil)), reflect.TypeOf((*ast.SelectorExpr)(nil))}, + reflect.TypeOf(Any{}): allTypes, + reflect.TypeOf(RangeStmt{}): {reflect.TypeOf((*ast.RangeStmt)(nil))}, + reflect.TypeOf(AssignStmt{}): {reflect.TypeOf((*ast.AssignStmt)(nil))}, + reflect.TypeOf(IndexExpr{}): {reflect.TypeOf((*ast.IndexExpr)(nil))}, + reflect.TypeOf(Ident{}): {reflect.TypeOf((*ast.Ident)(nil))}, + reflect.TypeOf(ValueSpec{}): {reflect.TypeOf((*ast.ValueSpec)(nil))}, + reflect.TypeOf(GenDecl{}): {reflect.TypeOf((*ast.GenDecl)(nil))}, + reflect.TypeOf(BinaryExpr{}): {reflect.TypeOf((*ast.BinaryExpr)(nil))}, + reflect.TypeOf(ForStmt{}): {reflect.TypeOf((*ast.ForStmt)(nil))}, + reflect.TypeOf(ArrayType{}): {reflect.TypeOf((*ast.ArrayType)(nil))}, + 
reflect.TypeOf(DeferStmt{}): {reflect.TypeOf((*ast.DeferStmt)(nil))}, + reflect.TypeOf(MapType{}): {reflect.TypeOf((*ast.MapType)(nil))}, + reflect.TypeOf(ReturnStmt{}): {reflect.TypeOf((*ast.ReturnStmt)(nil))}, + reflect.TypeOf(SliceExpr{}): {reflect.TypeOf((*ast.SliceExpr)(nil))}, + reflect.TypeOf(StarExpr{}): {reflect.TypeOf((*ast.StarExpr)(nil))}, + reflect.TypeOf(UnaryExpr{}): {reflect.TypeOf((*ast.UnaryExpr)(nil))}, + reflect.TypeOf(SendStmt{}): {reflect.TypeOf((*ast.SendStmt)(nil))}, + reflect.TypeOf(SelectStmt{}): {reflect.TypeOf((*ast.SelectStmt)(nil))}, + reflect.TypeOf(ImportSpec{}): {reflect.TypeOf((*ast.ImportSpec)(nil))}, + reflect.TypeOf(IfStmt{}): {reflect.TypeOf((*ast.IfStmt)(nil))}, + reflect.TypeOf(GoStmt{}): {reflect.TypeOf((*ast.GoStmt)(nil))}, + reflect.TypeOf(Field{}): {reflect.TypeOf((*ast.Field)(nil))}, + reflect.TypeOf(SelectorExpr{}): {reflect.TypeOf((*ast.SelectorExpr)(nil))}, + reflect.TypeOf(StructType{}): {reflect.TypeOf((*ast.StructType)(nil))}, + reflect.TypeOf(KeyValueExpr{}): {reflect.TypeOf((*ast.KeyValueExpr)(nil))}, + reflect.TypeOf(FuncType{}): {reflect.TypeOf((*ast.FuncType)(nil))}, + reflect.TypeOf(FuncLit{}): {reflect.TypeOf((*ast.FuncLit)(nil))}, + reflect.TypeOf(FuncDecl{}): {reflect.TypeOf((*ast.FuncDecl)(nil))}, + reflect.TypeOf(ChanType{}): {reflect.TypeOf((*ast.ChanType)(nil))}, + reflect.TypeOf(CallExpr{}): {reflect.TypeOf((*ast.CallExpr)(nil))}, + reflect.TypeOf(CaseClause{}): {reflect.TypeOf((*ast.CaseClause)(nil))}, + reflect.TypeOf(CommClause{}): {reflect.TypeOf((*ast.CommClause)(nil))}, + reflect.TypeOf(CompositeLit{}): {reflect.TypeOf((*ast.CompositeLit)(nil))}, + reflect.TypeOf(EmptyStmt{}): {reflect.TypeOf((*ast.EmptyStmt)(nil))}, + reflect.TypeOf(SwitchStmt{}): {reflect.TypeOf((*ast.SwitchStmt)(nil))}, + reflect.TypeOf(TypeSwitchStmt{}): {reflect.TypeOf((*ast.TypeSwitchStmt)(nil))}, + reflect.TypeOf(TypeAssertExpr{}): {reflect.TypeOf((*ast.TypeAssertExpr)(nil))}, + reflect.TypeOf(TypeSpec{}): {reflect.TypeOf((*ast.TypeSpec)(nil))}, + reflect.TypeOf(InterfaceType{}): {reflect.TypeOf((*ast.InterfaceType)(nil))}, + reflect.TypeOf(BranchStmt{}): {reflect.TypeOf((*ast.BranchStmt)(nil))}, + reflect.TypeOf(IncDecStmt{}): {reflect.TypeOf((*ast.IncDecStmt)(nil))}, + reflect.TypeOf(BasicLit{}): {reflect.TypeOf((*ast.BasicLit)(nil))}, +} + +var requiresTypeInfo = map[string]bool{ + "Function": true, + "Builtin": true, + "Object": true, +} + +type Parser struct { + // Allow nodes that rely on type information + AllowTypeInfo bool + + lex *lexer + cur item + last *item + items chan item +} + +func (p *Parser) Parse(s string) (Pattern, error) { + p.cur = item{} + p.last = nil + p.items = nil + + fset := token.NewFileSet() + p.lex = &lexer{ + f: fset.AddFile("